diff --git a/.github/workflows/build_and_push_docker_images.yaml b/.github/workflows/build_and_push_docker_images.yaml index c54ae7bd9..1b724d9e3 100644 --- a/.github/workflows/build_and_push_docker_images.yaml +++ b/.github/workflows/build_and_push_docker_images.yaml @@ -121,6 +121,8 @@ jobs: - aws-arm64 - ubuntu-20.04 exclude: + - registry: 'public.ecr.aws' + profile: emqx-enterprise - arch: arm64 build_machine: ubuntu-20.04 - arch: amd64 diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index 56d2a6394..118b40521 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -205,7 +205,7 @@ jobs: - emqx - emqx-enterprise runs-on: aws-amd64 - container: "ghcr.io/emqx/emqx-schema-validate:0.3.3" + container: "ghcr.io/emqx/emqx-schema-validate:0.3.5" steps: - uses: actions/download-artifact@v2 name: Download schema dump diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index 3bf2aabb6..c843ad92c 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -35,7 +35,7 @@ -define(EMQX_RELEASE_CE, "5.0.5-beta.1"). %% Enterprise edition --define(EMQX_RELEASE_EE, "5.0.0-alpha.1"). +-define(EMQX_RELEASE_EE, "5.0.0-beta.1"). %% the HTTP API version -define(EMQX_API_VERSION, "5.0"). diff --git a/apps/emqx_authn/src/emqx_authn_utils.erl b/apps/emqx_authn/src/emqx_authn_utils.erl index b989da3b4..24227345a 100644 --- a/apps/emqx_authn/src/emqx_authn_utils.erl +++ b/apps/emqx_authn/src/emqx_authn_utils.erl @@ -48,7 +48,6 @@ ]). -define(DEFAULT_RESOURCE_OPTS, #{ - auto_retry_interval => 6000, start_after_created => false }). diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl index 8f98e2f1e..480950143 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl @@ -22,15 +22,18 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2, connect/1 ]). -define(DEFAULT_POOL_SIZE, 8). +callback_mode() -> always_sync. + on_start(InstId, Opts) -> PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolOpts = [ @@ -45,7 +48,7 @@ on_start(InstId, Opts) -> on_stop(_InstId, #{pool_name := PoolName}) -> emqx_plugin_libs_pool:stop_pool(PoolName). -on_query(InstId, get_jwks, AfterQuery, #{pool_name := PoolName}) -> +on_query(InstId, get_jwks, #{pool_name := PoolName}) -> Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover), case Result of {error, Reason} -> @@ -54,20 +57,18 @@ on_query(InstId, get_jwks, AfterQuery, #{pool_name := PoolName}) -> connector => InstId, command => get_jwks, reason => Reason - }), - emqx_resource:query_failed(AfterQuery); + }); _ -> - emqx_resource:query_success(AfterQuery) + ok end, Result; -on_query(_InstId, {update, Opts}, AfterQuery, #{pool_name := PoolName}) -> +on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) -> lists:foreach( fun({_, Worker}) -> ok = ecpool_worker:exec(Worker, {emqx_authn_jwks_client, update, [Opts]}, infinity) end, ecpool:workers(PoolName) ), - emqx_resource:query_success(AfterQuery), ok. 
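The hunk above is the template applied to every connector in this patch: `on_query/4` drops its `AfterQuery` argument, success or failure is reported through the return value instead of `emqx_resource:query_success/1` / `query_failed/1`, and each callback module now exports `callback_mode/0`. A minimal sync-only sketch of the new contract follows; the module name and request shapes are illustrative and not part of the patch.

```erlang
-module(my_sync_connector_example).
-behaviour(emqx_resource).

-export([callback_mode/0, on_start/2, on_stop/2, on_query/3, on_get_status/2]).

%% This connector only supports synchronous queries.
callback_mode() -> always_sync.

on_start(_InstId, Opts) ->
    %% A real connector would start a driver or pool here.
    {ok, #{opts => Opts}}.

on_stop(_InstId, _State) ->
    ok.

%% The result is returned directly: ok | {ok, Result} | {error, Reason}.
on_query(_InstId, {echo, Value}, _State) ->
    {ok, Value};
on_query(_InstId, _Request, _State) ->
    {error, unsupported_request}.

on_get_status(_InstId, _State) ->
    connected.
```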
on_get_status(_InstId, #{pool_name := PoolName}) -> diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl index 43a1ebd3b..9357265e7 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl @@ -166,7 +166,7 @@ authenticate( begin Filter = emqx_authn_utils:render_deep(FilterTemplate, Credential), case emqx_resource:query(ResourceId, {find_one, Collection, Filter, #{}}) of - undefined -> + {ok, undefined} -> ignore; {error, Reason} -> ?TRACE_AUTHN_PROVIDER(error, "mongodb_query_failed", #{ @@ -176,7 +176,7 @@ authenticate( reason => Reason }), ignore; - Doc -> + {ok, Doc} -> case check_password(Password, Doc, State) of ok -> {ok, is_superuser(Doc, State)}; diff --git a/apps/emqx_authz/src/emqx_authz_mongodb.erl b/apps/emqx_authz/src/emqx_authz_mongodb.erl index ac450e4cc..a1e1b8136 100644 --- a/apps/emqx_authz/src/emqx_authz_mongodb.erl +++ b/apps/emqx_authz/src/emqx_authz_mongodb.erl @@ -92,9 +92,9 @@ authorize( resource_id => ResourceID }), nomatch; - [] -> + {ok, []} -> nomatch; - Rows -> + {ok, Rows} -> Rules = [ emqx_authz_rule:compile({Permission, all, Action, Topics}) || #{ diff --git a/apps/emqx_authz/src/emqx_authz_utils.erl b/apps/emqx_authz/src/emqx_authz_utils.erl index d364bc5fa..6eb92fecb 100644 --- a/apps/emqx_authz/src/emqx_authz_utils.erl +++ b/apps/emqx_authz/src/emqx_authz_utils.erl @@ -40,7 +40,6 @@ ]). -define(DEFAULT_RESOURCE_OPTS, #{ - auto_retry_interval => 6000, start_after_created => false }). diff --git a/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl index 306fe3f13..0c49cc03a 100644 --- a/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl @@ -23,6 +23,8 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). +suite() -> [{timetrap, {seconds, 60}}]. + all() -> emqx_common_test_helpers:all(?MODULE). diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index ba6c64dbc..354c4faee 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -49,14 +49,14 @@ -export([get_basic_usage_info/0]). load() -> - %% set wait_for_resource_ready => 0 to start resources async - Opts = #{auto_retry_interval => 60000, wait_for_resource_ready => 0}, Bridges = emqx:get_config([bridges], #{}), lists:foreach( fun({Type, NamedConf}) -> lists:foreach( fun({Name, Conf}) -> - safe_load_bridge(Type, Name, Conf, Opts) + %% fetch opts for `emqx_resource_worker` + ResOpts = emqx_resource:fetch_creation_opts(Conf), + safe_load_bridge(Type, Name, Conf, ResOpts) end, maps:to_list(NamedConf) ) @@ -171,9 +171,9 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnv) -> diff_confs(NewConf, OldConf), %% The config update will be failed if any task in `perform_bridge_changes` failed. 
Result = perform_bridge_changes([ - {fun emqx_bridge_resource:remove/3, Removed}, - {fun emqx_bridge_resource:create/3, Added}, - {fun emqx_bridge_resource:update/3, Updated} + {fun emqx_bridge_resource:remove/4, Removed}, + {fun emqx_bridge_resource:create/4, Added}, + {fun emqx_bridge_resource:update/4, Updated} ]), ok = unload_hook(), ok = load_hook(NewConf), @@ -260,8 +260,16 @@ perform_bridge_changes([{Action, MapConfs} | Tasks], Result0) -> fun ({_Type, _Name}, _Conf, {error, Reason}) -> {error, Reason}; + %% for emqx_bridge_resource:update/4 + ({Type, Name}, {OldConf, Conf}, _) -> + ResOpts = emqx_resource:fetch_creation_opts(Conf), + case Action(Type, Name, {OldConf, Conf}, ResOpts) of + {error, Reason} -> {error, Reason}; + Return -> Return + end; ({Type, Name}, Conf, _) -> - case Action(Type, Name, Conf) of + ResOpts = emqx_resource:fetch_creation_opts(Conf), + case Action(Type, Name, Conf, ResOpts) of {error, Reason} -> {error, Reason}; Return -> Return end diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index 37a42ab3d..e48833f78 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -163,7 +163,7 @@ bridge_info_examples(Method) -> }). conn_bridge_examples(Method) -> - lists:foldl( + Fun = fun(Type, Acc) -> SType = atom_to_list(Type), KeyIngress = bin(SType ++ "_ingress"), @@ -179,9 +179,17 @@ conn_bridge_examples(Method) -> } }) end, - #{}, - ?CONN_TYPES - ). + Broker = lists:foldl(Fun, #{}, ?CONN_TYPES), + EE = ee_conn_bridge_examples(Method), + maps:merge(Broker, EE). + +-if(?EMQX_RELEASE_EDITION == ee). +ee_conn_bridge_examples(Method) -> + emqx_ee_bridge:conn_bridge_examples(Method). +-else. +ee_conn_bridge_examples(_Method) -> + #{}. +-endif. info_example(Type, Direction, Method) -> maps:merge( diff --git a/apps/emqx_bridge/src/emqx_bridge_app.erl b/apps/emqx_bridge/src/emqx_bridge_app.erl index 3fc4d57ba..cac6ab1e6 100644 --- a/apps/emqx_bridge/src/emqx_bridge_app.erl +++ b/apps/emqx_bridge/src/emqx_bridge_app.erl @@ -29,6 +29,7 @@ start(_StartType, _StartArgs) -> {ok, Sup} = emqx_bridge_sup:start_link(), + ok = start_ee_apps(), ok = emqx_bridge:load(), ok = emqx_bridge:load_hook(), ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, ?MODULE), @@ -41,6 +42,16 @@ stop(_State) -> ok = emqx_bridge:unload_hook(), ok. +-if(?EMQX_RELEASE_EDITION == ee). +start_ee_apps() -> + {ok, _} = application:ensure_all_started(emqx_ee_bridge), + {ok, _} = application:ensure_all_started(emqx_ee_connector), + ok. +-else. +start_ee_apps() -> + ok. +-endif. + %% NOTE: We depends on the `emqx_bridge:pre_config_update/3` to restart/stop the %% underlying resources. pre_config_update(_, {_Oper, _, _}, undefined) -> diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index d19cc8426..f7aeec30d 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -34,18 +34,27 @@ create_dry_run/2, remove/1, remove/2, - remove/3, + remove/4, update/2, update/3, + update/4, stop/2, restart/2, reset_metrics/1 ]). +-if(?EMQX_RELEASE_EDITION == ee). +bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt; +bridge_to_resource_type(mqtt) -> emqx_connector_mqtt; +bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http; +bridge_to_resource_type(webhook) -> emqx_connector_http; +bridge_to_resource_type(BridgeType) -> emqx_ee_bridge:resource_type(BridgeType). +-else. 
bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt; bridge_to_resource_type(mqtt) -> emqx_connector_mqtt; bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http; bridge_to_resource_type(webhook) -> emqx_connector_http. +-endif. resource_id(BridgeId) when is_binary(BridgeId) -> <<"bridge:", BridgeId/binary>>. @@ -80,7 +89,7 @@ create(BridgeId, Conf) -> create(BridgeType, BridgeName, Conf). create(Type, Name, Conf) -> - create(Type, Name, Conf, #{auto_retry_interval => 60000}). + create(Type, Name, Conf, #{}). create(Type, Name, Conf, Opts) -> ?SLOG(info, #{ @@ -103,6 +112,9 @@ update(BridgeId, {OldConf, Conf}) -> update(BridgeType, BridgeName, {OldConf, Conf}). update(Type, Name, {OldConf, Conf}) -> + update(Type, Name, {OldConf, Conf}, #{}). + +update(Type, Name, {OldConf, Conf}, Opts) -> %% TODO: sometimes its not necessary to restart the bridge connection. %% %% - if the connection related configs like `servers` is updated, we should restart/start @@ -119,7 +131,7 @@ update(Type, Name, {OldConf, Conf}) -> name => Name, config => Conf }), - case recreate(Type, Name, Conf) of + case recreate(Type, Name, Conf, Opts) of {ok, _} -> maybe_disable_bridge(Type, Name, Conf); {error, not_found} -> @@ -129,7 +141,7 @@ update(Type, Name, {OldConf, Conf}) -> name => Name, config => Conf }), - create(Type, Name, Conf); + create(Type, Name, Conf, Opts); {error, Reason} -> {error, {update_bridge_failed, Reason}} end; @@ -150,11 +162,14 @@ recreate(Type, Name) -> recreate(Type, Name, emqx:get_config([bridges, Type, Name])). recreate(Type, Name, Conf) -> + recreate(Type, Name, Conf, #{}). + +recreate(Type, Name, Conf, Opts) -> emqx_resource:recreate_local( resource_id(Type, Name), bridge_to_resource_type(Type), parse_confs(Type, Name, Conf), - #{auto_retry_interval => 60000} + Opts ). create_dry_run(Type, Conf) -> @@ -178,13 +193,13 @@ create_dry_run(Type, Conf) -> remove(BridgeId) -> {BridgeType, BridgeName} = parse_bridge_id(BridgeId), - remove(BridgeType, BridgeName, #{}). + remove(BridgeType, BridgeName, #{}, #{}). remove(Type, Name) -> - remove(Type, Name, undefined). + remove(Type, Name, #{}, #{}). %% just for perform_bridge_changes/1 -remove(Type, Name, _Conf) -> +remove(Type, Name, _Conf, _Opts) -> ?SLOG(info, #{msg => "remove_bridge", type => Type, name => Name}), case emqx_resource:remove_local(resource_id(Type, Name)) of ok -> ok; @@ -231,7 +246,7 @@ is_tmp_path(TmpPath, File) -> string:str(str(File), str(TmpPath)) > 0. 
parse_confs( - webhook, + Type, _Name, #{ url := Url, @@ -241,7 +256,7 @@ parse_confs( request_timeout := ReqTimeout, max_retries := Retry } = Conf -) -> +) when Type == webhook orelse Type == <<"webhook">> -> {BaseUrl, Path} = parse_url(Url), {ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl), Conf#{ diff --git a/apps/emqx_bridge/src/emqx_bridge_mqtt_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_schema.erl similarity index 100% rename from apps/emqx_bridge/src/emqx_bridge_mqtt_schema.erl rename to apps/emqx_bridge/src/schema/emqx_bridge_mqtt_schema.erl diff --git a/apps/emqx_bridge/src/emqx_bridge_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl similarity index 84% rename from apps/emqx_bridge/src/emqx_bridge_schema.erl rename to apps/emqx_bridge/src/schema/emqx_bridge_schema.erl index d512df323..4343dc223 100644 --- a/apps/emqx_bridge/src/emqx_bridge_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl @@ -27,28 +27,41 @@ %%====================================================================================== %% For HTTP APIs get_response() -> - http_schema("get"). + api_schema("get"). put_request() -> - http_schema("put"). + api_schema("put"). post_request() -> - http_schema("post"). + api_schema("post"). -http_schema(Method) -> - Schemas = lists:flatmap( - fun(Type) -> - [ - ref(schema_mod(Type), Method ++ "_ingress"), - ref(schema_mod(Type), Method ++ "_egress") - ] - end, - ?CONN_TYPES - ), - hoconsc:union([ - ref(emqx_bridge_webhook_schema, Method) - | Schemas - ]). +api_schema(Method) -> + Broker = + lists:flatmap( + fun(Type) -> + [ + ref(schema_mod(Type), Method ++ "_ingress"), + ref(schema_mod(Type), Method ++ "_egress") + ] + end, + ?CONN_TYPES + ) ++ [ref(Module, Method) || Module <- [emqx_bridge_webhook_schema]], + EE = ee_api_schemas(Method), + hoconsc:union(Broker ++ EE). + +-if(?EMQX_RELEASE_EDITION == ee). +ee_api_schemas(Method) -> + emqx_ee_bridge:api_schemas(Method). + +ee_fields_bridges() -> + emqx_ee_bridge:fields(bridges). +-else. +ee_api_schemas(_) -> + []. + +ee_fields_bridges() -> + []. +-endif. common_bridge_fields(ConnectorRef) -> [ @@ -127,7 +140,7 @@ fields(bridges) -> #{desc => ?DESC("bridges_name")} )} || T <- ?CONN_TYPES - ]; + ] ++ ee_fields_bridges(); fields("metrics") -> [ {"matched", mk(integer(), #{desc => ?DESC("metric_matched")})}, diff --git a/apps/emqx_bridge/src/emqx_bridge_webhook_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl similarity index 90% rename from apps/emqx_bridge/src/emqx_bridge_webhook_schema.erl rename to apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl index 02dd0a76d..37bee3c3c 100644 --- a/apps/emqx_bridge/src/emqx_bridge_webhook_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl @@ -9,7 +9,7 @@ %%====================================================================================== %% Hocon Schema Definitions -namespace() -> "bridge". +namespace() -> "bridge_webhook". roots() -> []. @@ -50,7 +50,7 @@ basic_config() -> default => egress } )} - ] ++ + ] ++ webhook_creation_opts() ++ proplists:delete( max_retries, proplists:delete(base_url, emqx_connector_http:fields(config)) ). @@ -118,6 +118,22 @@ request_config() -> )} ]. +webhook_creation_opts() -> + Opts = emqx_resource_schema:fields(creation_opts), + lists:filter( + fun({K, _V}) -> + not lists:member(K, unsupported_opts()) + end, + Opts + ). + +unsupported_opts() -> + [ + enable_batch, + batch_size, + batch_time + ]. 
+ %%====================================================================================== type_field() -> diff --git a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl index c048a13fe..9346fb9c0 100644 --- a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl @@ -24,7 +24,7 @@ -include_lib("common_test/include/ct.hrl"). -define(CONF_DEFAULT, <<"bridges: {}">>). -define(BRIDGE_TYPE, <<"webhook">>). --define(BRIDGE_NAME, <<"test_bridge">>). +-define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))). -define(URL(PORT, PATH), list_to_binary( io_lib:format( @@ -78,8 +78,12 @@ set_special_configs(_) -> init_per_testcase(_, Config) -> {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000), - Config. -end_per_testcase(_, _Config) -> + {Port, Sock, Acceptor} = start_http_server(fun handle_fun_200_ok/2), + [{port, Port}, {sock, Sock}, {acceptor, Acceptor} | Config]. +end_per_testcase(_, Config) -> + Sock = ?config(sock, Config), + Acceptor = ?config(acceptor, Config), + stop_http_server(Sock, Acceptor), clear_resources(), ok. @@ -95,31 +99,39 @@ clear_resources() -> %% HTTP server for testing %%------------------------------------------------------------------------------ start_http_server(HandleFun) -> + process_flag(trap_exit, true), Parent = self(), - spawn_link(fun() -> - {Port, Sock} = listen_on_random_port(), - Parent ! {port, Port}, - loop(Sock, HandleFun, Parent) + {Port, Sock} = listen_on_random_port(), + Acceptor = spawn_link(fun() -> + accept_loop(Sock, HandleFun, Parent) end), - receive - {port, Port} -> Port - after 2000 -> error({timeout, start_http_server}) - end. + timer:sleep(100), + {Port, Sock, Acceptor}. + +stop_http_server(Sock, Acceptor) -> + exit(Acceptor, kill), + gen_tcp:close(Sock). listen_on_random_port() -> Min = 1024, Max = 65000, + rand:seed(exsplus, erlang:timestamp()), Port = rand:uniform(Max - Min) + Min, - case gen_tcp:listen(Port, [{active, false}, {reuseaddr, true}, binary]) of + case + gen_tcp:listen(Port, [ + binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000} + ]) + of {ok, Sock} -> {Port, Sock}; {error, eaddrinuse} -> listen_on_random_port() end. -loop(Sock, HandleFun, Parent) -> +accept_loop(Sock, HandleFun, Parent) -> + process_flag(trap_exit, true), {ok, Conn} = gen_tcp:accept(Sock), - Handler = spawn(fun() -> HandleFun(Conn, Parent) end), + Handler = spawn_link(fun() -> HandleFun(Conn, Parent) end), gen_tcp:controlling_process(Conn, Handler), - loop(Sock, HandleFun, Parent). + accept_loop(Sock, HandleFun, Parent). make_response(CodeStr, Str) -> B = iolist_to_binary(Str), @@ -138,7 +150,9 @@ handle_fun_200_ok(Conn, Parent) -> Parent ! {http_server, received, Req}, gen_tcp:send(Conn, make_response("200 OK", "Request OK")), handle_fun_200_ok(Conn, Parent); - {error, closed} -> + {error, Reason} -> + ct:pal("the http handler recv error: ~p", [Reason]), + timer:sleep(100), gen_tcp:close(Conn) end. 
@@ -153,24 +167,25 @@ parse_http_request(ReqStr0) -> %% Testcases %%------------------------------------------------------------------------------ -t_http_crud_apis(_) -> - Port = start_http_server(fun handle_fun_200_ok/2), +t_http_crud_apis(Config) -> + Port = ?config(port, Config), %% assert we there's no bridges at first {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), %% then we add a webhook bridge, using POST %% POST /bridges/ will create a bridge URL1 = ?URL(Port, "path1"), + Name = ?BRIDGE_NAME, {ok, 201, Bridge} = request( post, uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) ), %ct:pal("---bridge: ~p", [Bridge]), #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], @@ -179,7 +194,7 @@ t_http_crud_apis(_) -> <<"url">> := URL1 } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), %% send an message to emqx and the message should be forwarded to the HTTP server Body = <<"my msg">>, emqx:publish(emqx_message:make(<<"emqx_webhook/1">>, Body)), @@ -203,12 +218,12 @@ t_http_crud_apis(_) -> {ok, 200, Bridge2} = request( put, uri(["bridges", BridgeID]), - ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, Name) ), ?assertMatch( #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], @@ -225,7 +240,7 @@ t_http_crud_apis(_) -> [ #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], @@ -242,7 +257,7 @@ t_http_crud_apis(_) -> ?assertMatch( #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], @@ -275,7 +290,7 @@ t_http_crud_apis(_) -> {ok, 404, ErrMsg2} = request( put, uri(["bridges", BridgeID]), - ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, Name) ), ?assertMatch( #{ @@ -286,29 +301,28 @@ t_http_crud_apis(_) -> ), ok. -t_start_stop_bridges(_) -> - lists:foreach( - fun(Type) -> - do_start_stop_bridges(Type) - end, - [node, cluster] - ). +t_start_stop_bridges_node(Config) -> + do_start_stop_bridges(node, Config). -do_start_stop_bridges(Type) -> +t_start_stop_bridges_cluster(Config) -> + do_start_stop_bridges(cluster, Config). 
+ +do_start_stop_bridges(Type, Config) -> %% assert we there's no bridges at first {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - Port = start_http_server(fun handle_fun_200_ok/2), + Port = ?config(port, Config), URL1 = ?URL(Port, "abc"), + Name = atom_to_binary(Type), {ok, 201, Bridge} = request( post, uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) ), %ct:pal("the bridge ==== ~p", [Bridge]), #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := <<"connected">>, <<"node_status">> := [_ | _], @@ -316,11 +330,11 @@ do_start_stop_bridges(Type) -> <<"node_metrics">> := [_ | _], <<"url">> := URL1 } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), %% stop it {ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>), {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)), + ?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)), %% start again {ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), @@ -339,21 +353,22 @@ do_start_stop_bridges(Type) -> {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []). -t_enable_disable_bridges(_) -> +t_enable_disable_bridges(Config) -> %% assert we there's no bridges at first {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - Port = start_http_server(fun handle_fun_200_ok/2), + Name = ?BRIDGE_NAME, + Port = ?config(port, Config), URL1 = ?URL(Port, "abc"), {ok, 201, Bridge} = request( post, uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) ), %ct:pal("the bridge ==== ~p", [Bridge]), #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := <<"connected">>, <<"node_status">> := [_ | _], @@ -361,11 +376,11 @@ t_enable_disable_bridges(_) -> <<"node_metrics">> := [_ | _], <<"url">> := URL1 } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), %% disable it {ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>), {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)), + ?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)), %% enable again {ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), @@ -391,21 +406,22 @@ t_enable_disable_bridges(_) -> {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []). 
-t_reset_bridges(_) -> +t_reset_bridges(Config) -> %% assert we there's no bridges at first {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - Port = start_http_server(fun handle_fun_200_ok/2), + Name = ?BRIDGE_NAME, + Port = ?config(port, Config), URL1 = ?URL(Port, "abc"), {ok, 201, Bridge} = request( post, uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) ), %ct:pal("the bridge ==== ~p", [Bridge]), #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := <<"connected">>, <<"node_status">> := [_ | _], @@ -413,7 +429,7 @@ t_reset_bridges(_) -> <<"node_metrics">> := [_ | _], <<"url">> := URL1 } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), {ok, 200, <<"Reset success">>} = request(put, uri(["bridges", BridgeID, "reset_metrics"]), []), %% delete the bridge diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 1441a4180..854fdac07 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.2"}, + {vsn, "0.1.3"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib]}, diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index bc8b52702..ce54695a5 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -399,6 +399,10 @@ typename_to_spec("failure_strategy()", _Mod) -> #{type => enum, symbols => [force, drop, throw]}; typename_to_spec("initial()", _Mod) -> #{type => string}; +typename_to_spec("map()", _Mod) -> + #{type => object}; +typename_to_spec("#{" ++ _, Mod) -> + typename_to_spec("map()", Mod); typename_to_spec(Name, Mod) -> Spec = range(Name), Spec1 = remote_module_type(Spec, Name, Mod), diff --git a/apps/emqx_connector/src/emqx_connector_api.erl b/apps/emqx_connector/src/emqx_connector_api.erl index 18409fafd..8dcd3a4aa 100644 --- a/apps/emqx_connector/src/emqx_connector_api.erl +++ b/apps/emqx_connector/src/emqx_connector_api.erl @@ -80,7 +80,7 @@ connector_info_array_example(Method) -> [Config || #{value := Config} <- maps:values(connector_info_examples(Method))]. connector_info_examples(Method) -> - lists:foldl( + Fun = fun(Type, Acc) -> SType = atom_to_list(Type), maps:merge(Acc, #{ @@ -90,9 +90,17 @@ connector_info_examples(Method) -> } }) end, - #{}, - ?CONN_TYPES - ). + Broker = lists:foldl(Fun, #{}, ?CONN_TYPES), + EE = ee_example(Method), + maps:merge(Broker, EE). + +-if(?EMQX_RELEASE_EDITION == ee). +ee_example(Method) -> + emqx_ee_connector:connector_examples(Method). +-else. +ee_example(_Method) -> + #{}. +-endif. info_example(Type, Method) -> maps:merge( diff --git a/apps/emqx_connector/src/emqx_connector_http.erl b/apps/emqx_connector/src/emqx_connector_http.erl index b12b38838..5658b385d 100644 --- a/apps/emqx_connector/src/emqx_connector_http.erl +++ b/apps/emqx_connector/src/emqx_connector_http.erl @@ -26,9 +26,11 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, + on_query_async/4, on_get_status/2 ]). @@ -174,6 +176,8 @@ ref(Field) -> hoconsc:ref(?MODULE, Field). %% =================================================================== +callback_mode() -> async_if_possible. 
+ on_start( InstId, #{ @@ -235,10 +239,11 @@ on_stop(InstId, #{pool_name := PoolName}) -> }), ehttpc_sup:stop_pool(PoolName). -on_query(InstId, {send_message, Msg}, AfterQuery, State) -> +on_query(InstId, {send_message, Msg}, State) -> case maps:get(request, State, undefined) of undefined -> - ?SLOG(error, #{msg => "request_not_found", connector => InstId}); + ?SLOG(error, #{msg => "arg_request_not_found", connector => InstId}), + {error, arg_request_not_found}; Request -> #{ method := Method, @@ -251,18 +256,16 @@ on_query(InstId, {send_message, Msg}, AfterQuery, State) -> on_query( InstId, {undefined, Method, {Path, Headers, Body}, Timeout, Retry}, - AfterQuery, State ) end; -on_query(InstId, {Method, Request}, AfterQuery, State) -> - on_query(InstId, {undefined, Method, Request, 5000, 2}, AfterQuery, State); -on_query(InstId, {Method, Request, Timeout}, AfterQuery, State) -> - on_query(InstId, {undefined, Method, Request, Timeout, 2}, AfterQuery, State); +on_query(InstId, {Method, Request}, State) -> + on_query(InstId, {undefined, Method, Request, 5000, 2}, State); +on_query(InstId, {Method, Request, Timeout}, State) -> + on_query(InstId, {undefined, Method, Request, Timeout, 2}, State); on_query( InstId, {KeyOrNum, Method, Request, Timeout, Retry}, - AfterQuery, #{pool_name := PoolName, base_path := BasePath} = State ) -> ?TRACE( @@ -285,35 +288,77 @@ on_query( of {error, Reason} -> ?SLOG(error, #{ - msg => "http_connector_do_reqeust_failed", + msg => "http_connector_do_request_failed", request => NRequest, reason => Reason, connector => InstId - }), - emqx_resource:query_failed(AfterQuery); + }); {ok, StatusCode, _} when StatusCode >= 200 andalso StatusCode < 300 -> - emqx_resource:query_success(AfterQuery); + ok; {ok, StatusCode, _, _} when StatusCode >= 200 andalso StatusCode < 300 -> - emqx_resource:query_success(AfterQuery); + ok; {ok, StatusCode, _} -> ?SLOG(error, #{ msg => "http connector do request, received error response", request => NRequest, connector => InstId, status_code => StatusCode - }), - emqx_resource:query_failed(AfterQuery); + }); {ok, StatusCode, _, _} -> ?SLOG(error, #{ msg => "http connector do request, received error response", request => NRequest, connector => InstId, status_code => StatusCode - }), - emqx_resource:query_failed(AfterQuery) + }) end, Result. +on_query_async(InstId, {send_message, Msg}, ReplyFunAndArgs, State) -> + case maps:get(request, State, undefined) of + undefined -> + ?SLOG(error, #{msg => "arg_request_not_found", connector => InstId}), + {error, arg_request_not_found}; + Request -> + #{ + method := Method, + path := Path, + body := Body, + headers := Headers, + request_timeout := Timeout + } = process_request(Request, Msg), + on_query_async( + InstId, + {undefined, Method, {Path, Headers, Body}, Timeout}, + ReplyFunAndArgs, + State + ) + end; +on_query_async( + InstId, + {KeyOrNum, Method, Request, Timeout}, + ReplyFunAndArgs, + #{pool_name := PoolName, base_path := BasePath} = State +) -> + ?TRACE( + "QUERY_ASYNC", + "http_connector_received", + #{request => Request, connector => InstId, state => State} + ), + NRequest = formalize_request(Method, BasePath, Request), + Worker = + case KeyOrNum of + undefined -> ehttpc_pool:pick_worker(PoolName); + _ -> ehttpc_pool:pick_worker(PoolName, KeyOrNum) + end, + ok = ehttpc:request_async( + Worker, + Method, + NRequest, + Timeout, + ReplyFunAndArgs + ). 
+ on_get_status(_InstId, #{pool_name := PoolName, connect_timeout := Timeout} = State) -> case do_get_status(PoolName, Timeout) of true -> @@ -355,7 +400,6 @@ do_get_status(PoolName, Timeout) -> %%-------------------------------------------------------------------- %% Internal functions %%-------------------------------------------------------------------- - preprocess_request(undefined) -> undefined; preprocess_request(Req) when map_size(Req) == 0 -> diff --git a/apps/emqx_connector/src/emqx_connector_ldap.erl b/apps/emqx_connector/src/emqx_connector_ldap.erl index 195aa89a9..d53c0e41b 100644 --- a/apps/emqx_connector/src/emqx_connector_ldap.erl +++ b/apps/emqx_connector/src/emqx_connector_ldap.erl @@ -25,9 +25,10 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). @@ -42,6 +43,8 @@ roots() -> fields(_) -> []. %% =================================================================== +callback_mode() -> always_sync. + on_start( InstId, #{ @@ -99,7 +102,7 @@ on_stop(InstId, #{poolname := PoolName}) -> }), emqx_plugin_libs_pool:stop_pool(PoolName). -on_query(InstId, {search, Base, Filter, Attributes}, AfterQuery, #{poolname := PoolName} = State) -> +on_query(InstId, {search, Base, Filter, Attributes}, #{poolname := PoolName} = State) -> Request = {Base, Filter, Attributes}, ?TRACE( "QUERY", @@ -119,10 +122,9 @@ on_query(InstId, {search, Base, Filter, Attributes}, AfterQuery, #{poolname := P request => Request, connector => InstId, reason => Reason - }), - emqx_resource:query_failed(AfterQuery); + }); _ -> - emqx_resource:query_success(AfterQuery) + ok end, Result. diff --git a/apps/emqx_connector/src/emqx_connector_mongo.erl b/apps/emqx_connector/src/emqx_connector_mongo.erl index 5b07c5003..07208545f 100644 --- a/apps/emqx_connector/src/emqx_connector_mongo.erl +++ b/apps/emqx_connector/src/emqx_connector_mongo.erl @@ -25,9 +25,10 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). @@ -139,6 +140,8 @@ mongo_fields() -> %% =================================================================== +callback_mode() -> always_sync. + on_start( InstId, Config = #{ @@ -189,7 +192,6 @@ on_stop(InstId, #{poolname := PoolName}) -> on_query( InstId, {Action, Collection, Filter, Projector}, - AfterQuery, #{poolname := PoolName} = State ) -> Request = {Action, Collection, Filter, Projector}, @@ -212,14 +214,11 @@ on_query( reason => Reason, connector => InstId }), - emqx_resource:query_failed(AfterQuery), {error, Reason}; {ok, Cursor} when is_pid(Cursor) -> - emqx_resource:query_success(AfterQuery), - mc_cursor:foldl(fun(O, Acc2) -> [O | Acc2] end, [], Cursor, 1000); + {ok, mc_cursor:foldl(fun(O, Acc2) -> [O | Acc2] end, [], Cursor, 1000)}; Result -> - emqx_resource:query_success(AfterQuery), - Result + {ok, Result} end. -dialyzer({nowarn_function, [on_get_status/2]}). diff --git a/apps/emqx_connector/src/emqx_connector_mqtt.erl b/apps/emqx_connector/src/emqx_connector_mqtt.erl index 21e201504..e37f6a9a2 100644 --- a/apps/emqx_connector/src/emqx_connector_mqtt.erl +++ b/apps/emqx_connector/src/emqx_connector_mqtt.erl @@ -24,6 +24,7 @@ %% API and callbacks for supervisor -export([ + callback_mode/0, start_link/0, init/1, create_bridge/1, @@ -37,7 +38,7 @@ -export([ on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). 
@@ -133,11 +134,14 @@ drop_bridge(Name) -> %% =================================================================== %% When use this bridge as a data source, ?MODULE:on_message_received will be called %% if the bridge received msgs from the remote broker. -on_message_received(Msg, HookPoint, InstId) -> - _ = emqx_resource:query(InstId, {message_received, Msg}), +on_message_received(Msg, HookPoint, ResId) -> + emqx_resource:inc_matched(ResId), + emqx_resource:inc_success(ResId), emqx:run_hook(HookPoint, [Msg]). %% =================================================================== +callback_mode() -> always_sync. + on_start(InstId, Conf) -> InstanceId = binary_to_atom(InstId, utf8), ?SLOG(info, #{ @@ -181,12 +185,10 @@ on_stop(_InstId, #{name := InstanceId}) -> }) end. -on_query(_InstId, {message_received, _Msg}, AfterQuery, _State) -> - emqx_resource:query_success(AfterQuery); -on_query(_InstId, {send_message, Msg}, AfterQuery, #{name := InstanceId}) -> +on_query(_InstId, {send_message, Msg}, #{name := InstanceId}) -> ?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => InstanceId}), emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg), - emqx_resource:query_success(AfterQuery). + ok. on_get_status(_InstId, #{name := InstanceId, bridge_conf := Conf}) -> AutoReconn = maps:get(auto_reconnect, Conf, true), @@ -207,11 +209,12 @@ make_sub_confs(EmptyMap, _) when map_size(EmptyMap) == 0 -> make_sub_confs(undefined, _) -> undefined; make_sub_confs(SubRemoteConf, InstId) -> + ResId = emqx_resource_manager:manager_id_to_resource_id(InstId), case maps:take(hookpoint, SubRemoteConf) of error -> SubRemoteConf; {HookPoint, SubConf} -> - MFA = {?MODULE, on_message_received, [HookPoint, InstId]}, + MFA = {?MODULE, on_message_received, [HookPoint, ResId]}, SubConf#{on_message_received => MFA} end. diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl index d6963d04e..cae07433a 100644 --- a/apps/emqx_connector/src/emqx_connector_mysql.erl +++ b/apps/emqx_connector/src/emqx_connector_mysql.erl @@ -24,9 +24,10 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). @@ -44,6 +45,16 @@ default_port => ?MYSQL_DEFAULT_PORT }). +-type prepares() :: #{atom() => binary()}. +-type params_tokens() :: #{atom() => list()}. +-type state() :: + #{ + poolname := atom(), + prepare_statement := prepares(), + auto_reconnect := boolean(), + params_tokens := params_tokens() + }. + %%===================================================================== %% Hocon schema roots() -> @@ -63,6 +74,9 @@ server(desc) -> ?DESC("server"); server(_) -> undefined. %% =================================================================== +callback_mode() -> always_sync. + +-spec on_start(binary(), hoconsc:config()) -> {ok, state()} | {error, _}. 
on_start( InstId, #{ @@ -97,8 +111,8 @@ on_start( {pool_size, PoolSize} ], PoolName = emqx_plugin_libs_pool:pool_name(InstId), - Prepares = maps:get(prepare_statement, Config, #{}), - State = #{poolname => PoolName, prepare_statement => Prepares, auto_reconnect => AutoReconn}, + Prepares = parse_prepare_sql(Config), + State = maps:merge(#{poolname => PoolName, auto_reconnect => AutoReconn}, Prepares), case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of ok -> {ok, init_prepare(State)}; {error, Reason} -> {error, Reason} @@ -111,22 +125,22 @@ on_stop(InstId, #{poolname := PoolName}) -> }), emqx_plugin_libs_pool:stop_pool(PoolName). -on_query(InstId, {Type, SQLOrKey}, AfterQuery, State) -> - on_query(InstId, {Type, SQLOrKey, [], default_timeout}, AfterQuery, State); -on_query(InstId, {Type, SQLOrKey, Params}, AfterQuery, State) -> - on_query(InstId, {Type, SQLOrKey, Params, default_timeout}, AfterQuery, State); +on_query(InstId, {TypeOrKey, SQLOrKey}, State) -> + on_query(InstId, {TypeOrKey, SQLOrKey, [], default_timeout}, State); +on_query(InstId, {TypeOrKey, SQLOrKey, Params}, State) -> + on_query(InstId, {TypeOrKey, SQLOrKey, Params, default_timeout}, State); on_query( InstId, - {Type, SQLOrKey, Params, Timeout}, - AfterQuery, + {TypeOrKey, SQLOrKey, Params, Timeout}, #{poolname := PoolName, prepare_statement := Prepares} = State ) -> LogMeta = #{connector => InstId, sql => SQLOrKey, state => State}, ?TRACE("QUERY", "mysql_connector_received", LogMeta), Worker = ecpool:get_client(PoolName), {ok, Conn} = ecpool_worker:client(Worker), - MySqlFunction = mysql_function(Type), - Result = erlang:apply(mysql, MySqlFunction, [Conn, SQLOrKey, Params, Timeout]), + MySqlFunction = mysql_function(TypeOrKey), + {SQLOrKey2, Data} = proc_sql_params(TypeOrKey, SQLOrKey, Params, State), + Result = erlang:apply(mysql, MySqlFunction, [Conn, SQLOrKey2, Data, Timeout]), case Result of {error, disconnected} -> ?SLOG( @@ -135,7 +149,6 @@ on_query( ), %% kill the poll worker to trigger reconnection _ = exit(Conn, restart), - emqx_resource:query_failed(AfterQuery), Result; {error, not_prepared} -> ?SLOG( @@ -145,13 +158,12 @@ on_query( case prepare_sql(Prepares, PoolName) of ok -> %% not return result, next loop will try again - on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery, State); + on_query(InstId, {TypeOrKey, SQLOrKey, Params, Timeout}, State); {error, Reason} -> ?SLOG( error, LogMeta#{msg => "mysql_connector_do_prepare_failed", reason => Reason} ), - emqx_resource:query_failed(AfterQuery), {error, Reason} end; {error, Reason} -> @@ -159,15 +171,18 @@ on_query( error, LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason} ), - emqx_resource:query_failed(AfterQuery), Result; _ -> - emqx_resource:query_success(AfterQuery), Result end. -mysql_function(sql) -> query; -mysql_function(prepared_query) -> execute. +mysql_function(sql) -> + query; +mysql_function(prepared_query) -> + execute; +%% for bridge +mysql_function(_) -> + mysql_function(prepared_query). on_get_status(_InstId, #{poolname := Pool, auto_reconnect := AutoReconn} = State) -> case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of @@ -287,3 +302,36 @@ prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList]) when is_pid(Conn) -> unprepare_sql_to_conn(Conn, PrepareSqlKey) -> mysql:unprepare(Conn, PrepareSqlKey). 
+ +parse_prepare_sql(Config) -> + SQL = + case maps:get(prepare_statement, Config, undefined) of + undefined -> + case emqx_map_lib:deep_get([egress, sql_template], Config, undefined) of + undefined -> + #{}; + Template -> + #{send_message => Template} + end; + Any -> + Any + end, + parse_prepare_sql(maps:to_list(SQL), #{}, #{}). + +parse_prepare_sql([{Key, H} | T], SQL, Tokens) -> + {PrepareSQL, ParamsTokens} = emqx_plugin_libs_rule:preproc_sql(H), + parse_prepare_sql(T, SQL#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens}); +parse_prepare_sql([], SQL, Tokens) -> + #{prepare_statement => SQL, params_tokens => Tokens}. + +proc_sql_params(query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(prepared_query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(TypeOrKey, SQLOrData, Params, #{params_tokens := ParamsTokens}) -> + case maps:get(TypeOrKey, ParamsTokens, undefined) of + undefined -> + {SQLOrData, Params}; + Tokens -> + {TypeOrKey, emqx_plugin_libs_rule:proc_sql(Tokens, SQLOrData)} + end. diff --git a/apps/emqx_connector/src/emqx_connector_pgsql.erl b/apps/emqx_connector/src/emqx_connector_pgsql.erl index 6f89e7ff1..4b188e5a5 100644 --- a/apps/emqx_connector/src/emqx_connector_pgsql.erl +++ b/apps/emqx_connector/src/emqx_connector_pgsql.erl @@ -27,9 +27,10 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). @@ -66,6 +67,8 @@ server(desc) -> ?DESC("server"); server(_) -> undefined. %% =================================================================== +callback_mode() -> always_sync. + on_start( InstId, #{ @@ -116,9 +119,9 @@ on_stop(InstId, #{poolname := PoolName}) -> }), emqx_plugin_libs_pool:stop_pool(PoolName). -on_query(InstId, {Type, NameOrSQL}, AfterQuery, #{poolname := _PoolName} = State) -> - on_query(InstId, {Type, NameOrSQL, []}, AfterQuery, State); -on_query(InstId, {Type, NameOrSQL, Params}, AfterQuery, #{poolname := PoolName} = State) -> +on_query(InstId, {Type, NameOrSQL}, #{poolname := _PoolName} = State) -> + on_query(InstId, {Type, NameOrSQL, []}, State); +on_query(InstId, {Type, NameOrSQL, Params}, #{poolname := PoolName} = State) -> ?SLOG(debug, #{ msg => "postgresql connector received sql query", connector => InstId, @@ -132,10 +135,9 @@ on_query(InstId, {Type, NameOrSQL, Params}, AfterQuery, #{poolname := PoolName} connector => InstId, sql => NameOrSQL, reason => Reason - }), - emqx_resource:query_failed(AfterQuery); + }); _ -> - emqx_resource:query_success(AfterQuery) + ok end, Result. diff --git a/apps/emqx_connector/src/emqx_connector_redis.erl b/apps/emqx_connector/src/emqx_connector_redis.erl index 67310dbac..fae628d9e 100644 --- a/apps/emqx_connector/src/emqx_connector_redis.erl +++ b/apps/emqx_connector/src/emqx_connector_redis.erl @@ -26,9 +26,10 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). @@ -112,6 +113,8 @@ servers(desc) -> ?DESC("servers"); servers(_) -> undefined. %% =================================================================== +callback_mode() -> always_sync. + on_start( InstId, #{ @@ -177,7 +180,7 @@ on_stop(InstId, #{poolname := PoolName, type := Type}) -> _ -> emqx_plugin_libs_pool:stop_pool(PoolName) end. 
-on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := Type} = State) -> +on_query(InstId, {cmd, Command}, #{poolname := PoolName, type := Type} = State) -> ?TRACE( "QUERY", "redis_connector_received", @@ -195,10 +198,9 @@ on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := T connector => InstId, sql => Command, reason => Reason - }), - emqx_resource:query_failed(AfterCommand); + }); _ -> - emqx_resource:query_success(AfterCommand) + ok end, Result. diff --git a/apps/emqx_connector/src/emqx_connector_schema.erl b/apps/emqx_connector/src/emqx_connector_schema.erl index b0f20924f..f0c9479de 100644 --- a/apps/emqx_connector/src/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/emqx_connector_schema.erl @@ -44,7 +44,9 @@ post_request() -> http_schema("post"). http_schema(Method) -> - Schemas = [?R_REF(schema_mod(Type), Method) || Type <- ?CONN_TYPES], + Broker = [?R_REF(schema_mod(Type), Method) || Type <- ?CONN_TYPES], + EE = ee_schemas(Method), + Schemas = Broker ++ EE, ?UNION(Schemas). %%====================================================================================== @@ -57,13 +59,29 @@ roots() -> ["connectors"]. fields(connectors) -> fields("connectors"); fields("connectors") -> - [ + Broker = [ {mqtt, ?HOCON( ?MAP(name, ?R_REF(emqx_connector_mqtt_schema, "connector")), #{desc => ?DESC("mqtt")} )} - ]. + ], + EE = ee_fields_connectors(), + Broker ++ EE. + +-if(?EMQX_RELEASE_EDITION == ee). +ee_schemas(Method) -> + emqx_ee_connector:api_schemas(Method). + +ee_fields_connectors() -> + emqx_ee_connector:fields(connectors). +-else. +ee_fields_connectors() -> + []. + +ee_schemas(_) -> + []. +-endif. desc(Record) when Record =:= connectors; diff --git a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl index d76b8420a..e918be84a 100644 --- a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl @@ -85,8 +85,8 @@ perform_lifecycle_check(PoolName, InitialConfig) -> emqx_resource:get_instance(PoolName), ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), % % Perform query as further check that the resource is working as expected - ?assertMatch([], emqx_resource:query(PoolName, test_query_find())), - ?assertMatch(undefined, emqx_resource:query(PoolName, test_query_find_one())), + ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), ?assertEqual(ok, emqx_resource:stop(PoolName)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. @@ -95,7 +95,7 @@ perform_lifecycle_check(PoolName, InitialConfig) -> status := StoppedStatus }} = emqx_resource:get_instance(PoolName), - ?assertEqual(StoppedStatus, disconnected), + ?assertEqual(stopped, StoppedStatus), ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), @@ -108,8 +108,8 @@ perform_lifecycle_check(PoolName, InitialConfig) -> {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = emqx_resource:get_instance(PoolName), ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch([], emqx_resource:query(PoolName, test_query_find())), - ?assertMatch(undefined, emqx_resource:query(PoolName, test_query_find_one())), + ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), % Stop and remove the resource in one go. ?assertEqual(ok, emqx_resource:remove_local(PoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), diff --git a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl index d7f5cec63..3fd7191b9 100644 --- a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl @@ -101,7 +101,7 @@ perform_lifecycle_check(PoolName, InitialConfig) -> status := StoppedStatus }} = emqx_resource:get_instance(PoolName), - ?assertEqual(StoppedStatus, disconnected), + ?assertEqual(stopped, StoppedStatus), ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), diff --git a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl index d99d8ab6c..9442a1810 100644 --- a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl @@ -95,7 +95,7 @@ perform_lifecycle_check(PoolName, InitialConfig) -> status := StoppedStatus }} = emqx_resource:get_instance(PoolName), - ?assertEqual(StoppedStatus, disconnected), + ?assertEqual(stopped, StoppedStatus), ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), diff --git a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl index 4770bbeee..a60702036 100644 --- a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl @@ -117,7 +117,7 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) -> status := StoppedStatus }} = emqx_resource:get_instance(PoolName), - ?assertEqual(StoppedStatus, disconnected), + ?assertEqual(stopped, StoppedStatus), ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index 34f32d8be..08429eb22 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -656,6 +656,13 @@ typename_to_spec("file()", _Mod) -> #{type => string, example => <<"/path/to/file">>}; typename_to_spec("ip_port()", _Mod) -> #{type => string, example => <<"127.0.0.1:80">>}; +typename_to_spec("write_syntax()", _Mod) -> + #{ + type => string, + example => + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", "bool=${payload.bool}">> + }; typename_to_spec("ip_ports()", _Mod) -> #{type => string, example => <<"127.0.0.1:80, 127.0.0.2:80">>}; typename_to_spec("url()", _Mod) -> diff --git a/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl index 6bbd2135b..4dd5819b3 100644 --- a/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl @@ -79,7 +79,7 @@ end_per_suite(Config) -> emqx_gateway_auth_ct:stop(), ok = emqx_authz_test_lib:restore_authorizers(), emqx_config:erase(gateway), - emqx_mgmt_api_test_util:end_suite([cowboy, emqx_authz, emqx_authn, emqx_gateway]), + emqx_mgmt_api_test_util:end_suite([emqx_gateway, emqx_authn, emqx_authz, emqx_conf]), Config. init_per_testcase(_Case, Config) -> diff --git a/apps/emqx_resource/README.md b/apps/emqx_resource/README.md index 04f3c2205..0f61df7ff 100644 --- a/apps/emqx_resource/README.md +++ b/apps/emqx_resource/README.md @@ -14,5 +14,5 @@ the config operations (like config validation, config dump back to files), and t And we put all the `specific` codes to the callback modules. See -* `test/emqx_test_resource.erl` for a minimal `emqx_resource` implementation; +* `test/emqx_connector_demo.erl` for a minimal `emqx_resource` implementation; * `test/emqx_resource_SUITE.erl` for examples of `emqx_resource` usage. diff --git a/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf b/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf new file mode 100644 index 000000000..c07573b1a --- /dev/null +++ b/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf @@ -0,0 +1,136 @@ +emqx_resource_schema { + + health_check_interval { + desc { + en: """Health check interval, in milliseconds.""" + zh: """健康检查间隔,单位毫秒。""" + } + label { + en: """Health Check Interval""" + zh: """健康检查间隔""" + } + } + + start_after_created { + desc { + en: """Whether start the resource right after created.""" + zh: """是否在创建资源后立即启动资源。""" + } + label { + en: """Start After Created""" + zh: """创建后立即启动""" + } + } + + start_timeout { + desc { + en: """If 'start_after_created' enabled, how long time do we wait for the resource get started, in milliseconds.""" + zh: """如果选择了创建后立即启动资源,此选项用来设置等待资源启动的超时时间,单位毫秒。""" + } + label { + en: """Start Timeout""" + zh: """启动超时时间""" + } + } + + auto_restart_interval { + desc { + en: """The auto restart interval after the resource is disconnected, in milliseconds.""" + zh: """资源断开以后,自动重连的时间间隔,单位毫秒。""" + } + label { + en: """Auto Restart Interval""" + zh: """自动重连间隔""" + } + } + + query_mode { + desc { + en: """Query mode. 
Optional 'sync/async', default 'sync'.""" + zh: """请求模式。可选 '同步/异步',默认为'同步'模式。""" + } + label { + en: """Query mode""" + zh: """请求模式""" + } + } + + enable_batch { + desc { + en: """Batch mode enabled.""" + zh: """启用批量模式。""" + } + label { + en: """Enable batch""" + zh: """启用批量模式""" + } + } + + enable_queue { + desc { + en: """Queue mode enabled.""" + zh: """启用队列模式。""" + } + label { + en: """Enable queue""" + zh: """启用队列模式""" + } + } + + resume_interval { + desc { + en: """Resume time interval when resource down.""" + zh: """资源不可用时的重试时间。""" + } + label { + en: """Resume interval""" + zh: """恢复时间""" + } + } + + async_inflight_window { + desc { + en: """Async query inflight window.""" + zh: """异步请求飞行队列窗口大小。""" + } + label { + en: """Async inflight window""" + zh: """异步请求飞行队列窗口""" + } + } + + batch_size { + desc { + en: """Maximum batch count.""" + zh: """批量请求大小。""" + } + label { + en: """Batch size""" + zh: """批量请求大小""" + } + } + + batch_time { + desc { + en: """Maximum batch waiting interval.""" + zh: """最大批量请求等待时间。""" + } + label { + en: """Batch time""" + zh: """批量等待间隔""" + } + } + + queue_max_bytes { + desc { + en: """Maximum queue storage size in bytes.""" + zh: """消息队列的最大长度,以字节计。""" + } + label { + en: """Queue max bytes""" + zh: """队列最大长度""" + } + } + + +} diff --git a/apps/emqx_resource/include/emqx_resource.hrl b/apps/emqx_resource/include/emqx_resource.hrl index dd384af7c..8ec57a00e 100644 --- a/apps/emqx_resource/include/emqx_resource.hrl +++ b/apps/emqx_resource/include/emqx_resource.hrl @@ -21,36 +21,90 @@ -type resource_config() :: term(). -type resource_spec() :: map(). -type resource_state() :: term(). --type resource_status() :: connected | disconnected | connecting. +-type resource_status() :: connected | disconnected | connecting | stopped. +-type callback_mode() :: always_sync | async_if_possible. +-type result() :: term(). +-type reply_fun() :: {fun((result(), Args :: term()) -> any()), Args :: term()} | undefined. +-type query_opts() :: #{ + %% The key used for picking a resource worker + pick_key => term(), + async_reply_fun => reply_fun() +}. -type resource_data() :: #{ id := resource_id(), mod := module(), + callback_mode := callback_mode(), config := resource_config(), state := resource_state(), status := resource_status(), metrics := emqx_metrics_worker:metrics() }. -type resource_group() :: binary(). --type create_opts() :: #{ - health_check_interval => integer(), +-type creation_opts() :: #{ + %%======================================= Deprecated Opts: + %% use health_check_interval instead health_check_timeout => integer(), - %% We can choose to block the return of emqx_resource:start until - %% the resource connected, wait max to `wait_for_resource_ready` ms. + %% use start_timeout instead wait_for_resource_ready => integer(), + %% use auto_restart_interval instead + auto_retry_interval => integer(), + %%======================================= Deprecated Opts End + health_check_interval => integer(), + %% We can choose to block the return of emqx_resource:start until + %% the resource connected, wait max to `start_timeout` ms. + start_timeout => integer(), %% If `start_after_created` is set to true, the resource is started right %% after it is created. But note that a `started` resource is not guaranteed %% to be `connected`. start_after_created => boolean(), %% If the resource disconnected, we can set to retry starting the resource %% periodically. 
- auto_retry_interval => integer() + auto_restart_interval => integer(), + enable_batch => boolean(), + batch_size => integer(), + batch_time => integer(), + enable_queue => boolean(), + queue_max_bytes => integer(), + query_mode => async | sync | dynamic, + resume_interval => integer(), + async_inflight_window => integer() }. --type after_query() :: - {[OnSuccess :: after_query_fun()], [OnFailed :: after_query_fun()]} - | undefined. +-type query_result() :: + ok + | {ok, term()} + | {error, term()} + | {resource_down, term()}. -%% the `after_query_fun()` is mainly for callbacks that increment counters or do some fallback -%% actions upon query failure --type after_query_fun() :: {fun((...) -> ok), Args :: [term()]}. +-define(DEFAULT_QUEUE_SIZE, 1024 * 1024 * 1024). +-define(DEFAULT_QUEUE_SIZE_RAW, <<"1GB">>). + +%% count +-define(DEFAULT_BATCH_SIZE, 100). + +%% milliseconds +-define(DEFAULT_BATCH_TIME, 10). +-define(DEFAULT_BATCH_TIME_RAW, <<"10ms">>). + +%% count +-define(DEFAULT_INFLIGHT, 100). + +%% milliseconds +-define(HEALTHCHECK_INTERVAL, 15000). +-define(HEALTHCHECK_INTERVAL_RAW, <<"15s">>). + +%% milliseconds +-define(RESUME_INTERVAL, 15000). +-define(RESUME_INTERVAL_RAW, <<"15s">>). + +-define(START_AFTER_CREATED, true). + +%% milliseconds +-define(START_TIMEOUT, 5000). +-define(START_TIMEOUT_RAW, <<"5s">>). + +%% milliseconds +-define(AUTO_RESTART_INTERVAL, 60000). +-define(AUTO_RESTART_INTERVAL_RAW, <<"60s">>). -define(TEST_ID_PREFIX, "_test_:"). +-define(RES_METRICS, resource_metrics). diff --git a/apps/emqx_resource/include/emqx_resource_utils.hrl b/apps/emqx_resource/include/emqx_resource_utils.hrl index 8d94746eb..3df64b1e5 100644 --- a/apps/emqx_resource/include/emqx_resource_utils.hrl +++ b/apps/emqx_resource/include/emqx_resource_utils.hrl @@ -15,7 +15,7 @@ %%-------------------------------------------------------------------- -define(SAFE_CALL(_EXP_), - ?SAFE_CALL(_EXP_, ok) + ?SAFE_CALL(_EXP_, {error, {_EXCLASS_, _EXCPTION_, _ST_}}) ). -define(SAFE_CALL(_EXP_, _EXP_ON_FAIL_), @@ -24,8 +24,7 @@ (_EXP_) catch _EXCLASS_:_EXCPTION_:_ST_ -> - _EXP_ON_FAIL_, - {error, {_EXCLASS_, _EXCPTION_, _ST_}} + _EXP_ON_FAIL_ end end() ). diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 1bfd02323..b688e3c11 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.1"}, + {vsn, "0.1.2"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ diff --git a/apps/emqx_resource/src/emqx_resource.erl b/apps/emqx_resource/src/emqx_resource.erl index 33f0d0a3d..99e1f6057 100644 --- a/apps/emqx_resource/src/emqx_resource.erl +++ b/apps/emqx_resource/src/emqx_resource.erl @@ -23,13 +23,6 @@ -export([list_types/0]). -%% APIs for behaviour implementations - --export([ - query_success/1, - query_failed/1 -]). - %% APIs for instances -export([ @@ -83,14 +76,18 @@ stop/1, %% query the instance query/2, - %% query the instance with after_query() - query/3 + query/3, + %% query the instance without batching and queuing messages. + simple_sync_query/2, + simple_async_query/3 ]). 
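The `query/3` and `simple_*_query` entry points exported above take the `query_opts()` map defined in emqx_resource.hrl. A minimal caller-side sketch (the resource id, request shape and logging are illustrative, not part of this change):

%% Hypothetical helper: send one message through a resource, with an async callback.
-module(resource_query_example).
-export([send/3]).

send(ResId, ClientId, Payload) ->
    %% invoked by the resource worker once the async result is known
    ReplyFun = fun(Ctx, Result) -> logger:info("async result for ~p: ~p", [Ctx, Result]) end,
    Opts = #{
        %% requests sharing the same pick_key are handled by the same worker (keeps ordering)
        pick_key => ClientId,
        %% only consulted when the resource worker runs in async query mode
        async_reply_fun => {ReplyFun, [ClientId]}
    },
    emqx_resource:query(ResId, {send_message, Payload}, Opts).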
%% Direct calls to the callback module -%% start the instance -export([ + %% get the callback mode of a specific module + get_callback_mode/1, + %% start the instance call_start/3, %% verify if the resource is working normally call_health_check/3, @@ -105,14 +102,20 @@ list_instances_verbose/0, %% return the data of the instance get_instance/1, + fetch_creation_opts/1, %% return all the instances of the same resource type list_instances_by_type/1, generate_id/1, list_group_instances/1 ]). +-export([inc_metrics_funcs/1, inc_matched/1, inc_success/1, inc_failed/1]). + -optional_callbacks([ - on_query/4, + on_query/3, + on_batch_query/3, + on_query_async/4, + on_batch_query_async/4, on_get_status/2 ]). @@ -124,7 +127,26 @@ -callback on_stop(resource_id(), resource_state()) -> term(). %% when calling emqx_resource:query/3 --callback on_query(resource_id(), Request :: term(), after_query(), resource_state()) -> term(). +-callback on_query(resource_id(), Request :: term(), resource_state()) -> query_result(). + +%% when calling emqx_resource:on_batch_query/3 +-callback on_batch_query(resource_id(), Request :: term(), resource_state()) -> query_result(). + +%% when calling emqx_resource:on_query_async/4 +-callback on_query_async( + resource_id(), + Request :: term(), + {ReplyFun :: function(), Args :: list()}, + resource_state() +) -> query_result(). + +%% when calling emqx_resource:on_batch_query_async/4 +-callback on_batch_query_async( + resource_id(), + Request :: term(), + {ReplyFun :: function(), Args :: list()}, + resource_state() +) -> query_result(). %% when calling emqx_resource:health_check/2 -callback on_get_status(resource_id(), resource_state()) -> @@ -148,22 +170,6 @@ is_resource_mod(Module) -> proplists:get_value(behaviour, Info, []), lists:member(?MODULE, Behaviour). --spec query_success(after_query()) -> ok. -query_success(undefined) -> ok; -query_success({OnSucc, _}) -> apply_query_after_calls(OnSucc). - --spec query_failed(after_query()) -> ok. -query_failed(undefined) -> ok; -query_failed({_, OnFailed}) -> apply_query_after_calls(OnFailed). - -apply_query_after_calls(Funcs) -> - lists:foreach( - fun({Fun, Args}) -> - safe_apply(Fun, Args) - end, - Funcs - ). - %% ================================================================================= %% APIs for resource instances %% ================================================================================= @@ -172,7 +178,7 @@ apply_query_after_calls(Funcs) -> create(ResId, Group, ResourceType, Config) -> create(ResId, Group, ResourceType, Config, #{}). --spec create(resource_id(), resource_group(), resource_type(), resource_config(), create_opts()) -> +-spec create(resource_id(), resource_group(), resource_type(), resource_config(), creation_opts()) -> {ok, resource_data() | 'already_created'} | {error, Reason :: term()}. create(ResId, Group, ResourceType, Config, Opts) -> emqx_resource_proto_v1:create(ResId, Group, ResourceType, Config, Opts). @@ -188,7 +194,7 @@ create_local(ResId, Group, ResourceType, Config) -> resource_group(), resource_type(), resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()}. create_local(ResId, Group, ResourceType, Config, Opts) -> @@ -209,7 +215,7 @@ create_dry_run_local(ResourceType, Config) -> recreate(ResId, ResourceType, Config) -> recreate(ResId, ResourceType, Config, #{}). 
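The optional callbacks declared above come in sync and async flavours. A toy connector sketch showing both shapes (module name and request format are hypothetical; the async clause just hands the result back through the `{ReplyFun, Args}` supplied by the resource worker):

-module(my_echo_connector).
-behaviour(emqx_resource).

-export([
    callback_mode/0,
    on_start/2,
    on_stop/2,
    on_query/3,
    on_query_async/4,
    on_get_status/2
]).

callback_mode() -> async_if_possible.

on_start(_InstId, Config) -> {ok, #{config => Config}}.

on_stop(_InstId, _State) -> ok.

%% sync flavour: return a query_result() directly
on_query(_InstId, {echo, X}, _State) ->
    {ok, X}.

%% async flavour: reply later through the fun passed in by the worker
on_query_async(_InstId, {echo, X}, {ReplyFun, Args}, _State) ->
    _ = spawn(fun() -> erlang:apply(ReplyFun, Args ++ [{ok, X}]) end),
    ok.

on_get_status(_InstId, _State) -> connected.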
--spec recreate(resource_id(), resource_type(), resource_config(), create_opts()) -> +-spec recreate(resource_id(), resource_type(), resource_config(), creation_opts()) -> {ok, resource_data()} | {error, Reason :: term()}. recreate(ResId, ResourceType, Config, Opts) -> emqx_resource_proto_v1:recreate(ResId, ResourceType, Config, Opts). @@ -219,7 +225,7 @@ recreate(ResId, ResourceType, Config, Opts) -> recreate_local(ResId, ResourceType, Config) -> recreate_local(ResId, ResourceType, Config, #{}). --spec recreate_local(resource_id(), resource_type(), resource_config(), create_opts()) -> +-spec recreate_local(resource_id(), resource_type(), resource_config(), creation_opts()) -> {ok, resource_data()} | {error, Reason :: term()}. recreate_local(ResId, ResourceType, Config, Opts) -> emqx_resource_manager:recreate(ResId, ResourceType, Config, Opts). @@ -243,35 +249,26 @@ reset_metrics(ResId) -> %% ================================================================================= -spec query(resource_id(), Request :: term()) -> Result :: term(). query(ResId, Request) -> - query(ResId, Request, inc_metrics_funcs(ResId)). + query(ResId, Request, #{}). -%% same to above, also defines what to do when the Module:on_query success or failed -%% it is the duty of the Module to apply the `after_query()` functions. --spec query(resource_id(), Request :: term(), after_query()) -> Result :: term(). -query(ResId, Request, AfterQuery) -> - case emqx_resource_manager:ets_lookup(ResId) of - {ok, _Group, #{mod := Mod, state := ResourceState, status := connected}} -> - %% the resource state is readonly to Module:on_query/4 - %% and the `after_query()` functions should be thread safe - ok = emqx_metrics_worker:inc(resource_metrics, ResId, matched), - try - Mod:on_query(ResId, Request, AfterQuery, ResourceState) - catch - Err:Reason:ST -> - emqx_metrics_worker:inc(resource_metrics, ResId, exception), - erlang:raise(Err, Reason, ST) - end; - {ok, _Group, _Data} -> - query_error(not_connected, <<"resource not connected">>); - {error, not_found} -> - query_error(not_found, <<"resource not found">>) - end. +-spec query(resource_id(), Request :: term(), emqx_resource_worker:query_opts()) -> + Result :: term(). +query(ResId, Request, Opts) -> + emqx_resource_worker:query(ResId, Request, Opts). + +-spec simple_sync_query(resource_id(), Request :: term()) -> Result :: term(). +simple_sync_query(ResId, Request) -> + emqx_resource_worker:simple_sync_query(ResId, Request). + +-spec simple_async_query(resource_id(), Request :: term(), reply_fun()) -> Result :: term(). +simple_async_query(ResId, Request, ReplyFun) -> + emqx_resource_worker:simple_async_query(ResId, Request, ReplyFun). -spec start(resource_id()) -> ok | {error, Reason :: term()}. start(ResId) -> start(ResId, #{}). --spec start(resource_id(), create_opts()) -> ok | {error, Reason :: term()}. +-spec start(resource_id(), creation_opts()) -> ok | {error, Reason :: term()}. start(ResId, Opts) -> emqx_resource_manager:start(ResId, Opts). @@ -279,7 +276,7 @@ start(ResId, Opts) -> restart(ResId) -> restart(ResId, #{}). --spec restart(resource_id(), create_opts()) -> ok | {error, Reason :: term()}. +-spec restart(resource_id(), creation_opts()) -> ok | {error, Reason :: term()}. restart(ResId, Opts) -> emqx_resource_manager:restart(ResId, Opts). @@ -299,6 +296,24 @@ set_resource_status_connecting(ResId) -> get_instance(ResId) -> emqx_resource_manager:lookup(ResId). +-spec fetch_creation_opts(map()) -> creation_opts(). 
+fetch_creation_opts(Opts) -> + SupportedOpts = [ + health_check_interval, + start_timeout, + start_after_created, + auto_restart_interval, + enable_batch, + batch_size, + batch_time, + enable_queue, + queue_max_bytes, + query_mode, + resume_interval, + async_inflight_window + ], + maps:with(SupportedOpts, Opts). + -spec list_instances() -> [resource_id()]. list_instances() -> [Id || #{id := Id} <- list_instances_verbose()]. @@ -322,6 +337,10 @@ generate_id(Name) when is_binary(Name) -> -spec list_group_instances(resource_group()) -> [resource_id()]. list_group_instances(Group) -> emqx_resource_manager:list_group(Group). +-spec get_callback_mode(module()) -> callback_mode(). +get_callback_mode(Mod) -> + Mod:callback_mode(). + -spec call_start(manager_id(), module(), resource_config()) -> {ok, resource_state()} | {error, Reason :: term()}. call_start(MgrId, Mod, Config) -> @@ -359,7 +378,7 @@ check_and_create(ResId, Group, ResourceType, RawConfig) -> resource_group(), resource_type(), raw_resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data() | 'already_created'} | {error, term()}. check_and_create(ResId, Group, ResourceType, RawConfig, Opts) -> @@ -384,7 +403,7 @@ check_and_create_local(ResId, Group, ResourceType, RawConfig) -> resource_group(), resource_type(), raw_resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()} | {error, term()}. check_and_create_local(ResId, Group, ResourceType, RawConfig, Opts) -> check_and_do( @@ -397,7 +416,7 @@ check_and_create_local(ResId, Group, ResourceType, RawConfig, Opts) -> resource_id(), resource_type(), raw_resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()} | {error, term()}. check_and_recreate(ResId, ResourceType, RawConfig, Opts) -> @@ -411,7 +430,7 @@ check_and_recreate(ResId, ResourceType, RawConfig, Opts) -> resource_id(), resource_type(), raw_resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()} | {error, term()}. check_and_recreate_local(ResId, ResourceType, RawConfig, Opts) -> @@ -429,16 +448,19 @@ check_and_do(ResourceType, RawConfig, Do) when is_function(Do) -> %% ================================================================================= +inc_matched(ResId) -> + emqx_metrics_worker:inc(?RES_METRICS, ResId, matched). + +inc_success(ResId) -> + emqx_metrics_worker:inc(?RES_METRICS, ResId, success). + +inc_failed(ResId) -> + emqx_metrics_worker:inc(?RES_METRICS, ResId, failed). + filter_instances(Filter) -> [Id || #{id := Id, mod := Mod} <- list_instances_verbose(), Filter(Id, Mod)]. inc_metrics_funcs(ResId) -> - OnFailed = [{fun emqx_metrics_worker:inc/3, [resource_metrics, ResId, failed]}], - OnSucc = [{fun emqx_metrics_worker:inc/3, [resource_metrics, ResId, success]}], + OnSucc = [{fun ?MODULE:inc_success/1, ResId}], + OnFailed = [{fun ?MODULE:inc_failed/1, ResId}], {OnSucc, OnFailed}. - -safe_apply(Func, Args) -> - ?SAFE_CALL(erlang:apply(Func, Args)). - -query_error(Reason, Msg) -> - {error, {?MODULE, #{reason => Reason, msg => Msg}}}. diff --git a/apps/emqx_resource/src/emqx_resource_manager.erl b/apps/emqx_resource/src/emqx_resource_manager.erl index 3a1afd27c..66d9e32b0 100644 --- a/apps/emqx_resource/src/emqx_resource_manager.erl +++ b/apps/emqx_resource/src/emqx_resource_manager.erl @@ -38,8 +38,12 @@ list_group/1, ets_lookup/1, get_metrics/1, - reset_metrics/1, - set_resource_status_connecting/1 + reset_metrics/1 +]). + +-export([ + set_resource_status_connecting/1, + manager_id_to_resource_id/1 ]). 
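`fetch_creation_opts/1` above is a plain `maps:with/2` filter, so connector-specific fields fall away; for example (field values made up):

Opts = emqx_resource:fetch_creation_opts(#{
    query_mode => async,
    batch_size => 10,
    %% connector-specific field, not a creation option, so it is dropped
    server => "127.0.0.1:1883"
}),
#{query_mode := async, batch_size := 10} = Opts,
false = maps:is_key(server, Opts).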
% Server @@ -49,11 +53,11 @@ -export([init/1, callback_mode/0, handle_event/4, terminate/3]). % State record --record(data, {id, manager_id, group, mod, config, opts, status, state, error}). +-record(data, {id, manager_id, group, mod, callback_mode, config, opts, status, state, error}). +-type data() :: #data{}. -define(SHORT_HEALTHCHECK_INTERVAL, 1000). --define(HEALTHCHECK_INTERVAL, 15000). --define(ETS_TABLE, emqx_resource_manager). +-define(ETS_TABLE, ?MODULE). -define(WAIT_FOR_RESOURCE_DELAY, 100). -define(T_OPERATION, 5000). -define(T_LOOKUP, 1000). @@ -64,6 +68,13 @@ %% API %%------------------------------------------------------------------------------ +make_manager_id(ResId) -> + emqx_resource:generate_id(ResId). + +manager_id_to_resource_id(MgrId) -> + [ResId, _Index] = string:split(MgrId, ":", trailing), + ResId. + %% @doc Called from emqx_resource when starting a resource instance. %% %% Triggers the emqx_resource_manager_sup supervisor to actually create @@ -73,7 +84,7 @@ resource_group(), resource_type(), resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()}. ensure_resource(ResId, Group, ResourceType, Config, Opts) -> case lookup(ResId) of @@ -85,7 +96,7 @@ ensure_resource(ResId, Group, ResourceType, Config, Opts) -> end. %% @doc Called from emqx_resource when recreating a resource which may or may not exist --spec recreate(resource_id(), resource_type(), resource_config(), create_opts()) -> +-spec recreate(resource_id(), resource_type(), resource_config(), creation_opts()) -> {ok, resource_data()} | {error, not_found} | {error, updating_to_incorrect_resource_type}. recreate(ResId, ResourceType, NewConfig, Opts) -> case lookup(ResId) of @@ -109,14 +120,17 @@ create(MgrId, ResId, Group, ResourceType, Config, Opts) -> % The state machine will make the actual call to the callback/resource module after init ok = emqx_resource_manager_sup:ensure_child(MgrId, ResId, Group, ResourceType, Config, Opts), ok = emqx_metrics_worker:create_metrics( - resource_metrics, + ?RES_METRICS, ResId, - [matched, success, failed, exception], + [matched, success, failed, exception, resource_down], [matched] ), - case maps:get(start_after_created, Opts, true) of - true -> wait_for_resource_ready(ResId, maps:get(wait_for_resource_ready, Opts, 5000)); - false -> ok + ok = emqx_resource_worker_sup:start_workers(ResId, Opts), + case maps:get(start_after_created, Opts, ?START_AFTER_CREATED) of + true -> + wait_for_ready(ResId, maps:get(start_timeout, Opts, ?START_TIMEOUT)); + false -> + ok end, ok. @@ -132,7 +146,7 @@ create_dry_run(ResourceType, Config) -> ok = emqx_resource_manager_sup:ensure_child( MgrId, ResId, <<"dry_run">>, ResourceType, Config, #{} ), - case wait_for_resource_ready(ResId, 15000) of + case wait_for_ready(ResId, 15000) of ok -> remove(ResId); timeout -> @@ -151,22 +165,22 @@ remove(ResId, ClearMetrics) when is_binary(ResId) -> safe_call(ResId, {remove, ClearMetrics}, ?T_OPERATION). %% @doc Stops and then starts an instance that was already running --spec restart(resource_id(), create_opts()) -> ok | {error, Reason :: term()}. +-spec restart(resource_id(), creation_opts()) -> ok | {error, Reason :: term()}. restart(ResId, Opts) when is_binary(ResId) -> case safe_call(ResId, restart, ?T_OPERATION) of ok -> - wait_for_resource_ready(ResId, maps:get(wait_for_resource_ready, Opts, 5000)), + wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)), ok; {error, _Reason} = Error -> Error end. 
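`manager_id_to_resource_id/1` above relies on the manager id being the resource id plus one trailing `:<suffix>` segment added by `make_manager_id/1`; mapping it back is a single split (the suffix below is made up):

<<"bridge:my_bridge">> =
    emqx_resource_manager:manager_id_to_resource_id(<<"bridge:my_bridge:774468">>).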
%% @doc Start the resource --spec start(resource_id(), create_opts()) -> ok | {error, Reason :: term()}. +-spec start(resource_id(), creation_opts()) -> ok | {error, Reason :: term()}. start(ResId, Opts) -> case safe_call(ResId, start, ?T_OPERATION) of ok -> - wait_for_resource_ready(ResId, maps:get(wait_for_resource_ready, Opts, 5000)), + wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)), ok; {error, _Reason} = Error -> Error @@ -207,14 +221,14 @@ ets_lookup(ResId) -> %% @doc Get the metrics for the specified resource get_metrics(ResId) -> - emqx_metrics_worker:get_metrics(resource_metrics, ResId). + emqx_metrics_worker:get_metrics(?RES_METRICS, ResId). %% @doc Reset the metrics for the specified resource -spec reset_metrics(resource_id()) -> ok. reset_metrics(ResId) -> - emqx_metrics_worker:reset_metrics(resource_metrics, ResId). + emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId). -%% @doc Returns the data for all resorces +%% @doc Returns the data for all resources -spec list_all() -> [resource_data()] | []. list_all() -> try @@ -245,6 +259,7 @@ start_link(MgrId, ResId, Group, ResourceType, Config, Opts) -> manager_id = MgrId, group = Group, mod = ResourceType, + callback_mode = emqx_resource:get_callback_mode(ResourceType), config = Config, opts = Opts, status = connecting, @@ -298,8 +313,7 @@ handle_event({call, From}, stop, stopped, _Data) -> {keep_state_and_data, [{reply, From, ok}]}; handle_event({call, From}, stop, _State, Data) -> Result = stop_resource(Data), - UpdatedData = Data#data{status = disconnected}, - {next_state, stopped, UpdatedData, [{reply, From, Result}]}; + {next_state, stopped, Data, [{reply, From, Result}]}; % Called when a resource is to be stopped and removed. handle_event({call, From}, {remove, ClearMetrics}, _State, Data) -> handle_remove_event(From, ClearMetrics, Data); @@ -315,9 +329,10 @@ handle_event({call, From}, health_check, _State, Data) -> handle_manually_health_check(From, Data); % State: CONNECTING handle_event(enter, _OldState, connecting, Data) -> + UpdatedData = Data#data{status = connecting}, insert_cache(Data#data.id, Data#data.group, Data), Actions = [{state_timeout, 0, health_check}], - {keep_state_and_data, Actions}; + {keep_state, UpdatedData, Actions}; handle_event(internal, start_resource, connecting, Data) -> start_resource(Data, undefined); handle_event(state_timeout, health_check, connecting, Data) -> @@ -326,22 +341,24 @@ handle_event(state_timeout, health_check, connecting, Data) -> %% The connected state is entered after a successful on_start/2 of the callback mod %% and successful health_checks handle_event(enter, _OldState, connected, Data) -> - insert_cache(Data#data.id, Data#data.group, Data), + UpdatedData = Data#data{status = connected}, + insert_cache(Data#data.id, Data#data.group, UpdatedData), _ = emqx_alarm:deactivate(Data#data.id), - Actions = [{state_timeout, ?HEALTHCHECK_INTERVAL, health_check}], - {next_state, connected, Data, Actions}; + Actions = [{state_timeout, health_check_interval(Data#data.opts), health_check}], + {next_state, connected, UpdatedData, Actions}; handle_event(state_timeout, health_check, connected, Data) -> handle_connected_health_check(Data); %% State: DISCONNECTED handle_event(enter, _OldState, disconnected, Data) -> - insert_cache(Data#data.id, Data#data.group, Data), - handle_disconnected_state_enter(Data); + UpdatedData = Data#data{status = disconnected}, + insert_cache(Data#data.id, Data#data.group, UpdatedData), + handle_disconnected_state_enter(UpdatedData); 
handle_event(state_timeout, auto_retry, disconnected, Data) -> start_resource(Data, undefined); %% State: STOPPED %% The stopped state is entered after the resource has been explicitly stopped handle_event(enter, _OldState, stopped, Data) -> - UpdatedData = Data#data{status = disconnected}, + UpdatedData = Data#data{status = stopped}, insert_cache(Data#data.id, Data#data.group, UpdatedData), {next_state, stopped, UpdatedData}; % Ignore all other events @@ -404,7 +421,7 @@ get_owner(ResId) -> end. handle_disconnected_state_enter(Data) -> - case maps:get(auto_retry_interval, Data#data.opts, undefined) of + case maps:get(auto_restart_interval, Data#data.opts, ?AUTO_RESTART_INTERVAL) of undefined -> {next_state, disconnected, Data}; RetryInterval -> @@ -415,9 +432,10 @@ handle_disconnected_state_enter(Data) -> handle_remove_event(From, ClearMetrics, Data) -> stop_resource(Data), case ClearMetrics of - true -> ok = emqx_metrics_worker:clear_metrics(resource_metrics, Data#data.id); + true -> ok = emqx_metrics_worker:clear_metrics(?RES_METRICS, Data#data.id); false -> ok end, + ok = emqx_resource_worker_sup:stop_workers(Data#data.id, Data#data.opts), {stop_and_reply, normal, [{reply, From, ok}]}. start_resource(Data, From) -> @@ -433,7 +451,7 @@ start_resource(Data, From) -> _ = maybe_alarm(disconnected, Data#data.id), %% Keep track of the error reason why the connection did not work %% so that the Reason can be returned when the verification call is made. - UpdatedData = Data#data{status = disconnected, error = Reason}, + UpdatedData = Data#data{error = Reason}, Actions = maybe_reply([], From, Err), {next_state, disconnected, UpdatedData, Actions} end. @@ -449,9 +467,6 @@ stop_resource(Data) -> _ = maybe_clear_alarm(Data#data.id), ok. -make_manager_id(ResId) -> - emqx_resource:generate_id(ResId). - make_test_id() -> RandId = iolist_to_binary(emqx_misc:gen_id(16)), <>. @@ -481,7 +496,7 @@ handle_connected_health_check(Data) -> Data, fun (connected, UpdatedData) -> - Actions = [{state_timeout, ?HEALTHCHECK_INTERVAL, health_check}], + Actions = [{state_timeout, health_check_interval(Data#data.opts), health_check}], {keep_state, UpdatedData, Actions}; (Status, UpdatedData) -> ?SLOG(error, #{ @@ -504,6 +519,9 @@ with_health_check(Data, Func) -> insert_cache(ResId, UpdatedData#data.group, UpdatedData), Func(Status, UpdatedData). +health_check_interval(Opts) -> + maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL). + maybe_alarm(connected, _ResId) -> ok; maybe_alarm(_Status, <>) -> @@ -542,29 +560,31 @@ maybe_reply(Actions, undefined, _Reply) -> maybe_reply(Actions, From, Reply) -> [{reply, From, Reply} | Actions]. +-spec data_record_to_external_map_with_metrics(data()) -> resource_data(). data_record_to_external_map_with_metrics(Data) -> #{ id => Data#data.id, mod => Data#data.mod, + callback_mode => Data#data.callback_mode, config => Data#data.config, status => Data#data.status, state => Data#data.state, metrics => get_metrics(Data#data.id) }. --spec wait_for_resource_ready(resource_id(), integer()) -> ok | timeout. -wait_for_resource_ready(ResId, WaitTime) -> - do_wait_for_resource_ready(ResId, WaitTime div ?WAIT_FOR_RESOURCE_DELAY). +-spec wait_for_ready(resource_id(), integer()) -> ok | timeout. +wait_for_ready(ResId, WaitTime) -> + do_wait_for_ready(ResId, WaitTime div ?WAIT_FOR_RESOURCE_DELAY). 
-do_wait_for_resource_ready(_ResId, 0) -> +do_wait_for_ready(_ResId, 0) -> timeout; -do_wait_for_resource_ready(ResId, Retry) -> +do_wait_for_ready(ResId, Retry) -> case ets_lookup(ResId) of {ok, _Group, #{status := connected}} -> ok; _ -> timer:sleep(?WAIT_FOR_RESOURCE_DELAY), - do_wait_for_resource_ready(ResId, Retry - 1) + do_wait_for_ready(ResId, Retry - 1) end. safe_call(ResId, Message, Timeout) -> diff --git a/apps/emqx_resource/src/emqx_resource_sup.erl b/apps/emqx_resource/src/emqx_resource_sup.erl index 1120723c3..920743101 100644 --- a/apps/emqx_resource/src/emqx_resource_sup.erl +++ b/apps/emqx_resource/src/emqx_resource_sup.erl @@ -15,22 +15,20 @@ %%-------------------------------------------------------------------- -module(emqx_resource_sup). +-include("emqx_resource.hrl"). + -behaviour(supervisor). -export([start_link/0]). -export([init/1]). -%% set a very large pool size in case all the workers busy --define(POOL_SIZE, 64). - start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> SupFlags = #{strategy => one_for_one, intensity => 10, period => 10}, - Metrics = emqx_metrics_worker:child_spec(resource_metrics), - + Metrics = emqx_metrics_worker:child_spec(?RES_METRICS), ResourceManager = #{ id => emqx_resource_manager_sup, @@ -40,4 +38,11 @@ init([]) -> type => supervisor, modules => [emqx_resource_manager_sup] }, - {ok, {SupFlags, [Metrics, ResourceManager]}}. + WorkerSup = #{ + id => emqx_resource_worker_sup, + start => {emqx_resource_worker_sup, start_link, []}, + restart => permanent, + shutdown => infinity, + type => supervisor + }, + {ok, {SupFlags, [Metrics, ResourceManager, WorkerSup]}}. diff --git a/apps/emqx_resource/src/emqx_resource_utils.erl b/apps/emqx_resource/src/emqx_resource_utils.erl new file mode 100644 index 000000000..715691d2a --- /dev/null +++ b/apps/emqx_resource/src/emqx_resource_utils.erl @@ -0,0 +1,17 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_resource_utils). diff --git a/apps/emqx_resource/src/emqx_resource_worker.erl b/apps/emqx_resource/src/emqx_resource_worker.erl new file mode 100644 index 000000000..e940dcb69 --- /dev/null +++ b/apps/emqx_resource/src/emqx_resource_worker.erl @@ -0,0 +1,536 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% This module implements async message sending, disk message queuing, +%% and message batching using ReplayQ. + +-module(emqx_resource_worker). + +-include("emqx_resource.hrl"). +-include("emqx_resource_utils.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-behaviour(gen_statem). + +-export([ + start_link/3, + query/3, + block/1, + block/2, + resume/1 +]). + +-export([ + simple_sync_query/2, + simple_async_query/3 +]). + +-export([ + callback_mode/0, + init/1, + terminate/2, + code_change/3 +]). + +-export([running/3, blocked/3]). + +-export([queue_item_marshaller/1, estimate_size/1]). + +-export([reply_after_query/6, batch_reply_after_query/6]). + +-define(Q_ITEM(REQUEST), {q_item, REQUEST}). + +-define(QUERY(FROM, REQUEST), {query, FROM, REQUEST}). +-define(REPLY(FROM, REQUEST, RESULT), {reply, FROM, REQUEST, RESULT}). +-define(EXPAND(RESULT, BATCH), [?REPLY(FROM, REQUEST, RESULT) || ?QUERY(FROM, REQUEST) <- BATCH]). + +-define(RESOURCE_ERROR(Reason, Msg), + {error, {resource_error, #{reason => Reason, msg => iolist_to_binary(Msg)}}} +). +-define(RESOURCE_ERROR_M(Reason, Msg), {error, {resource_error, #{reason := Reason, msg := Msg}}}). + +-type id() :: binary(). +-type query() :: {query, from(), request()}. +-type request() :: term(). +-type from() :: pid() | reply_fun(). + +-callback batcher_flush(Acc :: [{from(), request()}], CbState :: term()) -> + {{from(), result()}, NewCbState :: term()}. + +callback_mode() -> [state_functions, state_enter]. + +start_link(Id, Index, Opts) -> + gen_statem:start_link({local, name(Id, Index)}, ?MODULE, {Id, Index, Opts}, []). + +-spec query(id(), request(), query_opts()) -> Result :: term(). +query(Id, Request, Opts) -> + PickKey = maps:get(pick_key, Opts, self()), + Timeout = maps:get(timeout, Opts, infinity), + pick_call(Id, PickKey, {query, Request, Opts}, Timeout). + +%% simple query the resource without batching and queuing messages. +-spec simple_sync_query(id(), request()) -> Result :: term(). +simple_sync_query(Id, Request) -> + Result = call_query(sync, Id, ?QUERY(self(), Request), #{}), + _ = handle_query_result(Id, Result, false), + Result. + +-spec simple_async_query(id(), request(), reply_fun()) -> Result :: term(). +simple_async_query(Id, Request, ReplyFun) -> + Result = call_query(async, Id, ?QUERY(ReplyFun, Request), #{}), + _ = handle_query_result(Id, Result, false), + Result. + +-spec block(pid() | atom()) -> ok. +block(ServerRef) -> + gen_statem:cast(ServerRef, block). + +-spec block(pid() | atom(), [query()]) -> ok. +block(ServerRef, Query) -> + gen_statem:cast(ServerRef, {block, Query}). + +-spec resume(pid() | atom()) -> ok. +resume(ServerRef) -> + gen_statem:cast(ServerRef, resume). 
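The two `simple_*` entry points above skip the worker pool entirely (no batching, queuing or inflight window), which suits one-off control-plane requests, while regular traffic still goes through `query/3`. A sketch using the demo connector's `get_state` request as the payload:

-module(resource_simple_query_example).
-export([get_state_sync/1, get_state_async/1]).

%% direct synchronous call into the connector's on_query/3
get_state_sync(ResId) ->
    emqx_resource:simple_sync_query(ResId, get_state).

%% direct asynchronous call; the result comes back through the given reply_fun()
get_state_async(ResId) ->
    Self = self(),
    ReplyFun = fun(Result) -> Self ! {get_state_result, Result} end,
    emqx_resource:simple_async_query(ResId, get_state, {ReplyFun, []}).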
+ +init({Id, Index, Opts}) -> + process_flag(trap_exit, true), + true = gproc_pool:connect_worker(Id, {Id, Index}), + Name = name(Id, Index), + BatchSize = maps:get(batch_size, Opts, ?DEFAULT_BATCH_SIZE), + Queue = + case maps:get(enable_queue, Opts, false) of + true -> + replayq:open(#{ + dir => disk_queue_dir(Id, Index), + seg_bytes => maps:get(queue_max_bytes, Opts, ?DEFAULT_QUEUE_SIZE), + sizer => fun ?MODULE:estimate_size/1, + marshaller => fun ?MODULE:queue_item_marshaller/1 + }); + false -> + undefined + end, + ok = inflight_new(Name), + St = #{ + id => Id, + index => Index, + name => Name, + %% query_mode = dynamic | sync | async + %% TODO: + %% dynamic mode is async mode when things are going well, but becomes sync mode + %% if the resource worker is overloaded + query_mode => maps:get(query_mode, Opts, sync), + async_inflight_window => maps:get(async_inflight_window, Opts, ?DEFAULT_INFLIGHT), + enable_batch => maps:get(enable_batch, Opts, false), + batch_size => BatchSize, + batch_time => maps:get(batch_time, Opts, ?DEFAULT_BATCH_TIME), + queue => Queue, + resume_interval => maps:get(resume_interval, Opts, ?RESUME_INTERVAL), + acc => [], + acc_left => BatchSize, + tref => undefined + }, + {ok, blocked, St, {next_event, cast, resume}}. + +running(enter, _, _St) -> + keep_state_and_data; +running(cast, resume, _St) -> + keep_state_and_data; +running(cast, block, St) -> + {next_state, blocked, St}; +running(cast, {block, [?QUERY(_, _) | _] = Batch}, #{queue := Q} = St) when is_list(Batch) -> + Q1 = maybe_append_queue(Q, [?Q_ITEM(Query) || Query <- Batch]), + {next_state, blocked, St#{queue := Q1}}; +running({call, From0}, {query, Request, Opts}, #{query_mode := QM} = St) -> + From = maybe_quick_return(QM, From0, maps:get(async_reply_fun, Opts, undefined)), + query_or_acc(From, Request, St); +running(info, {flush, Ref}, St = #{tref := {_TRef, Ref}}) -> + flush(St#{tref := undefined}); +running(info, {flush, _Ref}, _St) -> + keep_state_and_data; +running(info, Info, _St) -> + ?SLOG(error, #{msg => unexpected_msg, info => Info}), + keep_state_and_data. + +blocked(enter, _, #{resume_interval := ResumeT} = _St) -> + {keep_state_and_data, {state_timeout, ResumeT, resume}}; +blocked(cast, block, _St) -> + keep_state_and_data; +blocked(cast, {block, [?QUERY(_, _) | _] = Batch}, #{queue := Q} = St) when is_list(Batch) -> + Q1 = maybe_append_queue(Q, [?Q_ITEM(Query) || Query <- Batch]), + {keep_state, St#{queue := Q1}}; +blocked(cast, resume, St) -> + do_resume(St); +blocked(state_timeout, resume, St) -> + do_resume(St); +blocked({call, From0}, {query, Request, Opts}, #{id := Id, queue := Q, query_mode := QM} = St) -> + From = maybe_quick_return(QM, From0, maps:get(async_reply_fun, Opts, undefined)), + Error = ?RESOURCE_ERROR(blocked, "resource is blocked"), + _ = reply_caller(Id, ?REPLY(From, Request, Error)), + {keep_state, St#{queue := maybe_append_queue(Q, [?Q_ITEM(?QUERY(From, Request))])}}. + +terminate(_Reason, #{id := Id, index := Index}) -> + gproc_pool:disconnect_worker(Id, {Id, Index}). + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +queue_item_marshaller(?Q_ITEM(_) = I) -> + term_to_binary(I); +queue_item_marshaller(Bin) when is_binary(Bin) -> + binary_to_term(Bin). + +estimate_size(QItem) -> + size(queue_item_marshaller(QItem)). + +%%============================================================================== +maybe_quick_return(sync, From, _ReplyFun) -> + From; +maybe_quick_return(async, From, ReplyFun) -> + gen_statem:reply(From, ok), + ReplyFun. 
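The disk queue opened in `init/1` above is plain `replayq`; the worker only relies on `open`, `append`, `peek`/`pop` and `ack`. A standalone sketch of that life cycle (directory and items are arbitrary):

Q0 = replayq:open(#{dir => "/tmp/replayq_demo", seg_bytes => 10 * 1024 * 1024}),
Q1 = replayq:append(Q0, [<<"item1">>, <<"item2">>]),
{Q2, AckRef, [<<"item1">>]} = replayq:pop(Q1, #{count_limit => 1}),
ok = replayq:ack(Q2, AckRef),
ok = replayq:close(Q2).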
+ +pick_call(Id, Key, Query, Timeout) -> + try gproc_pool:pick_worker(Id, Key) of + Pid when is_pid(Pid) -> + gen_statem:call(Pid, Query, {clean_timeout, Timeout}); + _ -> + ?RESOURCE_ERROR(not_created, "resource not found") + catch + error:badarg -> + ?RESOURCE_ERROR(not_created, "resource not found"); + exit:{timeout, _} -> + ?RESOURCE_ERROR(timeout, "call resource timeout") + end. + +do_resume(#{queue := Q, id := Id, name := Name} = St) -> + case inflight_get_first(Name) of + empty -> + retry_first_from_queue(Q, Id, St); + {Ref, FirstQuery} -> + retry_first_sync(Id, FirstQuery, Name, Ref, undefined, St) + end. + +retry_first_from_queue(undefined, _Id, St) -> + {next_state, running, St}; +retry_first_from_queue(Q, Id, St) -> + case replayq:peek(Q) of + empty -> + {next_state, running, St}; + ?Q_ITEM(FirstQuery) -> + retry_first_sync(Id, FirstQuery, undefined, undefined, Q, St) + end. + +retry_first_sync(Id, FirstQuery, Name, Ref, Q, #{resume_interval := ResumeT} = St0) -> + Result = call_query(sync, Id, FirstQuery, #{}), + case handle_query_result(Id, Result, false) of + %% Send failed because resource down + true -> + {keep_state, St0, {state_timeout, ResumeT, resume}}; + %% Send ok or failed but the resource is working + false -> + %% We Send 'resume' to the end of the mailbox to give the worker + %% a chance to process 'query' requests. + St = + case Q of + undefined -> + inflight_drop(Name, Ref), + St0; + _ -> + St0#{queue => drop_head(Q)} + end, + {keep_state, St, {state_timeout, 0, resume}} + end. + +drop_head(Q) -> + {Q1, AckRef, _} = replayq:pop(Q, #{count_limit => 1}), + ok = replayq:ack(Q1, AckRef), + Q1. + +query_or_acc(From, Request, #{enable_batch := true, acc := Acc, acc_left := Left} = St0) -> + Acc1 = [?QUERY(From, Request) | Acc], + St = St0#{acc := Acc1, acc_left := Left - 1}, + case Left =< 1 of + true -> flush(St); + false -> {keep_state, ensure_flush_timer(St)} + end; +query_or_acc(From, Request, #{enable_batch := false, queue := Q, id := Id, query_mode := QM} = St) -> + QueryOpts = #{ + inflight_name => maps:get(name, St), + inflight_window => maps:get(async_inflight_window, St) + }, + case send_query(QM, From, Request, Id, QueryOpts) of + true -> + Query = ?QUERY(From, Request), + {next_state, blocked, St#{queue := maybe_append_queue(Q, [?Q_ITEM(Query)])}}; + false -> + {keep_state, St} + end. + +send_query(QM, From, Request, Id, QueryOpts) -> + Result = call_query(QM, Id, ?QUERY(From, Request), QueryOpts), + reply_caller(Id, ?REPLY(From, Request, Result)). + +flush(#{acc := []} = St) -> + {keep_state, St}; +flush( + #{ + id := Id, + acc := Batch, + batch_size := Size, + queue := Q0, + query_mode := QM + } = St +) -> + QueryOpts = #{ + inflight_name => maps:get(name, St), + inflight_window => maps:get(async_inflight_window, St) + }, + Result = call_query(QM, Id, Batch, QueryOpts), + St1 = cancel_flush_timer(St#{acc_left := Size, acc := []}), + case batch_reply_caller(Id, Result, Batch) of + true -> + Q1 = maybe_append_queue(Q0, [?Q_ITEM(Query) || Query <- Batch]), + {next_state, blocked, St1#{queue := Q1}}; + false -> + {keep_state, St1} + end. + +maybe_append_queue(undefined, _Items) -> undefined; +maybe_append_queue(Q, Items) -> replayq:append(Q, Items). + +batch_reply_caller(Id, BatchResult, Batch) -> + lists:foldl( + fun(Reply, BlockWorker) -> + reply_caller(Id, Reply, BlockWorker) + end, + false, + %% the `Mod:on_batch_query/3` returns a single result for a batch, + %% so we need to expand + ?EXPAND(BatchResult, Batch) + ). 
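When batching is enabled, `flush/1` above hands the whole accumulated batch to the connector in one call and `batch_reply_caller/3` fans the single result back out to every caller. On the connector side that might look like this (fragment of a hypothetical connector; `Conn` and `bulk_write/2` are assumptions):

on_batch_query(_InstId, Requests, #{conn := Conn} = _State) ->
    Payloads = [Payload || {send_message, Payload} <- Requests],
    case bulk_write(Conn, Payloads) of
        ok ->
            ok;
        {error, Reason} ->
            %% blocks the worker and, when the queue is enabled, re-queues the batch
            {resource_down, Reason}
    end.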
+ +reply_caller(Id, Reply) -> + reply_caller(Id, Reply, false). + +reply_caller(Id, ?REPLY(undefined, _, Result), BlockWorker) -> + handle_query_result(Id, Result, BlockWorker); +reply_caller(Id, ?REPLY({ReplyFun, Args}, _, Result), BlockWorker) when is_function(ReplyFun) -> + _ = + case Result of + {async_return, _} -> ok; + _ -> apply(ReplyFun, Args ++ [Result]) + end, + handle_query_result(Id, Result, BlockWorker); +reply_caller(Id, ?REPLY(From, _, Result), BlockWorker) -> + gen_statem:reply(From, Result), + handle_query_result(Id, Result, BlockWorker). + +handle_query_result(Id, ?RESOURCE_ERROR_M(exception, _), BlockWorker) -> + emqx_metrics_worker:inc(?RES_METRICS, Id, exception), + BlockWorker; +handle_query_result(_Id, ?RESOURCE_ERROR_M(NotWorking, _), _) when + NotWorking == not_connected; NotWorking == blocked +-> + true; +handle_query_result(_Id, ?RESOURCE_ERROR_M(_, _), BlockWorker) -> + BlockWorker; +handle_query_result(Id, {error, _}, BlockWorker) -> + emqx_metrics_worker:inc(?RES_METRICS, Id, failed), + BlockWorker; +handle_query_result(Id, {resource_down, _}, _BlockWorker) -> + emqx_metrics_worker:inc(?RES_METRICS, Id, resource_down), + true; +handle_query_result(_Id, {async_return, inflight_full}, _BlockWorker) -> + true; +handle_query_result(_Id, {async_return, {resource_down, _}}, _BlockWorker) -> + true; +handle_query_result(_Id, {async_return, ok}, BlockWorker) -> + BlockWorker; +handle_query_result(Id, Result, BlockWorker) -> + assert_ok_result(Result), + emqx_metrics_worker:inc(?RES_METRICS, Id, success), + BlockWorker. + +call_query(QM, Id, Query, QueryOpts) -> + case emqx_resource_manager:ets_lookup(Id) of + {ok, _Group, #{callback_mode := CM, mod := Mod, state := ResSt, status := connected}} -> + apply_query_fun(call_mode(QM, CM), Mod, Id, Query, ResSt, QueryOpts); + {ok, _Group, #{status := stopped}} -> + ?RESOURCE_ERROR(stopped, "resource stopped or disabled"); + {ok, _Group, #{status := S}} when S == connecting; S == disconnected -> + ?RESOURCE_ERROR(not_connected, "resource not connected"); + {error, not_found} -> + ?RESOURCE_ERROR(not_found, "resource not found") + end. + +-define(APPLY_RESOURCE(EXPR, REQ), + try + %% if the callback module (connector) wants to return an error that + %% makes the current resource goes into the `error` state, it should + %% return `{resource_down, Reason}` + EXPR + catch + ERR:REASON:STACKTRACE -> + MSG = io_lib:format( + "call query failed, func: ~s, id: ~s, error: ~0p, Request: ~0p", + [??EXPR, Id, {ERR, REASON, STACKTRACE}, REQ], + [{chars_limit, 1024}] + ), + ?RESOURCE_ERROR(exception, MSG) + end +). 
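Callers of `emqx_resource:query/2,3` can pattern-match the `resource_error` tuples produced by the macros above, for instance:

-module(resource_error_example).
-export([query_or_classify/2]).

query_or_classify(ResId, Request) ->
    case emqx_resource:query(ResId, Request) of
        {error, {resource_error, #{reason := not_created}}} -> give_up;
        {error, {resource_error, #{reason := blocked}}} -> retry_later;
        {error, {resource_error, #{reason := timeout}}} -> retry_later;
        Other -> Other
    end.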
+ +apply_query_fun(sync, Mod, Id, ?QUERY(_, Request) = _Query, ResSt, _QueryOpts) -> + ?tp(call_query, #{id => Id, mod => Mod, query => _Query, res_st => ResSt}), + ok = emqx_metrics_worker:inc(?RES_METRICS, Id, matched), + ?APPLY_RESOURCE(Mod:on_query(Id, Request, ResSt), Request); +apply_query_fun(async, Mod, Id, ?QUERY(_, Request) = Query, ResSt, QueryOpts) -> + ?tp(call_query_async, #{id => Id, mod => Mod, query => Query, res_st => ResSt}), + Name = maps:get(inflight_name, QueryOpts, undefined), + WinSize = maps:get(inflight_window, QueryOpts, undefined), + ?APPLY_RESOURCE( + case inflight_is_full(Name, WinSize) of + true -> + ?tp(inflight_full, #{id => Id, wind_size => WinSize}), + {async_return, inflight_full}; + false -> + ok = emqx_metrics_worker:inc(?RES_METRICS, Id, matched), + ReplyFun = fun ?MODULE:reply_after_query/6, + Ref = make_message_ref(), + Args = [self(), Id, Name, Ref, Query], + ok = inflight_append(Name, Ref, Query), + Result = Mod:on_query_async(Id, Request, {ReplyFun, Args}, ResSt), + {async_return, Result} + end, + Request + ); +apply_query_fun(sync, Mod, Id, [?QUERY(_, _) | _] = Batch, ResSt, _QueryOpts) -> + ?tp(call_batch_query, #{id => Id, mod => Mod, batch => Batch, res_st => ResSt}), + Requests = [Request || ?QUERY(_From, Request) <- Batch], + ok = emqx_metrics_worker:inc(?RES_METRICS, Id, matched, length(Batch)), + ?APPLY_RESOURCE(Mod:on_batch_query(Id, Requests, ResSt), Batch); +apply_query_fun(async, Mod, Id, [?QUERY(_, _) | _] = Batch, ResSt, QueryOpts) -> + ?tp(call_batch_query_async, #{id => Id, mod => Mod, batch => Batch, res_st => ResSt}), + Name = maps:get(inflight_name, QueryOpts, undefined), + WinSize = maps:get(inflight_window, QueryOpts, undefined), + ?APPLY_RESOURCE( + case inflight_is_full(Name, WinSize) of + true -> + ?tp(inflight_full, #{id => Id, wind_size => WinSize}), + {async_return, inflight_full}; + false -> + ok = emqx_metrics_worker:inc(?RES_METRICS, Id, matched, length(Batch)), + ReplyFun = fun ?MODULE:batch_reply_after_query/6, + Ref = make_message_ref(), + Args = {ReplyFun, [self(), Id, Name, Ref, Batch]}, + Requests = [Request || ?QUERY(_From, Request) <- Batch], + ok = inflight_append(Name, Ref, Batch), + Result = Mod:on_batch_query_async(Id, Requests, Args, ResSt), + {async_return, Result} + end, + Batch + ). + +reply_after_query(Pid, Id, Name, Ref, ?QUERY(From, Request), Result) -> + case reply_caller(Id, ?REPLY(From, Request, Result)) of + true -> ?MODULE:block(Pid); + false -> inflight_drop(Name, Ref) + end. + +batch_reply_after_query(Pid, Id, Name, Ref, Batch, Result) -> + case batch_reply_caller(Id, Result, Batch) of + true -> ?MODULE:block(Pid); + false -> inflight_drop(Name, Ref) + end. +%%============================================================================== +%% the inflight queue for async query + +inflight_new(Name) -> + _ = ets:new(Name, [named_table, ordered_set, public, {write_concurrency, true}]), + ok. + +inflight_get_first(Name) -> + case ets:first(Name) of + '$end_of_table' -> + empty; + Ref -> + case ets:lookup(Name, Ref) of + [Object] -> Object; + [] -> inflight_get_first(Name) + end + end. + +inflight_is_full(undefined, _) -> + false; +inflight_is_full(Name, MaxSize) -> + case ets:info(Name, size) of + Size when Size >= MaxSize -> true; + _ -> false + end. + +inflight_append(undefined, _Ref, _Query) -> + ok; +inflight_append(Name, Ref, Query) -> + ets:insert(Name, {Ref, Query}), + ok. + +inflight_drop(undefined, _) -> + ok; +inflight_drop(Name, Ref) -> + ets:delete(Name, Ref), + ok. 
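The inflight window above is nothing more than a named `ordered_set` ETS table keyed by a monotonically increasing ref, so the oldest pending request is always `ets:first/1`. A standalone sketch of the bookkeeping:

Tab = ets:new(demo_inflight, [ordered_set, public]),
true = ets:insert(Tab, {1, request_a}),
true = ets:insert(Tab, {2, request_b}),
{1, request_a} = hd(ets:lookup(Tab, ets:first(Tab))),  %% oldest in-flight request
true = ets:delete(Tab, 1),                              %% dropped once its reply arrives
1 = ets:info(Tab, size).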
+ +%%============================================================================== +call_mode(sync, _) -> sync; +call_mode(async, always_sync) -> sync; +call_mode(async, async_if_possible) -> async. + +assert_ok_result(ok) -> + true; +assert_ok_result({async_return, R}) -> + assert_ok_result(R); +assert_ok_result(R) when is_tuple(R) -> + ok = erlang:element(1, R); +assert_ok_result(R) -> + error({not_ok_result, R}). + +-spec name(id(), integer()) -> atom(). +name(Id, Index) -> + Mod = atom_to_list(?MODULE), + Id1 = binary_to_list(Id), + Index1 = integer_to_list(Index), + list_to_atom(lists:concat([Mod, ":", Id1, ":", Index1])). + +disk_queue_dir(Id, Index) -> + filename:join([node(), emqx:data_dir(), Id, "queue:" ++ integer_to_list(Index)]). + +ensure_flush_timer(St = #{tref := undefined, batch_time := T}) -> + Ref = make_ref(), + TRef = erlang:send_after(T, self(), {flush, Ref}), + St#{tref => {TRef, Ref}}; +ensure_flush_timer(St) -> + St. + +cancel_flush_timer(St = #{tref := undefined}) -> + St; +cancel_flush_timer(St = #{tref := {TRef, _Ref}}) -> + _ = erlang:cancel_timer(TRef), + St#{tref => undefined}. + +make_message_ref() -> + erlang:unique_integer([monotonic, positive]). diff --git a/apps/emqx_resource/src/emqx_resource_worker_sup.erl b/apps/emqx_resource/src/emqx_resource_worker_sup.erl new file mode 100644 index 000000000..a2b3a1ba5 --- /dev/null +++ b/apps/emqx_resource/src/emqx_resource_worker_sup.erl @@ -0,0 +1,136 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_resource_worker_sup). +-behaviour(supervisor). + +%%%============================================================================= +%%% Exports and Definitions +%%%============================================================================= + +%% External API +-export([start_link/0]). + +-export([start_workers/2, stop_workers/2]). + +%% Callbacks +-export([init/1]). + +-define(SERVER, ?MODULE). + +%%%============================================================================= +%%% API +%%%============================================================================= + +-spec start_link() -> supervisor:startlink_ret(). +start_link() -> + supervisor:start_link({local, ?SERVER}, ?MODULE, []). + +%%%============================================================================= +%%% Callbacks +%%%============================================================================= + +-spec init(list()) -> {ok, {supervisor:sup_flags(), [supervisor:child_spec()]}} | ignore. +init([]) -> + SupFlags = #{ + strategy => one_for_one, + intensity => 100, + period => 30 + }, + ChildSpecs = [], + {ok, {SupFlags, ChildSpecs}}. 
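The `{TRef, Ref}` pair used by `ensure_flush_timer/1` above makes stale flushes harmless: the timer message carries the ref, only a matching ref triggers a flush, and anything else falls through to the catch-all `running(info, {flush, _Ref}, _St)` clause. In isolation:

Ref = make_ref(),
_TRef = erlang:send_after(10, self(), {flush, Ref}),
receive
    {flush, Ref} -> current_timer_fired
after 100 ->
    no_flush_received
end.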
+ +start_workers(ResId, Opts) -> + PoolSize = pool_size(Opts), + _ = ensure_worker_pool(ResId, hash, [{size, PoolSize}]), + lists:foreach( + fun(Idx) -> + _ = ensure_worker_added(ResId, Idx), + ok = ensure_worker_started(ResId, Idx, Opts) + end, + lists:seq(1, PoolSize) + ). + +stop_workers(ResId, Opts) -> + PoolSize = pool_size(Opts), + lists:foreach( + fun(Idx) -> + ensure_worker_removed(ResId, Idx) + end, + lists:seq(1, PoolSize) + ), + ensure_worker_pool_removed(ResId), + ok. + +%%%============================================================================= +%%% Internal +%%%============================================================================= +pool_size(Opts) -> + maps:get(worker_pool_size, Opts, erlang:system_info(schedulers_online)). + +ensure_worker_pool(ResId, Type, Opts) -> + try + gproc_pool:new(ResId, Type, Opts) + catch + error:exists -> ok + end, + ok. + +ensure_worker_added(ResId, Idx) -> + try + gproc_pool:add_worker(ResId, {ResId, Idx}, Idx) + catch + error:exists -> ok + end, + ok. + +-define(CHILD_ID(MOD, RESID, INDEX), {MOD, RESID, INDEX}). +ensure_worker_started(ResId, Idx, Opts) -> + Mod = emqx_resource_worker, + Spec = #{ + id => ?CHILD_ID(Mod, ResId, Idx), + start => {Mod, start_link, [ResId, Idx, Opts]}, + restart => transient, + shutdown => 5000, + type => worker, + modules => [Mod] + }, + case supervisor:start_child(emqx_resource_sup, Spec) of + {ok, _Pid} -> ok; + {error, {already_started, _}} -> ok; + {error, already_present} -> ok; + {error, _} = Err -> Err + end. + +ensure_worker_removed(ResId, Idx) -> + ChildId = ?CHILD_ID(emqx_resource_worker, ResId, Idx), + case supervisor:terminate_child(emqx_resource_sup, ChildId) of + ok -> + Res = supervisor:delete_child(emqx_resource_sup, ChildId), + _ = gproc_pool:remove_worker(ResId, {ResId, Idx}), + Res; + {error, not_found} -> + ok; + {error, Reason} -> + {error, Reason} + end. + +ensure_worker_pool_removed(ResId) -> + try + gproc_pool:delete(ResId) + catch + error:badarg -> ok + end, + ok. diff --git a/apps/emqx_resource/src/proto/emqx_resource_proto_v1.erl b/apps/emqx_resource/src/proto/emqx_resource_proto_v1.erl index cdd2592d9..11af1a62c 100644 --- a/apps/emqx_resource/src/proto/emqx_resource_proto_v1.erl +++ b/apps/emqx_resource/src/proto/emqx_resource_proto_v1.erl @@ -38,7 +38,7 @@ introduced_in() -> resource_group(), resource_type(), resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data() | 'already_created'} | {error, Reason :: term()}. create(ResId, Group, ResourceType, Config, Opts) -> @@ -58,7 +58,7 @@ create_dry_run(ResourceType, Config) -> resource_id(), resource_type(), resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()} | {error, Reason :: term()}. recreate(ResId, ResourceType, Config, Opts) -> diff --git a/apps/emqx_resource/src/schema/emqx_resource_schema.erl b/apps/emqx_resource/src/schema/emqx_resource_schema.erl new file mode 100644 index 000000000..6111543d2 --- /dev/null +++ b/apps/emqx_resource/src/schema/emqx_resource_schema.erl @@ -0,0 +1,119 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_resource_schema). + +-include("emqx_resource.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([namespace/0, roots/0, fields/1]). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions + +namespace() -> "resource_schema". + +roots() -> []. + +fields('creation_opts') -> + [ + {health_check_interval, fun health_check_interval/1}, + {start_after_created, fun start_after_created/1}, + {start_timeout, fun start_timeout/1}, + {auto_restart_interval, fun auto_restart_interval/1}, + {query_mode, fun query_mode/1}, + {resume_interval, fun resume_interval/1}, + {async_inflight_window, fun async_inflight_window/1}, + {enable_batch, fun enable_batch/1}, + {batch_size, fun batch_size/1}, + {batch_time, fun batch_time/1}, + {enable_queue, fun enable_queue/1}, + {max_queue_bytes, fun queue_max_bytes/1} + ]. + +health_check_interval(type) -> emqx_schema:duration_ms(); +health_check_interval(desc) -> ?DESC("health_check_interval"); +health_check_interval(default) -> ?HEALTHCHECK_INTERVAL_RAW; +health_check_interval(required) -> false; +health_check_interval(_) -> undefined. + +start_after_created(type) -> boolean(); +start_after_created(required) -> false; +start_after_created(default) -> ?START_AFTER_CREATED; +start_after_created(desc) -> ?DESC("start_after_created"); +start_after_created(_) -> undefined. + +start_timeout(type) -> emqx_schema:duration_ms(); +start_timeout(desc) -> ?DESC("start_timeout"); +start_timeout(default) -> ?START_TIMEOUT_RAW; +start_timeout(required) -> false; +start_timeout(_) -> undefined. + +auto_restart_interval(type) -> hoconsc:union([infinity, emqx_schema:duration_ms()]); +auto_restart_interval(desc) -> ?DESC("auto_restart_interval"); +auto_restart_interval(default) -> ?AUTO_RESTART_INTERVAL_RAW; +auto_restart_interval(required) -> false; +auto_restart_interval(_) -> undefined. + +query_mode(type) -> enum([sync, async]); +query_mode(desc) -> ?DESC("query_mode"); +query_mode(default) -> sync; +query_mode(required) -> false; +query_mode(_) -> undefined. + +enable_batch(type) -> boolean(); +enable_batch(required) -> false; +enable_batch(default) -> false; +enable_batch(desc) -> ?DESC("enable_batch"); +enable_batch(_) -> undefined. + +enable_queue(type) -> boolean(); +enable_queue(required) -> false; +enable_queue(default) -> false; +enable_queue(desc) -> ?DESC("enable_queue"); +enable_queue(_) -> undefined. + +resume_interval(type) -> emqx_schema:duration_ms(); +resume_interval(desc) -> ?DESC("resume_interval"); +resume_interval(default) -> ?RESUME_INTERVAL_RAW; +resume_interval(required) -> false; +resume_interval(_) -> undefined. + +async_inflight_window(type) -> pos_integer(); +async_inflight_window(desc) -> ?DESC("async_inflight_window"); +async_inflight_window(default) -> ?DEFAULT_INFLIGHT; +async_inflight_window(required) -> false; +async_inflight_window(_) -> undefined. 
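Other schema modules would presumably embed this `'creation_opts'` group by reference; a hedged sketch of how that could look (the `resource_opts` field name and description are assumptions, not part of this change):

fields(bridge_with_resource_opts) ->
    [
        {resource_opts,
            hoconsc:mk(
                hoconsc:ref(emqx_resource_schema, 'creation_opts'),
                #{required => false, desc => "Resource options."}
            )}
    ].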
+ +batch_size(type) -> pos_integer(); +batch_size(desc) -> ?DESC("batch_size"); +batch_size(default) -> ?DEFAULT_BATCH_SIZE; +batch_size(required) -> false; +batch_size(_) -> undefined. + +batch_time(type) -> emqx_schema:duration_ms(); +batch_time(desc) -> ?DESC("batch_time"); +batch_time(default) -> ?DEFAULT_BATCH_TIME_RAW; +batch_time(required) -> false; +batch_time(_) -> undefined. + +queue_max_bytes(type) -> emqx_schema:bytesize(); +queue_max_bytes(desc) -> ?DESC("queue_max_bytes"); +queue_max_bytes(default) -> ?DEFAULT_QUEUE_SIZE_RAW; +queue_max_bytes(required) -> false; +queue_max_bytes(_) -> undefined. diff --git a/apps/emqx_resource/test/emqx_connector_demo.erl b/apps/emqx_resource/test/emqx_connector_demo.erl new file mode 100644 index 000000000..6e7bca18a --- /dev/null +++ b/apps/emqx_resource/test/emqx_connector_demo.erl @@ -0,0 +1,198 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2021-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_connector_demo). + +-include_lib("typerefl/include/types.hrl"). + +-behaviour(emqx_resource). + +%% callbacks of behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_query_async/4, + on_batch_query/3, + on_get_status/2 +]). + +-export([counter_loop/0, set_callback_mode/1]). + +%% callbacks for emqx_resource config schema +-export([roots/0]). + +roots() -> + [ + {name, fun name/1}, + {register, fun register/1} + ]. + +name(type) -> atom(); +name(required) -> true; +name(_) -> undefined. + +register(type) -> boolean(); +register(required) -> true; +register(default) -> false; +register(_) -> undefined. + +-define(CM_KEY, {?MODULE, callback_mode}). +callback_mode() -> + persistent_term:get(?CM_KEY). + +set_callback_mode(Mode) -> + persistent_term:put(?CM_KEY, Mode). + +on_start(_InstId, #{create_error := true}) -> + error("some error"); +on_start(InstId, #{name := Name, stop_error := true} = Opts) -> + Register = maps:get(register, Opts, false), + {ok, Opts#{ + id => InstId, + stop_error => true, + pid => spawn_counter_process(Name, Register) + }}; +on_start(InstId, #{name := Name} = Opts) -> + Register = maps:get(register, Opts, false), + {ok, Opts#{ + id => InstId, + pid => spawn_counter_process(Name, Register) + }}. + +on_stop(_InstId, #{stop_error := true}) -> + {error, stop_error}; +on_stop(_InstId, #{pid := Pid}) -> + erlang:exit(Pid, shutdown), + ok. + +on_query(_InstId, get_state, State) -> + {ok, State}; +on_query(_InstId, get_state_failed, State) -> + {error, State}; +on_query(_InstId, block, #{pid := Pid}) -> + Pid ! block, + ok; +on_query(_InstId, resume, #{pid := Pid}) -> + Pid ! resume, + ok; +on_query(_InstId, {inc_counter, N}, #{pid := Pid}) -> + ReqRef = make_ref(), + From = {self(), ReqRef}, + Pid ! 
{From, {inc, N}}, + receive + {ReqRef, ok} -> ok; + {ReqRef, incorrect_status} -> {resource_down, incorrect_status} + after 1000 -> + {error, timeout} + end; +on_query(_InstId, get_counter, #{pid := Pid}) -> + ReqRef = make_ref(), + From = {self(), ReqRef}, + Pid ! {From, get}, + receive + {ReqRef, Num} -> {ok, Num} + after 1000 -> + {error, timeout} + end. + +on_query_async(_InstId, {inc_counter, N}, ReplyFun, #{pid := Pid}) -> + Pid ! {inc, N, ReplyFun}, + ok; +on_query_async(_InstId, get_counter, ReplyFun, #{pid := Pid}) -> + Pid ! {get, ReplyFun}, + ok. + +on_batch_query(InstId, BatchReq, State) -> + %% Requests can be either 'get_counter' or 'inc_counter', but cannot be mixed. + case hd(BatchReq) of + {inc_counter, _} -> + batch_inc_counter(InstId, BatchReq, State); + get_counter -> + batch_get_counter(InstId, State) + end. + +batch_inc_counter(InstId, BatchReq, State) -> + TotalN = lists:foldl( + fun + ({inc_counter, N}, Total) -> + Total + N; + (Req, _Total) -> + error({mixed_requests_not_allowed, {inc_counter, Req}}) + end, + 0, + BatchReq + ), + on_query(InstId, {inc_counter, TotalN}, State). + +batch_get_counter(InstId, State) -> + on_query(InstId, get_counter, State). + +on_get_status(_InstId, #{health_check_error := true}) -> + disconnected; +on_get_status(_InstId, #{pid := Pid}) -> + timer:sleep(300), + case is_process_alive(Pid) of + true -> connected; + false -> disconnected + end. + +spawn_counter_process(Name, Register) -> + Pid = spawn_link(?MODULE, counter_loop, []), + true = maybe_register(Name, Pid, Register), + Pid. + +counter_loop() -> + counter_loop(#{counter => 0, status => running}). + +counter_loop(#{counter := Num, status := Status} = State) -> + NewState = + receive + block -> + ct:pal("counter recv: ~p", [block]), + State#{status => blocked}; + resume -> + {messages, Msgs} = erlang:process_info(self(), messages), + ct:pal("counter recv: ~p, buffered msgs: ~p", [resume, length(Msgs)]), + State#{status => running}; + {inc, N, ReplyFun} when Status == running -> + apply_reply(ReplyFun, ok), + State#{counter => Num + N}; + {{FromPid, ReqRef}, {inc, N}} when Status == running -> + FromPid ! {ReqRef, ok}, + State#{counter => Num + N}; + {{FromPid, ReqRef}, {inc, _N}} when Status == blocked -> + FromPid ! {ReqRef, incorrect_status}, + State; + {get, ReplyFun} -> + apply_reply(ReplyFun, Num), + State; + {{FromPid, ReqRef}, get} -> + FromPid ! {ReqRef, Num}, + State + end, + counter_loop(NewState). + +maybe_register(Name, Pid, true) -> + ct:pal("---- Register Name: ~p", [Name]), + ct:pal("---- whereis(): ~p", [whereis(Name)]), + erlang:register(Name, Pid); +maybe_register(_Name, _Pid, false) -> + true. + +apply_reply({ReplyFun, Args}, Result) when is_function(ReplyFun) -> + apply(ReplyFun, Args ++ [Result]). diff --git a/apps/emqx_resource/test/emqx_resource_SUITE.erl b/apps/emqx_resource/test/emqx_resource_SUITE.erl index 51e6bac43..ddd671b75 100644 --- a/apps/emqx_resource/test/emqx_resource_SUITE.erl +++ b/apps/emqx_resource/test/emqx_resource_SUITE.erl @@ -22,10 +22,13 @@ -include_lib("common_test/include/ct.hrl"). -include("emqx_resource.hrl"). -include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). --define(TEST_RESOURCE, emqx_test_resource). +-define(TEST_RESOURCE, emqx_connector_demo). -define(ID, <<"id">>). -define(DEFAULT_RESOURCE_GROUP, <<"default">>). +-define(RESOURCE_ERROR(REASON), {error, {resource_error, #{reason := REASON}}}). +-define(TRACE_OPTS, #{timetrap => 10000, timeout => 1000}). 
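Reviewer note: the two defines above capture the new emqx_resource query contract that this suite now exercises — queries no longer hand back the connector state directly; they return {ok, Result} or a resource_error tuple. A minimal sketch of a caller matching on the new shapes (function and variable names here are illustrative, not part of the change):

```erlang
%% Sketch only: branching on the new emqx_resource:query/2 return shapes that
%% this suite asserts on (not part of the change set).
query_state(ResId) ->
    case emqx_resource:query(ResId, get_state) of
        {ok, State} ->
            {connected, State};
        {error, {resource_error, #{reason := Reason}}} ->
            %% e.g. not_created | stopped | not_connected | blocked
            {resource_error, Reason};
        {error, Reason} ->
            {other_error, Reason}
    end.
```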
all() -> emqx_common_test_helpers:all(?MODULE). @@ -34,7 +37,10 @@ groups() -> []. init_per_testcase(_, Config) -> + emqx_connector_demo:set_callback_mode(always_sync), Config. +end_per_testcase(_, _Config) -> + _ = emqx_resource:remove(?ID). init_per_suite(Config) -> code:ensure_loaded(?TEST_RESOURCE), @@ -80,7 +86,7 @@ t_create_remove(_) -> #{name => test_resource}, #{} ), - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid)), @@ -110,7 +116,7 @@ t_create_remove_local(_) -> #{name => test_resource}, #{} ), - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid)), @@ -127,7 +133,7 @@ t_create_remove_local(_) -> {error, _} = emqx_resource:remove_local(?ID), ?assertMatch( - {error, {emqx_resource, #{reason := not_found}}}, + ?RESOURCE_ERROR(not_created), emqx_resource:query(?ID, get_state) ), ?assertNot(is_process_alive(Pid)). @@ -143,23 +149,23 @@ t_do_not_start_after_created(_) -> %% the resource should remain `disconnected` after created timer:sleep(200), ?assertMatch( - {error, {emqx_resource, #{reason := not_connected}}}, + ?RESOURCE_ERROR(stopped), emqx_resource:query(?ID, get_state) ), ?assertMatch( - {ok, _, #{status := disconnected}}, + {ok, _, #{status := stopped}}, emqx_resource:get_instance(?ID) ), %% start the resource manually.. ok = emqx_resource:start(?ID), - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid)), %% restart the resource ok = emqx_resource:restart(?ID), ?assertNot(is_process_alive(Pid)), - #{pid := Pid2} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid2}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid2)), ok = emqx_resource:remove_local(?ID), @@ -174,38 +180,272 @@ t_query(_) -> #{name => test_resource} ), - Pid = self(), - Success = fun() -> Pid ! success end, - Failure = fun() -> Pid ! failure end, - - #{pid := _} = emqx_resource:query(?ID, get_state), - #{pid := _} = emqx_resource:query(?ID, get_state, {[{Success, []}], [{Failure, []}]}), - #{pid := _} = emqx_resource:query(?ID, get_state, undefined), - #{pid := _} = emqx_resource:query(?ID, get_state_failed, undefined), - - receive - Message -> ?assertEqual(success, Message) - after 100 -> - ?assert(false) - end, + {ok, #{pid := _}} = emqx_resource:query(?ID, get_state), ?assertMatch( - {error, {emqx_resource, #{reason := not_found}}}, + ?RESOURCE_ERROR(not_created), emqx_resource:query(<<"unknown">>, get_state) ), ok = emqx_resource:remove_local(?ID). +t_query_counter(_) -> + {ok, _} = emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource, register => true} + ), + + {ok, 0} = emqx_resource:query(?ID, get_counter), + ok = emqx_resource:query(?ID, {inc_counter, 1}), + {ok, 1} = emqx_resource:query(?ID, get_counter), + ok = emqx_resource:query(?ID, {inc_counter, 5}), + {ok, 6} = emqx_resource:query(?ID, get_counter), + + ok = emqx_resource:remove_local(?ID). 
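Before the batching test added next, a quick illustration of what the demo connector does with a batch: emqx_connector_demo:on_batch_query/3 folds all {inc_counter, N} requests into a single increment (values below are made up):

```erlang
%% Illustration only (values made up): emqx_connector_demo:on_batch_query/3
%% folds every {inc_counter, N} in a batch into one request, so this batch
%% collapses to a single {inc_counter, 6} passed on to on_query/3.
Batch = [{inc_counter, 1}, {inc_counter, 2}, {inc_counter, 3}],
6 = lists:foldl(fun({inc_counter, N}, Acc) -> N + Acc end, 0, Batch).
```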
+
+t_batch_query_counter(_) ->
+    {ok, _} = emqx_resource:create_local(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource, register => true},
+        #{enable_batch => true}
+    ),
+
+    ?check_trace(
+        ?TRACE_OPTS,
+        emqx_resource:query(?ID, get_counter),
+        fun(Result, Trace) ->
+            ?assertMatch({ok, 0}, Result),
+            QueryTrace = ?of_kind(call_batch_query, Trace),
+            ?assertMatch([#{batch := [{query, _, get_counter}]}], QueryTrace)
+        end
+    ),
+
+    ?check_trace(
+        ?TRACE_OPTS,
+        inc_counter_in_parallel(1000),
+        fun(Trace) ->
+            QueryTrace = ?of_kind(call_batch_query, Trace),
+            ?assertMatch([#{batch := BatchReq} | _] when length(BatchReq) > 1, QueryTrace)
+        end
+    ),
+    {ok, 1000} = emqx_resource:query(?ID, get_counter),
+
+    ok = emqx_resource:remove_local(?ID).
+
+t_query_counter_async_query(_) ->
+    {ok, _} = emqx_resource:create_local(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource, register => true},
+        #{query_mode => async}
+    ),
+    ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)),
+    ?check_trace(
+        ?TRACE_OPTS,
+        inc_counter_in_parallel(1000),
+        fun(Trace) ->
+            %% the callback_mode of 'emqx_connector_demo' is 'always_sync'.
+            QueryTrace = ?of_kind(call_query, Trace),
+            ?assertMatch([#{query := {query, _, {inc_counter, 1}}} | _], QueryTrace)
+        end
+    ),
+    %% wait for 1s to make sure all the async queries are sent to the resource.
+    timer:sleep(1000),
+    %% simple query ignores the query_mode and batching settings in the resource_worker
+    ?check_trace(
+        ?TRACE_OPTS,
+        emqx_resource:simple_sync_query(?ID, get_counter),
+        fun(Result, Trace) ->
+            ?assertMatch({ok, 1000}, Result),
+            %% the callback_mode of 'emqx_connector_demo' is 'always_sync'.
+            QueryTrace = ?of_kind(call_query, Trace),
+            ?assertMatch([#{query := {query, _, get_counter}}], QueryTrace)
+        end
+    ),
+    {ok, _, #{metrics := #{counters := C}}} = emqx_resource:get_instance(?ID),
+    ?assertMatch(#{matched := 1002, success := 1002, failed := 0}, C),
+    ok = emqx_resource:remove_local(?ID).
+
+t_query_counter_async_callback(_) ->
+    emqx_connector_demo:set_callback_mode(async_if_possible),
+
+    Tab0 = ets:new(?FUNCTION_NAME, [bag, public]),
+    Insert = fun(Tab, Result) ->
+        ets:insert(Tab, {make_ref(), Result})
+    end,
+    ReqOpts = #{async_reply_fun => {Insert, [Tab0]}},
+    {ok, _} = emqx_resource:create_local(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource, register => true},
+        #{query_mode => async, async_inflight_window => 1000000}
+    ),
+    ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)),
+    ?check_trace(
+        ?TRACE_OPTS,
+        inc_counter_in_parallel(1000, ReqOpts),
+        fun(Trace) ->
+            QueryTrace = ?of_kind(call_query_async, Trace),
+            ?assertMatch([#{query := {query, _, {inc_counter, 1}}} | _], QueryTrace)
+        end
+    ),
+
+    %% wait for 1s to make sure all the async queries are sent to the resource.
+    timer:sleep(1000),
+    %% simple query ignores the query_mode and batching settings in the resource_worker
+    ?check_trace(
+        ?TRACE_OPTS,
+        emqx_resource:simple_sync_query(?ID, get_counter),
+        fun(Result, Trace) ->
+            ?assertMatch({ok, 1000}, Result),
+            QueryTrace = ?of_kind(call_query, Trace),
+            ?assertMatch([#{query := {query, _, get_counter}}], QueryTrace)
+        end
+    ),
+    {ok, _, #{metrics := #{counters := C}}} = emqx_resource:get_instance(?ID),
+    ?assertMatch(#{matched := 1002, success := 1002, failed := 0}, C),
+    ?assertMatch(1000, ets:info(Tab0, size)),
+    ?assert(
+        lists:all(
+            fun
+                ({_, ok}) -> true;
+                (_) -> false
+            end,
+            ets:tab2list(Tab0)
+        )
+    ),
+    ok = emqx_resource:remove_local(?ID).
+
+t_query_counter_async_inflight(_) ->
+    emqx_connector_demo:set_callback_mode(async_if_possible),
+
+    Tab0 = ets:new(?FUNCTION_NAME, [bag, public]),
+    Insert0 = fun(Tab, Result) ->
+        ets:insert(Tab, {make_ref(), Result})
+    end,
+    ReqOpts = #{async_reply_fun => {Insert0, [Tab0]}},
+    WindowSize = 15,
+    {ok, _} = emqx_resource:create_local(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource, register => true},
+        #{
+            query_mode => async,
+            async_inflight_window => WindowSize,
+            worker_pool_size => 1,
+            resume_interval => 300
+        }
+    ),
+    ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)),
+
+    %% block the resource
+    ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, block)),
+
+    %% send async queries to make the inflight window full
+    ?check_trace(
+        ?TRACE_OPTS,
+        inc_counter_in_parallel(WindowSize, ReqOpts),
+        fun(Trace) ->
+            QueryTrace = ?of_kind(call_query_async, Trace),
+            ?assertMatch([#{query := {query, _, {inc_counter, 1}}} | _], QueryTrace)
+        end
+    ),
+
+    %% this will block the resource_worker
+    ok = emqx_resource:query(?ID, {inc_counter, 1}),
+    ?assertMatch(0, ets:info(Tab0, size)),
+    %% sleep to let the resource_worker try to resume a few times
+    timer:sleep(2000),
+
+    %% sending a query now will fail because the resource is blocked.
+    Insert = fun(Tab, Ref, Result) ->
+        ets:insert(Tab, {Ref, Result})
+    end,
+    ok = emqx_resource:query(?ID, {inc_counter, 1}, #{
+        async_reply_fun => {Insert, [Tab0, tmp_query]}
+    }),
+    ?assertMatch([{_, {error, {resource_error, #{reason := blocked}}}}], ets:take(Tab0, tmp_query)),
+
+    %% all responses should be received after the resource is resumed.
+    ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)),
+    timer:sleep(1000),
+    ?assertEqual(WindowSize, ets:info(Tab0, size)),
+
+    %% send async queries; this time everything should be ok.
+ Num = 10, + ?check_trace( + ?TRACE_OPTS, + inc_counter_in_parallel(Num, ReqOpts), + fun(Trace) -> + QueryTrace = ?of_kind(call_query_async, Trace), + ?assertMatch([#{query := {query, _, {inc_counter, 1}}} | _], QueryTrace) + end + ), + timer:sleep(1000), + ?assertEqual(WindowSize + Num, ets:info(Tab0, size)), + + %% block the resource + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, block)), + %% again, send async query to make the inflight window full + ?check_trace( + ?TRACE_OPTS, + inc_counter_in_parallel(WindowSize, ReqOpts), + fun(Trace) -> + QueryTrace = ?of_kind(call_query_async, Trace), + ?assertMatch([#{query := {query, _, {inc_counter, 1}}} | _], QueryTrace) + end + ), + + %% this will block the resource_worker + ok = emqx_resource:query(?ID, {inc_counter, 1}), + + Sent = WindowSize + Num + WindowSize, + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)), + timer:sleep(1000), + ?assertEqual(Sent, ets:info(Tab0, size)), + + {ok, Counter} = emqx_resource:simple_sync_query(?ID, get_counter), + ct:pal("get_counter: ~p, sent: ~p", [Counter, Sent]), + ?assert(Sent == Counter), + + {ok, _, #{metrics := #{counters := C}}} = emqx_resource:get_instance(?ID), + ct:pal("metrics: ~p", [C]), + ?assertMatch( + #{matched := M, success := S, exception := E, failed := F, resource_down := RD} when + M >= Sent andalso M == S + E + F + RD, + C + ), + ?assert( + lists:all( + fun + ({_, ok}) -> true; + (_) -> false + end, + ets:tab2list(Tab0) + ) + ), + ok = emqx_resource:remove_local(?ID). + t_healthy_timeout(_) -> {ok, _} = emqx_resource:create_local( ?ID, ?DEFAULT_RESOURCE_GROUP, ?TEST_RESOURCE, - #{name => <<"test_resource">>}, - #{health_check_timeout => 200} + #{name => <<"bad_not_atom_name">>, register => true}, + %% the ?TEST_RESOURCE always returns the `Mod:on_get_status/2` 300ms later. + #{health_check_interval => 200} + ), + ?assertMatch( + ?RESOURCE_ERROR(not_connected), + emqx_resource:query(?ID, get_state) ), - timer:sleep(500), - ok = emqx_resource:remove_local(?ID). t_healthy(_) -> @@ -213,11 +453,9 @@ t_healthy(_) -> ?ID, ?DEFAULT_RESOURCE_GROUP, ?TEST_RESOURCE, - #{name => <<"test_resource">>} + #{name => test_resource} ), - timer:sleep(400), - - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), timer:sleep(300), emqx_resource:set_resource_status_connecting(?ID), @@ -229,10 +467,10 @@ t_healthy(_) -> erlang:exit(Pid, shutdown), - ?assertEqual({ok, connecting}, emqx_resource:health_check(?ID)), + ?assertEqual({ok, disconnected}, emqx_resource:health_check(?ID)), ?assertMatch( - [#{status := connecting}], + [#{status := disconnected}], emqx_resource:list_instances_verbose() ), @@ -260,7 +498,7 @@ t_stop_start(_) -> #{} ), - #{pid := Pid0} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid0}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid0)), @@ -269,14 +507,14 @@ t_stop_start(_) -> ?assertNot(is_process_alive(Pid0)), ?assertMatch( - {error, {emqx_resource, #{reason := not_connected}}}, + ?RESOURCE_ERROR(stopped), emqx_resource:query(?ID, get_state) ), ok = emqx_resource:restart(?ID), timer:sleep(300), - #{pid := Pid1} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid1}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid1)). 
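Reviewer note: the async cases above all report results through the async_reply_fun request option; the resource layer invokes the fun as apply(Fun, Args ++ [Result]) (see apply_reply/2 in emqx_connector_demo). A self-contained sketch of that pattern, with an illustrative helper name:

```erlang
%% Sketch only: collecting async replies into an ETS table, mirroring how the
%% tests above use the async_reply_fun request option. The 2-arity fun below
%% receives the pre-bound table plus the query result appended by the
%% resource layer.
collect_async_replies(ResId, N) ->
    Tab = ets:new(async_replies, [bag, public]),
    InsertFun = fun(T, Result) -> ets:insert(T, {make_ref(), Result}) end,
    Opts = #{async_reply_fun => {InsertFun, [Tab]}},
    lists:foreach(
        fun(_) -> _ = emqx_resource:query(ResId, {inc_counter, 1}, Opts) end,
        lists:seq(1, N)
    ),
    Tab.
```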
@@ -302,7 +540,7 @@ t_stop_start_local(_) -> #{} ), - #{pid := Pid0} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid0}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid0)), @@ -311,13 +549,13 @@ t_stop_start_local(_) -> ?assertNot(is_process_alive(Pid0)), ?assertMatch( - {error, {emqx_resource, #{reason := not_connected}}}, + ?RESOURCE_ERROR(stopped), emqx_resource:query(?ID, get_state) ), ok = emqx_resource:restart(?ID), - #{pid := Pid1} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid1}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid1)). @@ -358,6 +596,10 @@ t_create_dry_run_local(_) -> [] = ets:match(emqx_resource_manager, {{owner, '$1'}, '_'}). create_dry_run_local_succ() -> + case whereis(test_resource) of + undefined -> ok; + Pid -> exit(Pid, kill) + end, ?assertEqual( ok, emqx_resource:create_dry_run_local( @@ -368,17 +610,17 @@ create_dry_run_local_succ() -> ?assertEqual(undefined, whereis(test_resource)). t_create_dry_run_local_failed(_) -> - {Res1, _} = emqx_resource:create_dry_run_local( + Res1 = emqx_resource:create_dry_run_local( ?TEST_RESOURCE, - #{cteate_error => true} + #{create_error => true} ), - ?assertEqual(error, Res1), + ?assertMatch({error, _}, Res1), - {Res2, _} = emqx_resource:create_dry_run_local( + Res2 = emqx_resource:create_dry_run_local( ?TEST_RESOURCE, #{name => test_resource, health_check_error => true} ), - ?assertEqual(error, Res2), + ?assertMatch({error, _}, Res2), Res3 = emqx_resource:create_dry_run_local( ?TEST_RESOURCE, @@ -400,7 +642,7 @@ t_reset_metrics(_) -> #{name => test_resource} ), - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), emqx_resource:reset_metrics(?ID), ?assert(is_process_alive(Pid)), ok = emqx_resource:remove(?ID), @@ -419,6 +661,37 @@ t_auto_retry(_) -> %%------------------------------------------------------------------------------ %% Helpers %%------------------------------------------------------------------------------ +inc_counter_in_parallel(N) -> + inc_counter_in_parallel(N, #{}). + +inc_counter_in_parallel(N, Opts) -> + Parent = self(), + Pids = [ + erlang:spawn(fun() -> + emqx_resource:query(?ID, {inc_counter, 1}, Opts), + Parent ! {complete, self()} + end) + || _ <- lists:seq(1, N) + ], + [ + receive + {complete, Pid} -> ok + after 1000 -> + ct:fail({wait_for_query_timeout, Pid}) + end + || Pid <- Pids + ]. + +% verify_inflight_full(WindowSize) -> +% ?check_trace( +% ?TRACE_OPTS, +% emqx_resource:query(?ID, {inc_counter, 1}), +% fun(Return, Trace) -> +% QueryTrace = ?of_kind(inflight_full, Trace), +% ?assertMatch([#{wind_size := WindowSize} | _], QueryTrace), +% ?assertMatch(ok, Return) +% end +% ). bin_config() -> <<"\"name\": \"test_resource\"">>. diff --git a/apps/emqx_resource/test/emqx_test_resource.erl b/apps/emqx_resource/test/emqx_test_resource.erl deleted file mode 100644 index c23f87d50..000000000 --- a/apps/emqx_resource/test/emqx_test_resource.erl +++ /dev/null @@ -1,110 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2021-2022 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. 
-%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - --module(emqx_test_resource). - --include_lib("typerefl/include/types.hrl"). - --behaviour(emqx_resource). - -%% callbacks of behaviour emqx_resource --export([ - on_start/2, - on_stop/2, - on_query/4, - on_get_status/2 -]). - -%% callbacks for emqx_resource config schema --export([roots/0]). - -roots() -> - [ - {name, fun name/1}, - {register, fun register/1} - ]. - -name(type) -> atom(); -name(required) -> true; -name(_) -> undefined. - -register(type) -> boolean(); -register(required) -> true; -register(default) -> false; -register(_) -> undefined. - -on_start(_InstId, #{create_error := true}) -> - error("some error"); -on_start(InstId, #{name := Name, stop_error := true} = Opts) -> - Register = maps:get(register, Opts, false), - {ok, #{ - name => Name, - id => InstId, - stop_error => true, - pid => spawn_dummy_process(Name, Register) - }}; -on_start(InstId, #{name := Name, health_check_error := true} = Opts) -> - Register = maps:get(register, Opts, false), - {ok, #{ - name => Name, - id => InstId, - health_check_error => true, - pid => spawn_dummy_process(Name, Register) - }}; -on_start(InstId, #{name := Name} = Opts) -> - Register = maps:get(register, Opts, false), - {ok, #{ - name => Name, - id => InstId, - pid => spawn_dummy_process(Name, Register) - }}. - -on_stop(_InstId, #{stop_error := true}) -> - {error, stop_error}; -on_stop(_InstId, #{pid := Pid}) -> - erlang:exit(Pid, shutdown), - ok. - -on_query(_InstId, get_state, AfterQuery, State) -> - emqx_resource:query_success(AfterQuery), - State; -on_query(_InstId, get_state_failed, AfterQuery, State) -> - emqx_resource:query_failed(AfterQuery), - State. - -on_get_status(_InstId, #{health_check_error := true}) -> - disconnected; -on_get_status(_InstId, #{pid := Pid}) -> - timer:sleep(300), - case is_process_alive(Pid) of - true -> connected; - false -> connecting - end. - -spawn_dummy_process(Name, Register) -> - spawn( - fun() -> - true = - case Register of - true -> register(Name, self()); - _ -> true - end, - Ref = make_ref(), - receive - Ref -> ok - end - end - ). 
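To summarize what replacing emqx_test_resource with emqx_connector_demo reflects: a resource implementation now declares a callback_mode/0 and reports query results through plain return values instead of the old AfterQuery success/failure hooks. A stripped-down skeleton (sketch only; the module name is made up, and the async/batch callbacks are omitted for brevity):

```erlang
%% Sketch only: the shape of an emqx_resource implementation after this change.
-module(my_demo_resource).

-behaviour(emqx_resource).

-export([callback_mode/0, on_start/2, on_stop/2, on_query/3, on_get_status/2]).

callback_mode() -> always_sync.

on_start(_InstId, Opts) -> {ok, Opts}.

on_stop(_InstId, _State) -> ok.

%% Query results are now plain return values: ok | {ok, Result} | {error, Reason},
%% instead of being reported through the old AfterQuery hooks.
on_query(_InstId, get_state, State) -> {ok, State};
on_query(_InstId, _Request, _State) -> {error, not_supported}.

on_get_status(_InstId, _State) -> connected.
```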
diff --git a/build b/build index 92f033be6..5f0c96744 100755 --- a/build +++ b/build @@ -112,7 +112,7 @@ make_docs() { fi case "$(is_enterprise "$PROFILE")" in 'yes') - SCHEMA_MODULE='emqx_enterprise_conf_schema' + SCHEMA_MODULE='emqx_ee_conf_schema' ;; 'no') SCHEMA_MODULE='emqx_conf_schema' diff --git a/deploy/charts/emqx/README.md b/deploy/charts/emqx/README.md index ed331619d..9c3762fdd 100644 --- a/deploy/charts/emqx/README.md +++ b/deploy/charts/emqx/README.md @@ -75,14 +75,14 @@ The following table lists the configurable parameters of the emqx chart and thei | `service.externalIPs` | ExternalIPs for the service | [] | | `service.annotations` | Service annotations | {}(evaluated as a template) | | `ingress.dashboard.enabled` | Enable ingress for EMQX Dashboard | false | -| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | | +| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | | | `ingress.dashboard.path` | Ingress path for EMQX Dashboard | / | | `ingress.dashboard.pathType` | Ingress pathType for EMQX Dashboard | `ImplementationSpecific` | | `ingress.dashboard.hosts` | Ingress hosts for EMQX Mgmt API | dashboard.emqx.local | | `ingress.dashboard.tls` | Ingress tls for EMQX Mgmt API | [] | | `ingress.dashboard.annotations` | Ingress annotations for EMQX Mgmt API | {} | | `ingress.mgmt.enabled` | Enable ingress for EMQX Mgmt API | false | -| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Mgmt API | | +| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Mgmt API | | | `ingress.mgmt.path` | Ingress path for EMQX Mgmt API | / | | `ingress.mgmt.hosts` | Ingress hosts for EMQX Mgmt API | api.emqx.local | | `ingress.mgmt.tls` | Ingress tls for EMQX Mgmt API | [] | diff --git a/git-blame-ignore-revs b/git-blame-ignore-revs index b21b6a552..41c6e5e49 100644 --- a/git-blame-ignore-revs +++ b/git-blame-ignore-revs @@ -13,7 +13,7 @@ acb3544d4b112121b5d9414237d2af7860ccc2a3 # reformat lib-ee/emqx_license 4f396cceb84d79d5ef540e91c1a8420e8de74a56 4e3fd9febd0df11f3fe5f221cd2c4362be57c886 -# reformat lib-ee/emqx_enterprise_conf +# reformat lib-ee/emqx_ee_conf 1aa82992616ad848539a533a5cd20ba6f9071e5a # reformat apps/emqx_gateway 3f6d78dda03fd0d8e968a352e134f11a7f16bfe8 diff --git a/lib-ee/emqx_enterprise_conf/.gitignore b/lib-ee/emqx_ee_bridge/.gitignore similarity index 100% rename from lib-ee/emqx_enterprise_conf/.gitignore rename to lib-ee/emqx_ee_bridge/.gitignore diff --git a/lib-ee/emqx_ee_bridge/README.md b/lib-ee/emqx_ee_bridge/README.md new file mode 100644 index 000000000..5cb4d8694 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/README.md @@ -0,0 +1,9 @@ +emqx_ee_bridge +===== + +An OTP application + +Build +----- + + $ rebar3 compile diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_hstreamdb.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_hstreamdb.conf new file mode 100644 index 000000000..dd3346579 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_hstreamdb.conf @@ -0,0 +1,94 @@ +emqx_ee_bridge_hstreamdb { + local_topic { + desc { + en: """ +The MQTT topic filter to be forwarded to the HStreamDB. All MQTT 'PUBLISH' messages with the topic +matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is
+configured, then both the data from the rule and the MQTT messages that match local_topic
+will be forwarded.
+"""
+            zh: """
+发送到 'local_topic' 的消息都会转发到 HStreamDB。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 HStreamDB。 +""" + } + label { + en: "Local Topic" + zh: "本地 Topic" + } + } + payload { + desc { + en: """The payload to be forwarded to the HStreamDB. Placeholders supported.""" + zh: """要转发到 HStreamDB 的数据内容,支持占位符""" + } + label { + en: "Payload" + zh: "消息内容" + } + } + config_enable { + desc { + en: """Enable or disable this bridge""" + zh: """启用/禁用桥接""" + } + label { + en: "Enable Or Disable Bridge" + zh: "启用/禁用桥接" + } + } + config_direction { + desc { + en: """The direction of this bridge, MUST be 'egress'""" + zh: """桥接的方向, 必须是 egress""" + } + label { + en: "Bridge Direction" + zh: "桥接方向" + } + } + + desc_config { + desc { + en: """Configuration for an HStreamDB bridge.""" + zh: """HStreamDB 桥接配置""" + } + label: { + en: "HStreamDB Bridge Configuration" + zh: "HStreamDB 桥接配置" + } + } + + desc_type { + desc { + en: """The Bridge Type""" + zh: """Bridge 类型""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + + desc_name { + desc { + en: """Bridge name, used as a human-readable description of the bridge.""" + zh: """桥接名字,可读描述""" + } + label { + en: "Bridge Name" + zh: "桥接名字" + } + } + desc_connector { + desc { + en: """Generic configuration for the connector.""" + zh: """连接器的通用配置。""" + } + label: { + en: "Connector Generic Configuration" + zh: "连接器通用配置。" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_influxdb.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_influxdb.conf new file mode 100644 index 000000000..9e805132e --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_influxdb.conf @@ -0,0 +1,107 @@ +emqx_ee_bridge_influxdb { + local_topic { + desc { + en: """The MQTT topic filter to be forwarded to the InfluxDB. All MQTT 'PUBLISH' messages with the topic +matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is
+configured, then both the data from the rule and the MQTT messages that match local_topic
+will be forwarded.
+"""
+            zh: """发送到 'local_topic' 的消息都会转发到 InfluxDB。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 InfluxDB。 +""" + } + label { + en: "Local Topic" + zh: "本地 Topic" + } + } + write_syntax { + desc { + en: """Conf of InfluxDB line protocol to write data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point, and placeholder supported. +See also [InfluxDB 2.3 Line Protocol](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/) and +[InfluxDB 1.8 Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/)
+TLDR:
+```
+<measurement>[,<tag_key>=<tag_value>[,<tag_key>=<tag_value>]] <field_key>=<field_value>[,<field_key>=<field_value>] [<timestamp>]
+```
+"""
+            zh: """使用 InfluxDB API Line Protocol 写入 InfluxDB 的数据,支持占位符
+参考 [InfluxDB 2.3 Line Protocol](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/) 及 +[InfluxDB 1.8 Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/)
+TLDR:
+``` +[,=[,=]] =[,=] [] +``` +""" + } + label { + en: "Write Syntax" + zh: "写语句" + } + } + config_enable { + desc { + en: """Enable or disable this bridge.""" + zh: """启用/禁用桥接。""" + } + label { + en: "Enable Or Disable Bridge" + zh: "启用/禁用桥接" + } + } + config_direction { + desc { + en: """The direction of this bridge, MUST be 'egress'.""" + zh: """桥接的方向,必须是 egress。""" + } + label { + en: "Bridge Direction" + zh: "桥接方向" + } + } + + desc_config { + desc { + en: """Configuration for an InfluxDB bridge.""" + zh: """InfluxDB 桥接配置。""" + } + label: { + en: "InfluxDB Bridge Configuration" + zh: "InfluxDB 桥接配置" + } + } + + desc_type { + desc { + en: """The Bridge Type.""" + zh: """Bridge 类型。""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + + desc_name { + desc { + en: """Bridge name, used as a human-readable description of the bridge.""" + zh: """桥接名字,人类可读的描述信息。""" + } + label { + en: "Bridge Name" + zh: "桥接名字" + } + } + + desc_connector { + desc { + en: """Generic configuration for the connector.""" + zh: """连接器的通用配置。""" + } + label: { + en: "Connector Generic Configuration" + zh: "连接器通用配置。" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mysql.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mysql.conf new file mode 100644 index 000000000..48fbd1007 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mysql.conf @@ -0,0 +1,75 @@ +emqx_ee_bridge_mysql { + sql_template { + desc { + en: """SQL Template""" + zh: """SQL 模板""" + } + label { + en: "SQL Template" + zh: "SQL 模板" + } + } + config_enable { + desc { + en: """Enable or disable this bridge""" + zh: """启用/禁用桥接""" + } + label { + en: "Enable Or Disable Bridge" + zh: "启用/禁用桥接" + } + } + config_direction { + desc { + en: """The direction of this bridge, MUST be 'egress'""" + zh: """桥接的方向, 必须是 egress""" + } + label { + en: "Bridge Direction" + zh: "桥接方向" + } + } + + desc_config { + desc { + en: """Configuration for an HStreamDB bridge.""" + zh: """HStreamDB 桥接配置""" + } + label: { + en: "HStreamDB Bridge Configuration" + zh: "HStreamDB 桥接配置" + } + } + + desc_type { + desc { + en: """The Bridge Type""" + zh: """Bridge 类型""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + + desc_name { + desc { + en: """Bridge name, used as a human-readable description of the bridge.""" + zh: """桥接名字,可读描述""" + } + label { + en: "Bridge Name" + zh: "桥接名字" + } + } + desc_connector { + desc { + en: """Generic configuration for the connector.""" + zh: """连接器的通用配置。""" + } + label: { + en: "Connector Generic Configuration" + zh: "连接器通用配置。" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/include/emqx_ee_bridge.hrl b/lib-ee/emqx_ee_bridge/include/emqx_ee_bridge.hrl new file mode 100644 index 000000000..0065db56b --- /dev/null +++ b/lib-ee/emqx_ee_bridge/include/emqx_ee_bridge.hrl @@ -0,0 +1,18 @@ +-define(METRICS(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), #{ + matched => MATCH, + success => SUCC, + failed => FAILED, + rate => RATE, + rate_last5m => RATE_5, + rate_max => RATE_MAX +}). + +-define(METRICS_EXAMPLE, #{ + metrics => ?METRICS(0, 0, 0, 0, 0, 0), + node_metrics => [ + #{ + node => node(), + metrics => ?METRICS(0, 0, 0, 0, 0, 0) + } + ] +}). diff --git a/lib-ee/emqx_ee_bridge/rebar.config b/lib-ee/emqx_ee_bridge/rebar.config new file mode 100644 index 000000000..5dd22ccef --- /dev/null +++ b/lib-ee/emqx_ee_bridge/rebar.config @@ -0,0 +1,6 @@ +{erl_opts, [debug_info]}. +{deps, []}. + +{shell, [ + {apps, [emqx_ee_bridge]} +]}. 
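For orientation, the ?METRICS_EXAMPLE macro introduced in emqx_ee_bridge.hrl above expands to a zeroed metrics map, which the bridge schema modules later in this change merge into their GET examples via maps:merge(values(post), ?METRICS_EXAMPLE). Roughly:

```erlang
%% What ?METRICS_EXAMPLE expands to (all counters zeroed):
#{
    metrics => #{
        matched => 0,
        success => 0,
        failed => 0,
        rate => 0,
        rate_last5m => 0,
        rate_max => 0
    },
    node_metrics => [
        #{
            node => node(),
            metrics => #{
                matched => 0,
                success => 0,
                failed => 0,
                rate => 0,
                rate_last5m => 0,
                rate_max => 0
            }
        }
    ]
}.
```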
diff --git a/lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf.app.src b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src similarity index 54% rename from lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf.app.src rename to lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src index 37cb78b54..a578b7d0d 100644 --- a/lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf.app.src +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src @@ -1,5 +1,4 @@ -{application, emqx_enterprise_conf, [ - {description, "EMQX Enterprise configuration schema"}, +{application, emqx_ee_bridge, [ {vsn, "0.1.0"}, {registered, []}, {applications, [ @@ -9,6 +8,5 @@ {env, []}, {modules, []}, - {licenses, ["Apache 2.0"]}, {links, []} ]}. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl new file mode 100644 index 000000000..ebc06d211 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -0,0 +1,71 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + api_schemas/1, + conn_bridge_examples/1, + resource_type/1, + fields/1 +]). + +api_schemas(Method) -> + [ + ref(emqx_ee_bridge_mysql, Method), + ref(emqx_ee_bridge_hstreamdb, Method), + ref(emqx_ee_bridge_influxdb, Method ++ "_udp"), + ref(emqx_ee_bridge_influxdb, Method ++ "_api_v1"), + ref(emqx_ee_bridge_influxdb, Method ++ "_api_v2") + ]. + +schema_modules() -> + [ + emqx_ee_bridge_hstreamdb, + emqx_ee_bridge_influxdb, + emqx_ee_bridge_mysql + ]. + +conn_bridge_examples(Method) -> + MergeFun = + fun(Example, Examples) -> + maps:merge(Examples, Example) + end, + Fun = + fun(Module, Examples) -> + ConnectorExamples = erlang:apply(Module, conn_bridge_examples, [Method]), + lists:foldl(MergeFun, Examples, ConnectorExamples) + end, + lists:foldl(Fun, #{}, schema_modules()). + +resource_type(Type) when is_binary(Type) -> resource_type(binary_to_atom(Type, utf8)); +resource_type(hstreamdb) -> emqx_ee_connector_hstreamdb; +resource_type(mysql) -> emqx_connector_mysql; +resource_type(influxdb_udp) -> emqx_ee_connector_influxdb; +resource_type(influxdb_api_v1) -> emqx_ee_connector_influxdb; +resource_type(influxdb_api_v2) -> emqx_ee_connector_influxdb. + +fields(bridges) -> + [ + {hstreamdb, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_hstreamdb, "config")), + #{desc => <<"EMQX Enterprise Config">>} + )}, + {mysql, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_mysql, "config")), + #{desc => <<"EMQX Enterprise Config">>} + )} + ] ++ fields(influxdb); +fields(influxdb) -> + [ + {Protocol, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_influxdb, Protocol)), + #{desc => <<"EMQX Enterprise Config">>} + )} + || Protocol <- [influxdb_udp, influxdb_api_v1, influxdb_api_v2] + ]. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_hstreamdb.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_hstreamdb.erl new file mode 100644 index 000000000..3b5183150 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_hstreamdb.erl @@ -0,0 +1,95 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_hstreamdb). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). 
+-include("emqx_ee_bridge.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"hstreamdb">> => #{ + summary => <<"HStreamDB Bridge">>, + value => values(Method) + } + } + ]. + +values(get) -> + maps:merge(values(post), ?METRICS_EXAMPLE); +values(post) -> + #{ + type => hstreamdb, + name => <<"demo">>, + connector => <<"hstreamdb:connector">>, + enable => true, + direction => egress, + local_topic => <<"local/topic/#">>, + payload => <<"${payload}">> + }; +values(put) -> + values(post). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_hstreamdb". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {direction, mk(egress, #{desc => ?DESC("config_direction"), default => egress})}, + {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})}, + {payload, mk(binary(), #{default => <<"${payload}">>, desc => ?DESC("payload")})}, + {connector, field(connector)} + ]; +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:metrics_status_fields() ++ fields("post"). + +field(connector) -> + mk( + hoconsc:union([binary(), ref(emqx_ee_connector_hstreamdb, config)]), + #{ + required => true, + example => <<"hstreamdb:demo">>, + desc => ?DESC("desc_connector") + } + ). + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for HStream using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal +type_field() -> + {type, mk(enum([hstreamdb]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl new file mode 100644 index 000000000..6a1b2677f --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl @@ -0,0 +1,237 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_influxdb). + +-include("emqx_ee_bridge.hrl"). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-type write_syntax() :: list(). +-reflect_type([write_syntax/0]). +-typerefl_from_string({write_syntax/0, ?MODULE, to_influx_lines}). +-export([to_influx_lines/1]). 
+ +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"influxdb_udp">> => #{ + summary => <<"InfluxDB UDP Bridge">>, + value => values("influxdb_udp", Method) + } + }, + #{ + <<"influxdb_api_v1">> => #{ + summary => <<"InfluxDB HTTP API V1 Bridge">>, + value => values("influxdb_api_v1", Method) + } + }, + #{ + <<"influxdb_api_v2">> => #{ + summary => <<"InfluxDB HTTP API V2 Bridge">>, + value => values("influxdb_api_v2", Method) + } + } + ]. + +values(Protocol, get) -> + maps:merge(values(Protocol, post), ?METRICS_EXAMPLE); +values(Protocol, post) -> + case Protocol of + "influxdb_api_v2" -> + SupportUint = <<"uint_value=${payload.uint_key}u,">>; + _ -> + SupportUint = <<>> + end, + #{ + type => list_to_atom(Protocol), + name => <<"demo">>, + connector => list_to_binary(Protocol ++ ":connector"), + enable => true, + direction => egress, + local_topic => <<"local/topic/#">>, + write_syntax => + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", SupportUint/binary, + "bool=${payload.bool}">>, + enable_batch => false, + batch_size => 5, + batch_time => <<"1m">> + }; +values(Protocol, put) -> + values(Protocol, post). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_influxdb". + +roots() -> []. + +fields(basic) -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {direction, mk(egress, #{desc => ?DESC("config_direction"), default => egress})}, + {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})}, + {write_syntax, fun write_syntax/1} + ] ++ + emqx_resource_schema:fields('creation_opts'); +fields("post_udp") -> + method_fileds(post, influxdb_udp); +fields("post_api_v1") -> + method_fileds(post, influxdb_api_v1); +fields("post_api_v2") -> + method_fileds(post, influxdb_api_v2); +fields("put_udp") -> + method_fileds(put, influxdb_udp); +fields("put_api_v1") -> + method_fileds(put, influxdb_api_v1); +fields("put_api_v2") -> + method_fileds(put, influxdb_api_v2); +fields("get_udp") -> + method_fileds(get, influxdb_udp); +fields("get_api_v1") -> + method_fileds(get, influxdb_api_v1); +fields("get_api_v2") -> + method_fileds(get, influxdb_api_v2); +fields(Name) when + Name == influxdb_udp orelse Name == influxdb_api_v1 orelse Name == influxdb_api_v2 +-> + fields(basic) ++ + connector_field(Name). + +method_fileds(post, ConnectorType) -> + fields(basic) ++ connector_field(ConnectorType) ++ type_name_field(ConnectorType); +method_fileds(get, ConnectorType) -> + fields(basic) ++ + emqx_bridge_schema:metrics_status_fields() ++ + connector_field(ConnectorType) ++ type_name_field(ConnectorType); +method_fileds(put, ConnectorType) -> + fields(basic) ++ connector_field(ConnectorType). + +connector_field(Type) -> + [ + {connector, + mk( + hoconsc:union([binary(), ref(emqx_ee_connector_influxdb, Type)]), + #{ + required => true, + example => list_to_binary(atom_to_list(Type) ++ ":connector"), + desc => ?DESC(<<"desc_connector">>) + } + )} + ]. + +type_name_field(Type) -> + [ + {type, mk(Type, #{required => true, desc => ?DESC("desc_type")})}, + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})} + ]. 
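As a worked example of the write_syntax field: the to_influx_lines/1 converter defined further down in this module turns one line-protocol template into a list of point maps. The input string below is illustrative, not taken from the change:

```erlang
%% Illustration only: parsing one line-protocol template with the converter
%% exported by this module. The template string is an example.
[#{
    measurement := "weather",
    tags := [{"location", "us-midwest"}],
    fields := [{"temperature", "${payload.temp}"}],
    timestamp := "${timestamp}"
}] = emqx_ee_bridge_influxdb:to_influx_lines(
    "weather,location=us-midwest temperature=${payload.temp} ${timestamp}"
).
```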
+ +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for InfluxDB using `", string:to_upper(Method), "` method."]; +desc(influxdb_udp) -> + ?DESC(emqx_ee_connector_influxdb, "influxdb_udp"); +desc(influxdb_api_v1) -> + ?DESC(emqx_ee_connector_influxdb, "influxdb_api_v1"); +desc(influxdb_api_v2) -> + ?DESC(emqx_ee_connector_influxdb, "influxdb_api_v2"); +desc(_) -> + undefined. + +write_syntax(type) -> + ?MODULE:write_syntax(); +write_syntax(required) -> + true; +write_syntax(validator) -> + [?NOT_EMPTY("the value of the field 'write_syntax' cannot be empty")]; +write_syntax(converter) -> + fun to_influx_lines/1; +write_syntax(desc) -> + ?DESC("write_syntax"); +write_syntax(format) -> + <<"sql">>; +write_syntax(_) -> + undefined. + +to_influx_lines(RawLines) -> + Lines = string:tokens(str(RawLines), "\n"), + lists:reverse(lists:foldl(fun converter_influx_line/2, [], Lines)). + +converter_influx_line(Line, AccIn) -> + case string:tokens(str(Line), " ") of + [MeasurementAndTags, Fields, Timestamp] -> + {Measurement, Tags} = split_measurement_and_tags(MeasurementAndTags), + [ + #{ + measurement => Measurement, + tags => kv_pairs(Tags), + fields => kv_pairs(string:tokens(Fields, ",")), + timestamp => Timestamp + } + | AccIn + ]; + [MeasurementAndTags, Fields] -> + {Measurement, Tags} = split_measurement_and_tags(MeasurementAndTags), + %% TODO: fix here both here and influxdb driver. + %% Default value should evaluated by InfluxDB. + [ + #{ + measurement => Measurement, + tags => kv_pairs(Tags), + fields => kv_pairs(string:tokens(Fields, ",")), + timestamp => "${timestamp}" + } + | AccIn + ]; + _ -> + throw("Bad InfluxDB Line Protocol schema") + end. + +split_measurement_and_tags(Subject) -> + case string:tokens(Subject, ",") of + [] -> + throw("Bad Measurement schema"); + [Measurement] -> + {Measurement, []}; + [Measurement | Tags] -> + {Measurement, Tags} + end. + +kv_pairs(Pairs) -> + kv_pairs(Pairs, []). +kv_pairs([], Acc) -> + lists:reverse(Acc); +kv_pairs([Pair | Rest], Acc) -> + case string:tokens(Pair, "=") of + [K, V] -> + %% Reduplicated keys will be overwritten. Follows InfluxDB Line Protocol. + kv_pairs(Rest, [{K, V} | Acc]); + _ -> + throw(io_lib:format("Bad InfluxDB Line Protocol Key Value pair: ~p", Pair)) + end. + +str(A) when is_atom(A) -> + atom_to_list(A); +str(B) when is_binary(B) -> + binary_to_list(B); +str(S) when is_list(S) -> + S. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl new file mode 100644 index 000000000..5d143bf85 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl @@ -0,0 +1,110 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_mysql). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include("emqx_ee_bridge.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_SQL, << + "insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) " + "values (${id}, ${topic}, ${qos}, ${payload}, FROM_UNIXTIME(${timestamp}/1000))" +>>). 
+ +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"mysql">> => #{ + summary => <<"MySQL Bridge">>, + value => values(Method) + } + } + ]. + +values(get) -> + maps:merge(values(post), ?METRICS_EXAMPLE); +values(post) -> + #{ + type => mysql, + name => <<"mysql">>, + sql_template => ?DEFAULT_SQL, + connector => #{ + server => <<"127.0.0.1:3306">>, + database => <<"test">>, + pool_size => 8, + username => <<"root">>, + password => <<"public">>, + auto_reconnect => true + }, + enable => true, + direction => egress + }; +values(put) -> + values(post). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {direction, mk(egress, #{desc => ?DESC("config_direction"), default => egress})}, + {sql_template, + mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}, + {connector, + mk( + ref(?MODULE, connector), + #{ + required => true, + desc => ?DESC("desc_connector") + } + )} + ]; +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:metrics_status_fields() ++ fields("post"); +fields(connector) -> + emqx_connector_mysql:fields(config) -- emqx_connector_schema_lib:prepare_statement_fields(). + +desc("config") -> + ?DESC("desc_config"); +desc(connector) -> + ?DESC("desc_connector"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for MySQL using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal +type_field() -> + {type, mk(enum([mysql]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/lib-ee/emqx_ee_bridge/test/ee_bridge_hstreamdb_SUITE.erl b/lib-ee/emqx_ee_bridge/test/ee_bridge_hstreamdb_SUITE.erl new file mode 100644 index 000000000..429323ad7 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/test/ee_bridge_hstreamdb_SUITE.erl @@ -0,0 +1,16 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(ee_bridge_hstreamdb_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). 
+ +%% TODO: diff --git a/lib-ee/emqx_ee_conf/.gitignore b/lib-ee/emqx_ee_conf/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/lib-ee/emqx_ee_conf/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/lib-ee/emqx_enterprise_conf/README.md b/lib-ee/emqx_ee_conf/README.md similarity index 62% rename from lib-ee/emqx_enterprise_conf/README.md rename to lib-ee/emqx_ee_conf/README.md index b5b28dfdb..701d285cc 100644 --- a/lib-ee/emqx_enterprise_conf/README.md +++ b/lib-ee/emqx_ee_conf/README.md @@ -1,3 +1,3 @@ -# emqx_enterprise_conf +# emqx_ee_conf EMQX Enterprise configuration schema diff --git a/lib-ee/emqx_enterprise_conf/rebar.config b/lib-ee/emqx_ee_conf/rebar.config similarity index 100% rename from lib-ee/emqx_enterprise_conf/rebar.config rename to lib-ee/emqx_ee_conf/rebar.config diff --git a/lib-ee/emqx_ee_conf/src/emqx_ee_conf.app.src b/lib-ee/emqx_ee_conf/src/emqx_ee_conf.app.src new file mode 100644 index 000000000..324e7e308 --- /dev/null +++ b/lib-ee/emqx_ee_conf/src/emqx_ee_conf.app.src @@ -0,0 +1,13 @@ +{application, emqx_ee_conf, [ + {description, "EMQX Enterprise Edition configuration schema"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf_schema.erl b/lib-ee/emqx_ee_conf/src/emqx_ee_conf_schema.erl similarity index 95% rename from lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf_schema.erl rename to lib-ee/emqx_ee_conf/src/emqx_ee_conf_schema.erl index 60aeb1f81..38f6689c5 100644 --- a/lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf_schema.erl +++ b/lib-ee/emqx_ee_conf/src/emqx_ee_conf_schema.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_enterprise_conf_schema). +-module(emqx_ee_conf_schema). -behaviour(hocon_schema). diff --git a/lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_SUITE.erl b/lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_SUITE.erl similarity index 78% rename from lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_SUITE.erl rename to lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_SUITE.erl index 396faa4f5..0d6d4f061 100644 --- a/lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_SUITE.erl +++ b/lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_SUITE.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_enterprise_conf_schema_SUITE). +-module(emqx_ee_conf_schema_SUITE). -compile(nowarn_export_all). -compile(export_all). @@ -20,12 +20,12 @@ all() -> t_namespace(_Config) -> ?assertEqual( emqx_conf_schema:namespace(), - emqx_enterprise_conf_schema:namespace() + emqx_ee_conf_schema:namespace() ). t_roots(_Config) -> BaseRoots = emqx_conf_schema:roots(), - EnterpriseRoots = emqx_enterprise_conf_schema:roots(), + EnterpriseRoots = emqx_ee_conf_schema:roots(), ?assertEqual([], BaseRoots -- EnterpriseRoots), @@ -42,12 +42,12 @@ t_roots(_Config) -> t_fields(_Config) -> ?assertEqual( emqx_conf_schema:fields("node"), - emqx_enterprise_conf_schema:fields("node") + emqx_ee_conf_schema:fields("node") ). 
t_translations(_Config) -> - [Root | _] = emqx_enterprise_conf_schema:translations(), + [Root | _] = emqx_ee_conf_schema:translations(), ?assertEqual( emqx_conf_schema:translation(Root), - emqx_enterprise_conf_schema:translation(Root) + emqx_ee_conf_schema:translation(Root) ). diff --git a/lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_tests.erl b/lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_tests.erl similarity index 84% rename from lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_tests.erl rename to lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_tests.erl index d7c4e35dd..b4bf0de3d 100644 --- a/lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_tests.erl +++ b/lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_tests.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_enterprise_conf_schema_tests). +-module(emqx_ee_conf_schema_tests). -include_lib("eunit/include/eunit.hrl"). @@ -22,7 +22,7 @@ doc_gen_test() -> "priv", "i18n.conf" ]), - _ = emqx_conf:dump_schema(Dir, emqx_enterprise_conf_schema, I18nFile), + _ = emqx_conf:dump_schema(Dir, emqx_ee_conf_schema, I18nFile), ok end }. diff --git a/lib-ee/emqx_ee_connector/.gitignore b/lib-ee/emqx_ee_connector/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/lib-ee/emqx_ee_connector/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/lib-ee/emqx_ee_connector/README.md b/lib-ee/emqx_ee_connector/README.md new file mode 100644 index 000000000..e665af458 --- /dev/null +++ b/lib-ee/emqx_ee_connector/README.md @@ -0,0 +1,9 @@ +emqx_ee_connector +===== + +An OTP application + +Build +----- + + $ rebar3 compile diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_hstreamdb.conf b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_hstreamdb.conf new file mode 100644 index 000000000..0826c8f0c --- /dev/null +++ b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_hstreamdb.conf @@ -0,0 +1,74 @@ +emqx_ee_connector_hstreamdb { + config { + desc { + en: "HStreamDB connection config" + zh: "HStreamDB 连接配置。" + } + label: { + en: "Connection config" + zh: "连接配置" + } + } + + type { + desc { + en: "The Connector Type." + zh: "连接器类型。" + } + label: { + en: "Connector Type" + zh: "连接器类型" + } + } + + name { + desc { + en: "Connector name, used as a human-readable description of the connector." 
+ zh: "连接器名称,人类可读的连接器描述。" + } + label: { + en: "Connector Name" + zh: "连接器名称" + } + } + url { + desc { + en: """HStreamDB Server URL""" + zh: """HStreamDB 服务器 URL""" + } + label { + en: """HStreamDB Server URL""" + zh: """HStreamDB 服务器 URL""" + } + } + stream_name { + desc { + en: """HStreamDB Stream Name""" + zh: """HStreamDB 流名称""" + } + label { + en: """HStreamDB Stream Name""" + zh: """HStreamDB 流名称""" + } + } + ordering_key { + desc { + en: """HStreamDB Ordering Key""" + zh: """HStreamDB 分区键""" + } + label { + en: """HStreamDB Ordering Key""" + zh: """HStreamDB 分区键""" + } + } + pool_size { + desc { + en: """HStreamDB Pool Size""" + zh: """HStreamDB 连接池大小""" + } + label { + en: """HStreamDB Pool Size""" + zh: """HStreamDB 连接池大小""" + } + } +} diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_influxdb.conf b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_influxdb.conf new file mode 100644 index 000000000..4d2dc168c --- /dev/null +++ b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_influxdb.conf @@ -0,0 +1,164 @@ +emqx_ee_connector_influxdb { + type { + desc { + en: """The Connector Type.""" + zh: """连接器类型。""" + } + label: { + en: """Connector Type""" + zh: """连接器类型""" + } + } + + name { + desc { + en: """Connector name, used as a human-readable description of the connector.""" + zh: """连接器名称,人类可读的连接器描述。""" + } + label: { + en: """Connector Name""" + zh: """连接器名称""" + } + } + host { + desc { + en: """InfluxDB host.""" + zh: """InfluxDB 主机地址。""" + } + label: { + en: """Host""" + zh: """主机""" + } + } + port { + desc { + en: """InfluxDB port.""" + zh: """InfluxDB 端口。""" + } + label: { + en: """Port""" + zh: """端口""" + } + } + protocol { + desc { + en: """InfluxDB's protocol. UDP or HTTP API or HTTP API V2.""" + zh: """InfluxDB 协议。UDP 或 HTTP API 或 HTTP API V2。""" + } + label: { + en: """Protocol""" + zh: """协议""" + } + } + influxdb_udp { + desc { + en: """InfluxDB's UDP protocol.""" + zh: """InfluxDB UDP 协议。""" + } + label: { + en: """UDP Protocol""" + zh: """UDP 协议""" + } + } + influxdb_api_v1 { + desc { + en: """InfluxDB's protocol. Support InfluxDB v1.8 and before.""" + zh: """InfluxDB HTTP API 协议。支持 Influxdb v1.8 以及之前的版本。""" + } + label: { + en: """HTTP API Protocol""" + zh: """HTTP API 协议""" + } + } + influxdb_api_v2 { + desc { + en: """InfluxDB's protocol. Support InfluxDB v2.0 and after.""" + zh: """InfluxDB HTTP API V2 协议。支持 Influxdb v2.0 以及之后的版本。""" + } + label: { + en: """HTTP API V2 Protocol""" + zh: """HTTP API V2 协议""" + } + } + database { + desc { + en: """InfluxDB database.""" + zh: """InfluxDB 数据库。""" + } + label: { + en: "Database" + zh: "数据库" + } + } + username { + desc { + en: "InfluxDB username." + zh: "InfluxDB 用户名。" + } + label: { + en: "Username" + zh: "用户名" + } + } + password { + desc { + en: "InfluxDB password." + zh: "InfluxDB 密码。" + } + label: { + en: "Password" + zh: "密码" + } + } + bucket { + desc { + en: "InfluxDB bucket name." + zh: "InfluxDB bucket 名称。" + } + label: { + en: "Bucket" + zh: "Bucket" + } + } + org { + desc { + en: """Organization name of InfluxDB.""" + zh: """InfluxDB 组织名称。""" + } + label: { + en: """Organization""" + zh: """组织""" + } + } + token { + desc { + en: """InfluxDB token.""" + zh: """InfluxDB token。""" + } + label: { + en: """Token""" + zh: """Token""" + } + } + precision { + desc { + en: """InfluxDB time precision.""" + zh: """InfluxDB 时间精度。""" + } + label: { + en: """Time Precision""" + zh: """时间精度""" + } + } + pool_size { + desc { + en: """InfluxDB Pool Size. 
Default value is CPU threads.""" + zh: """InfluxDB 连接池大小,默认为 CPU 线程数。""" + } + label { + en: """InfluxDB Pool Size""" + zh: """InfluxDB 连接池大小""" + } + } + +} diff --git a/lib-ee/emqx_ee_connector/rebar.config b/lib-ee/emqx_ee_connector/rebar.config new file mode 100644 index 000000000..5963b7ab0 --- /dev/null +++ b/lib-ee/emqx_ee_connector/rebar.config @@ -0,0 +1,9 @@ +{erl_opts, [debug_info]}. +{deps, [ + {hstreamdb_erl, {git, "https://github.com/hstreamdb/hstreamdb_erl.git", {tag, "0.2.5"}}}, + {influxdb, {git, "https://github.com/emqx/influxdb-client-erl", {tag, "1.1.4"}}} +]}. + +{shell, [ + {apps, [emqx_ee_connector]} +]}. diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src new file mode 100644 index 000000000..675a934aa --- /dev/null +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src @@ -0,0 +1,14 @@ +{application, emqx_ee_connector, [ + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + hstreamdb_erl, + influxdb + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.erl new file mode 100644 index 000000000..6846ea740 --- /dev/null +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.erl @@ -0,0 +1,57 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_connector). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + fields/1, + connector_examples/1, + api_schemas/1 +]). + +api_schemas(Method) -> + [ + ref(emqx_ee_connector_hstreamdb, Method), + ref(emqx_ee_connector_influxdb, "udp_" ++ Method), + ref(emqx_ee_connector_influxdb, "api_v1_" ++ Method), + ref(emqx_ee_connector_influxdb, "api_v2_" ++ Method) + ]. + +fields(connectors) -> + [ + {hstreamdb, + mk( + hoconsc:map(name, ref(emqx_ee_connector_hstreamdb, config)), + #{desc => <<"EMQX Enterprise Config">>} + )} + ] ++ fields(influxdb); +fields(influxdb) -> + [ + { + Protocol, + mk(hoconsc:map(name, ref(emqx_ee_connector_influxdb, Protocol)), #{ + desc => <<"EMQX Enterprise Config">> + }) + } + || Protocol <- [influxdb_udp, influxdb_api_v1, influxdb_api_v2] + ]. + +connector_examples(Method) -> + MergeFun = + fun(Example, Examples) -> + maps:merge(Examples, Example) + end, + Fun = + fun(Module, Examples) -> + ConnectorExamples = erlang:apply(Module, connector_examples, [Method]), + lists:foldl(MergeFun, Examples, ConnectorExamples) + end, + lists:foldl(Fun, #{}, schema_modules()). + +schema_modules() -> + [ + emqx_ee_connector_hstreamdb, + emqx_ee_connector_influxdb + ]. diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_hstreamdb.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_hstreamdb.erl new file mode 100644 index 000000000..3892b7fc0 --- /dev/null +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_hstreamdb.erl @@ -0,0 +1,309 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_connector_hstreamdb). + +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). + +-import(hoconsc, [mk/2, enum/1]). + +-behaviour(emqx_resource). 
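+
+%% This resource keeps one hstreamdb client and one producer per connector
+%% instance; on_query/3 renders the configured ordering key and payload
+%% templates and appends the resulting record to the stream synchronously.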
+ +%% callbacks of behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_get_status/2 +]). + +-export([ + on_flush_result/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1, + connector_examples/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% resource callback +callback_mode() -> always_sync. + +on_start(InstId, Config) -> + start_client(InstId, Config). + +on_stop(InstId, #{client := Client, producer := Producer}) -> + StopClientRes = hstreamdb:stop_client(Client), + StopProducerRes = hstreamdb:stop_producer(Producer), + ?SLOG(info, #{ + msg => "stop hstreamdb connector", + connector => InstId, + client => Client, + producer => Producer, + stop_client => StopClientRes, + stop_producer => StopProducerRes + }). + +on_query( + _InstId, + {send_message, Data}, + #{producer := Producer, ordering_key := OrderingKey, payload := Payload} +) -> + Record = to_record(OrderingKey, Payload, Data), + do_append(Producer, Record). + +on_get_status(_InstId, #{client := Client}) -> + case is_alive(Client) of + true -> + connected; + false -> + disconnected + end. + +%% ------------------------------------------------------------------------------------------------- +%% hstreamdb batch callback +%% TODO: maybe remove it after disk cache is ready + +on_flush_result({{flush, _Stream, _Records}, {ok, _Resp}}) -> + ok; +on_flush_result({{flush, _Stream, _Records}, {error, _Reason}}) -> + ok. + +%% ------------------------------------------------------------------------------------------------- +%% schema +namespace() -> connector_hstreamdb. + +roots() -> + fields(config). + +fields(config) -> + [ + {url, mk(binary(), #{required => true, desc => ?DESC("url")})}, + {stream, mk(binary(), #{required => true, desc => ?DESC("stream_name")})}, + {ordering_key, mk(binary(), #{required => false, desc => ?DESC("ordering_key")})}, + {pool_size, mk(pos_integer(), #{required => true, desc => ?DESC("pool_size")})} + ]; +fields("get") -> + fields("post"); +fields("put") -> + fields(config); +fields("post") -> + [ + {type, mk(hstreamdb, #{required => true, desc => ?DESC("type")})}, + {name, mk(binary(), #{required => true, desc => ?DESC("name")})} + ] ++ fields("put"). + +connector_examples(Method) -> + [ + #{ + <<"hstreamdb">> => #{ + summary => <<"HStreamDB Connector">>, + value => values(Method) + } + } + ]. + +values(post) -> + maps:merge(values(put), #{name => <<"connector">>}); +values(get) -> + values(post); +values(put) -> + #{ + type => hstreamdb, + url => <<"http://127.0.0.1:6570">>, + stream => <<"stream1">>, + ordering_key => <<"some_key">>, + pool_size => 8 + }; +values(_) -> + #{}. + +desc(config) -> + ?DESC("config"). + +%% ------------------------------------------------------------------------------------------------- +%% internal functions +start_client(InstId, Config) -> + try + do_start_client(InstId, Config) + catch + E:R:S -> + ?SLOG(error, #{ + msg => "start hstreamdb connector error", + connector => InstId, + error => E, + reason => R, + stack => S + }) + end. 
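+
+%% Start the client, check liveness with an echo request, then start the
+%% producer. If a client with the same name is already registered, the old
+%% one is stopped and the whole start sequence is retried.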
+ +do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize}) -> + ?SLOG(info, #{ + msg => "starting hstreamdb connector: client", + connector => InstId, + config => Config + }), + ClientName = client_name(InstId), + ClientOptions = [ + {url, binary_to_list(Server)}, + {rpc_options, #{pool_size => PoolSize}} + ], + case hstreamdb:start_client(ClientName, ClientOptions) of + {ok, Client} -> + case is_alive(Client) of + true -> + ?SLOG(info, #{ + msg => "hstreamdb connector: client started", + connector => InstId, + client => Client + }), + start_producer(InstId, Client, Config); + _ -> + ?SLOG(error, #{ + msg => "hstreamdb connector: client not alive", + connector => InstId + }), + {error, connect_failed} + end; + {error, {already_started, Pid}} -> + ?SLOG(info, #{ + msg => "starting hstreamdb connector: client, find old client. restart client", + old_client_pid => Pid, + old_client_name => ClientName + }), + _ = hstreamdb:stop_client(ClientName), + start_client(InstId, Config); + {error, Error} -> + ?SLOG(error, #{ + msg => "hstreamdb connector: client failed", + connector => InstId, + reason => Error + }), + {error, Error} + end. + +is_alive(Client) -> + case hstreamdb:echo(Client) of + {ok, _Echo} -> + true; + _ErrorEcho -> + false + end. + +start_producer( + InstId, + Client, + Options = #{stream := Stream, pool_size := PoolSize, egress := #{payload := PayloadBin}} +) -> + %% TODO: change these batch options after we have better disk cache. + BatchSize = maps:get(batch_size, Options, 100), + Interval = maps:get(batch_interval, Options, 1000), + ProducerOptions = [ + {stream, Stream}, + {callback, {?MODULE, on_flush_result, []}}, + {max_records, BatchSize}, + {interval, Interval}, + {pool_size, PoolSize} + ], + Name = produce_name(InstId), + ?SLOG(info, #{ + msg => "starting hstreamdb connector: producer", + connector => InstId + }), + case hstreamdb:start_producer(Client, Name, ProducerOptions) of + {ok, Producer} -> + ?SLOG(info, #{ + msg => "hstreamdb connector: producer started" + }), + EnableBatch = maps:get(enable_batch, Options, false), + Payload = emqx_plugin_libs_rule:preproc_tmpl(PayloadBin), + OrderingKeyBin = maps:get(ordering_key, Options, <<"">>), + OrderingKey = emqx_plugin_libs_rule:preproc_tmpl(OrderingKeyBin), + State = #{ + client => Client, + producer => Producer, + enable_batch => EnableBatch, + ordering_key => OrderingKey, + payload => Payload + }, + {ok, State}; + {error, {already_started, Pid}} -> + ?SLOG(info, #{ + msg => + "starting hstreamdb connector: producer, find old producer. restart producer", + old_producer_pid => Pid, + old_producer_name => Name + }), + _ = hstreamdb:stop_producer(Name), + start_producer(InstId, Client, Options); + {error, Reason} -> + ?SLOG(error, #{ + msg => "starting hstreamdb connector: producer, failed", + reason => Reason + }), + {error, Reason} + end. + +to_record(OrderingKeyTmpl, PayloadTmpl, Data) -> + OrderingKey = emqx_plugin_libs_rule:proc_tmpl(OrderingKeyTmpl, Data), + Payload = emqx_plugin_libs_rule:proc_tmpl(PayloadTmpl, Data), + to_record(OrderingKey, Payload). + +to_record(OrderingKey, Payload) when is_binary(OrderingKey) -> + to_record(binary_to_list(OrderingKey), Payload); +to_record(OrderingKey, Payload) -> + hstreamdb:to_record(OrderingKey, raw, Payload). + +do_append(Producer, Record) -> + do_append(false, Producer, Record). + +%% TODO: this append is async, remove or change it after we have better disk cache. 
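+%% The async clause below is kept as commented-out reference code only;
+%% do_append/2 currently always takes the synchronous append_flush path.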
+
+% do_append(true, Producer, Record) ->
+%     case hstreamdb:append(Producer, Record) of
+%         ok ->
+%             ?SLOG(debug, #{
+%                 msg => "hstreamdb producer async append success",
+%                 record => Record
+%             });
+%         {error, Reason} = Err ->
+%             ?SLOG(error, #{
+%                 msg => "hstreamdb producer async append failed",
+%                 reason => Reason,
+%                 record => Record
+%             }),
+%             Err
+%     end;
+do_append(false, Producer, Record) ->
+    %% TODO: this append is sync, but it does not support [Record]; it can only append one Record at a time.
+    %% Change it after we have a better disk cache.
+    case hstreamdb:append_flush(Producer, Record) of
+        {ok, _} ->
+            ?SLOG(debug, #{
+                msg => "hstreamdb producer sync append success",
+                record => Record
+            });
+        {error, Reason} = Err ->
+            ?SLOG(error, #{
+                msg => "hstreamdb producer sync append failed",
+                reason => Reason,
+                record => Record
+            }),
+            Err
+    end.
+
+client_name(InstId) ->
+    "client:" ++ to_string(InstId).
+
+produce_name(ActionId) ->
+    list_to_atom("producer:" ++ to_string(ActionId)).
+
+to_string(List) when is_list(List) -> List;
+to_string(Bin) when is_binary(Bin) -> binary_to_list(Bin);
+to_string(Atom) when is_atom(Atom) -> atom_to_list(Atom).
diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl
new file mode 100644
index 000000000..09a09aa44
--- /dev/null
+++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl
@@ -0,0 +1,562 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_ee_connector_influxdb).
+
+-include_lib("hocon/include/hoconsc.hrl").
+-include_lib("typerefl/include/types.hrl").
+-include_lib("emqx/include/logger.hrl").
+
+-import(hoconsc, [mk/2, enum/1, ref/2]).
+
+-behaviour(emqx_resource).
+
+%% callbacks of behaviour emqx_resource
+-export([
+    callback_mode/0,
+    on_start/2,
+    on_stop/2,
+    on_query/3,
+    on_batch_query/3,
+    on_query_async/4,
+    on_batch_query_async/4,
+    on_get_status/2
+]).
+
+-export([
+    namespace/0,
+    fields/1,
+    desc/1,
+    connector_examples/1
+]).
+
+%% -------------------------------------------------------------------------------------------------
+%% resource callback
+callback_mode() -> async_if_possible.
+
+on_start(InstId, Config) ->
+    start_client(InstId, Config).
+
+on_stop(_InstId, #{client := Client}) ->
+    influxdb:stop_client(Client).
+
+on_query(InstId, {send_message, Data}, _State = #{write_syntax := SyntaxLines, client := Client}) ->
+    case data_to_points(Data, SyntaxLines) of
+        {ok, Points} ->
+            do_query(InstId, Client, Points);
+        {error, ErrorPoints} = Err ->
+            log_error_points(InstId, ErrorPoints),
+            Err
+    end.
+
+%% If any message in the batch fails to be converted into points,
+%% the whole batch query fails.
+on_batch_query(InstId, BatchData, _State = #{write_syntax := SyntaxLines, client := Client}) ->
+    case parse_batch_data(InstId, BatchData, SyntaxLines) of
+        {ok, Points} ->
+            do_query(InstId, Client, Points);
+        {error, Reason} ->
+            {error, Reason}
+    end.
+
+on_query_async(
+    InstId,
+    {send_message, Data},
+    {ReplayFun, Args},
+    _State = #{write_syntax := SyntaxLines, client := Client}
+) ->
+    case data_to_points(Data, SyntaxLines) of
+        {ok, Points} ->
+            do_async_query(InstId, Client, Points, {ReplayFun, Args});
+        {error, ErrorPoints} = Err ->
+            log_error_points(InstId, ErrorPoints),
+            Err
+    end. 
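+
+%% Batch variant of on_query_async/4: the connection is checked first, the
+%% whole batch is converted into points, and the write result is delivered
+%% asynchronously through {ReplayFun, Args}.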
+ +on_batch_query_async( + InstId, + BatchData, + {ReplayFun, Args}, + State = #{write_syntax := SyntaxLines, client := Client} +) -> + case on_get_status(InstId, State) of + connected -> + case parse_batch_data(InstId, BatchData, SyntaxLines) of + {ok, Points} -> + do_async_query(InstId, Client, Points, {ReplayFun, Args}); + {error, Reason} -> + {error, Reason} + end; + disconnected -> + {resource_down, disconnected} + end. + +on_get_status(_InstId, #{client := Client}) -> + case influxdb:is_alive(Client) of + true -> + connected; + false -> + disconnected + end. + +%% ------------------------------------------------------------------------------------------------- +%% schema +namespace() -> connector_influxdb. + +fields("udp_get") -> + Key = influxdb_udp, + fields(Key) ++ type_name_field(Key); +fields("udp_post") -> + Key = influxdb_udp, + fields(Key) ++ type_name_field(Key); +fields("udp_put") -> + fields(influxdb_udp); +fields("api_v1_get") -> + Key = influxdb_api_v1, + fields(Key) ++ type_name_field(Key); +fields("api_v1_post") -> + Key = influxdb_api_v1, + fields(Key) ++ type_name_field(Key); +fields("api_v1_put") -> + fields(influxdb_api_v1); +fields("api_v2_get") -> + Key = influxdb_api_v2, + fields(Key) ++ type_name_field(Key); +fields("api_v2_post") -> + Key = influxdb_api_v2, + fields(Key) ++ type_name_field(Key); +fields("api_v2_put") -> + fields(influxdb_api_v2); +fields(basic) -> + [ + {host, + mk(binary(), #{required => true, default => <<"127.0.0.1">>, desc => ?DESC("host")})}, + {port, mk(pos_integer(), #{required => true, default => 8086, desc => ?DESC("port")})}, + {precision, + mk(enum([ns, us, ms, s, m, h]), #{ + required => false, default => ms, desc => ?DESC("precision") + })}, + {pool_size, mk(pos_integer(), #{desc => ?DESC("pool_size")})} + ]; +fields(influxdb_udp) -> + fields(basic); +fields(influxdb_api_v1) -> + [ + {database, mk(binary(), #{required => true, desc => ?DESC("database")})}, + {username, mk(binary(), #{required => true, desc => ?DESC("username")})}, + {password, mk(binary(), #{required => true, desc => ?DESC("password")})} + ] ++ emqx_connector_schema_lib:ssl_fields() ++ fields(basic); +fields(influxdb_api_v2) -> + [ + {bucket, mk(binary(), #{required => true, desc => ?DESC("bucket")})}, + {org, mk(binary(), #{required => true, desc => ?DESC("org")})}, + {token, mk(binary(), #{required => true, desc => ?DESC("token")})} + ] ++ emqx_connector_schema_lib:ssl_fields() ++ fields(basic). + +type_name_field(Type) -> + [ + {type, mk(Type, #{required => true, desc => ?DESC("type")})}, + {name, mk(binary(), #{required => true, desc => ?DESC("name")})} + ]. + +connector_examples(Method) -> + [ + #{ + <<"influxdb_udp">> => #{ + summary => <<"InfluxDB UDP Connector">>, + value => values(udp, Method) + } + }, + #{ + <<"influxdb_api_v1">> => #{ + summary => <<"InfluxDB HTTP API V1 Connector">>, + value => values(api_v1, Method) + } + }, + #{ + <<"influxdb_api_v2">> => #{ + summary => <<"InfluxDB HTTP API V2 Connector">>, + value => values(api_v2, Method) + } + } + ]. 
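+
+%% Example values for the three protocol variants: `get` examples reuse the
+%% `post` ones, and `post` extends `put` with the `type` and `name` fields.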
+
+values(Protocol, get) ->
+    values(Protocol, post);
+values(Protocol, post) ->
+    Type = list_to_atom("influxdb_" ++ atom_to_list(Protocol)),
+    maps:merge(values(Protocol, put), #{type => Type, name => <<"connector">>});
+values(udp, put) ->
+    #{
+        host => <<"127.0.0.1">>,
+        port => 8089,
+        precision => ms,
+        pool_size => 8
+    };
+values(api_v1, put) ->
+    #{
+        host => <<"127.0.0.1">>,
+        port => 8086,
+        precision => ms,
+        pool_size => 8,
+        database => <<"my_db">>,
+        username => <<"my_user">>,
+        password => <<"my_password">>,
+        ssl => #{enable => false}
+    };
+values(api_v2, put) ->
+    #{
+        host => <<"127.0.0.1">>,
+        port => 8086,
+        precision => ms,
+        pool_size => 8,
+        bucket => <<"my_bucket">>,
+        org => <<"my_org">>,
+        token => <<"my_token">>,
+        ssl => #{enable => false}
+    }.
+
+desc(influxdb_udp) ->
+    ?DESC("influxdb_udp");
+desc(influxdb_api_v1) ->
+    ?DESC("influxdb_api_v1");
+desc(influxdb_api_v2) ->
+    ?DESC("influxdb_api_v2").
+
+%% -------------------------------------------------------------------------------------------------
+%% internal functions
+
+start_client(InstId, Config) ->
+    ClientConfig = client_config(InstId, Config),
+    ?SLOG(info, #{
+        msg => "starting influxdb connector",
+        connector => InstId,
+        config => Config,
+        client_config => ClientConfig
+    }),
+    try
+        do_start_client(InstId, ClientConfig, Config)
+    catch
+        E:R:S ->
+            ?SLOG(error, #{
+                msg => "start influxdb connector error",
+                connector => InstId,
+                error => E,
+                reason => R,
+                stack => S
+            }),
+            {error, R}
+    end.
+
+do_start_client(
+    InstId,
+    ClientConfig,
+    Config = #{
+        egress := #{
+            write_syntax := Lines
+        }
+    }
+) ->
+    case influxdb:start_client(ClientConfig) of
+        {ok, Client} ->
+            case influxdb:is_alive(Client) of
+                true ->
+                    State = #{
+                        client => Client,
+                        write_syntax => to_config(Lines)
+                    },
+                    ?SLOG(info, #{
+                        msg => "starting influxdb connector success",
+                        connector => InstId,
+                        client => Client,
+                        state => State
+                    }),
+                    {ok, State};
+                false ->
+                    ?SLOG(error, #{
+                        msg => "starting influxdb connector failed",
+                        connector => InstId,
+                        client => Client,
+                        reason => "client is not alive"
+                    }),
+                    {error, influxdb_client_not_alive}
+            end;
+        {error, {already_started, Client0}} ->
+            ?SLOG(info, #{
+                msg => "starting influxdb connector, find already started client",
+                connector => InstId,
+                old_client => Client0
+            }),
+            _ = influxdb:stop_client(Client0),
+            do_start_client(InstId, ClientConfig, Config);
+        {error, Reason} ->
+            ?SLOG(error, #{
+                msg => "starting influxdb connector failed",
+                connector => InstId,
+                reason => Reason
+            }),
+            {error, Reason}
+    end.
+
+client_config(
+    InstId,
+    Config = #{
+        host := Host,
+        port := Port,
+        pool_size := PoolSize
+    }
+) ->
+    [
+        {host, binary_to_list(Host)},
+        {port, Port},
+        {pool_size, PoolSize},
+        {pool, binary_to_atom(InstId, utf8)},
+        {precision, atom_to_binary(maps:get(precision, Config, ms), utf8)}
+    ] ++ protocol_config(Config).
+
+%% api v1 config
+protocol_config(#{
+    username := Username,
+    password := Password,
+    database := DB,
+    ssl := SSL
+}) ->
+    [
+        {protocol, http},
+        {version, v1},
+        {username, binary_to_list(Username)},
+        {password, binary_to_list(Password)},
+        {database, binary_to_list(DB)}
+    ] ++ ssl_config(SSL);
+%% api v2 config
+protocol_config(#{
+    bucket := Bucket,
+    org := Org,
+    token := Token,
+    ssl := SSL
+}) ->
+    [
+        {protocol, http},
+        {version, v2},
+        {bucket, binary_to_list(Bucket)},
+        {org, binary_to_list(Org)},
+        {token, Token}
+    ] ++ ssl_config(SSL);
+%% udp config
+protocol_config(_) ->
+    [
+        {protocol, udp}
+    ]. 
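+
+%% Illustrative example (values borrowed from the api_v2 connector example above):
+%% a config with bucket <<"my_bucket">>, org <<"my_org">>, token <<"my_token">> and
+%% ssl => #{enable => false} maps to
+%%     [{protocol, http}, {version, v2},
+%%      {bucket, "my_bucket"}, {org, "my_org"}, {token, <<"my_token">>},
+%%      {https_enabled, false}]
+%% which client_config/2 appends to the common host/port/pool/precision options.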
+
+ssl_config(#{enable := false}) ->
+    [
+        {https_enabled, false}
+    ];
+ssl_config(SSL = #{enable := true}) ->
+    [
+        {https_enabled, true},
+        {transport, ssl}
+    ] ++ maps:to_list(maps:remove(enable, SSL)).
+
+%% -------------------------------------------------------------------------------------------------
+%% Query
+do_query(InstId, Client, Points) ->
+    case influxdb:write(Client, Points) of
+        ok ->
+            ?SLOG(debug, #{
+                msg => "influxdb write point success",
+                connector => InstId,
+                points => Points
+            });
+        {error, Reason} = Err ->
+            ?SLOG(error, #{
+                msg => "influxdb write point failed",
+                connector => InstId,
+                reason => Reason
+            }),
+            Err
+    end.
+
+do_async_query(InstId, Client, Points, ReplayFunAndArgs) ->
+    ?SLOG(info, #{
+        msg => "influxdb write point async",
+        connector => InstId,
+        points => Points
+    }),
+    ok = influxdb:write_async(Client, Points, ReplayFunAndArgs).
+
+%% -------------------------------------------------------------------------------------------------
+%% Tags & Fields Config Transformation
+
+to_config(Lines) ->
+    to_config(Lines, []).
+
+to_config([], Acc) ->
+    lists:reverse(Acc);
+to_config(
+    [
+        #{
+            measurement := Measurement,
+            timestamp := Timestamp,
+            tags := Tags,
+            fields := Fields
+        }
+        | Rest
+    ],
+    Acc
+) ->
+    Res = #{
+        measurement => emqx_plugin_libs_rule:preproc_tmpl(Measurement),
+        timestamp => emqx_plugin_libs_rule:preproc_tmpl(Timestamp),
+        tags => to_kv_config(Tags),
+        fields => to_kv_config(Fields)
+    },
+    to_config(Rest, [Res | Acc]).
+
+to_kv_config(KVfields) ->
+    maps:fold(fun to_maps_config/3, #{}, proplists:to_map(KVfields)).
+
+to_maps_config(K, V, Res) ->
+    NK = emqx_plugin_libs_rule:preproc_tmpl(bin(K)),
+    NV = emqx_plugin_libs_rule:preproc_tmpl(bin(V)),
+    Res#{NK => NV}.
+
+%% -------------------------------------------------------------------------------------------------
+%% Tags & Fields Data Transformation
+parse_batch_data(InstId, BatchData, SyntaxLines) ->
+    {Points, Errors} = lists:foldl(
+        fun({send_message, Data}, {ListOfPoints, ErrAccIn}) ->
+            case data_to_points(Data, SyntaxLines) of
+                {ok, Points} ->
+                    {[Points | ListOfPoints], ErrAccIn};
+                {error, ErrorPoints} ->
+                    log_error_points(InstId, ErrorPoints),
+                    {ListOfPoints, ErrAccIn + 1}
+            end
+        end,
+        {[], 0},
+        BatchData
+    ),
+    case Errors of
+        0 ->
+            {ok, lists:flatten(Points)};
+        _ ->
+            ?SLOG(error, #{
+                msg => io_lib:format("InfluxDB trans point failed, count: ~p", [Errors]),
+                connector => InstId,
+                reason => points_trans_failed
+            }),
+            {error, points_trans_failed}
+    end.
+
+data_to_points(Data, SyntaxLines) ->
+    lines_to_points(Data, SyntaxLines, [], []).
+
+%% When converting multiple rows of data into the InfluxDB Line Protocol, the rows are treated as strongly correlated:
+%% once any row fails to convert, the whole set is considered to have failed. 
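+%%
+%% Illustrative example (placeholder names are hypothetical): a write_syntax entry
+%% with measurement "devices", timestamp "${timestamp}" and a field
+%% "temp" => "${payload.temp}" is rendered per message into a point of the shape
+%%     #{measurement => ..., timestamp => Integer, tags => #{...}, fields => #{...}};
+%% a row whose timestamp does not render to an integer is collected into ErrorPoints.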
+lines_to_points(_, [], Points, ErrorPoints) -> + case ErrorPoints of + [] -> + {ok, Points}; + _ -> + %% ignore trans succeeded points + {error, ErrorPoints} + end; +lines_to_points( + Data, + [ + #{ + measurement := Measurement, + timestamp := Timestamp, + tags := Tags, + fields := Fields + } + | Rest + ], + ResultPointsAcc, + ErrorPointsAcc +) -> + TransOptions = #{return => rawlist, var_trans => fun data_filter/1}, + case emqx_plugin_libs_rule:proc_tmpl(Timestamp, Data, TransOptions) of + [TimestampInt] when is_integer(TimestampInt) -> + {_, EncodeTags} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Tags), + {_, EncodeFields} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Fields), + Point = #{ + measurement => emqx_plugin_libs_rule:proc_tmpl(Measurement, Data), + timestamp => TimestampInt, + tags => EncodeTags, + fields => EncodeFields + }, + lines_to_points(Data, Rest, [Point | ResultPointsAcc], ErrorPointsAcc); + BadTimestamp -> + lines_to_points(Data, Rest, ResultPointsAcc, [ + {error, {bad_timestamp, BadTimestamp}} | ErrorPointsAcc + ]) + end. + +maps_config_to_data(K, V, {Data, Res}) -> + KTransOptions = #{return => full_binary}, + VTransOptions = #{return => rawlist, var_trans => fun data_filter/1}, + NK = emqx_plugin_libs_rule:proc_tmpl(K, Data, KTransOptions), + NV = emqx_plugin_libs_rule:proc_tmpl(V, Data, VTransOptions), + case {NK, NV} of + {[undefined], _} -> + {Data, Res}; + %% undefined value in normal format [undefined] or int/uint format [undefined, <<"i">>] + {_, [undefined | _]} -> + {Data, Res}; + _ -> + {Data, Res#{NK => value_type(NV)}} + end. + +value_type([Int, <<"i">>]) when + is_integer(Int) +-> + {int, Int}; +value_type([UInt, <<"u">>]) when + is_integer(UInt) +-> + {uint, UInt}; +value_type([<<"t">>]) -> + 't'; +value_type([<<"T">>]) -> + 'T'; +value_type([true]) -> + 'true'; +value_type([<<"TRUE">>]) -> + 'TRUE'; +value_type([<<"True">>]) -> + 'True'; +value_type([<<"f">>]) -> + 'f'; +value_type([<<"F">>]) -> + 'F'; +value_type([false]) -> + 'false'; +value_type([<<"FALSE">>]) -> + 'FALSE'; +value_type([<<"False">>]) -> + 'False'; +value_type(Val) -> + Val. + +data_filter(undefined) -> undefined; +data_filter(Int) when is_integer(Int) -> Int; +data_filter(Number) when is_number(Number) -> Number; +data_filter(Bool) when is_boolean(Bool) -> Bool; +data_filter(Data) -> bin(Data). + +bin(Data) -> emqx_plugin_libs_rule:bin(Data). + +%% helper funcs +log_error_points(InstId, Errs) -> + lists:foreach( + fun({error, Reason}) -> + ?SLOG(error, #{ + msg => "influxdb trans point failed", + connector => InstId, + reason => Reason + }) + end, + Errs + ). diff --git a/lib-ee/emqx_ee_connector/test/ee_connector_hstreamdb_SUITE.erl b/lib-ee/emqx_ee_connector/test/ee_connector_hstreamdb_SUITE.erl new file mode 100644 index 000000000..4de456b2b --- /dev/null +++ b/lib-ee/emqx_ee_connector/test/ee_connector_hstreamdb_SUITE.erl @@ -0,0 +1,16 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(ee_connector_hstreamdb_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). 
+ +%% TODO: diff --git a/lib-ee/emqx_license/test/emqx_license_SUITE.erl b/lib-ee/emqx_license/test/emqx_license_SUITE.erl index 851ef30ef..fcf9a3801 100644 --- a/lib-ee/emqx_license/test/emqx_license_SUITE.erl +++ b/lib-ee/emqx_license/test/emqx_license_SUITE.erl @@ -104,10 +104,10 @@ setup_test(TestCase, Config) when [ {apps, [emqx_conf, emqx_license]}, {load_schema, false}, - {schema_mod, emqx_enterprise_conf_schema}, + {schema_mod, emqx_ee_conf_schema}, {env_handler, fun (emqx) -> - emqx_config:save_schema_mod_and_names(emqx_enterprise_conf_schema), + emqx_config:save_schema_mod_and_names(emqx_ee_conf_schema), %% emqx_config:save_schema_mod_and_names(emqx_license_schema), application:set_env(emqx, boot_modules, []), application:set_env( @@ -121,7 +121,7 @@ setup_test(TestCase, Config) when ), ok; (emqx_conf) -> - emqx_config:save_schema_mod_and_names(emqx_enterprise_conf_schema), + emqx_config:save_schema_mod_and_names(emqx_ee_conf_schema), %% emqx_config:save_schema_mod_and_names(emqx_license_schema), application:set_env( emqx, diff --git a/mix.exs b/mix.exs index 8b8ded1a2..3a15b36b4 100644 --- a/mix.exs +++ b/mix.exs @@ -47,7 +47,7 @@ defmodule EMQXUmbrella.MixProject do {:lc, github: "emqx/lc", tag: "0.3.1"}, {:redbug, "2.0.7"}, {:typerefl, github: "ieQu1/typerefl", tag: "0.9.1", override: true}, - {:ehttpc, github: "emqx/ehttpc", tag: "0.3.0"}, + {:ehttpc, github: "emqx/ehttpc", tag: "0.4.0", override: true}, {:gproc, github: "uwiger/gproc", tag: "0.8.0", override: true}, {:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true}, {:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true}, @@ -56,7 +56,7 @@ defmodule EMQXUmbrella.MixProject do {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.6", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.6", override: true}, - {:ecpool, github: "emqx/ecpool", tag: "0.5.2"}, + {:ecpool, github: "emqx/ecpool", tag: "0.5.2", override: true}, {:replayq, "0.3.4", override: true}, {:pbkdf2, github: "emqx/erlang-pbkdf2", tag: "2.0.4", override: true}, {:emqtt, github: "emqx/emqtt", tag: "1.6.0", override: true}, @@ -88,7 +88,9 @@ defmodule EMQXUmbrella.MixProject do {:ranch, github: "ninenines/ranch", ref: "a692f44567034dacf5efcaa24a24183788594eb7", override: true}, # in conflict by grpc and eetcd - {:gpb, "4.11.2", override: true, runtime: false} + {:gpb, "4.11.2", override: true, runtime: false}, + {:hstreamdb_erl, github: "hstreamdb/hstreamdb_erl", tag: "0.2.5"}, + {:influxdb, github: "emqx/influxdb-client-erl", tag: "1.1.3", override: true} ] ++ umbrella_apps() ++ enterprise_apps(profile_info) ++ bcrypt_dep() ++ jq_dep() ++ quicer_dep() end @@ -234,7 +236,9 @@ defmodule EMQXUmbrella.MixProject do if(edition_type == :enterprise, do: [ emqx_license: :permanent, - emqx_enterprise_conf: :load + emqx_ee_conf: :load, + emqx_ee_connector: :permanent, + emqx_ee_bridge: :permanent ], else: [] ) @@ -599,7 +603,7 @@ defmodule EMQXUmbrella.MixProject do end end - defp emqx_schema_mod(:enterprise), do: :emqx_enterprise_conf_schema + defp emqx_schema_mod(:enterprise), do: :emqx_ee_conf_schema defp emqx_schema_mod(:community), do: :emqx_conf_schema defp bcrypt_dep() do diff --git a/rebar.config b/rebar.config index 5f7892d87..8370278f1 100644 --- a/rebar.config +++ b/rebar.config @@ -49,7 +49,7 @@ , {gpb, "4.11.2"} %% gpb only used to build, but not for release, pin it here to avoid fetching a wrong version due to rebar plugins scattered in all the deps , {typerefl, {git, 
"https://github.com/ieQu1/typerefl", {tag, "0.9.1"}}} , {gun, {git, "https://github.com/emqx/gun", {tag, "1.3.7"}}} - , {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.3.0"}}} + , {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.4.0"}}} , {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}} , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}} diff --git a/rebar.config.erl b/rebar.config.erl index 17c94374c..5dc273c1d 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -313,7 +313,7 @@ overlay_vars_edition(ce) -> ]; overlay_vars_edition(ee) -> [ - {emqx_schema_mod, emqx_enterprise_conf_schema}, + {emqx_schema_mod, emqx_ee_conf_schema}, {is_enterprise, "yes"} ]. @@ -411,7 +411,9 @@ is_app(Name) -> relx_apps_per_edition(ee) -> [ emqx_license, - {emqx_enterprise_conf, load} + {emqx_ee_conf, load}, + emqx_ee_connector, + emqx_ee_bridge ]; relx_apps_per_edition(ce) -> []. diff --git a/scripts/spellcheck b/scripts/spellcheck index 51d8d2907..f8af8c3f6 100755 --- a/scripts/spellcheck +++ b/scripts/spellcheck @@ -7,7 +7,7 @@ else SCHEMA="$1" fi -docker run -d --name langtool "ghcr.io/emqx/emqx-schema-validate:0.3.3" +docker run -d --name langtool "ghcr.io/emqx/emqx-schema-validate:0.3.5" docker exec -i langtool ./emqx_schema_validate - < "${SCHEMA}" success="$?"