Merge pull request #12387 from thalesmg/sync-r55-m-20240124

sync release-55 to master
This commit is contained in:
zhongwencool 2024-01-25 17:35:41 +08:00 committed by GitHub
commit 3b0736fc67
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
65 changed files with 2904 additions and 763 deletions

View File

@ -21,7 +21,7 @@ endif
# Dashboard version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.6.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.5.0-beta.3
export EMQX_EE_DASHBOARD_VERSION ?= e1.5.0-beta.8
PROFILE ?= emqx
REL_PROFILES := emqx emqx-enterprise

View File

@ -59,6 +59,7 @@
{emqx_persistent_session_ds,1}.
{emqx_plugins,1}.
{emqx_prometheus,1}.
{emqx_prometheus,2}.
{emqx_resource,1}.
{emqx_retainer,1}.
{emqx_retainer,2}.

View File

@ -28,7 +28,7 @@
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.1"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.18.1"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.18.3"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.40.4"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},

View File

@ -166,6 +166,8 @@ names() ->
emqx_live_connections_max,
emqx_sessions_count,
emqx_sessions_max,
emqx_channels_count,
emqx_channels_max,
emqx_topics_count,
emqx_topics_max,
emqx_suboptions_count,

View File

@ -476,7 +476,6 @@ t_metrics_not_dropped(_Config) ->
{ok, _, [?RC_GRANTED_QOS_1]} = emqtt:subscribe(Sub, <<"t/+">>, ?QOS_1),
emqtt:publish(Pub, <<"t/ps">>, <<"payload">>, ?QOS_1),
?assertMatch([_], receive_messages(1)),
DroppedAfter = emqx_metrics:val('messages.dropped'),
DroppedNoSubAfter = emqx_metrics:val('messages.dropped.no_subscribers'),

View File

@ -103,33 +103,37 @@
load() ->
Bridges = emqx:get_config([?ROOT_KEY], #{}),
lists:foreach(
emqx_utils:pforeach(
fun({Type, NamedConf}) ->
lists:foreach(
emqx_utils:pforeach(
fun({Name, Conf}) ->
%% fetch opts for `emqx_resource_buffer_worker`
ResOpts = emqx_resource:fetch_creation_opts(Conf),
safe_load_bridge(Type, Name, Conf, ResOpts)
end,
maps:to_list(NamedConf)
maps:to_list(NamedConf),
infinity
)
end,
maps:to_list(Bridges)
maps:to_list(Bridges),
infinity
).
unload() ->
unload_hook(),
Bridges = emqx:get_config([?ROOT_KEY], #{}),
lists:foreach(
emqx_utils:pforeach(
fun({Type, NamedConf}) ->
lists:foreach(
emqx_utils:pforeach(
fun({Name, _Conf}) ->
_ = emqx_bridge_resource:stop(Type, Name)
end,
maps:to_list(NamedConf)
maps:to_list(NamedConf),
infinity
)
end,
maps:to_list(Bridges)
maps:to_list(Bridges),
infinity
).
safe_load_bridge(Type, Name, Conf, Opts) ->
@ -284,15 +288,15 @@ pre_config_update([?ROOT_KEY], NewConf, _RawConf) ->
post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
#{added := Added, removed := Removed, changed := Updated} =
diff_confs(NewConf, OldConf),
%% The config update will fail if any task in `perform_bridge_changes` fails.
Result = perform_bridge_changes([
#{action => fun emqx_bridge_resource:remove/4, data => Removed},
#{action => fun emqx_bridge_resource:remove/4, action_name => remove, data => Removed},
#{
action => fun emqx_bridge_resource:create/4,
action_name => create,
data => Added,
on_exception_fn => fun emqx_bridge_resource:remove/4
},
#{action => fun emqx_bridge_resource:update/4, data => Updated}
#{action => fun emqx_bridge_resource:update/4, action_name => update, data => Updated}
]),
ok = unload_hook(),
ok = load_hook(NewConf),
@ -534,28 +538,21 @@ convert_certs(BridgesConf) ->
).
perform_bridge_changes(Tasks) ->
perform_bridge_changes(Tasks, ok).
perform_bridge_changes(Tasks, []).
perform_bridge_changes([], Result) ->
Result;
perform_bridge_changes([#{action := Action, data := MapConfs} = Task | Tasks], Result0) ->
OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
Result = maps:fold(
fun
({_Type, _Name}, _Conf, {error, Reason}) ->
{error, Reason};
%% for emqx_bridge_resource:update/4
({Type, Name}, {OldConf, Conf}, _) ->
ResOpts = emqx_resource:fetch_creation_opts(Conf),
case Action(Type, Name, {OldConf, Conf}, ResOpts) of
{error, Reason} -> {error, Reason};
Return -> Return
perform_bridge_changes([], Errors) ->
case Errors of
[] -> ok;
_ -> {error, Errors}
end;
({Type, Name}, Conf, _) ->
ResOpts = emqx_resource:fetch_creation_opts(Conf),
try Action(Type, Name, Conf, ResOpts) of
{error, Reason} -> {error, Reason};
Return -> Return
perform_bridge_changes([#{action := Action, data := MapConfs} = Task | Tasks], Errors0) ->
OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
Results = emqx_utils:pmap(
fun({{Type, Name}, Conf}) ->
ResOpts = creation_opts(Conf),
Res =
try
Action(Type, Name, Conf, ResOpts)
catch
Kind:Error:Stacktrace ->
?SLOG(error, #{
@ -567,13 +564,34 @@ perform_bridge_changes([#{action := Action, data := MapConfs} = Task | Tasks], R
stacktrace => Stacktrace
}),
OnException(Type, Name, Conf, ResOpts),
erlang:raise(Kind, Error, Stacktrace)
end
{error, Error}
end,
Result0,
MapConfs
{{Type, Name}, Res}
end,
maps:to_list(MapConfs),
infinity
),
perform_bridge_changes(Tasks, Result).
Errs = lists:filter(
fun
({_TypeName, {error, _}}) -> true;
(_) -> false
end,
Results
),
Errors =
case Errs of
[] ->
Errors0;
_ ->
#{action_name := ActionName} = Task,
[#{action => ActionName, errors => Errs} | Errors0]
end,
perform_bridge_changes(Tasks, Errors).
creation_opts({_OldConf, Conf}) ->
emqx_resource:fetch_creation_opts(Conf);
creation_opts(Conf) ->
emqx_resource:fetch_creation_opts(Conf).
diff_confs(NewConfs, OldConfs) ->
emqx_utils_maps:diff_maps(

View File

@ -1132,15 +1132,14 @@ maybe_unwrap({error, not_implemented}) ->
maybe_unwrap(RpcMulticallResult) ->
emqx_rpc:unwrap_erpc(RpcMulticallResult).
supported_versions(start_bridge_to_node) -> bpapi_version_range(2, latest);
supported_versions(start_bridges_to_all_nodes) -> bpapi_version_range(2, latest);
supported_versions(get_metrics_from_all_nodes) -> bpapi_version_range(4, latest);
supported_versions(_Call) -> bpapi_version_range(1, latest).
supported_versions(start_bridge_to_node) -> bpapi_version_range(2, 6);
supported_versions(start_bridges_to_all_nodes) -> bpapi_version_range(2, 6);
supported_versions(get_metrics_from_all_nodes) -> bpapi_version_range(4, 6);
supported_versions(_Call) -> bpapi_version_range(1, 6).
%% [From, To] (inclusive on both ends)
bpapi_version_range(From, latest) ->
ThisNodeVsn = emqx_bpapi:supported_version(node(), ?BPAPI_NAME),
lists:seq(From, ThisNodeVsn).
bpapi_version_range(From, To) ->
lists:seq(From, To).
redact(Term) ->
emqx_utils:redact(Term).

View File

@ -182,17 +182,20 @@ load() ->
load_bridges(RootName) ->
Bridges = emqx:get_config([RootName], #{}),
lists:foreach(
_ = emqx_utils:pmap(
fun({Type, Bridge}) ->
lists:foreach(
emqx_utils:pmap(
fun({Name, BridgeConf}) ->
install_bridge_v2(RootName, Type, Name, BridgeConf)
end,
maps:to_list(Bridge)
maps:to_list(Bridge),
infinity
)
end,
maps:to_list(Bridges)
).
maps:to_list(Bridges),
infinity
),
ok.
unload() ->
unload_bridges(?ROOT_KEY_ACTIONS),
@ -204,17 +207,20 @@ unload() ->
unload_bridges(ConfRooKey) ->
Bridges = emqx:get_config([ConfRooKey], #{}),
lists:foreach(
_ = emqx_utils:pmap(
fun({Type, Bridge}) ->
lists:foreach(
emqx_utils:pmap(
fun({Name, BridgeConf}) ->
uninstall_bridge_v2(ConfRooKey, Type, Name, BridgeConf)
end,
maps:to_list(Bridge)
maps:to_list(Bridge),
infinity
)
end,
maps:to_list(Bridges)
).
maps:to_list(Bridges),
infinity
),
ok.
%%====================================================================
%% CRUD API
@ -641,8 +647,8 @@ reset_metrics(ConfRootKey, Type, Name) ->
reset_metrics_helper(_ConfRootKey, _Type, _Name, #{enable := false}) ->
ok;
reset_metrics_helper(ConfRootKey, BridgeV2Type, BridgeName, #{connector := ConnectorName}) ->
BridgeV2Id = id_with_root_name(ConfRootKey, BridgeV2Type, BridgeName, ConnectorName),
ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, BridgeV2Id);
ResourceId = id_with_root_name(ConfRootKey, BridgeV2Type, BridgeName, ConnectorName),
emqx_resource:reset_metrics(ResourceId);
reset_metrics_helper(_, _, _, _) ->
{error, not_found}.
@ -1059,7 +1065,6 @@ post_config_update([ConfRootKey], _Req, NewConf, OldConf, _AppEnv) when
->
#{added := Added, removed := Removed, changed := Updated} =
diff_confs(NewConf, OldConf),
%% The config update will be failed if any task in `perform_bridge_changes` failed.
RemoveFun = fun(Type, Name, Conf) ->
uninstall_bridge_v2(ConfRootKey, Type, Name, Conf)
end,
@ -1071,13 +1076,14 @@ post_config_update([ConfRootKey], _Req, NewConf, OldConf, _AppEnv) when
install_bridge_v2(ConfRootKey, Type, Name, Conf)
end,
Result = perform_bridge_changes([
#{action => RemoveFun, data => Removed},
#{action => RemoveFun, action_name => remove, data => Removed},
#{
action => CreateFun,
action_name => create,
data => Added,
on_exception_fn => fun emqx_bridge_resource:remove/4
},
#{action => UpdateFun, data => Updated}
#{action => UpdateFun, action_name => update, data => Updated}
]),
reload_message_publish_hook(NewConf),
?tp(bridge_post_config_update_done, #{}),
@ -1141,26 +1147,20 @@ do_flatten_confs(Type, Conf0) ->
[{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].
perform_bridge_changes(Tasks) ->
perform_bridge_changes(Tasks, ok).
perform_bridge_changes(Tasks, []).
perform_bridge_changes([], Result) ->
Result;
perform_bridge_changes([#{action := Action, data := MapConfs} = Task | Tasks], Result0) ->
OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
Result = maps:fold(
fun
({_Type, _Name}, _Conf, {error, Reason}) ->
{error, Reason};
%% for update
({Type, Name}, {OldConf, Conf}, _) ->
case Action(Type, Name, {OldConf, Conf}) of
{error, Reason} -> {error, Reason};
Return -> Return
perform_bridge_changes([], Errors) ->
case Errors of
[] -> ok;
_ -> {error, Errors}
end;
({Type, Name}, Conf, _) ->
try Action(Type, Name, Conf) of
{error, Reason} -> {error, Reason};
Return -> Return
perform_bridge_changes([#{action := Action, data := MapConfs} = Task | Tasks], Errors0) ->
OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
Results = emqx_utils:pmap(
fun({{Type, Name}, Conf}) ->
Res =
try
Action(Type, Name, Conf)
catch
Kind:Error:Stacktrace ->
?SLOG(error, #{
@ -1172,13 +1172,29 @@ perform_bridge_changes([#{action := Action, data := MapConfs} = Task | Tasks], R
stacktrace => Stacktrace
}),
OnException(Type, Name, Conf),
erlang:raise(Kind, Error, Stacktrace)
end
{error, Error}
end,
Result0,
MapConfs
{{Type, Name}, Res}
end,
maps:to_list(MapConfs),
infinity
),
perform_bridge_changes(Tasks, Result).
Errs = lists:filter(
fun
({_TypeName, {error, _}}) -> true;
(_) -> false
end,
Results
),
Errors =
case Errs of
[] ->
Errors0;
_ ->
#{action_name := ActionName} = Task,
[#{action => ActionName, errors => Errs} | Errors0]
end,
perform_bridge_changes(Tasks, Errors).
fill_defaults(Type, RawConf, TopLevelConf, SchemaModule) ->
PackedConf = pack_bridge_conf(Type, RawConf, TopLevelConf),

View File

@ -1052,12 +1052,11 @@ do_bpapi_call_vsn(Version, Call, Args) ->
is_supported_version(Version, Call) ->
lists:member(Version, supported_versions(Call)).
supported_versions(_Call) -> bpapi_version_range(6, latest).
supported_versions(_Call) -> bpapi_version_range(6, 6).
%% [From, To] (inclusive on both ends)
bpapi_version_range(From, latest) ->
ThisNodeVsn = emqx_bpapi:supported_version(node(), ?BPAPI_NAME),
lists:seq(From, ThisNodeVsn).
bpapi_version_range(From, To) ->
lists:seq(From, To).
maybe_unwrap({error, not_implemented}) ->
{error, not_implemented};
@ -1178,6 +1177,9 @@ format_resource(
)
).
%% FIXME:
%% missing metrics:
%% 'retried.success' and 'retried.failed'
format_metrics(#{
counters := #{
'dropped' := Dropped,

View File

@ -606,12 +606,22 @@ t_load_no_matching_connector(_Config) ->
},
?assertMatch(
{error,
{post_config_update, _HandlerMod, #{
{post_config_update, _HandlerMod, [
#{
errors := [
{
{_, my_test_bridge_update},
{error, #{
bridge_name := my_test_bridge_update,
connector_name := <<"unknown">>,
bridge_type := _,
reason := <<"connector_not_found_or_wrong_type">>
}}},
}}
}
],
action := update
}
]}},
update_root_config(RootConf0)
),
@ -623,12 +633,22 @@ t_load_no_matching_connector(_Config) ->
},
?assertMatch(
{error,
{post_config_update, _HandlerMod, #{
{post_config_update, _HandlerMod, [
#{
errors := [
{
{_, my_test_bridge_new},
{error, #{
bridge_name := my_test_bridge_new,
connector_name := <<"unknown">>,
bridge_type := _,
reason := <<"connector_not_found_or_wrong_type">>
}}},
}}
}
],
action := create
}
]}},
update_root_config(RootConf1)
),

View File

@ -286,6 +286,10 @@ init_mocks() ->
ok = meck:expect(?CONNECTOR_IMPL, on_get_channels, fun(ResId) ->
emqx_bridge_v2:get_channels_for_connector(ResId)
end),
meck:expect(?CONNECTOR_IMPL, on_query_async, fun(_ResId, _Req, ReplyFunAndArgs, _ConnState) ->
emqx_resource:apply_reply_fun(ReplyFunAndArgs, ok),
{ok, self()}
end),
ok.
clear_resources() ->
@ -378,6 +382,9 @@ enable_path(Enable, BridgeID) ->
publish_message(Topic, Body, Config) ->
Node = ?config(node, Config),
publish_message(Topic, Body, Node, Config).
publish_message(Topic, Body, Node, _Config) ->
erpc:call(Node, emqx, publish, [emqx_message:make(Topic, Body)]).
update_config(Path, Value, Config) ->
@ -524,6 +531,17 @@ get_common_values(Kind, FnName) ->
}
end.
maybe_get_other_node(Config) ->
%% In the single node test group, this simply returns the lone node. Otherwise, it'll
%% return a node that's not the primary one that receives API calls.
PrimaryNode = ?config(node, Config),
case proplists:get_value(cluster_nodes, Config, []) -- [PrimaryNode] of
[] ->
PrimaryNode;
[OtherNode | _] ->
OtherNode
end.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
@ -1385,7 +1403,8 @@ t_reset_metrics(Config) ->
ActionID = emqx_bridge_resource:bridge_id(?ACTION_TYPE, ActionName),
Body = <<"my msg">>,
_ = publish_message(?MQTT_LOCAL_TOPIC, Body, Config),
OtherNode = maybe_get_other_node(Config),
_ = publish_message(?MQTT_LOCAL_TOPIC, Body, OtherNode, Config),
?retry(
_Sleep0 = 200,
_Retries0 = 20,
@ -1400,16 +1419,30 @@ t_reset_metrics(Config) ->
{ok, 204, <<>>} = request(put, uri([?ACTIONS_ROOT, ActionID, "metrics", "reset"]), Config),
?retry(
Res = ?retry(
_Sleep0 = 200,
_Retries0 = 20,
begin
Res0 = request_json(get, uri([?ACTIONS_ROOT, ActionID, "metrics"]), Config),
?assertMatch(
{ok, 200, #{
<<"metrics">> := #{<<"matched">> := 0},
<<"node_metrics">> := [#{<<"metrics">> := #{}} | _]
}},
request_json(get, uri([?ACTIONS_ROOT, ActionID, "metrics"]), Config)
)
Res0
),
Res0
end
),
{ok, 200, #{<<"node_metrics">> := NodeMetrics}} = Res,
?assert(
lists:all(
fun(#{<<"metrics">> := #{<<"matched">> := Matched}}) ->
Matched == 0
end,
NodeMetrics
),
#{node_metrics => NodeMetrics}
),
ok.

View File

@ -10,8 +10,7 @@
{applications, [
kernel,
stdlib,
emqx_resource,
emqx_connector
emqx_resource
]},
{env, []},
{licenses, ["Business Source License 1.1"]},

View File

@ -34,6 +34,7 @@
]).
-export([render_template/2]).
-export([convert_server/2]).
%% emqx_connector_resource behaviour callbacks
-export([connector_config/2]).
@ -92,7 +93,7 @@ connector_example_values() ->
<<"username">> => <<"root">>,
<<"password">> => <<"******">>
},
base_url => <<"http://127.0.0.1:9200/">>,
server => <<"127.0.0.1:9200">>,
connect_timeout => <<"15s">>,
pool_type => <<"random">>,
pool_size => 8,
@ -116,14 +117,7 @@ fields(config) ->
fields("connection_fields");
fields("connection_fields") ->
[
{base_url,
?HOCON(
emqx_schema:url(),
#{
required => true,
desc => ?DESC(emqx_bridge_es, "config_base_url")
}
)},
{server, server()},
{authentication,
?HOCON(
?UNION([?R_REF(auth_basic)]),
@ -158,30 +152,36 @@ desc(auth_basic) ->
"Basic Authentication";
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for Elastic Search using `", string:to_upper(Method), "` method."];
desc("server") ->
?DESC("server");
desc(_) ->
undefined.
server() ->
Meta = #{
required => true,
default => <<"127.0.0.1:9200">>,
desc => ?DESC("server"),
converter => fun ?MODULE:convert_server/2
},
emqx_schema:servers_sc(Meta, #{default_port => 9200}).
convert_server(<<"http://", Server/binary>>, HoconOpts) ->
convert_server(Server, HoconOpts);
convert_server(<<"https://", Server/binary>>, HoconOpts) ->
convert_server(Server, HoconOpts);
convert_server(Server0, HoconOpts) ->
Server = string:trim(Server0, trailing, "/"),
emqx_schema:convert_servers(Server, HoconOpts).
connector_config(Conf, #{name := Name, parse_confs := ParseConfs}) ->
#{
base_url := BaseUrl,
authentication :=
#{
username := Username,
password := Password0
}
} = Conf,
Password = emqx_secret:unwrap(Password0),
Base64 = base64:encode(<<Username/binary, ":", Password/binary>>),
BasicToken = <<"Basic ", Base64/binary>>,
WebhookConfig =
Conf#{
method => <<"post">>,
url => BaseUrl,
url => base_url(Conf),
headers => [
{<<"Content-type">>, <<"application/json">>},
{<<"Authorization">>, BasicToken}
{<<"Authorization">>, basic_token(Conf)}
]
},
ParseConfs(
@ -190,6 +190,19 @@ connector_config(Conf, #{name := Name, parse_confs := ParseConfs}) ->
WebhookConfig
).
basic_token(#{
authentication :=
#{
username := Username,
password := Password0
}
}) ->
Password = emqx_secret:unwrap(Password0),
Base64 = base64:encode(<<Username/binary, ":", Password/binary>>),
<<"Basic ", Base64/binary>>.
base_url(#{ssl := #{enable := true}, server := Server}) -> "https://" ++ Server;
base_url(#{server := Server}) -> "http://" ++ Server.
%%-------------------------------------------------------------------------------------
%% `emqx_resource' API
%%-------------------------------------------------------------------------------------
@ -316,6 +329,10 @@ on_get_channel_status(_InstanceId, ChannelId, #{channels := Channels}) ->
{error, not_exists}
end.
render_template([<<"update_without_doc_template">>], Msg) ->
emqx_utils_json:encode(#{<<"doc">> => Msg});
render_template([<<"create_without_doc_template">>], Msg) ->
emqx_utils_json:encode(#{<<"doc">> => Msg, <<"doc_as_upsert">> => true});
render_template(Template, Msg) ->
% Ignoring errors here, undefined bindings will be replaced with empty string.
Opts = #{var_trans => fun to_string/2},
@ -395,6 +412,11 @@ get_body_template(#{action := update, doc := Doc} = Template) ->
false -> <<"{\"doc\":", Doc/binary, "}">>;
true -> <<"{\"doc\":", Doc/binary, ",\"doc_as_upsert\": true}">>
end;
get_body_template(#{action := update} = Template) ->
case maps:get(doc_as_upsert, Template, false) of
false -> <<"update_without_doc_template">>;
true -> <<"create_without_doc_template">>
end;
get_body_template(#{doc := Doc}) ->
Doc;
get_body_template(_) ->

View File

@ -103,13 +103,13 @@ end_per_testcase(_TestCase, _Config) ->
%% Helper fns
%%-------------------------------------------------------------------------------------
check_send_message_with_action(Topic, ActionName, ConnectorName) ->
check_send_message_with_action(Topic, ActionName, ConnectorName, Expect) ->
send_message(Topic),
%% ######################################
%% Check if message is sent to es
%% ######################################
timer:sleep(500),
check_action_metrics(ActionName, ConnectorName).
check_action_metrics(ActionName, ConnectorName, Expect).
send_message(Topic) ->
Now = emqx_utils_calendar:now_to_rfc3339(microsecond),
@ -123,7 +123,7 @@ send_message(Topic) ->
ok = emqtt:publish(Client, Topic, Payload, [{qos, 0}]),
ok.
check_action_metrics(ActionName, ConnectorName) ->
check_action_metrics(ActionName, ConnectorName, Expect) ->
ActionId = emqx_bridge_v2:id(?TYPE, ActionName, ConnectorName),
Metrics =
#{
@ -134,13 +134,7 @@ check_action_metrics(ActionName, ConnectorName) ->
dropped => emqx_resource_metrics:dropped_get(ActionId)
},
?assertEqual(
#{
match => 1,
success => 1,
dropped => 0,
failed => 0,
queuing => 0
},
Expect,
Metrics,
{ActionName, ConnectorName, ActionId}
).
@ -169,11 +163,10 @@ action(ConnectorName) ->
}
}.
base_url(Config) ->
server(Config) ->
Host = ?config(es_host, Config),
Port = ?config(es_port, Config),
iolist_to_binary([
"https://",
Host,
":",
integer_to_binary(Port)
@ -185,7 +178,7 @@ connector_config(Config) ->
connector_config(Overrides, Config) ->
Defaults =
#{
<<"base_url">> => base_url(Config),
<<"server">> => server(Config),
<<"enable">> => true,
<<"authentication">> => #{
<<"password">> => <<"emqx123">>,
@ -249,7 +242,7 @@ t_create_remove_list(Config) ->
ok.
%% Test sending a message to a bridge V2
t_send_message(Config) ->
t_create_message(Config) ->
ConnectorConfig = connector_config(Config),
{ok, _} = emqx_connector:create(?TYPE, test_connector2, ConnectorConfig),
ActionConfig = action(<<"test_connector2">>),
@ -262,7 +255,8 @@ t_send_message(Config) ->
},
{ok, _} = emqx_rule_engine:create_rule(Rule),
%% Use the action to send a message
check_send_message_with_action(<<"es/1">>, test_action_1, test_connector2),
Expect = #{match => 1, success => 1, dropped => 0, failed => 0, queuing => 0},
check_send_message_with_action(<<"es/1">>, test_action_1, test_connector2, Expect),
%% Create a few more bridges with the same connector and test them
ActionNames1 =
lists:foldl(
@ -279,7 +273,7 @@ t_send_message(Config) ->
},
{ok, _} = emqx_rule_engine:create_rule(Rule1),
Topic = <<"es/", Seq/binary>>,
check_send_message_with_action(Topic, ActionName, test_connector2),
check_send_message_with_action(Topic, ActionName, test_connector2, Expect),
[ActionName | Acc]
end,
[],
@ -294,6 +288,74 @@ t_send_message(Config) ->
ActionNames
),
emqx_connector:remove(?TYPE, test_connector2),
lists:foreach(
fun(#{id := Id}) ->
emqx_rule_engine:delete_rule(Id)
end,
emqx_rule_engine:get_rules()
),
ok.
t_update_message(Config) ->
ConnectorConfig = connector_config(Config),
{ok, _} = emqx_connector:create(?TYPE, update_connector, ConnectorConfig),
ActionConfig0 = action(<<"update_connector">>),
DocId = emqx_guid:to_hexstr(emqx_guid:gen()),
ActionConfig1 = ActionConfig0#{
<<"parameters">> => #{
<<"index">> => <<"${payload.index}">>,
<<"id">> => DocId,
<<"max_retries">> => 0,
<<"action">> => <<"update">>,
<<"doc">> => <<"${payload.doc}">>
}
},
{ok, _} = emqx_bridge_v2:create(?TYPE, update_action, ActionConfig1),
Rule = #{
id => <<"rule:t_es_1">>,
sql => <<"SELECT\n *\nFROM\n \"es/#\"">>,
actions => [<<"elasticsearch:update_action">>],
description => <<"sink doc to elasticsearch">>
},
{ok, _} = emqx_rule_engine:create_rule(Rule),
%% failed to update a nonexistent doc
Expect0 = #{match => 1, success => 0, dropped => 0, failed => 1, queuing => 0},
check_send_message_with_action(<<"es/1">>, update_action, update_connector, Expect0),
%% doc_as_upsert to insert a new doc
ActionConfig2 = ActionConfig1#{
<<"parameters">> => #{
<<"index">> => <<"${payload.index}">>,
<<"id">> => DocId,
<<"action">> => <<"update">>,
<<"doc">> => <<"${payload.doc}">>,
<<"doc_as_upsert">> => true,
<<"max_retries">> => 0
}
},
{ok, _} = emqx_bridge_v2:create(?TYPE, update_action, ActionConfig2),
Expect1 = #{match => 1, success => 1, dropped => 0, failed => 0, queuing => 0},
check_send_message_with_action(<<"es/1">>, update_action, update_connector, Expect1),
%% update without doc, use msg as default
ActionConfig3 = ActionConfig1#{
<<"parameters">> => #{
<<"index">> => <<"${payload.index}">>,
<<"id">> => DocId,
<<"action">> => <<"update">>,
<<"max_retries">> => 0
}
},
{ok, _} = emqx_bridge_v2:create(?TYPE, update_action, ActionConfig3),
Expect2 = #{match => 1, success => 1, dropped => 0, failed => 0, queuing => 0},
check_send_message_with_action(<<"es/1">>, update_action, update_connector, Expect2),
%% Clean
ok = emqx_bridge_v2:remove(?TYPE, update_action),
emqx_connector:remove(?TYPE, update_connector),
lists:foreach(
fun(#{id := Id}) ->
emqx_rule_engine:delete_rule(Id)
end,
emqx_rule_engine:get_rules()
),
ok.
%% Test that we can get the status of the bridge V2
@ -314,7 +376,7 @@ t_bad_url(Config) ->
ActionName = <<"test_action">>,
ActionConfig = action(<<"test_connector">>),
ConnectorConfig0 = connector_config(Config),
ConnectorConfig = ConnectorConfig0#{<<"base_url">> := <<"bad_host:9092">>},
ConnectorConfig = ConnectorConfig0#{<<"server">> := <<"bad_host:9092">>},
?assertMatch({ok, _}, create_connector(ConnectorName, ConnectorConfig)),
?assertMatch({ok, _}, create_action(ActionName, ActionConfig)),
?assertMatch(

View File

@ -2,7 +2,7 @@
{description, "EMQX HTTP Bridge and Connector Application"},
{vsn, "0.2.2"},
{registered, []},
{applications, [kernel, stdlib, emqx_connector, emqx_resource, ehttpc]},
{applications, [kernel, stdlib, emqx_resource, ehttpc]},
{env, [{emqx_action_info_modules, [emqx_bridge_http_action_info]}]},
{modules, []},
{links, []}

View File

@ -865,7 +865,8 @@ convert_server(<<"http://", Server/binary>>, HoconOpts) ->
convert_server(Server, HoconOpts);
convert_server(<<"https://", Server/binary>>, HoconOpts) ->
convert_server(Server, HoconOpts);
convert_server(Server, HoconOpts) ->
convert_server(Server0, HoconOpts) ->
Server = string:trim(Server0, trailing, "/"),
emqx_schema:convert_servers(Server, HoconOpts).
str(A) when is_atom(A) ->

View File

@ -10,9 +10,7 @@
{applications, [
kernel,
stdlib,
emqx_resource,
%% for module emqx_connector_http
emqx_connector
emqx_resource
]},
{env, []},
{licenses, ["Business Source License 1.1"]},

View File

@ -5,7 +5,6 @@
{applications, [
kernel,
stdlib,
emqx_connector,
emqx_resource,
emqx_mongodb
]},

View File

@ -18,6 +18,7 @@
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% management APIs
-export([
@ -284,9 +285,10 @@ maybe_on_message_received(Msg, {Mod, Func, Args}) ->
maybe_on_message_received(_Msg, undefined) ->
ok.
maybe_publish_local(Msg, Local = #{}, Props) ->
maybe_publish_local(Msg, Local = #{topic := Topic}, Props) when Topic =/= undefined ->
?tp(mqtt_ingress_publish_local, #{msg => Msg, local => Local}),
emqx_broker:publish(to_broker_msg(Msg, Local, Props));
maybe_publish_local(_Msg, undefined, _Props) ->
maybe_publish_local(_Msg, _Local, _Props) ->
ok.
%%

View File

@ -48,13 +48,14 @@ fields(action) ->
)};
fields("mqtt_publisher_action") ->
emqx_bridge_v2_schema:make_producer_action_schema(
hoconsc:mk(
hoconsc:ref(?MODULE, action_parameters),
mk(
ref(?MODULE, action_parameters),
#{
required => true,
desc => ?DESC("action_parameters")
}
)
),
#{resource_opts_ref => ref(?MODULE, action_resource_opts)}
);
fields(action_parameters) ->
[
@ -86,7 +87,8 @@ fields("mqtt_subscriber_source") ->
required => true,
desc => ?DESC("source_parameters")
}
)
),
#{resource_opts_ref => ref(?MODULE, source_resource_opts)}
);
fields(ingress_parameters) ->
[
@ -187,7 +189,7 @@ source_examples(Method) ->
#{
parameters => #{
topic => <<"remote/topic">>,
qos => 2
qos => 1
}
}
)

View File

@ -238,6 +238,9 @@ t_receive_via_rule(Config) ->
end,
fun(Trace) ->
?assertEqual([], ?of_kind("action_references_nonexistent_bridges", Trace)),
%% We don't have the hidden, legacy `local' config set, so we shouldn't
%% attempt to publish directly.
?assertEqual([], ?of_kind(mqtt_ingress_publish_local, Trace)),
ok
end
),

View File

@ -5,7 +5,6 @@
{applications, [
kernel,
stdlib,
emqx_connector,
emqx_resource,
emqx_mysql
]},

View File

@ -5,7 +5,6 @@
{applications, [
kernel,
stdlib,
emqx_connector,
emqx_resource,
emqx_redis
]},

View File

@ -67,11 +67,17 @@ on_start(
{tcp_options, [{mode, binary}, {reuseaddr, true}, {nodelay, true}]}
],
MFArgs = {?MODULE, start_link, [maps:with([handshake_timeout], Config)]},
ok = emqx_resource:allocate_resource(InstanceId, listen_on, ListenOn),
%% Since esockd only supports atom names and we don't want to introduce a new atom per instance,
%% two instances/connectors using the same port will reference the same esockd listener.
%% To prevent a failed instance from deallocating the listener created by an earlier instance,
%% we record the allocation only when the listen succeeds.
case esockd:open(?MODULE, ListenOn, Options, MFArgs) of
{ok, _} ->
ok = emqx_resource:allocate_resource(InstanceId, listen_on, ListenOn),
{ok, #{listen_on => ListenOn}};
{error, {already_started, _}} ->
{error, eaddrinuse};
Error ->
Error
end.
@ -83,7 +89,12 @@ on_stop(InstanceId, _State) ->
}),
case emqx_resource:get_allocated_resources(InstanceId) of
#{listen_on := ListenOn} ->
esockd:close(?MODULE, ListenOn);
case esockd:close(?MODULE, ListenOn) of
{error, not_found} ->
ok;
Result ->
Result
end;
_ ->
ok
end.

View File

@ -1106,6 +1106,8 @@ tr_prometheus_collectors(Conf) ->
prometheus_summary,
%% emqx collectors
emqx_prometheus,
{'/prometheus/auth', emqx_prometheus_auth},
{'/prometheus/data_integration', emqx_prometheus_data_integration},
emqx_prometheus_mria
%% builtin vm collectors
| prometheus_collectors(Conf)

View File

@ -54,30 +54,34 @@
load() ->
Connectors = emqx:get_config([?ROOT_KEY], #{}),
lists:foreach(
emqx_utils:pforeach(
fun({Type, NamedConf}) ->
lists:foreach(
emqx_utils:pforeach(
fun({Name, Conf}) ->
safe_load_connector(Type, Name, Conf)
end,
maps:to_list(NamedConf)
maps:to_list(NamedConf),
infinity
)
end,
maps:to_list(Connectors)
maps:to_list(Connectors),
infinity
).
unload() ->
Connectors = emqx:get_config([?ROOT_KEY], #{}),
lists:foreach(
emqx_utils:pforeach(
fun({Type, NamedConf}) ->
lists:foreach(
emqx_utils:pforeach(
fun({Name, _Conf}) ->
_ = emqx_connector_resource:stop(Type, Name)
end,
maps:to_list(NamedConf)
maps:to_list(NamedConf),
infinity
)
end,
maps:to_list(Connectors)
maps:to_list(Connectors),
infinity
).
safe_load_connector(Type, Name, Conf) ->
@ -169,16 +173,16 @@ post_config_update([?ROOT_KEY, Type, Name], _Req, NewConf, OldConf, _AppEnvs) ->
?tp(connector_post_config_update_done, #{}),
ok.
%% The config update will fail if any task in `perform_connector_changes` fails.
perform_connector_changes(Removed, Added, Updated) ->
Result = perform_connector_changes([
#{action => fun emqx_connector_resource:remove/4, data => Removed},
#{action => fun emqx_connector_resource:remove/4, action_name => remove, data => Removed},
#{
action => fun emqx_connector_resource:create/4,
action_name => create,
data => Added,
on_exception_fn => fun emqx_connector_resource:remove/4
},
#{action => fun emqx_connector_resource:update/4, data => Updated}
#{action => fun emqx_connector_resource:update/4, action_name => update, data => Updated}
]),
?tp(connector_post_config_update_done, #{}),
Result.
@ -351,28 +355,21 @@ convert_certs(ConnectorsConf) ->
).
perform_connector_changes(Tasks) ->
perform_connector_changes(Tasks, ok).
perform_connector_changes(Tasks, []).
perform_connector_changes([], Result) ->
Result;
perform_connector_changes([#{action := Action, data := MapConfs} = Task | Tasks], Result0) ->
OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
Result = maps:fold(
fun
({_Type, _Name}, _Conf, {error, Reason}) ->
{error, Reason};
%% for emqx_connector_resource:update/4
({Type, Name}, {OldConf, Conf}, _) ->
ResOpts = emqx_resource:fetch_creation_opts(Conf),
case Action(Type, Name, {OldConf, Conf}, ResOpts) of
{error, Reason} -> {error, Reason};
Return -> Return
perform_connector_changes([], Errors) ->
case Errors of
[] -> ok;
_ -> {error, Errors}
end;
({Type, Name}, Conf, _) ->
ResOpts = emqx_resource:fetch_creation_opts(Conf),
try Action(Type, Name, Conf, ResOpts) of
{error, Reason} -> {error, Reason};
Return -> Return
perform_connector_changes([#{action := Action, data := MapConfs} = Task | Tasks], Errors0) ->
OnException = maps:get(on_exception_fn, Task, fun(_Type, _Name, _Conf, _Opts) -> ok end),
Results = emqx_utils:pmap(
fun({{Type, Name}, Conf}) ->
ResOpts = creation_opts(Conf),
Res =
try
Action(Type, Name, Conf, ResOpts)
catch
Kind:Error:Stacktrace ->
?SLOG(error, #{
@ -384,13 +381,34 @@ perform_connector_changes([#{action := Action, data := MapConfs} = Task | Tasks]
stacktrace => Stacktrace
}),
OnException(Type, Name, Conf, ResOpts),
erlang:raise(Kind, Error, Stacktrace)
end
{error, Error}
end,
Result0,
MapConfs
{{Type, Name}, Res}
end,
maps:to_list(MapConfs),
infinity
),
perform_connector_changes(Tasks, Result).
Errs = lists:filter(
fun
({_TypeName, {error, _}}) -> true;
(_) -> false
end,
Results
),
Errors =
case Errs of
[] ->
Errors0;
_ ->
#{action_name := ActionName} = Task,
[#{action => ActionName, errors => Errs} | Errors0]
end,
perform_connector_changes(Tasks, Errors).
creation_opts({_OldConf, Conf}) ->
emqx_resource:fetch_creation_opts(Conf);
creation_opts(Conf) ->
emqx_resource:fetch_creation_opts(Conf).
diff_confs(NewConfs, OldConfs) ->
emqx_utils_maps:diff_maps(

View File

@ -107,7 +107,7 @@ parse_connector_id(ConnectorId) ->
{atom(), atom() | binary()}.
parse_connector_id(<<"connector:", ConnectorId/binary>>, Opts) ->
parse_connector_id(ConnectorId, Opts);
parse_connector_id(<<?TEST_ID_PREFIX, ConnectorId/binary>>, Opts) ->
parse_connector_id(<<?TEST_ID_PREFIX, _:16/binary, ConnectorId/binary>>, Opts) ->
parse_connector_id(ConnectorId, Opts);
parse_connector_id(ConnectorId, Opts) ->
emqx_resource:parse_resource_id(ConnectorId, Opts).
@ -229,7 +229,10 @@ create_dry_run(Type, Conf0, Callback) ->
TypeBin = bin(Type),
TypeAtom = safe_atom(Type),
%% We use a fixed name here to avoid creating an atom
TmpName = iolist_to_binary([?TEST_ID_PREFIX, TypeBin, ":", <<"probedryrun">>]),
%% to avoid potential race condition, the resource id should be unique
Prefix = emqx_resource_manager:make_test_id(),
TmpName =
iolist_to_binary([Prefix, TypeBin, ":", <<"probedryrun">>]),
TmpPath = emqx_utils:safe_filename(TmpName),
Conf1 = maps:without([<<"name">>], Conf0),
RawConf = #{<<"connectors">> => #{TypeBin => #{<<"temp_name">> => Conf1}}},

View File

@ -40,11 +40,14 @@
-export([
samplers/0,
samplers/2,
current_rate/0,
current_rate/1,
granularity_adapter/1
]).
-ifdef(TEST).
-export([current_rate_cluster/0]).
-endif.
%% for rpc
-export([do_sample/2]).
@ -112,8 +115,33 @@ granularity_adapter(List) when length(List) > 1000 ->
granularity_adapter(List) ->
List.
current_rate(all) ->
current_rate_cluster();
current_rate(Node) when Node == node() ->
try
{ok, Rate} = do_call(current_rate),
{ok, Rate}
catch
_E:R ->
?SLOG(warning, #{msg => "dashboard_monitor_error", reason => R}),
%% Rate map 0, ensure api will not crash.
%% When joining cluster, dashboard monitor restart.
Rate0 = [
{Key, 0}
|| Key <- ?GAUGE_SAMPLER_LIST ++ maps:values(?DELTA_SAMPLER_RATE_MAP)
],
{ok, maps:merge(maps:from_list(Rate0), non_rate_value())}
end;
current_rate(Node) ->
case emqx_dashboard_proto_v1:current_rate(Node) of
{badrpc, Reason} ->
{badrpc, {Node, Reason}};
{ok, Rate} ->
{ok, Rate}
end.
%% Get the current rate. Not the current sampler data.
current_rate() ->
current_rate_cluster() ->
Fun =
fun
(Node, Cluster) when is_map(Cluster) ->
@ -133,31 +161,6 @@ current_rate() ->
{ok, Rate}
end.
current_rate(all) ->
current_rate();
current_rate(Node) when Node == node() ->
try
{ok, Rate} = do_call(current_rate),
{ok, Rate}
catch
_E:R ->
?SLOG(warning, #{msg => "dashboard_monitor_error", reason => R}),
%% Rate map 0, ensure api will not crash.
%% When joining cluster, dashboard monitor restart.
Rate0 = [
{Key, 0}
|| Key <- ?GAUGE_SAMPLER_LIST ++ maps:values(?DELTA_SAMPLER_RATE_MAP)
],
{ok, maps:from_list(Rate0)}
end;
current_rate(Node) ->
case emqx_dashboard_proto_v1:current_rate(Node) of
{badrpc, Reason} ->
{badrpc, {Node, Reason}};
{ok, Rate} ->
{ok, Rate}
end.
%% -------------------------------------------------------------------------------------------------
%% gen_server functions
@ -173,7 +176,9 @@ handle_call(current_rate, _From, State = #state{last = Last}) ->
NowTime = erlang:system_time(millisecond),
NowSamplers = sample(NowTime),
Rate = cal_rate(NowSamplers, Last),
{reply, {ok, Rate}, State};
NonRateValue = non_rate_value(),
Samples = maps:merge(Rate, NonRateValue),
{reply, {ok, Samples}, State};
handle_call(_Request, _From, State = #state{}) ->
{reply, ok, State}.
@ -256,8 +261,16 @@ merge_cluster_sampler_map(M1, M2) ->
merge_cluster_rate(Node, Cluster) ->
Fun =
fun
(topics, Value, NCluster) ->
NCluster#{topics => Value};
%% cluster-synced values
(topics, V, NCluster) ->
NCluster#{topics => V};
(retained_msg_count, V, NCluster) ->
NCluster#{retained_msg_count => V};
(license_quota, V, NCluster) ->
NCluster#{license_quota => V};
%% for cluster sample, ignore node_uptime
(node_uptime, _V, NCluster) ->
NCluster;
(Key, Value, NCluster) ->
ClusterValue = maps:get(Key, NCluster, 0),
NCluster#{Key => Value + ClusterValue}
@ -409,3 +422,26 @@ stats(received_bytes) -> emqx_metrics:val('bytes.received');
stats(sent) -> emqx_metrics:val('messages.sent');
stats(sent_bytes) -> emqx_metrics:val('bytes.sent');
stats(dropped) -> emqx_metrics:val('messages.dropped').
%% -------------------------------------------------------------------------------------------------
%% Retained && License Quota
%% the non rate values should be same on all nodes
non_rate_value() ->
(license_quota())#{
retained_msg_count => emqx_retainer:retained_count(),
node_uptime => emqx_sys:uptime()
}.
-if(?EMQX_RELEASE_EDITION == ee).
license_quota() ->
case emqx_license_checker:limits() of
{ok, #{max_connections := Quota}} ->
#{license_quota => Quota};
{error, no_license} ->
#{license_quota => 0}
end.
-else.
license_quota() ->
#{}.
-endif.

View File

@ -1,5 +1,17 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2019-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%% Copyright (c) 2020-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_dashboard_monitor_api).
@ -133,13 +145,15 @@ dashboard_samplers_fun(Latest) ->
end
end.
monitor_current(get, #{bindings := []}) ->
emqx_utils_api:with_node_or_cluster(erlang:node(), fun emqx_dashboard_monitor:current_rate/1);
monitor_current(get, #{bindings := Bindings}) ->
RawNode = maps:get(node, Bindings, <<"all">>),
emqx_utils_api:with_node_or_cluster(RawNode, fun current_rate/1).
-spec current_rate(atom()) ->
{error, term()}
| {ok, Result :: map()}.
current_rate(Node) ->
%% Node :: 'all' or `NodeName`
case emqx_dashboard_monitor:current_rate(Node) of
{badrpc, _} = BadRpc ->
{error, BadRpc};

View File

@ -31,10 +31,13 @@ all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
meck:new(emqx_retainer, [non_strict, passthrough, no_history, no_link]),
meck:expect(emqx_retainer, retained_count, fun() -> 0 end),
emqx_mgmt_api_test_util:init_suite([]),
Config.
end_per_suite(_Config) ->
meck:unload([emqx_retainer]),
emqx_mgmt_api_test_util:end_suite([]).
t_monitor_samplers_all(_Config) ->
@ -198,5 +201,5 @@ waiting_emqx_stats_and_monitor_update(WaitKey) ->
end,
meck:unload([emqx_stats]),
%% manually call monitor update
_ = emqx_dashboard_monitor:current_rate(),
_ = emqx_dashboard_monitor:current_rate_cluster(),
ok.

View File

@ -1,6 +1,6 @@
{application, emqx_license, [
{description, "EMQX License"},
{vsn, "5.0.14"},
{vsn, "5.0.15"},
{modules, []},
{registered, [emqx_license_sup]},
{applications, [kernel, stdlib, emqx_ctl]},

View File

@ -30,6 +30,7 @@
start_link/2,
update/1,
dump/0,
expiry_epoch/0,
purge/0,
limits/0,
print_warnings/1
@ -67,6 +68,10 @@ update(License) ->
dump() ->
gen_server:call(?MODULE, dump, infinity).
-spec expiry_epoch() -> integer().
expiry_epoch() ->
gen_server:call(?MODULE, expiry_epoch, infinity).
-spec limits() -> {ok, limits()} | {error, any()}.
limits() ->
try ets:lookup(?LICENSE_TAB, limits) of
@ -111,6 +116,9 @@ handle_call({update, License}, _From, #{license := Old} = State) ->
{reply, check_license(License), State1#{license => License}};
handle_call(dump, _From, #{license := License} = State) ->
{reply, emqx_license_parser:dump(License), State};
handle_call(expiry_epoch, _From, #{license := License} = State) ->
ExpiryEpoch = date_to_expiry_epoch(emqx_license_parser:expiry_date(License)),
{reply, ExpiryEpoch, State};
handle_call(purge, _From, State) ->
_ = ets:delete_all_objects(?LICENSE_TAB),
{reply, ok, State};
@ -234,6 +242,12 @@ small_customer_overdue(_CType, _DaysLeft) -> false.
non_official_license_overdue(?OFFICIAL, _) -> false;
non_official_license_overdue(_, DaysLeft) -> DaysLeft < 0.
%% 62167219200 =:= calendar:datetime_to_gregorian_seconds({{1970, 1, 1}, {0, 0, 0}}).
-define(EPOCH_START, 62167219200).
-spec date_to_expiry_epoch(calendar:date()) -> Seconds :: non_neg_integer().
date_to_expiry_epoch({Y, M, D}) ->
calendar:datetime_to_gregorian_seconds({{Y, M, D}, {0, 0, 0}}) - ?EPOCH_START.
apply_limits(Limits) ->
ets:insert(?LICENSE_TAB, {limits, Limits}).

View File

@ -179,6 +179,11 @@ app_deps(App, RebootApps) ->
%% `emqx_bridge' is special in that it needs all the bridges apps to
%% be started before it, so that, when it loads the bridges from
%% configuration, the bridge app and its dependencies need to be up.
%%
%% `emqx_connector' also needs to start all connector dependencies for the same reason.
%% Since standalone apps like `emqx_mongodb' are already dependencies of `emqx_bridge_*'
%% apps, we may apply the same tactic for `emqx_connector' and inject individual bridges
%% as its dependencies.
inject_bridge_deps(RebootAppDeps) ->
BridgeApps = [
App
@ -189,6 +194,8 @@ inject_bridge_deps(RebootAppDeps) ->
fun
({emqx_bridge, Deps0}) when is_list(Deps0) ->
{emqx_bridge, Deps0 ++ BridgeApps};
({emqx_connector, Deps0}) when is_list(Deps0) ->
{emqx_connector, Deps0 ++ BridgeApps};
(App) ->
App
end,

View File

@ -69,7 +69,8 @@ handle_call({invite_async, Node, JoinTo}, _From, State) ->
undefined ->
Caller = self(),
Task = spawn_link_invite_worker(Node, JoinTo, Caller),
{reply, ok, State#{Node => Task}};
State1 = remove_finished_task(Node, State),
{reply, ok, State1#{Node => Task}};
WorkerPid ->
{reply, {error, {already_started, WorkerPid}}, State}
end;
@ -157,6 +158,11 @@ find_node_name_via_worker_pid(WorkerPid, {Key, Task, I}) ->
find_node_name_via_worker_pid(WorkerPid, maps:next(I))
end.
remove_finished_task(Node, State = #{history := History}) ->
State#{history => maps:remove(Node, History)};
remove_finished_task(_Node, State) ->
State.
state_to_invitation_status(State) ->
History = maps:get(history, State, #{}),
{Succ, Failed} = lists:foldl(

View File

@ -190,7 +190,7 @@ t_cluster_invite_async(Config) ->
lists:sort(Core1Resp)
),
%% force leave the core2 and replicant
%% force leave the core2
{204} = rpc:call(
Core1,
emqx_mgmt_api_cluster,
@ -260,7 +260,41 @@ t_cluster_invite_async(Config) ->
}
],
lists:sort(Core1Resp3)
).
),
%% force leave the core2
{204} = rpc:call(
Core1,
emqx_mgmt_api_cluster,
force_leave,
[delete, #{bindings => #{node => atom_to_binary(Core2)}}]
),
%% invite core2 again
?assertMatch(
{200},
Invite(Core2)
),
%% assert: core2 is in_progress status
{200, InvitationStatus1} = rpc:call(Core1, emqx_mgmt_api_cluster, get_invitation_status, [
get, #{}
]),
?assertMatch(
#{succeed := [], in_progress := [#{node := Core2}], failed := []},
InvitationStatus1
),
%% waiting the async invitation_succeed
?assertMatch({succeed, _}, waiting_the_async_invitation_succeed(Core1, Core2)),
{200, InvitationStatus2} = rpc:call(Core1, emqx_mgmt_api_cluster, get_invitation_status, [
get, #{}
]),
?assertMatch(
#{succeed := [#{node := Core2}], in_progress := [], failed := []},
InvitationStatus2
),
ok.
cluster(Config) ->
NodeSpec = #{apps => ?APPS},

View File

@ -392,7 +392,7 @@ t_create_webhook_v1_bridges_api({'init', Config}) ->
lists:foreach(
fun(App) ->
_ = application:stop(App),
{ok, [App]} = application:ensure_all_started(App)
{ok, _} = application:ensure_all_started(App)
end,
[emqx_connector, emqx_bridge]
),

View File

@ -1,12 +1,11 @@
{application, emqx_mongodb, [
{description, "EMQX MongoDB Connector"},
{vsn, "0.1.4"},
{vsn, "0.1.5"},
{registered, []},
{applications, [
kernel,
stdlib,
mongodb,
emqx_connector,
emqx_resource
]},
{env, []},

View File

@ -1,12 +1,11 @@
{application, emqx_mysql, [
{description, "EMQX MySQL Database Connector"},
{vsn, "0.1.6"},
{vsn, "0.1.7"},
{registered, []},
{applications, [
kernel,
stdlib,
mysql,
emqx_connector,
emqx_resource
]},
{env, []},

View File

@ -1,12 +1,11 @@
{application, emqx_postgresql, [
{description, "EMQX PostgreSQL Database Connector"},
{vsn, "0.1.1"},
{vsn, "0.1.2"},
{registered, []},
{applications, [
kernel,
stdlib,
epgsql,
emqx_connector,
emqx_resource
]},
{env, []},

View File

@ -16,3 +16,30 @@
-define(APP, emqx_prometheus).
-define(PROMETHEUS, [prometheus]).
-define(PROMETHEUS_DEFAULT_REGISTRY, default).
-define(PROMETHEUS_AUTH_REGISTRY, '/prometheus/auth').
-define(PROMETHEUS_AUTH_COLLECTOR, emqx_prometheus_auth).
-define(PROMETHEUS_DATA_INTEGRATION_REGISTRY, '/prometheus/data_integration').
-define(PROMETHEUS_DATA_INTEGRATION_COLLECTOR, emqx_prometheus_data_integration).
-define(PROMETHEUS_ALL_REGISTRYS, [
?PROMETHEUS_DEFAULT_REGISTRY,
?PROMETHEUS_AUTH_REGISTRY,
?PROMETHEUS_DATA_INTEGRATION_REGISTRY
]).
-define(PROM_DATA_MODE__NODE, node).
-define(PROM_DATA_MODE__ALL_NODES_AGGREGATED, all_nodes_aggregated).
-define(PROM_DATA_MODE__ALL_NODES_UNAGGREGATED, all_nodes_unaggregated).
-define(PROM_DATA_MODES, [
?PROM_DATA_MODE__NODE,
?PROM_DATA_MODE__ALL_NODES_AGGREGATED,
?PROM_DATA_MODE__ALL_NODES_UNAGGREGATED
]).
-define(PROM_DATA_MODE_KEY__, prom_data_mode).
-define(PUT_PROM_DATA_MODE(MODE__), erlang:put(?PROM_DATA_MODE_KEY__, MODE__)).
-define(GET_PROM_DATA_MODE(), erlang:get(?PROM_DATA_MODE_KEY__)).

View File

@ -3,7 +3,9 @@
{deps, [
{emqx, {path, "../emqx"}},
{emqx_utils, {path, "../emqx_utils"}},
{prometheus, {git, "https://github.com/emqx/prometheus.erl", {tag, "v4.10.0.1"}}}
{emqx_auth, {path, "../emqx_auth"}},
{emqx_resource, {path, "../emqx_resource"}},
{prometheus, {git, "https://github.com/emqx/prometheus.erl", {tag, "v4.10.0.2"}}}
]}.
{edoc_opts, [{preprocess, true}]}.

View File

@ -5,7 +5,7 @@
{vsn, "5.0.19"},
{modules, []},
{registered, [emqx_prometheus_sup]},
{applications, [kernel, stdlib, prometheus, emqx, emqx_management]},
{applications, [kernel, stdlib, prometheus, emqx, emqx_auth, emqx_resource, emqx_management]},
{mod, {emqx_prometheus_app, []}},
{env, []},
{licenses, ["Apache-2.0"]},

File diff suppressed because it is too large Load Diff

View File

@ -18,20 +18,37 @@
-behaviour(minirest_api).
-include("emqx_prometheus.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-import(
hoconsc,
[
mk/2,
ref/1
]
).
-export([
api_spec/0,
paths/0,
schema/1
schema/1,
fields/1
]).
-export([
setting/2,
stats/2
stats/2,
auth/2,
data_integration/2
]).
-export([lookup_from_local_nodes/3]).
-define(TAGS, [<<"Monitor">>]).
-define(IS_TRUE(Val), ((Val =:= true) orelse (Val =:= <<"true">>))).
-define(IS_FALSE(Val), ((Val =:= false) orelse (Val =:= <<"false">>))).
api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
@ -39,7 +56,9 @@ api_spec() ->
paths() ->
[
"/prometheus",
"/prometheus/stats"
"/prometheus/auth",
"/prometheus/stats",
"/prometheus/data_integration"
].
schema("/prometheus") ->
@ -61,6 +80,19 @@ schema("/prometheus") ->
#{200 => prometheus_setting_response()}
}
};
schema("/prometheus/auth") ->
#{
'operationId' => auth,
get =>
#{
description => ?DESC(get_prom_auth_data),
tags => ?TAGS,
parameters => [ref(mode)],
security => security(),
responses =>
#{200 => prometheus_data_schema()}
}
};
schema("/prometheus/stats") ->
#{
'operationId' => stats,
@ -68,6 +100,20 @@ schema("/prometheus/stats") ->
#{
description => ?DESC(get_prom_data),
tags => ?TAGS,
parameters => [ref(mode)],
security => security(),
responses =>
#{200 => prometheus_data_schema()}
}
};
schema("/prometheus/data_integration") ->
#{
'operationId' => data_integration,
get =>
#{
description => ?DESC(get_prom_data_integration_data),
tags => ?TAGS,
parameters => [ref(mode)],
security => security(),
responses =>
#{200 => prometheus_data_schema()}
@ -79,6 +125,41 @@ security() ->
true -> [#{'basicAuth' => []}, #{'bearerAuth' => []}];
false -> []
end.
%% erlfmt-ignore
fields(mode) ->
[
{mode,
mk(
hoconsc:enum(?PROM_DATA_MODES),
#{
default => node,
desc => <<"
Metrics format mode.
`node`:
Return metrics from local node. And it is the default behaviour if `mode` not specified.
`all_nodes_aggregated`:
Return metrics for all nodes.
And if possible, calculate the arithmetic sum or logical sum of the indicators of all nodes.
`all_nodes_unaggregated`:
Return metrics from all nodes, and the metrics are not aggregated.
The node name will be included in the returned results to
indicate that certain metrics were returned on a certain node.
">>,
in => query,
required => false,
example => node
}
)}
].
%% bpapi
lookup_from_local_nodes(M, F, A) ->
erlang:apply(M, F, A).
%%--------------------------------------------------------------------
%% API Handler funcs
%%--------------------------------------------------------------------
@ -100,24 +181,60 @@ setting(put, #{body := Body}) ->
{500, 'INTERNAL_ERROR', Message}
end.
stats(get, #{headers := Headers}) ->
Type =
case maps:get(<<"accept">>, Headers, <<"text/plain">>) of
<<"application/json">> -> <<"json">>;
_ -> <<"prometheus">>
end,
Data = emqx_prometheus:collect(Type),
case Type of
<<"json">> ->
{200, Data};
<<"prometheus">> ->
{200, #{<<"content-type">> => <<"text/plain">>}, Data}
end.
stats(get, #{headers := Headers, query_string := Qs}) ->
collect(emqx_prometheus, collect_opts(Headers, Qs)).
auth(get, #{headers := Headers, query_string := Qs}) ->
collect(emqx_prometheus_auth, collect_opts(Headers, Qs)).
data_integration(get, #{headers := Headers, query_string := Qs}) ->
collect(emqx_prometheus_data_integration, collect_opts(Headers, Qs)).
%%--------------------------------------------------------------------
%% Internal funcs
%%--------------------------------------------------------------------
collect(Module, #{type := Type, mode := Mode}) ->
%% `Mode` is used to control the format of the returned data
%% It will used in callback `Module:collect_mf/1` to fetch data from node or cluster
%% And use this mode parameter to determine the formatting method of the returned information.
%% Since the arity of the callback function has been fixed.
%% so it is placed in the process dictionary of the current process.
?PUT_PROM_DATA_MODE(Mode),
Data =
case erlang:function_exported(Module, collect, 1) of
true ->
erlang:apply(Module, collect, [Type]);
false ->
?SLOG(error, #{
msg => "prometheus callback module not found, empty data responded",
module_name => Module
}),
<<>>
end,
gen_response(Type, Data).
collect_opts(Headers, Qs) ->
#{type => response_type(Headers), mode => mode(Qs)}.
response_type(#{<<"accept">> := <<"application/json">>}) ->
<<"json">>;
response_type(_) ->
<<"prometheus">>.
mode(#{<<"mode">> := Mode}) ->
case lists:member(Mode, ?PROM_DATA_MODES) of
true -> Mode;
false -> ?PROM_DATA_MODE__NODE
end;
mode(_) ->
?PROM_DATA_MODE__NODE.
gen_response(<<"json">>, Data) ->
{200, Data};
gen_response(<<"prometheus">>, Data) ->
{200, #{<<"content-type">> => <<"text/plain">>}, Data}.
prometheus_setting_request() ->
[{prometheus, #{type := Setting}}] = emqx_prometheus_schema:roots(),
emqx_dashboard_swagger:schema_with_examples(
@ -181,7 +298,7 @@ recommend_setting_example() ->
prometheus_data_schema() ->
#{
description =>
<<"Get Prometheus Data. Note that support for JSON output is deprecated and will be removed in v5.2.">>,
<<"Get Prometheus Data.">>,
content =>
[
{'text/plain', #{schema => #{type => string}}},

View File

@ -0,0 +1,498 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_prometheus_auth).
-export([
deregister_cleanup/1,
collect_mf/2,
collect_metrics/2
]).
-export([collect/1]).
%% for bpapi
-behaviour(emqx_prometheus_cluster).
-export([
fetch_from_local_node/1,
fetch_cluster_consistented_data/0,
aggre_or_zip_init_acc/0,
logic_sum_metrics/0
]).
%% %% @private
-export([
zip_json_auth_metrics/3
]).
-include("emqx_prometheus.hrl").
-include_lib("emqx_auth/include/emqx_authn_chains.hrl").
-include_lib("prometheus/include/prometheus.hrl").
-import(
prometheus_model_helpers,
[
create_mf/5,
gauge_metric/1,
gauge_metrics/1,
counter_metrics/1
]
).
-type authn_metric_name() ::
emqx_authn_enable
| emqx_authn_status
| emqx_authn_nomatch
| emqx_authn_total
| emqx_authn_success
| emqx_authn_failed.
-type authz_metric_name() ::
emqx_authz_enable
| emqx_authz_status
| emqx_authz_nomatch
| emqx_authz_total
| emqx_authz_allow
| emqx_authz_deny.
%% Please don't remove this attribute, prometheus uses it to
%% automatically register collectors.
-behaviour(prometheus_collector).
%%--------------------------------------------------------------------
%% Macros
%%--------------------------------------------------------------------
-define(METRIC_NAME_PREFIX, "emqx_auth_").
-define(MG(K, MAP), maps:get(K, MAP)).
-define(MG0(K, MAP), maps:get(K, MAP, 0)).
-define(PG0(K, PROPLISTS), proplists:get_value(K, PROPLISTS, 0)).
%%--------------------------------------------------------------------
%% Collector API
%%--------------------------------------------------------------------
%% @private
deregister_cleanup(_) -> ok.
%% @private
-spec collect_mf(_Registry, Callback) -> ok when
_Registry :: prometheus_registry:registry(),
Callback :: prometheus_collector:collect_mf_callback().
%% erlfmt-ignore
collect_mf(?PROMETHEUS_AUTH_REGISTRY, Callback) ->
RawData = emqx_prometheus_cluster:raw_data(?MODULE, ?GET_PROM_DATA_MODE()),
ok = add_collect_family(Callback, authn_metric_meta(), ?MG(authn_data, RawData)),
ok = add_collect_family(Callback, authn_users_count_metric_meta(), ?MG(authn_users_count_data, RawData)),
ok = add_collect_family(Callback, authz_metric_meta(), ?MG(authz_data, RawData)),
ok = add_collect_family(Callback, authz_rules_count_metric_meta(), ?MG(authz_rules_count_data, RawData)),
ok = add_collect_family(Callback, banned_count_metric_meta(), ?MG(banned_count_data, RawData)),
ok;
collect_mf(_, _) ->
ok.
%% @private
collect(<<"json">>) ->
RawData = emqx_prometheus_cluster:raw_data(?MODULE, ?GET_PROM_DATA_MODE()),
#{
emqx_authn => collect_json_data(?MG(authn_data, RawData)),
emqx_authz => collect_json_data(?MG(authz_data, RawData)),
emqx_banned => collect_banned_data()
};
collect(<<"prometheus">>) ->
prometheus_text_format:format(?PROMETHEUS_AUTH_REGISTRY).
add_collect_family(Callback, MetricWithType, Data) ->
_ = [add_collect_family(Name, Data, Callback, Type) || {Name, Type} <- MetricWithType],
ok.
add_collect_family(Name, Data, Callback, Type) ->
Callback(create_mf(Name, _Help = <<"">>, Type, ?MODULE, Data)).
collect_metrics(Name, Metrics) ->
collect_auth(Name, Metrics).
%% behaviour
fetch_from_local_node(Mode) ->
{node(self()), #{
authn_data => authn_data(Mode),
authz_data => authz_data(Mode)
}}.
fetch_cluster_consistented_data() ->
#{
authn_users_count_data => authn_users_count_data(),
authz_rules_count_data => authz_rules_count_data(),
banned_count_data => banned_count_data()
}.
aggre_or_zip_init_acc() ->
#{
authn_data => maps:from_keys(authn_metric(names), []),
authz_data => maps:from_keys(authz_metric(names), [])
}.
logic_sum_metrics() ->
[
emqx_authn_enable,
emqx_authn_status,
emqx_authz_enable,
emqx_authz_status
].
%%--------------------------------------------------------------------
%% Collector
%%--------------------------------------------------------------------
%%====================
%% Authn overview
collect_auth(K = emqx_authn_enable, Data) ->
gauge_metrics(?MG(K, Data));
collect_auth(K = emqx_authn_status, Data) ->
gauge_metrics(?MG(K, Data));
collect_auth(K = emqx_authn_nomatch, Data) ->
counter_metrics(?MG(K, Data));
collect_auth(K = emqx_authn_total, Data) ->
counter_metrics(?MG(K, Data));
collect_auth(K = emqx_authn_success, Data) ->
counter_metrics(?MG(K, Data));
collect_auth(K = emqx_authn_failed, Data) ->
counter_metrics(?MG(K, Data));
%%====================
%% Authn users count
%% Only provided for `password_based:built_in_database` and `scram:built_in_database`
collect_auth(K = emqx_authn_users_count, Data) ->
gauge_metrics(?MG(K, Data));
%%====================
%% Authz overview
collect_auth(K = emqx_authz_enable, Data) ->
gauge_metrics(?MG(K, Data));
collect_auth(K = emqx_authz_status, Data) ->
gauge_metrics(?MG(K, Data));
collect_auth(K = emqx_authz_nomatch, Data) ->
counter_metrics(?MG(K, Data));
collect_auth(K = emqx_authz_total, Data) ->
counter_metrics(?MG(K, Data));
collect_auth(K = emqx_authz_allow, Data) ->
counter_metrics(?MG(K, Data));
collect_auth(K = emqx_authz_deny, Data) ->
counter_metrics(?MG(K, Data));
%%====================
%% Authz rules count
%% Only provided for `file` and `built_in_database`
collect_auth(K = emqx_authz_rules_count, Data) ->
gauge_metrics(?MG(K, Data));
%%====================
%% Banned
collect_auth(emqx_banned_count, Data) ->
gauge_metric(Data).
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
%%========================================
%% AuthN (Authentication)
%%========================================
%%====================
%% Authn overview
authn_metric_meta() ->
[
{emqx_authn_enable, gauge},
{emqx_authn_status, gauge},
{emqx_authn_nomatch, counter},
{emqx_authn_total, counter},
{emqx_authn_success, counter},
{emqx_authn_failed, counter}
].
authn_metric(names) ->
emqx_prometheus_cluster:metric_names(authn_metric_meta()).
-spec authn_data(atom()) -> #{Key => [Point]} when
Key :: authn_metric_name(),
Point :: {[Label], Metric},
Label :: IdLabel,
IdLabel :: {id, AuthnName :: binary()},
Metric :: number().
authn_data(Mode) ->
Authns = emqx_config:get([authentication]),
lists:foldl(
fun(Key, AccIn) ->
AccIn#{Key => authn_backend_to_points(Mode, Key, Authns)}
end,
#{},
authn_metric(names)
).
-spec authn_backend_to_points(atom(), Key, list(Authn)) -> list(Point) when
Key :: authn_metric_name(),
Authn :: map(),
Point :: {[Label], Metric},
Label :: IdLabel,
IdLabel :: {id, AuthnName :: binary()},
Metric :: number().
authn_backend_to_points(Mode, Key, Authns) ->
do_authn_backend_to_points(Mode, Key, Authns, []).
do_authn_backend_to_points(_Mode, _K, [], AccIn) ->
lists:reverse(AccIn);
do_authn_backend_to_points(Mode, K, [Authn | Rest], AccIn) ->
Id = authenticator_id(Authn),
Point = {
with_node_label(Mode, [{id, Id}]),
do_metric(K, Authn, lookup_authn_metrics_local(Id))
},
do_authn_backend_to_points(Mode, K, Rest, [Point | AccIn]).
lookup_authn_metrics_local(Id) ->
case emqx_authn_api:lookup_from_local_node(?GLOBAL, Id) of
{ok, {_Node, Status, #{counters := Counters}, _ResourceMetrics}} ->
#{
emqx_authn_status => emqx_prometheus_cluster:status_to_number(Status),
emqx_authn_nomatch => ?MG0(nomatch, Counters),
emqx_authn_total => ?MG0(total, Counters),
emqx_authn_success => ?MG0(success, Counters),
emqx_authn_failed => ?MG0(failed, Counters)
};
{error, _Reason} ->
maps:from_keys(authn_metric(names) -- [emqx_authn_enable], 0)
end.
%%====================
%% Authn users count
authn_users_count_metric_meta() ->
[
{emqx_authn_users_count, gauge}
].
-define(AUTHN_MNESIA, emqx_authn_mnesia).
-define(AUTHN_SCRAM_MNESIA, emqx_authn_scram_mnesia).
authn_users_count_data() ->
Samples = lists:foldl(
fun
(#{backend := built_in_database, mechanism := password_based} = Authn, AccIn) ->
[auth_data_sample_point(authn, Authn, ?AUTHN_MNESIA) | AccIn];
(#{backend := built_in_database, mechanism := scram} = Authn, AccIn) ->
[auth_data_sample_point(authn, Authn, ?AUTHN_SCRAM_MNESIA) | AccIn];
(_, AccIn) ->
AccIn
end,
[],
emqx_config:get([authentication])
),
#{emqx_authn_users_count => Samples}.
%%========================================
%% AuthZ (Authorization)
%%========================================
%%====================
%% Authz overview
authz_metric_meta() ->
[
{emqx_authz_enable, gauge},
{emqx_authz_status, gauge},
{emqx_authz_nomatch, counter},
{emqx_authz_total, counter},
{emqx_authz_allow, counter},
{emqx_authz_deny, counter}
].
authz_metric(names) ->
emqx_prometheus_cluster:metric_names(authz_metric_meta()).
-spec authz_data(atom()) -> #{Key => [Point]} when
Key :: authz_metric_name(),
Point :: {[Label], Metric},
Label :: TypeLabel,
TypeLabel :: {type, AuthZType :: binary()},
Metric :: number().
authz_data(Mode) ->
Authzs = emqx_config:get([authorization, sources]),
lists:foldl(
fun(Key, AccIn) ->
AccIn#{Key => authz_backend_to_points(Mode, Key, Authzs)}
end,
#{},
authz_metric(names)
).
-spec authz_backend_to_points(atom(), Key, list(Authz)) -> list(Point) when
Key :: authz_metric_name(),
Authz :: map(),
Point :: {[Label], Metric},
Label :: TypeLabel,
TypeLabel :: {type, AuthZType :: binary()},
Metric :: number().
authz_backend_to_points(Mode, Key, Authzs) ->
do_authz_backend_to_points(Mode, Key, Authzs, []).
do_authz_backend_to_points(_Mode, _K, [], AccIn) ->
lists:reverse(AccIn);
do_authz_backend_to_points(Mode, K, [Authz | Rest], AccIn) ->
Type = maps:get(type, Authz),
Point = {
with_node_label(Mode, [{type, Type}]),
do_metric(K, Authz, lookup_authz_metrics_local(Type))
},
do_authz_backend_to_points(Mode, K, Rest, [Point | AccIn]).
lookup_authz_metrics_local(Type) ->
case emqx_authz_api_sources:lookup_from_local_node(Type) of
{ok, {_Node, Status, #{counters := Counters}, _ResourceMetrics}} ->
#{
emqx_authz_status => emqx_prometheus_cluster:status_to_number(Status),
emqx_authz_nomatch => ?MG0(nomatch, Counters),
emqx_authz_total => ?MG0(total, Counters),
emqx_authz_allow => ?MG0(allow, Counters),
emqx_authz_deny => ?MG0(deny, Counters)
};
{error, _Reason} ->
maps:from_keys(authz_metric(names) -- [emqx_authz_enable], 0)
end.
%%====================
%% Authz rules count
authz_rules_count_metric_meta() ->
[
{emqx_authz_rules_count, gauge}
].
-define(ACL_TABLE, emqx_acl).
authz_rules_count_data() ->
Samples = lists:foldl(
fun
(#{type := built_in_database} = Authz, AccIn) ->
[auth_data_sample_point(authz, Authz, ?ACL_TABLE) | AccIn];
(#{type := file}, AccIn) ->
#{annotations := #{rules := Rules}} = emqx_authz:lookup(file),
Size = erlang:length(Rules),
[{[{type, file}], Size} | AccIn];
(_, AccIn) ->
AccIn
end,
[],
emqx_config:get([authorization, sources])
),
#{emqx_authz_rules_count => Samples}.
%%========================================
%% Banned
%%========================================
%%====================
%% Banned count
banned_count_metric_meta() ->
[
{emqx_banned_count, gauge}
].
-define(BANNED_TABLE,
emqx_banned
).
banned_count_data() ->
mnesia_size(?BANNED_TABLE).
%%--------------------------------------------------------------------
%% Collect functions
%%--------------------------------------------------------------------
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% merge / zip formatting funcs for type `application/json`
collect_json_data(Data) ->
emqx_prometheus_cluster:collect_json_data(Data, fun zip_json_auth_metrics/3).
collect_banned_data() ->
#{emqx_banned_count => banned_count_data()}.
%% for initialized empty AccIn
%% The following fields will be put into Result
%% For Authn:
%% `id`, `emqx_authn_users_count`
%% For Authz:
%% `type`, `emqx_authz_rules_count`n
zip_json_auth_metrics(Key, Points, [] = _AccIn) ->
lists:foldl(
fun({Lables, Metric}, AccIn2) ->
LablesKVMap = maps:from_list(Lables),
Point = (maps:merge(LablesKVMap, users_or_rule_count(LablesKVMap)))#{Key => Metric},
[Point | AccIn2]
end,
[],
Points
);
zip_json_auth_metrics(Key, Points, AllResultedAcc) ->
ThisKeyResult = lists:foldl(emqx_prometheus_cluster:point_to_map_fun(Key), [], Points),
lists:zipwith(fun maps:merge/2, AllResultedAcc, ThisKeyResult).
users_or_rule_count(#{id := Id}) ->
#{emqx_authn_users_count := Points} = authn_users_count_data(),
case lists:keyfind([{id, Id}], 1, Points) of
{_, Metric} ->
#{emqx_authn_users_count => Metric};
false ->
#{}
end;
users_or_rule_count(#{type := Type}) ->
#{emqx_authz_rules_count := Points} = authz_rules_count_data(),
case lists:keyfind([{type, Type}], 1, Points) of
{_, Metric} ->
#{emqx_authz_rules_count => Metric};
false ->
#{}
end;
users_or_rule_count(_) ->
#{}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Helper funcs
authenticator_id(Authn) ->
emqx_authn_chains:authenticator_id(Authn).
auth_data_sample_point(authn, Authn, Tab) ->
Size = mnesia_size(Tab),
Id = authenticator_id(Authn),
{[{id, Id}], Size};
auth_data_sample_point(authz, #{type := Type} = _Authz, Tab) ->
Size = mnesia_size(Tab),
{[{type, Type}], Size}.
mnesia_size(Tab) ->
mnesia:table_info(Tab, size).
do_metric(emqx_authn_enable, #{enable := B}, _) ->
emqx_prometheus_cluster:boolean_to_number(B);
do_metric(emqx_authz_enable, #{enable := B}, _) ->
emqx_prometheus_cluster:boolean_to_number(B);
do_metric(K, _, Metrics) ->
?MG0(K, Metrics).
with_node_label(?PROM_DATA_MODE__NODE, Labels) ->
Labels;
with_node_label(?PROM_DATA_MODE__ALL_NODES_AGGREGATED, Labels) ->
Labels;
with_node_label(?PROM_DATA_MODE__ALL_NODES_UNAGGREGATED, Labels) ->
[{node, node(self())} | Labels].

View File

@ -0,0 +1,205 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_prometheus_cluster).
-include("emqx_prometheus.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-export([
raw_data/2,
collect_json_data/2,
aggre_cluster/3,
%% with_node_name_label/2,
point_to_map_fun/1,
boolean_to_number/1,
status_to_number/1,
metric_names/1
]).
-callback fetch_cluster_consistented_data() -> map().
-callback fetch_from_local_node(atom()) -> {node(), map()}.
-callback aggre_or_zip_init_acc() -> map().
-callback logic_sum_metrics() -> list().
-define(MG(K, MAP), maps:get(K, MAP)).
-define(PG0(K, PROPLISTS), proplists:get_value(K, PROPLISTS, 0)).
%% Assemble the raw metric data for Module according to the collection
%% mode. Cluster-consistent data (identical on every node) is fetched once
%% and merged over the per-node metrics in every mode.
raw_data(Module, undefined) ->
    %% TODO: for push gateway, the format mode should be configurable
    raw_data(Module, ?PROM_DATA_MODE__NODE);
%% Aggregated: points from all running nodes are combined per label set.
raw_data(Module, ?PROM_DATA_MODE__ALL_NODES_AGGREGATED = Mode) ->
    AllNodesMetrics = aggre_cluster(Module, Mode),
    Cluster = Module:fetch_cluster_consistented_data(),
    maps:merge(AllNodesMetrics, Cluster);
%% Unaggregated: per-node point lists are concatenated (a node label is
%% added by the owning module's point builders).
raw_data(Module, ?PROM_DATA_MODE__ALL_NODES_UNAGGREGATED = Mode) ->
    AllNodesMetrics = zip_cluster_data(Module, Mode),
    Cluster = Module:fetch_cluster_consistented_data(),
    maps:merge(AllNodesMetrics, Cluster);
%% Node mode: only the local node's metrics.
raw_data(Module, ?PROM_DATA_MODE__NODE = Mode) ->
    {_Node, LocalNodeMetrics} = Module:fetch_from_local_node(Mode),
    Cluster = Module:fetch_cluster_consistented_data(),
    maps:merge(LocalNodeMetrics, Cluster).
%% Fan out an RPC asking every running node to evaluate
%% Module:fetch_from_local_node(Mode); returns the erpc multicall result
%% list (a mix of {ok, _} and failure tuples, filtered by the callers).
fetch_data_from_all_nodes(Module, Mode) ->
    emqx_prometheus_proto_v2:raw_prom_data(
        mria:running_nodes(), Module, fetch_from_local_node, [Mode]
    ).
%% Fold a raw-data map into a list of JSON-ready maps using the caller's
%% 3-arity formatting function (Key, Points, Acc).
%%
%% Improvements: the fun is handed to maps:fold/3 directly — the previous
%% pass-through wrapper lambda added nothing; and non-map Data now raises
%% the same badarg as a bad Func instead of leaking a {badmap, _} error
%% from maps:fold/3.
collect_json_data(Data, Func) when is_map(Data), is_function(Func, 3) ->
    maps:fold(Func, [], Data);
collect_json_data(_, _) ->
    error(badarg).
%% Fetch metrics from every running node and combine them per metric key
%% and label set, starting from the module's initial accumulator.
aggre_cluster(Module, Mode) ->
    do_aggre_cluster(
        Module:logic_sum_metrics(),
        fetch_data_from_all_nodes(Module, Mode),
        Module:aggre_or_zip_init_acc()
    ).

%% Exported variant for callers that already hold the RPC result list.
aggre_cluster(LogicSumKs, ResL, Init) ->
    do_aggre_cluster(LogicSumKs, ResL, Init).

%% Walk the multicall results; merge each successful node's metric-group
%% map into the accumulator, skipping any failed node results.
do_aggre_cluster(_LogicSumKs, [], AccIn) ->
    AccIn;
do_aggre_cluster(LogicSumKs, [{ok, {_NodeName, NodeMetric}} | Rest], AccIn) ->
    do_aggre_cluster(
        LogicSumKs,
        Rest,
        maps:fold(
            fun(K, V, AccIn0) ->
                AccIn0#{K => aggre_metric(LogicSumKs, V, ?MG(K, AccIn0))}
            end,
            AccIn,
            NodeMetric
        )
    );
%% Any non-{ok, _} erpc result (error/exit/throw) is silently dropped.
do_aggre_cluster(LogicSumKs, [{_, _} | Rest], AccIn) ->
    do_aggre_cluster(LogicSumKs, Rest, AccIn).

%% Merge one node's inner map (metric name => point list) into the
%% accumulated map for the same data group.
aggre_metric(LogicSumKs, NodeMetrics, AccIn0) ->
    lists:foldl(
        fun(K, AccIn) ->
            NAccL = do_aggre_metric(
                K, LogicSumKs, ?MG(K, NodeMetrics), ?MG(K, AccIn)
            ),
            AccIn#{K => NAccL}
        end,
        AccIn0,
        maps:keys(NodeMetrics)
    ).

%% Fold each {Labels, Metric} point of one metric into the accumulated
%% point list via sum/4.
do_aggre_metric(K, LogicSumKs, NodeMetrics, AccL) ->
    lists:foldl(
        fun(Point = {_Labels, _Metric}, AccIn) ->
            sum(K, LogicSumKs, Point, AccIn)
        end,
        AccL,
        NodeMetrics
    ).
%% Fold one point into the accumulated point list for metric K: when a
%% point with identical labels already exists, combine the two values;
%% otherwise prepend the new point as-is.
sum(K, LogicSumKs, {Labels, Metric} = Point, MetricAccL) ->
    case lists:keytake(Labels, 1, MetricAccL) of
        {value, {Labels, MetricAcc}, NMetricAccL} ->
            NPoint = {Labels, do_sum(K, LogicSumKs, Metric, MetricAcc)},
            [NPoint | NMetricAccL];
        false ->
            [Point | MetricAccL]
    end.

%% Metrics listed in LogicSumKs (flag-like values such as enables/statuses)
%% combine via logic_sum/2 — yielding 1 only when both sides are positive;
%% all other metrics combine by plain addition.
do_sum(K, LogicSumKs, Metric, MetricAcc) ->
    case lists:member(K, LogicSumKs) of
        true ->
            logic_sum(Metric, MetricAcc);
        false ->
            Metric + MetricAcc
    end.
%% Collect metrics from every running node and concatenate (rather than
%% sum) the per-key point lists — used by the unaggregated all-nodes mode.
zip_cluster_data(Module, Mode) ->
    zip_cluster(
        fetch_data_from_all_nodes(Module, Mode),
        Module:aggre_or_zip_init_acc()
    ).

%% Walk the multicall results, appending each successful node's point
%% lists; failed node results are skipped.
zip_cluster([], AccIn) ->
    AccIn;
zip_cluster([{ok, {_NodeName, NodeMetric}} | Rest], AccIn) ->
    zip_cluster(
        Rest,
        maps:fold(
            fun(K, V, AccIn0) ->
                AccIn0#{
                    K => do_zip_cluster(V, ?MG(K, AccIn0))
                }
            end,
            AccIn,
            NodeMetric
        )
    );
%% Any non-{ok, _} erpc result is silently dropped.
zip_cluster([{_, _} | Rest], AccIn) ->
    zip_cluster(Rest, AccIn).
%% Concatenate one node's point lists onto the accumulated lists, key by
%% key; every key in NodeMetrics must already exist in the accumulator.
do_zip_cluster(NodeMetrics, Acc0) ->
    maps:fold(
        fun(Key, NodePoints, Acc) ->
            Acc#{Key => NodePoints ++ maps:get(Key, Acc)}
        end,
        Acc0,
        NodeMetrics
    ).
%% Return a fold fun that turns a {Labels, Value} point into a map of its
%% labels plus Value stored under Key, prepending it to the accumulator.
point_to_map_fun(Key) ->
    fun({Labels, Value}, Acc) ->
        [(maps:from_list(Labels))#{Key => Value} | Acc]
    end.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Logical combination for flag-like metrics: 1 only when both operands
%% are positive, otherwise 0.
logic_sum(N1, N2) when N1 > 0, N2 > 0 ->
    1;
logic_sum(_, _) ->
    0.
%% Map a boolean flag to its gauge value (true -> 1, false -> 0); any other
%% term crashes with function_clause.
boolean_to_number(true) -> 1;
boolean_to_number(false) -> 0.
%% Map a resource status to a gauge value: only a connected resource counts
%% as 1; connecting/disconnected/stopped and any other status count as 0.
status_to_number(?status_connected) -> 1;
status_to_number(?status_connecting) -> 0;
status_to_number(?status_disconnected) -> 0;
status_to_number(?rm_status_stopped) -> 0;
status_to_number(_) -> 0.
%% Project a [{Name, Type}] metric meta list down to just the names.
metric_names(MetricWithType) when is_list(MetricWithType) ->
    lists:map(fun({Name, _Type}) -> Name end, MetricWithType).

View File

@ -25,6 +25,10 @@
-export([conf/0, is_push_gateway_server_enabled/1]).
-export([to_recommend_type/1]).
-ifdef(TEST).
-export([all_collectors/0]).
-endif.
update(Config) ->
case
emqx_conf:update(
@ -101,7 +105,7 @@ post_config_update(_ConfPath, _Req, _NewConf, _OldConf, _AppEnvs) ->
ok.
update_prometheus(AppEnvs) ->
PrevCollectors = prometheus_registry:collectors(default),
PrevCollectors = all_collectors(),
CurCollectors = proplists:get_value(collectors, proplists:get_value(prometheus, AppEnvs)),
lists:foreach(
fun prometheus_registry:deregister_collector/1,
@ -113,6 +117,15 @@ update_prometheus(AppEnvs) ->
),
application:set_env(AppEnvs).
all_collectors() ->
lists:foldl(
fun(Registry, AccIn) ->
prometheus_registry:collectors(Registry) ++ AccIn
end,
_InitAcc = [],
?PROMETHEUS_ALL_REGISTRYS
).
update_push_gateway(Prometheus) ->
case is_push_gateway_server_enabled(Prometheus) of
true ->

View File

@ -0,0 +1,543 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_prometheus_data_integration).
-export([
deregister_cleanup/1,
collect_mf/2,
collect_metrics/2
]).
-export([collect/1]).
-export([
zip_json_data_integration_metrics/3
]).
%% for bpapi
-behaviour(emqx_prometheus_cluster).
-export([
fetch_from_local_node/1,
fetch_cluster_consistented_data/0,
aggre_or_zip_init_acc/0,
logic_sum_metrics/0
]).
-export([add_collect_family/4]).
-include("emqx_prometheus.hrl").
-include_lib("prometheus/include/prometheus.hrl").
-import(
prometheus_model_helpers,
[
create_mf/5,
gauge_metric/1,
gauge_metrics/1,
counter_metrics/1
]
).
%% Please don't remove this attribute, prometheus uses it to
%% automatically register collectors.
-behaviour(prometheus_collector).
%%--------------------------------------------------------------------
%% Macros
%%--------------------------------------------------------------------
-define(METRIC_NAME_PREFIX, "emqx_data_integration_").
-define(MG(K, MAP), maps:get(K, MAP)).
-define(MG0(K, MAP), maps:get(K, MAP, 0)).
%%--------------------------------------------------------------------
%% Callback for emqx_prometheus_cluster
%%--------------------------------------------------------------------
-define(ROOT_KEY_ACTIONS, actions).
%% emqx_prometheus_cluster callback: gather this node's per-rule,
%% per-action and per-connector metric points, tagged with the local node
%% name for the cluster aggregation layer.
fetch_from_local_node(Mode) ->
    Rules = emqx_rule_engine:get_rules(),
    Bridges = emqx_bridge_v2:list(?ROOT_KEY_ACTIONS),
    Connectors = emqx_connector:list(),
    {node(self()), #{
        rule_metric_data => rule_metric_data(Mode, Rules),
        action_metric_data => action_metric_data(Mode, Bridges),
        connector_metric_data => connector_metric_data(Mode, Connectors)
    }}.
%% emqx_prometheus_cluster callback: overview data assumed identical on
%% every node, fetched once per scrape instead of per node.
%% NOTE(review): the "consistented" spelling is fixed by the behaviour's
%% callback name in emqx_prometheus_cluster.
fetch_cluster_consistented_data() ->
    Rules = emqx_rule_engine:get_rules(),
    Connectors = emqx_connector:list(),
    (maybe_collect_schema_registry())#{
        rules_ov_data => rules_ov_data(Rules),
        connectors_ov_data => connectors_ov_data(Connectors)
    }.
%% emqx_prometheus_cluster callback: initial accumulator — an empty point
%% list per metric name, for each of the three data groups.
aggre_or_zip_init_acc() ->
    #{
        rule_metric_data => maps:from_keys(rule_metric(names), []),
        action_metric_data => maps:from_keys(action_metric(names), []),
        %% NOTE(review): helper name "connectr_metric" is misspelled but
        %% used consistently within this module.
        connector_metric_data => maps:from_keys(connectr_metric(names), [])
    }.

%% emqx_prometheus_cluster callback: metrics combined by logic_sum/2
%% (1 only when both sides positive) instead of plain addition when
%% aggregating across nodes.
logic_sum_metrics() ->
    [
        emqx_rule_enable,
        emqx_connector_enable,
        emqx_connector_status
    ].
%%--------------------------------------------------------------------
%% Collector API
%%--------------------------------------------------------------------
%% @private
%% prometheus_collector callback: nothing to clean up on deregistration.
deregister_cleanup(_) -> ok.

%% @private
%% prometheus_collector callback: emit every data-integration metric
%% family (overview gauges plus per-rule / per-action / per-connector
%% series) through Callback; requests for any other registry are ignored.
-spec collect_mf(_Registry, Callback) -> ok when
    _Registry :: prometheus_registry:registry(),
    Callback :: prometheus_collector:collect_mf_callback().
collect_mf(?PROMETHEUS_DATA_INTEGRATION_REGISTRY, Callback) ->
    RawData = emqx_prometheus_cluster:raw_data(?MODULE, ?GET_PROM_DATA_MODE()),
    %% Data Integration Overview
    ok = add_collect_family(Callback, rules_ov_metric_meta(), ?MG(rules_ov_data, RawData)),
    ok = add_collect_family(
        Callback, connectors_ov_metric_meta(), ?MG(connectors_ov_data, RawData)
    ),
    ok = maybe_collect_family_schema_registry(Callback),
    %% Rule Metric
    RuleMetricDs = ?MG(rule_metric_data, RawData),
    ok = add_collect_family(Callback, rule_metric_meta(), RuleMetricDs),
    %% Action Metric
    ActionMetricDs = ?MG(action_metric_data, RawData),
    ok = add_collect_family(Callback, action_metric_meta(), ActionMetricDs),
    %% Connector Metric
    ConnectorMetricDs = ?MG(connector_metric_data, RawData),
    ok = add_collect_family(Callback, connector_metric_meta(), ConnectorMetricDs),
    ok;
collect_mf(_, _) ->
    ok.
%% @private
%% Render data-integration metrics for the HTTP API in the requested
%% format: a nested map for JSON, or prometheus text exposition format.
collect(<<"json">>) ->
    RawData = emqx_prometheus_cluster:raw_data(?MODULE, ?GET_PROM_DATA_MODE()),
    Rules = emqx_rule_engine:get_rules(),
    %% NOTE(review): the overview here lists bridges via emqx_bridge:list()
    %% while fetch_from_local_node/1 uses emqx_bridge_v2:list(actions) —
    %% confirm the difference is intentional.
    Bridges = emqx_bridge:list(),
    #{
        data_integration_overview => collect_data_integration_overview(Rules, Bridges),
        rules => collect_json_data(?MG(rule_metric_data, RawData)),
        actions => collect_json_data(?MG(action_metric_data, RawData)),
        connectors => collect_json_data(?MG(connector_metric_data, RawData))
    };
collect(<<"prometheus">>) ->
    prometheus_text_format:format(?PROMETHEUS_DATA_INTEGRATION_REGISTRY).
%%====================
%% API Helpers
%% Register one metric family per {Name, Type} meta entry with the
%% prometheus collect callback, all families backed by the same Data.
add_collect_family(Callback, MetricWithType, Data) ->
    lists:foreach(
        fun({Name, Type}) -> add_collect_family(Name, Data, Callback, Type) end,
        MetricWithType
    ),
    ok.

%% TODO: help document from Name
add_collect_family(Name, Data, Callback, Type) ->
    Callback(create_mf(Name, _Help = <<"">>, Type, ?MODULE, Data)).
%% prometheus_collector callback: delegate metric rendering to collect_di/2.
collect_metrics(Name, Metrics) ->
    collect_di(Name, Metrics).
%%--------------------------------------------------------------------
%% Collector
%%--------------------------------------------------------------------
%%========================================
%% Data Integration Overview
%%========================================
%%====================
%% All Rules
%% Rules
%% Render one metric family for the prometheus collector: overview counts
%% are single gauges; per-rule / per-action / per-connector families are
%% lists of labelled points (counter or gauge per the meta tables below).
collect_di(K = emqx_rules_count, Data) -> gauge_metric(?MG(K, Data));
%%====================
%% Schema Registry
collect_di(K = emqx_schema_registrys_count, Data) -> gauge_metric(?MG(K, Data));
%%====================
%% Connectors
collect_di(K = emqx_connectors_count, Data) -> gauge_metric(?MG(K, Data));
%%========================================
%% Data Integration Metric for: Rule && Action && Connector
%%========================================
%%====================
%% Rule Metric
collect_di(K = emqx_rule_enable, Data) -> gauge_metrics(?MG(K, Data));
collect_di(K = emqx_rule_matched, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_rule_failed, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_rule_passed, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_rule_failed_exception, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_rule_failed_no_result, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_rule_actions_total, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_rule_actions_success, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_rule_actions_failed, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_rule_actions_failed_out_of_service, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_rule_actions_failed_unknown, Data) -> counter_metrics(?MG(K, Data));
%%====================
%% Action Metric
collect_di(K = emqx_action_matched, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_dropped, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_success, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_failed, Data) -> counter_metrics(?MG(K, Data));
%% inflight type: gauge
collect_di(K = emqx_action_inflight, Data) -> gauge_metrics(?MG(K, Data));
collect_di(K = emqx_action_received, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_late_reply, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_retried, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_retried_success, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_retried_failed, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_dropped_resource_stopped, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_dropped_resource_not_found, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_dropped_queue_full, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_dropped_other, Data) -> counter_metrics(?MG(K, Data));
collect_di(K = emqx_action_dropped_expired, Data) -> counter_metrics(?MG(K, Data));
%% queuing type: gauge
collect_di(K = emqx_action_queuing, Data) -> gauge_metrics(?MG(K, Data));
%%====================
%% Connector Metric
collect_di(K = emqx_connector_enable, Data) -> gauge_metrics(?MG(K, Data));
collect_di(K = emqx_connector_status, Data) -> gauge_metrics(?MG(K, Data)).
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
%%========================================
%% Data Integration Overview
%%========================================
%%====================
%% All Rules
%% Metric meta ({Name, Type}) for the rules overview gauge.
rules_ov_metric_meta() ->
    [
        {emqx_rules_count, gauge}
    ].

rules_ov_metric(names) ->
    emqx_prometheus_cluster:metric_names(rules_ov_metric_meta()).

-define(RULE_TAB, emqx_rule_engine).
%% Total rule count read from the rule-engine ETS table size; the Rules
%% argument is deliberately unused here.
rules_ov_data(_Rules) ->
    #{
        emqx_rules_count => ets:info(?RULE_TAB, size)
    }.
%%====================
%% Schema Registry
%% Schema-registry metrics exist only in the enterprise edition; the ce
%% build compiles no-op stubs instead (see the -else branch).
-if(?EMQX_RELEASE_EDITION == ee).
maybe_collect_family_schema_registry(Callback) ->
    ok = add_collect_family(Callback, schema_registry_metric_meta(), schema_registry_data()),
    ok.

schema_registry_metric_meta() ->
    [
        {emqx_schema_registrys_count, gauge}
    ].

%% Number of registered schemas.
schema_registry_data() ->
    #{
        emqx_schema_registrys_count => erlang:map_size(emqx_schema_registry:list_schemas())
    }.

maybe_collect_schema_registry() ->
    schema_registry_data().
-else.
%% Community edition: no schema registry, emit nothing.
maybe_collect_family_schema_registry(_) ->
    ok.

maybe_collect_schema_registry() ->
    #{}.
-endif.
%%====================
%% Connectors
%% Metric meta ({Name, Type}) for the connectors overview gauge.
connectors_ov_metric_meta() ->
    [
        {emqx_connectors_count, gauge}
    ].

connectors_ov_metric(names) ->
    emqx_prometheus_cluster:metric_names(connectors_ov_metric_meta()).

%% Total connector count taken from the caller-supplied list.
connectors_ov_data(Connectors) ->
    #{
        %% Both Bridge V1 and V2
        emqx_connectors_count => erlang:length(Connectors)
    }.
%%========================================
%% Data Integration Metric for: Rule && Action && Connector
%%========================================
%%====================
%% Rule Metric
%% With rule_id as label key: `rule_id`
%% Rule metric meta; every series carries the rule id under the `id` label.
rule_metric_meta() ->
    [
        {emqx_rule_enable, gauge},
        {emqx_rule_matched, counter},
        {emqx_rule_failed, counter},
        {emqx_rule_passed, counter},
        {emqx_rule_failed_exception, counter},
        {emqx_rule_failed_no_result, counter},
        {emqx_rule_actions_total, counter},
        {emqx_rule_actions_success, counter},
        {emqx_rule_actions_failed, counter},
        {emqx_rule_actions_failed_out_of_service, counter},
        {emqx_rule_actions_failed_unknown, counter}
    ].

rule_metric(names) ->
    emqx_prometheus_cluster:metric_names(rule_metric_meta()).

%% Build metric name => [{Labels, Value}] point lists covering every rule.
rule_metric_data(Mode, Rules) ->
    lists:foldl(
        fun(#{id := Id} = Rule, AccIn) ->
            merge_acc_with_rules(Mode, Id, get_metric(Rule), AccIn)
        end,
        maps:from_keys(rule_metric(names), []),
        Rules
    ).

%% Prepend one rule's value onto each metric's accumulated point list.
merge_acc_with_rules(Mode, Id, RuleMetrics, PointsAcc) ->
    maps:fold(
        fun(K, V, AccIn) ->
            AccIn#{K => [rule_point(Mode, Id, V) | ?MG(K, AccIn)]}
        end,
        PointsAcc,
        RuleMetrics
    ).

%% One point labelled by rule id (plus node label in unaggregated mode).
rule_point(Mode, Id, V) ->
    {with_node_label(Mode, [{id, Id}]), V}.

%% Read one rule's counters from the rule-metrics worker and map them onto
%% the exported names; the rule's enable flag becomes a 0/1 gauge value.
%% NOTE(review): crashes (case_clause) if the worker returns no counters
%% map — presumably impossible for a known rule id; confirm.
get_metric(#{id := Id, enable := Bool} = _Rule) ->
    case emqx_metrics_worker:get_metrics(rule_metrics, Id) of
        #{counters := Counters} ->
            #{
                emqx_rule_enable => emqx_prometheus_cluster:boolean_to_number(Bool),
                emqx_rule_matched => ?MG(matched, Counters),
                emqx_rule_failed => ?MG(failed, Counters),
                emqx_rule_passed => ?MG(passed, Counters),
                emqx_rule_failed_exception => ?MG('failed.exception', Counters),
                emqx_rule_failed_no_result => ?MG('failed.no_result', Counters),
                emqx_rule_actions_total => ?MG('actions.total', Counters),
                emqx_rule_actions_success => ?MG('actions.success', Counters),
                emqx_rule_actions_failed => ?MG('actions.failed', Counters),
                emqx_rule_actions_failed_out_of_service => ?MG(
                    'actions.failed.out_of_service', Counters
                ),
                emqx_rule_actions_failed_unknown => ?MG('actions.failed.unknown', Counters)
            }
    end.
%%====================
%% Action Metric
%% With action_id: `{type}:{name}` as label key: `action_id`
%% Action metric meta; every series carries `id` = `{type}:{name}`.
action_metric_meta() ->
    [
        {emqx_action_matched, counter},
        {emqx_action_dropped, counter},
        {emqx_action_success, counter},
        {emqx_action_failed, counter},
        {emqx_action_inflight, gauge},
        {emqx_action_received, counter},
        {emqx_action_late_reply, counter},
        {emqx_action_retried, counter},
        {emqx_action_retried_success, counter},
        {emqx_action_retried_failed, counter},
        {emqx_action_dropped_resource_stopped, counter},
        {emqx_action_dropped_resource_not_found, counter},
        {emqx_action_dropped_queue_full, counter},
        {emqx_action_dropped_other, counter},
        {emqx_action_dropped_expired, counter},
        {emqx_action_queuing, gauge}
    ].

action_metric(names) ->
    emqx_prometheus_cluster:metric_names(action_metric_meta()).

%% Build the per-action point lists covering every bridge/action.
action_metric_data(Mode, Bridges) ->
    lists:foldl(
        fun(#{type := Type, name := Name} = _Bridge, AccIn) ->
            Id = emqx_bridge_resource:bridge_id(Type, Name),
            merge_acc_with_bridges(Mode, Id, get_bridge_metric(Type, Name), AccIn)
        end,
        maps:from_keys(action_metric(names), []),
        Bridges
    ).

%% Prepend one action's values onto each metric's accumulated point list.
merge_acc_with_bridges(Mode, Id, BridgeMetrics, PointsAcc) ->
    maps:fold(
        fun(K, V, AccIn) ->
            AccIn#{K => [action_point(Mode, Id, V) | ?MG(K, AccIn)]}
        end,
        PointsAcc,
        BridgeMetrics
    ).

%% One point labelled by action id (plus node label in unaggregated mode).
action_point(Mode, Id, V) ->
    {with_node_label(Mode, [{id, Id}]), V}.

%% Read one action's counters/gauges; missing entries default to 0 (?MG0)
%% since not every bridge reports every counter.
get_bridge_metric(Type, Name) ->
    #{counters := Counters, gauges := Gauges} = emqx_bridge_v2:get_metrics(Type, Name),
    #{
        emqx_action_matched => ?MG0(matched, Counters),
        emqx_action_dropped => ?MG0(dropped, Counters),
        emqx_action_success => ?MG0(success, Counters),
        emqx_action_failed => ?MG0(failed, Counters),
        emqx_action_inflight => ?MG0(inflight, Gauges),
        emqx_action_received => ?MG0(received, Counters),
        emqx_action_late_reply => ?MG0(late_reply, Counters),
        emqx_action_retried => ?MG0(retried, Counters),
        emqx_action_retried_success => ?MG0('retried.success', Counters),
        emqx_action_retried_failed => ?MG0('retried.failed', Counters),
        emqx_action_dropped_resource_stopped => ?MG0('dropped.resource_stopped', Counters),
        emqx_action_dropped_resource_not_found => ?MG0('dropped.resource_not_found', Counters),
        emqx_action_dropped_queue_full => ?MG0('dropped.queue_full', Counters),
        emqx_action_dropped_other => ?MG0('dropped.other', Counters),
        emqx_action_dropped_expired => ?MG0('dropped.expired', Counters),
        emqx_action_queuing => ?MG0(queuing, Gauges)
    }.
%%====================
%% Connector Metric
%% With connector_id: `{type}:{name}` as label key: `connector_id`
%% Connector metric meta; every series carries `id` = `{type}:{name}`.
connector_metric_meta() ->
    [
        {emqx_connector_enable, gauge},
        {emqx_connector_status, gauge}
    ].

%% NOTE(review): "connectr" is misspelled but used consistently here and in
%% aggre_or_zip_init_acc/0; a rename must touch both call sites.
connectr_metric(names) ->
    emqx_prometheus_cluster:metric_names(connector_metric_meta()).

%% Build the per-connector point lists covering every connector.
connector_metric_data(Mode, Connectors) ->
    lists:foldl(
        fun(#{type := Type, name := Name} = Connector, AccIn) ->
            Id = emqx_connector_resource:connector_id(Type, Name),
            merge_acc_with_connectors(Mode, Id, get_connector_status(Connector), AccIn)
        end,
        maps:from_keys(connectr_metric(names), []),
        Connectors
    ).

%% Prepend one connector's values onto each metric's accumulated list.
merge_acc_with_connectors(Mode, Id, ConnectorMetrics, PointsAcc) ->
    maps:fold(
        fun(K, V, AccIn) ->
            AccIn#{K => [connector_point(Mode, Id, V) | ?MG(K, AccIn)]}
        end,
        PointsAcc,
        ConnectorMetrics
    ).

%% One point labelled by connector id (plus node label when unaggregated).
connector_point(Mode, Id, V) ->
    {with_node_label(Mode, [{id, Id}]), V}.

%% Derive the connector's 0/1 enable flag and status gauge from its
%% resource data.
get_connector_status(#{resource_data := ResourceData} = _Connector) ->
    Enabled = emqx_utils_maps:deep_get([config, enable], ResourceData),
    Status = ?MG(status, ResourceData),
    #{
        emqx_connector_enable => emqx_prometheus_cluster:boolean_to_number(Enabled),
        emqx_connector_status => emqx_prometheus_cluster:status_to_number(Status)
    }.
%%--------------------------------------------------------------------
%% Collect functions
%%--------------------------------------------------------------------
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% merge / zip formatting funcs for type `application/json`
%% Build the JSON overview object: pick the overview metric names out of
%% the rules / connectors data maps, then merge in schema-registry data
%% (ee edition only; empty map otherwise).
collect_data_integration_overview(Rules, Bridges) ->
    RulesD = rules_ov_data(Rules),
    ConnectorsD = connectors_ov_data(Bridges),

    M1 = lists:foldl(
        fun(K, AccIn) -> AccIn#{K => ?MG(K, RulesD)} end,
        #{},
        rules_ov_metric(names)
    ),
    M2 = lists:foldl(
        fun(K, AccIn) -> AccIn#{K => ?MG(K, ConnectorsD)} end,
        #{},
        connectors_ov_metric(names)
    ),
    M3 = maybe_collect_schema_registry(),

    lists:foldl(fun(M, AccIn) -> maps:merge(M, AccIn) end, #{}, [M1, M2, M3]).

%% Format point lists as JSON-ready maps, one map per labelled entity.
collect_json_data(Data) ->
    emqx_prometheus_cluster:collect_json_data(Data, fun zip_json_data_integration_metrics/3).
%% Formatter for `application/json` responses.
%% The first metric key starts from an empty accumulator ([] = _AccIn) and
%% seeds one map per point from its labels:
%% For Rules:
%%   `id` => [RULE_ID]
%% For Actions:
%%   `id` => [ACTION_ID]
%% For Connectors:
%%   `id` => [CONNECTOR_ID]  %% CONNECTOR_ID = BRIDGE_ID
%%   formatted with {type}:{name}
zip_json_data_integration_metrics(Key, Points, [] = _AccIn) ->
    lists:foldl(
        fun({Lables, Metric}, AccIn2) ->
            LablesKVMap = maps:from_list(Lables),
            Point = LablesKVMap#{Key => Metric},
            [Point | AccIn2]
        end,
        [],
        Points
    );
%% Later keys are zipped into the already-built result list by position.
%% NOTE(review): lists:zipwith/3 requires equal length and matching order —
%% holds while every metric reports the same label sets.
zip_json_data_integration_metrics(Key, Points, AllResultedAcc) ->
    ThisKeyResult = lists:foldl(emqx_prometheus_cluster:point_to_map_fun(Key), [], Points),
    lists:zipwith(fun maps:merge/2, AllResultedAcc, ThisKeyResult).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Helper funcs
%% Decorate metric labels with the local node name, but only in the
%% unaggregated all-nodes mode, where points from different nodes must
%% remain distinguishable; node and aggregated modes keep labels untouched.
with_node_label(Mode, Labels) when
    Mode =:= ?PROM_DATA_MODE__NODE;
    Mode =:= ?PROM_DATA_MODE__ALL_NODES_AGGREGATED
->
    Labels;
with_node_label(?PROM_DATA_MODE__ALL_NODES_UNAGGREGATED, Labels) ->
    [{node, node(self())} | Labels].

View File

@ -67,6 +67,9 @@ init([]) ->
Children =
case emqx_prometheus_config:is_push_gateway_server_enabled(Conf) of
false -> [];
%% TODO: add push gateway for endpoints
%% `/prometheus/auth`
%% `/prometheus/data_integration`
true -> [?CHILD(emqx_prometheus, Conf)]
end,
{ok, {{one_for_one, 10, 3600}, Children}}.

View File

@ -0,0 +1,53 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_prometheus_proto_v2).
-behaviour(emqx_bpapi).
-export([
introduced_in/0,
start/1,
stop/1,
raw_prom_data/4
]).
-include_lib("emqx/include/bpapi.hrl").
%% emqx_bpapi callback: this protocol first shipped in EMQX 5.5.0.
introduced_in() ->
    "5.5.0".

%% Run emqx_prometheus:do_start/0 on all given nodes (5s RPC timeout).
-spec start([node()]) -> emqx_rpc:multicall_result().
start(Nodes) ->
    rpc:multicall(Nodes, emqx_prometheus, do_start, [], 5000).

%% Run emqx_prometheus:do_stop/0 on all given nodes (5s RPC timeout).
-spec stop([node()]) -> emqx_rpc:multicall_result().
stop(Nodes) ->
    rpc:multicall(Nodes, emqx_prometheus, do_stop, [], 5000).

-type key() :: atom().
-type arg() :: list(term()).

%% Collect raw prometheus data from all given nodes: each node evaluates
%% M:F(A...) via emqx_prometheus_api:lookup_from_local_nodes/3, with a 5s
%% per-call timeout; returns the erpc multicall result list.
-spec raw_prom_data([node()], key(), key(), arg()) -> emqx_rpc:erpc_multicall(term()).
raw_prom_data(Nodes, M, F, A) ->
    erpc:multicall(
        Nodes,
        emqx_prometheus_api,
        lookup_from_local_nodes,
        [M, F, A],
        5000
    ).

View File

@ -103,13 +103,16 @@ init_group() ->
ok = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], infinity),
meck:new(emqx_alarm, [non_strict, passthrough, no_link]),
meck:expect(emqx_alarm, activate, 3, ok),
meck:expect(emqx_alarm, deactivate, 3, ok).
meck:expect(emqx_alarm, deactivate, 3, ok),
meck:new(emqx_license_checker, [non_strict, passthrough, no_link]),
meck:expect(emqx_license_checker, expiry_epoch, fun() -> 1859673600 end).
end_group() ->
ekka:stop(),
mria:stop(),
mria_mnesia:delete_schema(),
meck:unload(emqx_alarm),
meck:unload(emqx_license_checker),
emqx_common_test_helpers:stop_apps([emqx_prometheus]).
end_per_group(_Group, Config) ->

View File

@ -128,8 +128,8 @@ t_legacy_prometheus_api(_) ->
Conf2 = emqx_utils_json:decode(Response2, [return_maps]),
?assertEqual(NewConf, Conf2),
EnvCollectors = application:get_env(prometheus, collectors, []),
PromCollectors = prometheus_registry:collectors(default),
EnvCollectors = env_collectors(),
PromCollectors = all_collectors(),
?assertEqual(lists:sort(EnvCollectors), lists:sort(PromCollectors)),
?assert(lists:member(prometheus_vm_statistics_collector, EnvCollectors), EnvCollectors),
@ -221,8 +221,8 @@ t_prometheus_api(_) ->
Conf2 = emqx_utils_json:decode(Response2, [return_maps]),
?assertMatch(NewConf, Conf2),
EnvCollectors = application:get_env(prometheus, collectors, []),
PromCollectors = prometheus_registry:collectors(default),
EnvCollectors = env_collectors(),
PromCollectors = all_collectors(),
?assertEqual(lists:sort(EnvCollectors), lists:sort(PromCollectors)),
?assert(lists:member(prometheus_vm_statistics_collector, EnvCollectors), EnvCollectors),
@ -308,3 +308,16 @@ request_stats(JsonAuth, Auth) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Internal Functions
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Flatten the `prometheus` app's collectors env — which may mix bare
%% collector atoms with {Registry, Collector} pairs — into a plain list of
%% collector module names.
env_collectors() ->
    do_env_collectors(application:get_env(prometheus, collectors, []), []).

do_env_collectors([], Acc) ->
    lists:reverse(Acc);
do_env_collectors([{_Registry, Collector} | Rest], Acc) when is_atom(Collector) ->
    do_env_collectors(Rest, [Collector | Acc]);
do_env_collectors([Collector | Rest], Acc) when is_atom(Collector) ->
    do_env_collectors(Rest, [Collector | Acc]).

%% Collectors currently registered across all prometheus registries.
all_collectors() ->
    emqx_prometheus_config:all_collectors().

View File

@ -1,13 +1,12 @@
{application, emqx_redis, [
{description, "EMQX Redis Database Connector"},
{vsn, "0.1.4"},
{vsn, "0.1.5"},
{registered, []},
{applications, [
kernel,
stdlib,
eredis,
eredis_cluster,
emqx_connector,
emqx_resource
]},
{env, []},

View File

@ -50,7 +50,8 @@
]).
-export([
set_resource_status_connecting/1
set_resource_status_connecting/1,
make_test_id/0
]).
% Server

View File

@ -44,7 +44,11 @@ api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
paths() ->
[?PREFIX, ?PREFIX ++ "/messages", ?PREFIX ++ "/message/:topic"].
[
?PREFIX,
?PREFIX ++ "/messages",
?PREFIX ++ "/message/:topic"
].
schema(?PREFIX) ->
#{

View File

@ -51,6 +51,8 @@
gen_id/0,
gen_id/1,
explain_posix/1,
pforeach/2,
pforeach/3,
pmap/2,
pmap/3,
readable_error_msg/1,
@ -423,6 +425,15 @@ explain_posix(estale) -> "Stale remote file handle";
explain_posix(exdev) -> "Cross-domain link";
explain_posix(NotPosix) -> NotPosix.
-spec pforeach(fun((A) -> term()), list(A)) -> ok.
pforeach(Fun, List) when is_function(Fun, 1), is_list(List) ->
pforeach(Fun, List, ?DEFAULT_PMAP_TIMEOUT).
-spec pforeach(fun((A) -> term()), list(A), timeout()) -> ok.
pforeach(Fun, List, Timeout) ->
_ = pmap(Fun, List, Timeout),
ok.
%% @doc Like lists:map/2, only the callback function is evaluated
%% concurrently.
-spec pmap(fun((A) -> B), list(A)) -> list(B).
@ -431,7 +442,9 @@ pmap(Fun, List) when is_function(Fun, 1), is_list(List) ->
-spec pmap(fun((A) -> B), list(A), timeout()) -> list(B).
pmap(Fun, List, Timeout) when
is_function(Fun, 1), is_list(List), is_integer(Timeout), Timeout >= 0
is_function(Fun, 1),
is_list(List),
(is_integer(Timeout) andalso Timeout >= 0 orelse Timeout =:= infinity)
->
nolink_apply(fun() -> do_parallel_map(Fun, List) end, Timeout).

View File

@ -0,0 +1,15 @@
Expose more metrics to improve observability:
Monitor API:
- Add `retained_msg_count` field to `/api/v5/monitor_current`.
- Add `retained_msg_count` and `node_uptime` fields to `/api/v5/monitor_current/nodes/{node}`.
Prometheus API:
- Add `emqx_cert_expiry_at` to `/api/v5/prometheus/stats` to display TLS listener certificate expiration time.
- Add `/api/v5/prometheus/auth` endpoint to provide metrics such as execution count and running status for all authenticators and authorizers.
- Add `/api/v5/prometheus/data_integration` endpoint to provide metrics such as execution count and status for all rules, actions, and connectors.
Limitations:
Prometheus push gateway only supports content in `/api/v5/prometheus/stats?mode=node` for now.
For more API details and metric type information, please see the Swagger API docs.

View File

@ -0,0 +1 @@
Fixed an issue that could lead to error messages when restarting a node configured with some types of data bridges. Said bridges could also start in a failed state, requiring manual restart.

View File

@ -0,0 +1,3 @@
Apply post config bridge changes in parallel.
This can greatly improve the performance when multiple bridges are being changed,
e.g. when a backup file is being imported.

View File

@ -0,0 +1,17 @@
# Expose more metrics to improve observability:
Monitor API:
- Add `retained_msg_count` field to `/api/v5/monitor_current`.
- Add `license_quota` field to `/api/v5/monitor_current`
- Add `retained_msg_count` and `node_uptime` fields to `/api/v5/monitor_current/nodes/{node}`.
- Add `retained_msg_count`, `license_quota` and `node_uptime` fields to `/api/v5/monitor_current/nodes/{node}`.
Prometheus API:
- Add `emqx_cert_expiry_at` and `emqx_license_expiry_at` to `/api/v5/prometheus/stats` to display TLS listener certificate expiration time and license expiration time.
- Add `/api/v5/prometheus/auth` endpoint to provide metrics such as execution count and running status for all authenticators and authorizers.
- Add `/api/v5/prometheus/data_integration` endpoint to provide metrics such as execution count and status for all rules, actions, and connectors.
Limitations:
Prometheus push gateway only supports the content in `/api/v5/prometheus/stats?mode=node`
For more API details and metric type information, please see the Swagger API docs.

View File

@ -55,7 +55,7 @@ defmodule EMQXUmbrella.MixProject do
{:cowboy, github: "emqx/cowboy", tag: "2.9.2", override: true},
{:esockd, github: "emqx/esockd", tag: "5.11.1", override: true},
{:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.8.0-emqx-2", override: true},
{:ekka, github: "emqx/ekka", tag: "0.18.1", override: true},
{:ekka, github: "emqx/ekka", tag: "0.18.3", override: true},
{:gen_rpc, github: "emqx/gen_rpc", tag: "3.3.1", override: true},
{:grpc, github: "emqx/grpc-erl", tag: "0.6.12", override: true},
{:minirest, github: "emqx/minirest", tag: "1.3.15", override: true},

View File

@ -83,7 +83,7 @@
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.1"}}},
{rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.8.0-emqx-2"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.18.1"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.18.3"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
{grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.12"}}},
{minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.15"}}},

View File

@ -35,11 +35,6 @@ config_auth_basic_password.desc:
config_auth_basic_password.label:
"""HTTP Basic Auth Password"""
config_base_url.desc:
"""The base URL of the external ElasticSearch service's REST interface."""
config_base_url.label:
"""ElasticSearch REST Service Base URL"""
config_target.desc:
"""Name of the data stream, index, or index alias to perform bulk actions on"""

View File

@ -1,5 +1,13 @@
emqx_bridge_es_connector {
server.desc:
"""The IPv4 or IPv6 address or the hostname to connect to.
A host entry has the following form: `Host[:Port]`.
The Elasticsearch default port 9200 is used if `[:Port]` is not specified."""
server.label:
"""Server Host"""
config_authentication.desc:
"""Authentication configuration"""

View File

@ -15,4 +15,14 @@ get_prom_data.desc:
get_prom_data.label:
"""Prometheus Metrics"""
get_prom_auth_data.desc:
"""Get Prometheus Metrics for AuthN, AuthZ and Banned"""
get_prom_auth_data.label:
"""Prometheus Metrics for Auth"""
get_prom_data_integration_data.desc:
"""Get Prometheus Metrics for Data Integration"""
get_prom_data_integration_data.label:
"""Prometheus Metrics for Data Integration"""
}