Merge remote-tracking branch 'origin/release-54' into 1128-sync-release-54
commit 1b1cea24f3
@@ -39,6 +39,7 @@
{emqx_mgmt_api_plugins,2}.
{emqx_mgmt_cluster,1}.
{emqx_mgmt_cluster,2}.
{emqx_mgmt_data_backup,1}.
{emqx_mgmt_trace,1}.
{emqx_mgmt_trace,2}.
{emqx_node_rebalance,1}.
@@ -52,6 +52,9 @@
    lookup_routes/1
]).

%% Topics API
-export([select/3]).

-export([print_routes/1]).

-export([

@@ -59,7 +62,10 @@
    foldr_routes/2
]).

-export([topics/0]).
-export([
    topics/0,
    stats/1
]).

%% Exported for tests
-export([has_route/2]).

@@ -219,6 +225,19 @@ mria_delete_route(v2, Topic, Dest) ->
mria_delete_route(v1, Topic, Dest) ->
    mria_delete_route_v1(Topic, Dest).

-spec select(Spec, _Limit :: pos_integer(), Continuation) ->
    {[emqx_types:route()], Continuation} | '$end_of_table'
when
    Spec :: {_TopicPat, _DestPat},
    Continuation :: term() | '$end_of_table'.
select(MatchSpec, Limit, Cont) ->
    select(get_schema_vsn(), MatchSpec, Limit, Cont).

select(v2, MatchSpec, Limit, Cont) ->
    select_v2(MatchSpec, Limit, Cont);
select(v1, MatchSpec, Limit, Cont) ->
    select_v1(MatchSpec, Limit, Cont).

-spec topics() -> list(emqx_types:topic()).
topics() ->
    topics(get_schema_vsn()).

@@ -228,6 +247,15 @@ topics(v2) ->
topics(v1) ->
    list_topics_v1().

-spec stats(n_routes) -> non_neg_integer().
stats(Item) ->
    stats(get_schema_vsn(), Item).

stats(v2, Item) ->
    get_stats_v2(Item);
stats(v1, Item) ->
    get_stats_v1(Item).

%% @doc Print routes to a topic
-spec print_routes(emqx_types:topic()) -> ok.
print_routes(Topic) ->

@@ -345,9 +373,17 @@ cleanup_routes_v1(Node) ->
        ]
    end).

select_v1({MTopic, MDest}, Limit, undefined) ->
    ets:match_object(?ROUTE_TAB, #route{topic = MTopic, dest = MDest}, Limit);
select_v1(_Spec, _Limit, Cont) ->
    ets:select(Cont).

list_topics_v1() ->
    list_route_tab_topics().

get_stats_v1(n_routes) ->
    emqx_maybe:define(ets:info(?ROUTE_TAB, size), 0).

list_route_tab_topics() ->
    mnesia:dirty_all_keys(?ROUTE_TAB).

@@ -436,11 +472,52 @@ get_dest_node({_, Node}) ->
get_dest_node(Node) ->
    Node.

select_v2(Spec, Limit, undefined) ->
    Stream = mk_route_stream(Spec),
    select_next(Limit, Stream);
select_v2(_Spec, Limit, Stream) ->
    select_next(Limit, Stream).

select_next(N, Stream) ->
    case emqx_utils_stream:consume(N, Stream) of
        {Routes, SRest} ->
            {Routes, SRest};
        Routes ->
            {Routes, '$end_of_table'}
    end.

mk_route_stream(Spec) ->
    emqx_utils_stream:chain(
        mk_route_stream(route, Spec),
        mk_route_stream(filter, Spec)
    ).

mk_route_stream(route, Spec) ->
    emqx_utils_stream:ets(fun(Cont) -> select_v1(Spec, 1, Cont) end);
mk_route_stream(filter, {MTopic, MDest}) ->
    emqx_utils_stream:map(
        fun routeidx_to_route/1,
        emqx_utils_stream:ets(
            fun
                (undefined) ->
                    MatchSpec = #routeidx{entry = emqx_trie_search:make_pat(MTopic, MDest)},
                    ets:match_object(?ROUTE_TAB_FILTERS, MatchSpec, 1);
                (Cont) ->
                    ets:match_object(Cont)
            end
        )
    ).

list_topics_v2() ->
    Pat = #routeidx{entry = '$1'},
    Filters = [emqx_topic_index:get_topic(K) || [K] <- ets:match(?ROUTE_TAB_FILTERS, Pat)],
    list_route_tab_topics() ++ Filters.

get_stats_v2(n_routes) ->
    NTopics = emqx_maybe:define(ets:info(?ROUTE_TAB, size), 0),
    NWildcards = emqx_maybe:define(ets:info(?ROUTE_TAB_FILTERS, size), 0),
    NTopics + NWildcards.

fold_routes_v2(FunName, FoldFun, AccIn) ->
    FilterFoldFun = mk_filtertab_fold_fun(FoldFun),
    Acc = ets:FunName(FoldFun, AccIn, ?ROUTE_TAB),

@@ -449,6 +526,9 @@ fold_routes_v2(FunName, FoldFun, AccIn) ->
mk_filtertab_fold_fun(FoldFun) ->
    fun(#routeidx{entry = K}, Acc) -> FoldFun(match_to_route(K), Acc) end.

routeidx_to_route(#routeidx{entry = M}) ->
    match_to_route(M).

match_to_route(M) ->
    #route{topic = emqx_topic_index:get_topic(M), dest = emqx_topic_index:get_id(M)}.
@@ -190,12 +190,7 @@ code_change(_OldVsn, State, _Extra) ->
%%--------------------------------------------------------------------

stats_fun() ->
    case ets:info(?ROUTE_TAB, size) of
        undefined ->
            ok;
        Size ->
            emqx_stats:setstat('topics.count', 'topics.max', Size)
    end.
    emqx_stats:setstat('topics.count', 'topics.max', emqx_router:stats(n_routes)).

cleanup_routes(Node) ->
    emqx_router:cleanup_routes(Node).
@@ -1395,7 +1395,7 @@ fields("broker_routing") ->
        sc(
            hoconsc:enum([v1, v2]),
            #{
                default => v1,
                default => v2,
                'readOnly' => true,
                desc => ?DESC(broker_routing_storage_schema)
            }
@@ -98,7 +98,7 @@

-module(emqx_trie_search).

-export([make_key/2, filter/1]).
-export([make_key/2, make_pat/2, filter/1]).
-export([match/2, matches/3, get_id/1, get_topic/1]).
-export_type([key/1, word/0, words/0, nextf/0, opts/0]).

@@ -127,6 +127,12 @@ make_key(Topic, ID) when is_binary(Topic) ->
make_key(Words, ID) when is_list(Words) ->
    {Words, {ID}}.

-spec make_pat(emqx_types:topic() | words() | '_', _ID | '_') -> _Pat.
make_pat(Pattern = '_', ID) ->
    {Pattern, {ID}};
make_pat(Topic, ID) ->
    make_key(Topic, ID).

%% @doc Parse a topic filter into a list of words. Returns `false` if it's not a filter.
-spec filter(emqx_types:topic()) -> words() | false.
filter(Topic) ->
@@ -21,6 +21,7 @@
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_utils/include/emqx_utils_api.hrl").
-include_lib("emqx_bridge/include/emqx_bridge.hrl").

-import(hoconsc, [mk/2, array/1, enum/1]).
-import(emqx_utils, [redact/1]).

@@ -37,6 +38,8 @@
-export([
    '/actions'/2,
    '/actions/:id'/2,
    '/actions/:id/metrics'/2,
    '/actions/:id/metrics/reset'/2,
    '/actions/:id/enable/:enable'/2,
    '/actions/:id/:operation'/2,
    '/nodes/:node/actions/:id/:operation'/2,

@@ -44,8 +47,8 @@
    '/action_types'/2
]).

%% BpAPI
-export([lookup_from_local_node/2]).
%% BpAPI / RPC Targets
-export([lookup_from_local_node/2, get_metrics_from_local_node/2]).

-define(BRIDGE_NOT_FOUND(BRIDGE_TYPE, BRIDGE_NAME),
    ?NOT_FOUND(

@@ -80,6 +83,10 @@ paths() ->
        "/actions/:id/enable/:enable",
        "/actions/:id/:operation",
        "/nodes/:node/actions/:id/:operation",
        %% Caveat: metrics paths must come *after* `/:operation', otherwise minirest will
        %% try to match the latter first, trying to interpret `metrics' as an operation...
        "/actions/:id/metrics",
        "/actions/:id/metrics/reset",
        "/actions_probe",
        "/action_types"
    ].

@@ -247,6 +254,34 @@ schema("/actions/:id") ->
            }
        }
    };
schema("/actions/:id/metrics") ->
    #{
        'operationId' => '/actions/:id/metrics',
        get => #{
            tags => [<<"actions">>],
            summary => <<"Get action metrics">>,
            description => ?DESC("desc_bridge_metrics"),
            parameters => [param_path_id()],
            responses => #{
                200 => emqx_bridge_schema:metrics_fields(),
                404 => error_schema('NOT_FOUND', "Action not found")
            }
        }
    };
schema("/actions/:id/metrics/reset") ->
    #{
        'operationId' => '/actions/:id/metrics/reset',
        put => #{
            tags => [<<"actions">>],
            summary => <<"Reset action metrics">>,
            description => ?DESC("desc_api6"),
            parameters => [param_path_id()],
            responses => #{
                204 => <<"Reset success">>,
                404 => error_schema('NOT_FOUND', "Action not found")
            }
        }
    };
schema("/actions/:id/enable/:enable") ->
    #{
        'operationId' => '/actions/:id/enable/:enable',

@@ -429,6 +464,19 @@ schema("/action_types") ->
        end
    ).

'/actions/:id/metrics'(get, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(Id, get_metrics_from_all_nodes(BridgeType, BridgeName)).

'/actions/:id/metrics/reset'(put, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(
        Id,
        begin
            ActionType = emqx_bridge_v2:bridge_v2_type_to_connector_type(BridgeType),
            ok = emqx_bridge_v2:reset_metrics(ActionType, BridgeName),
            ?NO_CONTENT
        end
    ).

'/actions/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) ->
    ?TRY_PARSE_ID(
        Id,

@@ -570,6 +618,18 @@ lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) ->
            ?INTERNAL_ERROR(Reason)
    end.

get_metrics_from_all_nodes(ActionType, ActionName) ->
    Nodes = emqx:running_nodes(),
    Result = maybe_unwrap(
        emqx_bridge_proto_v5:v2_get_metrics_from_all_nodes(Nodes, ActionType, ActionName)
    ),
    case Result of
        Metrics when is_list(Metrics) ->
            {200, format_bridge_metrics(lists:zip(Nodes, Metrics))};
        {error, Reason} ->
            ?INTERNAL_ERROR(Reason)
    end.

operation_func(all, start) -> v2_start_bridge_to_all_nodes;
operation_func(_Node, start) -> v2_start_bridge_to_node.

@@ -720,12 +780,17 @@ aggregate_status(AllStatus) ->
        false -> inconsistent
    end.

%% RPC Target
lookup_from_local_node(BridgeType, BridgeName) ->
    case emqx_bridge_v2:lookup(BridgeType, BridgeName) of
        {ok, Res} -> {ok, format_resource(Res, node())};
        Error -> Error
    end.

%% RPC Target
get_metrics_from_local_node(ActionType, ActionName) ->
    format_metrics(emqx_bridge_v2:get_metrics(ActionType, ActionName)).

%% resource
format_resource(
    #{

@@ -751,6 +816,123 @@ format_resource(
        )
    ).

format_metrics(#{
    counters := #{
        'dropped' := Dropped,
        'dropped.other' := DroppedOther,
        'dropped.expired' := DroppedExpired,
        'dropped.queue_full' := DroppedQueueFull,
        'dropped.resource_not_found' := DroppedResourceNotFound,
        'dropped.resource_stopped' := DroppedResourceStopped,
        'matched' := Matched,
        'retried' := Retried,
        'late_reply' := LateReply,
        'failed' := SentFailed,
        'success' := SentSucc,
        'received' := Rcvd
    },
    gauges := Gauges,
    rate := #{
        matched := #{current := Rate, last5m := Rate5m, max := RateMax}
    }
}) ->
    Queued = maps:get('queuing', Gauges, 0),
    SentInflight = maps:get('inflight', Gauges, 0),
    ?METRICS(
        Dropped,
        DroppedOther,
        DroppedExpired,
        DroppedQueueFull,
        DroppedResourceNotFound,
        DroppedResourceStopped,
        Matched,
        Queued,
        Retried,
        LateReply,
        SentFailed,
        SentInflight,
        SentSucc,
        Rate,
        Rate5m,
        RateMax,
        Rcvd
    );
format_metrics(_Metrics) ->
    %% Empty metrics: can happen when a node joins another and a
    %% bridge is not yet replicated to it, so the counters map is
    %% empty.
    empty_metrics().

empty_metrics() ->
    ?METRICS(
        _Dropped = 0,
        _DroppedOther = 0,
        _DroppedExpired = 0,
        _DroppedQueueFull = 0,
        _DroppedResourceNotFound = 0,
        _DroppedResourceStopped = 0,
        _Matched = 0,
        _Queued = 0,
        _Retried = 0,
        _LateReply = 0,
        _SentFailed = 0,
        _SentInflight = 0,
        _SentSucc = 0,
        _Rate = 0,
        _Rate5m = 0,
        _RateMax = 0,
        _Rcvd = 0
    ).

format_bridge_metrics(Bridges) ->
    NodeMetrics = lists:filtermap(
        fun
            ({Node, Metrics}) when is_map(Metrics) ->
                {true, #{node => Node, metrics => Metrics}};
            ({Node, _}) ->
                {true, #{node => Node, metrics => empty_metrics()}}
        end,
        Bridges
    ),
    #{
        metrics => aggregate_metrics(NodeMetrics),
        node_metrics => NodeMetrics
    }.

aggregate_metrics(AllMetrics) ->
    InitMetrics = ?EMPTY_METRICS,
    lists:foldl(fun aggregate_metrics/2, InitMetrics, AllMetrics).

aggregate_metrics(
    #{
        metrics := ?metrics(
            M1, M2, M3, M4, M5, M6, M7, M8, M9, M10, M11, M12, M13, M14, M15, M16, M17
        )
    },
    ?metrics(
        N1, N2, N3, N4, N5, N6, N7, N8, N9, N10, N11, N12, N13, N14, N15, N16, N17
    )
) ->
    ?METRICS(
        M1 + N1,
        M2 + N2,
        M3 + N3,
        M4 + N4,
        M5 + N5,
        M6 + N6,
        M7 + N7,
        M8 + N8,
        M9 + N9,
        M10 + N10,
        M11 + N11,
        M12 + N12,
        M13 + N13,
        M14 + N14,
        M15 + N15,
        M16 + N16,
        M17 + N17
    ).

format_bridge_status_and_error(Data) ->
    maps:fold(fun format_resource_data/3, #{}, maps:with([status, error], Data)).
@@ -34,7 +34,8 @@
    v2_start_bridge_to_node/3,
    v2_start_bridge_to_all_nodes/3,
    v2_list_bridges_on_nodes/1,
    v2_lookup_from_all_nodes/3
    v2_lookup_from_all_nodes/3,
    v2_get_metrics_from_all_nodes/3
]).

-include_lib("emqx/include/bpapi.hrl").

@@ -156,6 +157,17 @@ v2_lookup_from_all_nodes(Nodes, BridgeType, BridgeName) ->
        ?TIMEOUT
    ).

-spec v2_get_metrics_from_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
v2_get_metrics_from_all_nodes(Nodes, ActionType, ActionName) ->
    erpc:multicall(
        Nodes,
        emqx_bridge_v2_api,
        get_metrics_from_local_node,
        [ActionType, ActionName],
        ?TIMEOUT
    ).

-spec v2_start_bridge_to_all_nodes([node()], key(), key()) ->
    emqx_rpc:erpc_multicall().
v2_start_bridge_to_all_nodes(Nodes, BridgeType, BridgeName) ->
@@ -250,7 +250,7 @@ is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) ->
            false;
        _ ->
            {true, #{
                schema_modle => Module,
                schema_module => Module,
                type_name => TypeName,
                missing_fields => MissingFileds
            }}
@@ -56,6 +56,7 @@
-define(CONNECTOR(Name), ?KAFKA_CONNECTOR(Name, ?KAFKA_BOOTSTRAP_HOST)).
-define(CONNECTOR, ?CONNECTOR(?CONNECTOR_NAME)).

-define(MQTT_LOCAL_TOPIC, <<"mqtt/local/topic">>).
-define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))).
-define(BRIDGE_TYPE_STR, "kafka_producer").
-define(BRIDGE_TYPE, <<?BRIDGE_TYPE_STR>>).

@@ -93,7 +94,7 @@
        <<"required_acks">> => <<"all_isr">>,
        <<"topic">> => <<"kafka-topic">>
    },
    <<"local_topic">> => <<"mqtt/local/topic">>,
    <<"local_topic">> => ?MQTT_LOCAL_TOPIC,
    <<"resource_opts">> => #{
        <<"health_check_interval">> => <<"32s">>
    }

@@ -105,48 +106,6 @@
).
-define(KAFKA_BRIDGE_UPDATE(Name), ?KAFKA_BRIDGE_UPDATE(Name, ?CONNECTOR_NAME)).

%% -define(BRIDGE_TYPE_MQTT, <<"mqtt">>).
%% -define(MQTT_BRIDGE(SERVER, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_MQTT)#{
%%     <<"server">> => SERVER,
%%     <<"username">> => <<"user1">>,
%%     <<"password">> => <<"">>,
%%     <<"proto_ver">> => <<"v5">>,
%%     <<"egress">> => #{
%%         <<"remote">> => #{
%%             <<"topic">> => <<"emqx/${topic}">>,
%%             <<"qos">> => <<"${qos}">>,
%%             <<"retain">> => false
%%         }
%%     }
%% }).
%% -define(MQTT_BRIDGE(SERVER), ?MQTT_BRIDGE(SERVER, <<"mqtt_egress_test_bridge">>)).

%% -define(BRIDGE_TYPE_HTTP, <<"kafka">>).
%% -define(HTTP_BRIDGE(URL, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_HTTP)#{
%%     <<"url">> => URL,
%%     <<"local_topic">> => <<"emqx_webhook/#">>,
%%     <<"method">> => <<"post">>,
%%     <<"body">> => <<"${payload}">>,
%%     <<"headers">> => #{
%%         % NOTE
%%         % The Pascal-Case is important here.
%%         % The reason is kinda ridiculous: `emqx_bridge_resource:create_dry_run/2` converts
%%         % bridge config keys into atoms, and the atom 'Content-Type' exists in the ERTS
%%         % when this happens (while the 'content-type' does not).
%%         <<"Content-Type">> => <<"application/json">>
%%     }
%% }).
%% -define(HTTP_BRIDGE(URL), ?HTTP_BRIDGE(URL, ?BRIDGE_NAME)).

%% -define(URL(PORT, PATH),
%%     list_to_binary(
%%         io_lib:format(
%%             "http://localhost:~s/~s",
%%             [integer_to_list(PORT), PATH]
%%         )
%%     )
%% ).

-define(APPSPECS, [
    emqx_conf,
    emqx,

@@ -166,7 +125,7 @@
all() ->
    [
        {group, single},
        %{group, cluster_later_join},
        {group, cluster_later_join},
        {group, cluster}
    ].
-else.

@@ -182,7 +141,7 @@ groups() ->
        t_fix_broken_bridge_config
    ],
    ClusterLaterJoinOnlyTCs = [
        % t_cluster_later_join_metrics
        t_cluster_later_join_metrics
    ],
    [
        {single, [], AllTCs -- ClusterLaterJoinOnlyTCs},

@@ -202,9 +161,9 @@ end_per_suite(_Config) ->
init_per_group(cluster = Name, Config) ->
    Nodes = [NodePrimary | _] = mk_cluster(Name, Config),
    init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
%% init_per_group(cluster_later_join = Name, Config) ->
%%     Nodes = [NodePrimary | _] = mk_cluster(Name, Config, #{join_to => undefined}),
%%     init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
init_per_group(cluster_later_join = Name, Config) ->
    Nodes = [NodePrimary | _] = mk_cluster(Name, Config, #{join_to => undefined}),
    init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
init_per_group(Name, Config) ->
    WorkDir = filename:join(?config(priv_dir, Config), Name),
    Apps = emqx_cth_suite:start(?APPSPECS ++ [?APPSPEC_DASHBOARD], #{work_dir => WorkDir}),

@@ -1041,6 +1000,143 @@ t_bad_name(Config) ->
    ),
    ok.

t_metrics(Config) ->
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),

    ActionName = ?BRIDGE_NAME,
    ?assertMatch(
        {ok, 201, _},
        request_json(
            post,
            uri([?ROOT]),
            ?KAFKA_BRIDGE(?BRIDGE_NAME),
            Config
        )
    ),

    ActionID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ActionName),

    ?assertMatch(
        {ok, 200, #{
            <<"metrics">> := #{<<"matched">> := 0},
            <<"node_metrics">> := [#{<<"metrics">> := #{<<"matched">> := 0}} | _]
        }},
        request_json(get, uri([?ROOT, ActionID, "metrics"]), Config)
    ),

    {ok, 200, Bridge} = request_json(get, uri([?ROOT, ActionID]), Config),
    ?assertNot(maps:is_key(<<"metrics">>, Bridge)),
    ?assertNot(maps:is_key(<<"node_metrics">>, Bridge)),

    Body = <<"my msg">>,
    _ = publish_message(?MQTT_LOCAL_TOPIC, Body, Config),

    %% check for non-empty bridge metrics
    ?retry(
        _Sleep0 = 200,
        _Retries0 = 20,
        ?assertMatch(
            {ok, 200, #{
                <<"metrics">> := #{<<"matched">> := 1},
                <<"node_metrics">> := [#{<<"metrics">> := #{<<"matched">> := 1}} | _]
            }},
            request_json(get, uri([?ROOT, ActionID, "metrics"]), Config)
        )
    ),

    %% check for absence of metrics when listing all bridges
    {ok, 200, Bridges} = request_json(get, uri([?ROOT]), Config),
    ?assertNotMatch(
        [
            #{
                <<"metrics">> := #{},
                <<"node_metrics">> := [_ | _]
            }
        ],
        Bridges
    ),
    ok.

t_reset_metrics(Config) ->
    %% assert there's no bridges at first
    {ok, 200, []} = request_json(get, uri([?ROOT]), Config),

    ActionName = ?BRIDGE_NAME,
    ?assertMatch(
        {ok, 201, _},
        request_json(
            post,
            uri([?ROOT]),
            ?KAFKA_BRIDGE(?BRIDGE_NAME),
            Config
        )
    ),
    ActionID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ActionName),

    Body = <<"my msg">>,
    _ = publish_message(?MQTT_LOCAL_TOPIC, Body, Config),
    ?retry(
        _Sleep0 = 200,
        _Retries0 = 20,
        ?assertMatch(
            {ok, 200, #{
                <<"metrics">> := #{<<"matched">> := 1},
                <<"node_metrics">> := [#{<<"metrics">> := #{}} | _]
            }},
            request_json(get, uri([?ROOT, ActionID, "metrics"]), Config)
        )
    ),

    {ok, 204, <<>>} = request(put, uri([?ROOT, ActionID, "metrics", "reset"]), Config),

    ?retry(
        _Sleep0 = 200,
        _Retries0 = 20,
        ?assertMatch(
            {ok, 200, #{
                <<"metrics">> := #{<<"matched">> := 0},
                <<"node_metrics">> := [#{<<"metrics">> := #{}} | _]
            }},
            request_json(get, uri([?ROOT, ActionID, "metrics"]), Config)
        )
    ),

    ok.

t_cluster_later_join_metrics(Config) ->
    [PrimaryNode, OtherNode | _] = ?config(cluster_nodes, Config),
    Name = ?BRIDGE_NAME,
    ActionParams = ?KAFKA_BRIDGE(Name),
    ActionID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
    ?check_trace(
        begin
            %% Create a bridge on only one of the nodes.
            ?assertMatch({ok, 201, _}, request_json(post, uri([?ROOT]), ActionParams, Config)),
            %% Pre-condition.
            ?assertMatch(
                {ok, 200, #{
                    <<"metrics">> := #{<<"success">> := _},
                    <<"node_metrics">> := [#{<<"metrics">> := #{}} | _]
                }},
                request_json(get, uri([?ROOT, ActionID, "metrics"]), Config)
            ),
            %% Now join the other node join with the api node.
            ok = erpc:call(OtherNode, ekka, join, [PrimaryNode]),
            %% Check metrics; shouldn't crash even if the bridge is not
            %% ready on the node that just joined the cluster.
            ?assertMatch(
                {ok, 200, #{
                    <<"metrics">> := #{<<"success">> := _},
                    <<"node_metrics">> := [#{<<"metrics">> := #{}}, #{<<"metrics">> := #{}} | _]
                }},
                request_json(get, uri([?ROOT, ActionID, "metrics"]), Config)
            ),
            ok
        end,
        []
    ),
    ok.

%%% helpers
listen_on_random_port() ->
    SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}],
@@ -31,8 +31,8 @@

-import(hoconsc, [mk/2, enum/1, ref/2]).

-define(AEH_CONNECTOR_TYPE, azure_event_hub_producer).
-define(AEH_CONNECTOR_TYPE_BIN, <<"azure_event_hub_producer">>).
-define(CONNECTOR_TYPE, azure_event_hub_producer).
-define(CONNECTOR_TYPE_BIN, <<"azure_event_hub_producer">>).

%%-------------------------------------------------------------------------------------------------
%% `hocon_schema' API

@@ -42,18 +42,17 @@ namespace() -> "bridge_azure_event_hub".

roots() -> ["config_producer"].

fields("put_connector") ->
fields(Field) when
    Field == "get_connector";
    Field == "put_connector";
    Field == "post_connector"
->
    Fields = override(
        emqx_bridge_kafka:fields("put_connector"),
        connector_overrides()
    ),
    override_documentations(Fields);
fields("get_connector") ->
    emqx_bridge_schema:status_fields() ++
        fields("post_connector");
fields("post_connector") ->
    Fields = override(
        emqx_bridge_kafka:fields("post_connector"),
        emqx_connector_schema:api_fields(
            Field,
            ?CONNECTOR_TYPE,
            emqx_bridge_kafka:kafka_connector_config_fields()
        ),
        connector_overrides()
    ),
    override_documentations(Fields);

@@ -170,7 +169,7 @@ struct_names() ->
bridge_v2_examples(Method) ->
    [
        #{
            ?AEH_CONNECTOR_TYPE_BIN => #{
            ?CONNECTOR_TYPE_BIN => #{
                summary => <<"Azure Event Hub Action">>,
                value => values({Method, bridge_v2})
            }

@@ -180,7 +179,7 @@ bridge_v2_examples(Method) ->
connector_examples(Method) ->
    [
        #{
            ?AEH_CONNECTOR_TYPE_BIN => #{
            ?CONNECTOR_TYPE_BIN => #{
                summary => <<"Azure Event Hub Connector">>,
                value => values({Method, connector})
            }

@@ -197,6 +196,20 @@ conn_bridge_examples(Method) ->
        }
    ].

values({get, connector}) ->
    maps:merge(
        #{
            status => <<"connected">>,
            node_status => [
                #{
                    node => <<"emqx@localhost">>,
                    status => <<"connected">>
                }
            ],
            actions => [<<"my_action">>]
        },
        values({post, connector})
    );
values({get, AEHType}) ->
    maps:merge(
        #{

@@ -217,7 +230,7 @@ values({post, bridge_v2}) ->
            enable => true,
            connector => <<"my_azure_event_hub_producer_connector">>,
            name => <<"my_azure_event_hub_producer_action">>,
            type => ?AEH_CONNECTOR_TYPE_BIN
            type => ?CONNECTOR_TYPE_BIN
        }
    );
values({post, connector}) ->

@@ -225,7 +238,7 @@ values({post, connector}) ->
        values(common_config),
        #{
            name => <<"my_azure_event_hub_producer_connector">>,
            type => ?AEH_CONNECTOR_TYPE_BIN,
            type => ?CONNECTOR_TYPE_BIN,
            ssl => #{
                enable => true,
                server_name_indication => <<"auto">>,

@@ -358,7 +371,7 @@ connector_overrides() ->
            }
        ),
        type => mk(
            ?AEH_CONNECTOR_TYPE,
            ?CONNECTOR_TYPE,
            #{
                required => true,
                desc => ?DESC("connector_type")

@@ -414,7 +427,7 @@ bridge_v2_overrides() ->
        }),
        ssl => mk(ref(ssl_client_opts), #{default => #{<<"enable">> => true}}),
        type => mk(
            ?AEH_CONNECTOR_TYPE,
            ?CONNECTOR_TYPE,
            #{
                required => true,
                desc => ?DESC("bridge_v2_type")
|
|||
|
||||
-import(hoconsc, [mk/2, enum/1, ref/2]).
|
||||
|
||||
-define(CONFLUENT_CONNECTOR_TYPE, confluent_producer).
|
||||
-define(CONFLUENT_CONNECTOR_TYPE_BIN, <<"confluent_producer">>).
|
||||
-define(CONNECTOR_TYPE, confluent_producer).
|
||||
-define(CONNECTOR_TYPE_BIN, <<"confluent_producer">>).
|
||||
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
%% `hocon_schema' API
|
||||
|
@ -41,18 +41,17 @@ namespace() -> "confluent".
|
|||
|
||||
roots() -> ["config_producer"].
|
||||
|
||||
fields("put_connector") ->
|
||||
fields(Field) when
|
||||
Field == "get_connector";
|
||||
Field == "put_connector";
|
||||
Field == "post_connector"
|
||||
->
|
||||
Fields = override(
|
||||
emqx_bridge_kafka:fields("put_connector"),
|
||||
connector_overrides()
|
||||
),
|
||||
override_documentations(Fields);
|
||||
fields("get_connector") ->
|
||||
emqx_bridge_schema:status_fields() ++
|
||||
fields("post_connector");
|
||||
fields("post_connector") ->
|
||||
Fields = override(
|
||||
emqx_bridge_kafka:fields("post_connector"),
|
||||
emqx_connector_schema:api_fields(
|
||||
Field,
|
||||
?CONNECTOR_TYPE,
|
||||
emqx_bridge_kafka:kafka_connector_config_fields()
|
||||
),
|
||||
connector_overrides()
|
||||
),
|
||||
override_documentations(Fields);
|
||||
|
@ -155,7 +154,7 @@ struct_names() ->
|
|||
bridge_v2_examples(Method) ->
|
||||
[
|
||||
#{
|
||||
?CONFLUENT_CONNECTOR_TYPE_BIN => #{
|
||||
?CONNECTOR_TYPE_BIN => #{
|
||||
summary => <<"Confluent Action">>,
|
||||
value => values({Method, bridge_v2})
|
||||
}
|
||||
|
@ -165,13 +164,27 @@ bridge_v2_examples(Method) ->
|
|||
connector_examples(Method) ->
|
||||
[
|
||||
#{
|
||||
?CONFLUENT_CONNECTOR_TYPE_BIN => #{
|
||||
?CONNECTOR_TYPE_BIN => #{
|
||||
summary => <<"Confluent Connector">>,
|
||||
value => values({Method, connector})
|
||||
}
|
||||
}
|
||||
].
|
||||
|
||||
values({get, connector}) ->
|
||||
maps:merge(
|
||||
#{
|
||||
status => <<"connected">>,
|
||||
node_status => [
|
||||
#{
|
||||
node => <<"emqx@localhost">>,
|
||||
status => <<"connected">>
|
||||
}
|
||||
],
|
||||
actions => [<<"my_action">>]
|
||||
},
|
||||
values({post, connector})
|
||||
);
|
||||
values({get, ConfluentType}) ->
|
||||
maps:merge(
|
||||
#{
|
||||
|
@ -192,7 +205,7 @@ values({post, bridge_v2}) ->
|
|||
enable => true,
|
||||
connector => <<"my_confluent_producer_connector">>,
|
||||
name => <<"my_confluent_producer_action">>,
|
||||
type => ?CONFLUENT_CONNECTOR_TYPE_BIN
|
||||
type => ?CONNECTOR_TYPE_BIN
|
||||
}
|
||||
);
|
||||
values({post, connector}) ->
|
||||
|
@ -200,7 +213,7 @@ values({post, connector}) ->
|
|||
values(common_config),
|
||||
#{
|
||||
name => <<"my_confluent_producer_connector">>,
|
||||
type => ?CONFLUENT_CONNECTOR_TYPE_BIN,
|
||||
type => ?CONNECTOR_TYPE_BIN,
|
||||
ssl => #{
|
||||
enable => true,
|
||||
server_name_indication => <<"auto">>,
|
||||
|
@ -320,7 +333,7 @@ connector_overrides() ->
|
|||
}
|
||||
),
|
||||
type => mk(
|
||||
?CONFLUENT_CONNECTOR_TYPE,
|
||||
?CONNECTOR_TYPE,
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC("connector_type")
|
||||
|
@ -342,7 +355,7 @@ bridge_v2_overrides() ->
|
|||
}
|
||||
}),
|
||||
type => mk(
|
||||
?CONFLUENT_CONNECTOR_TYPE,
|
||||
?CONNECTOR_TYPE,
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC("bridge_v2_type")
|
||||
|
|
|
@ -24,6 +24,8 @@
|
|||
connector_examples/1
|
||||
]).
|
||||
|
||||
-define(CONNECTOR_TYPE, gcp_pubsub_producer).
|
||||
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
%% `hocon_schema' API
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
|
@ -68,8 +70,7 @@ fields(action_parameters) ->
|
|||
fields("config_connector") ->
|
||||
%% FIXME
|
||||
emqx_connector_schema:common_fields() ++
|
||||
emqx_bridge_gcp_pubsub:fields(connector_config) ++
|
||||
emqx_resource_schema:fields("resource_opts");
|
||||
connector_config_fields();
|
||||
%%=========================================
|
||||
%% HTTP API fields: action
|
||||
%%=========================================
|
||||
|
@ -82,12 +83,16 @@ fields("put_bridge_v2") ->
|
|||
%%=========================================
|
||||
%% HTTP API fields: connector
|
||||
%%=========================================
|
||||
fields("get_connector") ->
|
||||
emqx_bridge_schema:status_fields() ++ fields("post_connector");
|
||||
fields("post_connector") ->
|
||||
[type_field(), name_field() | fields("put_connector")];
|
||||
fields("put_connector") ->
|
||||
fields("config_connector").
|
||||
fields(Field) when
|
||||
Field == "get_connector";
|
||||
Field == "put_connector";
|
||||
Field == "post_connector"
|
||||
->
|
||||
emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, connector_config_fields()).
|
||||
|
||||
connector_config_fields() ->
|
||||
emqx_bridge_gcp_pubsub:fields(connector_config) ++
|
||||
emqx_resource_schema:fields("resource_opts").
|
||||
|
||||
desc("config_connector") ->
|
||||
?DESC("config_connector");
|
||||
|
@ -177,7 +182,7 @@ action_example(put) ->
|
|||
|
||||
connector_example(get) ->
|
||||
maps:merge(
|
||||
connector_example(put),
|
||||
connector_example(post),
|
||||
#{
|
||||
status => <<"connected">>,
|
||||
node_status => [
|
||||
|
@ -185,7 +190,8 @@ connector_example(get) ->
|
|||
node => <<"emqx@localhost">>,
|
||||
status => <<"connected">>
|
||||
}
|
||||
]
|
||||
],
|
||||
actions => [<<"my_action">>]
|
||||
}
|
||||
);
|
||||
connector_example(post) ->
|
||||
|
|
|
@ -33,10 +33,13 @@
|
|||
]).
|
||||
|
||||
-export([
|
||||
kafka_connector_config_fields/0,
|
||||
kafka_producer_converter/2,
|
||||
producer_strategy_key_validator/1
|
||||
]).
|
||||
|
||||
-define(CONNECTOR_TYPE, kafka_producer).
|
||||
|
||||
%% -------------------------------------------------------------------------------------------------
|
||||
%% api
|
||||
|
||||
|
@ -76,6 +79,20 @@ conn_bridge_examples(Method) ->
|
|||
}
|
||||
].
|
||||
|
||||
values({get, connector}) ->
|
||||
maps:merge(
|
||||
#{
|
||||
status => <<"connected">>,
|
||||
node_status => [
|
||||
#{
|
||||
node => <<"emqx@localhost">>,
|
||||
status => <<"connected">>
|
||||
}
|
||||
],
|
||||
actions => [<<"my_action">>]
|
||||
},
|
||||
values({post, connector})
|
||||
);
|
||||
values({get, KafkaType}) ->
|
||||
maps:merge(
|
||||
#{
|
||||
|
@ -247,6 +264,12 @@ namespace() -> "bridge_kafka".
|
|||
|
||||
roots() -> ["config_consumer", "config_producer", "config_bridge_v2"].
|
||||
|
||||
fields(Field) when
|
||||
Field == "get_connector";
|
||||
Field == "put_connector";
|
||||
Field == "post_connector"
|
||||
->
|
||||
emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, kafka_connector_config_fields());
|
||||
fields("post_" ++ Type) ->
|
||||
[type_field(Type), name_field() | fields("config_" ++ Type)];
|
||||
fields("put_" ++ Type) ->
|
||||
|
@ -560,9 +583,11 @@ desc(Name) ->
|
|||
?DESC(Name).
|
||||
|
||||
connector_config_fields() ->
|
||||
emqx_connector_schema:common_fields() ++
|
||||
kafka_connector_config_fields().
|
||||
|
||||
kafka_connector_config_fields() ->
|
||||
[
|
||||
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
|
||||
{description, emqx_schema:description_schema()},
|
||||
{bootstrap_hosts,
|
||||
mk(
|
||||
binary(),
|
||||
|
|
|
@ -251,7 +251,7 @@ do_handle_message(Message, State) ->
|
|||
Payload = render(FullMessage, PayloadTemplate),
|
||||
MQTTTopic = render(FullMessage, MQTTTopicTemplate),
|
||||
MQTTMessage = emqx_message:make(ResourceId, MQTTQoS, MQTTTopic, Payload),
|
||||
_ = emqx:publish(MQTTMessage),
|
||||
_ = emqx_broker:safe_publish(MQTTMessage),
|
||||
emqx_hooks:run(Hookpoint, [FullMessage]),
|
||||
emqx_resource_metrics:received_inc(ResourceId),
|
||||
%% note: just `ack' does not commit the offset to the
|
||||
|
|
|
@ -22,6 +22,8 @@
|
|||
connector_examples/1
|
||||
]).
|
||||
|
||||
-define(CONNECTOR_TYPE, matrix).
|
||||
|
||||
%% -------------------------------------------------------------------------------------------------
|
||||
%% api
|
||||
|
||||
|
@ -60,12 +62,12 @@ fields("get_bridge_v2") ->
|
|||
emqx_bridge_pgsql:fields(pgsql_action);
|
||||
fields("post_bridge_v2") ->
|
||||
emqx_bridge_pgsql:fields(pgsql_action);
|
||||
fields("put_connector") ->
|
||||
emqx_bridge_pgsql:fields("config_connector");
|
||||
fields("get_connector") ->
|
||||
emqx_bridge_pgsql:fields("config_connector");
|
||||
fields("post_connector") ->
|
||||
emqx_bridge_pgsql:fields("config_connector");
|
||||
fields(Field) when
|
||||
Field == "get_connector";
|
||||
Field == "put_connector";
|
||||
Field == "post_connector"
|
||||
->
|
||||
emqx_postgresql_connector_schema:fields({Field, ?CONNECTOR_TYPE});
|
||||
fields(Method) ->
|
||||
emqx_bridge_pgsql:fields(Method).
|
||||
|
||||
|
|
|
@ -25,6 +25,8 @@
|
|||
desc/1
|
||||
]).
|
||||
|
||||
-define(CONNECTOR_TYPE, mongodb).
|
||||
|
||||
%%=================================================================================================
|
||||
%% hocon_schema API
|
||||
%%=================================================================================================
|
||||
|
@ -51,16 +53,18 @@ fields("config") ->
|
|||
];
|
||||
fields("config_connector") ->
|
||||
emqx_connector_schema:common_fields() ++
|
||||
[
|
||||
{parameters,
|
||||
mk(
|
||||
hoconsc:union([
|
||||
ref(emqx_mongodb, "connector_" ++ T)
|
||||
|| T <- ["single", "sharded", "rs"]
|
||||
]),
|
||||
#{required => true, desc => ?DESC("mongodb_parameters")}
|
||||
)}
|
||||
] ++ emqx_mongodb:fields(mongodb);
|
||||
fields("connection_fields");
|
||||
fields("connection_fields") ->
|
||||
[
|
||||
{parameters,
|
||||
mk(
|
||||
hoconsc:union([
|
||||
ref(emqx_mongodb, "connector_" ++ T)
|
||||
|| T <- ["single", "sharded", "rs"]
|
||||
]),
|
||||
#{required => true, desc => ?DESC("mongodb_parameters")}
|
||||
)}
|
||||
] ++ emqx_mongodb:fields(mongodb);
|
||||
fields("creation_opts") ->
|
||||
%% so far, mongodb connector does not support batching
|
||||
%% but we cannot delete this field due to compatibility reasons
|
||||
|
@ -97,14 +101,12 @@ fields(mongodb_sharded) ->
|
|||
emqx_mongodb:fields(sharded) ++ fields("config");
|
||||
fields(mongodb_single) ->
|
||||
emqx_mongodb:fields(single) ++ fields("config");
|
||||
fields("post_connector") ->
|
||||
type_and_name_fields(mongodb) ++
|
||||
fields("config_connector");
|
||||
fields("put_connector") ->
|
||||
fields("config_connector");
|
||||
fields("get_connector") ->
|
||||
emqx_bridge_schema:status_fields() ++
|
||||
fields("post_connector");
|
||||
fields(Field) when
|
||||
Field == "get_connector";
|
||||
Field == "put_connector";
|
||||
Field == "post_connector"
|
||||
->
|
||||
emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, fields("connection_fields"));
|
||||
fields("get_bridge_v2") ->
|
||||
emqx_bridge_schema:status_fields() ++
|
||||
fields("post_bridge_v2");
|
||||
|
@ -319,7 +321,8 @@ method_values(Type, get) ->
|
|||
node => <<"emqx@localhost">>,
|
||||
status => <<"connected">>
|
||||
}
|
||||
]
|
||||
],
|
||||
actions => [<<"my_action">>]
|
||||
}
|
||||
);
|
||||
method_values(_Type, put) ->
|
||||
|
|
|
@@ -35,6 +35,7 @@

-import(hoconsc, [mk/2, enum/1, ref/2]).

-define(CONNECTOR_TYPE, syskeeper_forwarder).
-define(SYSKEEPER_HOST_OPTIONS, #{
    default_port => 9092
}).

@@ -62,7 +63,8 @@ values(get) ->
                    node => <<"emqx@localhost">>,
                    status => <<"connected">>
                }
            ]
            ],
            actions => [<<"my_action">>]
        },
        values(post)
    );

@@ -89,9 +91,9 @@ roots() ->
    [{config, #{type => hoconsc:ref(?MODULE, config)}}].

fields(config) ->
    emqx_connector_schema:common_fields() ++ fields("connection_fields");
fields("connection_fields") ->
    [
        {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
        {description, emqx_schema:description_schema()},
        {server, server()},
        {ack_mode,
            mk(

@@ -110,12 +112,14 @@ fields(config) ->
                emqx_connector_schema_lib:pool_size(Other)
        end}
    ];
fields("post") ->
    [type_field(), name_field() | fields(config)];
fields("put") ->
    fields(config);
fields("get") ->
    emqx_bridge_schema:status_fields() ++ fields("post").
fields(Field) when
    Field == "get";
    Field == "post";
    Field == "put"
->
    emqx_connector_schema:api_fields(
        Field ++ "_connector", ?CONNECTOR_TYPE, fields("connection_fields")
    ).

desc(config) ->
    ?DESC("desc_config");

@@ -128,12 +132,6 @@ server() ->
    Meta = #{desc => ?DESC("server")},
    emqx_schema:servers_sc(Meta, ?SYSKEEPER_HOST_OPTIONS).

type_field() ->
    {type, mk(enum([syskeeper_forwarder]), #{required => true, desc => ?DESC("desc_type")})}.

name_field() ->
    {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.

%% -------------------------------------------------------------------------------------------------
%% `emqx_resource' API
|
|||
desc/1
|
||||
]).
|
||||
|
||||
-define(CONNECTOR_TYPE, syskeeper_proxy).
|
||||
|
||||
-define(SYSKEEPER_HOST_OPTIONS, #{
|
||||
default_port => 9092
|
||||
}).
|
||||
|
@ -47,7 +49,8 @@ values(get) ->
|
|||
node => <<"emqx@localhost">>,
|
||||
status => <<"connected">>
|
||||
}
|
||||
]
|
||||
],
|
||||
actions => [<<"my_action">>]
|
||||
},
|
||||
values(post)
|
||||
);
|
||||
|
@ -74,9 +77,9 @@ namespace() -> "connector_syskeeper_proxy".
|
|||
roots() -> [].
|
||||
|
||||
fields(config) ->
|
||||
emqx_connector_schema:common_fields() ++ fields("connection_fields");
|
||||
fields("connection_fields") ->
|
||||
[
|
||||
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
|
||||
{description, emqx_schema:description_schema()},
|
||||
{listen, listen()},
|
||||
{acceptors,
|
||||
mk(
|
||||
|
@ -89,12 +92,14 @@ fields(config) ->
|
|||
#{desc => ?DESC(handshake_timeout), default => <<"10s">>}
|
||||
)}
|
||||
];
|
||||
fields("post") ->
|
||||
[type_field(), name_field() | fields(config)];
|
||||
fields("put") ->
|
||||
fields(config);
|
||||
fields("get") ->
|
||||
emqx_bridge_schema:status_fields() ++ fields("post").
|
||||
fields(Field) when
|
||||
Field == "get";
|
||||
Field == "post";
|
||||
Field == "put"
|
||||
->
|
||||
emqx_connector_schema:api_fields(
|
||||
Field ++ "_connector", ?CONNECTOR_TYPE, fields("connection_fields")
|
||||
).
|
||||
|
||||
desc(config) ->
|
||||
?DESC("desc_config");
|
||||
|
@ -106,11 +111,3 @@ desc(_) ->
|
|||
listen() ->
|
||||
Meta = #{desc => ?DESC("listen")},
|
||||
emqx_schema:servers_sc(Meta, ?SYSKEEPER_HOST_OPTIONS).
|
||||
|
||||
%% -------------------------------------------------------------------------------------------------
|
||||
|
||||
type_field() ->
|
||||
{type, mk(enum([syskeeper_proxy]), #{required => true, desc => ?DESC("desc_type")})}.
|
||||
|
||||
name_field() ->
|
||||
{name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
|
||||
|
|
|
@ -22,6 +22,8 @@
|
|||
connector_examples/1
|
||||
]).
|
||||
|
||||
-define(CONNECTOR_TYPE, timescale).
|
||||
|
||||
%% -------------------------------------------------------------------------------------------------
|
||||
%% api
|
||||
|
||||
|
@ -44,7 +46,7 @@ roots() -> [].
|
|||
fields("post") ->
|
||||
emqx_bridge_pgsql:fields("post", timescale);
|
||||
fields("config_connector") ->
|
||||
emqx_bridge_pgsql:fields("config_connector");
|
||||
emqx_postgresql_connector_schema:fields("config_connector");
|
||||
fields(action) ->
|
||||
{timescale,
|
||||
hoconsc:mk(
|
||||
|
@ -60,12 +62,12 @@ fields("get_bridge_v2") ->
|
|||
emqx_bridge_pgsql:fields(pgsql_action);
|
||||
fields("post_bridge_v2") ->
|
||||
emqx_bridge_pgsql:fields(pgsql_action);
|
||||
fields("put_connector") ->
|
||||
emqx_bridge_pgsql:fields("config_connector");
|
||||
fields("get_connector") ->
|
||||
emqx_bridge_pgsql:fields("config_connector");
|
||||
fields("post_connector") ->
|
||||
emqx_bridge_pgsql:fields("config_connector");
|
||||
fields(Field) when
|
||||
Field == "get_connector";
|
||||
Field == "put_connector";
|
||||
Field == "post_connector"
|
||||
->
|
||||
emqx_postgresql_connector_schema:fields({Field, ?CONNECTOR_TYPE});
|
||||
fields(Method) ->
|
||||
emqx_bridge_pgsql:fields(Method).
|
||||
|
||||
|
|
|
@ -77,7 +77,8 @@
|
|||
|
||||
%% Callback to upgrade config after loaded from config file but before validation.
|
||||
upgrade_raw_conf(RawConf) ->
|
||||
emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2(RawConf).
|
||||
RawConf1 = emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2(RawConf),
|
||||
emqx_otel_schema:upgrade_legacy_metrics(RawConf1).
|
||||
|
||||
namespace() -> emqx.
|
||||
|
||||
|
@ -1189,37 +1190,44 @@ tr_prometheus_collectors(Conf) ->
|
|||
emqx_prometheus,
|
||||
emqx_prometheus_mria
|
||||
%% builtin vm collectors
|
||||
| tr_vm_dist_collector(Conf) ++
|
||||
tr_mnesia_collector(Conf) ++
|
||||
tr_vm_statistics_collector(Conf) ++
|
||||
tr_vm_system_info_collector(Conf) ++
|
||||
tr_vm_memory_collector(Conf) ++
|
||||
tr_vm_msacc_collector(Conf)
|
||||
| prometheus_collectors(Conf)
|
||||
].
|
||||
|
||||
tr_vm_dist_collector(Conf) ->
|
||||
Enabled = conf_get("prometheus.vm_dist_collector", Conf, disabled),
|
||||
collector_enabled(Enabled, prometheus_vm_dist_collector).
|
||||
prometheus_collectors(Conf) ->
|
||||
case conf_get("prometheus.enable_basic_auth", Conf, undefined) of
|
||||
%% legacy
|
||||
undefined ->
|
||||
tr_collector("prometheus.vm_dist_collector", prometheus_vm_dist_collector, Conf) ++
|
||||
tr_collector("prometheus.mnesia_collector", prometheus_mnesia_collector, Conf) ++
|
||||
tr_collector(
|
||||
"prometheus.vm_statistics_collector", prometheus_vm_statistics_collector, Conf
|
||||
) ++
|
||||
tr_collector(
|
||||
"prometheus.vm_system_info_collector", prometheus_vm_system_info_collector, Conf
|
||||
) ++
|
||||
tr_collector("prometheus.vm_memory_collector", prometheus_vm_memory_collector, Conf) ++
|
||||
tr_collector("prometheus.vm_msacc_collector", prometheus_vm_msacc_collector, Conf);
|
||||
%% new
|
||||
_ ->
|
||||
tr_collector("prometheus.collectors.vm_dist", prometheus_vm_dist_collector, Conf) ++
|
||||
tr_collector("prometheus.collectors.mnesia", prometheus_mnesia_collector, Conf) ++
|
||||
tr_collector(
|
||||
"prometheus.collectors.vm_statistics", prometheus_vm_statistics_collector, Conf
|
||||
) ++
|
||||
tr_collector(
|
||||
"prometheus.collectors.vm_system_info",
|
||||
prometheus_vm_system_info_collector,
|
||||
Conf
|
||||
) ++
|
||||
tr_collector(
|
||||
"prometheus.collectors.vm_memory", prometheus_vm_memory_collector, Conf
|
||||
) ++
|
||||
tr_collector("prometheus.collectors.vm_msacc", prometheus_vm_msacc_collector, Conf)
|
||||
end.
|
||||
|
||||
tr_mnesia_collector(Conf) ->
|
||||
Enabled = conf_get("prometheus.mnesia_collector", Conf, disabled),
|
||||
collector_enabled(Enabled, prometheus_mnesia_collector).
|
||||
|
||||
tr_vm_statistics_collector(Conf) ->
|
||||
Enabled = conf_get("prometheus.vm_statistics_collector", Conf, disabled),
|
||||
collector_enabled(Enabled, prometheus_vm_statistics_collector).
|
||||
|
||||
tr_vm_system_info_collector(Conf) ->
|
||||
Enabled = conf_get("prometheus.vm_system_info_collector", Conf, disabled),
|
||||
collector_enabled(Enabled, prometheus_vm_system_info_collector).
|
||||
|
||||
tr_vm_memory_collector(Conf) ->
|
||||
Enabled = conf_get("prometheus.vm_memory_collector", Conf, disabled),
|
||||
collector_enabled(Enabled, prometheus_vm_memory_collector).
|
||||
|
||||
tr_vm_msacc_collector(Conf) ->
|
||||
Enabled = conf_get("prometheus.vm_msacc_collector", Conf, disabled),
|
||||
collector_enabled(Enabled, prometheus_vm_msacc_collector).
|
||||
tr_collector(Key, Collect, Conf) ->
|
||||
Enabled = conf_get(Key, Conf, disabled),
|
||||
collector_enabled(Enabled, Collect).
|
||||
|
||||
collector_enabled(enabled, Collector) -> [Collector];
|
||||
collector_enabled(disabled, _) -> [].
|
||||
|
|
|
@ -637,15 +637,20 @@ format_resource(
|
|||
).
|
||||
|
||||
format_resource_data(ResData) ->
|
||||
maps:fold(fun format_resource_data/3, #{}, maps:with([status, error], ResData)).
|
||||
maps:fold(fun format_resource_data/3, #{}, maps:with([status, error, added_channels], ResData)).
|
||||
|
||||
format_resource_data(error, undefined, Result) ->
|
||||
Result;
|
||||
format_resource_data(error, Error, Result) ->
|
||||
Result#{status_reason => emqx_utils:readable_error_msg(Error)};
|
||||
format_resource_data(added_channels, Channels, Result) ->
|
||||
Result#{actions => lists:map(fun format_action/1, maps:keys(Channels))};
|
||||
format_resource_data(K, V, Result) ->
|
||||
Result#{K => V}.
|
||||
|
||||
format_action(ActionId) ->
|
||||
element(2, emqx_bridge_v2:parse_id(ActionId)).
|
||||
|
||||
is_ok(ok) ->
|
||||
ok;
|
||||
is_ok(OkResult = {ok, _}) ->
|
||||
|
|
|
@ -33,7 +33,12 @@
|
|||
-export([get_response/0, put_request/0, post_request/0]).
|
||||
|
||||
-export([connector_type_to_bridge_types/1]).
|
||||
-export([common_fields/0]).
|
||||
-export([
|
||||
api_fields/3,
|
||||
common_fields/0,
|
||||
status_and_actions_fields/0,
|
||||
type_and_name_fields/1
|
||||
]).
|
||||
|
||||
-export([resource_opts_fields/0, resource_opts_fields/1]).
|
||||
|
||||
|
@ -352,19 +357,87 @@ roots() ->
|
|||
end.
|
||||
|
||||
fields(connectors) ->
|
||||
[] ++ enterprise_fields_connectors().
|
||||
[] ++ enterprise_fields_connectors();
|
||||
fields("node_status") ->
|
||||
[
|
||||
node_name(),
|
||||
{"status", mk(status(), #{})},
|
||||
{"status_reason",
|
||||
mk(binary(), #{
|
||||
required => false,
|
||||
desc => ?DESC("desc_status_reason"),
|
||||
example => <<"Connection refused">>
|
||||
})}
|
||||
].
|
||||
|
||||
desc(connectors) ->
|
||||
?DESC("desc_connectors");
|
||||
desc("node_status") ->
|
||||
?DESC("desc_node_status");
|
||||
desc(_) ->
|
||||
undefined.
|
||||
|
||||
api_fields("get_connector", Type, Fields) ->
|
||||
lists:append(
|
||||
[
|
||||
type_and_name_fields(Type),
|
||||
common_fields(),
|
||||
status_and_actions_fields(),
|
||||
Fields
|
||||
]
|
||||
);
|
||||
api_fields("post_connector", Type, Fields) ->
|
||||
lists:append(
|
||||
[
|
||||
type_and_name_fields(Type),
|
||||
common_fields(),
|
||||
Fields
|
||||
]
|
||||
);
|
||||
api_fields("put_connector", _Type, Fields) ->
|
||||
lists:append(
|
||||
[
|
||||
common_fields(),
|
||||
Fields
|
||||
]
|
||||
).
|
||||
|
||||
common_fields() ->
|
||||
[
|
||||
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
|
||||
{description, emqx_schema:description_schema()}
|
||||
].
|
||||
|
||||
type_and_name_fields(ConnectorType) ->
|
||||
[
|
||||
{type, mk(ConnectorType, #{required => true, desc => ?DESC("desc_type")})},
|
||||
{name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}
|
||||
].
|
||||
|
||||
status_and_actions_fields() ->
|
||||
[
|
||||
{"status", mk(status(), #{desc => ?DESC("desc_status")})},
|
||||
{"status_reason",
|
||||
mk(binary(), #{
|
||||
required => false,
|
||||
desc => ?DESC("desc_status_reason"),
|
||||
example => <<"Connection refused">>
|
||||
})},
|
||||
{"node_status",
|
||||
mk(
|
||||
hoconsc:array(ref(?MODULE, "node_status")),
|
||||
#{desc => ?DESC("desc_node_status")}
|
||||
)},
|
||||
{"actions",
|
||||
mk(
|
||||
hoconsc:array(binary()),
|
||||
#{
|
||||
desc => ?DESC("connector_actions"),
|
||||
example => [<<"my_action">>]
|
||||
}
|
||||
)}
|
||||
].
|
||||
|
||||
resource_opts_fields() ->
|
||||
resource_opts_fields(_Overrides = []).
|
||||
|
||||
|
@ -422,12 +495,18 @@ is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) ->
|
|||
false;
|
||||
_ ->
|
||||
{true, #{
|
||||
schema_modle => Module,
|
||||
schema_module => Module,
|
||||
type_name => TypeName,
|
||||
missing_fields => MissingFileds
|
||||
}}
|
||||
end.
|
||||
|
||||
status() ->
|
||||
hoconsc:enum([connected, disconnected, connecting, inconsistent]).
|
||||
|
||||
node_name() ->
|
||||
{"node", mk(binary(), #{desc => ?DESC("desc_node_name"), example => "emqx@127.0.0.1"})}.
|
||||
|
||||
common_field_names() ->
|
||||
[
|
||||
enable, description
|
||||
|
|
|
@ -175,7 +175,8 @@ groups() ->
|
|||
AllTCs = emqx_common_test_helpers:all(?MODULE),
|
||||
SingleOnlyTests = [
|
||||
t_connectors_probe,
|
||||
t_fail_delete_with_action
|
||||
t_fail_delete_with_action,
|
||||
t_actions_field
|
||||
],
|
||||
ClusterLaterJoinOnlyTCs = [
|
||||
% t_cluster_later_join_metrics
|
||||
|
@ -256,15 +257,6 @@ end_per_testcase(TestCase, Config) ->
|
|||
ok.
|
||||
|
||||
-define(CONNECTOR_IMPL, dummy_connector_impl).
|
||||
init_mocks(t_fail_delete_with_action) ->
|
||||
init_mocks(common),
|
||||
meck:expect(?CONNECTOR_IMPL, on_add_channel, 4, {ok, connector_state}),
|
||||
meck:expect(?CONNECTOR_IMPL, on_remove_channel, 3, {ok, connector_state}),
|
||||
meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected),
|
||||
ok = meck:expect(?CONNECTOR_IMPL, on_get_channels, fun(ResId) ->
|
||||
emqx_bridge_v2:get_channels_for_connector(ResId)
|
||||
end),
|
||||
ok;
|
||||
init_mocks(_TestCase) ->
|
||||
meck:new(emqx_connector_ee_schema, [passthrough, no_link]),
|
||||
meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR_IMPL),
|
||||
|
@ -289,17 +281,25 @@ init_mocks(_TestCase) ->
|
|||
(_, _) -> connected
|
||||
end
|
||||
),
|
||||
meck:expect(?CONNECTOR_IMPL, on_add_channel, 4, {ok, connector_state}),
|
||||
meck:expect(?CONNECTOR_IMPL, on_remove_channel, 3, {ok, connector_state}),
|
||||
meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected),
|
||||
meck:expect(
|
||||
?CONNECTOR_IMPL,
|
||||
on_get_channels,
|
||||
fun(ResId) ->
|
||||
emqx_bridge_v2:get_channels_for_connector(ResId)
|
||||
end
|
||||
),
|
||||
[?CONNECTOR_IMPL, emqx_connector_ee_schema].
|
||||
|
||||
clear_resources(t_fail_delete_with_action) ->
|
||||
clear_resources(_) ->
|
||||
lists:foreach(
|
||||
fun(#{type := Type, name := Name}) ->
|
||||
ok = emqx_bridge_v2:remove(Type, Name)
|
||||
end,
|
||||
emqx_bridge_v2:list()
|
||||
),
|
||||
clear_resources(common);
|
||||
clear_resources(_) ->
|
||||
lists:foreach(
|
||||
fun(#{type := Type, name := Name}) ->
|
||||
ok = emqx_connector:remove(Type, Name)
|
||||
|
@ -738,6 +738,62 @@ t_create_with_bad_name(Config) ->
|
|||
?assertMatch(#{<<"kind">> := <<"validation_error">>}, Msg),
|
||||
ok.
|
||||
|
||||
t_actions_field(Config) ->
|
||||
Name = ?CONNECTOR_NAME,
|
||||
?assertMatch(
|
||||
{ok, 201, #{
|
||||
<<"type">> := ?CONNECTOR_TYPE,
|
||||
<<"name">> := Name,
|
||||
<<"enable">> := true,
|
||||
<<"status">> := <<"connected">>,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"actions">> := []
|
||||
}},
|
||||
request_json(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?KAFKA_CONNECTOR(Name),
|
||||
Config
|
||||
)
|
||||
),
|
||||
ConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, Name),
|
||||
BridgeName = ?BRIDGE_NAME,
|
||||
?assertMatch(
|
||||
{ok, 201, #{
|
||||
<<"type">> := ?BRIDGE_TYPE,
|
||||
<<"name">> := BridgeName,
|
||||
<<"enable">> := true,
|
||||
<<"status">> := <<"connected">>,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"connector">> := Name,
|
||||
<<"kafka">> := #{},
|
||||
<<"local_topic">> := _,
|
||||
<<"resource_opts">> := _
|
||||
}},
|
||||
request_json(
|
||||
post,
|
||||
uri(["actions"]),
|
||||
?KAFKA_BRIDGE(?BRIDGE_NAME),
|
||||
Config
|
||||
)
|
||||
),
|
||||
?assertMatch(
|
||||
{ok, 200, #{
|
||||
<<"type">> := ?CONNECTOR_TYPE,
|
||||
<<"name">> := Name,
|
||||
<<"enable">> := true,
|
||||
<<"status">> := <<"connected">>,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"actions">> := [BridgeName]
|
||||
}},
|
||||
request_json(
|
||||
get,
|
||||
uri(["connectors", ConnectorID]),
|
||||
Config
|
||||
)
|
||||
),
|
||||
ok.
|
||||
|
||||
t_fail_delete_with_action(Config) ->
|
||||
Name = ?CONNECTOR_NAME,
|
||||
?assertMatch(
|
||||
|
|
|
@ -185,7 +185,8 @@ fields(meta) ->
|
|||
schema_with_example(Type, Example) ->
|
||||
hoconsc:mk(Type, #{examples => #{<<"example">> => Example}}).
|
||||
|
||||
-spec schema_with_examples(hocon_schema:type(), map()) -> hocon_schema:field_schema_map().
|
||||
-spec schema_with_examples(hocon_schema:type(), map() | list(tuple())) ->
|
||||
hocon_schema:field_schema_map().
|
||||
schema_with_examples(Type, Examples) ->
|
||||
hoconsc:mk(Type, #{examples => #{<<"examples">> => Examples}}).
|
||||
|
||||
|
|
|
@ -127,9 +127,15 @@ fields(ssl_listener) ->
|
|||
)}
|
||||
];
|
||||
fields(ws_listener) ->
|
||||
ws_listener() ++ ws_opts(<<>>, <<>>);
|
||||
emqx_gateway_schema:ws_listener() ++
|
||||
[{websocket, sc(ref(websocket), #{})}];
|
||||
fields(wss_listener) ->
|
||||
wss_listener() ++ ws_opts(<<>>, <<>>);
|
||||
emqx_gateway_schema:wss_listener() ++
|
||||
[{websocket, sc(ref(websocket), #{})}];
|
||||
fields(websocket) ->
|
||||
DefaultPath = <<>>,
|
||||
SubProtocols = <<>>,
|
||||
emqx_gateway_schema:ws_opts(DefaultPath, SubProtocols);
|
||||
fields(udp_listener) ->
|
||||
[
|
||||
%% some special configs for udp listener
|
||||
|
@ -193,6 +199,8 @@ desc(udp_opts) ->
|
|||
"Settings for UDP sockets.";
|
||||
desc(dtls_opts) ->
|
||||
"Settings for DTLS protocol.";
|
||||
desc(websocket) ->
|
||||
"Websocket options";
|
||||
desc(_) ->
|
||||
undefined.
|
||||
|
||||
|
|
|
@ -63,7 +63,7 @@ max_mqueue_len() ->
|
|||
| dnstream_only
|
||||
| disable.
|
||||
message_format_checking() ->
|
||||
conf(message_format_checking, all).
|
||||
conf(message_format_checking, disable).
|
||||
|
||||
uptopic(Action) ->
|
||||
Topic = upstream(topic),
|
||||
|
|
|
@ -32,7 +32,7 @@ load() ->
|
|||
disable ->
|
||||
ok;
|
||||
_ ->
|
||||
case feedvar(emqx_config:get([gateway, ocpp, json_schema_dir])) of
|
||||
case feedvar(emqx_config:get([gateway, ocpp, json_schema_dir], undefined)) of
|
||||
undefined ->
|
||||
ok;
|
||||
Dir ->
|
||||
|
|
|
@ -69,9 +69,9 @@ stop_apps() ->
|
|||
?SLOG(notice, #{msg => "stopping_emqx_apps"}),
|
||||
_ = emqx_alarm_handler:unload(),
|
||||
ok = emqx_conf_app:unset_config_loaded(),
|
||||
lists:foreach(fun stop_one_app/1, lists:reverse(sorted_reboot_apps())),
|
||||
%% Mute otel deps application.
|
||||
_ = emqx_otel:stop_otel(),
|
||||
lists:foreach(fun stop_one_app/1, lists:reverse(sorted_reboot_apps())).
|
||||
ok = emqx_otel_app:stop_deps().
|
||||
|
||||
%% Those port apps are terminated after the main apps
|
||||
%% Don't need to stop when reboot.
|
||||
|
|
|
@ -35,6 +35,13 @@
|
|||
b2i/1
|
||||
]).
|
||||
|
||||
-export([
|
||||
parse_pager_params/1,
|
||||
parse_qstring/2,
|
||||
init_query_result/0,
|
||||
accumulate_query_rows/4
|
||||
]).
|
||||
|
||||
-ifdef(TEST).
|
||||
-export([paginate_test_format/1]).
|
||||
-endif.
|
||||
|
@ -444,6 +451,8 @@ accumulate_query_rows(
|
|||
count => Count + length(SubRows),
|
||||
rows => [{Node, SubRows} | RowsAcc]
|
||||
}};
|
||||
NCursor when NCursor >= PageEnd + Limit ->
|
||||
{enough, ResultAcc#{cursor => NCursor}};
|
||||
NCursor when NCursor >= PageEnd ->
|
||||
SubRows = lists:sublist(Rows, Limit - Count),
|
||||
{enough, ResultAcc#{
|
||||
|
|
|
@ -0,0 +1,361 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_mgmt_api_data_backup).

-behaviour(minirest_api).

-include_lib("emqx/include/logger.hrl").
-include_lib("hocon/include/hoconsc.hrl").

-export([api_spec/0, paths/0, schema/1, fields/1]).

-export([
data_export/2,
data_import/2,
data_files/2,
data_file_by_name/2
]).

-define(TAGS, [<<"Data Backup">>]).

-define(BAD_REQUEST, 'BAD_REQUEST').
-define(NOT_FOUND, 'NOT_FOUND').

-define(node_field(IsRequired), ?node_field(IsRequired, #{})).
-define(node_field(IsRequired, Meta),
{node, ?HOCON(binary(), Meta#{desc => "Node name", required => IsRequired})}
).
-define(filename_field(IsRequired), ?filename_field(IsRequired, #{})).
-define(filename_field(IsRequired, Meta),
{filename,
?HOCON(binary(), Meta#{
desc => "Data backup file name",
required => IsRequired
})}
).

api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).

paths() ->
[
"/data/export",
"/data/import",
"/data/files",
"/data/files/:filename"
].

schema("/data/export") ->
#{
'operationId' => data_export,
post => #{
tags => ?TAGS,
desc => <<"Export a data backup file">>,
responses => #{
200 =>
emqx_dashboard_swagger:schema_with_example(
?R_REF(backup_file_info),
backup_file_info_example()
)
}
}
};
schema("/data/import") ->
#{
'operationId' => data_import,
post => #{
tags => ?TAGS,
desc => <<"Import a data backup file">>,
'requestBody' => emqx_dashboard_swagger:schema_with_example(
?R_REF(import_request_body),
maps:with([node, filename], backup_file_info_example())
),

responses => #{
204 => <<"No Content">>,
400 => emqx_dashboard_swagger:error_codes(
[?BAD_REQUEST], <<"Backup file import failed">>
)
}
}
};
schema("/data/files") ->
#{
'operationId' => data_files,
post => #{
tags => ?TAGS,
desc => <<"Upload a data backup file">>,
'requestBody' => emqx_dashboard_swagger:file_schema(filename),
responses => #{
204 => <<"No Content">>,
400 => emqx_dashboard_swagger:error_codes(
[?BAD_REQUEST], <<"Bad backup file">>
)
}
},
get => #{
tags => ?TAGS,
desc => <<"List backup files">>,
parameters => [
?R_REF(emqx_dashboard_swagger, page),
?R_REF(emqx_dashboard_swagger, limit)
],
responses => #{
200 =>
emqx_dashboard_swagger:schema_with_example(
?R_REF(files_response),
files_response_example()
)
}
}
};
schema("/data/files/:filename") ->
#{
'operationId' => data_file_by_name,
get => #{
tags => ?TAGS,
desc => <<"Download a data backup file">>,
parameters => [
?filename_field(true, #{in => path}),
?node_field(false, #{in => query})
],
responses => #{
200 => ?HOCON(binary),
400 => emqx_dashboard_swagger:error_codes(
[?BAD_REQUEST], <<"Bad request">>
),
404 => emqx_dashboard_swagger:error_codes(
[?NOT_FOUND], <<"Backup file not found">>
)
}
},
delete => #{
tags => ?TAGS,
desc => <<"Delete a data backup file">>,
parameters => [
?filename_field(true, #{in => path}),
?node_field(false, #{in => query})
],
responses => #{
204 => <<"No Content">>,
400 => emqx_dashboard_swagger:error_codes(
[?BAD_REQUEST], <<"Bad request">>
),
404 => emqx_dashboard_swagger:error_codes(
[?NOT_FOUND], <<"Backup file not found">>
)
}
}
}.

fields(files_response) ->
[
{data, ?ARRAY(?R_REF(backup_file_info))},
{meta, ?R_REF(emqx_dashboard_swagger, meta)}
];
fields(backup_file_info) ->
[
?node_field(true),
?filename_field(true),
{created_at,
?HOCON(binary(), #{
desc => "Data backup file creation date and time",
required => true
})}
];
fields(import_request_body) ->
[?node_field(false), ?filename_field(true)];
fields(data_backup_file) ->
[
?filename_field(true),
{file,
?HOCON(binary(), #{
desc => "Data backup file content",
required => true
})}
].

%%------------------------------------------------------------------------------
%% HTTP API Callbacks
%%------------------------------------------------------------------------------

data_export(post, _Request) ->
case emqx_mgmt_data_backup:export() of
{ok, #{filename := FileName} = File} ->
{200, File#{filename => filename:basename(FileName)}};
Error ->
Error
end.

data_import(post, #{body := #{<<"filename">> := FileName} = Body}) ->
case safe_parse_node(Body) of
{error, Msg} ->
{400, #{code => 'BAD_REQUEST', message => Msg}};
FileNode ->
CoreNode = core_node(FileNode),
response(
emqx_mgmt_data_backup_proto_v1:import_file(CoreNode, FileNode, FileName, infinity)
)
end.

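%% Import always runs on a core node: replicant nodes cannot write
%% Mria tables directly. Prefer the node holding the file if it is a
%% core, otherwise fall back to the local node or the cluster
%% coordinator.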
core_node(FileNode) ->
case mria_rlog:role(FileNode) of
core ->
FileNode;
replicant ->
case mria_rlog:role() of
core ->
node();
replicant ->
mria_membership:coordinator()
end
end.

data_files(post, #{body := #{<<"filename">> := #{type := _} = File}}) ->
[{FileName, FileContent} | _] = maps:to_list(maps:without([type], File)),
case emqx_mgmt_data_backup:upload(FileName, FileContent) of
ok ->
{204};
{error, Reason} ->
{400, #{code => 'BAD_REQUEST', message => emqx_mgmt_data_backup:format_error(Reason)}}
end;
data_files(get, #{query_string := PageParams}) ->
case emqx_mgmt_api:parse_pager_params(PageParams) of
false ->
{400, #{code => ?BAD_REQUEST, message => <<"page_limit_invalid">>}};
#{page := Page, limit := Limit} = Pager ->
{200, #{data => list_backup_files(Page, Limit), meta => Pager}}
end.

data_file_by_name(Method, #{bindings := #{filename := Filename}, query_string := QS}) ->
case safe_parse_node(QS) of
{error, Msg} ->
{400, #{code => 'BAD_REQUEST', message => Msg}};
Node ->
case get_or_delete_file(Method, Filename, Node) of
{error, not_found} ->
{404, #{
code => ?NOT_FOUND, message => emqx_mgmt_data_backup:format_error(not_found)
}};
Other ->
response(Other)
end
end.

%%------------------------------------------------------------------------------
%% Internal functions
%%------------------------------------------------------------------------------

get_or_delete_file(get, Filename, Node) ->
emqx_mgmt_data_backup_proto_v1:read_file(Node, Filename, infinity);
get_or_delete_file(delete, Filename, Node) ->
emqx_mgmt_data_backup_proto_v1:delete_file(Node, Filename, infinity).

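%% Only names of currently running nodes are accepted; unknown input
%% is rejected instead of being converted with binary_to_atom/2, so
%% arbitrary user input cannot grow the atom table.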
safe_parse_node(#{<<"node">> := NodeBin}) ->
NodesBin = [erlang:atom_to_binary(N, utf8) || N <- emqx:running_nodes()],
case lists:member(NodeBin, NodesBin) of
true -> erlang:binary_to_atom(NodeBin, utf8);
false -> {error, io_lib:format("Unknown node: ~s", [NodeBin])}
end;
safe_parse_node(_) ->
node().

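%% A partially failed import is reported as 400, with the formatted
%% per-table and per-config errors joined into the message.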
response({ok, #{db_errors := DbErrs, config_errors := ConfErrs}}) ->
case DbErrs =:= #{} andalso ConfErrs =:= #{} of
true ->
{204};
false ->
DbErrs1 = emqx_mgmt_data_backup:format_db_errors(DbErrs),
ConfErrs1 = emqx_mgmt_data_backup:format_conf_errors(ConfErrs),
Msg = unicode:characters_to_binary(io_lib:format("~s", [DbErrs1 ++ ConfErrs1])),
{400, #{code => ?BAD_REQUEST, message => Msg}}
end;
response({ok, Res}) ->
{200, Res};
response(ok) ->
{204};
response({error, Reason}) ->
{400, #{code => ?BAD_REQUEST, message => emqx_mgmt_data_backup:format_error(Reason)}}.

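%% Pages are 1-based: page N of size L covers elements (N - 1) * L + 1
%% through N * L of the merged, sorted file list.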
list_backup_files(Page, Limit) ->
Start = Page * Limit - Limit + 1,
lists:sublist(list_backup_files(), Start, Limit).

list_backup_files() ->
Nodes = emqx:running_nodes(),
Results = emqx_mgmt_data_backup_proto_v1:list_files(Nodes, 300_000),
NodeResults = lists:zip(Nodes, Results),
{Successes, Failures} =
lists:partition(
fun({_Node, Result}) ->
case Result of
{ok, _} -> true;
_ -> false
end
end,
NodeResults
),
case Failures of
[] ->
ok;
[_ | _] ->
?SLOG(error, #{msg => "list_exported_backup_files_failed", node_errors => Failures})
end,
FileList = [FileInfo || {_Node, {ok, FileInfos}} <- Successes, FileInfo <- FileInfos],
lists:sort(
fun(#{created_at_sec := T1, filename := F1}, #{created_at_sec := T2, filename := F2}) ->
case T1 =:= T2 of
true -> F1 >= F2;
false -> T1 > T2
end
end,
FileList
).

backup_file_info_example() ->
#{
created_at => <<"2023-11-23T19:13:19+02:00">>,
created_at_sec => 1700759599,
filename => <<"emqx-export-2023-11-23-19-13-19.043.tar.gz">>,
node => 'emqx@127.0.0.1',
size => 22740
}.

files_response_example() ->
#{
data => [
#{
created_at => <<"2023-09-02T11:11:33+02:00">>,
created_at_sec => 1693645893,
filename => <<"emqx-export-2023-09-02-11-11-33.012.tar.gz">>,
node => 'emqx@127.0.0.1',
size => 22740
},
#{
created_at => <<"2023-11-23T19:13:19+02:00">>,
created_at_sec => 1700759599,
filename => <<"emqx-export-2023-11-23-19-13-19.043.tar.gz">>,
node => 'emqx@127.0.0.1',
size => 22740
}
],
meta => #{
page => 0,
limit => 20,
count => 300
}
}.

@ -18,7 +18,6 @@

-include_lib("emqx/include/emqx.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("emqx/include/emqx_router.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").

@ -37,8 +36,6 @@
topic/2
]).

-export([qs2ms/2, format/1]).

-define(TOPIC_NOT_FOUND, 'TOPIC_NOT_FOUND').

-define(TOPICS_QUERY_SCHEMA, [{<<"topic">>, binary}, {<<"node">>, atom}]).

@ -110,23 +107,15 @@ topic(get, #{bindings := Bindings}) ->
%%%==============================================================================================
%% api apply
do_list(Params) ->
case
emqx_mgmt_api:node_query(
node(),
?ROUTE_TAB,
Params,
?TOPICS_QUERY_SCHEMA,
fun ?MODULE:qs2ms/2,
fun ?MODULE:format/1
)
of
{error, page_limit_invalid} ->
{400, #{code => <<"INVALID_PARAMETER">>, message => <<"page_limit_invalid">>}};
{error, Node, Error} ->
Message = list_to_binary(io_lib:format("bad rpc call ~p, Reason ~p", [Node, Error])),
{500, #{code => <<"NODE_DOWN">>, message => Message}};
Response ->
{200, Response}
try
Pager = parse_pager_params(Params),
{_, Query} = emqx_mgmt_api:parse_qstring(Params, ?TOPICS_QUERY_SCHEMA),
QState = Pager#{continuation => undefined},
QResult = eval_topic_query(qs2ms(Query), QState),
{200, format_list_response(Pager, QResult)}
catch
throw:{error, page_limit_invalid} ->
{400, #{code => <<"INVALID_PARAMETER">>, message => <<"page_limit_invalid">>}}
end.

lookup(#{topic := Topic}) ->

@ -140,26 +129,63 @@ lookup(#{topic := Topic}) ->

%%%==============================================================================================
%% internal
-spec qs2ms(atom(), {list(), list()}) -> emqx_mgmt_api:match_spec_and_filter().
qs2ms(_Tab, {Qs, _}) ->
#{
match_spec => gen_match_spec(Qs, [{{route, '_', '_'}, [], ['$_']}]),
fuzzy_fun => undefined
}.

gen_match_spec([], Res) ->
Res;
gen_match_spec([{topic, '=:=', T0} | Qs], [{{route, _, Node}, [], ['$_']}]) when is_atom(Node) ->
{T, D} =
case emqx_topic:parse(T0) of
{#share{group = Group, topic = Topic}, _SubOpts} ->
{Topic, {Group, Node}};
{T1, _SubOpts} ->
{T1, Node}
end,
gen_match_spec(Qs, [{{route, T, D}, [], ['$_']}]);
gen_match_spec([{node, '=:=', N} | Qs], [{{route, T, _}, [], ['$_']}]) ->
gen_match_spec(Qs, [{{route, T, N}, [], ['$_']}]).
parse_pager_params(Params) ->
try emqx_mgmt_api:parse_pager_params(Params) of
Pager = #{} ->
Pager;
false ->
throw({error, page_limit_invalid})
catch
error:badarg ->
throw({error, page_limit_invalid})
end.

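%% The parsed query string is folded into a {TopicPattern, DestPattern}
%% pair for emqx_router:select/3; a shared-subscription topic filter
%% contributes its group to the destination pattern.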
-spec qs2ms({list(), list()}) -> tuple().
qs2ms({Qs, _}) ->
lists:foldl(fun gen_match_spec/2, {'_', '_'}, Qs).

gen_match_spec({topic, '=:=', QTopic}, {_MTopic, MNode}) when is_atom(MNode) ->
case emqx_topic:parse(QTopic) of
{#share{group = Group, topic = Topic}, _SubOpts} ->
{Topic, {Group, MNode}};
{Topic, _SubOpts} ->
{Topic, MNode}
end;
gen_match_spec({node, '=:=', QNode}, {MTopic, _MDest}) ->
{MTopic, QNode}.

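%% Page through emqx_router:select/3, accumulating rows until the
%% requested page is filled or the route table is exhausted.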
eval_topic_query(MS, QState) ->
finalize_query(eval_topic_query(MS, QState, emqx_mgmt_api:init_query_result())).

eval_topic_query(MS, QState, QResult) ->
QPage = eval_topic_query_page(MS, QState),
case QPage of
{Rows, '$end_of_table'} ->
{_, NQResult} = emqx_mgmt_api:accumulate_query_rows(node(), Rows, QState, QResult),
NQResult#{complete => true};
{Rows, NCont} ->
{_, NQResult} = emqx_mgmt_api:accumulate_query_rows(node(), Rows, QState, QResult),
eval_topic_query(MS, QState#{continuation := NCont}, NQResult);
'$end_of_table' ->
QResult#{complete => true}
end.

eval_topic_query_page(MS, #{limit := Limit, continuation := Cont}) ->
emqx_router:select(MS, Limit, Cont).

finalize_query(QResult = #{overflow := Overflow, complete := Complete}) ->
HasNext = Overflow orelse not Complete,
QResult#{hasnext => HasNext}.

format_list_response(Meta, _QResult = #{hasnext := HasNext, rows := RowsAcc, cursor := Cursor}) ->
#{
meta => Meta#{hasnext => HasNext, count => Cursor},
data => lists:flatmap(
fun({_Node, Rows}) -> [format(R) || R <- Rows] end,
RowsAcc
)
}.

format(#route{topic = Topic, dest = {Group, Node}}) ->
#{topic => ?SHARE(Group, Topic), node => Node};

@ -775,7 +775,7 @@ data(["import", Filename]) ->
emqx_ctl:print("Data has been imported successfully.~n");
{ok, _} ->
emqx_ctl:print(
"Data has been imported, but some errors occurred, see the the log above.~n"
"Data has been imported, but some errors occurred, see the log above.~n"
);
{error, Reason} ->
Reason1 = emqx_mgmt_data_backup:format_error(Reason),

@ -24,8 +24,21 @@
format_error/1
]).

%% HTTP API
-export([
upload/2,
maybe_copy_and_import/2,
read_file/1,
delete_file/1,
list_files/0,
format_conf_errors/1,
format_db_errors/1
]).

-export([default_validate_mnesia_backup/1]).

-export_type([import_res/0]).

-ifdef(TEST).
-compile(export_all).
-compile(nowarn_export_all).

@ -80,17 +93,21 @@
end
end()
).
-define(backup_path(_FileName_), filename:join(root_backup_dir(), _FileName_)).

-type backup_file_info() :: #{
filename => binary(),
size => non_neg_integer(),
created_at => binary(),
node => node(),
filename := binary(),
size := non_neg_integer(),
created_at := binary(),
created_at_sec := integer(),
node := node(),
atom() => _
}.

-type db_error_details() :: #{mria:table() => {error, _}}.
-type config_error_details() :: #{emqx_utils_maps:config_path() => {error, _}}.
-type import_res() ::
{ok, #{db_errors => db_error_details(), config_errors => config_error_details()}} | {error, _}.

%%------------------------------------------------------------------------------
%% APIs

@ -120,15 +137,11 @@ export(Opts) ->
file:del_dir_r(BackupName)
end.

-spec import(file:filename_all()) ->
{ok, #{db_errors => db_error_details(), config_errors => config_error_details()}}
| {error, _}.
-spec import(file:filename_all()) -> import_res().
import(BackupFileName) ->
import(BackupFileName, ?DEFAULT_OPTS).

-spec import(file:filename_all(), map()) ->
{ok, #{db_errors => db_error_details(), config_errors => config_error_details()}}
| {error, _}.
-spec import(file:filename_all(), map()) -> import_res().
import(BackupFileName, Opts) ->
case is_import_allowed() of
true ->

@ -142,6 +155,74 @@ import(BackupFileName, Opts) ->
{error, not_core_node}
end.

-spec maybe_copy_and_import(node(), file:filename_all()) -> import_res().
maybe_copy_and_import(FileNode, BackupFileName) when FileNode =:= node() ->
import(BackupFileName, #{});
maybe_copy_and_import(FileNode, BackupFileName) ->
%% The file can be already present locally
case filelib:is_file(?backup_path(str(BackupFileName))) of
true ->
import(BackupFileName, #{});
false ->
copy_and_import(FileNode, BackupFileName)
end.

-spec read_file(file:filename_all()) ->
{ok, #{filename => file:filename_all(), file => binary()}} | {error, _}.
read_file(BackupFileName) ->
BackupFileNameStr = str(BackupFileName),
case validate_backup_name(BackupFileNameStr) of
ok ->
maybe_not_found(file:read_file(?backup_path(BackupFileName)));
Err ->
Err
end.

-spec delete_file(file:filename_all()) -> ok | {error, _}.
delete_file(BackupFileName) ->
BackupFileNameStr = str(BackupFileName),
case validate_backup_name(BackupFileNameStr) of
ok ->
maybe_not_found(file:delete(?backup_path(BackupFileName)));
Err ->
Err
end.

-spec upload(file:filename_all(), binary()) -> ok | {error, _}.
upload(BackupFileName, BackupFileContent) ->
BackupFileNameStr = str(BackupFileName),
FilePath = ?backup_path(BackupFileNameStr),
case filelib:is_file(FilePath) of
true ->
{error, {already_exists, BackupFileNameStr}};
false ->
do_upload(BackupFileNameStr, BackupFileContent)
end.

-spec list_files() -> [backup_file_info()].
list_files() ->
Filter =
fun(File) ->
case file:read_file_info(File, [{time, posix}]) of
{ok, #file_info{size = Size, ctime = CTimeSec}} ->
BaseFilename = bin(filename:basename(File)),
Info = #{
filename => BaseFilename,
size => Size,
created_at => emqx_utils_calendar:epoch_to_rfc3339(CTimeSec, second),
created_at_sec => CTimeSec,
node => node()
},
{true, Info};
_ ->
false
end
end,
lists:filtermap(Filter, backup_files()).

backup_files() ->
filelib:wildcard(?backup_path("*" ++ ?TAR_SUFFIX)).

format_error(not_core_node) ->
str(
io_lib:format(

@ -170,13 +251,83 @@ format_error({unsupported_version, ImportVersion}) ->
[str(ImportVersion), str(emqx_release:version())]
)
);
format_error({already_exists, BackupFileName}) ->
str(io_lib:format("Backup file \"~s\" already exists", [BackupFileName]));
format_error(Reason) ->
Reason.

format_conf_errors(Errors) ->
Opts = #{print_fun => fun io_lib:format/2},
maps:values(maps:map(conf_error_formatter(Opts), Errors)).

format_db_errors(Errors) ->
Opts = #{print_fun => fun io_lib:format/2},
maps:values(
maps:map(
fun(Tab, Err) -> maybe_print_mnesia_import_err(Tab, Err, Opts) end,
Errors
)
).

%%------------------------------------------------------------------------------
%% Internal functions
%%------------------------------------------------------------------------------

copy_and_import(FileNode, BackupFileName) ->
case emqx_mgmt_data_backup_proto_v1:read_file(FileNode, BackupFileName, infinity) of
{ok, BackupFileContent} ->
case upload(BackupFileName, BackupFileContent) of
ok ->
import(BackupFileName, #{});
Err ->
Err
end;
Err ->
Err
end.

%% compatibility with import API that uses lookup_file/1 and returns `not_found` reason
maybe_not_found({error, enoent}) ->
{error, not_found};
maybe_not_found(Other) ->
Other.

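%% An uploaded tarball is written to the backup dir, extracted and
%% validated (including the optional cluster.hocon) before being
%% accepted; the extracted directory is always cleaned up afterwards.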
do_upload(BackupFileNameStr, BackupFileContent) ->
FilePath = ?backup_path(BackupFileNameStr),
BackupDir = ?backup_path(filename:basename(BackupFileNameStr, ?TAR_SUFFIX)),
try
ok = validate_backup_name(BackupFileNameStr),
ok = file:write_file(FilePath, BackupFileContent),
ok = extract_backup(FilePath),
{ok, _} = validate_backup(BackupDir),
HoconFileName = filename:join(BackupDir, ?CLUSTER_HOCON_FILENAME),
case filelib:is_regular(HoconFileName) of
true ->
{ok, RawConf} = hocon:files([HoconFileName]),
RawConf1 = upgrade_raw_conf(emqx_conf:schema_module(), RawConf),
{ok, _} = validate_cluster_hocon(RawConf1),
ok;
false ->
%% cluster.hocon can be missing in the backup
ok
end,
?SLOG(info, #{msg => "emqx_data_upload_success"})
catch
error:{badmatch, {error, Reason}}:Stack ->
?SLOG(error, #{msg => "emqx_data_upload_failed", reason => Reason, stacktrace => Stack}),
{error, Reason};
Class:Reason:Stack ->
?SLOG(error, #{
msg => "emqx_data_upload_failed",
exception => Class,
reason => Reason,
stacktrace => Stack
}),
{error, Reason}
after
file:del_dir_r(BackupDir)
end.

prepare_new_backup(Opts) ->
Ts = erlang:system_time(millisecond),
{{Y, M, D}, {HH, MM, SS}} = local_datetime(Ts),

@ -186,7 +337,7 @@ prepare_new_backup(Opts) ->
[Y, M, D, HH, MM, SS, Ts rem 1000]
)
),
BackupName = filename:join(root_backup_dir(), BackupBaseName),
BackupName = ?backup_path(BackupBaseName),
BackupTarName = ?tar(BackupName),
maybe_print("Exporting data to ~p...~n", [BackupTarName], Opts),
{ok, TarDescriptor} = ?fmt_tar_err(erl_tar:open(BackupTarName, [write, compressed])),

@ -208,13 +359,13 @@ do_export(BackupName, TarDescriptor, Opts) ->
ok = ?fmt_tar_err(erl_tar:close(TarDescriptor)),
{ok, #file_info{
size = Size,
ctime = {{Y1, M1, D1}, {H1, MM1, S1}}
}} = file:read_file_info(BackupTarName),
CreatedAt = io_lib:format("~p-~p-~p ~p:~p:~p", [Y1, M1, D1, H1, MM1, S1]),
ctime = CTime
}} = file:read_file_info(BackupTarName, [{time, posix}]),
{ok, #{
filename => bin(BackupTarName),
size => Size,
created_at => bin(CreatedAt),
created_at => emqx_utils_calendar:epoch_to_rfc3339(CTime, second),
created_at_sec => CTime,
node => node()
}}.

@ -351,7 +502,7 @@ parse_version_no_patch(VersionBin) ->
end.

do_import(BackupFileName, Opts) ->
BackupDir = filename:join(root_backup_dir(), filename:basename(BackupFileName, ?TAR_SUFFIX)),
BackupDir = ?backup_path(filename:basename(BackupFileName, ?TAR_SUFFIX)),
maybe_print("Importing data from ~p...~n", [BackupFileName], Opts),
try
ok = validate_backup_name(BackupFileName),

@ -619,7 +770,7 @@ validate_cluster_hocon(RawConf) ->

do_import_conf(RawConf, Opts) ->
GenConfErrs = filter_errors(maps:from_list(import_generic_conf(RawConf))),
maybe_print_errors(GenConfErrs, Opts),
maybe_print_conf_errors(GenConfErrs, Opts),
Errors =
lists:foldl(
fun(Module, ErrorsAcc) ->

@ -634,7 +785,7 @@ do_import_conf(RawConf, Opts) ->
GenConfErrs,
sort_importer_modules(find_behaviours(emqx_config_backup))
),
maybe_print_errors(Errors, Opts),
maybe_print_conf_errors(Errors, Opts),
Errors.

sort_importer_modules(Modules) ->

@ -677,17 +828,17 @@ maybe_print_changed(Changed, Opts) ->
Changed
).

maybe_print_errors(Errors, Opts) ->
maps:foreach(
fun(Path, Err) ->
maybe_print(
"Failed to import the following config path: ~p, reason: ~p~n",
[pretty_path(Path), Err],
Opts
)
end,
Errors
).
maybe_print_conf_errors(Errors, Opts) ->
maps:foreach(conf_error_formatter(Opts), Errors).

conf_error_formatter(Opts) ->
fun(Path, Err) ->
maybe_print(
"Failed to import the following config path: ~p, reason: ~p~n",
[pretty_path(Path), Err],
Opts
)
end.

filter_errors(Results) ->
maps:filter(

@ -727,7 +878,7 @@ lookup_file(FileName) ->
%% Only lookup by basename, don't allow to lookup by file path
case FileName =:= filename:basename(FileName) of
true ->
FilePath = filename:join(root_backup_dir(), FileName),
FilePath = ?backup_path(FileName),
case filelib:is_file(FilePath) of
true -> {ok, FilePath};
false -> {error, not_found}

@ -0,0 +1,51 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_mgmt_data_backup_proto_v1).

-behaviour(emqx_bpapi).

-export([
introduced_in/0,
import_file/4,
list_files/2,
read_file/3,
delete_file/3
]).

-include_lib("emqx/include/bpapi.hrl").

introduced_in() ->
"5.4.0".

-spec list_files([node()], timeout()) ->
emqx_rpc:erpc_multicall({non_neg_integer(), map()}).
list_files(Nodes, Timeout) ->
erpc:multicall(Nodes, emqx_mgmt_data_backup, list_files, [], Timeout).

-spec import_file(node(), node(), binary(), timeout()) ->
emqx_mgmt_data_backup:import_res() | {badrpc, _}.
import_file(Node, FileNode, FileName, Timeout) ->
rpc:call(Node, emqx_mgmt_data_backup, maybe_copy_and_import, [FileNode, FileName], Timeout).

-spec read_file(node(), binary(), timeout()) ->
{ok, binary()} | {error, _} | {badrpc, _}.
read_file(Node, FileName, Timeout) ->
rpc:call(Node, emqx_mgmt_data_backup, read_file, [FileName], Timeout).

-spec delete_file(node(), binary(), timeout()) -> ok | {error, _} | {badrpc, _}.
delete_file(Node, FileName, Timeout) ->
rpc:call(Node, emqx_mgmt_data_backup, delete_file, [FileName], Timeout).

@ -0,0 +1,355 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_mgmt_api_data_backup_SUITE).

-compile(export_all).
-compile(nowarn_export_all).

-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

-define(NODE1_PORT, 18085).
-define(NODE2_PORT, 18086).
-define(NODE3_PORT, 18087).
-define(api_base_url(_Port_), ("http://127.0.0.1:" ++ (integer_to_list(_Port_)))).

-define(UPLOAD_EE_BACKUP, "emqx-export-upload-ee.tar.gz").
-define(UPLOAD_CE_BACKUP, "emqx-export-upload-ce.tar.gz").
-define(BAD_UPLOAD_BACKUP, "emqx-export-bad-upload.tar.gz").
-define(BAD_IMPORT_BACKUP, "emqx-export-bad-file.tar.gz").
-define(backup_path(_Config_, _BackupName_),
filename:join(?config(data_dir, _Config_), _BackupName_)
).

all() ->
emqx_common_test_helpers:all(?MODULE).

init_per_suite(Config) ->
Config.

end_per_suite(_) ->
ok.

init_per_testcase(TC, Config) when
TC =:= t_upload_ee_backup;
TC =:= t_import_ee_backup
->
case emqx_release:edition() of
ee -> do_init_per_testcase(TC, Config);
ce -> Config
end;
init_per_testcase(TC, Config) ->
do_init_per_testcase(TC, Config).

end_per_testcase(_TC, Config) ->
case ?config(cluster, Config) of
undefined -> ok;
Cluster -> emqx_cth_cluster:stop(Cluster)
end.

t_export_backup(Config) ->
Auth = ?config(auth, Config),
export_test(?NODE1_PORT, Auth),
export_test(?NODE2_PORT, Auth),
export_test(?NODE3_PORT, Auth).

t_delete_backup(Config) ->
test_file_op(delete, Config).

t_get_backup(Config) ->
test_file_op(get, Config).

t_list_backups(Config) ->
Auth = ?config(auth, Config),

[{ok, _} = export_backup(?NODE1_PORT, Auth) || _ <- lists:seq(1, 10)],
[{ok, _} = export_backup(?NODE2_PORT, Auth) || _ <- lists:seq(1, 10)],

{ok, RespBody} = list_backups(?NODE1_PORT, Auth, <<"1">>, <<"100">>),
#{<<"data">> := Data, <<"meta">> := _} = emqx_utils_json:decode(RespBody),
?assertEqual(20, length(Data)),

{ok, EmptyRespBody} = list_backups(?NODE2_PORT, Auth, <<"2">>, <<"100">>),
#{<<"data">> := EmptyData, <<"meta">> := _} = emqx_utils_json:decode(EmptyRespBody),
?assertEqual(0, length(EmptyData)),

{ok, RespBodyP1} = list_backups(?NODE3_PORT, Auth, <<"1">>, <<"10">>),
{ok, RespBodyP2} = list_backups(?NODE3_PORT, Auth, <<"2">>, <<"10">>),
{ok, RespBodyP3} = list_backups(?NODE3_PORT, Auth, <<"3">>, <<"10">>),

#{<<"data">> := DataP1, <<"meta">> := _} = emqx_utils_json:decode(RespBodyP1),
?assertEqual(10, length(DataP1)),
#{<<"data">> := DataP2, <<"meta">> := _} = emqx_utils_json:decode(RespBodyP2),
?assertEqual(10, length(DataP2)),
#{<<"data">> := DataP3, <<"meta">> := _} = emqx_utils_json:decode(RespBodyP3),
?assertEqual(0, length(DataP3)),

?assertEqual(Data, DataP1 ++ DataP2).

t_upload_ce_backup(Config) ->
upload_backup_test(Config, ?UPLOAD_CE_BACKUP).

t_upload_ee_backup(Config) ->
case emqx_release:edition() of
ee -> upload_backup_test(Config, ?UPLOAD_EE_BACKUP);
ce -> ok
end.

t_import_ce_backup(Config) ->
import_backup_test(Config, ?UPLOAD_CE_BACKUP).

t_import_ee_backup(Config) ->
case emqx_release:edition() of
ee -> import_backup_test(Config, ?UPLOAD_EE_BACKUP);
ce -> ok
end.

do_init_per_testcase(TC, Config) ->
Cluster = [Core1, _Core2, Repl] = cluster(TC, Config),
Auth = auth_header(Core1),
ok = wait_for_auth_replication(Repl),
[{auth, Auth}, {cluster, Cluster} | Config].

test_file_op(Method, Config) ->
Auth = ?config(auth, Config),

{ok, Node1Resp} = export_backup(?NODE1_PORT, Auth),
{ok, Node2Resp} = export_backup(?NODE2_PORT, Auth),
{ok, Node3Resp} = export_backup(?NODE3_PORT, Auth),

ParsedResps = [emqx_utils_json:decode(R) || R <- [Node1Resp, Node2Resp, Node3Resp]],

[Node1Parsed, Node2Parsed, Node3Parsed] = ParsedResps,

%% node param is not set in Query, expect get/delete the backup on the local node
F1 = fun() ->
backup_file_op(Method, ?NODE1_PORT, Auth, maps:get(<<"filename">>, Node1Parsed), [])
end,
?assertMatch({ok, _}, F1()),
assert_second_call(Method, F1()),

%% Node 2 must get/delete the backup on Node 3 via rpc
F2 = fun() ->
backup_file_op(
Method,
?NODE2_PORT,
Auth,
maps:get(<<"filename">>, Node3Parsed),
[{<<"node">>, maps:get(<<"node">>, Node3Parsed)}]
)
end,
?assertMatch({ok, _}, F2()),
assert_second_call(Method, F2()),

%% The same as above but nodes are switched
F3 = fun() ->
backup_file_op(
Method,
?NODE3_PORT,
Auth,
maps:get(<<"filename">>, Node2Parsed),
[{<<"node">>, maps:get(<<"node">>, Node2Parsed)}]
)
end,
?assertMatch({ok, _}, F3()),
assert_second_call(Method, F3()).

export_test(NodeApiPort, Auth) ->
{ok, RespBody} = export_backup(NodeApiPort, Auth),
#{
<<"created_at">> := _,
<<"created_at_sec">> := CreatedSec,
<<"filename">> := _,
<<"node">> := _,
<<"size">> := Size
} = emqx_utils_json:decode(RespBody),
?assert(is_integer(Size)),
?assert(is_integer(CreatedSec) andalso CreatedSec > 0).

upload_backup_test(Config, BackupName) ->
Auth = ?config(auth, Config),
UploadFile = ?backup_path(Config, BackupName),
BadImportFile = ?backup_path(Config, ?BAD_IMPORT_BACKUP),
BadUploadFile = ?backup_path(Config, ?BAD_UPLOAD_BACKUP),

?assertEqual(ok, upload_backup(?NODE3_PORT, Auth, UploadFile)),
%% This file was specially forged to pass upload validation but fail on import
?assertEqual(ok, upload_backup(?NODE2_PORT, Auth, BadImportFile)),
?assertEqual({error, bad_request}, upload_backup(?NODE1_PORT, Auth, BadUploadFile)).

import_backup_test(Config, BackupName) ->
Auth = ?config(auth, Config),
UploadFile = ?backup_path(Config, BackupName),
BadImportFile = ?backup_path(Config, ?BAD_IMPORT_BACKUP),

?assertEqual(ok, upload_backup(?NODE3_PORT, Auth, UploadFile)),

%% This file was specially forged to pass upload validation but fail on import
?assertEqual(ok, upload_backup(?NODE2_PORT, Auth, BadImportFile)),

%% Replicant node must be able to import the file by doing rpc to a core node
?assertMatch({ok, _}, import_backup(?NODE3_PORT, Auth, BackupName)),

[N1, N2, N3] = ?config(cluster, Config),

?assertMatch({ok, _}, import_backup(?NODE3_PORT, Auth, BackupName)),

?assertMatch({ok, _}, import_backup(?NODE1_PORT, Auth, BackupName, N3)),
%% Now this node must also have the file locally
?assertMatch({ok, _}, import_backup(?NODE1_PORT, Auth, BackupName, N1)),

?assertMatch({error, {_, 400, _}}, import_backup(?NODE2_PORT, Auth, ?BAD_IMPORT_BACKUP, N2)).

assert_second_call(get, Res) ->
?assertMatch({ok, _}, Res);
assert_second_call(delete, Res) ->
?assertMatch({error, {_, 404, _}}, Res).

export_backup(NodeApiPort, Auth) ->
Path = ["data", "export"],
request(post, NodeApiPort, Path, Auth).

import_backup(NodeApiPort, Auth, BackupName) ->
import_backup(NodeApiPort, Auth, BackupName, undefined).

import_backup(NodeApiPort, Auth, BackupName, Node) ->
Path = ["data", "import"],
Body = #{<<"filename">> => unicode:characters_to_binary(BackupName)},
Body1 =
case Node of
undefined -> Body;
_ -> Body#{<<"node">> => Node}
end,
request(post, NodeApiPort, Path, Body1, Auth).

list_backups(NodeApiPort, Auth, Page, Limit) ->
Path = ["data", "files"],
request(get, NodeApiPort, Path, [{<<"page">>, Page}, {<<"limit">>, Limit}], [], Auth).

backup_file_op(Method, NodeApiPort, Auth, BackupName, QueryList) ->
Path = ["data", "files", BackupName],
request(Method, NodeApiPort, Path, QueryList, [], Auth).

upload_backup(NodeApiPort, Auth, BackupFilePath) ->
Path = emqx_mgmt_api_test_util:api_path(?api_base_url(NodeApiPort), ["data", "files"]),
Res = emqx_mgmt_api_test_util:upload_request(
Path,
BackupFilePath,
"filename",
<<"application/octet-stream">>,
[],
Auth
),
case Res of
{ok, {{"HTTP/1.1", 204, _}, _Headers, _}} ->
ok;
{ok, {{"HTTP/1.1", 400, _}, _Headers, _} = Resp} ->
ct:pal("Backup upload failed: ~p", [Resp]),
{error, bad_request};
Err ->
Err
end.

request(Method, NodePort, PathParts, Auth) ->
request(Method, NodePort, PathParts, [], [], Auth).

request(Method, NodePort, PathParts, Body, Auth) ->
request(Method, NodePort, PathParts, [], Body, Auth).

request(Method, NodePort, PathParts, QueryList, Body, Auth) ->
Path = emqx_mgmt_api_test_util:api_path(?api_base_url(NodePort), PathParts),
Query = unicode:characters_to_list(uri_string:compose_query(QueryList)),
emqx_mgmt_api_test_util:request_api(Method, Path, Query, Auth, Body).

cluster(TC, Config) ->
Nodes = emqx_cth_cluster:start(
[
{api_data_backup_core1, #{role => core, apps => apps_spec(18085, TC)}},
{api_data_backup_core2, #{role => core, apps => apps_spec(18086, TC)}},
{api_data_backup_replicant, #{role => replicant, apps => apps_spec(18087, TC)}}
],
#{work_dir => emqx_cth_suite:work_dir(TC, Config)}
),
Nodes.

auth_header(Node) ->
{ok, API} = erpc:call(Node, emqx_common_test_http, create_default_app, []),
emqx_common_test_http:auth_header(API).

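%% Poll the replicant until the default API credentials created on the
%% core node have replicated to it.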
wait_for_auth_replication(ReplNode) ->
wait_for_auth_replication(ReplNode, 100).

wait_for_auth_replication(ReplNode, 0) ->
{error, {ReplNode, auth_not_ready}};
wait_for_auth_replication(ReplNode, Retries) ->
try
{_Header, _Val} = erpc:call(ReplNode, emqx_common_test_http, default_auth_header, []),
ok
catch
_:_ ->
timer:sleep(1),
wait_for_auth_replication(ReplNode, Retries - 1)
end.

apps_spec(APIPort, TC) ->
common_apps_spec() ++
app_spec_dashboard(APIPort) ++
upload_import_apps_spec(TC).

common_apps_spec() ->
[
emqx,
emqx_conf,
emqx_management
].

app_spec_dashboard(APIPort) ->
[
{emqx_dashboard, #{
config =>
#{
dashboard =>
#{
listeners =>
#{
http =>
#{bind => APIPort}
},
default_username => "",
default_password => ""
}
}
}}
].

upload_import_apps_spec(TC) when
TC =:= t_upload_ee_backup;
TC =:= t_import_ee_backup;
TC =:= t_upload_ce_backup;
TC =:= t_import_ce_backup
->
[
emqx_auth,
emqx_auth_http,
emqx_auth_jwt,
emqx_auth_mnesia,
emqx_rule_engine,
emqx_modules,
emqx_bridge
];
upload_import_apps_spec(_TC) ->
[].

Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.

@ -1,9 +1,15 @@
{application, emqx_opentelemetry, [
{description, "OpenTelemetry for EMQX Broker"},
{vsn, "0.1.3"},
{vsn, "0.2.0"},
{registered, []},
{mod, {emqx_otel_app, []}},
{applications, [kernel, stdlib, emqx]},
{applications, [
kernel,
stdlib,
emqx,
%% otel metrics depend on emqx_mgmt_cache
emqx_management
]},
{env, []},
{modules, []},
{licenses, ["Apache 2.0"]},

@ -103,10 +103,24 @@ otel_config_schema() ->

otel_config_example() ->
#{
enable => true,
exporter =>
#{
logs => #{
enable => true,
exporter => #{
endpoint => "http://localhost:4317",
interval => "10s"
ssl_options => #{
enable => false
}
},
level => warning
},
metrics => #{
enable => true,
exporter => #{
endpoint => "http://localhost:4317",
interval => "10s",
ssl_options => #{
enable => false
}
}
}
}.

@ -19,11 +19,17 @@
-behaviour(application).

-export([start/2, stop/1]).
-export([stop_deps/0]).

start(_StartType, _StartArgs) ->
emqx_otel_config:add_handler(),
ok = emqx_otel_config:add_otel_log_handler(),
emqx_otel_sup:start_link().

stop(_State) ->
emqx_otel_config:remove_handler(),
_ = emqx_otel_config:remove_otel_log_handler(),
ok.

stop_deps() ->
emqx_otel_config:stop_all_otel_apps().

@ -19,9 +19,16 @@

-define(OPTL, [opentelemetry]).

-define(OTEL_EXPORTER, opentelemetry_exporter).
-define(OTEL_LOG_HANDLER, otel_log_handler).
-define(OTEL_LOG_HANDLER_ID, opentelemetry_handler).

-export([add_handler/0, remove_handler/0]).
-export([post_config_update/5]).
-export([update/1]).
-export([add_otel_log_handler/0, remove_otel_log_handler/0]).
-export([stop_all_otel_apps/0]).
-export([otel_exporter/1]).

update(Config) ->
case

@ -45,14 +52,109 @@ remove_handler() ->
ok = emqx_config_handler:remove_handler(?OPTL),
ok.

post_config_update(?OPTL, _Req, Old, Old, _AppEnvs) ->
ok;
post_config_update(?OPTL, _Req, New, _Old, AppEnvs) ->
application:set_env(AppEnvs),
ensure_otel(New);
MetricsRes = ensure_otel_metrics(New),
LogsRes = ensure_otel_logs(New),
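%% The shared OTel applications are stopped only when both metrics and
%% logs are disabled (see maybe_stop_all_otel_apps/1).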
_ = maybe_stop_all_otel_apps(New),
case {MetricsRes, LogsRes} of
{ok, ok} -> ok;
Other -> {error, Other}
end;
post_config_update(_ConfPath, _Req, _NewConf, _OldConf, _AppEnvs) ->
ok.

ensure_otel(#{enable := true} = Conf) ->
_ = emqx_otel:stop_otel(),
emqx_otel:start_otel(Conf);
ensure_otel(#{enable := false}) ->
emqx_otel:stop_otel().
stop_all_otel_apps() ->
_ = application:stop(opentelemetry),
_ = application:stop(opentelemetry_experimental),
_ = application:stop(opentelemetry_experimental_api),
_ = application:stop(opentelemetry_exporter),
ok.

add_otel_log_handler() ->
ensure_otel_logs(emqx:get_config(?OPTL)).

remove_otel_log_handler() ->
remove_handler_if_present(?OTEL_LOG_HANDLER_ID).

otel_exporter(ExporterConf) ->
#{
endpoint := Endpoint,
protocol := Proto,
ssl_options := SSLOpts
} = ExporterConf,
{?OTEL_EXPORTER, #{
endpoint => Endpoint,
protocol => Proto,
ssl_options => ssl_opts(Endpoint, SSLOpts)
}}.

%% Internal functions

ensure_otel_metrics(#{metrics := #{enable := true} = MetricsConf}) ->
_ = emqx_otel_metrics:stop_otel(),
emqx_otel_metrics:start_otel(MetricsConf);
ensure_otel_metrics(#{metrics := #{enable := false}}) ->
emqx_otel_metrics:stop_otel();
ensure_otel_metrics(_) ->
ok.

ensure_otel_logs(#{logs := #{enable := true} = LogsConf}) ->
ok = remove_handler_if_present(?OTEL_LOG_HANDLER_ID),
ok = ensure_log_apps(),
HandlerConf = tr_handler_conf(LogsConf),
%% NOTE: should primary logger level be updated if it's higher than otel log level?
logger:add_handler(?OTEL_LOG_HANDLER_ID, ?OTEL_LOG_HANDLER, HandlerConf);
ensure_otel_logs(#{logs := #{enable := false}}) ->
remove_handler_if_present(?OTEL_LOG_HANDLER_ID).

remove_handler_if_present(HandlerId) ->
case logger:get_handler_config(HandlerId) of
{ok, _} ->
ok = logger:remove_handler(HandlerId);
_ ->
ok
end.

ensure_log_apps() ->
{ok, _} = application:ensure_all_started(opentelemetry_exporter),
{ok, _} = application:ensure_all_started(opentelemetry_experimental),
ok.

maybe_stop_all_otel_apps(#{metrics := #{enable := false}, logs := #{enable := false}}) ->
stop_all_otel_apps();
maybe_stop_all_otel_apps(_) ->
ok.

tr_handler_conf(Conf) ->
#{
level := Level,
max_queue_size := MaxQueueSize,
exporting_timeout := ExportingTimeout,
scheduled_delay := ScheduledDelay,
exporter := ExporterConf
} = Conf,
#{
level => Level,
config => #{
max_queue_size => MaxQueueSize,
exporting_timeout_ms => ExportingTimeout,
scheduled_delay_ms => ScheduledDelay,
exporter => otel_exporter(ExporterConf)
}
}.

ssl_opts(Endpoint, SSLOpts) ->
case is_ssl(Endpoint) of
true ->
emqx_tls_lib:to_client_opts(SSLOpts#{enable => true});
false ->
[]
end.

is_ssl(<<"https://", _/binary>> = _Endpoint) ->
true;
is_ssl(_Endpoint) ->
false.

@ -14,7 +14,7 @@
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_otel).
-module(emqx_otel_metrics).
-include_lib("emqx/include/logger.hrl").

-export([start_otel/1, stop_otel/0]).

@ -29,17 +29,19 @@ start_otel(Conf) ->
assert_started(supervisor:start_child(?SUPERVISOR, Spec)).

stop_otel() ->
Res =
case erlang:whereis(?SUPERVISOR) of
undefined ->
ok;
Pid ->
case supervisor:terminate_child(Pid, ?MODULE) of
ok -> supervisor:delete_child(Pid, ?MODULE);
{error, not_found} -> ok;
Error -> Error
end
end,
ok = cleanup(),
case erlang:whereis(?SUPERVISOR) of
undefined ->
ok;
Pid ->
case supervisor:terminate_child(Pid, ?MODULE) of
ok -> supervisor:delete_child(Pid, ?MODULE);
{error, not_found} -> ok;
Error -> Error
end
end.
Res.

start_link(Conf) ->
gen_server:start_link({local, ?MODULE}, ?MODULE, Conf, []).

@ -71,32 +73,40 @@ setup(_Conf) ->
ok.

ensure_apps(Conf) ->
#{exporter := #{interval := ExporterInterval}} = Conf,
#{exporter := #{interval := ExporterInterval} = Exporter} = Conf,
{ok, _} = application:ensure_all_started(opentelemetry_exporter),
{ok, _} = application:ensure_all_started(opentelemetry),
_ = application:stop(opentelemetry_experimental),
{ok, _} = application:ensure_all_started(opentelemetry_experimental),
{ok, _} = application:ensure_all_started(opentelemetry_api_experimental),

_ = opentelemetry_experimental:stop_default_metrics(),
ok = application:set_env(
opentelemetry_experimental,
readers,
[
#{
id => emqx_otel_metric_reader,
module => otel_metric_reader,
config => #{
exporter => {opentelemetry_exporter, #{}},
exporter => emqx_otel_config:otel_exporter(Exporter),
export_interval_ms => ExporterInterval
}
}
]
),
{ok, _} = application:ensure_all_started(opentelemetry_experimental),
{ok, _} = application:ensure_all_started(opentelemetry_api_experimental),
{ok, _} = opentelemetry_experimental:start_default_metrics(),
ok.

cleanup() ->
_ = application:stop(opentelemetry),
_ = application:stop(opentelemetry_experimental),
_ = application:stop(opentelemetry_experimental_api),
_ = application:stop(opentelemetry_exporter),
safe_stop_default_metrics().

safe_stop_default_metrics() ->
try
_ = opentelemetry_experimental:stop_default_metrics()
catch
%% normal scenario, metrics supervisor is not started
exit:{noproc, _} -> ok
end,
ok.

create_metric_views() ->

@ -24,16 +24,48 @@
desc/1
]).

-export([upgrade_legacy_metrics/1]).

%% Compatibility with the previous schema that defined only metric fields
upgrade_legacy_metrics(RawConf) ->
case RawConf of
#{<<"opentelemetry">> := Otel} ->
LegacyMetricsFields = [<<"enable">>, <<"exporter">>],
Otel1 = maps:without(LegacyMetricsFields, Otel),
Metrics = maps:with(LegacyMetricsFields, Otel),
case Metrics =:= #{} of
true ->
RawConf;
false ->
RawConf#{<<"opentelemetry">> => Otel1#{<<"metrics">> => Metrics}}
end;
_ ->
RawConf
end.

namespace() -> opentelemetry.

roots() -> ["opentelemetry"].

fields("opentelemetry") ->
[
{exporter,
{metrics,
?HOCON(
?R_REF("exporter"),
#{desc => ?DESC(exporter)}
?R_REF("otel_metrics"),
#{
desc => ?DESC(otel_metrics)
}
)},
{logs,
?HOCON(
?R_REF("otel_logs"),
#{
desc => ?DESC(otel_logs)
}
)}
];
fields("otel_metrics") ->
[
{enable,
?HOCON(
boolean(),

@ -42,41 +74,130 @@ fields("opentelemetry") ->
required => true,
desc => ?DESC(enable)
}
)},
{exporter,
?HOCON(
?R_REF("otel_metrics_exporter"),
#{desc => ?DESC(exporter)}
)}
];
fields("exporter") ->
fields("otel_logs") ->
[
{"protocol",
{level,
?HOCON(
%% http_protobuf is not support for metrics yet.
?ENUM([grpc]),
emqx_conf_schema:log_level(),
#{
mapping => "opentelemetry_exporter.otlp_protocol",
desc => ?DESC(protocol),
default => grpc,
default => warning,
desc => ?DESC(otel_log_handler_level),
importance => ?IMPORTANCE_HIGH
}
)},
{enable,
?HOCON(
boolean(),
#{
default => false,
desc => ?DESC(enable),
importance => ?IMPORTANCE_HIGH
}
)},
{max_queue_size,
?HOCON(
pos_integer(),
#{
default => 2048,
desc => ?DESC(max_queue_size),
importance => ?IMPORTANCE_HIDDEN
}
)},
{"endpoint",
{exporting_timeout,
?HOCON(
emqx_schema:timeout_duration_ms(),
#{
default => <<"30s">>,
desc => ?DESC(exporting_timeout),
importance => ?IMPORTANCE_HIDDEN
}
)},
{scheduled_delay,
?HOCON(
emqx_schema:timeout_duration_ms(),
#{
default => <<"1s">>,
desc => ?DESC(scheduled_delay),
importance => ?IMPORTANCE_HIDDEN
}
)},
{exporter,
?HOCON(
?R_REF("otel_logs_exporter"),
#{
desc => ?DESC(exporter),
importance => ?IMPORTANCE_HIGH
}
)}
];
fields("otel_metrics_exporter") ->
exporter_fields(metrics);
fields("otel_logs_exporter") ->
exporter_fields(logs);
fields("ssl_opts") ->
Schema = emqx_schema:client_ssl_opts_schema(#{}),
lists:keydelete("enable", 1, Schema).

desc("opentelemetry") -> ?DESC(opentelemetry);
desc("exporter") -> ?DESC(exporter);
desc("otel_logs_exporter") -> ?DESC(exporter);
desc("otel_metrics_exporter") -> ?DESC(exporter);
desc("otel_logs") -> ?DESC(otel_logs);
desc("otel_metrics") -> ?DESC(otel_metrics);
desc("ssl_opts") -> ?DESC(exporter_ssl);
desc(_) -> undefined.

exporter_fields(OtelSignal) ->
|
||||
[
|
||||
{endpoint,
|
||||
?HOCON(
|
||||
emqx_schema:url(),
|
||||
#{
|
||||
mapping => "opentelemetry_exporter.otlp_endpoint",
|
||||
default => <<"http://localhost:4317">>,
|
||||
desc => ?DESC(endpoint)
|
||||
default => "http://localhost:4317",
|
||||
desc => ?DESC(exporter_endpoint),
|
||||
importance => ?IMPORTANCE_HIGH
|
||||
}
|
||||
)},
|
||||
{"interval",
|
||||
{protocol,
|
||||
?HOCON(
|
||||
%% http protobuf/json may be added in the future
|
||||
?ENUM([grpc]),
|
||||
#{
|
||||
default => grpc,
|
||||
desc => ?DESC(exporter_protocol),
|
||||
importance => ?IMPORTANCE_HIDDEN
|
||||
}
|
||||
)},
|
||||
{ssl_options,
|
||||
?HOCON(
|
||||
?R_REF("ssl_opts"),
|
||||
#{
|
||||
desc => ?DESC(exporter_ssl),
|
||||
importance => ?IMPORTANCE_LOW
|
||||
}
|
||||
)}
|
||||
] ++ exporter_extra_fields(OtelSignal).
|
||||
|
||||
%% Keep the interval in the exporter config for metrics, as it differs from the
%% scheduled_delay_ms option used for OTel traces and logs.
|
||||
exporter_extra_fields(metrics) ->
|
||||
[
|
||||
{interval,
|
||||
?HOCON(
|
||||
emqx_schema:timeout_duration_ms(),
|
||||
#{
|
||||
default => <<"10s">>,
|
||||
required => true,
|
||||
desc => ?DESC(interval)
|
||||
desc => ?DESC(scheduled_delay)
|
||||
}
|
||||
)}
|
||||
].
|
||||
|
||||
desc("opentelemetry") -> ?DESC(opentelemetry);
|
||||
desc("exporter") -> ?DESC(exporter);
|
||||
desc(_) -> undefined.
|
||||
];
|
||||
exporter_extra_fields(_OtelSignal) ->
|
||||
[].
|
||||
|
|
|
@ -41,8 +41,8 @@ init([]) ->
|
|||
period => 512
|
||||
},
|
||||
Children =
|
||||
case emqx_conf:get([opentelemetry]) of
|
||||
case emqx_conf:get([opentelemetry, metrics]) of
|
||||
#{enable := false} -> [];
|
||||
#{enable := true} = Conf -> [worker_spec(emqx_otel, Conf)]
|
||||
#{enable := true} = Conf -> [worker_spec(emqx_otel_metrics, Conf)]
|
||||
end,
|
||||
{ok, {SupFlags, Children}}.
|
||||
|
|
|
@ -35,6 +35,8 @@
|
|||
values/1
|
||||
]).
|
||||
|
||||
-define(CONNECTOR_TYPE, pgsql).
|
||||
|
||||
roots() ->
|
||||
[].
|
||||
|
||||
|
@ -64,12 +66,18 @@ fields("get_bridge_v2") ->
|
|||
fields(pgsql_action);
|
||||
fields("post_bridge_v2") ->
|
||||
fields(pgsql_action);
|
||||
fields("put_connector") ->
|
||||
fields("config_connector");
|
||||
fields("get_connector") ->
|
||||
fields("config_connector");
|
||||
fields("post_connector") ->
|
||||
fields("config_connector").
|
||||
fields(Field) when
|
||||
Field == "get_connector";
|
||||
Field == "put_connector";
|
||||
Field == "post_connector"
|
||||
->
|
||||
fields({Field, ?CONNECTOR_TYPE});
|
||||
fields({Field, Type}) when
|
||||
Field == "get_connector";
|
||||
Field == "put_connector";
|
||||
Field == "post_connector"
|
||||
->
|
||||
emqx_connector_schema:api_fields(Field, Type, fields("connection_fields")).
|
||||
|
||||
server() ->
|
||||
Meta = #{desc => ?DESC("server")},
|
||||
|
@ -94,7 +102,7 @@ connector_examples(Method) ->
|
|||
#{
|
||||
<<"pgsql">> => #{
|
||||
summary => <<"PostgreSQL Connector">>,
|
||||
value => values({Method, pgsql})
|
||||
value => values({Method, <<"pgsql">>})
|
||||
}
|
||||
}
|
||||
].
|
||||
|
@ -109,20 +117,21 @@ values({get, PostgreSQLType}) ->
|
|||
node => <<"emqx@localhost">>,
|
||||
status => <<"connected">>
|
||||
}
|
||||
]
|
||||
],
|
||||
actions => [<<"my_action">>]
|
||||
},
|
||||
values({post, PostgreSQLType})
|
||||
);
|
||||
values({post, PostgreSQLType}) ->
|
||||
values({put, PostgreSQLType});
|
||||
values({put, PostgreSQLType}) ->
|
||||
maps:merge(
|
||||
#{
|
||||
name => <<"my_action">>,
|
||||
name => <<"my_", PostgreSQLType/binary, "_connector">>,
|
||||
type => PostgreSQLType
|
||||
},
|
||||
values(common)
|
||||
);
|
||||
values({put, _PostgreSQLType}) ->
|
||||
values(common);
|
||||
values(common) ->
|
||||
#{
|
||||
<<"database">> => <<"emqx_data">>,
|
||||
|
|
|
@ -42,6 +42,7 @@
|
|||
%% gen_server callbacks
|
||||
-export([
|
||||
init/1,
|
||||
handle_continue/2,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
|
@ -74,8 +75,8 @@
|
|||
%% APIs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
start_link([]) ->
|
||||
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
|
||||
start_link(Conf) ->
|
||||
gen_server:start_link({local, ?MODULE}, ?MODULE, Conf, []).
|
||||
|
||||
info() ->
|
||||
gen_server:call(?MODULE, info).
|
||||
|
@ -84,49 +85,41 @@ info() ->
|
|||
%% gen_server callbacks
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
init([]) ->
|
||||
#{interval := Interval} = opts(),
|
||||
{ok, #{timer => ensure_timer(Interval), ok => 0, failed => 0}}.
|
||||
init(Conf) ->
|
||||
{ok, #{}, {continue, Conf}}.
|
||||
|
||||
handle_call(info, _From, State = #{timer := Timer}) ->
|
||||
{reply, State#{opts => opts(), next_push_ms => erlang:read_timer(Timer)}, State};
|
||||
handle_continue(Conf, State) ->
|
||||
Opts = #{interval := Interval} = opts(Conf),
|
||||
{noreply, State#{
|
||||
timer => ensure_timer(Interval),
|
||||
opts => Opts,
|
||||
ok => 0,
|
||||
failed => 0
|
||||
}}.
|
||||
|
||||
handle_call(info, _From, State = #{timer := Timer, opts := Opts}) ->
|
||||
{reply, State#{opts => Opts, next_push_ms => erlang:read_timer(Timer)}, State};
|
||||
handle_call(_Msg, _From, State) ->
|
||||
{reply, ok, State}.
|
||||
|
||||
handle_cast(_Msg, State) ->
|
||||
{noreply, State}.
|
||||
|
||||
handle_info({timeout, Timer, ?TIMER_MSG}, State = #{timer := Timer}) ->
|
||||
#{
|
||||
interval := Interval,
|
||||
headers := Headers,
|
||||
job_name := JobName,
|
||||
push_gateway_server := Server
|
||||
} = opts(),
|
||||
PushRes = push_to_push_gateway(Server, Headers, JobName),
|
||||
handle_info({timeout, Timer, ?TIMER_MSG}, State = #{timer := Timer, opts := Opts}) ->
|
||||
#{interval := Interval, headers := Headers, url := Server} = Opts,
|
||||
PushRes = push_to_push_gateway(Server, Headers),
|
||||
NewTimer = ensure_timer(Interval),
|
||||
NewState = maps:update_with(PushRes, fun(C) -> C + 1 end, 1, State#{timer => NewTimer}),
|
||||
%% The data can be large; hibernate to save memory and avoid system-monitor warnings.
|
||||
{noreply, NewState, hibernate};
|
||||
handle_info({update, Conf}, State = #{timer := Timer}) ->
|
||||
emqx_utils:cancel_timer(Timer),
|
||||
handle_continue(Conf, State);
|
||||
handle_info(_Msg, State) ->
|
||||
{noreply, State}.
|
||||
|
||||
push_to_push_gateway(Uri, Headers, JobName) when is_list(Headers) ->
|
||||
[Name, Ip] = string:tokens(atom_to_list(node()), "@"),
|
||||
% NOTE: allowing errors here to keep rough backward compatibility
|
||||
{JobName1, Errors} = emqx_template:render(
|
||||
emqx_template:parse(JobName),
|
||||
#{<<"name">> => Name, <<"host">> => Ip}
|
||||
),
|
||||
_ =
|
||||
Errors == [] orelse
|
||||
?SLOG(warning, #{
|
||||
msg => "prometheus_job_name_template_invalid",
|
||||
errors => Errors,
|
||||
template => JobName
|
||||
}),
|
||||
push_to_push_gateway(Url, Headers) when is_list(Headers) ->
|
||||
Data = prometheus_text_format:format(),
|
||||
Url = lists:concat([Uri, "/metrics/job/", unicode:characters_to_list(JobName1)]),
|
||||
case httpc:request(post, {Url, Headers, "text/plain", Data}, ?HTTP_OPTIONS, []) of
|
||||
{ok, {{"HTTP/1.1", 200, _}, _RespHeaders, _RespBody}} ->
|
||||
ok;
|
||||
|
@ -152,8 +145,26 @@ ensure_timer(Interval) ->
|
|||
%%--------------------------------------------------------------------
|
||||
%% prometheus callbacks
|
||||
%%--------------------------------------------------------------------
|
||||
opts() ->
|
||||
emqx_conf:get(?PROMETHEUS).
|
||||
opts(#{interval := Interval, headers := Headers, job_name := JobName, push_gateway_server := Url}) ->
|
||||
#{interval => Interval, headers => Headers, url => join_url(Url, JobName)};
|
||||
opts(#{push_gateway := #{url := Url, job_name := JobName} = PushGateway}) ->
|
||||
maps:put(url, join_url(Url, JobName), PushGateway).
|
||||
|
||||
join_url(Url, JobName0) ->
|
||||
[Name, Ip] = string:tokens(atom_to_list(node()), "@"),
|
||||
% NOTE: allowing errors here to keep rough backward compatibility
|
||||
{JobName1, Errors} = emqx_template:render(
|
||||
emqx_template:parse(JobName0),
|
||||
#{<<"name">> => Name, <<"host">> => Ip}
|
||||
),
|
||||
_ =
|
||||
Errors == [] orelse
|
||||
?SLOG(warning, #{
|
||||
msg => "prometheus_job_name_template_invalid",
|
||||
errors => Errors,
|
||||
template => JobName0
|
||||
}),
|
||||
lists:concat([Url, "/metrics/job/", unicode:characters_to_list(JobName1)]).
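%% For example (hypothetical node name `emqx@127.0.0.1` and the default job_name template):
%%   join_url("http://127.0.0.1:9091", <<"${name}/instance/${name}~${host}">>)
%%   -> "http://127.0.0.1:9091/metrics/job/emqx/instance/emqx~127.0.0.1"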
|
||||
|
||||
deregister_cleanup(_Registry) ->
|
||||
ok.
|
||||
|
|
|
@ -20,8 +20,6 @@
|
|||
|
||||
-include_lib("hocon/include/hoconsc.hrl").
|
||||
|
||||
-import(hoconsc, [ref/2]).
|
||||
|
||||
-export([
|
||||
api_spec/0,
|
||||
paths/0,
|
||||
|
@ -29,11 +27,10 @@
|
|||
]).
|
||||
|
||||
-export([
|
||||
prometheus/2,
|
||||
setting/2,
|
||||
stats/2
|
||||
]).
|
||||
|
||||
-define(SCHEMA_MODULE, emqx_prometheus_schema).
|
||||
-define(TAGS, [<<"Monitor">>]).
|
||||
|
||||
api_spec() ->
|
||||
|
@ -47,21 +44,21 @@ paths() ->
|
|||
|
||||
schema("/prometheus") ->
|
||||
#{
|
||||
'operationId' => prometheus,
|
||||
'operationId' => setting,
|
||||
get =>
|
||||
#{
|
||||
description => ?DESC(get_prom_conf_info),
|
||||
tags => ?TAGS,
|
||||
responses =>
|
||||
#{200 => prometheus_config_schema()}
|
||||
#{200 => prometheus_setting_schema()}
|
||||
},
|
||||
put =>
|
||||
#{
|
||||
description => ?DESC(update_prom_conf_info),
|
||||
tags => ?TAGS,
|
||||
'requestBody' => prometheus_config_schema(),
|
||||
'requestBody' => prometheus_setting_schema(),
|
||||
responses =>
|
||||
#{200 => prometheus_config_schema()}
|
||||
#{200 => prometheus_setting_schema()}
|
||||
}
|
||||
};
|
||||
schema("/prometheus/stats") ->
|
||||
|
@ -71,19 +68,24 @@ schema("/prometheus/stats") ->
|
|||
#{
|
||||
description => ?DESC(get_prom_data),
|
||||
tags => ?TAGS,
|
||||
security => [],
|
||||
security => security(),
|
||||
responses =>
|
||||
#{200 => prometheus_data_schema()}
|
||||
}
|
||||
}.
|
||||
|
||||
security() ->
|
||||
case emqx_config:get([prometheus, enable_basic_auth], false) of
|
||||
true -> [#{'basicAuth' => []}, #{'bearerAuth' => []}];
|
||||
false -> []
|
||||
end.
|
||||
%%--------------------------------------------------------------------
|
||||
%% API Handler funcs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
prometheus(get, _Params) ->
|
||||
setting(get, _Params) ->
|
||||
{200, emqx:get_raw_config([<<"prometheus">>], #{})};
|
||||
prometheus(put, #{body := Body}) ->
|
||||
setting(put, #{body := Body}) ->
|
||||
case emqx_prometheus_config:update(Body) of
|
||||
{ok, NewConfig} ->
|
||||
{200, NewConfig};
|
||||
|
@ -110,20 +112,57 @@ stats(get, #{headers := Headers}) ->
|
|||
%% Internal funcs
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
prometheus_config_schema() ->
|
||||
emqx_dashboard_swagger:schema_with_example(
|
||||
ref(?SCHEMA_MODULE, "prometheus"),
|
||||
prometheus_config_example()
|
||||
prometheus_setting_schema() ->
|
||||
[{prometheus, #{type := Setting}}] = emqx_prometheus_schema:roots(),
|
||||
emqx_dashboard_swagger:schema_with_examples(
|
||||
Setting,
|
||||
[
|
||||
recommend_setting_example(),
|
||||
legacy_setting_example()
|
||||
]
|
||||
).
|
||||
|
||||
prometheus_config_example() ->
|
||||
#{
|
||||
enable => true,
|
||||
interval => "15s",
|
||||
push_gateway_server => <<"http://127.0.0.1:9091">>,
|
||||
headers => #{'header-name' => 'header-value'},
|
||||
job_name => <<"${name}/instance/${name}~${host}">>
|
||||
}.
|
||||
legacy_setting_example() ->
|
||||
Summary = <<"legacy_deprecated_setting">>,
|
||||
{Summary, #{
|
||||
summary => Summary,
|
||||
value => #{
|
||||
enable => true,
|
||||
interval => <<"15s">>,
|
||||
push_gateway_server => <<"http://127.0.0.1:9091">>,
|
||||
headers => #{<<"Authorization">> => <<"Basic YWRtaW46Y2JraG55eWd5QDE=">>},
|
||||
job_name => <<"${name}/instance/${name}~${host}">>,
|
||||
vm_dist_collector => <<"disabled">>,
|
||||
vm_memory_collector => <<"disabled">>,
|
||||
vm_msacc_collector => <<"disabled">>,
|
||||
mnesia_collector => <<"disabled">>,
|
||||
vm_statistics_collector => <<"disabled">>,
|
||||
vm_system_info_collector => <<"disabled">>
|
||||
}
|
||||
}}.
|
||||
|
||||
recommend_setting_example() ->
|
||||
Summary = <<"recommend_setting">>,
|
||||
{Summary, #{
|
||||
summary => Summary,
|
||||
value => #{
|
||||
enable_basic_auth => false,
|
||||
push_gateway => #{
|
||||
interval => <<"15s">>,
|
||||
url => <<"http://127.0.0.1:9091">>,
|
||||
headers => #{<<"Authorization">> => <<"Basic YWRtaW46Y2JraG55eWd5QDE=">>},
|
||||
job_name => <<"${name}/instance/${name}~${host}">>
|
||||
},
|
||||
collectors => #{
|
||||
vm_dist => <<"disabled">>,
|
||||
vm_memory => <<"disabled">>,
|
||||
vm_msacc => <<"disabled">>,
|
||||
mnesia => <<"disabled">>,
|
||||
vm_statistics => <<"disabled">>,
|
||||
vm_system_info => <<"disabled">>
|
||||
}
|
||||
}
|
||||
}}.
|
||||
|
||||
prometheus_data_schema() ->
|
||||
#{
|
||||
|
|
|
@ -20,8 +20,9 @@
|
|||
-include("emqx_prometheus.hrl").
|
||||
|
||||
-export([add_handler/0, remove_handler/0]).
|
||||
-export([post_config_update/5]).
|
||||
-export([pre_config_update/3, post_config_update/5]).
|
||||
-export([update/1]).
|
||||
-export([conf/0, is_push_gateway_server_enabled/1]).
|
||||
|
||||
update(Config) ->
|
||||
case
|
||||
|
@ -45,9 +46,55 @@ remove_handler() ->
|
|||
ok = emqx_config_handler:remove_handler(?PROMETHEUS),
|
||||
ok.
|
||||
|
||||
post_config_update(?PROMETHEUS, _Req, New, _Old, AppEnvs) ->
|
||||
%% When a config written in the old schema version is imported,
%% we need to respect it and convert it to the new schema.
|
||||
pre_config_update(?PROMETHEUS, MergeConf, OriginConf) ->
|
||||
OriginType = emqx_prometheus_schema:is_recommend_type(OriginConf),
|
||||
MergeType = emqx_prometheus_schema:is_recommend_type(MergeConf),
|
||||
{ok,
|
||||
case {OriginType, MergeType} of
|
||||
{true, false} -> to_recommend_type(MergeConf);
|
||||
_ -> MergeConf
|
||||
end}.
|
||||
|
||||
to_recommend_type(Conf) ->
|
||||
#{
|
||||
<<"push_gateway">> => to_push_gateway(Conf),
|
||||
<<"collectors">> => to_collectors(Conf)
|
||||
}.
|
||||
|
||||
to_push_gateway(Conf) ->
|
||||
Init = maps:with([<<"interval">>, <<"headers">>, <<"job_name">>, <<"enable">>], Conf),
|
||||
case maps:get(<<"push_gateway_server">>, Conf, "") of
|
||||
"" ->
|
||||
Init#{<<"enable">> => false};
|
||||
Url ->
|
||||
Init#{<<"url">> => Url}
|
||||
end.
|
||||
|
||||
to_collectors(Conf) ->
|
||||
lists:foldl(
|
||||
fun({From, To}, Acc) ->
|
||||
case maps:find(From, Conf) of
|
||||
{ok, Value} -> Acc#{To => Value};
|
||||
error -> Acc
|
||||
end
|
||||
end,
|
||||
#{},
|
||||
[
|
||||
{<<"vm_dist_collector">>, <<"vm_dist">>},
|
||||
{<<"mnesia_collector">>, <<"mnesia">>},
|
||||
{<<"vm_statistics_collector">>, <<"vm_statistics">>},
|
||||
{<<"vm_system_info_collector">>, <<"vm_system_info">>},
|
||||
{<<"vm_memory_collector">>, <<"vm_memory">>},
|
||||
{<<"vm_msacc_collector">>, <<"vm_msacc">>}
|
||||
]
|
||||
).
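%% A hedged example of the legacy-to-recommended conversion (hypothetical values):
%%   to_recommend_type(#{<<"push_gateway_server">> => <<"http://127.0.0.1:9091">>,
%%                       <<"interval">> => <<"15s">>,
%%                       <<"vm_dist_collector">> => <<"disabled">>})
%%   -> #{<<"push_gateway">> => #{<<"interval">> => <<"15s">>,
%%                                <<"url">> => <<"http://127.0.0.1:9091">>},
%%        <<"collectors">> => #{<<"vm_dist">> => <<"disabled">>}}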
|
||||
|
||||
post_config_update(?PROMETHEUS, _Req, New, Old, AppEnvs) ->
|
||||
update_prometheus(AppEnvs),
|
||||
update_push_gateway(New);
|
||||
_ = update_push_gateway(New),
|
||||
update_auth(New, Old);
|
||||
post_config_update(_ConfPath, _Req, _NewConf, _OldConf, _AppEnvs) ->
|
||||
ok.
|
||||
|
||||
|
@ -64,7 +111,29 @@ update_prometheus(AppEnvs) ->
|
|||
),
|
||||
application:set_env(AppEnvs).
|
||||
|
||||
update_push_gateway(#{enable := true}) ->
|
||||
emqx_prometheus_sup:start_child(?APP);
|
||||
update_push_gateway(#{enable := false}) ->
|
||||
emqx_prometheus_sup:stop_child(?APP).
|
||||
update_push_gateway(Prometheus) ->
|
||||
case is_push_gateway_server_enabled(Prometheus) of
|
||||
true ->
|
||||
case erlang:whereis(?APP) of
|
||||
undefined -> emqx_prometheus_sup:start_child(?APP, Prometheus);
|
||||
Pid -> emqx_prometheus_sup:update_child(Pid, Prometheus)
|
||||
end;
|
||||
false ->
|
||||
emqx_prometheus_sup:stop_child(?APP)
|
||||
end.
|
||||
|
||||
update_auth(#{enable_basic_auth := New}, #{enable_basic_auth := Old}) when New =/= Old ->
|
||||
emqx_dashboard_listener:regenerate_minirest_dispatch(),
|
||||
ok;
|
||||
update_auth(_, _) ->
|
||||
ok.
|
||||
|
||||
conf() ->
|
||||
emqx_config:get(?PROMETHEUS).
|
||||
|
||||
is_push_gateway_server_enabled(#{enable := true, push_gateway_server := Url}) ->
|
||||
Url =/= "";
|
||||
is_push_gateway_server_enabled(#{push_gateway := #{url := Url, enable := Enable}}) ->
|
||||
Enable andalso Url =/= "";
|
||||
is_push_gateway_server_enabled(_) ->
|
||||
false.
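%% Illustrative cases (hypothetical values):
%%   is_push_gateway_server_enabled(#{push_gateway => #{url => <<"http://127.0.0.1:9091">>, enable => true}}) -> true
%%   is_push_gateway_server_enabled(#{enable => true, push_gateway_server => ""}) -> false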
|
||||
|
|
|
@ -27,23 +27,68 @@
|
|||
desc/1,
|
||||
translation/1,
|
||||
convert_headers/2,
|
||||
validate_push_gateway_server/1
|
||||
validate_url/1,
|
||||
is_recommend_type/1
|
||||
]).
|
||||
|
||||
namespace() -> "prometheus".
|
||||
namespace() -> prometheus.
|
||||
|
||||
roots() -> [{"prometheus", ?HOCON(?R_REF("prometheus"), #{translate_to => ["prometheus"]})}].
|
||||
|
||||
fields("prometheus") ->
|
||||
roots() ->
|
||||
[
|
||||
{push_gateway_server,
|
||||
{prometheus,
|
||||
?HOCON(
|
||||
?UNION(setting_union_schema()),
|
||||
#{translate_to => ["prometheus"], default => #{}}
|
||||
)}
|
||||
].
|
||||
|
||||
fields(recommend_setting) ->
|
||||
[
|
||||
{enable_basic_auth,
|
||||
?HOCON(
|
||||
boolean(),
|
||||
#{
|
||||
default => false,
|
||||
required => true,
|
||||
importance => ?IMPORTANCE_HIGH,
|
||||
desc => ?DESC(enable_basic_auth)
|
||||
}
|
||||
)},
|
||||
{push_gateway,
|
||||
?HOCON(
|
||||
?R_REF(push_gateway),
|
||||
#{
|
||||
required => false,
|
||||
importance => ?IMPORTANCE_MEDIUM,
|
||||
desc => ?DESC(push_gateway)
|
||||
}
|
||||
)},
|
||||
{collectors,
|
||||
?HOCON(?R_REF(collectors), #{
|
||||
required => false,
|
||||
importance => ?IMPORTANCE_LOW,
|
||||
desc => ?DESC(collectors)
|
||||
})}
|
||||
];
|
||||
fields(push_gateway) ->
|
||||
[
|
||||
{enable,
|
||||
?HOCON(
|
||||
boolean(),
|
||||
#{
|
||||
default => false,
|
||||
required => true,
|
||||
desc => ?DESC(push_gateway_enable)
|
||||
}
|
||||
)},
|
||||
{url,
|
||||
?HOCON(
|
||||
string(),
|
||||
#{
|
||||
required => false,
|
||||
default => <<"http://127.0.0.1:9091">>,
|
||||
required => true,
|
||||
validator => fun ?MODULE:validate_push_gateway_server/1,
|
||||
desc => ?DESC(push_gateway_server)
|
||||
validator => fun ?MODULE:validate_url/1,
|
||||
desc => ?DESC(push_gateway_url)
|
||||
}
|
||||
)},
|
||||
{interval,
|
||||
|
@ -51,7 +96,7 @@ fields("prometheus") ->
|
|||
emqx_schema:timeout_duration_ms(),
|
||||
#{
|
||||
default => <<"15s">>,
|
||||
required => true,
|
||||
required => false,
|
||||
desc => ?DESC(interval)
|
||||
}
|
||||
)},
|
||||
|
@ -70,18 +115,121 @@ fields("prometheus") ->
|
|||
binary(),
|
||||
#{
|
||||
default => <<"${name}/instance/${name}~${host}">>,
|
||||
required => true,
|
||||
required => false,
|
||||
desc => ?DESC(job_name)
|
||||
}
|
||||
)}
|
||||
];
|
||||
fields(collectors) ->
|
||||
[
|
||||
{vm_dist,
|
||||
?HOCON(
|
||||
hoconsc:enum([disabled, enabled]),
|
||||
#{
|
||||
default => disabled,
|
||||
required => true,
|
||||
desc => ?DESC(vm_dist_collector)
|
||||
}
|
||||
)},
|
||||
%% Mnesia metrics mainly using mnesia:system_info/1
|
||||
{mnesia,
|
||||
?HOCON(
|
||||
hoconsc:enum([enabled, disabled]),
|
||||
#{
|
||||
default => disabled,
|
||||
required => true,
|
||||
desc => ?DESC(mnesia_collector)
|
||||
}
|
||||
)},
|
||||
%% Collects Erlang VM metrics using erlang:statistics/1.
|
||||
{vm_statistics,
|
||||
?HOCON(
|
||||
hoconsc:enum([enabled, disabled]),
|
||||
#{
|
||||
default => disabled,
|
||||
required => true,
|
||||
desc => ?DESC(vm_statistics_collector)
|
||||
}
|
||||
)},
|
||||
%% Collects Erlang VM metrics using erlang:system_info/1.
|
||||
{vm_system_info,
|
||||
?HOCON(
|
||||
hoconsc:enum([enabled, disabled]),
|
||||
#{
|
||||
default => disabled,
|
||||
required => true,
|
||||
desc => ?DESC(vm_system_info_collector)
|
||||
}
|
||||
)},
|
||||
%% Collects information about memory dynamically allocated by the Erlang VM using erlang:memory/0,
|
||||
%% it also provides basic (D)ETS statistics.
|
||||
{vm_memory,
|
||||
?HOCON(
|
||||
hoconsc:enum([enabled, disabled]),
|
||||
#{
|
||||
default => disabled,
|
||||
required => true,
|
||||
desc => ?DESC(vm_memory_collector)
|
||||
}
|
||||
)},
|
||||
%% Collects microstate accounting metrics using erlang:statistics(microstate_accounting).
|
||||
{vm_msacc,
|
||||
?HOCON(
|
||||
hoconsc:enum([enabled, disabled]),
|
||||
#{
|
||||
default => disabled,
|
||||
required => true,
|
||||
desc => ?DESC(vm_msacc_collector)
|
||||
}
|
||||
)}
|
||||
];
|
||||
fields(legacy_deprecated_setting) ->
|
||||
[
|
||||
{push_gateway_server,
|
||||
?HOCON(
|
||||
string(),
|
||||
#{
|
||||
default => <<"http://127.0.0.1:9091">>,
|
||||
required => true,
|
||||
validator => fun ?MODULE:validate_url/1,
|
||||
desc => ?DESC(legacy_push_gateway_server)
|
||||
}
|
||||
)},
|
||||
{interval,
|
||||
?HOCON(
|
||||
emqx_schema:timeout_duration_ms(),
|
||||
#{
|
||||
default => <<"15s">>,
|
||||
required => true,
|
||||
desc => ?DESC(legacy_interval)
|
||||
}
|
||||
)},
|
||||
{headers,
|
||||
?HOCON(
|
||||
typerefl:alias("map", list({string(), string()}), #{}, [string(), string()]),
|
||||
#{
|
||||
default => #{},
|
||||
required => false,
|
||||
converter => fun ?MODULE:convert_headers/2,
|
||||
desc => ?DESC(legacy_headers)
|
||||
}
|
||||
)},
|
||||
{job_name,
|
||||
?HOCON(
|
||||
binary(),
|
||||
#{
|
||||
default => <<"${name}/instance/${name}~${host}">>,
|
||||
required => true,
|
||||
desc => ?DESC(legacy_job_name)
|
||||
}
|
||||
)},
|
||||
|
||||
{enable,
|
||||
?HOCON(
|
||||
boolean(),
|
||||
#{
|
||||
default => false,
|
||||
required => true,
|
||||
desc => ?DESC(enable)
|
||||
desc => ?DESC(legacy_enable)
|
||||
}
|
||||
)},
|
||||
{vm_dist_collector,
|
||||
|
@ -91,7 +239,7 @@ fields("prometheus") ->
|
|||
default => disabled,
|
||||
required => true,
|
||||
importance => ?IMPORTANCE_LOW,
|
||||
desc => ?DESC(vm_dist_collector)
|
||||
desc => ?DESC(legacy_vm_dist_collector)
|
||||
}
|
||||
)},
|
||||
%% Mnesia metrics mainly using mnesia:system_info/1
|
||||
|
@ -102,7 +250,7 @@ fields("prometheus") ->
|
|||
default => disabled,
|
||||
required => true,
|
||||
importance => ?IMPORTANCE_LOW,
|
||||
desc => ?DESC(mnesia_collector)
|
||||
desc => ?DESC(legacy_mnesia_collector)
|
||||
}
|
||||
)},
|
||||
%% Collects Erlang VM metrics using erlang:statistics/1.
|
||||
|
@ -113,7 +261,7 @@ fields("prometheus") ->
|
|||
default => disabled,
|
||||
required => true,
|
||||
importance => ?IMPORTANCE_LOW,
|
||||
desc => ?DESC(vm_statistics_collector)
|
||||
desc => ?DESC(legacy_vm_statistics_collector)
|
||||
}
|
||||
)},
|
||||
%% Collects Erlang VM metrics using erlang:system_info/1.
|
||||
|
@ -124,7 +272,7 @@ fields("prometheus") ->
|
|||
default => disabled,
|
||||
required => true,
|
||||
importance => ?IMPORTANCE_LOW,
|
||||
desc => ?DESC(vm_system_info_collector)
|
||||
desc => ?DESC(legacy_vm_system_info_collector)
|
||||
}
|
||||
)},
|
||||
%% Collects information about memory dynamically allocated by the Erlang VM using erlang:memory/0,
|
||||
|
@ -136,7 +284,7 @@ fields("prometheus") ->
|
|||
default => disabled,
|
||||
required => true,
|
||||
importance => ?IMPORTANCE_LOW,
|
||||
desc => ?DESC(vm_memory_collector)
|
||||
desc => ?DESC(legacy_vm_memory_collector)
|
||||
}
|
||||
)},
|
||||
%% Collects microstate accounting metrics using erlang:statistics(microstate_accounting).
|
||||
|
@ -147,14 +295,48 @@ fields("prometheus") ->
|
|||
default => disabled,
|
||||
required => true,
|
||||
importance => ?IMPORTANCE_LOW,
|
||||
desc => ?DESC(vm_msacc_collector)
|
||||
desc => ?DESC(legacy_vm_msacc_collector)
|
||||
}
|
||||
)}
|
||||
].
|
||||
|
||||
desc("prometheus") -> ?DESC(prometheus);
|
||||
setting_union_schema() ->
|
||||
RecommendSetting = ?R_REF(recommend_setting),
|
||||
LegacySetting = ?R_REF(legacy_deprecated_setting),
|
||||
fun
|
||||
(all_union_members) ->
|
||||
[RecommendSetting, LegacySetting];
|
||||
({value, Setting}) ->
|
||||
case is_recommend_type(Setting) of
|
||||
true -> [RecommendSetting];
|
||||
false -> [LegacySetting]
|
||||
end
|
||||
end.
|
||||
|
||||
%% For a setting to be considered new-schema,
%% all of its keys must be fields of the recommended configuration.
|
||||
is_recommend_type(Setting) ->
|
||||
case maps:keys(Setting) of
|
||||
[] ->
|
||||
true;
|
||||
Keys ->
|
||||
NewKeys = fields(recommend_setting),
|
||||
Fun = fun(Key0) ->
|
||||
Key = binary_to_existing_atom(Key0),
|
||||
lists:keymember(Key, 1, NewKeys)
|
||||
end,
|
||||
lists:all(Fun, Keys)
|
||||
end.
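%% For instance (hypothetical inputs): a map whose keys are all fields of the
%% recommended schema is classified as new, while a legacy-only key is not:
%%   is_recommend_type(#{<<"push_gateway">> => #{}}) -> true
%%   is_recommend_type(#{<<"push_gateway_server">> => <<"http://127.0.0.1:9091">>}) -> false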
|
||||
|
||||
desc(prometheus) -> ?DESC(prometheus);
|
||||
desc(collectors) -> ?DESC(collectors);
|
||||
desc(legacy_deprecated_setting) -> ?DESC(legacy_deprecated_setting);
|
||||
desc(recommend_setting) -> ?DESC(recommend_setting);
|
||||
desc(push_gateway) -> ?DESC(push_gateway);
|
||||
desc(_) -> undefined.
|
||||
|
||||
convert_headers(undefined, _) ->
|
||||
undefined;
|
||||
convert_headers(Headers, #{make_serializable := true}) ->
|
||||
Headers;
|
||||
convert_headers(<<>>, _Opts) ->
|
||||
|
@ -170,10 +352,17 @@ convert_headers(Headers, _Opts) when is_map(Headers) ->
|
|||
convert_headers(Headers, _Opts) when is_list(Headers) ->
|
||||
Headers.
|
||||
|
||||
validate_push_gateway_server(Url) ->
|
||||
validate_url(Url) ->
|
||||
case uri_string:parse(Url) of
|
||||
#{scheme := S} when S =:= "https" orelse S =:= "http" -> ok;
|
||||
_ -> {error, "Invalid url"}
|
||||
#{scheme := S} when
|
||||
S =:= "https";
|
||||
S =:= "http";
|
||||
S =:= <<"https">>;
|
||||
S =:= <<"http">>
|
||||
->
|
||||
ok;
|
||||
_ ->
|
||||
{error, "Invalid url"}
|
||||
end.
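%% For example (mirrors the accepted schemes above):
%%   validate_url("http://127.0.0.1:9091") -> ok
%%   validate_url("127.0.0.1:9091") -> {error, "Invalid url"}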
|
||||
|
||||
%% For CI tests: CI does not load the whole emqx_conf_schema.
|
||||
|
|
|
@ -21,6 +21,8 @@
|
|||
-export([
|
||||
start_link/0,
|
||||
start_child/1,
|
||||
start_child/2,
|
||||
update_child/2,
|
||||
stop_child/1
|
||||
]).
|
||||
|
||||
|
@ -39,11 +41,18 @@
|
|||
start_link() ->
|
||||
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
|
||||
|
||||
-spec start_child(supervisor:child_spec() | atom()) -> ok.
|
||||
start_child(ChildSpec) when is_map(ChildSpec) ->
|
||||
assert_started(supervisor:start_child(?MODULE, ChildSpec));
|
||||
-spec start_child(atom()) -> ok.
|
||||
start_child(Mod) when is_atom(Mod) ->
|
||||
assert_started(supervisor:start_child(?MODULE, ?CHILD(Mod, []))).
|
||||
start_child(Mod, emqx_prometheus_config:conf()).
|
||||
|
||||
-spec start_child(atom(), map()) -> ok.
|
||||
start_child(Mod, Conf) when is_atom(Mod) ->
|
||||
assert_started(supervisor:start_child(?MODULE, ?CHILD(Mod, Conf))).
|
||||
|
||||
-spec update_child(pid() | atom(), map()) -> ok.
|
||||
update_child(Pid, Conf) ->
|
||||
erlang:send(Pid, {update, Conf}),
|
||||
ok.
|
||||
|
||||
-spec stop_child(any()) -> ok | {error, term()}.
|
||||
stop_child(ChildId) ->
|
||||
|
@ -54,10 +63,11 @@ stop_child(ChildId) ->
|
|||
end.
|
||||
|
||||
init([]) ->
|
||||
Conf = emqx_prometheus_config:conf(),
|
||||
Children =
|
||||
case emqx_conf:get([prometheus, enable], false) of
|
||||
case emqx_prometheus_config:is_push_gateway_server_enabled(Conf) of
|
||||
false -> [];
|
||||
true -> [?CHILD(emqx_prometheus, [])]
|
||||
true -> [?CHILD(emqx_prometheus, Conf)]
|
||||
end,
|
||||
{ok, {{one_for_one, 10, 3600}, Children}}.
|
||||
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
-compile(export_all).
|
||||
|
||||
-define(CLUSTER_RPC_SHARD, emqx_cluster_rpc_shard).
|
||||
-define(CONF_DEFAULT, <<
|
||||
-define(LEGACY_CONF_DEFAULT, <<
|
||||
"\n"
|
||||
"prometheus {\n"
|
||||
" push_gateway_server = \"http://127.0.0.1:9091\"\n"
|
||||
|
@ -38,45 +38,121 @@
|
|||
" vm_msacc_collector = disabled\n"
|
||||
"}\n"
|
||||
>>).
|
||||
-define(CONF_DEFAULT, #{
|
||||
<<"prometheus">> =>
|
||||
#{
|
||||
<<"enable_basic_auth">> => false,
|
||||
<<"collectors">> =>
|
||||
#{
|
||||
<<"mnesia">> => <<"disabled">>,
|
||||
<<"vm_dist">> => <<"disabled">>,
|
||||
<<"vm_memory">> => <<"disabled">>,
|
||||
<<"vm_msacc">> => <<"disabled">>,
|
||||
<<"vm_statistics">> => <<"disabled">>,
|
||||
<<"vm_system_info">> => <<"disabled">>
|
||||
},
|
||||
<<"push_gateway">> =>
|
||||
#{
|
||||
<<"enable">> => true,
|
||||
<<"headers">> => #{<<"Authorization">> => <<"some-authz-tokens">>},
|
||||
<<"interval">> => <<"1s">>,
|
||||
<<"job_name">> => <<"${name}~${host}">>,
|
||||
<<"url">> => <<"http://127.0.0.1:9091">>
|
||||
}
|
||||
}
|
||||
}).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Setups
|
||||
%%--------------------------------------------------------------------
|
||||
all() ->
|
||||
[
|
||||
{group, new_config},
|
||||
{group, legacy_config}
|
||||
].
|
||||
|
||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||
groups() ->
|
||||
[
|
||||
{new_config, [sequence], common_tests()},
|
||||
{legacy_config, [sequence], common_tests()}
|
||||
].
|
||||
|
||||
init_per_suite(Cfg) ->
|
||||
suite() ->
|
||||
[{timetrap, {seconds, 30}}].
|
||||
|
||||
common_tests() ->
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
init_per_group(new_config, Config) ->
|
||||
init_group(),
|
||||
load_config(),
|
||||
emqx_common_test_helpers:start_apps([emqx_prometheus]),
|
||||
%% cover OLP metrics
|
||||
{ok, _} = emqx:update_config([overload_protection, enable], true),
|
||||
Config;
|
||||
init_per_group(legacy_config, Config) ->
|
||||
init_group(),
|
||||
load_legacy_config(),
|
||||
emqx_common_test_helpers:start_apps([emqx_prometheus]),
|
||||
{ok, _} = emqx:update_config([overload_protection, enable], false),
|
||||
Config.
|
||||
|
||||
init_group() ->
|
||||
application:load(emqx_conf),
|
||||
ok = ekka:start(),
|
||||
ok = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], infinity),
|
||||
meck:new(emqx_alarm, [non_strict, passthrough, no_link]),
|
||||
meck:expect(emqx_alarm, activate, 3, ok),
|
||||
meck:expect(emqx_alarm, deactivate, 3, ok),
|
||||
meck:expect(emqx_alarm, deactivate, 3, ok).
|
||||
|
||||
load_config(),
|
||||
emqx_common_test_helpers:start_apps([emqx_prometheus]),
|
||||
Cfg.
|
||||
|
||||
end_per_suite(_Cfg) ->
|
||||
end_group() ->
|
||||
ekka:stop(),
|
||||
mria:stop(),
|
||||
mria_mnesia:delete_schema(),
|
||||
meck:unload(emqx_alarm),
|
||||
|
||||
emqx_common_test_helpers:stop_apps([emqx_prometheus]).
|
||||
|
||||
end_per_group(_Group, Config) ->
|
||||
end_group(),
|
||||
Config.
|
||||
|
||||
init_per_testcase(t_assert_push, Config) ->
|
||||
meck:new(httpc, [passthrough]),
|
||||
Config;
|
||||
init_per_testcase(t_push_gateway, Config) ->
|
||||
start_mock_pushgateway(9091),
|
||||
Config;
|
||||
init_per_testcase(_Testcase, Config) ->
|
||||
Config.
|
||||
|
||||
end_per_testcase(t_push_gateway, Config) ->
|
||||
stop_mock_pushgateway(),
|
||||
Config;
|
||||
end_per_testcase(t_assert_push, _Config) ->
|
||||
meck:unload(httpc),
|
||||
ok;
|
||||
end_per_testcase(_Testcase, _Config) ->
|
||||
ok.
|
||||
|
||||
load_config() ->
|
||||
ok = emqx_common_test_helpers:load_config(emqx_prometheus_schema, ?CONF_DEFAULT).
|
||||
|
||||
load_legacy_config() ->
|
||||
ok = emqx_common_test_helpers:load_config(emqx_prometheus_schema, ?LEGACY_CONF_DEFAULT).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Test cases
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
t_start_stop(_) ->
|
||||
App = emqx_prometheus,
|
||||
?assertMatch(ok, emqx_prometheus_sup:start_child(App)),
|
||||
Conf = emqx_prometheus_config:conf(),
|
||||
?assertMatch(ok, emqx_prometheus_sup:start_child(App, Conf)),
|
||||
%% starting twice returns ok.
|
||||
?assertMatch(ok, emqx_prometheus_sup:start_child(App)),
|
||||
?assertMatch(ok, emqx_prometheus_sup:start_child(App, Conf)),
|
||||
ok = gen_server:call(emqx_prometheus, dump, 1000),
|
||||
ok = gen_server:cast(emqx_prometheus, dump),
|
||||
dump = erlang:send(emqx_prometheus, dump),
|
||||
?assertMatch(ok, emqx_prometheus_sup:stop_child(App)),
|
||||
%% stopping twice returns ok.
|
||||
?assertMatch(ok, emqx_prometheus_sup:stop_child(App)),
|
||||
|
@ -88,7 +164,6 @@ t_collector_no_crash_test(_) ->
|
|||
ok.
|
||||
|
||||
t_assert_push(_) ->
|
||||
meck:new(httpc, [passthrough]),
|
||||
Self = self(),
|
||||
AssertPush = fun(Method, Req = {Url, Headers, ContentType, _Data}, HttpOpts, Opts) ->
|
||||
?assertEqual(post, Method),
|
||||
|
@ -99,13 +174,51 @@ t_assert_push(_) ->
|
|||
meck:passthrough([Method, Req, HttpOpts, Opts])
|
||||
end,
|
||||
meck:expect(httpc, request, AssertPush),
|
||||
?assertMatch(ok, emqx_prometheus_sup:start_child(emqx_prometheus)),
|
||||
Conf = emqx_prometheus_config:conf(),
|
||||
?assertMatch(ok, emqx_prometheus_sup:start_child(emqx_prometheus, Conf)),
|
||||
receive
|
||||
pass -> ok
|
||||
after 2000 ->
|
||||
ct:fail(assert_push_request_failed)
|
||||
end.
|
||||
|
||||
t_only_for_coverage(_) ->
|
||||
?assertEqual("5.0.0", emqx_prometheus_proto_v1:introduced_in()),
|
||||
t_push_gateway(_) ->
|
||||
Conf = emqx_prometheus_config:conf(),
|
||||
?assertMatch(ok, emqx_prometheus_sup:stop_child(emqx_prometheus)),
|
||||
?assertMatch(ok, emqx_prometheus_sup:start_child(emqx_prometheus, Conf)),
|
||||
?assertMatch(#{ok := 0, failed := 0}, emqx_prometheus:info()),
|
||||
timer:sleep(1100),
|
||||
?assertMatch(#{ok := 1, failed := 0}, emqx_prometheus:info()),
|
||||
ok = emqx_prometheus_sup:update_child(emqx_prometheus, Conf),
|
||||
?assertMatch(#{ok := 0, failed := 0}, emqx_prometheus:info()),
|
||||
|
||||
ok.
|
||||
|
||||
start_mock_pushgateway(Port) ->
|
||||
application:ensure_all_started(cowboy),
|
||||
Dispatch = cowboy_router:compile([{'_', [{'_', ?MODULE, []}]}]),
|
||||
{ok, _} = cowboy:start_clear(
|
||||
mock_pushgateway_listener,
|
||||
[{port, Port}],
|
||||
#{env => #{dispatch => Dispatch}}
|
||||
).
|
||||
|
||||
stop_mock_pushgateway() ->
|
||||
cowboy:stop_listener(mock_pushgateway_listener).
|
||||
|
||||
init(Req0, Opts) ->
|
||||
Method = cowboy_req:method(Req0),
|
||||
Headers = cowboy_req:headers(Req0),
|
||||
?assertEqual(<<"POST">>, Method),
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"authorization">> := <<"some-authz-tokens">>,
|
||||
<<"content-length">> := _,
|
||||
<<"content-type">> := <<"text/plain">>,
|
||||
<<"host">> := <<"127.0.0.1:9091">>
|
||||
},
|
||||
Headers
|
||||
),
|
||||
RespHeader = #{<<"content-type">> => <<"text/plain; charset=utf-8">>},
|
||||
Req = cowboy_req:reply(200, RespHeader, <<"OK">>, Req0),
|
||||
{ok, Req, Opts}.
|
||||
|
|
|
@ -28,40 +28,59 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Setups
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
all() ->
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
[
|
||||
{group, new_config},
|
||||
{group, legacy_config}
|
||||
].
|
||||
|
||||
groups() ->
|
||||
[
|
||||
{new_config, [sequence], [t_stats_auth_api, t_stats_no_auth_api, t_prometheus_api]},
|
||||
{legacy_config, [sequence], [t_stats_no_auth_api, t_legacy_prometheus_api]}
|
||||
].
|
||||
|
||||
init_per_suite(Config) ->
|
||||
application:load(emqx_conf),
|
||||
ok = ekka:start(),
|
||||
ok = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], infinity),
|
||||
|
||||
meck:new(mria_rlog, [non_strict, passthrough, no_link]),
|
||||
|
||||
emqx_prometheus_SUITE:load_config(),
|
||||
emqx_mgmt_api_test_util:init_suite([emqx_prometheus]),
|
||||
|
||||
emqx_prometheus_SUITE:init_group(),
|
||||
emqx_mgmt_api_test_util:init_suite([emqx_conf]),
|
||||
Config.
|
||||
|
||||
end_per_suite(Config) ->
|
||||
ekka:stop(),
|
||||
mria:stop(),
|
||||
mria_mnesia:delete_schema(),
|
||||
|
||||
meck:unload(mria_rlog),
|
||||
|
||||
emqx_mgmt_api_test_util:end_suite([emqx_prometheus]),
|
||||
emqx_prometheus_SUITE:end_group(),
|
||||
emqx_mgmt_api_test_util:end_suite([emqx_conf]),
|
||||
Config.
|
||||
|
||||
init_per_testcase(_, Config) ->
|
||||
{ok, _} = emqx_cluster_rpc:start_link(),
|
||||
init_per_group(new_config, Config) ->
|
||||
emqx_common_test_helpers:start_apps(
|
||||
[emqx_prometheus],
|
||||
fun(App) -> set_special_configs(App, new_config) end
|
||||
),
|
||||
Config;
|
||||
init_per_group(legacy_config, Config) ->
|
||||
emqx_common_test_helpers:start_apps(
|
||||
[emqx_prometheus],
|
||||
fun(App) -> set_special_configs(App, legacy_config) end
|
||||
),
|
||||
Config.
|
||||
|
||||
end_per_group(_Group, Config) ->
|
||||
_ = application:stop(emqx_prometheus),
|
||||
Config.
|
||||
|
||||
set_special_configs(emqx_dashboard, _) ->
|
||||
emqx_dashboard_api_test_helpers:set_default_config();
|
||||
set_special_configs(emqx_prometheus, new_config) ->
|
||||
emqx_prometheus_SUITE:load_config(),
|
||||
ok;
|
||||
set_special_configs(emqx_prometheus, legacy_config) ->
|
||||
emqx_prometheus_SUITE:load_legacy_config(),
|
||||
ok;
|
||||
set_special_configs(_App, _) ->
|
||||
ok.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Cases
|
||||
%%--------------------------------------------------------------------
|
||||
t_prometheus_api(_) ->
|
||||
t_legacy_prometheus_api(_) ->
|
||||
Path = emqx_mgmt_api_test_util:api_path(["prometheus"]),
|
||||
Auth = emqx_mgmt_api_test_util:auth_header_(),
|
||||
{ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth),
|
||||
|
@ -145,21 +164,133 @@ t_prometheus_api(_) ->
|
|||
),
|
||||
ok.
|
||||
|
||||
t_stats_api(_) ->
|
||||
Path = emqx_mgmt_api_test_util:api_path(["prometheus", "stats"]),
|
||||
t_prometheus_api(_) ->
|
||||
Path = emqx_mgmt_api_test_util:api_path(["prometheus"]),
|
||||
Auth = emqx_mgmt_api_test_util:auth_header_(),
|
||||
Headers = [{"accept", "application/json"}, Auth],
|
||||
{ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, "", Headers),
|
||||
{ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth),
|
||||
|
||||
Conf = emqx_utils_json:decode(Response, [return_maps]),
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"push_gateway">> := #{},
|
||||
<<"collectors">> := _,
|
||||
<<"enable_basic_auth">> := _
|
||||
},
|
||||
Conf
|
||||
),
|
||||
#{
|
||||
<<"push_gateway">> :=
|
||||
#{<<"url">> := Url, <<"enable">> := Enable} = PushGateway,
|
||||
<<"collectors">> := Collector
|
||||
} = Conf,
|
||||
Pid = erlang:whereis(emqx_prometheus),
|
||||
?assertEqual(Enable, undefined =/= Pid, {Url, Pid}),
|
||||
|
||||
NewConf = Conf#{
|
||||
<<"push_gateway">> => PushGateway#{
|
||||
<<"interval">> => <<"2s">>,
|
||||
<<"headers">> => #{
|
||||
<<"test-str1">> => <<"test-value">>,
|
||||
<<"test-str2">> => <<"42">>
|
||||
}
|
||||
},
|
||||
<<"collectors">> => Collector#{
|
||||
<<"vm_dist">> => <<"enabled">>,
|
||||
<<"vm_system_info">> => <<"enabled">>,
|
||||
<<"vm_memory">> => <<"enabled">>,
|
||||
<<"vm_msacc">> => <<"enabled">>,
|
||||
<<"mnesia">> => <<"enabled">>,
|
||||
<<"vm_statistics">> => <<"enabled">>
|
||||
}
|
||||
},
|
||||
{ok, Response2} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, NewConf),
|
||||
|
||||
Conf2 = emqx_utils_json:decode(Response2, [return_maps]),
|
||||
?assertMatch(NewConf, Conf2),
|
||||
|
||||
EnvCollectors = application:get_env(prometheus, collectors, []),
|
||||
PromCollectors = prometheus_registry:collectors(default),
|
||||
?assertEqual(lists:sort(EnvCollectors), lists:sort(PromCollectors)),
|
||||
?assert(lists:member(prometheus_vm_statistics_collector, EnvCollectors), EnvCollectors),
|
||||
|
||||
lists:foreach(
|
||||
fun({C, Enabled}) ->
|
||||
?assertEqual(Enabled, lists:member(C, EnvCollectors), EnvCollectors)
|
||||
end,
|
||||
[
|
||||
{prometheus_vm_dist_collector, true},
|
||||
{prometheus_vm_system_info_collector, true},
|
||||
{prometheus_vm_memory_collector, true},
|
||||
{prometheus_mnesia_collector, true},
|
||||
{prometheus_vm_msacc_collector, true},
|
||||
{prometheus_vm_statistics_collector, true}
|
||||
]
|
||||
),
|
||||
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"push_gateway">> := #{
|
||||
<<"headers">> := #{
|
||||
<<"test-str1">> := <<"test-value">>,
|
||||
<<"test-str2">> := <<"42">>
|
||||
}
|
||||
}
|
||||
},
|
||||
emqx_config:get_raw([prometheus])
|
||||
),
|
||||
?assertMatch(
|
||||
#{
|
||||
push_gateway := #{
|
||||
headers := [
|
||||
{"test-str2", "42"},
|
||||
{"test-str1", "test-value"}
|
||||
]
|
||||
}
|
||||
},
|
||||
emqx_config:get([prometheus])
|
||||
),
|
||||
|
||||
NewConf1 = Conf#{<<"push_gateway">> => PushGateway#{<<"enable">> => false}},
|
||||
{ok, _Response3} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, NewConf1),
|
||||
?assertEqual(undefined, erlang:whereis(emqx_prometheus)),
|
||||
|
||||
ConfWithoutScheme = Conf#{
|
||||
<<"push_gateway">> => PushGateway#{<<"url">> => <<"127.0.0.1:8081">>}
|
||||
},
|
||||
?assertMatch(
|
||||
{error, {"HTTP/1.1", 400, _}},
|
||||
emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, ConfWithoutScheme)
|
||||
),
|
||||
ok.
|
||||
|
||||
t_stats_no_auth_api(_) ->
|
||||
%% undefined means the legacy Prometheus config is in use
|
||||
case emqx:get_config([prometheus, enable_basic_auth], undefined) of
|
||||
true ->
|
||||
{ok, _} = emqx:update_config([prometheus, enable_basic_auth], false),
|
||||
emqx_dashboard_listener:regenerate_minirest_dispatch();
|
||||
_ ->
|
||||
ok
|
||||
end,
|
||||
emqx_dashboard_listener:regenerate_minirest_dispatch(),
|
||||
Json = [{"accept", "application/json"}],
|
||||
request_stats(Json, []).
|
||||
|
||||
t_stats_auth_api(_) ->
|
||||
{ok, _} = emqx:update_config([prometheus, enable_basic_auth], true),
|
||||
Auth = emqx_mgmt_api_test_util:auth_header_(),
|
||||
JsonAuth = [{"accept", "application/json"}, Auth],
|
||||
request_stats(JsonAuth, Auth),
|
||||
ok.
|
||||
|
||||
request_stats(JsonAuth, Auth) ->
|
||||
Path = emqx_mgmt_api_test_util:api_path(["prometheus", "stats"]),
|
||||
{ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, "", JsonAuth),
|
||||
Data = emqx_utils_json:decode(Response, [return_maps]),
|
||||
?assertMatch(#{<<"client">> := _, <<"delivery">> := _}, Data),
|
||||
|
||||
{ok, _} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth),
|
||||
|
||||
ok = meck:expect(mria_rlog, backend, fun() -> rlog end),
|
||||
{ok, _} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth),
|
||||
|
||||
ok.
|
||||
{ok, _} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth).
|
||||
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%%% Internal Functions
|
||||
|
|
|
@ -0,0 +1,146 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_utils_stream).
|
||||
|
||||
%% Constructors / Combinators
|
||||
-export([
|
||||
empty/0,
|
||||
list/1,
|
||||
map/2,
|
||||
chain/2
|
||||
]).
|
||||
|
||||
%% Evaluating
|
||||
-export([
|
||||
next/1,
|
||||
consume/1,
|
||||
consume/2
|
||||
]).
|
||||
|
||||
%% Streams from ETS tables
|
||||
-export([
|
||||
ets/1
|
||||
]).
|
||||
|
||||
-export_type([stream/1]).
|
||||
|
||||
%% @doc A stream is essentially a lazy list.
|
||||
-type stream(T) :: fun(() -> next(T) | []).
|
||||
-type next(T) :: nonempty_improper_list(T, stream(T)).
|
||||
|
||||
-dialyzer(no_improper_lists).
|
||||
|
||||
%%
|
||||
|
||||
%% @doc Make a stream that produces no values.
|
||||
-spec empty() -> stream(none()).
|
||||
empty() ->
|
||||
fun() -> [] end.
|
||||
|
||||
%% @doc Make a stream out of the given list.
|
||||
%% Essentially the opposite of `consume/1`, i.e. `L = consume(list(L))`.
|
||||
-spec list([T]) -> stream(T).
|
||||
list([]) ->
|
||||
empty();
|
||||
list([X | Rest]) ->
|
||||
fun() -> [X | list(Rest)] end.
|
||||
|
||||
%% @doc Make a stream by applying a function to each element of the underlying stream.
|
||||
-spec map(fun((X) -> Y), stream(X)) -> stream(Y).
|
||||
map(F, S) ->
|
||||
fun() ->
|
||||
case next(S) of
|
||||
[X | Rest] ->
|
||||
[F(X) | map(F, Rest)];
|
||||
[] ->
|
||||
[]
|
||||
end
|
||||
end.
|
||||
|
||||
%% @doc Make a stream by chaining (concatenating) two streams.
|
||||
%% The second stream begins to produce values only after the first one is exhausted.
|
||||
-spec chain(stream(X), stream(Y)) -> stream(X | Y).
|
||||
chain(SFirst, SThen) ->
|
||||
fun() ->
|
||||
case next(SFirst) of
|
||||
[X | SRest] ->
|
||||
[X | chain(SRest, SThen)];
|
||||
[] ->
|
||||
next(SThen)
|
||||
end
|
||||
end.
|
||||
|
||||
%%
|
||||
|
||||
%% @doc Produce the next value from the stream.
|
||||
-spec next(stream(T)) -> next(T) | [].
|
||||
next(S) ->
|
||||
S().
|
||||
|
||||
%% @doc Consume the stream and return a list of all produced values.
|
||||
-spec consume(stream(T)) -> [T].
|
||||
consume(S) ->
|
||||
case next(S) of
|
||||
[X | SRest] ->
|
||||
[X | consume(SRest)];
|
||||
[] ->
|
||||
[]
|
||||
end.
|
||||
|
||||
%% @doc Consume N values from the stream and return a list of them and the rest of the stream.
|
||||
%% If the stream is exhausted before N values are produced, return just a list of these values.
|
||||
-spec consume(non_neg_integer(), stream(T)) -> {[T], stream(T)} | [T].
|
||||
consume(N, S) ->
|
||||
consume(N, S, []).
|
||||
|
||||
consume(0, S, Acc) ->
|
||||
{lists:reverse(Acc), S};
|
||||
consume(N, S, Acc) ->
|
||||
case next(S) of
|
||||
[X | SRest] ->
|
||||
consume(N - 1, SRest, [X | Acc]);
|
||||
[] ->
|
||||
lists:reverse(Acc)
|
||||
end.
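%% For instance: consume(2, list([a, b, c])) yields {[a, b], Rest},
%% while consume(5, list([a])) yields just [a].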
|
||||
|
||||
%%
|
||||
|
||||
-type select_result(Record, Cont) ::
|
||||
{[Record], Cont}
|
||||
| {[Record], '$end_of_table'}
|
||||
| '$end_of_table'.
|
||||
|
||||
%% @doc Make a stream out of an ETS table, where the ETS table is scanned through in chunks,
|
||||
%% with the given continuation function. The function is assumed to return a result of a call to:
|
||||
%% * `ets:select/1` / `ets:select/3`
|
||||
%% * `ets:match/1` / `ets:match/3`
|
||||
%% * `ets:match_object/1` / `ets:match_object/3`
|
||||
-spec ets(fun((Cont) -> select_result(Record, Cont))) -> stream(Record).
|
||||
ets(ContF) ->
|
||||
ets(undefined, ContF).
|
||||
|
||||
ets(Cont, ContF) ->
|
||||
fun() ->
|
||||
case ContF(Cont) of
|
||||
{Records, '$end_of_table'} ->
|
||||
next(list(Records));
|
||||
{Records, NCont} ->
|
||||
next(chain(list(Records), ets(NCont, ContF)));
|
||||
'$end_of_table' ->
|
||||
[]
|
||||
end
|
||||
end.
|
|
@ -0,0 +1,75 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_utils_stream_tests).
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
empty_test() ->
|
||||
S = emqx_utils_stream:empty(),
|
||||
?assertEqual([], emqx_utils_stream:next(S)).
|
||||
|
||||
empty_consume_test() ->
|
||||
S = emqx_utils_stream:empty(),
|
||||
?assertEqual([], emqx_utils_stream:consume(S)).
|
||||
|
||||
chain_empties_test() ->
|
||||
S = emqx_utils_stream:chain(
|
||||
emqx_utils_stream:empty(),
|
||||
emqx_utils_stream:empty()
|
||||
),
|
||||
?assertEqual([], emqx_utils_stream:next(S)).
|
||||
|
||||
chain_list_test() ->
|
||||
S = emqx_utils_stream:chain(
|
||||
emqx_utils_stream:list([1, 2, 3]),
|
||||
emqx_utils_stream:list([4, 5, 6])
|
||||
),
|
||||
?assertEqual(
|
||||
[1, 2, 3, 4, 5, 6],
|
||||
emqx_utils_stream:consume(S)
|
||||
).
|
||||
|
||||
chain_take_test() ->
|
||||
S = emqx_utils_stream:chain(
|
||||
emqx_utils_stream:list([1, 2, 3]),
|
||||
emqx_utils_stream:list([4, 5, 6, 7, 8])
|
||||
),
|
||||
?assertMatch(
|
||||
{[1, 2, 3, 4, 5], _SRest},
|
||||
emqx_utils_stream:consume(5, S)
|
||||
),
|
||||
{_, SRest} = emqx_utils_stream:consume(5, S),
|
||||
?assertEqual(
|
||||
[6, 7, 8],
|
||||
emqx_utils_stream:consume(5, SRest)
|
||||
).
|
||||
|
||||
chain_list_map_test() ->
|
||||
S = emqx_utils_stream:map(
|
||||
fun integer_to_list/1,
|
||||
emqx_utils_stream:chain(
|
||||
emqx_utils_stream:list([1, 2, 3]),
|
||||
emqx_utils_stream:chain(
|
||||
emqx_utils_stream:empty(),
|
||||
emqx_utils_stream:list([4, 5, 6])
|
||||
)
|
||||
)
|
||||
),
|
||||
?assertEqual(
|
||||
["1", "2", "3", "4", "5", "6"],
|
||||
emqx_utils_stream:consume(S)
|
||||
).
|
|
@ -0,0 +1,4 @@
|
|||
Modified the Prometheus API and configuration to:
- Restructure configuration sections to group related settings, improving readability and maintainability
- Introduce `enable_basic_auth` configuration for basic authentication on the scrape API endpoint, enhancing security
- Maintain backwards compatibility while refactoring code, avoiding breaking changes
|
|
@ -0,0 +1,2 @@
|
|||
Introduced an Open Telemetry Logs Handler that formats log events according to the Open Telemetry log data model and
exports them to the configured Open Telemetry collector or back-end.
|
|
@ -0,0 +1,5 @@
|
|||
Switch to the new `v2` routing store schema by default. The new schema improves both subscription and routing performance, especially in scenarios with concurrent subscriptions to topic filters sharing common wildcard prefixes, at the cost of slightly increased memory usage. It also eliminates the need for a separate index, so the routing-state inconsistencies rarely encountered in previous versions should no longer be possible.

If a cluster is rolling-upgraded from an older version, it will continue to use the `v1` store until a full (non-rolling) cluster restart happens.

The former schema can still be forced by setting the `broker.routing.storage_schema` configuration option to `v1` and likewise conducting a full non-rolling cluster restart, as shown below.
@ -0,0 +1 @@
|
|||
Implemented HTTP API for configuration and user data import/export.
|
mix.exs
|
@ -102,31 +102,31 @@ defmodule EMQXUmbrella.MixProject do
|
|||
{:opentelemetry_api,
|
||||
github: "emqx/opentelemetry-erlang",
|
||||
sparse: "apps/opentelemetry_api",
|
||||
tag: "v1.3.2-emqx",
|
||||
tag: "v1.4.2-emqx",
|
||||
override: true,
|
||||
runtime: false},
|
||||
{:opentelemetry,
|
||||
github: "emqx/opentelemetry-erlang",
|
||||
sparse: "apps/opentelemetry",
|
||||
tag: "v1.3.2-emqx",
|
||||
tag: "v1.4.2-emqx",
|
||||
override: true,
|
||||
runtime: false},
|
||||
{:opentelemetry_api_experimental,
|
||||
github: "emqx/opentelemetry-erlang",
|
||||
sparse: "apps/opentelemetry_api_experimental",
|
||||
tag: "v1.3.2-emqx",
|
||||
tag: "v1.4.2-emqx",
|
||||
override: true,
|
||||
runtime: false},
|
||||
{:opentelemetry_experimental,
|
||||
github: "emqx/opentelemetry-erlang",
|
||||
sparse: "apps/opentelemetry_experimental",
|
||||
tag: "v1.3.2-emqx",
|
||||
tag: "v1.4.2-emqx",
|
||||
override: true,
|
||||
runtime: false},
|
||||
{:opentelemetry_exporter,
|
||||
github: "emqx/opentelemetry-erlang",
|
||||
sparse: "apps/opentelemetry_exporter",
|
||||
tag: "v1.3.2-emqx",
|
||||
tag: "v1.4.2-emqx",
|
||||
override: true,
|
||||
runtime: false}
|
||||
] ++
|
||||
|
|
rebar.config
|
@ -85,13 +85,13 @@
|
|||
, {jsone, {git, "https://github.com/emqx/jsone.git", {tag, "1.7.1"}}}
|
||||
, {uuid, {git, "https://github.com/okeuday/uuid.git", {tag, "v2.0.6"}}}
|
||||
%% trace
|
||||
, {opentelemetry_api, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.2-emqx"}, "apps/opentelemetry_api"}}
|
||||
, {opentelemetry, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.2-emqx"}, "apps/opentelemetry"}}
|
||||
, {opentelemetry_api, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.4.2-emqx"}, "apps/opentelemetry_api"}}
|
||||
, {opentelemetry, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.4.2-emqx"}, "apps/opentelemetry"}}
|
||||
%% log metrics
|
||||
, {opentelemetry_experimental, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.2-emqx"}, "apps/opentelemetry_experimental"}}
|
||||
, {opentelemetry_api_experimental, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.2-emqx"}, "apps/opentelemetry_api_experimental"}}
|
||||
, {opentelemetry_experimental, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.4.2-emqx"}, "apps/opentelemetry_experimental"}}
|
||||
, {opentelemetry_api_experimental, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.4.2-emqx"}, "apps/opentelemetry_api_experimental"}}
|
||||
%% export
|
||||
, {opentelemetry_exporter, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.3.2-emqx"}, "apps/opentelemetry_exporter"}}
|
||||
, {opentelemetry_exporter, {git_subdir, "https://github.com/emqx/opentelemetry-erlang", {tag, "v1.4.2-emqx"}, "apps/opentelemetry_exporter"}}
|
||||
]}.
|
||||
|
||||
{xref_ignores,
|
||||
|
|
|
@ -1,10 +1,24 @@
|
|||
## Prometheus
|
||||
|
||||
## EMQX's Prometheus scraping endpoint is enabled by default without authentication,
## and there is no way to turn the endpoint off.
## You can enable basic authentication by setting enable_basic_auth to true.
## You can inspect the endpoint with curl: curl -f "127.0.0.1:18083/api/v5/prometheus/stats"
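## With basic auth enabled, pass dashboard credentials (hypothetical placeholders shown):
## curl -f -u <user>:<password> "127.0.0.1:18083/api/v5/prometheus/stats"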
|
||||
|
||||
prometheus {
|
||||
# turn off this expensive collector
|
||||
vm_dist_collector = disabled
|
||||
enable_basic_auth = false
|
||||
push_gateway {
|
||||
enable = false
|
||||
url = "http://127.0.0.1:9091"
|
||||
headers {Authorization = "Basic YWRtaW46Y2JraG55eWd5QDE="}
|
||||
interval = 15s
|
||||
job_name = "${name}/instance/${name}~${host}"
|
||||
}
|
||||
collectors {
|
||||
mnesia = disabled
|
||||
vm_dist = disabled
|
||||
vm_memory = disabled
|
||||
vm_msacc = disabled
|
||||
vm_statistics = disabled
|
||||
vm_system_info = enabled
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,9 +10,54 @@ connector_field.desc:
|
|||
connector_field.label:
|
||||
"""Connector"""
|
||||
|
||||
desc_name.desc:
|
||||
"""The name of the connector."""
|
||||
|
||||
desc_name.label:
|
||||
"""Connector Name"""
|
||||
|
||||
desc_type.desc:
|
||||
"""The type of the connector."""
|
||||
|
||||
desc_type.label:
|
||||
"""Connector Type"""
|
||||
|
||||
config_enable.desc:
|
||||
"""Enable (true) or disable (false) this connector."""
|
||||
config_enable.label:
|
||||
"""Enable or Disable"""
|
||||
|
||||
desc_node_name.desc:
|
||||
"""The node name."""
|
||||
|
||||
desc_node_name.label:
|
||||
"""Node Name"""
|
||||
|
||||
desc_node_status.desc:
|
||||
"""Node status."""
|
||||
|
||||
desc_node_status.label:
|
||||
"""Node Status"""
|
||||
|
||||
desc_status.desc:
|
||||
"""The status of the connector<br/>
|
||||
- <code>connecting</code>: the initial state before any health probes were made.<br/>
|
||||
- <code>connected</code>: when the connector passes the health probes.<br/>
|
||||
- <code>disconnected</code>: when the connector cannot pass health probes.<br/>
- <code>inconsistent</code>: when not all nodes report the same status."""
|
||||
|
||||
desc_status.label:
|
||||
"""Connector Status"""
|
||||
|
||||
desc_status_reason.desc:
|
||||
"""This is the reason given in case a connector is failing to connect."""
|
||||
|
||||
desc_status_reason.label:
|
||||
"""Failure reason"""
|
||||
|
||||
connector_actions.desc:
|
||||
"""List of actions added to this connector."""
|
||||
|
||||
connector_actions.label:
|
||||
"""Actions"""
|
||||
}
|
||||
|
|
|
@@ -1,15 +1,44 @@
emqx_otel_schema {

opentelemetry.desc: "Open Telemetry Toolkit configuration"
opentelemetry.label: "Open Telemetry"

otel_logs.desc:
"""Open Telemetry Logs configuration. If enabled, EMQX installs a log handler that formats events according to the Open Telemetry log data model and
exports them to the configured Open Telemetry collector or backend."""
otel_logs.label: "Open Telemetry Logs"

otel_metrics.desc: "Open Telemetry Metrics configuration."
otel_metrics.label: "Open Telemetry Metrics"

enable.desc: "Enable or disable the Open Telemetry signal."
enable.label: "Enable"

exporter.desc: "Open Telemetry Exporter"
exporter.label: "Exporter"

enable.desc: "Enable or disable Open Telemetry metrics"
max_queue_size.desc:
"""The maximum queue size. After the maximum size is reached, Open Telemetry signals are dropped."""
max_queue_size.label: "Max Queue Size"

protocol.desc: "Open Telemetry Exporter Protocol"
exporting_timeout.desc: "The maximum time an Open Telemetry signal export may run before it is cancelled."
exporting_timeout.label: "Exporting Timeout"

endpoint.desc: "Open Telemetry Exporter Endpoint"
scheduled_delay.desc: "The delay interval between two consecutive exports of Open Telemetry signals."
scheduled_delay.label: "Scheduled Delay Interval"

interval.desc: "The interval at which metrics are sent to the Open Telemetry endpoint"
exporter_endpoint.desc:
"""The target URL to which the exporter sends Open Telemetry signal data."""
exporter_endpoint.label: "Exporter Endpoint"

exporter_protocol.desc: "The transport protocol of the Open Telemetry Exporter"
exporter_protocol.label: "Exporter Protocol"

exporter_ssl.desc: "SSL configuration for the Open Telemetry exporter"
exporter_ssl.label: "SSL Options"

otel_log_handler_level.desc:
"""The log level of the Open Telemetry log handler."""
otel_log_handler_level.label: "Log Level"

}
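
For orientation, a hedged sketch of how the options described above could sit together in emqx.conf; the exact key paths are assumptions, not confirmed by this diff.

opentelemetry {
  metrics {enable = true}                # Open Telemetry Metrics
  exporter {
    endpoint = "http://127.0.0.1:4317"   # Exporter Endpoint (assumed default)
    protocol = grpc                      # Exporter Protocol (assumed value)
    ssl_options {enable = false}         # SSL Options (assumed key name)
  }
}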
@@ -11,8 +11,8 @@ update_prom_conf_info.label:
"""Update Prometheus config"""

get_prom_data.desc:
"""Get Prometheus Data"""
"""Get Prometheus Metrics"""
get_prom_data.label:
"""Get Prometheus Data"""
"""Prometheus Metrics"""

}
@@ -1,8 +1,5 @@
emqx_prometheus_schema {

enable.desc:
"""Turn Prometheus data pushing on or off"""

headers.desc:
"""HTTP headers to send when pushing to Push Gateway.<br/>
For example, <code> { Authorization = "some-authz-tokens"}</code>"""

@@ -14,28 +11,46 @@ job_name.desc:
"""Job Name that is pushed to the Push Gateway. Available variables:<br/>
- ${name}: Name of EMQX node.<br/>
- ${host}: Host name of EMQX node.<br/>
For example, when the EMQX node name is <code>emqx@127.0.0.1</code> then the <code>name</code> variable takes value <code>emqx</code> and the <code>host</code> variable takes value <code>127.0.0.1</code>.<br/>
For example, when the EMQX node name is <code>emqx@127.0.0.1</code> then the <code>name</code>
variable takes value <code>emqx</code> and the <code>host</code> variable takes value <code>127.0.0.1</code>.
Default value is: <code>${name}/instance/${name}~${host}</code>"""
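
Worked through with the node name from the description itself: for <code>emqx@127.0.0.1</code>, the default job_name expands as follows.

push_gateway {
  job_name = "${name}/instance/${name}~${host}"
  # with node name emqx@127.0.0.1 this renders as:
  #   emqx/instance/emqx~127.0.0.1
}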

mnesia_collector.desc:
"""Enable or disable Mnesia metrics collector"""

prometheus.desc:
"""EMQX's Prometheus scraping endpoint is enabled by default without authentication.
You can inspect it with a `curl` command like this: `curl -f "127.0.0.1:18083/api/v5/prometheus/stats"`<br/>
The 'enable' flag turns the push-gateway integration on and off."""
You can inspect it with a `curl` command like this: `curl -f "127.0.0.1:18083/api/v5/prometheus/stats"`"""

prometheus.label:
"""Prometheus"""

push_gateway_server.desc:
"""URL of the Prometheus server. Pushgateway is optional; it should not be configured if Prometheus is to scrape EMQX."""
push_gateway.desc:
"""Push Gateway is optional; it should not be configured if Prometheus is to scrape EMQX."""

enable_basic_auth.desc:
"""Enable or disable basic authentication for the Prometheus scrape API (not for Push Gateway)"""

collectors.desc:
"""The internal advanced metrics of the virtual machine are initially disabled
and are usually only enabled during performance testing.
Enabling them will increase the CPU load."""
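
Read together with the example config earlier, this suggests flipping the collectors on only for a performance-testing run; a sketch using the same keys as that example:

prometheus {
  collectors {
    # enable the expensive VM collectors only while load-testing
    vm_dist = enabled
    vm_memory = enabled
    vm_msacc = enabled
    vm_statistics = enabled
  }
}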

recommend_setting.desc:
"""Recommended setting"""

push_gateway_url.desc:
"""URL of the Pushgateway server. Pushgateway is optional; it should not be configured if Prometheus is to scrape EMQX."""
push_gateway_enable.desc:
"""Enable or disable Pushgateway"""

mnesia_collector.desc:
"""Collects Mnesia metrics, mainly using <code> mnesia:system_info/1 </code>"""

vm_dist_collector.desc:
"""Enable or disable the VM distribution collector, which collects information about the sockets and processes involved in the Erlang distribution mechanism."""
"""Enable or disable the VM distribution collector,
which collects information about the sockets and processes involved in the Erlang distribution mechanism."""

vm_memory_collector.desc:
"""Enable or disable VM memory metrics collector."""
"""Collects information about memory dynamically allocated by the Erlang emulator using
<code> erlang:memory/0 </code>."""

vm_msacc_collector.desc:
"""Enable or disable VM microstate accounting metrics collector."""

@@ -46,4 +61,43 @@ vm_statistics_collector.desc:
vm_system_info_collector.desc:
"""Enable or disable VM system info collector."""

legacy_deprecated_setting.desc:
"""Deprecated since 5.4.0"""

legacy_enable.desc:
"""Deprecated since 5.4.0, use `prometheus.push_gateway.url` instead"""

legacy_headers.desc:
"""Deprecated since 5.4.0, use `prometheus.push_gateway.headers` instead"""

legacy_interval.desc:
"""Deprecated since 5.4.0, use `prometheus.push_gateway.interval` instead"""

legacy_job_name.desc:
"""Deprecated since 5.4.0, use `prometheus.push_gateway.job_name` instead"""

legacy_push_gateway_server.desc:
"""Deprecated since 5.4.0, use `prometheus.push_gateway.url` instead"""

legacy_mnesia_collector.desc:
"""Deprecated since 5.4.0, use `prometheus.collectors.mnesia` instead"""

legacy_vm_dist_collector.desc:
"""Deprecated since 5.4.0, use `prometheus.collectors.vm_dist` instead"""

legacy_vm_memory_collector.desc:
"""Deprecated since 5.4.0, use `prometheus.collectors.vm_memory` instead"""

legacy_vm_msacc_collector.desc:
"""Deprecated since 5.4.0, use `prometheus.collectors.vm_msacc` instead"""

legacy_vm_statistics_collector.desc:
"""Deprecated since 5.4.0, use `prometheus.collectors.vm_statistics` instead"""

legacy_vm_system_info_collector.desc:
"""Deprecated since 5.4.0, use `prometheus.collectors.vm_system_info` instead"""

}
@@ -1530,7 +1530,7 @@ sys_event_messages.desc:

broker_routing_storage_schema.desc:
"""Routing storage schema.
Set <code>v1</code> to leave the default.
Set <code>v1</code> to use the former schema.
<code>v2</code> was introduced in 5.2. It enables routing through two separate tables: one for topic-filter subscriptions and one for regular topic subscriptions. This schema should increase both subscription and routing performance, at the cost of a slight increase in memory consumption per subscription.
NOTE: Schema <code>v2</code> is still experimental.
NOTE: A full non-rolling cluster restart is needed after altering this option for it to take effect."""
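
A hedged sketch of opting in; the exact key path is an assumption, since this diff shows only the description text:

broker {
  routing {
    # experimental; requires a full non-rolling cluster restart to take effect
    storage_schema = v2
  }
}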
@@ -295,3 +295,4 @@ dnstream
upstream
priv
Syskeeper
msacc