commit 42cb17360e
Merge branch 'release-57' into sync-r57-m-20240430
@@ -18,7 +18,7 @@ services:
      - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
  kdc:
    hostname: kdc.emqx.net
    image: ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-ubuntu22.04
    image: ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-ubuntu22.04
    container_name: kdc.emqx.net
    expose:
      - 88 # kdc

@@ -3,7 +3,7 @@ version: '3.9'
services:
  erlang:
    container_name: erlang
    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-ubuntu22.04}
    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-ubuntu22.04}
    env_file:
      - credentials.env
      - conf.env
@@ -20,15 +20,15 @@ permissions:
jobs:
  sanity-checks:
    runs-on: ubuntu-22.04
    container: "ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-ubuntu22.04"
    container: "ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-ubuntu22.04"
    outputs:
      ct-matrix: ${{ steps.matrix.outputs.ct-matrix }}
      ct-host: ${{ steps.matrix.outputs.ct-host }}
      ct-docker: ${{ steps.matrix.outputs.ct-docker }}
      version-emqx: ${{ steps.matrix.outputs.version-emqx }}
      version-emqx-enterprise: ${{ steps.matrix.outputs.version-emqx-enterprise }}
      builder: "ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-ubuntu22.04"
      builder_vsn: "5.3-2"
      builder: "ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-ubuntu22.04"
      builder_vsn: "5.3-4"
      otp_vsn: "26.2.1-2"
      elixir_vsn: "1.15.7"
@@ -95,12 +95,12 @@ jobs:
          MATRIX="$(echo "${APPS}" | jq -c '
            [
              (.[] | select(.profile == "emqx") | . + {
                builder: "5.3-2",
                builder: "5.3-4",
                otp: "26.2.1-2",
                elixir: "1.15.7"
              }),
              (.[] | select(.profile == "emqx-enterprise") | . + {
                builder: "5.3-2",
                builder: "5.3-4",
                otp: ["26.2.1-2"][],
                elixir: "1.15.7"
              })
@@ -23,7 +23,7 @@ env:
jobs:
  prepare:
    runs-on: ubuntu-22.04
    container: 'ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-ubuntu22.04'
    container: 'ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-ubuntu22.04'
    outputs:
      profile: ${{ steps.parse-git-ref.outputs.profile }}
      release: ${{ steps.parse-git-ref.outputs.release }}

@@ -31,8 +31,8 @@ jobs:
      ct-matrix: ${{ steps.matrix.outputs.ct-matrix }}
      ct-host: ${{ steps.matrix.outputs.ct-host }}
      ct-docker: ${{ steps.matrix.outputs.ct-docker }}
      builder: 'ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-ubuntu22.04'
      builder_vsn: '5.3-2'
      builder: 'ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-ubuntu22.04'
      builder_vsn: '5.3-4'
      otp_vsn: '26.2.1-2'
      elixir_vsn: '1.15.7'
@@ -62,12 +62,12 @@ jobs:
          MATRIX="$(echo "${APPS}" | jq -c '
            [
              (.[] | select(.profile == "emqx") | . + {
                builder: "5.3-2",
                builder: "5.3-4",
                otp: "26.2.1-2",
                elixir: "1.15.7"
              }),
              (.[] | select(.profile == "emqx-enterprise") | . + {
                builder: "5.3-2",
                builder: "5.3-4",
                otp: ["26.2.1-2"][],
                elixir: "1.15.7"
              })
@@ -61,7 +61,7 @@ on:
      builder_vsn:
        required: false
        type: string
        default: '5.3-2'
        default: '5.3-4'

permissions:
  contents: read

@@ -63,7 +63,7 @@ on:
      builder_vsn:
        required: false
        type: string
        default: '5.3-2'
        default: '5.3-4'

permissions:
  contents: read
@@ -23,8 +23,8 @@ jobs:
      fail-fast: false
      matrix:
        profile:
          - ['emqx', 'master', '5.3-2:1.15.7-26.2.1-2']
          - ['emqx-enterprise', 'release-56', '5.3-2:1.15.7-26.2.1-2']
          - ['emqx', 'master', '5.3-4:1.15.7-26.2.1-2']
          - ['emqx-enterprise', 'release-56', '5.3-4:1.15.7-26.2.1-2']
        os:
          - debian10
          - ubuntu22.04
@@ -27,11 +27,11 @@ on:
      builder:
        required: false
        type: string
        default: 'ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-ubuntu22.04'
        default: 'ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-ubuntu22.04'
      builder_vsn:
        required: false
        type: string
        default: '5.3-2'
        default: '5.3-4'
      otp_vsn:
        required: false
        type: string
@@ -17,7 +17,7 @@ jobs:
      actions: read
      security-events: write
    container:
      image: ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-ubuntu22.04
      image: ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-ubuntu22.04

    strategy:
      fail-fast: false
@@ -26,7 +26,7 @@ jobs:
  prepare:
    runs-on: ubuntu-latest
    if: github.repository_owner == 'emqx'
    container: ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-ubuntu20.04
    container: ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-ubuntu20.04
    outputs:
      BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
      PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
Makefile
@@ -7,7 +7,7 @@ REBAR = $(CURDIR)/rebar3
BUILD = $(CURDIR)/build
SCRIPTS = $(CURDIR)/scripts
export EMQX_RELUP ?= true
export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.3-2:1.15.7-26.2.1-2-debian12
export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.3-4:1.15.7-26.2.1-2-debian12
export EMQX_DEFAULT_RUNNER = public.ecr.aws/debian/debian:12-slim
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1

@@ -20,8 +20,8 @@ endif

# Dashboard version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.8.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.6.1
export EMQX_DASHBOARD_VERSION ?= v1.9.0-beta.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.7.0-beta.1

PROFILE ?= emqx
REL_PROFILES := emqx emqx-enterprise
@@ -86,7 +86,7 @@ EMQX Cloud documentation: [docs.emqx.com/zh/cloud/latest/](https://docs.emqx.com/zh/cl

The `master` branch is the latest version 5; `main-v4.4` is version 4.4.

EMQX 4.4 requires OTP 24; version 5 can be built with OTP 24 or 25.
EMQX 4.4 requires OTP 24; version 5 can be built with OTP 25 or 26.

```bash
git clone https://github.com/emqx/emqx.git
@@ -98,7 +98,7 @@ The `master` branch tracks the latest version 5. For version 4.4 checkout the `m

EMQX 4.4 requires OTP 24.
EMQX 5.0 ~ 5.3 can be built with OTP 24 or 25.
EMQX 5.4 and newer can be built with OTP 24 or 25.
EMQX 5.4 and newer can be built with OTP 25 or 26.

```bash
git clone https://github.com/emqx/emqx.git
@@ -32,7 +32,7 @@
%% `apps/emqx/src/bpapi/README.md'

%% Opensource edition
-define(EMQX_RELEASE_CE, "5.6.1").
-define(EMQX_RELEASE_CE, "5.7.0-alpha.1").

%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.6.1").
-define(EMQX_RELEASE_EE, "5.7.0-alpha.1").
@@ -38,4 +38,10 @@
-define(SHARD, ?COMMON_SHARD).
-define(MAX_SIZE, 30).

-define(EMQX_TRACE_STOP_ACTION(REASON),
    {unrecoverable_error, {action_stopped_after_template_rendering, REASON}}
).

-define(EMQX_TRACE_STOP_ACTION_MATCH, ?EMQX_TRACE_STOP_ACTION(_)).

-endif.
@@ -46,6 +46,7 @@
{emqx_metrics,2}.
{emqx_mgmt_api_plugins,1}.
{emqx_mgmt_api_plugins,2}.
{emqx_mgmt_api_plugins,3}.
{emqx_mgmt_cluster,1}.
{emqx_mgmt_cluster,2}.
{emqx_mgmt_cluster,3}.
@@ -30,7 +30,7 @@
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.2"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.3"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.42.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.42.2"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
@@ -1636,8 +1636,17 @@ maybe_set_client_initial_attrs(ConnPkt, #{zone := Zone} = ClientInfo) ->
initialize_client_attrs(Inits, ClientInfo) ->
    lists:foldl(
        fun(#{expression := Variform, set_as_attr := Name}, Acc) ->
            Attrs = maps:get(client_attrs, ClientInfo, #{}),
            Attrs = maps:get(client_attrs, Acc, #{}),
            case emqx_variform:render(Variform, ClientInfo) of
                {ok, <<>>} ->
                    ?SLOG(
                        debug,
                        #{
                            msg => "client_attr_rednered_to_empty_string",
                            set_as_attr => Name
                        }
                    ),
                    Acc;
                {ok, Value} ->
                    ?SLOG(
                        debug,
@@ -32,7 +32,7 @@
-export([format/2]).

%% For CLI HTTP API outputs
-export([best_effort_json/1, best_effort_json/2]).
-export([best_effort_json/1, best_effort_json/2, best_effort_json_obj/1]).

-ifdef(TEST).
-include_lib("proper/include/proper.hrl").

@@ -65,10 +65,13 @@
best_effort_json(Input) ->
    best_effort_json(Input, [pretty, force_utf8]).
best_effort_json(Input, Opts) ->
    Config = #{depth => unlimited, single_line => true, chars_limit => unlimited},
    JsonReady = best_effort_json_obj(Input, Config),
    JsonReady = best_effort_json_obj(Input),
    emqx_utils_json:encode(JsonReady, Opts).

best_effort_json_obj(Input) ->
    Config = #{depth => unlimited, single_line => true, chars_limit => unlimited},
    best_effort_json_obj(Input, Config).

-spec format(logger:log_event(), config()) -> iodata().
format(#{level := Level, msg := Msg, meta := Meta}, Config0) when is_map(Config0) ->
    Config = add_default_config(Config0),
@@ -217,14 +217,7 @@ roots(high) ->
                importance => ?IMPORTANCE_MEDIUM
            }
        )},
        {zones,
            sc(
                map(name, ref("zone")),
                #{
                    desc => ?DESC(zones),
                    importance => ?IMPORTANCE_HIDDEN
                }
            )}
        {zones, zones_field_schema()}
    ] ++
        emqx_schema_hooks:injection_point(
            'roots.high',

@@ -1859,7 +1852,7 @@ base_listener(Bind) ->
                #{
                    desc => ?DESC(base_listener_zone),
                    default => 'default',
                    importance => ?IMPORTANCE_HIDDEN
                    importance => ?IMPORTANCE_LOW
                }
            )},
        {"limiter",

@@ -1883,6 +1876,22 @@ base_listener(Bind) ->
            )}
    ] ++ emqx_limiter_schema:short_paths_fields().

%% @hidden Starting from 5.7, listeners.{TYPE}.{NAME}.zone is no longer hidden
%% However, the root key 'zones' is still hidden because the fields' schema
%% just repeat other root field's schema, which makes the dumped schema doc
%% unnecessarily bloated.
%%
%% zone schema is documented here since 5.7:
%% https://docs.emqx.com/en/enterprise/latest/configuration/configuration.html
zones_field_schema() ->
    sc(
        map(name, ref("zone")),
        #{
            desc => ?DESC(zones),
            importance => ?IMPORTANCE_HIDDEN
        }
    ).

desc("persistent_session_store") ->
    "Settings for message persistence.";
desc("persistent_session_builtin") ->

@@ -3712,7 +3721,7 @@ default_mem_check_interval() ->

description_schema() ->
    sc(
        string(),
        binary(),
        #{
            default => <<"">>,
            desc => ?DESC(description),
@@ -29,7 +29,9 @@
    unsubscribe/2,
    log/3,
    log/4,
    rendered_action_template/2
    rendered_action_template/2,
    make_rendered_action_template_trace_context/1,
    rendered_action_template_with_ctx/2
]).

-export([

@@ -70,6 +72,12 @@
-export_type([ruleid/0]).
-type ruleid() :: binary().

-export_type([rendered_action_template_ctx/0]).
-opaque rendered_action_template_ctx() :: #{
    trace_ctx := map(),
    action_id := any()
}.

publish(#message{topic = <<"$SYS/", _/binary>>}) ->
    ignore;
publish(#message{from = From, topic = Topic, payload = Payload}) when

@@ -87,7 +95,7 @@ unsubscribe(<<"$SYS/", _/binary>>, _SubOpts) ->
unsubscribe(Topic, SubOpts) ->
    ?TRACE("UNSUBSCRIBE", "unsubscribe", #{topic => Topic, sub_opts => SubOpts}).

rendered_action_template(ActionID, RenderResult) ->
rendered_action_template(<<"action:", _/binary>> = ActionID, RenderResult) ->
    TraceResult = ?TRACE(
        "QUERY_RENDER",
        "action_template_rendered",
@@ -107,11 +115,55 @@ rendered_action_template(ActionID, RenderResult) ->
                )
            ),
            MsgBin = unicode:characters_to_binary(StopMsg),
            error({unrecoverable_error, {action_stopped_after_template_rendering, MsgBin}});
            error(?EMQX_TRACE_STOP_ACTION(MsgBin));
        _ ->
            ok
    end,
    TraceResult.
    TraceResult;
rendered_action_template(_ActionID, _RenderResult) ->
    %% We do nothing if we don't get a valid Action ID. This can happen when
    %% called from connectors that are used for actions as well as authz and
    %% authn.
    ok.

%% The following two functions are used for connectors that don't do the
%% rendering in the main process (the one that called on_*query). In this case
%% we need to pass the trace context to the sub process that do the rendering
%% so that the result of the rendering can be traced correctly. It is also
%% important to ensure that the error that can be thrown from
%% rendered_action_template_with_ctx is handled in the appropriate way in the
%% sub process.
-spec make_rendered_action_template_trace_context(any()) -> rendered_action_template_ctx().
make_rendered_action_template_trace_context(ActionID) ->
    MetaData =
        case logger:get_process_metadata() of
            undefined -> #{};
            M -> M
        end,
    #{trace_ctx => MetaData, action_id => ActionID}.

-spec rendered_action_template_with_ctx(rendered_action_template_ctx(), Result :: term()) -> term().
rendered_action_template_with_ctx(
    #{
        trace_ctx := LogMetaData,
        action_id := ActionID
    },
    RenderResult
) ->
    OldMetaData =
        case logger:get_process_metadata() of
            undefined -> #{};
            M -> M
        end,
    try
        logger:set_process_metadata(LogMetaData),
        emqx_trace:rendered_action_template(
            ActionID,
            RenderResult
        )
    after
        logger:set_process_metadata(OldMetaData)
    end.

log(List, Msg, Meta) ->
    log(debug, List, Msg, Meta).
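The comment in the hunk above spells out the intended calling pattern for these two new helpers. A minimal illustrative sketch follows (the connector callback shape, `pick_worker/1`, `render/2`, `send/2`, and `reply/2` are assumptions for illustration, not part of the diff):

```erlang
%% Illustrative sketch only: capture the trace context in the process that
%% receives the query, then trace the rendered template from the worker
%% process that actually does the rendering.
on_query_async(_InstId, {ChannelId, Data}, ReplyFunAndArgs, #{pool := Pool} = State) ->
    TraceCtx = emqx_trace:make_rendered_action_template_trace_context(ChannelId),
    Worker = pick_worker(Pool),
    Worker ! {render_and_send, Data, TraceCtx, ReplyFunAndArgs, State},
    ok.

%% In the worker process: rendered_action_template_with_ctx/2 may throw the
%% ?EMQX_TRACE_STOP_ACTION error, so it must be caught here, not in the caller.
worker_loop() ->
    receive
        {render_and_send, Data, TraceCtx, ReplyFunAndArgs, State} ->
            Rendered = render(Data, State),
            Result =
                try
                    emqx_trace:rendered_action_template_with_ctx(TraceCtx, #{payload => Rendered}),
                    send(Rendered, State)
                catch
                    error:{unrecoverable_error, _} = Reason ->
                        {error, Reason}
                end,
            reply(ReplyFunAndArgs, Result),
            worker_loop()
    end.
```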
@@ -48,6 +48,21 @@ prepare_log_map(LogMap, PEncode) ->
    NewKeyValuePairs = [prepare_key_value(K, V, PEncode) || {K, V} <- maps:to_list(LogMap)],
    maps:from_list(NewKeyValuePairs).

prepare_key_value(K, {Formatter, V}, PEncode) when is_function(Formatter, 1) ->
    %% A custom formatter is provided with the value
    try
        NewV = Formatter(V),
        prepare_key_value(K, NewV, PEncode)
    catch
        _:_ ->
            {K, V}
    end;
prepare_key_value(K, {ok, Status, Headers, Body}, PEncode) when
    is_integer(Status), is_list(Headers), is_binary(Body)
->
    %% This is unlikely to be anything other than info about an HTTP request, so we make
    %% it more structured
    prepare_key_value(K, #{status => Status, headers => Headers, body => Body}, PEncode);
prepare_key_value(payload = K, V, PEncode) ->
    NewV =
        try
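The new first clause above lets callers attach a one-argument formatter to any trace field as a `{Fun, Value}` pair; the formatter is applied lazily and the raw value is kept if it crashes. A short sketch of how a connector uses this (the wrapping function name is illustrative; the redact call mirrors the HTTP connector change later in this diff):

```erlang
%% Illustrative sketch only: attach formatters to trace fields so the JSON
%% trace formatter redacts headers and binarises the body at format time.
trace_http_request(ActionId, Method, Path, Headers, Body) ->
    emqx_trace:rendered_action_template(ActionId, #{
        method => Method,
        path => Path,
        headers => {fun emqx_utils_redact:redact_headers/1, Headers},
        body => {fun unicode:characters_to_binary/1, Body}
    }).
```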
@@ -422,6 +422,10 @@ t_client_attr_from_user_property(_Config) ->
        #{
            expression => Compiled,
            set_as_attr => <<"group">>
        },
        #{
            expression => Compiled,
            set_as_attr => <<"group2">>
        }
    ]),
    SslConf = emqx_common_test_helpers:client_mtls('tlsv1.3'),

@@ -436,7 +440,7 @@ t_client_attr_from_user_property(_Config) ->
    {ok, _} = emqtt:connect(Client),
    %% assert only two chars are extracted
    ?assertMatch(
        #{clientinfo := #{client_attrs := #{<<"group">> := <<"g1">>}}},
        #{clientinfo := #{client_attrs := #{<<"group">> := <<"g1">>, <<"group2">> := <<"g1">>}}},
        emqx_cm:get_chan_info(ClientId)
    ),
    emqtt:disconnect(Client).
@@ -1,27 +1,4 @@
%%--------------------------------------------------------------------
%% -type(ipaddr() :: {ipaddr, string()}).
%%
%% -type(ipaddrs() :: {ipaddrs, [string()]}).
%%
%% -type(username() :: {user | username, string()} | {user | username, {re, regex()}}).
%%
%% -type(clientid() :: {client | clientid, string()} | {client | clientid, {re, regex()}}).
%%
%% -type(who() :: ipaddr() | ipaddrs() | username() | clientid() |
%%                {'and', [ipaddr() | ipaddrs() | username() | clientid()]} |
%%                {'or', [ipaddr() | ipaddrs() | username() | clientid()]} |
%%                all).
%%
%% -type(action() :: subscribe | publish | all).
%%
%% -type(topic_filters() :: string()).
%%
%% -type(topics() :: [topic_filters() | {eq, topic_filters()}]).
%%
%% -type(permission() :: allow | deny).
%%
%% -type(rule() :: {permission(), who(), action(), topics()} | {permission(), all}).
%%--------------------------------------------------------------------
%%-------------- Default ACL rules -------------------------------------------------------

{allow, {username, {re, "^dashboard$"}}, subscribe, ["$SYS/#"]}.

@@ -30,3 +7,117 @@
{deny, all, subscribe, ["$SYS/#", {eq, "#"}]}.

{allow, all}.
%% NOTE! when deploying in production:
%% - Change the last rule to `{deny, all}.`
%% - Set config `authorization.no_match = deny`

%% See docs below
%%
%% ------------ The formal spec ----------------------------------------------------------
%%
%% -type ipaddr() :: {ipaddr, string()}.
%% -type ipaddrs() :: {ipaddrs, [string()]}.
%% -type username() :: {user | username, string()} | {user | username, {re, regex()}}.
%% -type clientid() :: {client | clientid, string()} | {client | clientid, {re, regex()}}.
%% -type who() :: ipaddr() | ipaddrs() | username() | clientid() |
%%                {'and', [ipaddr() | ipaddrs() | username() | clientid()]} |
%%                {'or', [ipaddr() | ipaddrs() | username() | clientid()]} |
%%                all.
%% -type simple_action() :: subscribe | publish | all.
%% -type complex_action() :: {simple_action(), [{qos, 0..2}, {retain, true|false|all}]}.
%% -type action() :: simple_action() | complex_action().
%% -type topic() :: string().
%% -type topic_filter() :: string().
%% -type topic_match() :: topic() | topic_filter() | {eq, topic() | topic_filter()}.
%% -type perm() :: allow | deny.
%% -type rule() :: {perm(), who(), action(), [topic_match()]} | {perm(), all}.

%%-------------- Visual aid for the spec -------------------------------------------------
%%
%% rule()
%% ├── {perm(), who(), action(), [topic_match()]}
%% │ │ │ │ ├── topic() :: string()
%% │ │ │ │ ├── topic_filter() :: string()
%% │ │ │ │ └── {eq, topic() | topic_filter()}
%% │ │ │ │
%% │ │ │ ├── simple_action()
%% │ │ │ │ ├── publish
%% │ │ │ │ ├── subscribe
%% │ │ │ │ └── all
%% │ │ │ └── {simple_action(), [{qos,0..2},{retain,true|false|all}]}
%% │ │ │
%% │ │ ├── ipaddr()
%% │ │ │ └── {ipaddr, string()}
%% │ │ ├── ipaddrs()
%% │ │ │ └── {ipaddrs, [string()]}
%% │ │ ├── username()
%% │ │ │ ├── {user | username, string()}
%% │ │ │ └── {user | username, {re, regex()}}
%% │ │ ├── clientid()
%% │ │ │ ├── {client | clientid, string()}
%% │ │ │ └── {client | clientid, {re, regex()}}
%% │ │ ├── {'and', [ipaddr() | ipaddrs() | username() | clientid()]}
%% │ │ ├── {'or', [ipaddr() | ipaddrs() | username() | clientid()]}
%% │ │ └── all
%% │ │
%% │ ├── allow
%% │ └── deny
%% │
%% └── {perm(), all}
%%

%% This file defines a set of ACL rules for MQTT client pub/sub authorization.
%% The content is of Erlang-term format.
%% Each Erlang-term is a tuple `{...}` terminated by dot `.`
%%
%% NOTE: When deploying to production, the last rule should be changed to {deny, all}.
%%
%% NOTE: It's a good practice to keep the number of rules small, because in worst case
%% scenarios, all rules have to be traversed for each message publish.
%%
%% A rule is a 4-element tuple.
%% For example, `{allow, {username, "Jon"}, subscribe, ["#"]}` allows Jon to subscribe to
%% any topic they want.
%%
%% Below is an explanation:
%%
%% - `perm()`: The permission.
%%   Defines whether this is an `allow` or `deny` rule.
%%
%% - `who()`: The MQTT client matching condition.
%%   - `all`: A rule which applies to all clients.
%%   - `{ipaddr, IpAddress}`: Matches a client by source IP address. CIDR notation is allowed.
%%   - `{ipaddrs, [IpAddress]}`: Matches clients by a set of IP addresses. CIDR notation is allowed.
%%   - `{clientid, ClientID}`: Matches a client by ID.
%%   - `{username, Username}`: Matches a client by username.
%%   - `{..., {re, ..}}`: Regular expression to match either clientid or username.
%%   - `{'and', [...]}`: Combines a list of matching conditions.
%%   - `{'or', [...]}`: Combines a list of matching conditions.
%%
%% - `action()`: Matches publish or subscribe actions (or both).
%%   Applies the rule to `publish` or `subscribe` actions.
%%   The special value `all` denotes allowing or denying both `publish` and `subscribe`.
%%   It can also be associated with `qos` and `retain` flags to match the action with
%%   more specifics. For example, `{publish, [{qos,0},{retain,false}]}` should only
%%   match the `publish` action when the message has QoS 0, and without retained flag set.
%%
%% - `[topic_match()]`:
%%   A list of topics, topic-filters, or template rendering to match the topic being
%%   subscribed to or published.
%%   For example, `{allow, {username, "Jan"}, publish, ["jan/#"]}` permits Jan to publish
%%   to any topic matching the wildcard pattern "jan/#".
%%   A special tuple `{eq, topic_match()}` is useful to allow or deny the specific wildcard
%%   subscription instead of performing a topic match.
%%   A `topic_match()` can also contain a placeholder rendered with actual value at runtime,
%%   for example, `{allow, all, publish, "${clientid}/#"}` allows all clients to publish to
%%   topics prefixed by their own client ID.
%%
%%   Supported placeholders are:
%%   - `${cn}`: TLS certificate common name.
%%   - `${clientid}`: The client ID.
%%   - `${username}`: The username.
%%   - `${client_attrs.NAME}`: A client attribute named `NAME`, which can be initialized by
%%     `mqtt.client_attrs_init` config or extended by certain authentication backends.
%%   NOTE: Placeholder is not rendered as empty string if the referencing value is not
%%   found. For example, `${client_attrs.group}/#` is not rendered as `/#` if the
%%   client does not have a `group` attribute.
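The explanation above is easiest to read next to a concrete rule set. Here is a small illustrative example assembled from the spec (these rules are ours, not part of the shipped default file):

```erlang
%% Illustrative ACL rules only: dashboard user may watch $SYS, every client may
%% publish QoS 0 non-retained status messages under its own client ID, clients
%% from 10.0.0.0/8 whose ID starts with "sensor-" are scoped to their group
%% attribute, and everything else is denied.
{allow, {username, {re, "^dashboard$"}}, subscribe, ["$SYS/#"]}.
{allow, all, {publish, [{qos, 0}, {retain, false}]}, ["${clientid}/status"]}.
{allow, {'and', [{ipaddrs, ["10.0.0.0/8"]}, {clientid, {re, "^sensor-"}}]}, all, ["${client_attrs.group}/#"]}.
{deny, all}.
```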
@@ -386,8 +386,9 @@ match_who(_, _) ->
match_topics(_ClientInfo, _Topic, []) ->
    false;
match_topics(ClientInfo, Topic, [{pattern, PatternFilter} | Filters]) ->
    TopicFilter = bin(emqx_template:render_strict(PatternFilter, ClientInfo)),
    match_topic(emqx_topic:words(Topic), emqx_topic:words(TopicFilter)) orelse
    TopicFilter = render_topic(PatternFilter, ClientInfo),
    (is_binary(TopicFilter) andalso
        match_topic(emqx_topic:words(Topic), emqx_topic:words(TopicFilter))) orelse
        match_topics(ClientInfo, Topic, Filters);
match_topics(ClientInfo, Topic, [TopicFilter | Filters]) ->
    match_topic(emqx_topic:words(Topic), TopicFilter) orelse

@@ -397,3 +398,16 @@ match_topic(Topic, {'eq', TopicFilter}) ->
    Topic =:= TopicFilter;
match_topic(Topic, TopicFilter) ->
    emqx_topic:match(Topic, TopicFilter).

render_topic(Topic, ClientInfo) ->
    try
        bin(emqx_template:render_strict(Topic, ClientInfo))
    catch
        error:Reason ->
            ?SLOG(debug, #{
                msg => "failed_to_render_topic_template",
                template => Topic,
                reason => Reason
            }),
            error
    end.
@@ -173,7 +173,16 @@ end_per_testcase(_TestCase, _Config) ->
-define(SOURCE_FILE_CLIENT_ATTR,
    ?SOURCE_FILE(
        <<
            "{allow,all,all,[\"${client_attrs.alias}/#\"]}.\n"
            "{allow,all,all,[\"${client_attrs.alias}/#\",\"client_attrs_backup\"]}.\n"
            "{deny, all}."
        >>
    )
).

-define(SOURCE_FILE_CLIENT_NO_SUCH_ATTR,
    ?SOURCE_FILE(
        <<
            "{allow,all,all,[\"${client_attrs.nonexist}/#\",\"client_attrs_backup\"]}.\n"
            "{deny, all}."
        >>
    )

@@ -572,11 +581,41 @@ t_alias_prefix(_Config) ->
    ?assertMatch({ok, _}, emqtt:connect(C)),
    ?assertMatch({ok, _, [?RC_SUCCESS]}, emqtt:subscribe(C, SubTopic)),
    ?assertMatch({ok, _, [?RC_NOT_AUTHORIZED]}, emqtt:subscribe(C, SubTopicNotAllowed)),
    ?assertMatch({ok, _, [?RC_NOT_AUTHORIZED]}, emqtt:subscribe(C, <<"/#">>)),
    unlink(C),
    emqtt:stop(C),
    NonMatching = <<"clientid_which_has_no_dash">>,
    {ok, C2} = emqtt:start_link([{clientid, NonMatching}, {proto_ver, v5}]),
    ?assertMatch({ok, _}, emqtt:connect(C2)),
    ?assertMatch({ok, _, [?RC_SUCCESS]}, emqtt:subscribe(C2, <<"client_attrs_backup">>)),
    %% assert '${client_attrs.alias}/#' is not rendered as '/#'
    ?assertMatch({ok, _, [?RC_NOT_AUTHORIZED]}, emqtt:subscribe(C2, <<"/#">>)),
    unlink(C2),
    emqtt:stop(C2),
    emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], []),
    ok.

t_non_existing_attr(_Config) ->
    {ok, _} = emqx_authz:update(?CMD_REPLACE, [?SOURCE_FILE_CLIENT_NO_SUCH_ATTR]),
    %% '^.*-(.*)$': extract the suffix after the last '-'
    {ok, Compiled} = emqx_variform:compile("concat(regex_extract(clientid,'^.*-(.*)$'))"),
    emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], [
        #{
            expression => Compiled,
            %% this is intended to be different from 'nonexist'
            set_as_attr => <<"existing">>
        }
    ]),
    ClientId = <<"org1-name3">>,
    {ok, C} = emqtt:start_link([{clientid, ClientId}, {proto_ver, v5}]),
    ?assertMatch({ok, _}, emqtt:connect(C)),
    ?assertMatch({ok, _, [?RC_SUCCESS]}, emqtt:subscribe(C, <<"client_attrs_backup">>)),
    %% assert '${client_attrs.nonexist}/#' is not rendered as '/#'
    ?assertMatch({ok, _, [?RC_NOT_AUTHORIZED]}, emqtt:subscribe(C, <<"/#">>)),
    unlink(C),
    emqtt:stop(C),
    ok.

%% client is allowed by ACL to publish to its LWT topic, is connected,
%% and then gets banned and kicked out while connected. Should not
%% publish LWT.
@@ -118,7 +118,8 @@ hard_coded_action_info_modules_ee() ->
        emqx_bridge_pulsar_action_info,
        emqx_bridge_greptimedb_action_info,
        emqx_bridge_tdengine_action_info,
        emqx_bridge_s3_action_info
        emqx_bridge_s3_upload_action_info,
        emqx_bridge_s3_aggreg_upload_action_info
    ].
-else.
hard_coded_action_info_modules_ee() ->
@@ -173,6 +173,11 @@ source_hookpoint(Config) ->
    BridgeId = emqx_bridge_resource:bridge_id(Type, Name),
    emqx_bridge_v2:source_hookpoint(BridgeId).

action_hookpoint(Config) ->
    #{kind := action, type := Type, name := Name} = get_common_values(Config),
    BridgeId = emqx_bridge_resource:bridge_id(Type, Name),
    emqx_bridge_resource:bridge_hookpoint(BridgeId).

add_source_hookpoint(Config) ->
    Hookpoint = source_hookpoint(Config),
    ok = emqx_hooks:add(Hookpoint, {?MODULE, source_hookpoint_callback, [self()]}, 1000),

@@ -378,6 +383,14 @@ start_connector_api(ConnectorName, ConnectorType) ->
    ct:pal("connector update (http) result:\n ~p", [Res]),
    Res.

get_connector_api(ConnectorType, ConnectorName) ->
    ConnectorId = emqx_connector_resource:connector_id(ConnectorType, ConnectorName),
    Path = emqx_mgmt_api_test_util:api_path(["connectors", ConnectorId]),
    ct:pal("get connector ~s (http)", [ConnectorId]),
    Res = request(get, Path, _Params = []),
    ct:pal("get connector (http) result:\n ~p", [Res]),
    Res.

create_action_api(Config) ->
    create_action_api(Config, _Overrides = #{}).
@@ -223,6 +223,11 @@ do_single_query(InstId, Request, Async, #{pool_name := PoolName} = State) ->
        }
    ),
    {PreparedKeyOrCQL1, Data} = proc_cql_params(Type, PreparedKeyOrCQL, Params, State),
    emqx_trace:rendered_action_template(PreparedKeyOrCQL, #{
        type => Type,
        key_or_cql => PreparedKeyOrCQL1,
        data => Data
    }),
    Res = exec_cql_query(InstId, PoolName, Type, Async, PreparedKeyOrCQL1, Data),
    handle_result(Res).

@@ -261,6 +266,14 @@ do_batch_query(InstId, Requests, Async, #{pool_name := PoolName} = State) ->
            state => State
        }
    ),
    ChannelID =
        case Requests of
            [{CID, _} | _] -> CID;
            _ -> none
        end,
    emqx_trace:rendered_action_template(ChannelID, #{
        cqls => CQLs
    }),
    Res = exec_cql_batch_query(InstId, PoolName, Async, CQLs),
    handle_result(Res).
@@ -386,7 +386,7 @@ on_query(
    SimplifiedRequestType = query_type(RequestType),
    Templates = get_templates(RequestType, State),
    SQL = get_sql(SimplifiedRequestType, Templates, DataOrSQL),
    ClickhouseResult = execute_sql_in_clickhouse_server(PoolName, SQL),
    ClickhouseResult = execute_sql_in_clickhouse_server(RequestType, PoolName, SQL),
    transform_and_log_clickhouse_result(ClickhouseResult, ResourceID, SQL).

get_templates(ChannId, State) ->

@@ -398,7 +398,7 @@ get_templates(ChannId, State) ->
    end.

get_sql(channel_message, #{send_message_template := PreparedSQL}, Data) ->
    emqx_placeholder:proc_tmpl(PreparedSQL, Data);
    emqx_placeholder:proc_tmpl(PreparedSQL, Data, #{return => full_binary});
get_sql(_, _, SQL) ->
    SQL.

@@ -425,7 +425,7 @@ on_batch_query(ResourceID, BatchReq, #{pool_name := PoolName} = State) ->
    %% Create batch insert SQL statement
    SQL = objects_to_sql(ObjectsToInsert, Templates),
    %% Do the actual query in the database
    ResultFromClickhouse = execute_sql_in_clickhouse_server(PoolName, SQL),
    ResultFromClickhouse = execute_sql_in_clickhouse_server(ChannId, PoolName, SQL),
    %% Transform the result to a better format
    transform_and_log_clickhouse_result(ResultFromClickhouse, ResourceID, SQL).

@@ -464,7 +464,8 @@ objects_to_sql(_, _) ->

%% This function is used by on_query/3 and on_batch_query/3 to send a query to
%% the database server and receive a result
execute_sql_in_clickhouse_server(PoolName, SQL) ->
execute_sql_in_clickhouse_server(Id, PoolName, SQL) ->
    emqx_trace:rendered_action_template(Id, #{rendered_sql => SQL}),
    ecpool:pick_and_do(
        PoolName,
        {?MODULE, execute_sql_in_clickhouse_server_using_connection, [SQL]},
@@ -9,6 +9,7 @@
-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx/include/emqx_trace.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("hocon/include/hoconsc.hrl").

@@ -133,20 +134,10 @@ on_add_channel(
create_channel_state(
    #{parameters := Conf} = _ChannelConfig
) ->
    Keys = maps:with([hash_key, range_key], Conf),
    Keys1 = maps:fold(
        fun(K, V, Acc) ->
            Acc#{K := erlang:binary_to_existing_atom(V)}
        end,
        Keys,
        Keys
    ),

    Base = maps:without([template, hash_key, range_key], Conf),
    Base1 = maps:merge(Base, Keys1),
    Base = maps:without([template], Conf),

    Templates = parse_template_from_conf(Conf),
    State = Base1#{
    State = Base#{
        templates => Templates
    },
    {ok, State}.
@@ -246,12 +237,16 @@ do_query(
        table := Table,
        templates := Templates
    } = ChannelState,
    TraceRenderedCTX =
        emqx_trace:make_rendered_action_template_trace_context(ChannelId),
    Result =
        case ensuare_dynamo_keys(Query, ChannelState) of
            true ->
                ecpool:pick_and_do(
                    PoolName,
                    {emqx_bridge_dynamo_connector_client, query, [Table, QueryTuple, Templates]},
                    {emqx_bridge_dynamo_connector_client, query, [
                        Table, QueryTuple, Templates, TraceRenderedCTX
                    ]},
                    no_handover
                );
            _ ->

@@ -259,6 +254,8 @@ do_query(
        end,

    case Result of
        {error, ?EMQX_TRACE_STOP_ACTION(_)} = Error ->
            Error;
        {error, Reason} ->
            ?tp(
                dynamo_connector_query_return,

@@ -311,12 +308,12 @@ get_query_tuple([InsertQuery | _]) ->
ensuare_dynamo_keys({_, Data} = Query, State) when is_map(Data) ->
    ensuare_dynamo_keys([Query], State);
ensuare_dynamo_keys([{_, Data} | _] = Queries, State) when is_map(Data) ->
    Keys = maps:to_list(maps:with([hash_key, range_key], State)),
    Keys = maps:values(maps:with([hash_key, range_key], State)),
    lists:all(
        fun({_, Query}) ->
            lists:all(
                fun({_, Key}) ->
                    maps:is_key(Key, Query)
                fun(Key) ->
                    is_dynamo_key_existing(Key, Query)
                end,
                Keys
            )

@@ -364,3 +361,17 @@ get_host_info(Server) ->

redact(Data) ->
    emqx_utils:redact(Data, fun(Any) -> Any =:= aws_secret_access_key end).

is_dynamo_key_existing(Bin, Query) when is_binary(Bin) ->
    case maps:is_key(Bin, Query) of
        true ->
            true;
        _ ->
            try
                Key = erlang:binary_to_existing_atom(Bin),
                maps:is_key(Key, Query)
            catch
                _:_ ->
                    false
            end
    end.
@@ -10,7 +10,7 @@
-export([
    start_link/1,
    is_connected/2,
    query/4
    query/5
]).

%% gen_server callbacks

@@ -40,8 +40,8 @@ is_connected(Pid, Timeout) ->
            {false, Error}
    end.

query(Pid, Table, Query, Templates) ->
    gen_server:call(Pid, {query, Table, Query, Templates}, infinity).
query(Pid, Table, Query, Templates, TraceRenderedCTX) ->
    gen_server:call(Pid, {query, Table, Query, Templates, TraceRenderedCTX}, infinity).

%%--------------------------------------------------------------------
%% @doc

@@ -77,14 +77,14 @@ handle_call(is_connected, _From, State) ->
            {false, Error}
        end,
    {reply, IsConnected, State};
handle_call({query, Table, Query, Templates}, _From, State) ->
    Result = do_query(Table, Query, Templates),
handle_call({query, Table, Query, Templates, TraceRenderedCTX}, _From, State) ->
    Result = do_query(Table, Query, Templates, TraceRenderedCTX),
    {reply, Result, State};
handle_call(_Request, _From, State) ->
    {reply, ok, State}.

handle_cast({query, Table, Query, Templates, {ReplyFun, [Context]}}, State) ->
    Result = do_query(Table, Query, Templates),
    Result = do_query(Table, Query, Templates, {fun(_, _) -> ok end, none}),
    ReplyFun(Context, Result),
    {noreply, State};
handle_cast(_Request, State) ->

@@ -102,15 +102,29 @@ code_change(_OldVsn, State, _Extra) ->
%%%===================================================================
%%% Internal functions
%%%===================================================================
do_query(Table, Query0, Templates) ->
do_query(Table, Query0, Templates, TraceRenderedCTX) ->
    try
        Query = apply_template(Query0, Templates),
        emqx_trace:rendered_action_template_with_ctx(TraceRenderedCTX, #{
            table => Table,
            query => {fun trace_format_query/1, Query}
        }),
        execute(Query, Table)
    catch
        error:{unrecoverable_error, Reason} ->
            {error, {unrecoverable_error, Reason}};
        _Type:Reason ->
            {error, {unrecoverable_error, {invalid_request, Reason}}}
    end.

trace_format_query({Type, Data}) ->
    #{type => Type, data => Data};
trace_format_query([_ | _] = Batch) ->
    BatchData = [trace_format_query(Q) || Q <- Batch],
    #{type => batch, data => BatchData};
trace_format_query(Query) ->
    Query.

%% some simple query commands for authn/authz or test
execute({insert_item, Msg}, Table) ->
    Item = convert_to_item(Msg),
@@ -284,6 +284,13 @@ do_send_requests_sync(ConnectorState, Requests, InstanceId) ->
    Method = post,
    ReqOpts = #{request_ttl => RequestTTL},
    Request = {prepared_request, {Method, Path, Body}, ReqOpts},
    emqx_trace:rendered_action_template(MessageTag, #{
        method => Method,
        path => Path,
        body => Body,
        options => ReqOpts,
        is_async => false
    }),
    Result = emqx_bridge_gcp_pubsub_client:query_sync(Request, Client),
    QueryMode = sync,
    handle_result(Result, Request, QueryMode, InstanceId).

@@ -312,6 +319,13 @@ do_send_requests_async(ConnectorState, Requests, ReplyFunAndArgs0) ->
    ReqOpts = #{request_ttl => RequestTTL},
    Request = {prepared_request, {Method, Path, Body}, ReqOpts},
    ReplyFunAndArgs = {fun ?MODULE:reply_delegator/2, [ReplyFunAndArgs0]},
    emqx_trace:rendered_action_template(MessageTag, #{
        method => Method,
        path => Path,
        body => Body,
        options => ReqOpts,
        is_async => true
    }),
    emqx_bridge_gcp_pubsub_client:query_async(
        Request, ReplyFunAndArgs, Client
    ).
@@ -128,7 +128,7 @@ on_query(InstId, {Channel, Message}, State) ->
                greptimedb_connector_send_query,
                #{points => Points, batch => false, mode => sync}
            ),
            do_query(InstId, Client, Points);
            do_query(InstId, Channel, Client, Points);
        {error, ErrorPoints} ->
            ?tp(
                greptimedb_connector_send_query_error,

@@ -152,7 +152,7 @@ on_batch_query(InstId, [{Channel, _} | _] = BatchData, State) ->
                greptimedb_connector_send_query,
                #{points => Points, batch => true, mode => sync}
            ),
            do_query(InstId, Client, Points);
            do_query(InstId, Channel, Client, Points);
        {error, Reason} ->
            ?tp(
                greptimedb_connector_send_query_error,

@@ -173,7 +173,7 @@ on_query_async(InstId, {Channel, Message}, {ReplyFun, Args}, State) ->
                greptimedb_connector_send_query,
                #{points => Points, batch => false, mode => async}
            ),
            do_async_query(InstId, Client, Points, {ReplyFun, Args});
            do_async_query(InstId, Channel, Client, Points, {ReplyFun, Args});
        {error, ErrorPoints} = Err ->
            ?tp(
                greptimedb_connector_send_query_error,

@@ -195,7 +195,7 @@ on_batch_query_async(InstId, [{Channel, _} | _] = BatchData, {ReplyFun, Args}, S
                greptimedb_connector_send_query,
                #{points => Points, batch => true, mode => async}
            ),
            do_async_query(InstId, Client, Points, {ReplyFun, Args});
            do_async_query(InstId, Channel, Client, Points, {ReplyFun, Args});
        {error, Reason} ->
            ?tp(
                greptimedb_connector_send_query_error,

@@ -420,7 +420,8 @@ is_auth_key(_) ->

%% -------------------------------------------------------------------------------------------------
%% Query
do_query(InstId, Client, Points) ->
do_query(InstId, Channel, Client, Points) ->
    emqx_trace:rendered_action_template(Channel, #{points => Points, is_async => false}),
    case greptimedb:write_batch(Client, Points) of
        {ok, #{response := {affected_rows, #{value := Rows}}}} ->
            ?SLOG(debug, #{

@@ -452,12 +453,13 @@ do_query(InstId, Client, Points) ->
            end
    end.

do_async_query(InstId, Client, Points, ReplyFunAndArgs) ->
do_async_query(InstId, Channel, Client, Points, ReplyFunAndArgs) ->
    ?SLOG(info, #{
        msg => "greptimedb_write_point_async",
        connector => InstId,
        points => Points
    }),
    emqx_trace:rendered_action_template(Channel, #{points => Points, is_async => true}),
    WrappedReplyFunAndArgs = {fun ?MODULE:reply_callback/2, [ReplyFunAndArgs]},
    ok = greptimedb:async_write_batch(Client, Points, WrappedReplyFunAndArgs).
@@ -134,8 +134,11 @@ on_query(
    #{
        producer := Producer, partition_key := PartitionKey, record_template := HRecordTemplate
    } = maps:get(ChannelID, Channels),
    try to_record(PartitionKey, HRecordTemplate, Data) of
        Record -> append_record(InstId, Producer, Record, false)
    try
        KeyAndRawRecord = to_key_and_raw_record(PartitionKey, HRecordTemplate, Data),
        emqx_trace:rendered_action_template(ChannelID, #{record => KeyAndRawRecord}),
        Record = to_record(KeyAndRawRecord),
        append_record(InstId, Producer, Record, false)
    catch
        _:_ -> ?FAILED_TO_APPLY_HRECORD_TEMPLATE
    end.

@@ -148,8 +151,13 @@ on_batch_query(
    #{
        producer := Producer, partition_key := PartitionKey, record_template := HRecordTemplate
    } = maps:get(ChannelID, Channels),
    try to_multi_part_records(PartitionKey, HRecordTemplate, BatchList) of
        Records -> append_record(InstId, Producer, Records, true)
    try
        KeyAndRawRecordList = to_multi_part_key_and_partition_key(
            PartitionKey, HRecordTemplate, BatchList
        ),
        emqx_trace:rendered_action_template(ChannelID, #{records => KeyAndRawRecordList}),
        Records = [to_record(Item) || Item <- KeyAndRawRecordList],
        append_record(InstId, Producer, Records, true)
    catch
        _:_ -> ?FAILED_TO_APPLY_HRECORD_TEMPLATE
    end.

@@ -348,20 +356,20 @@ ensure_start_producer(ProducerName, ProducerOptions) ->
produce_name(ActionId) ->
    list_to_binary("backend_hstream_producer:" ++ to_string(ActionId)).

to_record(PartitionKeyTmpl, HRecordTmpl, Data) ->
to_key_and_raw_record(PartitionKeyTmpl, HRecordTmpl, Data) ->
    PartitionKey = emqx_placeholder:proc_tmpl(PartitionKeyTmpl, Data),
    RawRecord = emqx_placeholder:proc_tmpl(HRecordTmpl, Data),
    to_record(PartitionKey, RawRecord).
    #{partition_key => PartitionKey, raw_record => RawRecord}.

to_record(PartitionKey, RawRecord) when is_binary(PartitionKey) ->
    to_record(binary_to_list(PartitionKey), RawRecord);
to_record(PartitionKey, RawRecord) ->
to_record(#{partition_key := PartitionKey, raw_record := RawRecord}) when is_binary(PartitionKey) ->
    to_record(#{partition_key => binary_to_list(PartitionKey), raw_record => RawRecord});
to_record(#{partition_key := PartitionKey, raw_record := RawRecord}) ->
    hstreamdb:to_record(PartitionKey, raw, RawRecord).

to_multi_part_records(PartitionKeyTmpl, HRecordTmpl, BatchList) ->
to_multi_part_key_and_partition_key(PartitionKeyTmpl, HRecordTmpl, BatchList) ->
    lists:map(
        fun({_, Data}) ->
            to_record(PartitionKeyTmpl, HRecordTmpl, Data)
            to_key_and_raw_record(PartitionKeyTmpl, HRecordTmpl, Data)
        end,
        BatchList
    ).
@@ -359,7 +359,7 @@ on_query(InstId, {Method, Request, Timeout}, State) ->
on_query(
    InstId,
    {ActionId, KeyOrNum, Method, Request, Timeout, Retry},
    #{base_path := BasePath} = State
    #{base_path := BasePath, host := Host} = State
) ->
    ?TRACE(
        "QUERY",

@@ -373,7 +373,7 @@ on_query(
        }
    ),
    NRequest = formalize_request(Method, BasePath, Request),
    trace_rendered_action_template(ActionId, Method, NRequest, Timeout),
    trace_rendered_action_template(ActionId, Host, Method, NRequest, Timeout),
    Worker = resolve_pool_worker(State, KeyOrNum),
    Result0 = ehttpc:request(
        Worker,

@@ -469,7 +469,7 @@ on_query_async(
    InstId,
    {ActionId, KeyOrNum, Method, Request, Timeout},
    ReplyFunAndArgs,
    #{base_path := BasePath} = State
    #{base_path := BasePath, host := Host} = State
) ->
    Worker = resolve_pool_worker(State, KeyOrNum),
    ?TRACE(

@@ -483,7 +483,7 @@ on_query_async(
        }
    ),
    NRequest = formalize_request(Method, BasePath, Request),
    trace_rendered_action_template(ActionId, Method, NRequest, Timeout),
    trace_rendered_action_template(ActionId, Host, Method, NRequest, Timeout),
    MaxAttempts = maps:get(max_attempts, State, 3),
    Context = #{
        attempt => 1,

@@ -503,15 +503,16 @@ on_query_async(
    ),
    {ok, Worker}.

trace_rendered_action_template(ActionId, Method, NRequest, Timeout) ->
trace_rendered_action_template(ActionId, Host, Method, NRequest, Timeout) ->
    case NRequest of
        {Path, Headers} ->
            emqx_trace:rendered_action_template(
                ActionId,
                #{
                    host => Host,
                    path => Path,
                    method => Method,
                    headers => emqx_utils_redact:redact_headers(Headers),
                    headers => {fun emqx_utils_redact:redact_headers/1, Headers},
                    timeout => Timeout
                }
            );

@@ -519,15 +520,19 @@ trace_rendered_action_template(ActionId, Method, NRequest, Timeout) ->
            emqx_trace:rendered_action_template(
                ActionId,
                #{
                    host => Host,
                    path => Path,
                    method => Method,
                    headers => emqx_utils_redact:redact_headers(Headers),
                    headers => {fun emqx_utils_redact:redact_headers/1, Headers},
                    timeout => Timeout,
                    body => Body
                    body => {fun log_format_body/1, Body}
                }
            )
    end.

log_format_body(Body) ->
    unicode:characters_to_binary(Body).

resolve_pool_worker(State, undefined) ->
    resolve_pool_worker(State, self());
resolve_pool_worker(#{pool_name := PoolName} = State, Key) ->

@@ -831,6 +836,8 @@ transform_result(Result) ->
            Result;
        {ok, _TooManyRequests = StatusCode = 429, Headers} ->
            {error, {recoverable_error, #{status_code => StatusCode, headers => Headers}}};
        {ok, _ServiceUnavailable = StatusCode = 503, Headers} ->
            {error, {recoverable_error, #{status_code => StatusCode, headers => Headers}}};
        {ok, StatusCode, Headers} ->
            {error, {unrecoverable_error, #{status_code => StatusCode, headers => Headers}}};
        {ok, _TooManyRequests = StatusCode = 429, Headers, Body} ->

@@ -838,6 +845,11 @@ transform_result(Result) ->
                {recoverable_error, #{
                    status_code => StatusCode, headers => Headers, body => Body
                }}};
        {ok, _ServiceUnavailable = StatusCode = 503, Headers, Body} ->
            {error,
                {recoverable_error, #{
                    status_code => StatusCode, headers => Headers, body => Body
                }}};
        {ok, StatusCode, Headers, Body} ->
            {error,
                {unrecoverable_error, #{
@@ -93,6 +93,14 @@ init_per_testcase(t_too_many_requests, Config) ->
    ),
    ok = emqx_bridge_http_connector_test_server:set_handler(too_many_requests_http_handler()),
    [{http_server, #{port => HTTPPort, path => HTTPPath}} | Config];
init_per_testcase(t_service_unavailable, Config) ->
    HTTPPath = <<"/path">>,
    ServerSSLOpts = false,
    {ok, {HTTPPort, _Pid}} = emqx_bridge_http_connector_test_server:start_link(
        _Port = random, HTTPPath, ServerSSLOpts
    ),
    ok = emqx_bridge_http_connector_test_server:set_handler(service_unavailable_http_handler()),
    [{http_server, #{port => HTTPPort, path => HTTPPath}} | Config];
init_per_testcase(t_rule_action_expired, Config) ->
    [
        {bridge_name, ?BRIDGE_NAME}

@@ -115,6 +123,7 @@ init_per_testcase(_TestCase, Config) ->
end_per_testcase(TestCase, _Config) when
    TestCase =:= t_path_not_found;
    TestCase =:= t_too_many_requests;
    TestCase =:= t_service_unavailable;
    TestCase =:= t_rule_action_expired;
    TestCase =:= t_bridge_probes_header_atoms;
    TestCase =:= t_send_async_connection_timeout;

@@ -260,6 +269,12 @@ not_found_http_handler() ->
    end.

too_many_requests_http_handler() ->
    fail_then_success_http_handler(429).

service_unavailable_http_handler() ->
    fail_then_success_http_handler(503).

fail_then_success_http_handler(FailStatusCode) ->
    GetAndBump =
        fun() ->
            NCalled = persistent_term:get({?MODULE, times_called}, 0),

@@ -272,7 +287,7 @@ too_many_requests_http_handler() ->
        {ok, Body, Req} = cowboy_req:read_body(Req0),
        TestPid ! {http, cowboy_req:headers(Req), Body},
        Rep =
            case N >= 2 of
            case N >= 3 of
                true ->
                    cowboy_req:reply(
                        200,

@@ -282,9 +297,13 @@ too_many_requests_http_handler() ->
                    );
                false ->
                    cowboy_req:reply(
                        429,
                        FailStatusCode,
                        #{<<"content-type">> => <<"text/plain">>},
                        <<"slow down, buddy">>,
                        %% Body and no body to trigger different code paths
                        case N of
                            1 -> <<"slow down, buddy">>;
                            _ -> <<>>
                        end,
                        Req
                    )
            end,

@@ -570,6 +589,12 @@ t_path_not_found(Config) ->
    ok.

t_too_many_requests(Config) ->
    check_send_is_retried(Config).

t_service_unavailable(Config) ->
    check_send_is_retried(Config).

check_send_is_retried(Config) ->
    ?check_trace(
        begin
            #{port := Port, path := Path} = ?config(http_server, Config),
@@ -69,10 +69,21 @@ end_per_suite(Config) ->
suite() ->
    [{timetrap, {seconds, 60}}].

init_per_testcase(t_update_with_sensitive_data, Config) ->
    HTTPPath = <<"/foo/bar">>,
    ServerSSLOpts = false,
    {ok, {HTTPPort, _Pid}} = emqx_bridge_http_connector_test_server:start_link(
        _Port = random, HTTPPath, ServerSSLOpts
    ),
    ok = emqx_bridge_http_connector_test_server:set_handler(success_handler()),
    [{path, HTTPPath}, {http_server, #{port => HTTPPort, path => HTTPPath}} | Config];
init_per_testcase(_TestCase, Config) ->
    Server = start_http_server(#{response_delay_ms => 0}),
    [{http_server, Server} | Config].

end_per_testcase(t_update_with_sensitive_data, Config) ->
    ok = emqx_bridge_http_connector_test_server:stop(),
    end_per_testcase(common, proplists:delete(http_server, Config));
end_per_testcase(_TestCase, Config) ->
    case ?config(http_server, Config) of
        undefined -> ok;

@@ -112,6 +123,69 @@ t_compose_connector_url_and_action_path(Config) ->
    ),
    ok.

%% Checks that we can successfully update a connector containing sensitive headers and
%% they won't be clobbered by the update.
t_update_with_sensitive_data(Config) ->
    ?check_trace(
        begin
            ConnectorCfg0 = make_connector_config(Config),
            AuthHeader = <<"Bearer some_token">>,
            ConnectorCfg1 = emqx_utils_maps:deep_merge(
                ConnectorCfg0,
                #{<<"headers">> => #{<<"authorization">> => AuthHeader}}
            ),
            ActionCfg = make_action_config(Config),
            CreateConfig = [
                {bridge_kind, action},
                {action_type, ?BRIDGE_TYPE},
                {action_name, ?BRIDGE_NAME},
                {action_config, ActionCfg},
                {connector_type, ?BRIDGE_TYPE},
                {connector_name, ?CONNECTOR_NAME},
                {connector_config, ConnectorCfg1}
            ],
            {ok, {{_, 201, _}, _, #{<<"headers">> := #{<<"authorization">> := Obfuscated}}}} =
                emqx_bridge_v2_testlib:create_connector_api(CreateConfig),
            {ok, _} =
                emqx_bridge_v2_testlib:create_kind_api(CreateConfig),
            BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
            {ok, _} = emqx_bridge_v2_testlib:create_rule_api(
                #{
                    sql => <<"select * from \"t/http\" ">>,
                    actions => [BridgeId]
                }
            ),
            emqx:publish(emqx_message:make(<<"t/http">>, <<"1">>)),
            ?assertReceive({http, #{<<"authorization">> := AuthHeader}, _}),

            %% Now update the connector and see if the header stays deobfuscated. We send the old
            %% auth header as an obfuscated value to simulate the behavior of the frontend.
            ConnectorCfg2 = emqx_utils_maps:deep_merge(
                ConnectorCfg1,
                #{
                    <<"headers">> => #{
                        <<"authorization">> => Obfuscated,
                        <<"other_header">> => <<"new">>
                    }
                }
            ),
            {ok, _} = emqx_bridge_v2_testlib:update_connector_api(
                ?CONNECTOR_NAME,
                ?BRIDGE_TYPE,
                ConnectorCfg2
            ),

            emqx:publish(emqx_message:make(<<"t/http">>, <<"2">>)),
            %% Should not be obfuscated.
            ?assertReceive({http, #{<<"authorization">> := AuthHeader}, _}, 2_000),

            ok
        end,
        []
    ),

    ok.

%%--------------------------------------------------------------------
%% helpers
%%--------------------------------------------------------------------

@@ -123,7 +197,10 @@ make_connector_config(Config) ->
        <<"url">> => iolist_to_binary(io_lib:format("http://localhost:~p", [Port])),
        <<"headers">> => #{},
        <<"pool_type">> => <<"hash">>,
        <<"pool_size">> => 1
        <<"pool_size">> => 1,
        <<"resource_opts">> => #{
            <<"health_check_interval">> => <<"100ms">>
        }
    }.

make_action_config(Config) ->

@@ -136,5 +213,22 @@ make_action_config(Config) ->
            <<"method">> => <<"post">>,
            <<"headers">> => #{},
            <<"body">> => <<"${.}">>
        },
        <<"resource_opts">> => #{
            <<"health_check_interval">> => <<"100ms">>
        }
    }.

success_handler() ->
    TestPid = self(),
    fun(Req0, State) ->
        {ok, Body, Req} = cowboy_req:read_body(Req0),
        TestPid ! {http, cowboy_req:headers(Req), Body},
        Rep = cowboy_req:reply(
            200,
            #{<<"content-type">> => <<"application/json">>},
            <<"{}">>,
            Req
        ),
        {ok, Rep, State}
    end.
@@ -130,7 +130,7 @@ on_query(InstId, {Channel, Message}, #{channels := ChannelConf}) ->
                influxdb_connector_send_query,
                #{points => Points, batch => false, mode => sync}
            ),
            do_query(InstId, Client, Points);
            do_query(InstId, Channel, Client, Points);
        {error, ErrorPoints} ->
            ?tp(
                influxdb_connector_send_query_error,

@@ -152,7 +152,7 @@ on_batch_query(InstId, BatchData, #{channels := ChannelConf}) ->
                influxdb_connector_send_query,
                #{points => Points, batch => true, mode => sync}
            ),
            do_query(InstId, Client, Points);
            do_query(InstId, Channel, Client, Points);
        {error, Reason} ->
            ?tp(
                influxdb_connector_send_query_error,

@@ -175,7 +175,7 @@ on_query_async(
                influxdb_connector_send_query,
                #{points => Points, batch => false, mode => async}
            ),
            do_async_query(InstId, Client, Points, {ReplyFun, Args});
            do_async_query(InstId, Channel, Client, Points, {ReplyFun, Args});
        {error, ErrorPoints} = Err ->
            ?tp(
                influxdb_connector_send_query_error,

@@ -200,7 +200,7 @@ on_batch_query_async(
                influxdb_connector_send_query,
                #{points => Points, batch => true, mode => async}
            ),
            do_async_query(InstId, Client, Points, {ReplyFun, Args});
            do_async_query(InstId, Channel, Client, Points, {ReplyFun, Args});
        {error, Reason} ->
            ?tp(
                influxdb_connector_send_query_error,

@@ -496,7 +496,8 @@ is_auth_key(_) ->

%% -------------------------------------------------------------------------------------------------
%% Query
do_query(InstId, Client, Points) ->
do_query(InstId, Channel, Client, Points) ->
    emqx_trace:rendered_action_template(Channel, #{points => Points, is_async => false}),
    case influxdb:write(Client, Points) of
        ok ->
            ?SLOG(debug, #{

@@ -527,12 +528,13 @@ do_query(InstId, Client, Points) ->
            end
    end.

do_async_query(InstId, Client, Points, ReplyFunAndArgs) ->
do_async_query(InstId, Channel, Client, Points, ReplyFunAndArgs) ->
    ?SLOG(info, #{
        msg => "influxdb_write_point_async",
        connector => InstId,
        points => Points
    }),
    emqx_trace:rendered_action_template(Channel, #{points => Points, is_async => true}),
    WrappedReplyFunAndArgs = {fun ?MODULE:reply_callback/2, [ReplyFunAndArgs]},
    {ok, _WorkerPid} = influxdb:write_async(Client, Points, WrappedReplyFunAndArgs).

@@ -319,6 +319,9 @@ on_query(
        emqx_bridge_kafka_impl_producer_sync_query,
        #{headers_config => KafkaHeaders, instance_id => InstId}
    ),
    emqx_trace:rendered_action_template(MessageTag, #{
        message => KafkaMessage, send_type => sync
    }),
    do_send_msg(sync, KafkaMessage, Producers, SyncTimeout)
catch
    throw:{bad_kafka_header, _} = Error ->

@@ -376,6 +379,9 @@ on_query_async(
        emqx_bridge_kafka_impl_producer_async_query,
        #{headers_config => KafkaHeaders, instance_id => InstId}
    ),
    emqx_trace:rendered_action_template(MessageTag, #{
        message => KafkaMessage, send_type => async
    }),
    do_send_msg(async, KafkaMessage, Producers, AsyncReplyFn)
catch
    error:{invalid_partition_count, _Count, _Partitioner} ->

@@ -261,6 +261,11 @@ do_send_requests_sync(
        stream_name := StreamName
    } = maps:get(ChannelId, InstalledChannels),
    Records = render_records(Requests, Templates),
    StructuredRecords = [
        #{data => Data, partition_key => PartitionKey}
     || {Data, PartitionKey} <- Records
    ],
    emqx_trace:rendered_action_template(ChannelId, StructuredRecords),
    Result = ecpool:pick_and_do(
        PoolName,
        {emqx_bridge_kinesis_connector_client, query, [Records, StreamName]},

@@ -66,10 +66,15 @@ on_query(InstanceId, {Channel, Message0}, #{channels := Channels, connector_stat
        payload_template := PayloadTemplate,
        collection_template := CollectionTemplate
    } = ChannelState0 = maps:get(Channel, Channels),
    Collection = emqx_placeholder:proc_tmpl(CollectionTemplate, Message0),
    ChannelState = ChannelState0#{
        collection => emqx_placeholder:proc_tmpl(CollectionTemplate, Message0)
        collection => Collection
|
||||
},
|
||||
Message = render_message(PayloadTemplate, Message0),
|
||||
emqx_trace:rendered_action_template(Channel, #{
|
||||
collection => Collection,
|
||||
data => Message
|
||||
}),
|
||||
Res = emqx_mongodb:on_query(
|
||||
InstanceId,
|
||||
{Channel, Message},
|
||||
|
|
|
@ -264,7 +264,7 @@ on_query(
|
|||
),
|
||||
Channels = maps:get(installed_channels, State),
|
||||
ChannelConfig = maps:get(ChannelId, Channels),
|
||||
handle_send_result(with_egress_client(PoolName, send, [Msg, ChannelConfig]));
|
||||
handle_send_result(with_egress_client(ChannelId, PoolName, send, [Msg, ChannelConfig]));
|
||||
on_query(ResourceId, {_ChannelId, Msg}, #{}) ->
|
||||
?SLOG(error, #{
|
||||
msg => "forwarding_unavailable",
|
||||
|
@ -283,7 +283,7 @@ on_query_async(
|
|||
Callback = {fun on_async_result/2, [CallbackIn]},
|
||||
Channels = maps:get(installed_channels, State),
|
||||
ChannelConfig = maps:get(ChannelId, Channels),
|
||||
Result = with_egress_client(PoolName, send_async, [Msg, Callback, ChannelConfig]),
|
||||
Result = with_egress_client(ChannelId, PoolName, send_async, [Msg, Callback, ChannelConfig]),
|
||||
case Result of
|
||||
ok ->
|
||||
ok;
|
||||
|
@ -300,8 +300,11 @@ on_query_async(ResourceId, {_ChannelId, Msg}, _Callback, #{}) ->
|
|||
reason => "Egress is not configured"
|
||||
}).
|
||||
|
||||
with_egress_client(ResourceId, Fun, Args) ->
|
||||
ecpool:pick_and_do(ResourceId, {emqx_bridge_mqtt_egress, Fun, Args}, no_handover).
|
||||
with_egress_client(ActionID, ResourceId, Fun, Args) ->
|
||||
TraceRenderedCTX = emqx_trace:make_rendered_action_template_trace_context(ActionID),
|
||||
ecpool:pick_and_do(
|
||||
ResourceId, {emqx_bridge_mqtt_egress, Fun, [TraceRenderedCTX | Args]}, no_handover
|
||||
).
|
||||
|
||||
on_async_result(Callback, Result) ->
|
||||
apply_callback_function(Callback, handle_send_result(Result)).
|
||||
|
@ -337,6 +340,8 @@ classify_error({shutdown, _} = Reason) ->
|
|||
{recoverable_error, Reason};
|
||||
classify_error(shutdown = Reason) ->
|
||||
{recoverable_error, Reason};
|
||||
classify_error({unrecoverable_error, _Reason} = Error) ->
|
||||
Error;
|
||||
classify_error(Reason) ->
|
||||
{unrecoverable_error, Reason}.
|
||||
|
||||
|
|
|
@ -22,8 +22,8 @@
|
|||
|
||||
-export([
|
||||
config/1,
|
||||
send/3,
|
||||
send_async/4
|
||||
send/4,
|
||||
send_async/5
|
||||
]).
|
||||
|
||||
-type message() :: emqx_types:message() | map().
|
||||
|
@ -42,25 +42,40 @@
|
|||
config(#{remote := RC = #{}} = Conf) ->
|
||||
Conf#{remote => emqx_bridge_mqtt_msg:parse(RC)}.
|
||||
|
||||
-spec send(pid(), message(), egress()) -> ok.
|
||||
send(Pid, MsgIn, Egress) ->
|
||||
emqtt:publish(Pid, export_msg(MsgIn, Egress)).
|
||||
-spec send(pid(), emqx_trace:rendered_action_template_ctx(), message(), egress()) ->
|
||||
ok | {error, {unrecoverable_error, term()}}.
|
||||
send(Pid, TraceRenderedCTX, MsgIn, Egress) ->
|
||||
try
|
||||
emqtt:publish(Pid, export_msg(MsgIn, Egress, TraceRenderedCTX))
|
||||
catch
|
||||
error:{unrecoverable_error, Reason} ->
|
||||
{error, {unrecoverable_error, Reason}}
|
||||
end.
|
||||
|
||||
-spec send_async(pid(), message(), callback(), egress()) ->
|
||||
ok | {ok, pid()}.
|
||||
send_async(Pid, MsgIn, Callback, Egress) ->
|
||||
ok = emqtt:publish_async(Pid, export_msg(MsgIn, Egress), _Timeout = infinity, Callback),
|
||||
{ok, Pid}.
|
||||
-spec send_async(pid(), emqx_trace:rendered_action_template_ctx(), message(), callback(), egress()) ->
|
||||
{ok, pid()} | {error, {unrecoverable_error, term()}}.
|
||||
send_async(Pid, TraceRenderedCTX, MsgIn, Callback, Egress) ->
|
||||
try
|
||||
ok = emqtt:publish_async(
|
||||
Pid, export_msg(MsgIn, Egress, TraceRenderedCTX), _Timeout = infinity, Callback
|
||||
),
|
||||
{ok, Pid}
|
||||
catch
|
||||
error:{unrecoverable_error, Reason} ->
|
||||
{error, {unrecoverable_error, Reason}}
|
||||
end.
|
||||
|
||||
export_msg(Msg, #{remote := Remote}) ->
|
||||
to_remote_msg(Msg, Remote).
|
||||
export_msg(Msg, #{remote := Remote}, TraceRenderedCTX) ->
|
||||
to_remote_msg(Msg, Remote, TraceRenderedCTX).
|
||||
|
||||
-spec to_remote_msg(message(), emqx_bridge_mqtt_msg:msgvars()) ->
|
||||
-spec to_remote_msg(
|
||||
message(), emqx_bridge_mqtt_msg:msgvars(), emqx_trace:rendered_action_template_ctx()
|
||||
) ->
|
||||
remote_message().
|
||||
to_remote_msg(#message{flags = Flags} = Msg, Vars) ->
|
||||
to_remote_msg(#message{flags = Flags} = Msg, Vars, TraceRenderedCTX) ->
|
||||
{EventMsg, _} = emqx_rule_events:eventmsg_publish(Msg),
|
||||
to_remote_msg(EventMsg#{retain => maps:get(retain, Flags, false)}, Vars);
|
||||
to_remote_msg(Msg = #{}, Remote) ->
|
||||
to_remote_msg(EventMsg#{retain => maps:get(retain, Flags, false)}, Vars, TraceRenderedCTX);
|
||||
to_remote_msg(Msg = #{}, Remote, TraceRenderedCTX) ->
|
||||
#{
|
||||
topic := Topic,
|
||||
payload := Payload,
|
||||
|
@ -68,6 +83,13 @@ to_remote_msg(Msg = #{}, Remote) ->
|
|||
retain := Retain
|
||||
} = emqx_bridge_mqtt_msg:render(Msg, Remote),
|
||||
PubProps = maps:get(pub_props, Msg, #{}),
|
||||
emqx_trace:rendered_action_template_with_ctx(TraceRenderedCTX, #{
|
||||
qos => QoS,
|
||||
retain => Retain,
|
||||
topic => Topic,
|
||||
props => PubProps,
|
||||
payload => Payload
|
||||
}),
|
||||
#mqtt_msg{
|
||||
qos = QoS,
|
||||
retain = Retain,
|
||||
|
|
|
@ -104,10 +104,12 @@ on_query(
|
|||
#{channels := Channels, connector_state := ConnectorState}
|
||||
) when is_binary(Channel) ->
|
||||
ChannelConfig = maps:get(Channel, Channels),
|
||||
MergedState0 = maps:merge(ConnectorState, ChannelConfig),
|
||||
MergedState1 = MergedState0#{channel_id => Channel},
|
||||
Result = emqx_mysql:on_query(
|
||||
InstanceId,
|
||||
Request,
|
||||
maps:merge(ConnectorState, ChannelConfig)
|
||||
MergedState1
|
||||
),
|
||||
?tp(mysql_connector_on_query_return, #{instance_id => InstanceId, result => Result}),
|
||||
Result;
|
||||
|
@ -121,10 +123,12 @@ on_batch_query(
|
|||
) when is_binary(element(1, Req)) ->
|
||||
Channel = element(1, Req),
|
||||
ChannelConfig = maps:get(Channel, Channels),
|
||||
MergedState0 = maps:merge(ConnectorState, ChannelConfig),
|
||||
MergedState1 = MergedState0#{channel_id => Channel},
|
||||
Result = emqx_mysql:on_batch_query(
|
||||
InstanceId,
|
||||
BatchRequest,
|
||||
maps:merge(ConnectorState, ChannelConfig)
|
||||
MergedState1
|
||||
),
|
||||
?tp(mysql_connector_on_batch_query_return, #{instance_id => InstanceId, result => Result}),
|
||||
Result;
|
||||
|
|
|
@ -167,9 +167,10 @@ on_batch_query(
|
|||
BatchReq,
|
||||
#{channels := Channels} = State
|
||||
) ->
|
||||
[{ChannelId, _} | _] = BatchReq,
|
||||
case try_render_messages(BatchReq, Channels) of
|
||||
{ok, Datas} ->
|
||||
do_query(InstanceId, Datas, State);
|
||||
do_query(InstanceId, ChannelId, Datas, State);
|
||||
Error ->
|
||||
Error
|
||||
end.
|
||||
|
@ -222,12 +223,13 @@ on_get_channel_status(InstanceId, ChannelId, #{channels := Channels} = State) ->
|
|||
%% Helper fns
|
||||
%%========================================================================================
|
||||
|
||||
do_query(InstanceId, Query, #{pool_name := PoolName} = State) ->
|
||||
do_query(InstanceId, ChannelID, Query, #{pool_name := PoolName} = State) ->
|
||||
?TRACE(
|
||||
"QUERY",
|
||||
"opents_connector_received",
|
||||
#{connector => InstanceId, query => Query, state => State}
|
||||
),
|
||||
emqx_trace:rendered_action_template(ChannelID, #{query => Query}),
|
||||
|
||||
?tp(opents_bridge_on_query, #{instance_id => InstanceId}),
|
||||
|
||||
|
|
|
@ -196,6 +196,11 @@ on_query(_InstanceId, {ChannelId, Message}, State) ->
|
|||
{error, channel_not_found};
|
||||
{ok, #{message := MessageTmpl, sync_timeout := SyncTimeout, producers := Producers}} ->
|
||||
PulsarMessage = render_message(Message, MessageTmpl),
|
||||
emqx_trace:rendered_action_template(ChannelId, #{
|
||||
message => PulsarMessage,
|
||||
sync_timeout => SyncTimeout,
|
||||
is_async => false
|
||||
}),
|
||||
try
|
||||
pulsar:send_sync(Producers, [PulsarMessage], SyncTimeout)
|
||||
catch
|
||||
|
@ -217,12 +222,16 @@ on_query_async(_InstanceId, {ChannelId, Message}, AsyncReplyFn, State) ->
|
|||
?tp_span(
|
||||
pulsar_producer_on_query_async,
|
||||
#{instance_id => _InstanceId, message => Message},
|
||||
on_query_async2(Producers, Message, MessageTmpl, AsyncReplyFn)
|
||||
on_query_async2(ChannelId, Producers, Message, MessageTmpl, AsyncReplyFn)
|
||||
)
|
||||
end.
|
||||
|
||||
on_query_async2(Producers, Message, MessageTmpl, AsyncReplyFn) ->
|
||||
on_query_async2(ChannelId, Producers, Message, MessageTmpl, AsyncReplyFn) ->
|
||||
PulsarMessage = render_message(Message, MessageTmpl),
|
||||
emqx_trace:rendered_action_template(ChannelId, #{
|
||||
message => PulsarMessage,
|
||||
is_async => true
|
||||
}),
|
||||
pulsar:send(Producers, [PulsarMessage], #{callback_fn => AsyncReplyFn}).
|
||||
|
||||
%%-------------------------------------------------------------------------------------
|
||||
|
|
|
@ -9,6 +9,7 @@
|
|||
-include_lib("emqx_resource/include/emqx_resource.hrl").
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("emqx/include/emqx_trace.hrl").
|
||||
-include_lib("hocon/include/hoconsc.hrl").
|
||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
|
@ -41,7 +42,7 @@
|
|||
-export([connect/1]).
|
||||
|
||||
%% Internal callbacks
|
||||
-export([publish_messages/4]).
|
||||
-export([publish_messages/5]).
|
||||
|
||||
namespace() -> "rabbitmq".
|
||||
|
||||
|
@ -214,9 +215,10 @@ on_query(ResourceID, {ChannelId, Data} = MsgReq, State) ->
|
|||
#{channels := Channels} = State,
|
||||
case maps:find(ChannelId, Channels) of
|
||||
{ok, #{param := ProcParam, rabbitmq := RabbitMQ}} ->
|
||||
TraceRenderedCTX = emqx_trace:make_rendered_action_template_trace_context(ChannelId),
|
||||
Res = ecpool:pick_and_do(
|
||||
ResourceID,
|
||||
{?MODULE, publish_messages, [RabbitMQ, ProcParam, [MsgReq]]},
|
||||
{?MODULE, publish_messages, [RabbitMQ, ProcParam, [MsgReq], TraceRenderedCTX]},
|
||||
no_handover
|
||||
),
|
||||
handle_result(Res);
|
||||
|
@ -234,9 +236,10 @@ on_batch_query(ResourceID, [{ChannelId, _Data} | _] = Batch, State) ->
|
|||
#{channels := Channels} = State,
|
||||
case maps:find(ChannelId, Channels) of
|
||||
{ok, #{param := ProcParam, rabbitmq := RabbitMQ}} ->
|
||||
TraceRenderedCTX = emqx_trace:make_rendered_action_template_trace_context(ChannelId),
|
||||
Res = ecpool:pick_and_do(
|
||||
ResourceID,
|
||||
{?MODULE, publish_messages, [RabbitMQ, ProcParam, Batch]},
|
||||
{?MODULE, publish_messages, [RabbitMQ, ProcParam, Batch, TraceRenderedCTX]},
|
||||
no_handover
|
||||
),
|
||||
handle_result(Res);
|
||||
|
@ -255,7 +258,8 @@ publish_messages(
|
|||
wait_for_publish_confirmations := WaitForPublishConfirmations,
|
||||
publish_confirmation_timeout := PublishConfirmationTimeout
|
||||
},
|
||||
Messages
|
||||
Messages,
|
||||
TraceRenderedCTX
|
||||
) ->
|
||||
try
|
||||
publish_messages(
|
||||
|
@ -267,15 +271,18 @@ publish_messages(
|
|||
PayloadTmpl,
|
||||
Messages,
|
||||
WaitForPublishConfirmations,
|
||||
PublishConfirmationTimeout
|
||||
PublishConfirmationTimeout,
|
||||
TraceRenderedCTX
|
||||
)
|
||||
catch
|
||||
error:?EMQX_TRACE_STOP_ACTION_MATCH = Reason ->
|
||||
{error, Reason};
|
||||
%% If a message is sent to a non-existent exchange, the RabbitMQ client crashes with
%% {shutdown,{server_initiated_close,404,<<"NOT_FOUND - no exchange 'xyz' in vhost '/'">>}},
%% so we catch it and return {recoverable_error, Reason} so the failure is counted in metrics
|
||||
_Type:Reason ->
|
||||
Msg = iolist_to_binary(io_lib:format("RabbitMQ: publish_failed: ~p", [Reason])),
|
||||
erlang:error({recoverable_error, Msg})
|
||||
{error, {recoverable_error, Msg}}
|
||||
end.
|
||||
|
||||
publish_messages(
|
||||
|
@ -287,7 +294,8 @@ publish_messages(
|
|||
PayloadTmpl,
|
||||
Messages,
|
||||
WaitForPublishConfirmations,
|
||||
PublishConfirmationTimeout
|
||||
PublishConfirmationTimeout,
|
||||
TraceRenderedCTX
|
||||
) ->
|
||||
case maps:find(Conn, RabbitMQ) of
|
||||
{ok, Channel} ->
|
||||
|
@ -299,18 +307,33 @@ publish_messages(
|
|||
exchange = Exchange,
|
||||
routing_key = RoutingKey
|
||||
},
|
||||
FormattedMsgs = [
|
||||
format_data(PayloadTmpl, M)
|
||||
|| {_, M} <- Messages
|
||||
],
|
||||
emqx_trace:rendered_action_template_with_ctx(TraceRenderedCTX, #{
|
||||
messages => FormattedMsgs,
|
||||
properties => #{
|
||||
headers => [],
|
||||
delivery_mode => DeliveryMode
|
||||
},
|
||||
method => #{
|
||||
exchange => Exchange,
|
||||
routing_key => RoutingKey
|
||||
}
|
||||
}),
|
||||
lists:foreach(
|
||||
fun({_, MsgRaw}) ->
|
||||
fun(Msg) ->
|
||||
amqp_channel:cast(
|
||||
Channel,
|
||||
Method,
|
||||
#amqp_msg{
|
||||
payload = format_data(PayloadTmpl, MsgRaw),
|
||||
payload = Msg,
|
||||
props = MessageProperties
|
||||
}
|
||||
)
|
||||
end,
|
||||
Messages
|
||||
FormattedMsgs
|
||||
),
|
||||
case WaitForPublishConfirmations of
|
||||
true ->
|
||||
|
|
|
@ -107,7 +107,7 @@ on_query(InstId, {cmd, Cmd}, #{conn_st := RedisConnSt}) ->
|
|||
Result;
|
||||
on_query(
|
||||
InstId,
|
||||
{_MessageTag, _Data} = Msg,
|
||||
{MessageTag, _Data} = Msg,
|
||||
#{channels := Channels, conn_st := RedisConnSt}
|
||||
) ->
|
||||
case try_render_message([Msg], Channels) of
|
||||
|
@ -116,6 +116,10 @@ on_query(
|
|||
redis_bridge_connector_cmd,
|
||||
#{cmd => Cmd, batch => false, mode => sync}
|
||||
),
|
||||
emqx_trace:rendered_action_template(
|
||||
MessageTag,
|
||||
#{command => Cmd, batch => false, mode => sync}
|
||||
),
|
||||
Result = query(InstId, {cmd, Cmd}, RedisConnSt),
|
||||
?tp(
|
||||
redis_bridge_connector_send_done,
|
||||
|
@ -135,6 +139,11 @@ on_batch_query(
|
|||
redis_bridge_connector_send,
|
||||
#{batch_data => BatchData, batch => true, mode => sync}
|
||||
),
|
||||
[{ChannelID, _} | _] = BatchData,
|
||||
emqx_trace:rendered_action_template(
|
||||
ChannelID,
|
||||
#{commands => Cmds, batch => true, mode => sync}
|
||||
),
|
||||
Result = query(InstId, {cmds, Cmds}, RedisConnSt),
|
||||
?tp(
|
||||
redis_bridge_connector_send_done,
|
||||
|
|
|
@ -264,7 +264,11 @@ do_query(
|
|||
|
||||
TopicKey = get_topic_key(Query, TopicTks),
|
||||
Data = apply_template(Query, Templates, DispatchStrategy),
|
||||
|
||||
emqx_trace:rendered_action_template(ChannelId, #{
|
||||
topic_key => TopicKey,
|
||||
data => Data,
|
||||
request_timeout => RequestTimeout
|
||||
}),
|
||||
Result = safe_do_produce(
|
||||
ChannelId, InstanceId, QueryFunc, ClientId, TopicKey, Data, ProducerOpts, RequestTimeout
|
||||
),
|
||||
|
|
|
@ -10,9 +10,15 @@
|
|||
emqx_s3
|
||||
]},
|
||||
{env, [
|
||||
{emqx_action_info_modules, [emqx_bridge_s3_action_info]},
|
||||
{emqx_connector_info_modules, [emqx_bridge_s3_connector_info]}
|
||||
{emqx_action_info_modules, [
|
||||
emqx_bridge_s3_upload_action_info,
|
||||
emqx_bridge_s3_aggreg_upload_action_info
|
||||
]},
|
||||
{emqx_connector_info_modules, [
|
||||
emqx_bridge_s3_connector_info
|
||||
]}
|
||||
]},
|
||||
{mod, {emqx_bridge_s3_app, []}},
|
||||
{modules, []},
|
||||
{links, []}
|
||||
]}.
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
]).
|
||||
|
||||
-export([
|
||||
bridge_v2_examples/1,
|
||||
connector_examples/1
|
||||
]).
|
||||
|
||||
|
@ -39,58 +38,11 @@ fields(Field) when
|
|||
Field == "post_connector"
|
||||
->
|
||||
emqx_connector_schema:api_fields(Field, ?CONNECTOR, fields(s3_connector_config));
|
||||
fields(Field) when
|
||||
Field == "get_bridge_v2";
|
||||
Field == "put_bridge_v2";
|
||||
Field == "post_bridge_v2"
|
||||
->
|
||||
emqx_bridge_v2_schema:api_fields(Field, ?ACTION, fields(?ACTION));
|
||||
fields(action) ->
|
||||
{?ACTION,
|
||||
hoconsc:mk(
|
||||
hoconsc:map(name, hoconsc:ref(?MODULE, ?ACTION)),
|
||||
#{
|
||||
desc => <<"S3 Action Config">>,
|
||||
required => false
|
||||
}
|
||||
)};
|
||||
fields("config_connector") ->
|
||||
emqx_connector_schema:common_fields() ++ fields(s3_connector_config);
|
||||
fields(?ACTION) ->
|
||||
emqx_bridge_v2_schema:make_producer_action_schema(
|
||||
hoconsc:mk(
|
||||
?R_REF(s3_upload_parameters),
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC(s3_upload)
|
||||
}
|
||||
),
|
||||
#{
|
||||
resource_opts_ref => ?R_REF(s3_action_resource_opts)
|
||||
}
|
||||
);
|
||||
fields(s3_connector_config) ->
|
||||
emqx_s3_schema:fields(s3_client) ++
|
||||
emqx_connector_schema:resource_opts_ref(?MODULE, s3_connector_resource_opts);
|
||||
fields(s3_upload_parameters) ->
|
||||
emqx_s3_schema:fields(s3_upload) ++
|
||||
[
|
||||
{content,
|
||||
hoconsc:mk(
|
||||
emqx_schema:template(),
|
||||
#{
|
||||
required => false,
|
||||
default => <<"${.}">>,
|
||||
desc => ?DESC(s3_object_content)
|
||||
}
|
||||
)}
|
||||
];
|
||||
fields(s3_action_resource_opts) ->
|
||||
UnsupportedOpts = [batch_size, batch_time],
|
||||
lists:filter(
|
||||
fun({N, _}) -> not lists:member(N, UnsupportedOpts) end,
|
||||
emqx_bridge_v2_schema:action_resource_opts_fields()
|
||||
);
|
||||
fields(s3_connector_resource_opts) ->
|
||||
CommonOpts = emqx_connector_schema:common_resource_opts_subfields(),
|
||||
lists:filter(
|
||||
|
@ -100,14 +52,6 @@ fields(s3_connector_resource_opts) ->
|
|||
|
||||
desc("config_connector") ->
|
||||
?DESC(config_connector);
|
||||
desc(?ACTION) ->
|
||||
?DESC(s3_upload);
|
||||
desc(s3_upload) ->
|
||||
?DESC(s3_upload);
|
||||
desc(s3_upload_parameters) ->
|
||||
?DESC(s3_upload_parameters);
|
||||
desc(s3_action_resource_opts) ->
|
||||
?DESC(emqx_resource_schema, resource_opts);
|
||||
desc(s3_connector_resource_opts) ->
|
||||
?DESC(emqx_resource_schema, resource_opts);
|
||||
desc(_Name) ->
|
||||
|
@ -115,54 +59,6 @@ desc(_Name) ->
|
|||
|
||||
%% Examples
|
||||
|
||||
bridge_v2_examples(Method) ->
|
||||
[
|
||||
#{
|
||||
<<"s3">> => #{
|
||||
summary => <<"S3 Simple Upload">>,
|
||||
value => action_example(Method)
|
||||
}
|
||||
}
|
||||
].
|
||||
|
||||
action_example(post) ->
|
||||
maps:merge(
|
||||
action_example(put),
|
||||
#{
|
||||
type => atom_to_binary(?ACTION),
|
||||
name => <<"my_s3_action">>
|
||||
}
|
||||
);
|
||||
action_example(get) ->
|
||||
maps:merge(
|
||||
action_example(put),
|
||||
#{
|
||||
status => <<"connected">>,
|
||||
node_status => [
|
||||
#{
|
||||
node => <<"emqx@localhost">>,
|
||||
status => <<"connected">>
|
||||
}
|
||||
]
|
||||
}
|
||||
);
|
||||
action_example(put) ->
|
||||
#{
|
||||
enable => true,
|
||||
connector => <<"my_s3_connector">>,
|
||||
description => <<"My action">>,
|
||||
parameters => #{
|
||||
bucket => <<"${clientid}">>,
|
||||
key => <<"${topic}">>,
|
||||
content => <<"${payload}">>,
|
||||
acl => <<"public_read">>
|
||||
},
|
||||
resource_opts => #{
|
||||
query_mode => <<"sync">>,
|
||||
inflight_window => 10
|
||||
}
|
||||
}.
|
||||
|
||||
connector_examples(Method) ->
|
||||
[
|
||||
#{
|
||||
|
|
|
@ -5,7 +5,12 @@
|
|||
-ifndef(__EMQX_BRIDGE_S3_HRL__).
|
||||
-define(__EMQX_BRIDGE_S3_HRL__, true).
|
||||
|
||||
-define(ACTION, s3).
|
||||
%% Actions
|
||||
-define(ACTION_UPLOAD, s3).
|
||||
-define(BRIDGE_TYPE_UPLOAD, <<"s3">>).
|
||||
-define(ACTION_AGGREGATED_UPLOAD, s3_aggregated_upload).
|
||||
-define(BRIDGE_TYPE_AGGREGATED_UPLOAD, <<"s3_aggregated_upload">>).
|
||||
|
||||
-define(CONNECTOR, s3).
|
||||
|
||||
-endif.
|
||||
|
|
|
@ -0,0 +1,138 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% This module provides a deliberately simple interface for writing and reading
|
||||
%% Erlang terms to/from a file descriptor (i.e. IO device), with a goal
|
||||
%% of being able to write and read terms in a streaming fashion, and to
|
||||
%% survive partial corruption.
|
||||
%%
|
||||
%% Layout of the file is as follows:
|
||||
%% ```
|
||||
%% ETF(Header { Metadata })
|
||||
%% ETF(Record1 ByteSize)
|
||||
%% ETF(Record1)
|
||||
%% ETF(Record2 ByteSize)
|
||||
%% ETF(Record2)
|
||||
%% ...
|
||||
%% ```
|
||||
%% ^ ETF = Erlang External Term Format (i.e. `erlang:term_to_binary/1`).
|
||||
-module(emqx_bridge_s3_aggreg_buffer).
|
||||
|
||||
-export([
|
||||
new_writer/2,
|
||||
write/2,
|
||||
takeover/1
|
||||
]).
|
||||
|
||||
-export([
|
||||
new_reader/1,
|
||||
read/1
|
||||
]).
|
||||
|
||||
-export_type([writer/0, reader/0]).
|
||||
|
||||
-record(reader, {
|
||||
fd :: file:io_device() | eof,
|
||||
buffer :: binary(),
|
||||
hasread = 0 :: non_neg_integer()
|
||||
}).
|
||||
|
||||
-type writer() :: file:io_device().
|
||||
-type reader() :: #reader{}.
|
||||
|
||||
%%
|
||||
|
||||
-define(VSN, 1).
|
||||
-define(HEADER(MD), [?MODULE, ?VSN, MD]).
|
||||
|
||||
-define(READAHEAD_BYTES, 64 * 4096).
|
||||
-define(SANE_TERM_SIZE, 256 * 1024 * 1024).
|
||||
|
||||
%%
|
||||
|
||||
-spec new_writer(file:io_device(), _Meta) -> writer().
|
||||
new_writer(FD, Meta) ->
|
||||
%% TODO: Validate header is not too big?
|
||||
Header = term_to_iovec(?HEADER(Meta)),
|
||||
case file:write(FD, Header) of
|
||||
ok ->
|
||||
FD;
|
||||
{error, Reason} ->
|
||||
error({buffer_write_failed, Reason})
|
||||
end.
|
||||
|
||||
-spec write(_Term, writer()) -> ok | {error, file:posix()}.
|
||||
write(Term, FD) ->
|
||||
IOData = term_to_iovec(Term),
|
||||
Marker = term_to_binary(iolist_size(IOData)),
|
||||
file:write(FD, [Marker | IOData]).
|
||||
|
||||
%%
|
||||
|
||||
-spec new_reader(file:io_device()) -> {_Meta, reader()}.
|
||||
new_reader(FD) ->
|
||||
Reader0 = #reader{fd = FD, buffer = <<>>},
|
||||
Reader1 = read_buffered(?READAHEAD_BYTES, Reader0),
|
||||
case read_next_term(Reader1) of
|
||||
{?HEADER(MD), Reader} ->
|
||||
{MD, Reader};
|
||||
{UnexpectedHeader, _Reader} ->
|
||||
error({buffer_unexpected_header, UnexpectedHeader});
|
||||
eof ->
|
||||
error({buffer_incomplete, header})
|
||||
end.
|
||||
|
||||
-spec read(reader()) -> {_Term, reader()} | eof.
|
||||
read(Reader0) ->
|
||||
case read_next_term(read_buffered(_LargeEnough = 16, Reader0)) of
|
||||
{Size, Reader1} when is_integer(Size) andalso Size > 0 andalso Size < ?SANE_TERM_SIZE ->
|
||||
case read_next_term(read_buffered(Size, Reader1)) of
|
||||
{Term, Reader} ->
|
||||
{Term, Reader};
|
||||
eof ->
|
||||
error({buffer_incomplete, Size})
|
||||
end;
|
||||
{UnexpectedSize, _Reader} ->
|
||||
error({buffer_unexpected_record_size, UnexpectedSize});
|
||||
eof ->
|
||||
eof
|
||||
end.
|
||||
|
||||
-spec takeover(reader()) -> writer().
|
||||
takeover(#reader{fd = FD, hasread = HasRead}) ->
|
||||
case file:position(FD, HasRead) of
|
||||
{ok, HasRead} ->
|
||||
case file:truncate(FD) of
|
||||
ok ->
|
||||
FD;
|
||||
{error, Reason} ->
|
||||
error({buffer_takeover_failed, Reason})
|
||||
end;
|
||||
{error, Reason} ->
|
||||
error({buffer_takeover_failed, Reason})
|
||||
end.
|
||||
|
||||
read_next_term(#reader{fd = eof, buffer = <<>>}) ->
|
||||
eof;
|
||||
read_next_term(Reader = #reader{buffer = Buffer, hasread = HasRead}) ->
|
||||
{Term, UsedBytes} = erlang:binary_to_term(Buffer, [safe, used]),
|
||||
BufferSize = byte_size(Buffer),
|
||||
BufferLeft = binary:part(Buffer, UsedBytes, BufferSize - UsedBytes),
|
||||
{Term, Reader#reader{buffer = BufferLeft, hasread = HasRead + UsedBytes}}.
|
||||
|
||||
read_buffered(_Size, Reader = #reader{fd = eof}) ->
|
||||
Reader;
|
||||
read_buffered(Size, Reader = #reader{fd = FD, buffer = Buffer0}) ->
|
||||
BufferSize = byte_size(Buffer0),
|
||||
ReadSize = erlang:max(Size, ?READAHEAD_BYTES),
|
||||
case BufferSize < Size andalso file:read(FD, ReadSize) of
|
||||
false ->
|
||||
Reader;
|
||||
{ok, Data} ->
|
||||
Reader#reader{buffer = <<Buffer0/binary, Data/binary>>};
|
||||
eof ->
|
||||
Reader#reader{fd = eof};
|
||||
{error, Reason} ->
|
||||
error({buffer_read_failed, Reason})
|
||||
end.
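
%% Illustration (not part of this change): a minimal round trip through the buffer API
%% above, assuming a scratch file path. new_writer/2 stamps the header, write/2 appends
%% size-prefixed ETF records, and new_reader/1 + read/1 stream them back.
buffer_roundtrip_example() ->
    Filename = "/tmp/aggreg_buffer_example.bin",
    {ok, WFD} = file:open(Filename, [write, binary]),
    Writer = emqx_bridge_s3_aggreg_buffer:new_writer(WFD, #{origin => example}),
    ok = emqx_bridge_s3_aggreg_buffer:write([#{<<"clientid">> => <<"c1">>}], Writer),
    ok = file:close(WFD),
    {ok, RFD} = file:open(Filename, [read, binary, raw]),
    {_Meta, Reader} = emqx_bridge_s3_aggreg_buffer:new_reader(RFD),
    {Records, _} = emqx_bridge_s3_aggreg_buffer:read(Reader),
    Records.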
|
|
@ -0,0 +1,105 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% CSV container implementation for `emqx_bridge_s3_aggregator`.
|
||||
-module(emqx_bridge_s3_aggreg_csv).
|
||||
|
||||
%% Container API
|
||||
-export([
|
||||
new/1,
|
||||
fill/2,
|
||||
close/1
|
||||
]).
|
||||
|
||||
-export_type([container/0]).
|
||||
|
||||
-record(csv, {
|
||||
columns :: [binary()] | undefined,
|
||||
column_order :: [binary()],
|
||||
%% A string or character that separates each field in a record from the next.
|
||||
%% Default: ","
|
||||
field_separator :: char() | iodata(),
|
||||
%% A string or character that delimits boundaries of a record.
|
||||
%% Default: "\n"
|
||||
record_delimiter :: char() | iodata(),
|
||||
quoting_mp :: _ReMP
|
||||
}).
|
||||
|
||||
-type container() :: #csv{}.
|
||||
|
||||
-type options() :: #{
|
||||
%% Which columns have to be ordered first in the resulting CSV?
|
||||
column_order => [column()]
|
||||
}.
|
||||
|
||||
-type record() :: emqx_bridge_s3_aggregator:record().
|
||||
-type column() :: binary().
|
||||
|
||||
%%
|
||||
|
||||
-spec new(options()) -> container().
|
||||
new(Opts) ->
|
||||
{ok, MP} = re:compile("[\\[\\],\\r\\n\"]", [unicode]),
|
||||
#csv{
|
||||
column_order = maps:get(column_order, Opts, []),
|
||||
field_separator = $,,
|
||||
record_delimiter = $\n,
|
||||
quoting_mp = MP
|
||||
}.
|
||||
|
||||
-spec fill([record()], container()) -> {iodata(), container()}.
|
||||
fill(Records = [Record | _], CSV0 = #csv{columns = undefined}) ->
|
||||
Columns = mk_columns(Record, CSV0),
|
||||
Header = emit_header(Columns, CSV0),
|
||||
{Writes, CSV} = fill(Records, CSV0#csv{columns = Columns}),
|
||||
{[Header | Writes], CSV};
|
||||
fill(Records, CSV) ->
|
||||
Writes = [emit_row(R, CSV) || R <- Records],
|
||||
{Writes, CSV}.
|
||||
|
||||
-spec close(container()) -> iodata().
|
||||
close(#csv{}) ->
|
||||
[].
|
||||
|
||||
%%
|
||||
|
||||
mk_columns(Record, #csv{column_order = ColumnOrder}) ->
|
||||
Columns = [emqx_utils_conv:bin(C) || C <- lists:sort(maps:keys(Record))],
|
||||
Unordered = Columns -- ColumnOrder,
ColumnOrder ++ Unordered.
|
||||
|
||||
-spec emit_header([column()], container()) -> iodata().
|
||||
emit_header([C], #csv{record_delimiter = Delim}) ->
|
||||
[C, Delim];
|
||||
emit_header([C | Rest], CSV = #csv{field_separator = Sep}) ->
|
||||
[C, Sep | emit_header(Rest, CSV)];
|
||||
emit_header([], #csv{record_delimiter = Delim}) ->
|
||||
[Delim].
|
||||
|
||||
-spec emit_row(record(), container()) -> iodata().
|
||||
emit_row(Record, CSV = #csv{columns = Columns}) ->
|
||||
emit_row(Record, Columns, CSV).
|
||||
|
||||
emit_row(Record, [C], CSV = #csv{record_delimiter = Delim}) ->
|
||||
[emit_cell(C, Record, CSV), Delim];
|
||||
emit_row(Record, [C | Rest], CSV = #csv{field_separator = Sep}) ->
|
||||
[emit_cell(C, Record, CSV), Sep | emit_row(Record, Rest, CSV)];
|
||||
emit_row(#{}, [], #csv{record_delimiter = Delim}) ->
|
||||
[Delim].
|
||||
|
||||
emit_cell(Column, Record, CSV) ->
|
||||
case emqx_template:lookup(Column, Record) of
|
||||
{ok, Value} ->
|
||||
encode_cell(emqx_template:to_string(Value), CSV);
|
||||
{error, undefined} ->
|
||||
_Empty = ""
|
||||
end.
|
||||
|
||||
encode_cell(V, #csv{quoting_mp = MP}) ->
|
||||
case re:run(V, MP, []) of
|
||||
nomatch ->
|
||||
V;
|
||||
_ ->
|
||||
[$", re:replace(V, <<"\"">>, <<"\"\"">>, [global, unicode]), $"]
|
||||
end.
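
%% Illustration (not part of this change): filling the CSV container above with a couple
%% of records. The header row is emitted on the first fill/2 call; close/1 returns the
%% (empty) trailer. Expected to yield roughly the binary shown in the comment below.
csv_container_example() ->
    CSV0 = emqx_bridge_s3_aggreg_csv:new(#{column_order => [<<"clientid">>, <<"topic">>]}),
    Records = [
        #{<<"clientid">> => <<"c1">>, <<"topic">> => <<"t/1">>, <<"payload">> => <<"hello">>},
        #{<<"clientid">> => <<"c2">>, <<"topic">> => <<"t/2">>, <<"payload">> => <<"bye">>}
    ],
    {Writes, CSV} = emqx_bridge_s3_aggreg_csv:fill(Records, CSV0),
    Trailer = emqx_bridge_s3_aggreg_csv:close(CSV),
    %% <<"clientid,topic,payload\nc1,t/1,hello\nc2,t/2,bye\n">>
    iolist_to_binary([Writes, Trailer]).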
|
|
@ -0,0 +1,212 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% This module takes aggregated records from a buffer and delivers them to S3,
|
||||
%% wrapped in a configurable container (though currently there's only CSV).
|
||||
-module(emqx_bridge_s3_aggreg_delivery).
|
||||
|
||||
-include_lib("snabbkaffe/include/trace.hrl").
|
||||
-include("emqx_bridge_s3_aggregator.hrl").
|
||||
|
||||
-export([start_link/3]).
|
||||
|
||||
%% Internal exports
|
||||
-export([
|
||||
init/4,
|
||||
loop/3
|
||||
]).
|
||||
|
||||
-behaviour(emqx_template).
|
||||
-export([lookup/2]).
|
||||
|
||||
%% Sys
|
||||
-export([
|
||||
system_continue/3,
|
||||
system_terminate/4,
|
||||
format_status/2
|
||||
]).
|
||||
|
||||
-record(delivery, {
|
||||
name :: _Name,
|
||||
container :: emqx_bridge_s3_aggreg_csv:container(),
|
||||
reader :: emqx_bridge_s3_aggreg_buffer:reader(),
|
||||
upload :: emqx_s3_upload:t(),
|
||||
empty :: boolean()
|
||||
}).
|
||||
|
||||
-type state() :: #delivery{}.
|
||||
|
||||
%%
|
||||
|
||||
start_link(Name, Buffer, Opts) ->
|
||||
proc_lib:start_link(?MODULE, init, [self(), Name, Buffer, Opts]).
|
||||
|
||||
%%
|
||||
|
||||
-spec init(pid(), _Name, buffer(), _Opts :: map()) -> no_return().
|
||||
init(Parent, Name, Buffer, Opts) ->
|
||||
?tp(s3_aggreg_delivery_started, #{action => Name, buffer => Buffer}),
|
||||
Reader = open_buffer(Buffer),
|
||||
Delivery = init_delivery(Name, Reader, Buffer, Opts#{action => Name}),
|
||||
_ = erlang:process_flag(trap_exit, true),
|
||||
ok = proc_lib:init_ack({ok, self()}),
|
||||
loop(Delivery, Parent, []).
|
||||
|
||||
init_delivery(Name, Reader, Buffer, Opts = #{container := ContainerOpts}) ->
|
||||
#delivery{
|
||||
name = Name,
|
||||
container = mk_container(ContainerOpts),
|
||||
reader = Reader,
|
||||
upload = mk_upload(Buffer, Opts),
|
||||
empty = true
|
||||
}.
|
||||
|
||||
open_buffer(#buffer{filename = Filename}) ->
|
||||
case file:open(Filename, [read, binary, raw]) of
|
||||
{ok, FD} ->
|
||||
{_Meta, Reader} = emqx_bridge_s3_aggreg_buffer:new_reader(FD),
|
||||
Reader;
|
||||
{error, Reason} ->
|
||||
error({buffer_open_failed, Reason})
|
||||
end.
|
||||
|
||||
mk_container(#{type := csv, column_order := OrderOpt}) ->
|
||||
%% TODO: Deduplicate?
|
||||
ColumnOrder = lists:map(fun emqx_utils_conv:bin/1, OrderOpt),
|
||||
emqx_bridge_s3_aggreg_csv:new(#{column_order => ColumnOrder}).
|
||||
|
||||
mk_upload(
|
||||
Buffer,
|
||||
Opts = #{
|
||||
bucket := Bucket,
|
||||
upload_options := UploadOpts,
|
||||
client_config := Config,
|
||||
uploader_config := UploaderConfig
|
||||
}
|
||||
) ->
|
||||
Client = emqx_s3_client:create(Bucket, Config),
|
||||
Key = mk_object_key(Buffer, Opts),
|
||||
emqx_s3_upload:new(Client, Key, UploadOpts, UploaderConfig).
|
||||
|
||||
mk_object_key(Buffer, #{action := Name, key := Template}) ->
|
||||
emqx_template:render_strict(Template, {?MODULE, {Name, Buffer}}).
|
||||
|
||||
%%
|
||||
|
||||
-spec loop(state(), pid(), [sys:debug_option()]) -> no_return().
|
||||
loop(Delivery, Parent, Debug) ->
|
||||
%% NOTE: This function is mocked in tests.
|
||||
receive
|
||||
Msg -> handle_msg(Msg, Delivery, Parent, Debug)
|
||||
after 0 ->
|
||||
process_delivery(Delivery, Parent, Debug)
|
||||
end.
|
||||
|
||||
process_delivery(Delivery0 = #delivery{reader = Reader0}, Parent, Debug) ->
|
||||
case emqx_bridge_s3_aggreg_buffer:read(Reader0) of
|
||||
{Records = [#{} | _], Reader} ->
|
||||
Delivery1 = Delivery0#delivery{reader = Reader},
|
||||
Delivery2 = process_append_records(Records, Delivery1),
|
||||
Delivery = process_write(Delivery2),
|
||||
loop(Delivery, Parent, Debug);
|
||||
{[], Reader} ->
|
||||
Delivery = Delivery0#delivery{reader = Reader},
|
||||
loop(Delivery, Parent, Debug);
|
||||
eof ->
|
||||
process_complete(Delivery0);
|
||||
{Unexpected, _Reader} ->
|
||||
exit({buffer_unexpected_record, Unexpected})
|
||||
end.
|
||||
|
||||
process_append_records(Records, Delivery = #delivery{container = Container0, upload = Upload0}) ->
|
||||
{Writes, Container} = emqx_bridge_s3_aggreg_csv:fill(Records, Container0),
|
||||
{ok, Upload} = emqx_s3_upload:append(Writes, Upload0),
|
||||
Delivery#delivery{
|
||||
container = Container,
|
||||
upload = Upload,
|
||||
empty = false
|
||||
}.
|
||||
|
||||
process_write(Delivery = #delivery{upload = Upload0}) ->
|
||||
case emqx_s3_upload:write(Upload0) of
|
||||
{ok, Upload} ->
|
||||
Delivery#delivery{upload = Upload};
|
||||
{cont, Upload} ->
|
||||
process_write(Delivery#delivery{upload = Upload});
|
||||
{error, Reason} ->
|
||||
_ = emqx_s3_upload:abort(Upload0),
|
||||
exit({upload_failed, Reason})
|
||||
end.
|
||||
|
||||
process_complete(#delivery{name = Name, empty = true}) ->
|
||||
?tp(s3_aggreg_delivery_completed, #{action => Name, upload => empty}),
|
||||
exit({shutdown, {skipped, empty}});
|
||||
process_complete(#delivery{name = Name, container = Container, upload = Upload0}) ->
|
||||
Trailer = emqx_bridge_s3_aggreg_csv:close(Container),
|
||||
{ok, Upload} = emqx_s3_upload:append(Trailer, Upload0),
|
||||
case emqx_s3_upload:complete(Upload) of
|
||||
{ok, Completed} ->
|
||||
?tp(s3_aggreg_delivery_completed, #{action => Name, upload => Completed}),
|
||||
ok;
|
||||
{error, Reason} ->
|
||||
_ = emqx_s3_upload:abort(Upload),
|
||||
exit({upload_failed, Reason})
|
||||
end.
|
||||
|
||||
%%
|
||||
|
||||
handle_msg({system, From, Msg}, Delivery, Parent, Debug) ->
|
||||
sys:handle_system_msg(Msg, From, Parent, ?MODULE, Debug, Delivery);
|
||||
handle_msg({'EXIT', Parent, Reason}, Delivery, Parent, Debug) ->
|
||||
system_terminate(Reason, Parent, Debug, Delivery);
|
||||
handle_msg(_Msg, Delivery, Parent, Debug) ->
|
||||
loop(Delivery, Parent, Debug).
|
||||
|
||||
-spec system_continue(pid(), [sys:debug_option()], state()) -> no_return().
|
||||
system_continue(Parent, Debug, Delivery) ->
|
||||
loop(Delivery, Parent, Debug).
|
||||
|
||||
-spec system_terminate(_Reason, pid(), [sys:debug_option()], state()) -> _.
|
||||
system_terminate(_Reason, _Parent, _Debug, #delivery{upload = Upload}) ->
|
||||
emqx_s3_upload:abort(Upload).
|
||||
|
||||
-spec format_status(normal, Args :: [term()]) -> _StateFormatted.
|
||||
format_status(_Normal, [_PDict, _SysState, _Parent, _Debug, Delivery]) ->
|
||||
Delivery#delivery{
|
||||
upload = emqx_s3_upload:format(Delivery#delivery.upload)
|
||||
}.
|
||||
|
||||
%%
|
||||
|
||||
-spec lookup(emqx_template:accessor(), {_Name, buffer()}) ->
|
||||
{ok, integer() | string()} | {error, undefined}.
|
||||
lookup([<<"action">>], {Name, _Buffer}) ->
|
||||
{ok, mk_fs_safe_string(Name)};
|
||||
lookup(Accessor, {_Name, Buffer = #buffer{}}) ->
|
||||
lookup_buffer_var(Accessor, Buffer);
|
||||
lookup(_Accessor, _Context) ->
|
||||
{error, undefined}.
|
||||
|
||||
lookup_buffer_var([<<"datetime">>, Format], #buffer{since = Since}) ->
|
||||
{ok, format_timestamp(Since, Format)};
|
||||
lookup_buffer_var([<<"datetime_until">>, Format], #buffer{until = Until}) ->
|
||||
{ok, format_timestamp(Until, Format)};
|
||||
lookup_buffer_var([<<"sequence">>], #buffer{seq = Seq}) ->
|
||||
{ok, Seq};
|
||||
lookup_buffer_var([<<"node">>], #buffer{}) ->
|
||||
{ok, mk_fs_safe_string(atom_to_binary(erlang:node()))};
|
||||
lookup_buffer_var(_Binding, _Context) ->
|
||||
{error, undefined}.
|
||||
|
||||
format_timestamp(Timestamp, <<"rfc3339utc">>) ->
|
||||
String = calendar:system_time_to_rfc3339(Timestamp, [{unit, second}, {offset, "Z"}]),
|
||||
mk_fs_safe_string(String);
|
||||
format_timestamp(Timestamp, <<"rfc3339">>) ->
|
||||
String = calendar:system_time_to_rfc3339(Timestamp, [{unit, second}]),
|
||||
mk_fs_safe_string(String);
|
||||
format_timestamp(Timestamp, <<"unix">>) ->
|
||||
Timestamp.
|
||||
|
||||
mk_fs_safe_string(String) ->
|
||||
unicode:characters_to_binary(string:replace(String, ":", "_", all)).
|
|
@ -0,0 +1,275 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_aggreg_upload).
|
||||
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
-include_lib("hocon/include/hoconsc.hrl").
|
||||
-include("emqx_bridge_s3.hrl").
|
||||
|
||||
-define(ACTION, ?ACTION_AGGREGATED_UPLOAD).
|
||||
|
||||
-define(DEFAULT_BATCH_SIZE, 100).
|
||||
-define(DEFAULT_BATCH_TIME, <<"10ms">>).
|
||||
|
||||
-behaviour(hocon_schema).
|
||||
-export([
|
||||
namespace/0,
|
||||
roots/0,
|
||||
fields/1,
|
||||
desc/1
|
||||
]).
|
||||
|
||||
%% Interpreting options
|
||||
-export([
|
||||
mk_key_template/1,
|
||||
mk_upload_options/1
|
||||
]).
|
||||
|
||||
%% emqx_bridge_v2_schema API
|
||||
-export([bridge_v2_examples/1]).
|
||||
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
%% `hocon_schema' API
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
|
||||
namespace() ->
|
||||
"bridge_s3".
|
||||
|
||||
roots() ->
|
||||
[].
|
||||
|
||||
fields(Field) when
|
||||
Field == "get_bridge_v2";
|
||||
Field == "put_bridge_v2";
|
||||
Field == "post_bridge_v2"
|
||||
->
|
||||
emqx_bridge_v2_schema:api_fields(Field, ?ACTION, fields(?ACTION));
|
||||
fields(action) ->
|
||||
{?ACTION,
|
||||
hoconsc:mk(
|
||||
hoconsc:map(name, hoconsc:ref(?MODULE, ?ACTION)),
|
||||
#{
|
||||
desc => <<"S3 Aggregated Upload Action Config">>,
|
||||
required => false
|
||||
}
|
||||
)};
|
||||
fields(?ACTION) ->
|
||||
emqx_bridge_v2_schema:make_producer_action_schema(
|
||||
hoconsc:mk(
|
||||
?R_REF(s3_aggregated_upload_parameters),
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC(s3_aggregated_upload_parameters)
|
||||
}
|
||||
),
|
||||
#{
|
||||
resource_opts_ref => ?R_REF(s3_aggreg_upload_resource_opts)
|
||||
}
|
||||
);
|
||||
fields(s3_aggregated_upload_parameters) ->
|
||||
lists:append([
|
||||
[
|
||||
{container,
|
||||
hoconsc:mk(
|
||||
%% TODO: Support selectors once there are more than one container.
|
||||
hoconsc:union(fun
|
||||
(all_union_members) -> [?REF(s3_aggregated_container_csv)];
|
||||
({value, _Value}) -> [?REF(s3_aggregated_container_csv)]
|
||||
end),
|
||||
#{
|
||||
required => true,
|
||||
default => #{<<"type">> => <<"csv">>},
|
||||
desc => ?DESC(s3_aggregated_container)
|
||||
}
|
||||
)},
|
||||
{aggregation,
|
||||
hoconsc:mk(
|
||||
?REF(s3_aggregation),
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC(s3_aggregation)
|
||||
}
|
||||
)}
|
||||
],
|
||||
emqx_resource_schema:override(emqx_s3_schema:fields(s3_upload), [
|
||||
{key, #{desc => ?DESC(s3_aggregated_upload_key)}}
|
||||
]),
|
||||
emqx_s3_schema:fields(s3_uploader)
|
||||
]);
|
||||
fields(s3_aggregated_container_csv) ->
|
||||
[
|
||||
{type,
|
||||
hoconsc:mk(
|
||||
csv,
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC(s3_aggregated_container_csv)
|
||||
}
|
||||
)},
|
||||
{column_order,
|
||||
hoconsc:mk(
|
||||
hoconsc:array(string()),
|
||||
#{
|
||||
required => false,
|
||||
default => [],
|
||||
desc => ?DESC(s3_aggregated_container_csv_column_order)
|
||||
}
|
||||
)}
|
||||
];
|
||||
fields(s3_aggregation) ->
|
||||
[
|
||||
%% TODO: Needs bucketing? (e.g. messages falling in this 1h interval)
|
||||
{time_interval,
|
||||
hoconsc:mk(
|
||||
emqx_schema:duration_s(),
|
||||
#{
|
||||
required => false,
|
||||
default => <<"1h">>,
|
||||
desc => ?DESC(s3_aggregation_interval)
|
||||
}
|
||||
)},
|
||||
{max_records,
|
||||
hoconsc:mk(
|
||||
pos_integer(),
|
||||
#{
|
||||
required => false,
|
||||
default => <<"1000000">>,
|
||||
desc => ?DESC(s3_aggregation_max_records)
|
||||
}
|
||||
)}
|
||||
];
|
||||
fields(s3_aggreg_upload_resource_opts) ->
|
||||
%% NOTE: This action should benefit from generous batching defaults.
|
||||
emqx_bridge_v2_schema:action_resource_opts_fields([
|
||||
{batch_size, #{default => ?DEFAULT_BATCH_SIZE}},
|
||||
{batch_time, #{default => ?DEFAULT_BATCH_TIME}}
|
||||
]).
|
||||
|
||||
desc(Name) when
|
||||
Name == s3_aggregated_upload;
|
||||
Name == s3_aggregated_upload_parameters;
|
||||
Name == s3_aggregation;
|
||||
Name == s3_aggregated_container_csv
|
||||
->
|
||||
?DESC(Name);
|
||||
desc(s3_aggreg_upload_resource_opts) ->
|
||||
?DESC(emqx_resource_schema, resource_opts);
|
||||
desc(_Name) ->
|
||||
undefined.
|
||||
|
||||
%% Interpreting options
|
||||
|
||||
-spec mk_key_template(_Parameters :: map()) -> emqx_template:str().
|
||||
mk_key_template(#{key := Key}) ->
|
||||
Template = emqx_template:parse(Key),
|
||||
{_, BindingErrors} = emqx_template:render(Template, #{}),
|
||||
{UsedBindings, _} = lists:unzip(BindingErrors),
|
||||
SuffixTemplate = mk_suffix_template(UsedBindings),
|
||||
case emqx_template:is_const(SuffixTemplate) of
|
||||
true ->
|
||||
Template;
|
||||
false ->
|
||||
Template ++ SuffixTemplate
|
||||
end.
|
||||
|
||||
mk_suffix_template(UsedBindings) ->
|
||||
RequiredBindings = ["action", "node", "datetime.", "sequence"],
|
||||
SuffixBindings = [
|
||||
mk_default_binding(RB)
|
||||
|| RB <- RequiredBindings,
|
||||
lists:all(fun(UB) -> string:prefix(UB, RB) == nomatch end, UsedBindings)
|
||||
],
|
||||
SuffixTemplate = [["/", B] || B <- SuffixBindings],
|
||||
emqx_template:parse(SuffixTemplate).
|
||||
|
||||
mk_default_binding("datetime.") ->
|
||||
"${datetime.rfc3339utc}";
|
||||
mk_default_binding(Binding) ->
|
||||
"${" ++ Binding ++ "}".
|
||||
|
||||
-spec mk_upload_options(_Parameters :: map()) -> emqx_s3_client:upload_options().
|
||||
mk_upload_options(Parameters) ->
|
||||
Headers = mk_upload_headers(Parameters),
|
||||
#{
|
||||
headers => Headers,
|
||||
acl => maps:get(acl, Parameters, undefined)
|
||||
}.
|
||||
|
||||
mk_upload_headers(Parameters = #{container := Container}) ->
|
||||
Headers = normalize_headers(maps:get(headers, Parameters, #{})),
|
||||
ContainerHeaders = mk_container_headers(Container),
|
||||
maps:merge(ContainerHeaders, Headers).
|
||||
|
||||
normalize_headers(Headers) ->
|
||||
maps:fold(
|
||||
fun(Header, Value, Acc) ->
|
||||
maps:put(string:lowercase(emqx_utils_conv:str(Header)), Value, Acc)
|
||||
end,
|
||||
#{},
|
||||
Headers
|
||||
).
|
||||
|
||||
mk_container_headers(#{type := csv}) ->
|
||||
#{"content-type" => "text/csv"};
|
||||
mk_container_headers(#{}) ->
|
||||
#{}.
|
||||
|
||||
%% Examples
|
||||
|
||||
bridge_v2_examples(Method) ->
|
||||
[
|
||||
#{
|
||||
<<"s3_aggregated_upload">> => #{
|
||||
summary => <<"S3 Aggregated Upload">>,
|
||||
value => s3_action_example(Method)
|
||||
}
|
||||
}
|
||||
].
|
||||
|
||||
s3_action_example(post) ->
|
||||
maps:merge(
|
||||
s3_action_example(put),
|
||||
#{
|
||||
type => atom_to_binary(?ACTION_UPLOAD),
|
||||
name => <<"my_s3_action">>
|
||||
}
|
||||
);
|
||||
s3_action_example(get) ->
|
||||
maps:merge(
|
||||
s3_action_example(put),
|
||||
#{
|
||||
status => <<"connected">>,
|
||||
node_status => [
|
||||
#{
|
||||
node => <<"emqx@localhost">>,
|
||||
status => <<"connected">>
|
||||
}
|
||||
]
|
||||
}
|
||||
);
|
||||
s3_action_example(put) ->
|
||||
#{
|
||||
enable => true,
|
||||
connector => <<"my_s3_connector">>,
|
||||
description => <<"My action">>,
|
||||
parameters => #{
|
||||
bucket => <<"mqtt-aggregated">>,
|
||||
key => <<"${action}/${node}/${datetime.rfc3339utc}_N${sequence}.csv">>,
|
||||
acl => <<"public_read">>,
|
||||
aggregation => #{
|
||||
time_interval => <<"15m">>,
|
||||
max_records => 100_000
|
||||
},
|
||||
<<"container">> => #{
|
||||
type => <<"csv">>,
|
||||
column_order => [<<"clientid">>, <<"topic">>, <<"publish_received_at">>]
|
||||
}
|
||||
},
|
||||
resource_opts => #{
|
||||
health_check_interval => <<"10s">>,
|
||||
query_mode => <<"async">>,
|
||||
inflight_window => 100
|
||||
}
|
||||
}.
|
|
@ -0,0 +1,21 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_aggreg_upload_action_info).
|
||||
|
||||
-behaviour(emqx_action_info).
|
||||
|
||||
-include("emqx_bridge_s3.hrl").
|
||||
|
||||
-export([
|
||||
action_type_name/0,
|
||||
connector_type_name/0,
|
||||
schema_module/0
|
||||
]).
|
||||
|
||||
action_type_name() -> ?ACTION_AGGREGATED_UPLOAD.
|
||||
|
||||
connector_type_name() -> s3.
|
||||
|
||||
schema_module() -> emqx_bridge_s3_aggreg_upload.
|
|
@ -0,0 +1,72 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_aggreg_upload_sup).
|
||||
|
||||
-export([
|
||||
start_link/3,
|
||||
start_link_delivery_sup/2
|
||||
]).
|
||||
|
||||
-export([
|
||||
start_delivery/2,
|
||||
start_delivery_proc/3
|
||||
]).
|
||||
|
||||
-behaviour(supervisor).
|
||||
-export([init/1]).
|
||||
|
||||
-define(SUPREF(NAME), {via, gproc, {n, l, {?MODULE, NAME}}}).
|
||||
|
||||
%%
|
||||
|
||||
start_link(Name, AggregOpts, DeliveryOpts) ->
|
||||
supervisor:start_link(?MODULE, {root, Name, AggregOpts, DeliveryOpts}).
|
||||
|
||||
start_link_delivery_sup(Name, DeliveryOpts) ->
|
||||
supervisor:start_link(?SUPREF(Name), ?MODULE, {delivery, Name, DeliveryOpts}).
|
||||
|
||||
%%
|
||||
|
||||
start_delivery(Name, Buffer) ->
|
||||
supervisor:start_child(?SUPREF(Name), [Buffer]).
|
||||
|
||||
start_delivery_proc(Name, DeliveryOpts, Buffer) ->
|
||||
emqx_bridge_s3_aggreg_delivery:start_link(Name, Buffer, DeliveryOpts).
|
||||
|
||||
%%
|
||||
|
||||
init({root, Name, AggregOpts, DeliveryOpts}) ->
|
||||
SupFlags = #{
|
||||
strategy => one_for_one,
|
||||
intensity => 10,
|
||||
period => 5
|
||||
},
|
||||
AggregatorChildSpec = #{
|
||||
id => aggregator,
|
||||
start => {emqx_bridge_s3_aggregator, start_link, [Name, AggregOpts]},
|
||||
type => worker,
|
||||
restart => permanent
|
||||
},
|
||||
DeliverySupChildSpec = #{
|
||||
id => delivery_sup,
|
||||
start => {?MODULE, start_link_delivery_sup, [Name, DeliveryOpts]},
|
||||
type => supervisor,
|
||||
restart => permanent
|
||||
},
|
||||
{ok, {SupFlags, [DeliverySupChildSpec, AggregatorChildSpec]}};
|
||||
init({delivery, Name, DeliveryOpts}) ->
|
||||
SupFlags = #{
|
||||
strategy => simple_one_for_one,
|
||||
intensity => 100,
|
||||
period => 5
|
||||
},
|
||||
ChildSpec = #{
|
||||
id => delivery,
|
||||
start => {?MODULE, start_delivery_proc, [Name, DeliveryOpts]},
|
||||
type => worker,
|
||||
restart => temporary,
|
||||
shutdown => 1000
|
||||
},
|
||||
{ok, {SupFlags, [ChildSpec]}}.
|
|
@ -0,0 +1,486 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
%% This module manages buffers for aggregating records and offloads them
|
||||
%% to separate "delivery" processes when they are full or time interval
|
||||
%% is over.
|
||||
-module(emqx_bridge_s3_aggregator).
|
||||
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("snabbkaffe/include/trace.hrl").
|
||||
|
||||
-include("emqx_bridge_s3_aggregator.hrl").
|
||||
|
||||
-export([
|
||||
start_link/2,
|
||||
push_records/3,
|
||||
tick/2,
|
||||
take_error/1
|
||||
]).
|
||||
|
||||
-behaviour(gen_server).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2
|
||||
]).
|
||||
|
||||
-export_type([
|
||||
record/0,
|
||||
timestamp/0
|
||||
]).
|
||||
|
||||
%% Record.
|
||||
-type record() :: #{binary() => _}.
|
||||
|
||||
%% Unix timestamp, seconds since epoch.
|
||||
-type timestamp() :: _Seconds :: non_neg_integer().
|
||||
|
||||
%%
|
||||
|
||||
-define(VSN, 1).
|
||||
-define(SRVREF(NAME), {via, gproc, {n, l, {?MODULE, NAME}}}).
|
||||
|
||||
%%
|
||||
|
||||
start_link(Name, Opts) ->
|
||||
gen_server:start_link(?SRVREF(Name), ?MODULE, mk_state(Name, Opts), []).
|
||||
|
||||
push_records(Name, Timestamp, Records = [_ | _]) ->
|
||||
%% FIXME: Error feedback.
|
||||
case pick_buffer(Name, Timestamp) of
|
||||
undefined ->
|
||||
BufferNext = next_buffer(Name, Timestamp),
|
||||
write_records_limited(Name, BufferNext, Records);
|
||||
Buffer ->
|
||||
write_records_limited(Name, Buffer, Records)
|
||||
end;
|
||||
push_records(_Name, _Timestamp, []) ->
|
||||
ok.
|
||||
|
||||
tick(Name, Timestamp) ->
|
||||
case pick_buffer(Name, Timestamp) of
|
||||
#buffer{} ->
|
||||
ok;
|
||||
_Outdated ->
|
||||
send_close_buffer(Name, Timestamp)
|
||||
end.
|
||||
|
||||
take_error(Name) ->
|
||||
gen_server:call(?SRVREF(Name), take_error).
|
||||
|
||||
%%
|
||||
|
||||
write_records_limited(Name, Buffer = #buffer{max_records = undefined}, Records) ->
|
||||
write_records(Name, Buffer, Records);
|
||||
write_records_limited(Name, Buffer = #buffer{max_records = MaxRecords}, Records) ->
|
||||
NR = length(Records),
|
||||
case inc_num_records(Buffer, NR) of
|
||||
NR ->
|
||||
%% NOTE: Allow unconditionally if it's the first write.
|
||||
write_records(Name, Buffer, Records);
|
||||
NWritten when NWritten > MaxRecords ->
|
||||
NextBuffer = rotate_buffer(Name, Buffer),
|
||||
write_records_limited(Name, NextBuffer, Records);
|
||||
_ ->
|
||||
write_records(Name, Buffer, Records)
|
||||
end.
|
||||
|
||||
write_records(Name, Buffer = #buffer{fd = Writer}, Records) ->
|
||||
case emqx_bridge_s3_aggreg_buffer:write(Records, Writer) of
|
||||
ok ->
|
||||
?tp(s3_aggreg_records_written, #{action => Name, records => Records}),
|
||||
ok;
|
||||
{error, terminated} ->
|
||||
BufferNext = rotate_buffer(Name, Buffer),
|
||||
write_records(Name, BufferNext, Records);
|
||||
{error, _} = Error ->
|
||||
Error
|
||||
end.
|
||||
|
||||
inc_num_records(#buffer{cnt_records = Counter}, Size) ->
|
||||
inc_counter(Counter, Size).
|
||||
|
||||
next_buffer(Name, Timestamp) ->
|
||||
gen_server:call(?SRVREF(Name), {next_buffer, Timestamp}).
|
||||
|
||||
rotate_buffer(Name, #buffer{fd = FD}) ->
|
||||
gen_server:call(?SRVREF(Name), {rotate_buffer, FD}).
|
||||
|
||||
send_close_buffer(Name, Timestamp) ->
|
||||
gen_server:cast(?SRVREF(Name), {close_buffer, Timestamp}).
|
||||
|
||||
%%
|
||||
|
||||
-record(st, {
|
||||
name :: _Name,
|
||||
tab :: ets:tid() | undefined,
|
||||
buffer :: buffer() | undefined,
|
||||
queued :: buffer() | undefined,
|
||||
deliveries = #{} :: #{reference() => buffer()},
|
||||
errors = queue:new() :: queue:queue(_Error),
|
||||
interval :: emqx_schema:duration_s(),
|
||||
max_records :: pos_integer(),
|
||||
work_dir :: file:filename()
|
||||
}).
|
||||
|
||||
-type state() :: #st{}.
|
||||
|
||||
mk_state(Name, Opts) ->
|
||||
Interval = maps:get(time_interval, Opts),
|
||||
MaxRecords = maps:get(max_records, Opts),
|
||||
WorkDir = maps:get(work_dir, Opts),
|
||||
ok = ensure_workdir(WorkDir),
|
||||
#st{
|
||||
name = Name,
|
||||
interval = Interval,
|
||||
max_records = MaxRecords,
|
||||
work_dir = WorkDir
|
||||
}.
|
||||
|
||||
ensure_workdir(WorkDir) ->
|
||||
%% NOTE
|
||||
%% Writing MANIFEST as a means to ensure the work directory is writable. It's not
|
||||
%% (yet) read back because there's only one version of the implementation.
|
||||
ok = filelib:ensure_path(WorkDir),
|
||||
ok = write_manifest(WorkDir).
|
||||
|
||||
write_manifest(WorkDir) ->
|
||||
Manifest = #{<<"version">> => ?VSN},
|
||||
file:write_file(filename:join(WorkDir, "MANIFEST"), hocon_pp:do(Manifest, #{})).
|
||||
|
||||
%%
|
||||
|
||||
-spec init(state()) -> {ok, state()}.
|
||||
init(St0 = #st{name = Name}) ->
|
||||
_ = erlang:process_flag(trap_exit, true),
|
||||
St1 = St0#st{tab = create_tab(Name)},
|
||||
St = recover(St1),
|
||||
_ = announce_current_buffer(St),
|
||||
{ok, St}.
|
||||
|
||||
handle_call({next_buffer, Timestamp}, _From, St0) ->
|
||||
St = #st{buffer = Buffer} = handle_next_buffer(Timestamp, St0),
|
||||
{reply, Buffer, St, 0};
|
||||
handle_call({rotate_buffer, FD}, _From, St0) ->
|
||||
St = #st{buffer = Buffer} = handle_rotate_buffer(FD, St0),
|
||||
{reply, Buffer, St, 0};
|
||||
handle_call(take_error, _From, St0) ->
|
||||
{MaybeError, St} = handle_take_error(St0),
|
||||
{reply, MaybeError, St}.
|
||||
|
||||
handle_cast({close_buffer, Timestamp}, St) ->
|
||||
{noreply, handle_close_buffer(Timestamp, St)};
|
||||
handle_cast(_Cast, St) ->
|
||||
{noreply, St}.
|
||||
|
||||
handle_info(timeout, St) ->
|
||||
{noreply, handle_queued_buffer(St)};
|
||||
handle_info({'DOWN', MRef, _, Pid, Reason}, St0 = #st{name = Name, deliveries = Ds0}) ->
|
||||
case maps:take(MRef, Ds0) of
|
||||
{Buffer, Ds} ->
|
||||
St = St0#st{deliveries = Ds},
|
||||
{noreply, handle_delivery_exit(Buffer, Reason, St)};
|
||||
error ->
|
||||
?SLOG(notice, #{
|
||||
msg => "unexpected_down_signal",
|
||||
action => Name,
|
||||
pid => Pid,
|
||||
reason => Reason
|
||||
}),
|
||||
{noreply, St0}
|
||||
end;
|
||||
handle_info(_Msg, St) ->
|
||||
{noreply, St}.
|
||||
|
||||
terminate(_Reason, #st{name = Name}) ->
|
||||
cleanup_tab(Name).
|
||||
|
||||
%%
|
||||
|
||||
handle_next_buffer(Timestamp, St = #st{buffer = #buffer{until = Until}}) when Timestamp < Until ->
|
||||
St;
|
||||
handle_next_buffer(Timestamp, St0 = #st{buffer = Buffer = #buffer{since = PrevSince}}) ->
|
||||
BufferClosed = close_buffer(Buffer),
|
||||
St = enqueue_closed_buffer(BufferClosed, St0),
|
||||
handle_next_buffer(Timestamp, PrevSince, St);
|
||||
handle_next_buffer(Timestamp, St = #st{buffer = undefined}) ->
|
||||
handle_next_buffer(Timestamp, Timestamp, St).
|
||||
|
||||
handle_next_buffer(Timestamp, PrevSince, St0) ->
|
||||
NextBuffer = allocate_next_buffer(Timestamp, PrevSince, St0),
|
||||
St = St0#st{buffer = NextBuffer},
|
||||
_ = announce_current_buffer(St),
|
||||
St.
|
||||
|
||||
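%% NOTE
%% A rotation request carries the FD of the buffer that the writer observed as
%% full. If it no longer matches the current buffer's FD, the buffer has already
%% been rotated by a concurrent request, and this call is a no-op.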
handle_rotate_buffer(
|
||||
FD,
|
||||
St0 = #st{buffer = Buffer = #buffer{since = Since, seq = Seq, fd = FD}}
|
||||
) ->
|
||||
BufferClosed = close_buffer(Buffer),
|
||||
NextBuffer = allocate_buffer(Since, Seq + 1, St0),
|
||||
St = enqueue_closed_buffer(BufferClosed, St0#st{buffer = NextBuffer}),
|
||||
_ = announce_current_buffer(St),
|
||||
St;
|
||||
handle_rotate_buffer(_ClosedFD, St) ->
|
||||
St.
|
||||
|
||||
enqueue_closed_buffer(Buffer, St = #st{queued = undefined}) ->
|
||||
St#st{queued = Buffer};
|
||||
enqueue_closed_buffer(Buffer, St0) ->
|
||||
%% NOTE: Should never really happen unless interval / max records are too tight.
|
||||
St = handle_queued_buffer(St0),
|
||||
St#st{queued = Buffer}.
|
||||
|
||||
handle_queued_buffer(St = #st{queued = undefined}) ->
|
||||
St;
|
||||
handle_queued_buffer(St = #st{queued = Buffer}) ->
|
||||
enqueue_delivery(Buffer, St#st{queued = undefined}).
|
||||
|
||||
allocate_next_buffer(Timestamp, PrevSince, St = #st{interval = Interval}) ->
|
||||
Since = compute_since(Timestamp, PrevSince, Interval),
|
||||
allocate_buffer(Since, 0, St).
|
||||
|
||||
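%% NOTE
%% Aligns `Timestamp` to the start of its time window, counting whole windows of
%% `Interval` from `PrevSince`. For example, with PrevSince = 1000, Interval = 300
%% and Timestamp = 1730: 1730 - (730 rem 300) = 1600, i.e. the third window.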
compute_since(Timestamp, PrevSince, Interval) ->
|
||||
Timestamp - (Timestamp - PrevSince) rem Interval.
|
||||
|
||||
allocate_buffer(Since, Seq, St = #st{name = Name}) ->
|
||||
Buffer = #buffer{filename = Filename, cnt_records = Counter} = mk_buffer(Since, Seq, St),
|
||||
{ok, FD} = file:open(Filename, [write, binary]),
|
||||
Writer = emqx_bridge_s3_aggreg_buffer:new_writer(FD, _Meta = []),
|
||||
_ = add_counter(Counter),
|
||||
?tp(s3_aggreg_buffer_allocated, #{action => Name, filename => Filename}),
|
||||
Buffer#buffer{fd = Writer}.
|
||||
|
||||
recover_buffer(Buffer = #buffer{filename = Filename, cnt_records = Counter}) ->
|
||||
{ok, FD} = file:open(Filename, [read, write, binary]),
|
||||
case recover_buffer_writer(FD, Filename) of
|
||||
{ok, Writer, NWritten} ->
|
||||
_ = add_counter(Counter, NWritten),
|
||||
Buffer#buffer{fd = Writer};
|
||||
{error, Reason} ->
|
||||
?SLOG(warning, #{
|
||||
msg => "existing_buffer_recovery_failed",
|
||||
filename => Filename,
|
||||
reason => Reason,
|
||||
details => "Buffer is corrupted beyond repair, will be discarded."
|
||||
}),
|
||||
_ = file:close(FD),
|
||||
_ = file:delete(Filename),
|
||||
undefined
|
||||
end.
|
||||
|
||||
recover_buffer_writer(FD, Filename) ->
|
||||
try emqx_bridge_s3_aggreg_buffer:new_reader(FD) of
|
||||
{_Meta, Reader} -> recover_buffer_writer(FD, Filename, Reader, 0)
|
||||
catch
|
||||
error:Reason ->
|
||||
{error, Reason}
|
||||
end.
|
||||
|
||||
recover_buffer_writer(FD, Filename, Reader0, NWritten) ->
|
||||
try emqx_bridge_s3_aggreg_buffer:read(Reader0) of
|
||||
{Records, Reader} when is_list(Records) ->
|
||||
recover_buffer_writer(FD, Filename, Reader, NWritten + length(Records));
|
||||
{Unexpected, _Reader} ->
|
||||
%% Buffer is corrupted, should be discarded.
|
||||
{error, {buffer_unexpected_record, Unexpected}};
|
||||
eof ->
|
||||
%% Buffer is fine, continue writing at the end.
|
||||
{ok, FD, NWritten}
|
||||
catch
|
||||
error:Reason ->
|
||||
%% Buffer is truncated or corrupted somewhere in the middle.
|
||||
%% Continue writing after the last valid record.
|
||||
?SLOG(warning, #{
|
||||
msg => "existing_buffer_recovered_partially",
|
||||
filename => Filename,
|
||||
reason => Reason,
|
||||
details =>
|
||||
"Buffer is truncated or corrupted somewhere in the middle. "
|
||||
"Corrupted records will be discarded."
|
||||
}),
|
||||
Writer = emqx_bridge_s3_aggreg_buffer:takeover(Reader0),
|
||||
{ok, Writer, NWritten}
|
||||
end.
|
||||
|
||||
mk_buffer(
|
||||
Since,
|
||||
Seq,
|
||||
#st{tab = Tab, interval = Interval, max_records = MaxRecords, work_dir = WorkDir}
|
||||
) ->
|
||||
Name = mk_filename(Since, Seq),
|
||||
Counter = {Tab, {Since, Seq}},
|
||||
#buffer{
|
||||
since = Since,
|
||||
until = Since + Interval,
|
||||
seq = Seq,
|
||||
filename = filename:join(WorkDir, Name),
|
||||
max_records = MaxRecords,
|
||||
cnt_records = Counter
|
||||
}.
|
||||
|
||||
handle_close_buffer(
|
||||
Timestamp,
|
||||
St0 = #st{buffer = Buffer = #buffer{until = Until}}
|
||||
) when Timestamp >= Until ->
|
||||
St = St0#st{buffer = undefined},
|
||||
_ = announce_current_buffer(St),
|
||||
enqueue_delivery(close_buffer(Buffer), St);
|
||||
handle_close_buffer(_Timestamp, St = #st{buffer = undefined}) ->
|
||||
St.
|
||||
|
||||
close_buffer(Buffer = #buffer{fd = FD}) ->
|
||||
ok = file:close(FD),
|
||||
Buffer#buffer{fd = undefined}.
|
||||
|
||||
discard_buffer(#buffer{filename = Filename, cnt_records = Counter}) ->
|
||||
%% NOTE: Hopefully, no process is touching this counter anymore.
|
||||
_ = del_counter(Counter),
|
||||
file:delete(Filename).
|
||||
|
||||
pick_buffer(Name, Timestamp) ->
|
||||
case lookup_current_buffer(Name) of
|
||||
#buffer{until = Until} = Buffer when Timestamp < Until ->
|
||||
Buffer;
|
||||
#buffer{since = Since} when Timestamp < Since ->
|
||||
%% TODO: Support timestamps going back.
|
||||
error({invalid_timestamp, Timestamp});
|
||||
_Outdated ->
|
||||
undefined
|
||||
end.
|
||||
|
||||
announce_current_buffer(#st{tab = Tab, buffer = Buffer}) ->
|
||||
ets:insert(Tab, {buffer, Buffer}).
|
||||
|
||||
lookup_current_buffer(Name) ->
|
||||
ets:lookup_element(lookup_tab(Name), buffer, 2).
|
||||
|
||||
%%
|
||||
|
||||
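%% NOTE
%% Each delivery runs in its own process under the upload supervisor. The buffer
%% is kept in `deliveries`, keyed by the monitor reference, until the
%% corresponding 'DOWN' message is handled in `handle_info/2`.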
enqueue_delivery(Buffer, St = #st{name = Name, deliveries = Ds}) ->
|
||||
{ok, Pid} = emqx_bridge_s3_aggreg_upload_sup:start_delivery(Name, Buffer),
|
||||
MRef = erlang:monitor(process, Pid),
|
||||
St#st{deliveries = Ds#{MRef => Buffer}}.
|
||||
|
||||
handle_delivery_exit(Buffer, Normal, St = #st{name = Name}) when
|
||||
Normal == normal; Normal == noproc
|
||||
->
|
||||
?SLOG(debug, #{
|
||||
msg => "aggregated_buffer_delivery_completed",
|
||||
action => Name,
|
||||
buffer => Buffer#buffer.filename
|
||||
}),
|
||||
ok = discard_buffer(Buffer),
|
||||
St;
|
||||
handle_delivery_exit(Buffer, {shutdown, {skipped, Reason}}, St = #st{name = Name}) ->
|
||||
?SLOG(info, #{
|
||||
msg => "aggregated_buffer_delivery_skipped",
|
||||
action => Name,
|
||||
buffer => {Buffer#buffer.since, Buffer#buffer.seq},
|
||||
reason => Reason
|
||||
}),
|
||||
ok = discard_buffer(Buffer),
|
||||
St;
|
||||
handle_delivery_exit(Buffer, Error, St = #st{name = Name}) ->
|
||||
?SLOG(error, #{
|
||||
msg => "aggregated_buffer_delivery_failed",
|
||||
action => Name,
|
||||
buffer => {Buffer#buffer.since, Buffer#buffer.seq},
|
||||
filename => Buffer#buffer.filename,
|
||||
reason => Error
|
||||
}),
|
||||
%% TODO: Retries?
|
||||
enqueue_status_error(Error, St).
|
||||
|
||||
enqueue_status_error({upload_failed, Error}, St = #st{errors = QErrors}) ->
|
||||
%% TODO
|
||||
%% This code feels too specific, errors probably need classification.
|
||||
St#st{errors = queue:in(Error, QErrors)};
|
||||
enqueue_status_error(_AnotherError, St) ->
|
||||
St.
|
||||
|
||||
handle_take_error(St = #st{errors = QErrors0}) ->
|
||||
case queue:out(QErrors0) of
|
||||
{{value, Error}, QErrors} ->
|
||||
{[Error], St#st{errors = QErrors}};
|
||||
{empty, QErrors} ->
|
||||
{[], St#st{errors = QErrors}}
|
||||
end.
|
||||
|
||||
%%
|
||||
|
||||
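%% NOTE
%% Buffers left over from a previous incarnation are sorted by window start: the
%% most recent one is reopened as the current buffer, while the older ones are
%% handed off to delivery right away.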
recover(St0 = #st{work_dir = WorkDir}) ->
|
||||
{ok, Filenames} = file:list_dir(WorkDir),
|
||||
ExistingBuffers = lists:flatmap(fun(FN) -> read_existing_file(FN, St0) end, Filenames),
|
||||
case lists:reverse(lists:keysort(#buffer.since, ExistingBuffers)) of
|
||||
[Buffer | ClosedBuffers] ->
|
||||
St = lists:foldl(fun enqueue_delivery/2, St0, ClosedBuffers),
|
||||
St#st{buffer = recover_buffer(Buffer)};
|
||||
[] ->
|
||||
St0
|
||||
end.
|
||||
|
||||
read_existing_file("MANIFEST", _St) ->
|
||||
[];
|
||||
read_existing_file(Name, St) ->
|
||||
case parse_filename(Name) of
|
||||
{Since, Seq} ->
|
||||
[read_existing_buffer(Since, Seq, Name, St)];
|
||||
error ->
|
||||
%% TODO: log?
|
||||
[]
|
||||
end.
|
||||
|
||||
read_existing_buffer(Since, Seq, Name, St = #st{work_dir = WorkDir}) ->
|
||||
Filename = filename:join(WorkDir, Name),
|
||||
Buffer = mk_buffer(Since, Seq, St),
|
||||
Buffer#buffer{filename = Filename}.
|
||||
|
||||
%%
|
||||
|
||||
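%% NOTE
%% Buffer filenames have the form "T<Since>_<Seq>", e.g. "T1714219200_0003", so
%% that `parse_filename/1` can recover both the window start and the rotation
%% sequence number during `recover/1`.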
mk_filename(Since, Seq) ->
|
||||
"T" ++ integer_to_list(Since) ++ "_" ++ pad_number(Seq, 4).
|
||||
|
||||
parse_filename(Filename) ->
|
||||
case re:run(Filename, "^T(\\d+)_(\\d+)$", [{capture, all_but_first, list}]) of
|
||||
{match, [Since, Seq]} ->
|
||||
{list_to_integer(Since), list_to_integer(Seq)};
|
||||
nomatch ->
|
||||
error
|
||||
end.
|
||||
|
||||
%%
|
||||
|
||||
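%% NOTE
%% Counters live in the public ETS table created in `create_tab/1`, so writer
%% processes can bump per-buffer record counts directly instead of going through
%% this server.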
add_counter({Tab, Counter}) ->
|
||||
add_counter({Tab, Counter}, 0).
|
||||
|
||||
add_counter({Tab, Counter}, N) ->
|
||||
ets:insert(Tab, {Counter, N}).
|
||||
|
||||
inc_counter({Tab, Counter}, Size) ->
|
||||
ets:update_counter(Tab, Counter, {2, Size}).
|
||||
|
||||
del_counter({Tab, Counter}) ->
|
||||
ets:delete(Tab, Counter).
|
||||
|
||||
%%
|
||||
|
||||
create_tab(Name) ->
|
||||
Tab = ets:new(?MODULE, [public, set, {write_concurrency, auto}]),
|
||||
ok = persistent_term:put({?MODULE, Name}, Tab),
|
||||
Tab.
|
||||
|
||||
lookup_tab(Name) ->
|
||||
persistent_term:get({?MODULE, Name}).
|
||||
|
||||
cleanup_tab(Name) ->
|
||||
persistent_term:erase({?MODULE, Name}).
|
||||
|
||||
%%
|
||||
|
||||
pad_number(I, L) ->
|
||||
string:pad(integer_to_list(I), L, leading, $0).
|
|
@@ -0,0 +1,15 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-record(buffer, {
|
||||
since :: emqx_bridge_s3_aggregator:timestamp(),
|
||||
until :: emqx_bridge_s3_aggregator:timestamp(),
|
||||
seq :: non_neg_integer(),
|
||||
filename :: file:filename(),
|
||||
fd :: file:io_device() | undefined,
|
||||
max_records :: pos_integer() | undefined,
|
||||
cnt_records :: {ets:tab(), _Counter} | undefined
|
||||
}).
|
||||
|
||||
-type buffer() :: #buffer{}.
|
|
@@ -0,0 +1,16 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_app).
|
||||
|
||||
-behaviour(application).
|
||||
-export([start/2, stop/1]).
|
||||
|
||||
%%
|
||||
|
||||
start(_StartType, _StartArgs) ->
|
||||
emqx_bridge_s3_sup:start_link().
|
||||
|
||||
stop(_State) ->
|
||||
ok.
|
|
@@ -7,6 +7,7 @@
|
|||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("snabbkaffe/include/trace.hrl").
|
||||
-include_lib("emqx_resource/include/emqx_resource.hrl").
|
||||
-include("emqx_bridge_s3.hrl").
|
||||
|
||||
-behaviour(emqx_resource).
|
||||
-export([
|
||||
|
@@ -17,7 +18,7 @@
|
|||
on_remove_channel/3,
|
||||
on_get_channels/1,
|
||||
on_query/3,
|
||||
% on_batch_query/3,
|
||||
on_batch_query/3,
|
||||
on_get_status/2,
|
||||
on_get_channel_status/3
|
||||
]).
|
||||
|
@@ -31,12 +32,31 @@
|
|||
}.
|
||||
|
||||
-type channel_config() :: #{
|
||||
parameters := #{
|
||||
bridge_type := binary(),
|
||||
parameters := s3_upload_parameters() | s3_aggregated_upload_parameters()
|
||||
}.
|
||||
|
||||
-type s3_upload_parameters() :: #{
|
||||
bucket := string(),
|
||||
key := string(),
|
||||
content := string(),
|
||||
acl => emqx_s3:acl()
|
||||
}
|
||||
}.
|
||||
|
||||
-type s3_aggregated_upload_parameters() :: #{
|
||||
bucket := string(),
|
||||
key := string(),
|
||||
acl => emqx_s3:acl(),
|
||||
aggregation => #{
|
||||
time_interval := emqx_schema:duration_s(),
|
||||
max_records := pos_integer()
|
||||
},
|
||||
container := #{
|
||||
type := csv,
|
||||
column_order => [string()]
|
||||
},
|
||||
min_part_size := emqx_schema:bytesize(),
|
||||
max_part_size := emqx_schema:bytesize()
|
||||
}.
|
||||
|
||||
-type channel_state() :: #{
|
||||
|
@@ -123,12 +143,13 @@ on_get_status(_InstId, State = #{client_config := Config}) ->
|
|||
-spec on_add_channel(_InstanceId :: resource_id(), state(), channel_id(), channel_config()) ->
|
||||
{ok, state()} | {error, _Reason}.
|
||||
on_add_channel(_InstId, State = #{channels := Channels}, ChannelId, Config) ->
|
||||
ChannelState = init_channel_state(Config),
|
||||
ChannelState = start_channel(State, Config),
|
||||
{ok, State#{channels => Channels#{ChannelId => ChannelState}}}.
|
||||
|
||||
-spec on_remove_channel(_InstanceId :: resource_id(), state(), channel_id()) ->
|
||||
{ok, state()}.
|
||||
on_remove_channel(_InstId, State = #{channels := Channels}, ChannelId) ->
|
||||
ok = stop_channel(maps:get(ChannelId, Channels, undefined)),
|
||||
{ok, State#{channels => maps:remove(ChannelId, Channels)}}.
|
||||
|
||||
-spec on_get_channels(_InstanceId :: resource_id()) ->
|
||||
|
@@ -138,27 +159,121 @@ on_get_channels(InstId) ->
|
|||
|
||||
-spec on_get_channel_status(_InstanceId :: resource_id(), channel_id(), state()) ->
|
||||
channel_status().
|
||||
on_get_channel_status(_InstId, ChannelId, #{channels := Channels}) ->
|
||||
on_get_channel_status(_InstId, ChannelId, State = #{channels := Channels}) ->
|
||||
case maps:get(ChannelId, Channels, undefined) of
|
||||
_ChannelState = #{} ->
|
||||
%% TODO
|
||||
%% Since bucket name may be templated, we can't really provide any
|
||||
%% additional information regarding the channel health.
|
||||
?status_connected;
|
||||
ChannelState = #{} ->
|
||||
channel_status(ChannelState, State);
|
||||
undefined ->
|
||||
?status_disconnected
|
||||
end.
|
||||
|
||||
init_channel_state(#{parameters := Parameters}) ->
|
||||
#{
|
||||
bucket => emqx_template:parse(maps:get(bucket, Parameters)),
|
||||
key => emqx_template:parse(maps:get(key, Parameters)),
|
||||
content => emqx_template:parse(maps:get(content, Parameters)),
|
||||
upload_options => #{
|
||||
acl => maps:get(acl, Parameters, undefined)
|
||||
start_channel(_State, #{
|
||||
bridge_type := ?BRIDGE_TYPE_UPLOAD,
|
||||
parameters := Parameters = #{
|
||||
bucket := Bucket,
|
||||
key := Key,
|
||||
content := Content
|
||||
}
|
||||
}) ->
|
||||
#{
|
||||
type => ?ACTION_UPLOAD,
|
||||
bucket => emqx_template:parse(Bucket),
|
||||
key => emqx_template:parse(Key),
|
||||
content => emqx_template:parse(Content),
|
||||
upload_options => upload_options(Parameters)
|
||||
};
|
||||
start_channel(State, #{
|
||||
bridge_type := Type = ?BRIDGE_TYPE_AGGREGATED_UPLOAD,
|
||||
bridge_name := Name,
|
||||
parameters := Parameters = #{
|
||||
aggregation := #{
|
||||
time_interval := TimeInterval,
|
||||
max_records := MaxRecords
|
||||
},
|
||||
container := Container,
|
||||
bucket := Bucket
|
||||
}
|
||||
}) ->
|
||||
AggregOpts = #{
|
||||
time_interval => TimeInterval,
|
||||
max_records => MaxRecords,
|
||||
work_dir => work_dir(Type, Name)
|
||||
},
|
||||
DeliveryOpts = #{
|
||||
bucket => Bucket,
|
||||
key => emqx_bridge_s3_aggreg_upload:mk_key_template(Parameters),
|
||||
container => Container,
|
||||
upload_options => emqx_bridge_s3_aggreg_upload:mk_upload_options(Parameters),
|
||||
client_config => maps:get(client_config, State),
|
||||
uploader_config => maps:with([min_part_size, max_part_size], Parameters)
|
||||
},
|
||||
_ = emqx_bridge_s3_sup:delete_child({Type, Name}),
|
||||
{ok, SupPid} = emqx_bridge_s3_sup:start_child(#{
|
||||
id => {Type, Name},
|
||||
start => {emqx_bridge_s3_aggreg_upload_sup, start_link, [Name, AggregOpts, DeliveryOpts]},
|
||||
type => supervisor,
|
||||
restart => permanent
|
||||
}),
|
||||
#{
|
||||
type => ?ACTION_AGGREGATED_UPLOAD,
|
||||
name => Name,
|
||||
bucket => Bucket,
|
||||
supervisor => SupPid,
|
||||
on_stop => fun() -> emqx_bridge_s3_sup:delete_child({Type, Name}) end
|
||||
}.
|
||||
|
||||
upload_options(Parameters) ->
|
||||
#{acl => maps:get(acl, Parameters, undefined)}.
|
||||
|
||||
work_dir(Type, Name) ->
|
||||
filename:join([emqx:data_dir(), bridge, Type, Name]).
|
||||
|
||||
stop_channel(#{on_stop := OnStop}) ->
|
||||
OnStop();
|
||||
stop_channel(_ChannelState) ->
|
||||
ok.
|
||||
|
||||
channel_status(#{type := ?ACTION_UPLOAD}, _State) ->
|
||||
%% TODO
|
||||
%% Since bucket name may be templated, we can't really provide any additional
|
||||
%% information regarding the channel health.
|
||||
?status_connected;
|
||||
channel_status(#{type := ?ACTION_AGGREGATED_UPLOAD, name := Name, bucket := Bucket}, State) ->
|
||||
%% NOTE: This will effectively trigger uploads of buffers yet to be uploaded.
|
||||
Timestamp = erlang:system_time(second),
|
||||
ok = emqx_bridge_s3_aggregator:tick(Name, Timestamp),
|
||||
ok = check_bucket_accessible(Bucket, State),
|
||||
ok = check_aggreg_upload_errors(Name),
|
||||
?status_connected.
|
||||
|
||||
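%% NOTE
%% Listing a single object is used as a cheap probe: it verifies both that the
%% credentials are usable and that the bucket exists, without transferring any
%% object data.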
check_bucket_accessible(Bucket, #{client_config := Config}) ->
|
||||
case emqx_s3_client:aws_config(Config) of
|
||||
{error, Reason} ->
|
||||
throw({unhealthy_target, Reason});
|
||||
AWSConfig ->
|
||||
try erlcloud_s3:list_objects(Bucket, [{max_keys, 1}], AWSConfig) of
|
||||
Props when is_list(Props) ->
|
||||
ok
|
||||
catch
|
||||
error:{aws_error, {http_error, 404, _, _Reason}} ->
|
||||
throw({unhealthy_target, "Bucket does not exist"});
|
||||
error:{aws_error, {socket_error, Reason}} ->
|
||||
throw({unhealthy_target, emqx_utils:format(Reason)})
|
||||
end
|
||||
end.
|
||||
|
||||
check_aggreg_upload_errors(Name) ->
|
||||
case emqx_bridge_s3_aggregator:take_error(Name) of
|
||||
[Error] ->
|
||||
%% TODO
|
||||
%% This approach means that, for example, 3 upload failures will cause
|
||||
%% the channel to be marked as unhealthy for 3 consecutive health checks.
|
||||
ErrorMessage = emqx_utils:format(Error),
|
||||
throw({unhealthy_target, ErrorMessage});
|
||||
[] ->
|
||||
ok
|
||||
end.
|
||||
|
||||
%% Queries
|
||||
|
||||
-type query() :: {_Tag :: channel_id(), _Data :: emqx_jsonish:t()}.
|
||||
|
@@ -167,14 +282,28 @@ init_channel_state(#{parameters := Parameters}) ->
|
|||
{ok, _Result} | {error, _Reason}.
|
||||
on_query(InstId, {Tag, Data}, #{client_config := Config, channels := Channels}) ->
|
||||
case maps:get(Tag, Channels, undefined) of
|
||||
ChannelState = #{} ->
|
||||
run_simple_upload(InstId, Data, ChannelState, Config);
|
||||
ChannelState = #{type := ?ACTION_UPLOAD} ->
|
||||
run_simple_upload(InstId, Tag, Data, ChannelState, Config);
|
||||
ChannelState = #{type := ?ACTION_AGGREGATED_UPLOAD} ->
|
||||
run_aggregated_upload(InstId, [Data], ChannelState);
|
||||
undefined ->
|
||||
{error, {unrecoverable_error, {invalid_message_tag, Tag}}}
|
||||
end.
|
||||
|
||||
-spec on_batch_query(_InstanceId :: resource_id(), [query()], state()) ->
|
||||
{ok, _Result} | {error, _Reason}.
|
||||
on_batch_query(InstId, [{Tag, Data0} | Rest], #{channels := Channels}) ->
|
||||
case maps:get(Tag, Channels, undefined) of
|
||||
ChannelState = #{type := ?ACTION_AGGREGATED_UPLOAD} ->
|
||||
Records = [Data0 | [Data || {_, Data} <- Rest]],
|
||||
run_aggregated_upload(InstId, Records, ChannelState);
|
||||
undefined ->
|
||||
{error, {unrecoverable_error, {invalid_message_tag, Tag}}}
|
||||
end.
|
||||
|
||||
run_simple_upload(
|
||||
InstId,
|
||||
ChannelID,
|
||||
Data,
|
||||
#{
|
||||
bucket := BucketTemplate,
|
||||
|
@@ -188,6 +317,11 @@ run_simple_upload(
|
|||
Client = emqx_s3_client:create(Bucket, Config),
|
||||
Key = render_key(KeyTemplate, Data),
|
||||
Content = render_content(ContentTemplate, Data),
|
||||
emqx_trace:rendered_action_template(ChannelID, #{
|
||||
bucket => Bucket,
|
||||
key => Key,
|
||||
content => Content
|
||||
}),
|
||||
case emqx_s3_client:put_object(Client, Key, UploadOpts, Content) of
|
||||
ok ->
|
||||
?tp(s3_bridge_connector_upload_ok, #{
|
||||
|
@@ -200,6 +334,16 @@ run_simple_upload(
|
|||
{error, map_error(Reason)}
|
||||
end.
|
||||
|
||||
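%% NOTE
%% Aggregated uploads never touch S3 on the query path: records are only pushed
%% into the node-local aggregator, which batches them into buffer files and
%% uploads them later.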
run_aggregated_upload(InstId, Records, #{name := Name}) ->
|
||||
Timestamp = erlang:system_time(second),
|
||||
case emqx_bridge_s3_aggregator:push_records(Name, Timestamp, Records) of
|
||||
ok ->
|
||||
?tp(s3_bridge_aggreg_push_ok, #{instance_id => InstId, name => Name}),
|
||||
ok;
|
||||
{error, Reason} ->
|
||||
{error, {unrecoverable_error, Reason}}
|
||||
end.
|
||||
|
||||
map_error({socket_error, _} = Reason) ->
|
||||
{recoverable_error, Reason};
|
||||
map_error(Reason = {aws_error, Status, _, _Body}) when Status >= 500 ->
|
||||
|
|
|
@@ -20,7 +20,7 @@ type_name() ->
|
|||
s3.
|
||||
|
||||
bridge_types() ->
|
||||
[s3].
|
||||
[s3, s3_aggregated_upload].
|
||||
|
||||
resource_callback_module() ->
|
||||
emqx_bridge_s3_connector.
|
||||
|
|
|
@@ -0,0 +1,42 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_sup).
|
||||
|
||||
-export([
|
||||
start_link/0,
|
||||
start_child/1,
|
||||
delete_child/1
|
||||
]).
|
||||
|
||||
-behaviour(supervisor).
|
||||
-export([init/1]).
|
||||
|
||||
-define(SUPREF, ?MODULE).
|
||||
|
||||
%%
|
||||
|
||||
start_link() ->
|
||||
supervisor:start_link({local, ?SUPREF}, ?MODULE, root).
|
||||
|
||||
start_child(ChildSpec) ->
|
||||
supervisor:start_child(?SUPREF, ChildSpec).
|
||||
|
||||
delete_child(ChildId) ->
|
||||
case supervisor:terminate_child(?SUPREF, ChildId) of
|
||||
ok ->
|
||||
supervisor:delete_child(?SUPREF, ChildId);
|
||||
Error ->
|
||||
Error
|
||||
end.
|
||||
|
||||
%%
|
||||
|
||||
init(root) ->
|
||||
SupFlags = #{
|
||||
strategy => one_for_one,
|
||||
intensity => 1,
|
||||
period => 1
|
||||
},
|
||||
{ok, {SupFlags, []}}.
|
|
@@ -0,0 +1,143 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_upload).
|
||||
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
-include_lib("hocon/include/hoconsc.hrl").
|
||||
-include("emqx_bridge_s3.hrl").
|
||||
|
||||
-define(ACTION, ?ACTION_UPLOAD).
|
||||
|
||||
-behaviour(hocon_schema).
|
||||
-export([
|
||||
namespace/0,
|
||||
roots/0,
|
||||
fields/1,
|
||||
desc/1
|
||||
]).
|
||||
|
||||
-export([
|
||||
bridge_v2_examples/1
|
||||
]).
|
||||
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
%% `hocon_schema' API
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
|
||||
namespace() ->
|
||||
"bridge_s3".
|
||||
|
||||
roots() ->
|
||||
[].
|
||||
|
||||
fields(Field) when
|
||||
Field == "get_bridge_v2";
|
||||
Field == "put_bridge_v2";
|
||||
Field == "post_bridge_v2"
|
||||
->
|
||||
emqx_bridge_v2_schema:api_fields(Field, ?ACTION, fields(?ACTION));
|
||||
fields(action) ->
|
||||
{?ACTION,
|
||||
hoconsc:mk(
|
||||
hoconsc:map(name, hoconsc:ref(?MODULE, ?ACTION)),
|
||||
#{
|
||||
desc => <<"S3 Upload Action Config">>,
|
||||
required => false
|
||||
}
|
||||
)};
|
||||
fields(?ACTION) ->
|
||||
emqx_bridge_v2_schema:make_producer_action_schema(
|
||||
hoconsc:mk(
|
||||
?R_REF(s3_upload_parameters),
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC(s3_upload)
|
||||
}
|
||||
),
|
||||
#{
|
||||
resource_opts_ref => ?R_REF(s3_action_resource_opts)
|
||||
}
|
||||
);
|
||||
fields(s3_upload_parameters) ->
|
||||
emqx_s3_schema:fields(s3_upload) ++
|
||||
[
|
||||
{content,
|
||||
hoconsc:mk(
|
||||
emqx_schema:template(),
|
||||
#{
|
||||
required => false,
|
||||
default => <<"${.}">>,
|
||||
desc => ?DESC(s3_object_content)
|
||||
}
|
||||
)}
|
||||
];
|
||||
fields(s3_action_resource_opts) ->
|
||||
UnsupportedOpts = [batch_size, batch_time],
|
||||
lists:filter(
|
||||
fun({N, _}) -> not lists:member(N, UnsupportedOpts) end,
|
||||
emqx_bridge_v2_schema:action_resource_opts_fields()
|
||||
).
|
||||
|
||||
desc(s3) ->
|
||||
?DESC(s3_upload);
|
||||
desc(Name) when
|
||||
Name == s3_upload;
|
||||
Name == s3_upload_parameters
|
||||
->
|
||||
?DESC(Name);
|
||||
desc(s3_action_resource_opts) ->
|
||||
?DESC(emqx_resource_schema, resource_opts);
|
||||
desc(_Name) ->
|
||||
undefined.
|
||||
|
||||
%% Examples
|
||||
|
||||
bridge_v2_examples(Method) ->
|
||||
[
|
||||
#{
|
||||
<<"s3">> => #{
|
||||
summary => <<"S3 Simple Upload">>,
|
||||
value => s3_upload_action_example(Method)
|
||||
}
|
||||
}
|
||||
].
|
||||
|
||||
s3_upload_action_example(post) ->
|
||||
maps:merge(
|
||||
s3_upload_action_example(put),
|
||||
#{
|
||||
type => atom_to_binary(?ACTION_UPLOAD),
|
||||
name => <<"my_s3_action">>
|
||||
}
|
||||
);
|
||||
s3_upload_action_example(get) ->
|
||||
maps:merge(
|
||||
s3_upload_action_example(put),
|
||||
#{
|
||||
status => <<"connected">>,
|
||||
node_status => [
|
||||
#{
|
||||
node => <<"emqx@localhost">>,
|
||||
status => <<"connected">>
|
||||
}
|
||||
]
|
||||
}
|
||||
);
|
||||
s3_upload_action_example(put) ->
|
||||
#{
|
||||
enable => true,
|
||||
connector => <<"my_s3_connector">>,
|
||||
description => <<"My action">>,
|
||||
parameters => #{
|
||||
bucket => <<"${clientid}">>,
|
||||
key => <<"${topic}">>,
|
||||
content => <<"${payload}">>,
|
||||
acl => <<"public_read">>
|
||||
},
|
||||
resource_opts => #{
|
||||
query_mode => <<"sync">>,
|
||||
inflight_window => 10
|
||||
}
|
||||
}.
|
|
@@ -2,18 +2,20 @@
|
|||
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_action_info).
|
||||
-module(emqx_bridge_s3_upload_action_info).
|
||||
|
||||
-behaviour(emqx_action_info).
|
||||
|
||||
-include("emqx_bridge_s3.hrl").
|
||||
|
||||
-export([
|
||||
action_type_name/0,
|
||||
connector_type_name/0,
|
||||
schema_module/0
|
||||
]).
|
||||
|
||||
action_type_name() -> s3.
|
||||
action_type_name() -> ?ACTION_UPLOAD.
|
||||
|
||||
connector_type_name() -> s3.
|
||||
|
||||
schema_module() -> emqx_bridge_s3.
|
||||
schema_module() -> emqx_bridge_s3_upload.
|
|
@@ -11,8 +11,6 @@
|
|||
-include_lib("common_test/include/ct.hrl").
|
||||
-include_lib("snabbkaffe/include/test_macros.hrl").
|
||||
|
||||
-import(emqx_utils_conv, [bin/1]).
|
||||
|
||||
%% See `emqx_bridge_s3.hrl`.
|
||||
-define(BRIDGE_TYPE, <<"s3">>).
|
||||
-define(CONNECTOR_TYPE, <<"s3">>).
|
||||
|
@@ -79,7 +77,8 @@ end_per_testcase(_TestCase, _Config) ->
|
|||
|
||||
connector_config(Name, _Config) ->
|
||||
BaseConf = emqx_s3_test_helpers:base_raw_config(tcp),
|
||||
parse_and_check_config(<<"connectors">>, ?CONNECTOR_TYPE, Name, #{
|
||||
emqx_bridge_s3_test_helpers:parse_and_check_config(
|
||||
<<"connectors">>, ?CONNECTOR_TYPE, Name, #{
|
||||
<<"enable">> => true,
|
||||
<<"description">> => <<"S3 Connector">>,
|
||||
<<"host">> => emqx_utils_conv:bin(maps:get(<<"host">>, BaseConf)),
|
||||
|
@@ -100,10 +99,12 @@ connector_config(Name, _Config) ->
|
|||
<<"health_check_interval">> => <<"5s">>,
|
||||
<<"start_timeout">> => <<"5s">>
|
||||
}
|
||||
}).
|
||||
}
|
||||
).
|
||||
|
||||
action_config(Name, ConnectorId) ->
|
||||
parse_and_check_config(<<"actions">>, ?BRIDGE_TYPE, Name, #{
|
||||
emqx_bridge_s3_test_helpers:parse_and_check_config(
|
||||
<<"actions">>, ?BRIDGE_TYPE, Name, #{
|
||||
<<"enable">> => true,
|
||||
<<"connector">> => ConnectorId,
|
||||
<<"parameters">> => #{
|
||||
|
@@ -124,22 +125,8 @@ action_config(Name, ConnectorId) ->
|
|||
<<"resume_interval">> => <<"3s">>,
|
||||
<<"worker_pool_size">> => <<"4">>
|
||||
}
|
||||
}).
|
||||
|
||||
parse_and_check_config(Root, Type, Name, ConfigIn) ->
|
||||
Schema =
|
||||
case Root of
|
||||
<<"connectors">> -> emqx_connector_schema;
|
||||
<<"actions">> -> emqx_bridge_v2_schema
|
||||
end,
|
||||
#{Root := #{Type := #{Name := Config}}} =
|
||||
hocon_tconf:check_plain(
|
||||
Schema,
|
||||
#{Root => #{Type => #{Name => ConfigIn}}},
|
||||
#{required => false, atom_key => false}
|
||||
),
|
||||
ct:pal("parsed config: ~p", [Config]),
|
||||
ConfigIn.
|
||||
}
|
||||
).
|
||||
|
||||
t_start_stop(Config) ->
|
||||
emqx_bridge_v2_testlib:t_start_stop(Config, s3_bridge_stopped).
|
||||
|
@@ -190,7 +177,7 @@ t_sync_query(Config) ->
|
|||
ok = erlcloud_s3:create_bucket(Bucket, AwsConfig),
|
||||
ok = emqx_bridge_v2_testlib:t_sync_query(
|
||||
Config,
|
||||
fun() -> mk_message(Bucket, Topic, Payload) end,
|
||||
fun() -> emqx_bridge_s3_test_helpers:mk_message_event(Bucket, Topic, Payload) end,
|
||||
fun(Res) -> ?assertMatch(ok, Res) end,
|
||||
s3_bridge_connector_upload_ok
|
||||
),
|
||||
|
@@ -224,15 +211,10 @@ t_query_retry_recoverable(Config) ->
|
|||
heal_failure,
|
||||
[timeout, ?PROXY_NAME, ProxyHost, ProxyPort]
|
||||
),
|
||||
Message = mk_message(Bucket, Topic, Payload),
|
||||
Message = emqx_bridge_s3_test_helpers:mk_message_event(Bucket, Topic, Payload),
|
||||
%% Verify that the message is sent eventually.
|
||||
ok = emqx_bridge_v2:send_message(?BRIDGE_TYPE, BridgeName, Message, #{}),
|
||||
?assertMatch(
|
||||
#{content := Payload},
|
||||
maps:from_list(erlcloud_s3:get_object(Bucket, Topic, AwsConfig))
|
||||
).
|
||||
|
||||
mk_message(ClientId, Topic, Payload) ->
|
||||
Message = emqx_message:make(bin(ClientId), bin(Topic), Payload),
|
||||
{Event, _} = emqx_rule_events:eventmsg_publish(Message),
|
||||
Event.
|
||||
|
|
|
@@ -0,0 +1,181 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_aggreg_buffer_SUITE).
|
||||
|
||||
-compile(nowarn_export_all).
|
||||
-compile(export_all).
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
|
||||
%% CT Setup
|
||||
|
||||
all() ->
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
init_per_suite(Config) ->
|
||||
WorkDir = emqx_cth_suite:work_dir(Config),
|
||||
ok = filelib:ensure_path(WorkDir),
|
||||
[{work_dir, WorkDir} | Config].
|
||||
|
||||
end_per_suite(_Config) ->
|
||||
ok.
|
||||
|
||||
%% Testcases
|
||||
|
||||
t_write_read_cycle(Config) ->
|
||||
Filename = mk_filename(?FUNCTION_NAME, Config),
|
||||
Metadata = {?MODULE, #{tc => ?FUNCTION_NAME}},
|
||||
{ok, WFD} = file:open(Filename, [write, binary]),
|
||||
Writer = emqx_bridge_s3_aggreg_buffer:new_writer(WFD, Metadata),
|
||||
Terms = [
|
||||
[],
|
||||
[[[[[[[[]]]]]]]],
|
||||
123,
|
||||
lists:seq(1, 100),
|
||||
lists:seq(1, 1000),
|
||||
lists:seq(1, 10000),
|
||||
lists:seq(1, 100000),
|
||||
#{<<"id">> => 123456789, <<"ts">> => <<"2028-02-29T12:34:56Z">>, <<"gauge">> => 42.42},
|
||||
{<<"text/plain">>, _Huge = rand:bytes(1048576)},
|
||||
{<<"application/json">>, emqx_utils_json:encode(#{j => <<"son">>, null => null})}
|
||||
],
|
||||
ok = lists:foreach(
|
||||
fun(T) -> ?assertEqual(ok, emqx_bridge_s3_aggreg_buffer:write(T, Writer)) end,
|
||||
Terms
|
||||
),
|
||||
ok = file:close(WFD),
|
||||
{ok, RFD} = file:open(Filename, [read, binary, raw]),
|
||||
{MetadataRead, Reader} = emqx_bridge_s3_aggreg_buffer:new_reader(RFD),
|
||||
?assertEqual(Metadata, MetadataRead),
|
||||
TermsRead = read_until_eof(Reader),
|
||||
?assertEqual(Terms, TermsRead).
|
||||
|
||||
t_read_empty(Config) ->
|
||||
Filename = mk_filename(?FUNCTION_NAME, Config),
|
||||
{ok, WFD} = file:open(Filename, [write, binary]),
|
||||
ok = file:close(WFD),
|
||||
{ok, RFD} = file:open(Filename, [read, binary]),
|
||||
?assertError(
|
||||
{buffer_incomplete, header},
|
||||
emqx_bridge_s3_aggreg_buffer:new_reader(RFD)
|
||||
).
|
||||
|
||||
t_read_garbage(Config) ->
|
||||
Filename = mk_filename(?FUNCTION_NAME, Config),
|
||||
{ok, WFD} = file:open(Filename, [write, binary]),
|
||||
ok = file:write(WFD, rand:bytes(1048576)),
|
||||
ok = file:close(WFD),
|
||||
{ok, RFD} = file:open(Filename, [read, binary]),
|
||||
?assertError(
|
||||
badarg,
|
||||
emqx_bridge_s3_aggreg_buffer:new_reader(RFD)
|
||||
).
|
||||
|
||||
t_read_truncated(Config) ->
|
||||
Filename = mk_filename(?FUNCTION_NAME, Config),
|
||||
{ok, WFD} = file:open(Filename, [write, binary]),
|
||||
Metadata = {?MODULE, #{tc => ?FUNCTION_NAME}},
|
||||
Writer = emqx_bridge_s3_aggreg_buffer:new_writer(WFD, Metadata),
|
||||
Terms = [
|
||||
[[[[[[[[[[[]]]]]]]]]]],
|
||||
lists:seq(1, 100000),
|
||||
#{<<"id">> => 123456789, <<"ts">> => <<"2029-02-30T12:34:56Z">>, <<"gauge">> => 42.42},
|
||||
{<<"text/plain">>, _Huge = rand:bytes(1048576)}
|
||||
],
|
||||
LastTerm =
|
||||
{<<"application/json">>, emqx_utils_json:encode(#{j => <<"son">>, null => null})},
|
||||
ok = lists:foreach(
|
||||
fun(T) -> ?assertEqual(ok, emqx_bridge_s3_aggreg_buffer:write(T, Writer)) end,
|
||||
Terms
|
||||
),
|
||||
{ok, WPos} = file:position(WFD, cur),
|
||||
?assertEqual(ok, emqx_bridge_s3_aggreg_buffer:write(LastTerm, Writer)),
|
||||
ok = file:close(WFD),
|
||||
ok = emqx_bridge_s3_test_helpers:truncate_at(Filename, WPos + 1),
|
||||
{ok, RFD1} = file:open(Filename, [read, binary]),
|
||||
{Metadata, Reader0} = emqx_bridge_s3_aggreg_buffer:new_reader(RFD1),
|
||||
{ReadTerms1, Reader1} = read_terms(length(Terms), Reader0),
|
||||
?assertEqual(Terms, ReadTerms1),
|
||||
?assertError(
|
||||
badarg,
|
||||
emqx_bridge_s3_aggreg_buffer:read(Reader1)
|
||||
),
|
||||
ok = emqx_bridge_s3_test_helpers:truncate_at(Filename, WPos div 2),
|
||||
{ok, RFD2} = file:open(Filename, [read, binary]),
|
||||
{Metadata, Reader2} = emqx_bridge_s3_aggreg_buffer:new_reader(RFD2),
|
||||
{ReadTerms2, Reader3} = read_terms(_FitsInto = 3, Reader2),
|
||||
?assertEqual(lists:sublist(Terms, 3), ReadTerms2),
|
||||
?assertError(
|
||||
badarg,
|
||||
emqx_bridge_s3_aggreg_buffer:read(Reader3)
|
||||
).
|
||||
|
||||
t_read_truncated_takeover_write(Config) ->
|
||||
Filename = mk_filename(?FUNCTION_NAME, Config),
|
||||
{ok, WFD} = file:open(Filename, [write, binary]),
|
||||
Metadata = {?MODULE, #{tc => ?FUNCTION_NAME}},
|
||||
Writer1 = emqx_bridge_s3_aggreg_buffer:new_writer(WFD, Metadata),
|
||||
Terms1 = [
|
||||
[[[[[[[[[[[]]]]]]]]]]],
|
||||
lists:seq(1, 10000),
|
||||
lists:duplicate(1000, ?FUNCTION_NAME),
|
||||
{<<"text/plain">>, _Huge = rand:bytes(1048576)}
|
||||
],
|
||||
Terms2 = [
|
||||
{<<"application/json">>, emqx_utils_json:encode(#{j => <<"son">>, null => null})},
|
||||
{<<"application/x-octet-stream">>, rand:bytes(102400)}
|
||||
],
|
||||
ok = lists:foreach(
|
||||
fun(T) -> ?assertEqual(ok, emqx_bridge_s3_aggreg_buffer:write(T, Writer1)) end,
|
||||
Terms1
|
||||
),
|
||||
{ok, WPos} = file:position(WFD, cur),
|
||||
ok = file:close(WFD),
|
||||
ok = emqx_bridge_s3_test_helpers:truncate_at(Filename, WPos div 2),
|
||||
{ok, RWFD} = file:open(Filename, [read, write, binary]),
|
||||
{Metadata, Reader0} = emqx_bridge_s3_aggreg_buffer:new_reader(RWFD),
|
||||
{ReadTerms1, Reader1} = read_terms(_Survived = 3, Reader0),
|
||||
?assertEqual(
|
||||
lists:sublist(Terms1, 3),
|
||||
ReadTerms1
|
||||
),
|
||||
?assertError(
|
||||
badarg,
|
||||
emqx_bridge_s3_aggreg_buffer:read(Reader1)
|
||||
),
|
||||
Writer2 = emqx_bridge_s3_aggreg_buffer:takeover(Reader1),
|
||||
ok = lists:foreach(
|
||||
fun(T) -> ?assertEqual(ok, emqx_bridge_s3_aggreg_buffer:write(T, Writer2)) end,
|
||||
Terms2
|
||||
),
|
||||
ok = file:close(RWFD),
|
||||
{ok, RFD} = file:open(Filename, [read, binary]),
|
||||
{Metadata, Reader2} = emqx_bridge_s3_aggreg_buffer:new_reader(RFD),
|
||||
ReadTerms2 = read_until_eof(Reader2),
|
||||
?assertEqual(
|
||||
lists:sublist(Terms1, 3) ++ Terms2,
|
||||
ReadTerms2
|
||||
).
|
||||
|
||||
%%
|
||||
|
||||
mk_filename(Name, Config) ->
|
||||
filename:join(?config(work_dir, Config), Name).
|
||||
|
||||
read_terms(0, Reader) ->
|
||||
{[], Reader};
|
||||
read_terms(N, Reader0) ->
|
||||
{Term, Reader1} = emqx_bridge_s3_aggreg_buffer:read(Reader0),
|
||||
{Terms, Reader} = read_terms(N - 1, Reader1),
|
||||
{[Term | Terms], Reader}.
|
||||
|
||||
read_until_eof(Reader0) ->
|
||||
case emqx_bridge_s3_aggreg_buffer:read(Reader0) of
|
||||
{Term, Reader} ->
|
||||
[Term | read_until_eof(Reader)];
|
||||
eof ->
|
||||
[]
|
||||
end.
|
|
@@ -0,0 +1,72 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_aggreg_csv_tests).
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
encoding_test() ->
|
||||
CSV = emqx_bridge_s3_aggreg_csv:new(#{}),
|
||||
?assertEqual(
|
||||
"A,B,Ç\n"
|
||||
"1.2345,string,0.0\n"
|
||||
"0.3333333333,\"[]\",-0.0\n"
|
||||
"111111,🫠,0.0\n"
|
||||
"111.111,\"\"\"quoted\"\"\",\"line\r\nbreak\"\n"
|
||||
"222.222,,\n",
|
||||
fill_close(CSV, [
|
||||
[
|
||||
#{<<"A">> => 1.2345, <<"B">> => "string", <<"Ç"/utf8>> => +0.0},
|
||||
#{<<"A">> => 1 / 3, <<"B">> => "[]", <<"Ç"/utf8>> => -0.0},
|
||||
#{<<"A">> => 111111, <<"B">> => "🫠", <<"Ç"/utf8>> => 0.0},
|
||||
#{<<"A">> => 111.111, <<"B">> => "\"quoted\"", <<"Ç"/utf8>> => "line\r\nbreak"},
|
||||
#{<<"A">> => 222.222, <<"B">> => "", <<"Ç"/utf8>> => undefined}
|
||||
]
|
||||
])
|
||||
).
|
||||
|
||||
column_order_test() ->
|
||||
Order = [<<"ID">>, <<"TS">>],
|
||||
CSV = emqx_bridge_s3_aggreg_csv:new(#{column_order => Order}),
|
||||
?assertEqual(
|
||||
"ID,TS,A,B,D\n"
|
||||
"1,2024-01-01,12.34,str,\"[]\"\n"
|
||||
"2,2024-01-02,23.45,ing,\n"
|
||||
"3,,45,,'\n"
|
||||
"4,2024-01-04,,,\n",
|
||||
fill_close(CSV, [
|
||||
[
|
||||
#{
|
||||
<<"A">> => 12.34,
|
||||
<<"B">> => "str",
|
||||
<<"ID">> => 1,
|
||||
<<"TS">> => "2024-01-01",
|
||||
<<"D">> => <<"[]">>
|
||||
},
|
||||
#{
|
||||
<<"TS">> => "2024-01-02",
|
||||
<<"C">> => <<"null">>,
|
||||
<<"ID">> => 2,
|
||||
<<"A">> => 23.45,
|
||||
<<"B">> => "ing"
|
||||
}
|
||||
],
|
||||
[
|
||||
#{<<"A">> => 45, <<"D">> => <<"'">>, <<"ID">> => 3},
|
||||
#{<<"ID">> => 4, <<"TS">> => "2024-01-04"}
|
||||
]
|
||||
])
|
||||
).
|
||||
|
||||
fill_close(CSV, LRecords) ->
|
||||
string(fill_close_(CSV, LRecords)).
|
||||
|
||||
fill_close_(CSV0, [Records | LRest]) ->
|
||||
{Writes, CSV} = emqx_bridge_s3_aggreg_csv:fill(Records, CSV0),
|
||||
[Writes | fill_close_(CSV, LRest)];
|
||||
fill_close_(CSV, []) ->
|
||||
[emqx_bridge_s3_aggreg_csv:close(CSV)].
|
||||
|
||||
string(Writes) ->
|
||||
unicode:characters_to_list(Writes).
|
|
@@ -0,0 +1,465 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_bridge_s3_aggreg_upload_SUITE).
|
||||
|
||||
-compile(nowarn_export_all).
|
||||
-compile(export_all).
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
-include_lib("snabbkaffe/include/test_macros.hrl").
|
||||
|
||||
-import(emqx_utils_conv, [bin/1]).
|
||||
|
||||
%% See `emqx_bridge_s3.hrl`.
|
||||
-define(BRIDGE_TYPE, <<"s3_aggregated_upload">>).
|
||||
-define(CONNECTOR_TYPE, <<"s3">>).
|
||||
|
||||
-define(PROXY_NAME, "minio_tcp").
|
||||
|
||||
-define(CONF_TIME_INTERVAL, 4000).
|
||||
-define(CONF_MAX_RECORDS, 100).
|
||||
-define(CONF_COLUMN_ORDER, ?CONF_COLUMN_ORDER([])).
|
||||
-define(CONF_COLUMN_ORDER(T), [
|
||||
<<"publish_received_at">>,
|
||||
<<"clientid">>,
|
||||
<<"topic">>,
|
||||
<<"payload">>,
|
||||
<<"empty">>
|
||||
| T
|
||||
]).
|
||||
|
||||
-define(LIMIT_TOLERANCE, 1.1).
|
||||
|
||||
%% CT Setup
|
||||
|
||||
all() ->
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
init_per_suite(Config) ->
|
||||
% Setup toxiproxy
|
||||
ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
|
||||
ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
|
||||
_ = emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
|
||||
Apps = emqx_cth_suite:start(
|
||||
[
|
||||
emqx,
|
||||
emqx_conf,
|
||||
emqx_connector,
|
||||
emqx_bridge_s3,
|
||||
emqx_bridge,
|
||||
emqx_rule_engine,
|
||||
emqx_management,
|
||||
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
|
||||
],
|
||||
#{work_dir => emqx_cth_suite:work_dir(Config)}
|
||||
),
|
||||
{ok, _} = emqx_common_test_http:create_default_app(),
|
||||
[
|
||||
{apps, Apps},
|
||||
{proxy_host, ProxyHost},
|
||||
{proxy_port, ProxyPort},
|
||||
{proxy_name, ?PROXY_NAME}
|
||||
| Config
|
||||
].
|
||||
|
||||
end_per_suite(Config) ->
|
||||
ok = emqx_cth_suite:stop(?config(apps, Config)).
|
||||
|
||||
%% Testcases
|
||||
|
||||
init_per_testcase(TestCase, Config) ->
|
||||
ct:timetrap(timer:seconds(15)),
|
||||
ok = snabbkaffe:start_trace(),
|
||||
TS = erlang:system_time(),
|
||||
Name = iolist_to_binary(io_lib:format("~s-~p", [TestCase, TS])),
|
||||
Bucket = unicode:characters_to_list(string:replace(Name, "_", "-", all)),
|
||||
ConnectorConfig = connector_config(Name, Config),
|
||||
ActionConfig = action_config(Name, Name, Bucket),
|
||||
ok = emqx_bridge_s3_test_helpers:create_bucket(Bucket),
|
||||
[
|
||||
{connector_type, ?CONNECTOR_TYPE},
|
||||
{connector_name, Name},
|
||||
{connector_config, ConnectorConfig},
|
||||
{bridge_type, ?BRIDGE_TYPE},
|
||||
{bridge_name, Name},
|
||||
{bridge_config, ActionConfig},
|
||||
{s3_bucket, Bucket}
|
||||
| Config
|
||||
].
|
||||
|
||||
end_per_testcase(_TestCase, _Config) ->
|
||||
ok = snabbkaffe:stop(),
|
||||
ok.
|
||||
|
||||
connector_config(Name, _Config) ->
|
||||
BaseConf = emqx_s3_test_helpers:base_raw_config(tcp),
|
||||
emqx_bridge_s3_test_helpers:parse_and_check_config(
|
||||
<<"connectors">>, ?CONNECTOR_TYPE, Name, #{
|
||||
<<"enable">> => true,
|
||||
<<"description">> => <<"S3 Connector">>,
|
||||
<<"host">> => emqx_utils_conv:bin(maps:get(<<"host">>, BaseConf)),
|
||||
<<"port">> => maps:get(<<"port">>, BaseConf),
|
||||
<<"access_key_id">> => maps:get(<<"access_key_id">>, BaseConf),
|
||||
<<"secret_access_key">> => maps:get(<<"secret_access_key">>, BaseConf),
|
||||
<<"transport_options">> => #{
|
||||
<<"connect_timeout">> => <<"500ms">>,
|
||||
<<"request_timeout">> => <<"1s">>,
|
||||
<<"pool_size">> => 4,
|
||||
<<"max_retries">> => 0
|
||||
},
|
||||
<<"resource_opts">> => #{
|
||||
<<"health_check_interval">> => <<"1s">>
|
||||
}
|
||||
}
|
||||
).
|
||||
|
||||
action_config(Name, ConnectorId, Bucket) ->
|
||||
emqx_bridge_s3_test_helpers:parse_and_check_config(
|
||||
<<"actions">>, ?BRIDGE_TYPE, Name, #{
|
||||
<<"enable">> => true,
|
||||
<<"connector">> => ConnectorId,
|
||||
<<"parameters">> => #{
|
||||
<<"bucket">> => unicode:characters_to_binary(Bucket),
|
||||
<<"key">> => <<"${action}/${node}/${datetime.rfc3339}">>,
|
||||
<<"acl">> => <<"public_read">>,
|
||||
<<"headers">> => #{
|
||||
<<"X-AMZ-Meta-Version">> => <<"42">>
|
||||
},
|
||||
<<"aggregation">> => #{
|
||||
<<"time_interval">> => <<"4s">>,
|
||||
<<"max_records">> => ?CONF_MAX_RECORDS
|
||||
},
|
||||
<<"container">> => #{
|
||||
<<"type">> => <<"csv">>,
|
||||
<<"column_order">> => ?CONF_COLUMN_ORDER
|
||||
}
|
||||
},
|
||||
<<"resource_opts">> => #{
|
||||
<<"health_check_interval">> => <<"1s">>,
|
||||
<<"max_buffer_bytes">> => <<"64MB">>,
|
||||
<<"query_mode">> => <<"async">>,
|
||||
<<"worker_pool_size">> => 4
|
||||
}
|
||||
}
|
||||
).
|
||||
|
||||
t_start_stop(Config) ->
|
||||
emqx_bridge_v2_testlib:t_start_stop(Config, s3_bridge_stopped).
|
||||
|
||||
t_create_via_http(Config) ->
|
||||
emqx_bridge_v2_testlib:t_create_via_http(Config).
|
||||
|
||||
t_on_get_status(Config) ->
|
||||
emqx_bridge_v2_testlib:t_on_get_status(Config, #{}).
|
||||
|
||||
t_aggreg_upload(Config) ->
|
||||
Bucket = ?config(s3_bucket, Config),
|
||||
BridgeName = ?config(bridge_name, Config),
|
||||
BridgeNameString = unicode:characters_to_list(BridgeName),
|
||||
NodeString = atom_to_list(node()),
|
||||
%% Create a bridge with the sample configuration.
|
||||
?assertMatch({ok, _Bridge}, emqx_bridge_v2_testlib:create_bridge(Config)),
|
||||
%% Prepare some sample messages that look like Rule SQL productions.
|
||||
MessageEvents = lists:map(fun mk_message_event/1, [
|
||||
{<<"C1">>, T1 = <<"a/b/c">>, P1 = <<"{\"hello\":\"world\"}">>},
|
||||
{<<"C2">>, T2 = <<"foo/bar">>, P2 = <<"baz">>},
|
||||
{<<"C3">>, T3 = <<"t/42">>, P3 = <<"">>}
|
||||
]),
|
||||
ok = send_messages(BridgeName, MessageEvents),
|
||||
%% Wait until the delivery is completed.
|
||||
?block_until(#{?snk_kind := s3_aggreg_delivery_completed, action := BridgeName}),
|
||||
%% Check the uploaded objects.
|
||||
_Uploads = [#{key := Key}] = emqx_bridge_s3_test_helpers:list_objects(Bucket),
|
||||
?assertMatch(
|
||||
[BridgeNameString, NodeString, _Datetime, _Seq = "0"],
|
||||
string:split(Key, "/", all)
|
||||
),
|
||||
Upload = #{content := Content} = emqx_bridge_s3_test_helpers:get_object(Bucket, Key),
|
||||
?assertMatch(
|
||||
#{content_type := "text/csv", "x-amz-meta-version" := "42"},
|
||||
Upload
|
||||
),
|
||||
%% Verify that column order is respected.
|
||||
?assertMatch(
|
||||
{ok, [
|
||||
?CONF_COLUMN_ORDER(_),
|
||||
[TS, <<"C1">>, T1, P1, <<>> | _],
|
||||
[TS, <<"C2">>, T2, P2, <<>> | _],
|
||||
[TS, <<"C3">>, T3, P3, <<>> | _]
|
||||
]},
|
||||
erl_csv:decode(Content)
|
||||
).
|
||||
|
||||
t_aggreg_upload_rule(Config) ->
|
||||
Bucket = ?config(s3_bucket, Config),
|
||||
BridgeName = ?config(bridge_name, Config),
|
||||
ClientID = emqx_utils_conv:bin(?FUNCTION_NAME),
|
||||
%% Create a bridge with the sample configuration and a simple SQL rule.
|
||||
?assertMatch({ok, _Bridge}, emqx_bridge_v2_testlib:create_bridge(Config)),
|
||||
?assertMatch(
|
||||
{ok, _Rule},
|
||||
emqx_bridge_v2_testlib:create_rule_and_action_http(?BRIDGE_TYPE, <<>>, Config, #{
|
||||
sql => <<
|
||||
"SELECT"
|
||||
" *,"
|
||||
" strlen(payload) as psize,"
|
||||
" unix_ts_to_rfc3339(publish_received_at, 'millisecond') as publish_received_at"
|
||||
" FROM 's3/#'"
|
||||
>>
|
||||
})
|
||||
),
|
||||
ok = lists:foreach(fun emqx:publish/1, [
|
||||
emqx_message:make(?FUNCTION_NAME, T1 = <<"s3/m1">>, P1 = <<"[HELLO]">>),
|
||||
emqx_message:make(?FUNCTION_NAME, T2 = <<"s3/m2">>, P2 = <<"[WORLD]">>),
|
||||
emqx_message:make(?FUNCTION_NAME, T3 = <<"s3/empty">>, P3 = <<>>),
|
||||
emqx_message:make(?FUNCTION_NAME, <<"not/s3">>, <<"should not be here">>)
|
||||
]),
|
||||
?block_until(#{?snk_kind := s3_aggreg_delivery_completed, action := BridgeName}),
|
||||
%% Check the uploaded objects.
|
||||
_Uploads = [#{key := Key}] = emqx_bridge_s3_test_helpers:list_objects(Bucket),
|
||||
_CSV = [Header | Rows] = fetch_parse_csv(Bucket, Key),
|
||||
%% Verify that column order is respected and event fields are preserved.
|
||||
?assertMatch(?CONF_COLUMN_ORDER(_), Header),
|
||||
?assertEqual(
|
||||
[<<"event">>, <<"qos">>, <<"psize">>],
|
||||
[C || C <- [<<"event">>, <<"qos">>, <<"psize">>], lists:member(C, Header)]
|
||||
),
|
||||
%% Verify that all the matching messages are present.
|
||||
?assertMatch(
|
||||
[
|
||||
[_TS1, ClientID, T1, P1 | _],
|
||||
[_TS2, ClientID, T2, P2 | _],
|
||||
[_TS3, ClientID, T3, P3 | _]
|
||||
],
|
||||
Rows
|
||||
),
|
||||
%% Verify that timestamp column now has RFC3339 format.
|
||||
[_Row = [TS1 | _] | _Rest] = Rows,
|
||||
?assert(
|
||||
is_integer(emqx_rule_funcs:rfc3339_to_unix_ts(TS1, millisecond)),
|
||||
TS1
|
||||
).
|
||||
|
||||
t_aggreg_upload_restart(Config) ->
|
||||
%% NOTE
|
||||
%% This test verifies that the bridge will reuse the existing aggregation buffer
|
||||
%% after a restart.
|
||||
Bucket = ?config(s3_bucket, Config),
|
||||
BridgeName = ?config(bridge_name, Config),
|
||||
%% Create a bridge with the sample configuration.
|
||||
?assertMatch({ok, _Bridge}, emqx_bridge_v2_testlib:create_bridge(Config)),
|
||||
%% Send some sample messages that look like Rule SQL productions.
|
||||
MessageEvents = lists:map(fun mk_message_event/1, [
|
||||
{<<"C1">>, T1 = <<"a/b/c">>, P1 = <<"{\"hello\":\"world\"}">>},
|
||||
{<<"C2">>, T2 = <<"foo/bar">>, P2 = <<"baz">>},
|
||||
{<<"C3">>, T3 = <<"t/42">>, P3 = <<"">>}
|
||||
]),
|
||||
ok = send_messages(BridgeName, MessageEvents),
|
||||
{ok, _} = ?block_until(#{?snk_kind := s3_aggreg_records_written, action := BridgeName}),
|
||||
%% Restart the bridge.
|
||||
{ok, _} = emqx_bridge_v2:disable_enable(disable, ?BRIDGE_TYPE, BridgeName),
|
||||
{ok, _} = emqx_bridge_v2:disable_enable(enable, ?BRIDGE_TYPE, BridgeName),
|
||||
%% Send some more messages.
|
||||
ok = send_messages(BridgeName, MessageEvents),
|
||||
{ok, _} = ?block_until(#{?snk_kind := s3_aggreg_records_written, action := BridgeName}),
|
||||
%% Wait until the delivery is completed.
|
||||
{ok, _} = ?block_until(#{?snk_kind := s3_aggreg_delivery_completed, action := BridgeName}),
|
||||
%% Check there's still only one upload.
|
||||
_Uploads = [#{key := Key}] = emqx_bridge_s3_test_helpers:list_objects(Bucket),
|
||||
_Upload = #{content := Content} = emqx_bridge_s3_test_helpers:get_object(Bucket, Key),
|
||||
?assertMatch(
|
||||
{ok, [
|
||||
_Header = [_ | _],
|
||||
[TS1, <<"C1">>, T1, P1 | _],
|
||||
[TS1, <<"C2">>, T2, P2 | _],
|
||||
[TS1, <<"C3">>, T3, P3 | _],
|
||||
[TS2, <<"C1">>, T1, P1 | _],
|
||||
[TS2, <<"C2">>, T2, P2 | _],
|
||||
[TS2, <<"C3">>, T3, P3 | _]
|
||||
]},
|
||||
erl_csv:decode(Content)
|
||||
).
|
||||
|
||||
t_aggreg_upload_restart_corrupted(Config) ->
|
||||
%% NOTE
|
||||
%% This test verifies that the bridge can recover from a buffer file corruption,
|
||||
%% and does so while preserving uncompromised data.
|
||||
Bucket = ?config(s3_bucket, Config),
|
||||
BridgeName = ?config(bridge_name, Config),
|
||||
BatchSize = ?CONF_MAX_RECORDS div 2,
|
||||
%% Create a bridge with the sample configuration.
|
||||
?assertMatch({ok, _Bridge}, emqx_bridge_v2_testlib:create_bridge(Config)),
|
||||
%% Send some sample messages that look like Rule SQL productions.
|
||||
Messages1 = [
|
||||
{integer_to_binary(N), <<"a/b/c">>, <<"{\"hello\":\"world\"}">>}
|
||||
|| N <- lists:seq(1, BatchSize)
|
||||
],
|
||||
%% Ensure that they span multiple batch queries.
|
||||
ok = send_messages_delayed(BridgeName, lists:map(fun mk_message_event/1, Messages1), 1),
|
||||
{ok, _} = ?block_until(
|
||||
#{?snk_kind := s3_aggreg_records_written, action := BridgeName},
|
||||
infinity,
|
||||
0
|
||||
),
|
||||
%% Find out the buffer file.
|
||||
{ok, #{filename := Filename}} = ?block_until(
|
||||
#{?snk_kind := s3_aggreg_buffer_allocated, action := BridgeName}
|
||||
),
|
||||
%% Stop the bridge, corrupt the buffer file, and restart the bridge.
|
||||
{ok, _} = emqx_bridge_v2:disable_enable(disable, ?BRIDGE_TYPE, BridgeName),
|
||||
BufferFileSize = filelib:file_size(Filename),
|
||||
ok = emqx_bridge_s3_test_helpers:truncate_at(Filename, BufferFileSize div 2),
|
||||
{ok, _} = emqx_bridge_v2:disable_enable(enable, ?BRIDGE_TYPE, BridgeName),
|
||||
%% Send some more messages.
|
||||
Messages2 = [
|
||||
{integer_to_binary(N), <<"c/d/e">>, <<"{\"hello\":\"world\"}">>}
|
||||
|| N <- lists:seq(1, BatchSize)
|
||||
],
|
||||
ok = send_messages_delayed(BridgeName, lists:map(fun mk_message_event/1, Messages2), 0),
|
||||
%% Wait until the delivery is completed.
|
||||
{ok, _} = ?block_until(#{?snk_kind := s3_aggreg_delivery_completed, action := BridgeName}),
|
||||
%% Check that upload contains part of the first batch and all of the second batch.
|
||||
_Uploads = [#{key := Key}] = emqx_bridge_s3_test_helpers:list_objects(Bucket),
|
||||
CSV = [_Header | Rows] = fetch_parse_csv(Bucket, Key),
|
||||
NRows = length(Rows),
|
||||
?assert(
|
||||
NRows > BatchSize,
|
||||
CSV
|
||||
),
|
||||
?assertEqual(
|
||||
lists:sublist(Messages1, NRows - BatchSize) ++ Messages2,
|
||||
[{ClientID, Topic, Payload} || [_TS, ClientID, Topic, Payload | _] <- Rows],
|
||||
CSV
|
||||
).
|
||||
|
||||
t_aggreg_pending_upload_restart(Config) ->
|
||||
%% NOTE
|
||||
%% This test verifies that the bridge will finish uploading a buffer file after
|
||||
%% a restart.
|
||||
Bucket = ?config(s3_bucket, Config),
|
||||
BridgeName = ?config(bridge_name, Config),
|
||||
%% Create a bridge with the sample configuration.
|
||||
?assertMatch({ok, _Bridge}, emqx_bridge_v2_testlib:create_bridge(Config)),
|
||||
%% Send a few large messages that will require multipart upload.
|
||||
%% Ensure that they span multiple batch queries.
|
||||
Payload = iolist_to_binary(lists:duplicate(128 * 1024, "PAYLOAD!")),
|
||||
Messages = [{integer_to_binary(N), <<"a/b/c">>, Payload} || N <- lists:seq(1, 10)],
|
||||
ok = send_messages_delayed(BridgeName, lists:map(fun mk_message_event/1, Messages), 10),
|
||||
%% Wait until the multipart upload is started.
|
||||
{ok, #{key := ObjectKey}} =
|
||||
?block_until(#{?snk_kind := s3_client_multipart_started, bucket := Bucket}),
|
||||
%% Stop the bridge.
|
||||
{ok, _} = emqx_bridge_v2:disable_enable(disable, ?BRIDGE_TYPE, BridgeName),
|
||||
%% Verify that pending uploads have been gracefully aborted.
|
||||
%% NOTE: Minio does not support multipart upload listing w/o prefix.
|
||||
?assertEqual(
|
||||
[],
|
||||
emqx_bridge_s3_test_helpers:list_pending_uploads(Bucket, ObjectKey)
|
||||
),
|
||||
%% Restart the bridge.
|
||||
{ok, _} = emqx_bridge_v2:disable_enable(enable, ?BRIDGE_TYPE, BridgeName),
|
||||
%% Wait until the delivery is completed.
|
||||
{ok, _} = ?block_until(#{?snk_kind := s3_aggreg_delivery_completed, action := BridgeName}),
|
||||
%% Check that delivery contains all the messages.
|
||||
_Uploads = [#{key := Key}] = emqx_bridge_s3_test_helpers:list_objects(Bucket),
|
||||
[_Header | Rows] = fetch_parse_csv(Bucket, Key),
|
||||
?assertEqual(
|
||||
Messages,
|
||||
[{CID, Topic, PL} || [_TS, CID, Topic, PL | _] <- Rows]
|
||||
).
|
||||
|
||||
t_aggreg_next_rotate(Config) ->
|
||||
%% NOTE
|
||||
%% This is essentially a stress test that tries to verify that buffer rotation
|
||||
%% and windowing work correctly under high rate, high concurrency conditions.
|
||||
Bucket = ?config(s3_bucket, Config),
|
||||
BridgeName = ?config(bridge_name, Config),
|
||||
NSenders = 4,
|
||||
%% Create a bridge with the sample configuration.
|
||||
?assertMatch({ok, _Bridge}, emqx_bridge_v2_testlib:create_bridge(Config)),
|
||||
%% Start separate processes to send messages.
|
||||
Senders = [
|
||||
spawn_link(fun() -> run_message_sender(BridgeName, N) end)
|
||||
|| N <- lists:seq(1, NSenders)
|
||||
],
|
||||
%% Give them some time to send messages so that rotation and windowing will happen.
|
||||
ok = timer:sleep(round(?CONF_TIME_INTERVAL * 1.5)),
|
||||
%% Stop the senders.
|
||||
_ = [Sender ! {stop, self()} || Sender <- Senders],
|
||||
NSent = receive_sender_reports(Senders),
|
||||
%% Wait for the last delivery to complete.
|
||||
ok = timer:sleep(round(?CONF_TIME_INTERVAL * 0.5)),
|
||||
?block_until(#{?snk_kind := s3_aggreg_delivery_completed, action := BridgeName}, infinity, 0),
|
||||
%% There should be at least 2 time windows of aggregated records.
|
||||
Uploads = [K || #{key := K} <- emqx_bridge_s3_test_helpers:list_objects(Bucket)],
|
||||
DTs = [DT || K <- Uploads, [_Action, _Node, DT | _] <- [string:split(K, "/", all)]],
|
||||
?assert(
|
||||
ordsets:size(ordsets:from_list(DTs)) > 1,
|
||||
Uploads
|
||||
),
|
||||
%% Uploads should not contain more than max allowed records.
|
||||
CSVs = [{K, fetch_parse_csv(Bucket, K)} || K <- Uploads],
|
||||
NRecords = [{K, length(CSV) - 1} || {K, CSV} <- CSVs],
|
||||
?assertEqual(
|
||||
[],
|
||||
[{K, NR} || {K, NR} <- NRecords, NR > ?CONF_MAX_RECORDS * ?LIMIT_TOLERANCE]
|
||||
),
|
||||
%% No message should be lost.
|
||||
?assertEqual(
|
||||
NSent,
|
||||
lists:sum([NR || {_, NR} <- NRecords])
|
||||
).
|
||||
|
||||
run_message_sender(BridgeName, N) ->
    ClientID = integer_to_binary(N),
    Topic = <<"a/b/c/", ClientID/binary>>,
    run_message_sender(BridgeName, N, ClientID, Topic, N, 0).

run_message_sender(BridgeName, N, ClientID, Topic, Delay, NSent) ->
    Payload = integer_to_binary(N * 1_000_000 + NSent),
    Message = emqx_bridge_s3_test_helpers:mk_message_event(ClientID, Topic, Payload),
    _ = send_message(BridgeName, Message),
    receive
        {stop, From} ->
            From ! {sent, self(), NSent + 1}
    after Delay ->
        run_message_sender(BridgeName, N, ClientID, Topic, Delay, NSent + 1)
    end.

receive_sender_reports([Sender | Rest]) ->
    receive
        {sent, Sender, NSent} -> NSent + receive_sender_reports(Rest)
    end;
receive_sender_reports([]) ->
    0.

%%

mk_message_event({ClientID, Topic, Payload}) ->
    emqx_bridge_s3_test_helpers:mk_message_event(ClientID, Topic, Payload).

send_messages(BridgeName, MessageEvents) ->
    lists:foreach(
        fun(M) -> send_message(BridgeName, M) end,
        MessageEvents
    ).

send_messages_delayed(BridgeName, MessageEvents, Delay) ->
    lists:foreach(
        fun(M) ->
            send_message(BridgeName, M),
            timer:sleep(Delay)
        end,
        MessageEvents
    ).

send_message(BridgeName, Message) ->
    ?assertEqual(ok, emqx_bridge_v2:send_message(?BRIDGE_TYPE, BridgeName, Message, #{})).

fetch_parse_csv(Bucket, Key) ->
    #{content := Content} = emqx_bridge_s3_test_helpers:get_object(Bucket, Key),
    {ok, CSV} = erl_csv:decode(Content),
    CSV.
@ -0,0 +1,58 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

-module(emqx_bridge_s3_test_helpers).

-compile(nowarn_export_all).
-compile(export_all).

-import(emqx_utils_conv, [bin/1]).

parse_and_check_config(Root, Type, Name, Config) ->
    Schema =
        case Root of
            <<"connectors">> -> emqx_connector_schema;
            <<"actions">> -> emqx_bridge_v2_schema
        end,
    #{Root := #{Type := #{Name := _ConfigParsed}}} =
        hocon_tconf:check_plain(
            Schema,
            #{Root => #{Type => #{Name => Config}}},
            #{required => false, atom_key => false}
        ),
    Config.

mk_message_event(ClientId, Topic, Payload) ->
    Message = emqx_message:make(bin(ClientId), bin(Topic), Payload),
    {Event, _} = emqx_rule_events:eventmsg_publish(Message),
    emqx_utils_maps:binary_key_map(Event).

create_bucket(Bucket) ->
    AwsConfig = emqx_s3_test_helpers:aws_config(tcp),
    erlcloud_s3:create_bucket(Bucket, AwsConfig).

list_objects(Bucket) ->
    AwsConfig = emqx_s3_test_helpers:aws_config(tcp),
    Response = erlcloud_s3:list_objects(Bucket, AwsConfig),
    false = proplists:get_value(is_truncated, Response),
    Contents = proplists:get_value(contents, Response),
    lists:map(fun maps:from_list/1, Contents).

get_object(Bucket, Key) ->
    AwsConfig = emqx_s3_test_helpers:aws_config(tcp),
    maps:from_list(erlcloud_s3:get_object(Bucket, Key, AwsConfig)).

list_pending_uploads(Bucket, Key) ->
    AwsConfig = emqx_s3_test_helpers:aws_config(tcp),
    {ok, Props} = erlcloud_s3:list_multipart_uploads(Bucket, [{prefix, Key}], [], AwsConfig),
    Uploads = proplists:get_value(uploads, Props),
    lists:map(fun maps:from_list/1, Uploads).

%% File utilities

truncate_at(Filename, Pos) ->
    {ok, FD} = file:open(Filename, [read, write, binary]),
    {ok, Pos} = file:position(FD, Pos),
    ok = file:truncate(FD),
    ok = file:close(FD).
@ -413,6 +413,9 @@ do_query(
            %% only insert sql statement for single query and batch query
            case apply_template(QueryTuple, Templates) of
                {?ACTION_SEND_MESSAGE, SQL} ->
                    emqx_trace:rendered_action_template(ChannelId, #{
                        sql => SQL
                    }),
                    Result = ecpool:pick_and_do(
                        PoolName,
                        {?MODULE, worker_do_insert, [SQL, State]},
@ -273,6 +273,8 @@ do_query(
    Result =
        case try_render_message(Query, Channels) of
            {ok, Msg} ->
                [{ChannelID, _} | _] = Query,
                emqx_trace:rendered_action_template(ChannelID, #{message => Msg}),
                ecpool:pick_and_do(
                    PoolName,
                    {emqx_bridge_syskeeper_client, forward, [Msg, AckTimeout + ?EXTRA_CALL_TIMEOUT]},
@ -10,6 +10,7 @@
|
|||
|
||||
-include_lib("hocon/include/hoconsc.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
-include_lib("emqx/include/emqx_trace.hrl").
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
-include_lib("emqx_resource/include/emqx_resource.hrl").
|
||||
|
@ -32,7 +33,7 @@
|
|||
|
||||
-export([connector_examples/1]).
|
||||
|
||||
-export([connect/1, do_get_status/1, execute/3, do_batch_insert/4]).
|
||||
-export([connect/1, do_get_status/1, execute/3, do_batch_insert/5]).
|
||||
|
||||
-import(hoconsc, [mk/2, enum/1, ref/2]).
|
||||
|
||||
|
@ -186,6 +187,7 @@ on_query(InstanceId, {ChannelId, Data}, #{channels := Channels} = State) ->
|
|||
case maps:find(ChannelId, Channels) of
|
||||
{ok, #{insert := Tokens, opts := Opts}} ->
|
||||
Query = emqx_placeholder:proc_tmpl(Tokens, Data),
|
||||
emqx_trace:rendered_action_template(ChannelId, #{query => Query}),
|
||||
do_query_job(InstanceId, {?MODULE, execute, [Query, Opts]}, State);
|
||||
_ ->
|
||||
{error, {unrecoverable_error, {invalid_channel_id, InstanceId}}}
|
||||
|
@ -199,9 +201,10 @@ on_batch_query(
|
|||
) ->
|
||||
case maps:find(ChannelId, Channels) of
|
||||
{ok, #{batch := Tokens, opts := Opts}} ->
|
||||
TraceRenderedCTX = emqx_trace:make_rendered_action_template_trace_context(ChannelId),
|
||||
do_query_job(
|
||||
InstanceId,
|
||||
{?MODULE, do_batch_insert, [Tokens, BatchReq, Opts]},
|
||||
{?MODULE, do_batch_insert, [Tokens, BatchReq, Opts, TraceRenderedCTX]},
|
||||
State
|
||||
);
|
||||
_ ->
|
||||
|
@ -338,9 +341,18 @@ do_query_job(InstanceId, Job, #{pool_name := PoolName} = State) ->
|
|||
execute(Conn, Query, Opts) ->
|
||||
tdengine:insert(Conn, Query, Opts).
|
||||
|
||||
do_batch_insert(Conn, Tokens, BatchReqs, Opts) ->
|
||||
do_batch_insert(Conn, Tokens, BatchReqs, Opts, TraceRenderedCTX) ->
|
||||
SQL = aggregate_query(Tokens, BatchReqs, <<"INSERT INTO">>),
|
||||
execute(Conn, SQL, Opts).
|
||||
try
|
||||
emqx_trace:rendered_action_template_with_ctx(
|
||||
TraceRenderedCTX,
|
||||
#{query => SQL}
|
||||
),
|
||||
execute(Conn, SQL, Opts)
|
||||
catch
|
||||
error:?EMQX_TRACE_STOP_ACTION_MATCH = Reason ->
|
||||
{error, Reason}
|
||||
end.
|
||||
|
||||
aggregate_query(BatchTks, BatchReqs, Acc) ->
|
||||
lists:foldl(
|
||||
|
|
|
@ -242,7 +242,7 @@ load_config(Bin, Opts) when is_binary(Bin) ->
|
|||
load_config_from_raw(RawConf0, Opts) ->
|
||||
SchemaMod = emqx_conf:schema_module(),
|
||||
RawConf1 = emqx_config:upgrade_raw_conf(SchemaMod, RawConf0),
|
||||
case check_config(RawConf1) of
|
||||
case check_config(RawConf1, Opts) of
|
||||
{ok, RawConf} ->
|
||||
%% It has been ensured that the connector is always the first configuration to be updated.
|
||||
%% However, when deleting the connector, we need to clean up the dependent actions/sources first;
|
||||
|
@ -395,24 +395,28 @@ suggest_msg(#{kind := validation_error, reason := unknown_fields}, Mode) ->
|
|||
suggest_msg(_, _) ->
|
||||
<<"">>.
|
||||
|
||||
check_config(Conf) ->
|
||||
case check_keys_is_not_readonly(Conf) of
|
||||
ok ->
|
||||
Conf1 = emqx_config:fill_defaults(Conf),
|
||||
case check_config_schema(Conf1) of
|
||||
ok -> {ok, Conf1};
|
||||
check_config(Conf0, Opts) ->
|
||||
case check_keys_is_not_readonly(Conf0, Opts) of
|
||||
{ok, Conf1} ->
|
||||
Conf = emqx_config:fill_defaults(Conf1),
|
||||
case check_config_schema(Conf) of
|
||||
ok -> {ok, Conf};
|
||||
{error, Reason} -> {error, Reason}
|
||||
end;
|
||||
Error ->
|
||||
Error
|
||||
end.
|
||||
|
||||
check_keys_is_not_readonly(Conf) ->
|
||||
check_keys_is_not_readonly(Conf, Opts) ->
|
||||
IgnoreReadonly = maps:get(ignore_readonly, Opts, false),
|
||||
Keys = maps:keys(Conf),
|
||||
ReadOnlyKeys = [atom_to_binary(K) || K <- ?READONLY_KEYS],
|
||||
case lists:filter(fun(K) -> lists:member(K, Keys) end, ReadOnlyKeys) of
|
||||
[] ->
|
||||
ok;
|
||||
{ok, Conf};
|
||||
BadKeys when IgnoreReadonly ->
|
||||
?SLOG(info, #{msg => "readonly_root_keys_ignored", keys => BadKeys}),
|
||||
{ok, maps:without(BadKeys, Conf)};
|
||||
BadKeys ->
|
||||
BadKeysStr = lists:join(<<",">>, BadKeys),
|
||||
{error, ?UPDATE_READONLY_KEYS_PROHIBITED, BadKeysStr}
|
||||
|
|
|
@ -72,7 +72,7 @@ dashboard_addr(desc) -> ?DESC(dashboard_addr);
dashboard_addr(default) -> <<"https://127.0.0.1:18083">>;
dashboard_addr(_) -> undefined.

%% TOOD: support raw xml metadata in hocon (maybe?🤔)
%% TODO: support raw xml metadata in hocon (maybe?🤔)
idp_metadata_url(type) -> binary();
idp_metadata_url(desc) -> ?DESC(idp_metadata_url);
idp_metadata_url(default) -> <<"https://idp.example.com">>;
|
|
@ -147,7 +147,9 @@ schema("/configs") ->
|
|||
hoconsc:mk(
|
||||
hoconsc:enum([replace, merge]),
|
||||
#{in => query, default => merge, required => false}
|
||||
)}
|
||||
)},
|
||||
{ignore_readonly,
|
||||
hoconsc:mk(boolean(), #{in => query, default => false, required => false})}
|
||||
],
|
||||
'requestBody' => #{
|
||||
content =>
|
||||
|
@ -361,16 +363,18 @@ configs(get, #{query_string := QueryStr, headers := Headers}, _Req) ->
|
|||
{ok, <<"text/plain">>} -> get_configs_v2(QueryStr);
|
||||
{error, _} = Error -> {400, #{code => 'INVALID_ACCEPT', message => ?ERR_MSG(Error)}}
|
||||
end;
|
||||
configs(put, #{body := Conf, query_string := #{<<"mode">> := Mode}}, _Req) ->
|
||||
case emqx_conf_cli:load_config(Conf, #{mode => Mode, log => none}) of
|
||||
configs(put, #{body := Conf, query_string := #{<<"mode">> := Mode} = QS}, _Req) ->
|
||||
IgnoreReadonly = maps:get(<<"ignore_readonly">>, QS, false),
|
||||
case
|
||||
emqx_conf_cli:load_config(Conf, #{
|
||||
mode => Mode, log => none, ignore_readonly => IgnoreReadonly
|
||||
})
|
||||
of
|
||||
ok ->
|
||||
{200};
|
||||
%% bad hocon format
|
||||
{error, MsgList = [{_, _} | _]} ->
|
||||
JsonFun = fun(K, V) -> {K, emqx_utils_maps:binary_string(V)} end,
|
||||
JsonMap = emqx_utils_maps:jsonable_map(maps:from_list(MsgList), JsonFun),
|
||||
{400, #{<<"content-type">> => <<"text/plain">>}, JsonMap};
|
||||
{error, Msg} ->
|
||||
{error, Errors} ->
|
||||
Msg = emqx_logger_jsonfmt:best_effort_json_obj(#{errors => Errors}),
|
||||
{400, #{<<"content-type">> => <<"text/plain">>}, Msg}
|
||||
end.
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
|
||||
-include_lib("typerefl/include/types.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
%%-include_lib("emqx_plugins/include/emqx_plugins.hrl").
|
||||
-include_lib("emqx_plugins/include/emqx_plugins.hrl").
|
||||
|
||||
-export([
|
||||
api_spec/0,
|
||||
|
@ -34,6 +34,8 @@
|
|||
upload_install/2,
|
||||
plugin/2,
|
||||
update_plugin/2,
|
||||
plugin_config/2,
|
||||
plugin_schema/2,
|
||||
update_boot_order/2
|
||||
]).
|
||||
|
||||
|
@ -43,7 +45,8 @@
|
|||
install_package/2,
|
||||
delete_package/1,
|
||||
describe_package/1,
|
||||
ensure_action/2
|
||||
ensure_action/2,
|
||||
do_update_plugin_config/3
|
||||
]).
|
||||
|
||||
-define(NAME_RE, "^[A-Za-z]+[A-Za-z0-9-_.]*$").
|
||||
|
@ -52,7 +55,11 @@
|
|||
%% app_name must be a snake_case (no '-' allowed).
|
||||
-define(VSN_WILDCARD, "-*.tar.gz").
|
||||
|
||||
namespace() -> "plugins".
|
||||
-define(CONTENT_PLUGIN, plugin).
|
||||
-define(CONTENT_CONFIG, config).
|
||||
|
||||
namespace() ->
|
||||
"plugins".
|
||||
|
||||
api_spec() ->
|
||||
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
|
||||
|
@ -64,6 +71,8 @@ paths() ->
|
|||
"/plugins/:name",
|
||||
"/plugins/install",
|
||||
"/plugins/:name/:action",
|
||||
"/plugins/:name/config",
|
||||
"/plugins/:name/schema",
|
||||
"/plugins/:name/move"
|
||||
].
|
||||
|
||||
|
@ -97,15 +106,15 @@ schema("/plugins/install") ->
|
|||
schema => #{
|
||||
type => object,
|
||||
properties => #{
|
||||
plugin => #{type => string, format => binary}
|
||||
?CONTENT_PLUGIN => #{type => string, format => binary}
|
||||
}
|
||||
},
|
||||
encoding => #{plugin => #{'contentType' => 'application/gzip'}}
|
||||
encoding => #{?CONTENT_PLUGIN => #{'contentType' => 'application/gzip'}}
|
||||
}
|
||||
}
|
||||
},
|
||||
responses => #{
|
||||
200 => <<"OK">>,
|
||||
204 => <<"Install plugin successfully">>,
|
||||
400 => emqx_dashboard_swagger:error_codes(
|
||||
['UNEXPECTED_ERROR', 'ALREADY_INSTALLED', 'BAD_PLUGIN_INFO']
|
||||
)
|
||||
|
@ -117,7 +126,7 @@ schema("/plugins/:name") ->
|
|||
'operationId' => plugin,
|
||||
get => #{
|
||||
summary => <<"Get a plugin description">>,
|
||||
description => "Describs plugin according to its `release.json` and `README.md`.",
|
||||
description => "Describe a plugin according to its `release.json` and `README.md`.",
|
||||
tags => ?TAGS,
|
||||
parameters => [hoconsc:ref(name)],
|
||||
responses => #{
|
||||
|
@ -152,22 +161,80 @@ schema("/plugins/:name/:action") ->
|
|||
{action, hoconsc:mk(hoconsc:enum([start, stop]), #{desc => "Action", in => path})}
|
||||
],
|
||||
responses => #{
|
||||
200 => <<"OK">>,
|
||||
204 => <<"Trigger action successfully">>,
|
||||
404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Plugin Not Found">>)
|
||||
}
|
||||
}
|
||||
};
|
||||
schema("/plugins/:name/config") ->
|
||||
#{
|
||||
'operationId' => plugin_config,
|
||||
get => #{
|
||||
summary => <<"Get plugin config">>,
|
||||
description =>
|
||||
"Get plugin config. Config schema is defined by user's schema.avsc file.<br/>",
|
||||
tags => ?TAGS,
|
||||
parameters => [hoconsc:ref(name)],
|
||||
responses => #{
|
||||
%% avro data, json encoded
|
||||
200 => hoconsc:mk(binary()),
|
||||
404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Plugin Not Found">>)
|
||||
}
|
||||
},
|
||||
put => #{
|
||||
summary =>
|
||||
<<"Update plugin config">>,
|
||||
description =>
|
||||
"Update plugin config. Config schema defined by user's schema.avsc file.<br/>",
|
||||
tags => ?TAGS,
|
||||
parameters => [hoconsc:ref(name)],
|
||||
'requestBody' => #{
|
||||
content => #{
|
||||
'application/json' => #{
|
||||
schema => #{
|
||||
type => object
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
responses => #{
|
||||
204 => <<"Config updated successfully">>,
|
||||
400 => emqx_dashboard_swagger:error_codes(
|
||||
['BAD_CONFIG', 'UNEXPECTED_ERROR'], <<"Update plugin config failed">>
|
||||
),
|
||||
404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Plugin Not Found">>)
|
||||
}
|
||||
}
|
||||
};
|
||||
schema("/plugins/:name/schema") ->
|
||||
#{
|
||||
'operationId' => plugin_schema,
|
||||
get => #{
|
||||
summary => <<"Get installed plugin's AVRO schema">>,
|
||||
description => "Get plugin's config AVRO schema.",
|
||||
tags => ?TAGS,
|
||||
parameters => [hoconsc:ref(name)],
|
||||
responses => #{
|
||||
%% avro schema and i18n json object
|
||||
200 => hoconsc:mk(binary()),
|
||||
404 => emqx_dashboard_swagger:error_codes(
|
||||
['NOT_FOUND', 'FILE_NOT_EXISTED'],
|
||||
<<"Plugin Not Found or Plugin not given a schema file">>
|
||||
)
|
||||
}
|
||||
}
|
||||
};
|
||||
schema("/plugins/:name/move") ->
|
||||
#{
|
||||
'operationId' => update_boot_order,
|
||||
post => #{
|
||||
summary => <<"Move plugin within plugin hiearchy">>,
|
||||
summary => <<"Move plugin within plugin hierarchy">>,
|
||||
description => "Setting the boot order of plugins.",
|
||||
tags => ?TAGS,
|
||||
parameters => [hoconsc:ref(name)],
|
||||
'requestBody' => move_request_body(),
|
||||
responses => #{
|
||||
200 => <<"OK">>,
|
||||
204 => <<"Boot order changed successfully">>,
|
||||
400 => emqx_dashboard_swagger:error_codes(['MOVE_FAILED'], <<"Move failed">>)
|
||||
}
|
||||
}
|
||||
|
@ -338,7 +405,7 @@ upload_install(post, #{body := #{<<"plugin">> := Plugin}}) when is_map(Plugin) -
|
|||
%% File bin is too large, we use rpc:multicall instead of cluster_rpc:multicall
|
||||
NameVsn = string:trim(FileName, trailing, ".tar.gz"),
|
||||
case emqx_plugins:describe(NameVsn) of
|
||||
{error, #{error := "bad_info_file", return := {enoent, _}}} ->
|
||||
{error, #{error_msg := "bad_info_file", reason := {enoent, _}}} ->
|
||||
case emqx_plugins:parse_name_vsn(FileName) of
|
||||
{ok, AppName, _Vsn} ->
|
||||
AppDir = filename:join(emqx_plugins:install_dir(), AppName),
|
||||
|
@ -382,7 +449,7 @@ do_install_package(FileName, Bin) ->
|
|||
{[_ | _] = Res, []} = emqx_mgmt_api_plugins_proto_v2:install_package(Nodes, FileName, Bin),
|
||||
case lists:filter(fun(R) -> R =/= ok end, Res) of
|
||||
[] ->
|
||||
{200};
|
||||
{204};
|
||||
Filtered ->
|
||||
%% crash if we have unexpected errors or results
|
||||
[] = lists:filter(
|
||||
|
@ -394,7 +461,7 @@ do_install_package(FileName, Bin) ->
|
|||
),
|
||||
Reason =
|
||||
case hd(Filtered) of
|
||||
{error, #{error := Reason0}} -> Reason0;
|
||||
{error, #{error_msg := Reason0}} -> Reason0;
|
||||
{error, #{reason := Reason0}} -> Reason0
|
||||
end,
|
||||
{400, #{
|
||||
|
@ -418,6 +485,50 @@ update_plugin(put, #{bindings := #{name := Name, action := Action}}) ->
|
|||
Res = emqx_mgmt_api_plugins_proto_v2:ensure_action(Name, Action),
|
||||
return(204, Res).
|
||||
|
||||
plugin_config(get, #{bindings := #{name := NameVsn}}) ->
|
||||
case emqx_plugins:describe(NameVsn) of
|
||||
{ok, _} ->
|
||||
case emqx_plugins:get_config(NameVsn) of
|
||||
{ok, AvroJson} ->
|
||||
{200, #{<<"content-type">> => <<"'application/json'">>}, AvroJson};
|
||||
{error, _} ->
|
||||
{400, #{
|
||||
code => 'BAD_CONFIG',
|
||||
message => <<"Failed to get plugin config">>
|
||||
}}
|
||||
end;
|
||||
_ ->
|
||||
{404, plugin_not_found_msg()}
|
||||
end;
|
||||
plugin_config(put, #{bindings := #{name := NameVsn}, body := AvroJsonMap}) ->
|
||||
case emqx_plugins:describe(NameVsn) of
|
||||
{ok, _} ->
|
||||
case emqx_plugins:decode_plugin_avro_config(NameVsn, AvroJsonMap) of
|
||||
{ok, AvroValueConfig} ->
|
||||
Nodes = emqx:running_nodes(),
|
||||
%% cluster call with config in map (binary key-value)
|
||||
_Res = emqx_mgmt_api_plugins_proto_v3:update_plugin_config(
|
||||
Nodes, NameVsn, AvroJsonMap, AvroValueConfig
|
||||
),
|
||||
{204};
|
||||
{error, Reason} ->
|
||||
{400, #{
|
||||
code => 'BAD_CONFIG',
|
||||
message => readable_error_msg(Reason)
|
||||
}}
|
||||
end;
|
||||
_ ->
|
||||
{404, plugin_not_found_msg()}
|
||||
end.
|
||||
|
||||
plugin_schema(get, #{bindings := #{name := NameVsn}}) ->
|
||||
case emqx_plugins:describe(NameVsn) of
|
||||
{ok, _Plugin} ->
|
||||
{200, format_plugin_avsc_and_i18n(NameVsn)};
|
||||
_ ->
|
||||
{404, plugin_not_found_msg()}
|
||||
end.
|
||||
|
||||
update_boot_order(post, #{bindings := #{name := Name}, body := Body}) ->
|
||||
case parse_position(Body, Name) of
|
||||
{error, Reason} ->
|
||||
|
@ -425,11 +536,11 @@ update_boot_order(post, #{bindings := #{name := Name}, body := Body}) ->
|
|||
Position ->
|
||||
case emqx_plugins:ensure_enabled(Name, Position, _ConfLocation = global) of
|
||||
ok ->
|
||||
{200};
|
||||
{204};
|
||||
{error, Reason} ->
|
||||
{400, #{
|
||||
code => 'MOVE_FAILED',
|
||||
message => iolist_to_binary(io_lib:format("~p", [Reason]))
|
||||
message => readable_error_msg(Reason)
|
||||
}}
|
||||
end
|
||||
end.
|
||||
|
@ -443,7 +554,7 @@ install_package(FileName, Bin) ->
|
|||
ok = file:write_file(File, Bin),
|
||||
PackageName = string:trim(FileName, trailing, ".tar.gz"),
|
||||
case emqx_plugins:ensure_installed(PackageName) of
|
||||
{error, #{return := not_found}} = NotFound ->
|
||||
{error, #{reason := not_found}} = NotFound ->
|
||||
NotFound;
|
||||
{error, Reason} = Error ->
|
||||
?SLOG(error, Reason#{msg => "failed_to_install_plugin"}),
|
||||
|
@ -454,9 +565,9 @@ install_package(FileName, Bin) ->
|
|||
end.
|
||||
|
||||
%% For RPC plugin get
|
||||
describe_package(Name) ->
|
||||
describe_package(NameVsn) ->
|
||||
Node = node(),
|
||||
case emqx_plugins:describe(Name) of
|
||||
case emqx_plugins:describe(NameVsn) of
|
||||
{ok, Plugin} -> {Node, [Plugin]};
|
||||
_ -> {Node, []}
|
||||
end.
|
||||
|
@ -487,12 +598,32 @@ ensure_action(Name, restart) ->
|
|||
_ = emqx_plugins:restart(Name),
|
||||
ok.
|
||||
|
||||
%% for RPC plugin avro encoded config update
|
||||
do_update_plugin_config(Name, AvroJsonMap, PluginConfigMap) ->
|
||||
%% TODO: maybe use `PluginConfigMap` to validate config
|
||||
emqx_plugins:put_config(Name, AvroJsonMap, PluginConfigMap).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Helper functions
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
return(Code, ok) ->
|
||||
{Code};
|
||||
return(_, {error, #{error := "bad_info_file", return := {enoent, _} = Reason}}) ->
|
||||
{404, #{code => 'NOT_FOUND', message => iolist_to_binary(io_lib:format("~p", [Reason]))}};
|
||||
return(_, {error, #{error_msg := "bad_info_file", reason := {enoent, _} = Reason}}) ->
|
||||
{404, #{code => 'NOT_FOUND', message => readable_error_msg(Reason)}};
|
||||
return(_, {error, #{error_msg := "bad_avro_config_file", reason := {enoent, _} = Reason}}) ->
|
||||
{404, #{code => 'NOT_FOUND', message => readable_error_msg(Reason)}};
|
||||
return(_, {error, Reason}) ->
|
||||
{400, #{code => 'PARAM_ERROR', message => iolist_to_binary(io_lib:format("~p", [Reason]))}}.
|
||||
{400, #{code => 'PARAM_ERROR', message => readable_error_msg(Reason)}}.
|
||||
|
||||
plugin_not_found_msg() ->
|
||||
#{
|
||||
code => 'NOT_FOUND',
|
||||
message => <<"Plugin Not Found">>
|
||||
}.
|
||||
|
||||
readable_error_msg(Msg) ->
|
||||
emqx_utils:readable_error_msg(Msg).
|
||||
|
||||
parse_position(#{<<"position">> := <<"front">>}, _) ->
|
||||
front;
|
||||
|
@ -563,6 +694,18 @@ aggregate_status([{Node, Plugins} | List], Acc) ->
|
|||
),
|
||||
aggregate_status(List, NewAcc).
|
||||
|
||||
format_plugin_avsc_and_i18n(NameVsn) ->
|
||||
#{
|
||||
avsc => try_read_file(fun() -> emqx_plugins:plugin_avsc(NameVsn) end),
|
||||
i18n => try_read_file(fun() -> emqx_plugins:plugin_i18n(NameVsn) end)
|
||||
}.
|
||||
|
||||
try_read_file(Fun) ->
|
||||
case Fun() of
|
||||
{ok, Json} -> Json;
|
||||
_ -> null
|
||||
end.
|
||||
|
||||
% running_status: running loaded, stopped
|
||||
%% config_status: not_configured disable enable
|
||||
plugin_status(#{running_status := running}) -> running;
|
||||
|
|
|
@ -0,0 +1,69 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_mgmt_api_plugins_proto_v3).

-behaviour(emqx_bpapi).

-export([
    introduced_in/0,
    get_plugins/1,
    install_package/3,
    describe_package/2,
    delete_package/1,
    ensure_action/2,
    update_plugin_config/4
]).

-include_lib("emqx/include/bpapi.hrl").

introduced_in() ->
    "5.7.0".

-spec get_plugins([node()]) -> emqx_rpc:multicall_result().
get_plugins(Nodes) ->
    rpc:multicall(Nodes, emqx_mgmt_api_plugins, get_plugins, [], 15000).

-spec install_package([node()], binary() | string(), binary()) -> emqx_rpc:multicall_result().
install_package(Nodes, Filename, Bin) ->
    rpc:multicall(Nodes, emqx_mgmt_api_plugins, install_package, [Filename, Bin], 25000).

-spec describe_package([node()], binary() | string()) -> emqx_rpc:multicall_result().
describe_package(Nodes, Name) ->
    rpc:multicall(Nodes, emqx_mgmt_api_plugins, describe_package, [Name], 10000).

-spec delete_package(binary() | string()) -> ok | {error, any()}.
delete_package(Name) ->
    emqx_cluster_rpc:multicall(emqx_mgmt_api_plugins, delete_package, [Name], all, 10000).

-spec ensure_action(binary() | string(), 'restart' | 'start' | 'stop') -> ok | {error, any()}.
ensure_action(Name, Action) ->
    emqx_cluster_rpc:multicall(emqx_mgmt_api_plugins, ensure_action, [Name, Action], all, 10000).

-spec update_plugin_config(
    [node()],
    binary() | string(),
    binary(),
    map()
) ->
    emqx_rpc:multicall_result().
update_plugin_config(Nodes, NameVsn, AvroJsonMap, PluginConfig) ->
    rpc:multicall(
        Nodes,
        emqx_mgmt_api_plugins,
        do_update_plugin_config,
        [NameVsn, AvroJsonMap, PluginConfig],
        10000
    ).

@ -331,11 +331,12 @@ t_configs_key(_Config) ->
|
|||
Log
|
||||
),
|
||||
Log1 = emqx_utils_maps:deep_put([<<"log">>, <<"console">>, <<"level">>], Log, <<"error">>),
|
||||
?assertEqual(<<>>, update_configs_with_binary(iolist_to_binary(hocon_pp:do(Log1, #{})))),
|
||||
?assertEqual({ok, <<>>}, update_configs_with_binary(iolist_to_binary(hocon_pp:do(Log1, #{})))),
|
||||
?assertEqual(<<"error">>, read_conf([<<"log">>, <<"console">>, <<"level">>])),
|
||||
BadLog = emqx_utils_maps:deep_put([<<"log">>, <<"console">>, <<"level">>], Log, <<"erro1r">>),
|
||||
{error, Error} = update_configs_with_binary(iolist_to_binary(hocon_pp:do(BadLog, #{}))),
|
||||
ExpectError = #{
|
||||
<<"errors">> => #{
|
||||
<<"log">> =>
|
||||
#{
|
||||
<<"kind">> => <<"validation_error">>,
|
||||
|
@ -343,6 +344,7 @@ t_configs_key(_Config) ->
|
|||
<<"reason">> => <<"unable_to_convert_to_enum_symbol">>,
|
||||
<<"value">> => <<"erro1r">>
|
||||
}
|
||||
}
|
||||
},
|
||||
?assertEqual(ExpectError, emqx_utils_json:decode(Error, [return_maps])),
|
||||
ReadOnlyConf = #{
|
||||
|
@ -355,7 +357,8 @@ t_configs_key(_Config) ->
|
|||
},
|
||||
ReadOnlyBin = iolist_to_binary(hocon_pp:do(ReadOnlyConf, #{})),
|
||||
{error, ReadOnlyError} = update_configs_with_binary(ReadOnlyBin),
|
||||
?assertEqual(<<"Cannot update read-only key 'cluster'.">>, ReadOnlyError),
|
||||
?assertEqual(<<"{\"errors\":\"Cannot update read-only key 'cluster'.\"}">>, ReadOnlyError),
|
||||
?assertMatch({ok, <<>>}, update_configs_with_binary(ReadOnlyBin, _IgnoreReadonly = true)),
|
||||
ok.
|
||||
|
||||
t_get_configs_in_different_accept(_Config) ->
|
||||
|
@ -405,7 +408,7 @@ t_create_webhook_v1_bridges_api(Config) ->
|
|||
WebHookFile = filename:join(?config(data_dir, Config), "webhook_v1.conf"),
|
||||
?assertMatch({ok, _}, hocon:files([WebHookFile])),
|
||||
{ok, WebHookBin} = file:read_file(WebHookFile),
|
||||
?assertEqual(<<>>, update_configs_with_binary(WebHookBin)),
|
||||
?assertEqual({ok, <<>>}, update_configs_with_binary(WebHookBin)),
|
||||
Actions =
|
||||
#{
|
||||
<<"http">> =>
|
||||
|
@ -487,6 +490,22 @@ t_create_webhook_v1_bridges_api(Config) ->
|
|||
?assertEqual(#{<<"webhook">> => #{}}, emqx_conf:get_raw([<<"bridges">>])),
|
||||
ok.
|
||||
|
||||
t_config_update_parse_error(_Config) ->
|
||||
?assertMatch(
|
||||
{error, <<"{\"errors\":\"{parse_error,", _/binary>>},
|
||||
update_configs_with_binary(<<"not an object">>)
|
||||
),
|
||||
?assertMatch(
|
||||
{error, <<"{\"errors\":\"{parse_error,", _/binary>>},
|
||||
update_configs_with_binary(<<"a = \"tlsv1\"\"\"3e-01">>)
|
||||
).
|
||||
|
||||
t_config_update_unknown_root(_Config) ->
|
||||
?assertMatch(
|
||||
{error, <<"{\"errors\":{\"a\":\"{root_key_not_found,", _/binary>>},
|
||||
update_configs_with_binary(<<"a = \"tlsv1.3\"">>)
|
||||
).
|
||||
|
||||
%% Helpers
|
||||
|
||||
get_config(Name) ->
|
||||
|
@ -539,18 +558,29 @@ get_configs_with_binary(Key, Node) ->
|
|||
end.
|
||||
|
||||
update_configs_with_binary(Bin) ->
|
||||
Path = emqx_mgmt_api_test_util:api_path(["configs"]),
|
||||
update_configs_with_binary(Bin, _IgnoreReadonly = undefined).
|
||||
|
||||
update_configs_with_binary(Bin, IgnoreReadonly) ->
|
||||
Path =
|
||||
case IgnoreReadonly of
|
||||
undefined ->
|
||||
emqx_mgmt_api_test_util:api_path(["configs"]);
|
||||
Boolean ->
|
||||
emqx_mgmt_api_test_util:api_path([
|
||||
"configs?ignore_readonly=" ++ atom_to_list(Boolean)
|
||||
])
|
||||
end,
|
||||
Auth = emqx_mgmt_api_test_util:auth_header_(),
|
||||
Headers = [{"accept", "text/plain"}, Auth],
|
||||
case httpc:request(put, {Path, Headers, "text/plain", Bin}, [], [{body_format, binary}]) of
|
||||
{ok, {{"HTTP/1.1", Code, _}, _Headers, Body}} when
|
||||
Code >= 200 andalso Code =< 299
|
||||
->
|
||||
Body;
|
||||
{ok, {{"HTTP/1.1", _Code, _}, _Headers, Body}} ->
|
||||
{ok, Body};
|
||||
{ok, {{"HTTP/1.1", 400, _}, _Headers, Body}} ->
|
||||
{error, Body};
|
||||
Error ->
|
||||
Error
|
||||
error({unexpected, Error})
|
||||
end.
|
||||
|
||||
update_config(Name, Change) ->
|
||||
|
|
|
@ -37,10 +37,10 @@ init_per_suite(Config) ->
|
|||
ok = filelib:ensure_dir(WorkDir),
|
||||
DemoShDir1 = string:replace(WorkDir, "emqx_mgmt_api_plugins", "emqx_plugins"),
|
||||
DemoShDir = lists:flatten(string:replace(DemoShDir1, "emqx_management", "emqx_plugins")),
|
||||
OrigInstallDir = emqx_plugins:get_config(install_dir, undefined),
|
||||
OrigInstallDir = emqx_plugins:get_config_interal(install_dir, undefined),
|
||||
ok = filelib:ensure_dir(DemoShDir),
|
||||
emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_plugins]),
|
||||
emqx_plugins:put_config(install_dir, DemoShDir),
|
||||
emqx_plugins:put_config_internal(install_dir, DemoShDir),
|
||||
[{demo_sh_dir, DemoShDir}, {orig_install_dir, OrigInstallDir} | Config].
|
||||
|
||||
end_per_suite(Config) ->
|
||||
|
@ -48,7 +48,7 @@ end_per_suite(Config) ->
|
|||
%% restore config
|
||||
case proplists:get_value(orig_install_dir, Config) of
|
||||
undefined -> ok;
|
||||
OrigInstallDir -> emqx_plugins:put_config(install_dir, OrigInstallDir)
|
||||
OrigInstallDir -> emqx_plugins:put_config_internal(install_dir, OrigInstallDir)
|
||||
end,
|
||||
emqx_mgmt_api_test_util:end_suite([emqx_plugins, emqx_conf]),
|
||||
ok.
|
||||
|
@ -271,7 +271,7 @@ install_plugin(FilePath) ->
|
|||
Token
|
||||
)
|
||||
of
|
||||
{ok, {{"HTTP/1.1", 200, "OK"}, _Headers, <<>>}} -> ok;
|
||||
{ok, {{"HTTP/1.1", 204, "No Content"}, _Headers, <<>>}} -> ok;
|
||||
Error -> Error
|
||||
end.
|
||||
|
||||
|
@ -288,7 +288,7 @@ install_plugin(Config, FilePath) ->
|
|||
Auth
|
||||
)
|
||||
of
|
||||
{ok, {{"HTTP/1.1", 200, "OK"}, _Headers, <<>>}} -> ok;
|
||||
{ok, {{"HTTP/1.1", 204, "No Content"}, _Headers, <<>>}} -> ok;
|
||||
Error -> Error
|
||||
end.
|
||||
|
||||
|
|
|
@ -65,6 +65,7 @@ fields(validation) ->
|
|||
#{
|
||||
desc => ?DESC("topics"),
|
||||
converter => fun ensure_array/2,
|
||||
validator => fun validate_unique_topics/1,
|
||||
required => true
|
||||
}
|
||||
)},
|
||||
|
@ -269,3 +270,23 @@ do_validate_unique_schema_checks(
|
|||
end;
|
||||
do_validate_unique_schema_checks([_Check | Rest], Seen, Duplicated) ->
|
||||
do_validate_unique_schema_checks(Rest, Seen, Duplicated).
|
||||
|
||||
validate_unique_topics(Topics) ->
|
||||
Grouped = maps:groups_from_list(
|
||||
fun(T) -> T end,
|
||||
Topics
|
||||
),
|
||||
DuplicatedMap = maps:filter(
|
||||
fun(_T, Ts) -> length(Ts) > 1 end,
|
||||
Grouped
|
||||
),
|
||||
case maps:keys(DuplicatedMap) of
|
||||
[] ->
|
||||
ok;
|
||||
Duplicated ->
|
||||
Msg = iolist_to_binary([
|
||||
<<"duplicated topics: ">>,
|
||||
lists:join(", ", Duplicated)
|
||||
]),
|
||||
{error, Msg}
|
||||
end.
|
||||
|
|
|
@ -232,6 +232,65 @@ check_test_() ->
|
|||
|
||||
duplicated_check_test_() ->
|
||||
[
|
||||
{"duplicated topics 1",
|
||||
?_assertThrow(
|
||||
{_Schema, [
|
||||
#{
|
||||
reason := <<"duplicated topics: t/1">>,
|
||||
kind := validation_error,
|
||||
path := "message_validation.validations.1.topics"
|
||||
}
|
||||
]},
|
||||
parse_and_check([
|
||||
validation(
|
||||
<<"foo">>,
|
||||
[schema_check(json, <<"a">>)],
|
||||
#{<<"topics">> => [<<"t/1">>, <<"t/1">>]}
|
||||
)
|
||||
])
|
||||
)},
|
||||
{"duplicated topics 2",
|
||||
?_assertThrow(
|
||||
{_Schema, [
|
||||
#{
|
||||
reason := <<"duplicated topics: t/1">>,
|
||||
kind := validation_error,
|
||||
path := "message_validation.validations.1.topics"
|
||||
}
|
||||
]},
|
||||
parse_and_check([
|
||||
validation(
|
||||
<<"foo">>,
|
||||
[schema_check(json, <<"a">>)],
|
||||
#{<<"topics">> => [<<"t/1">>, <<"t/#">>, <<"t/1">>]}
|
||||
)
|
||||
])
|
||||
)},
|
||||
{"duplicated topics 3",
|
||||
?_assertThrow(
|
||||
{_Schema, [
|
||||
#{
|
||||
reason := <<"duplicated topics: t/1, t/2">>,
|
||||
kind := validation_error,
|
||||
path := "message_validation.validations.1.topics"
|
||||
}
|
||||
]},
|
||||
parse_and_check([
|
||||
validation(
|
||||
<<"foo">>,
|
||||
[schema_check(json, <<"a">>)],
|
||||
#{
|
||||
<<"topics">> => [
|
||||
<<"t/1">>,
|
||||
<<"t/#">>,
|
||||
<<"t/1">>,
|
||||
<<"t/2">>,
|
||||
<<"t/2">>
|
||||
]
|
||||
}
|
||||
)
|
||||
])
|
||||
)},
|
||||
{"duplicated sql checks are not checked",
|
||||
?_assertMatch(
|
||||
[#{<<"checks">> := [_, _]}],
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{application, emqx_mysql, [
|
||||
{description, "EMQX MySQL Database Connector"},
|
||||
{vsn, "0.1.8"},
|
||||
{vsn, "0.1.9"},
|
||||
{registered, []},
|
||||
{applications, [
|
||||
kernel,
|
||||
|
|
|
@ -498,6 +498,8 @@ on_sql_query(
|
|||
) ->
|
||||
LogMeta = #{connector => InstId, sql => SQLOrKey, state => State},
|
||||
?TRACE("QUERY", "mysql_connector_received", LogMeta),
|
||||
ChannelID = maps:get(channel_id, State, no_channel),
|
||||
emqx_trace:rendered_action_template(ChannelID, #{sql => SQLOrKey}),
|
||||
Worker = ecpool:get_client(PoolName),
|
||||
case ecpool_worker:client(Worker) of
|
||||
{ok, Conn} ->
|
||||
|
|
|
@ -210,7 +210,7 @@ on_query(
|
|||
}),
|
||||
Type = query,
|
||||
{NameOrSQL2, Data} = proc_sql_params(TypeOrKey, NameOrSQL, Params, State),
|
||||
Res = on_sql_query(InstId, PoolName, Type, ?SYNC_QUERY_MODE, NameOrSQL2, Data),
|
||||
Res = on_sql_query(InstId, TypeOrKey, PoolName, Type, ?SYNC_QUERY_MODE, NameOrSQL2, Data),
|
||||
handle_result(Res).
|
||||
|
||||
on_batch_query(
|
||||
|
@ -244,7 +244,9 @@ on_batch_query(
|
|||
Datas2 = [emqx_placeholder:proc_sql(TokenList, Data) || Data <- Datas],
|
||||
St = maps:get(BinKey, Sts),
|
||||
case
|
||||
on_sql_query(InstId, PoolName, execute_batch, ?SYNC_QUERY_MODE, St, Datas2)
|
||||
on_sql_query(
|
||||
InstId, BinKey, PoolName, execute_batch, ?SYNC_QUERY_MODE, St, Datas2
|
||||
)
|
||||
of
|
||||
{ok, Results} ->
|
||||
handle_batch_result(Results, 0);
|
||||
|
@ -281,7 +283,13 @@ proc_sql_params(TypeOrKey, SQLOrData, Params, #{
|
|||
end
|
||||
end.
|
||||
|
||||
on_sql_query(InstId, PoolName, Type, ApplyMode, NameOrSQL, Data) ->
|
||||
on_sql_query(InstId, ChannelID, PoolName, Type, ApplyMode, NameOrSQL, Data) ->
|
||||
emqx_trace:rendered_action_template(ChannelID, #{
|
||||
type => Type,
|
||||
apply_mode => ApplyMode,
|
||||
name_or_sql => NameOrSQL,
|
||||
data => Data
|
||||
}),
|
||||
case ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Data]}, ApplyMode) of
|
||||
{error, Reason} = Result ->
|
||||
?tp(
|
||||
|
|
|
@ -19,4 +19,25 @@

-define(CONF_ROOT, plugins).

-define(PLUGIN_SERDE_TAB, emqx_plugins_schema_serde_tab).

-define(CONFIG_FORMAT_AVRO, config_format_avro).
-define(CONFIG_FORMAT_MAP, config_format_map).

-type schema_name() :: binary().
-type avsc() :: binary().

-type encoded_data() :: iodata().
-type decoded_data() :: map().

-record(plugin_schema_serde, {
    name :: schema_name(),
    eval_context :: term(),
    %% TODO: fields to mark schema import status
    %% schema_imported :: boolean(),
    %% for future use
    extra = []
}).
-type plugin_schema_serde() :: #plugin_schema_serde{}.

-endif.

@ -1,5 +1,8 @@
%% -*- mode: erlang -*-

{deps, [{emqx, {path, "../emqx"}}]}.
{deps, [
    {emqx, {path, "../emqx"}},
    {erlavro, {git, "https://github.com/emqx/erlavro.git", {tag, "2.10.0"}}}
]}.

{project_plugins, [erlfmt]}.

@ -1,9 +1,9 @@
%% -*- mode: erlang -*-
{application, emqx_plugins, [
    {description, "EMQX Plugin Management"},
    {vsn, "0.1.8"},
    {vsn, "0.2.0"},
    {modules, []},
    {mod, {emqx_plugins_app, []}},
    {applications, [kernel, stdlib, emqx]},
    {applications, [kernel, stdlib, emqx, erlavro]},
    {env, []}
]}.

File diff suppressed because it is too large
|
@ -0,0 +1,279 @@
|
|||
%%--------------------------------------------------------------------
|
||||
%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||
%%
|
||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||
%% you may not use this file except in compliance with the License.
|
||||
%% You may obtain a copy of the License at
|
||||
%%
|
||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||
%%
|
||||
%% Unless required by applicable law or agreed to in writing, software
|
||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
%% See the License for the specific language governing permissions and
|
||||
%% limitations under the License.
|
||||
%%--------------------------------------------------------------------
|
||||
|
||||
-module(emqx_plugins_serde).
|
||||
|
||||
-include("emqx_plugins.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
|
||||
%% API
|
||||
-export([
|
||||
start_link/0,
|
||||
lookup_serde/1,
|
||||
add_schema/2,
|
||||
get_schema/1,
|
||||
delete_schema/1
|
||||
]).
|
||||
|
||||
%% `gen_server' API
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_continue/2,
|
||||
terminate/2
|
||||
]).
|
||||
|
||||
-export([
|
||||
decode/2,
|
||||
encode/2
|
||||
]).
|
||||
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
%% API
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
|
||||
start_link() ->
|
||||
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
|
||||
|
||||
-spec lookup_serde(schema_name()) -> {ok, plugin_schema_serde()} | {error, not_found}.
|
||||
lookup_serde(SchemaName) ->
|
||||
case ets:lookup(?PLUGIN_SERDE_TAB, to_bin(SchemaName)) of
|
||||
[] ->
|
||||
{error, not_found};
|
||||
[Serde] ->
|
||||
{ok, Serde}
|
||||
end.
|
||||
|
||||
-spec add_schema(schema_name(), avsc()) -> ok | {error, term()}.
|
||||
add_schema(NameVsn, Path) ->
|
||||
case lookup_serde(NameVsn) of
|
||||
{ok, _Serde} ->
|
||||
?SLOG(warning, #{msg => "plugin_schema_already_exists", plugin => NameVsn}),
|
||||
{error, already_exists};
|
||||
{error, not_found} ->
|
||||
case gen_server:call(?MODULE, {build_serdes, to_bin(NameVsn), Path}, infinity) of
|
||||
ok ->
|
||||
?SLOG(debug, #{msg => "plugin_schema_added", plugin => NameVsn}),
|
||||
ok;
|
||||
{error, Reason} = E ->
|
||||
?SLOG(error, #{
|
||||
msg => "plugin_schema_add_failed",
|
||||
plugin => NameVsn,
|
||||
reason => emqx_utils:readable_error_msg(Reason)
|
||||
}),
|
||||
E
|
||||
end
|
||||
end.
|
||||
|
||||
get_schema(NameVsn) ->
|
||||
Path = emqx_plugins:avsc_file_path(NameVsn),
|
||||
case read_avsc_file(Path) of
|
||||
{ok, Avsc} ->
|
||||
{ok, Avsc};
|
||||
{error, Reason} ->
|
||||
?SLOG(warning, Reason),
|
||||
{error, Reason}
|
||||
end.
|
||||
|
||||
-spec delete_schema(schema_name()) -> ok | {error, term()}.
|
||||
delete_schema(NameVsn) ->
|
||||
case lookup_serde(NameVsn) of
|
||||
{ok, _Serde} ->
|
||||
async_delete_serdes([NameVsn]),
|
||||
ok;
|
||||
{error, not_found} ->
|
||||
{error, not_found}
|
||||
end.
|
||||
|
||||
-spec decode(schema_name(), encoded_data()) -> {ok, decoded_data()} | {error, any()}.
|
||||
decode(SerdeName, RawData) ->
|
||||
with_serde(
|
||||
?FUNCTION_NAME,
|
||||
SerdeName,
|
||||
[RawData]
|
||||
).
|
||||
|
||||
-spec encode(schema_name(), decoded_data()) -> {ok, encoded_data()} | {error, any()}.
|
||||
encode(SerdeName, Data) ->
|
||||
with_serde(
|
||||
?FUNCTION_NAME,
|
||||
SerdeName,
|
||||
[Data]
|
||||
).
|
||||
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
%% `gen_server' API
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
|
||||
init(_) ->
|
||||
process_flag(trap_exit, true),
|
||||
ok = emqx_utils_ets:new(?PLUGIN_SERDE_TAB, [
|
||||
public, ordered_set, {keypos, #plugin_schema_serde.name}
|
||||
]),
|
||||
State = #{},
|
||||
AvscPaths = get_plugin_avscs(),
|
||||
{ok, State, {continue, {build_serdes, AvscPaths}}}.
|
||||
|
||||
handle_continue({build_serdes, AvscPaths}, State) ->
|
||||
_ = build_serdes(AvscPaths),
|
||||
{noreply, State}.
|
||||
|
||||
handle_call({build_serdes, NameVsn, AvscPath}, _From, State) ->
|
||||
BuildRes = do_build_serde({NameVsn, AvscPath}),
|
||||
{reply, BuildRes, State};
|
||||
handle_call(_Call, _From, State) ->
|
||||
{reply, {error, unknown_call}, State}.
|
||||
|
||||
handle_cast({delete_serdes, Names}, State) ->
|
||||
lists:foreach(fun ensure_serde_absent/1, Names),
|
||||
{noreply, State};
|
||||
handle_cast(_Cast, State) ->
|
||||
{noreply, State}.
|
||||
|
||||
terminate(_Reason, _State) ->
|
||||
ok.
|
||||
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
%% Internal fns
|
||||
%%-------------------------------------------------------------------------------------------------
|
||||
|
||||
-spec get_plugin_avscs() -> [{string(), string()}].
|
||||
get_plugin_avscs() ->
|
||||
Pattern = filename:join([emqx_plugins:install_dir(), "*", "config_schema.avsc"]),
|
||||
lists:foldl(
|
||||
fun(AvscPath, AccIn) ->
|
||||
[_, NameVsn | _] = lists:reverse(filename:split(AvscPath)),
|
||||
[{NameVsn, AvscPath} | AccIn]
|
||||
end,
|
||||
_Acc0 = [],
|
||||
filelib:wildcard(Pattern)
|
||||
).
|
||||
|
||||
build_serdes(AvscPaths) ->
|
||||
ok = lists:foreach(fun do_build_serde/1, AvscPaths).
|
||||
|
||||
do_build_serde({NameVsn, AvscPath}) ->
|
||||
try
|
||||
Serde = make_serde(NameVsn, AvscPath),
|
||||
true = ets:insert(?PLUGIN_SERDE_TAB, Serde),
|
||||
ok
|
||||
catch
|
||||
Kind:Error:Stacktrace ->
|
||||
?SLOG(
|
||||
error,
|
||||
#{
|
||||
msg => "error_building_plugin_schema_serde",
|
||||
name => NameVsn,
|
||||
kind => Kind,
|
||||
error => Error,
|
||||
stacktrace => Stacktrace
|
||||
}
|
||||
),
|
||||
{error, Error}
|
||||
end.
|
||||
|
||||
make_serde(NameVsn, AvscPath) ->
|
||||
{ok, AvscBin} = read_avsc_file(AvscPath),
|
||||
Store0 = avro_schema_store:new([map]),
|
||||
%% import the schema into the map store with an assigned name
|
||||
%% if it's a named schema (e.g. struct), then Name is added as alias
|
||||
Store = avro_schema_store:import_schema_json(NameVsn, AvscBin, Store0),
|
||||
#plugin_schema_serde{
|
||||
name = NameVsn,
|
||||
eval_context = Store
|
||||
}.
|
||||
|
||||
ensure_serde_absent(Name) when not is_binary(Name) ->
|
||||
ensure_serde_absent(to_bin(Name));
|
||||
ensure_serde_absent(Name) ->
|
||||
case lookup_serde(Name) of
|
||||
{ok, _Serde} ->
|
||||
_ = ets:delete(?PLUGIN_SERDE_TAB, Name),
|
||||
ok;
|
||||
{error, not_found} ->
|
||||
ok
|
||||
end.
|
||||
|
||||
async_delete_serdes(Names) ->
|
||||
gen_server:cast(?MODULE, {delete_serdes, Names}).
|
||||
|
||||
with_serde(Op, SerdeName, Args) ->
|
||||
WhichOp = which_op(Op),
|
||||
ErrMsg = error_msg(Op),
|
||||
try
|
||||
eval_serde(Op, ErrMsg, SerdeName, Args)
|
||||
catch
|
||||
throw:Reason ->
|
||||
?SLOG(error, Reason#{
|
||||
which_op => WhichOp,
|
||||
reason => emqx_utils:readable_error_msg(Reason)
|
||||
}),
|
||||
{error, Reason};
|
||||
error:Reason:Stacktrace ->
|
||||
%% unexpected errors, log stacktrace
|
||||
?SLOG(warning, #{
|
||||
msg => "plugin_schema_op_failed",
|
||||
which_op => WhichOp,
|
||||
exception => Reason,
|
||||
stacktrace => Stacktrace
|
||||
}),
|
||||
{error, #{
|
||||
which_op => WhichOp,
|
||||
reason => Reason
|
||||
}}
|
||||
end.
|
||||
|
||||
eval_serde(Op, ErrMsg, SerdeName, Args) ->
|
||||
case lookup_serde(SerdeName) of
|
||||
{ok, Serde} ->
|
||||
eval_serde(Op, Serde, Args);
|
||||
{error, not_found} ->
|
||||
throw(#{
|
||||
error_msg => ErrMsg,
|
||||
reason => plugin_serde_not_found,
|
||||
serde_name => SerdeName
|
||||
})
|
||||
end.
|
||||
|
||||
eval_serde(decode, #plugin_schema_serde{name = Name, eval_context = Store}, [Data]) ->
|
||||
Opts = avro:make_decoder_options([{map_type, map}, {record_type, map}, {encoding, avro_json}]),
|
||||
{ok, avro_json_decoder:decode_value(Data, Name, Store, Opts)};
|
||||
eval_serde(encode, #plugin_schema_serde{name = Name, eval_context = Store}, [Data]) ->
|
||||
{ok, avro_json_encoder:encode(Store, Name, Data)};
|
||||
eval_serde(_, _, _) ->
|
||||
throw(#{error_msg => "unexpected_plugin_avro_op"}).
|
||||
|
||||
which_op(Op) ->
|
||||
atom_to_list(Op) ++ "_avro_json".
|
||||
|
||||
error_msg(Op) ->
|
||||
atom_to_list(Op) ++ "_avro_data".
|
||||
|
||||
read_avsc_file(Path) ->
|
||||
case file:read_file(Path) of
|
||||
{ok, Bin} ->
|
||||
{ok, Bin};
|
||||
{error, _} ->
|
||||
{error, #{
|
||||
error => "failed_to_read_plugin_schema",
|
||||
path => Path
|
||||
}}
|
||||
end.
|
||||
|
||||
to_bin(A) when is_atom(A) -> atom_to_binary(A);
|
||||
to_bin(L) when is_list(L) -> iolist_to_binary(L);
|
||||
to_bin(B) when is_binary(B) -> B.
|
|
@ -32,4 +32,14 @@ init([]) ->
        intensity => 100,
        period => 10
    },
    {ok, {SupFlags, []}}.
    ChildSpecs = [child_spec(emqx_plugins_serde)],
    {ok, {SupFlags, ChildSpecs}}.

child_spec(Mod) ->
    #{
        id => Mod,
        start => {Mod, start_link, []},
        restart => permanent,
        shutdown => 5_000,
        type => worker
    }.

|
@ -346,7 +346,7 @@ t_enable_disable(Config) ->
|
|||
?assertEqual([#{name_vsn => NameVsn, enable => true}], emqx_plugins:configured()),
|
||||
?assertMatch(
|
||||
{error, #{
|
||||
reason := "bad_plugin_config_status",
|
||||
error_msg := "bad_plugin_config_status",
|
||||
hint := "disable_the_plugin_first"
|
||||
}},
|
||||
emqx_plugins:ensure_uninstalled(NameVsn)
|
||||
|
@ -374,15 +374,15 @@ t_bad_tar_gz(Config) ->
|
|||
ok = file:write_file(FakeTarTz, "a\n"),
|
||||
?assertMatch(
|
||||
{error, #{
|
||||
reason := "bad_plugin_package",
|
||||
return := eof
|
||||
error_msg := "bad_plugin_package",
|
||||
reason := eof
|
||||
}},
|
||||
emqx_plugins:ensure_installed("fake-vsn")
|
||||
),
|
||||
?assertMatch(
|
||||
{error, #{
|
||||
reason := "failed_to_extract_plugin_package",
|
||||
return := not_found
|
||||
error_msg := "failed_to_extract_plugin_package",
|
||||
reason := not_found
|
||||
}},
|
||||
emqx_plugins:ensure_installed("nonexisting")
|
||||
),
|
||||
|
@ -412,7 +412,7 @@ t_bad_tar_gz2(Config) ->
|
|||
?assert(filelib:is_regular(TarGz)),
|
||||
%% failed to install, it also cleans up the bad content of .tar.gz file
|
||||
?assertMatch({error, _}, emqx_plugins:ensure_installed(NameVsn)),
|
||||
?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir(NameVsn))),
|
||||
?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:plugin_dir(NameVsn))),
|
||||
%% but the tar.gz file is still around
|
||||
?assert(filelib:is_regular(TarGz)),
|
||||
ok.
|
||||
|
@ -440,8 +440,8 @@ t_tar_vsn_content_mismatch(Config) ->
|
|||
%% failed to install, it also cleans up content of the bad .tar.gz file even
|
||||
%% if in other directory
|
||||
?assertMatch({error, _}, emqx_plugins:ensure_installed(NameVsn)),
|
||||
?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir(NameVsn))),
|
||||
?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir("foo-0.2"))),
|
||||
?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:plugin_dir(NameVsn))),
|
||||
?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:plugin_dir("foo-0.2"))),
|
||||
%% the tar.gz file is still around
|
||||
?assert(filelib:is_regular(TarGz)),
|
||||
ok.
|
||||
|
@ -455,15 +455,15 @@ t_bad_info_json(Config) ->
|
|||
ok = write_info_file(Config, NameVsn, "bad-syntax"),
|
||||
?assertMatch(
|
||||
{error, #{
|
||||
error := "bad_info_file",
|
||||
return := {parse_error, _}
|
||||
error_msg := "bad_info_file",
|
||||
reason := {parse_error, _}
|
||||
}},
|
||||
emqx_plugins:describe(NameVsn)
|
||||
),
|
||||
ok = write_info_file(Config, NameVsn, "{\"bad\": \"obj\"}"),
|
||||
?assertMatch(
|
||||
{error, #{
|
||||
error := "bad_info_file_content",
|
||||
error_msg := "bad_info_file_content",
|
||||
mandatory_fields := _
|
||||
}},
|
||||
emqx_plugins:describe(NameVsn)
|
||||
|
@ -499,7 +499,7 @@ t_elixir_plugin(Config) ->
|
|||
ok = emqx_plugins:ensure_installed(NameVsn),
|
||||
%% idempotent
|
||||
ok = emqx_plugins:ensure_installed(NameVsn),
|
||||
{ok, Info} = emqx_plugins:read_plugin(NameVsn, #{}),
|
||||
{ok, Info} = emqx_plugins:read_plugin_info(NameVsn, #{}),
|
||||
?assertEqual([Info], emqx_plugins:list()),
|
||||
%% start
|
||||
ok = emqx_plugins:ensure_started(NameVsn),
|
||||
|
@ -626,9 +626,9 @@ group_t_copy_plugin_to_a_new_node({init, Config}) ->
|
|||
}
|
||||
),
|
||||
[CopyFromNode] = emqx_cth_cluster:start([SpecCopyFrom#{join_to => undefined}]),
|
||||
ok = rpc:call(CopyFromNode, emqx_plugins, put_config, [install_dir, FromInstallDir]),
|
||||
ok = rpc:call(CopyFromNode, emqx_plugins, put_config_internal, [install_dir, FromInstallDir]),
|
||||
[CopyToNode] = emqx_cth_cluster:start([SpecCopyTo#{join_to => undefined}]),
|
||||
ok = rpc:call(CopyToNode, emqx_plugins, put_config, [install_dir, ToInstallDir]),
|
||||
ok = rpc:call(CopyToNode, emqx_plugins, put_config_internal, [install_dir, ToInstallDir]),
|
||||
NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
|
||||
ok = rpc:call(CopyFromNode, emqx_plugins, ensure_installed, [NameVsn]),
|
||||
ok = rpc:call(CopyFromNode, emqx_plugins, ensure_started, [NameVsn]),
|
||||
|
@ -658,7 +658,7 @@ group_t_copy_plugin_to_a_new_node(Config) ->
|
|||
CopyFromNode = proplists:get_value(copy_from_node, Config),
|
||||
CopyToNode = proplists:get_value(copy_to_node, Config),
|
||||
CopyToDir = proplists:get_value(to_install_dir, Config),
|
||||
CopyFromPluginsState = rpc:call(CopyFromNode, emqx_plugins, get_config, [[states], []]),
|
||||
CopyFromPluginsState = rpc:call(CopyFromNode, emqx_plugins, get_config_interal, [[states], []]),
|
||||
NameVsn = proplists:get_value(name_vsn, Config),
|
||||
PluginName = proplists:get_value(plugin_name, Config),
|
||||
PluginApp = list_to_atom(PluginName),
|
||||
|
@ -681,7 +681,7 @@ group_t_copy_plugin_to_a_new_node(Config) ->
|
|||
),
|
||||
ok = rpc:call(CopyToNode, ekka, join, [CopyFromNode]),
|
||||
%% Mimic cluster-override conf copying
|
||||
ok = rpc:call(CopyToNode, emqx_plugins, put_config, [[states], CopyFromPluginsState]),
|
||||
ok = rpc:call(CopyToNode, emqx_plugins, put_config_internal, [[states], CopyFromPluginsState]),
|
||||
%% Plugin copying is triggered upon app restart on a new node.
|
||||
%% This is similar to emqx_conf, which copies cluster-override conf upon start,
|
||||
%% see: emqx_conf_app:init_conf/0
|
||||
|
@ -734,7 +734,7 @@ group_t_copy_plugin_to_a_new_node_single_node(Config) ->
|
|||
%% successfully even if it's not extracted yet. Simply starting
|
||||
%% the node would crash if not working properly.
|
||||
ct:pal("~p config:\n ~p", [
|
||||
CopyToNode, erpc:call(CopyToNode, emqx_plugins, get_config, [[], #{}])
|
||||
CopyToNode, erpc:call(CopyToNode, emqx_plugins, get_config_interal, [[], #{}])
|
||||
]),
|
||||
ct:pal("~p install_dir:\n ~p", [
|
||||
CopyToNode, erpc:call(CopyToNode, file, list_dir, [ToInstallDir])
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
|
||||
-module(emqx_plugins_tests).
|
||||
|
||||
-include("emqx_plugins.hrl").
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
-compile(nowarn_export_all).
|
||||
|
@ -28,20 +29,20 @@ ensure_configured_test_todo() ->
|
|||
after
|
||||
emqx_plugins:put_configured([])
|
||||
end,
|
||||
meck:unload(emqx).
|
||||
unmeck_emqx().
|
||||
|
||||
test_ensure_configured() ->
|
||||
ok = emqx_plugins:put_configured([]),
|
||||
P1 = #{name_vsn => "p-1", enable => true},
|
||||
P2 = #{name_vsn => "p-2", enable => true},
|
||||
P3 = #{name_vsn => "p-3", enable => false},
|
||||
emqx_plugins:ensure_configured(P1, front),
|
||||
emqx_plugins:ensure_configured(P2, {before, <<"p-1">>}),
|
||||
emqx_plugins:ensure_configured(P3, {before, <<"p-1">>}),
|
    emqx_plugins:ensure_configured(P1, front, local),
    emqx_plugins:ensure_configured(P2, {before, <<"p-1">>}, local),
    emqx_plugins:ensure_configured(P3, {before, <<"p-1">>}, local),
    ?assertEqual([P2, P3, P1], emqx_plugins:configured()),
    ?assertThrow(
        #{error := "position_anchor_plugin_not_configured"},
        emqx_plugins:ensure_configured(P3, {before, <<"unknown-x">>})
        emqx_plugins:ensure_configured(P3, {before, <<"unknown-x">>}, local)
    ).

read_plugin_test() ->

@ -49,34 +50,34 @@ read_plugin_test() ->
    with_rand_install_dir(
        fun(_Dir) ->
            NameVsn = "bar-5",
            InfoFile = emqx_plugins:info_file(NameVsn),
            InfoFile = emqx_plugins:info_file_path(NameVsn),
            FakeInfo =
                "name=bar, rel_vsn=\"5\", rel_apps=[justname_no_vsn],"
                "description=\"desc bar\"",
            try
                ok = write_file(InfoFile, FakeInfo),
                ?assertMatch(
                    {error, #{error := "bad_rel_apps"}},
                    emqx_plugins:read_plugin(NameVsn, #{})
                    {error, #{error_msg := "bad_rel_apps"}},
                    emqx_plugins:read_plugin_info(NameVsn, #{})
                )
            after
                emqx_plugins:purge(NameVsn)
            end
        end
    ),
    meck:unload(emqx).
    unmeck_emqx().

with_rand_install_dir(F) ->
    N = rand:uniform(10000000),
    TmpDir = integer_to_list(N),
    OriginalInstallDir = emqx_plugins:install_dir(),
    ok = filelib:ensure_dir(filename:join([TmpDir, "foo"])),
    ok = emqx_plugins:put_config(install_dir, TmpDir),
    ok = emqx_plugins:put_config_internal(install_dir, TmpDir),
    try
        F(TmpDir)
    after
        file:del_dir_r(TmpDir),
        ok = emqx_plugins:put_config(install_dir, OriginalInstallDir)
        ok = emqx_plugins:put_config_internal(install_dir, OriginalInstallDir)
    end.

write_file(Path, Content) ->

@ -90,7 +91,7 @@ delete_package_test() ->
    meck_emqx(),
    with_rand_install_dir(
        fun(_Dir) ->
            File = emqx_plugins:pkg_file("a-1"),
            File = emqx_plugins:pkg_file_path("a-1"),
            ok = write_file(File, "a"),
            ok = emqx_plugins:delete_package("a-1"),
            %% delete again should be ok

@ -100,7 +101,7 @@ delete_package_test() ->
            ?assertMatch({error, _}, emqx_plugins:delete_package("a-1"))
        end
    ),
    meck:unload(emqx).
    unmeck_emqx().

%% purge plugin's install dir should mostly work and return ok
%% but it may fail in case the dir is read-only

@ -108,8 +109,8 @@ purge_test() ->
    meck_emqx(),
    with_rand_install_dir(
        fun(_Dir) ->
            File = emqx_plugins:info_file("a-1"),
            Dir = emqx_plugins:dir("a-1"),
            File = emqx_plugins:info_file_path("a-1"),
            Dir = emqx_plugins:plugin_dir("a-1"),
            ok = filelib:ensure_dir(File),
            ?assertMatch({ok, _}, file:read_file_info(Dir)),
            ?assertEqual(ok, emqx_plugins:purge("a-1")),

@ -120,10 +121,11 @@ purge_test() ->
            ?assertEqual(ok, emqx_plugins:purge("a-1"))
        end
    ),
    meck:unload(emqx).
    unmeck_emqx().

meck_emqx() ->
    meck:new(emqx, [unstick, passthrough]),
    meck:new(emqx_plugins_serde),
    meck:expect(
        emqx,
        update_config,

@ -131,4 +133,14 @@ meck_emqx() ->
            emqx_config:put(Path, Values)
        end
    ),
    meck:expect(
        emqx_plugins_serde,
        delete_schema,
        fun(_NameVsn) -> ok end
    ),
    ok.

unmeck_emqx() ->
    meck:unload(emqx),
    meck:unload(emqx_plugins_serde),
    ok.
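The tests above replace the bare meck:unload(emqx) teardown with a paired unmeck_emqx/0, since meck_emqx/0 now also mocks emqx_plugins_serde. A minimal eunit sketch of that setup/cleanup pairing (module and test names are made up; the non_strict option is only there so the sketch runs without the real module loaded):

-module(mock_fixture_example_tests).
-include_lib("eunit/include/eunit.hrl").

%% Sketch: every mock installed in setup is unloaded in cleanup, even when
%% an assertion in the test body fails.
fixture_test_() ->
    Setup = fun() ->
        meck:new(emqx_plugins_serde, [non_strict]),
        meck:expect(emqx_plugins_serde, delete_schema, fun(_NameVsn) -> ok end)
    end,
    Cleanup = fun(_) -> meck:unload(emqx_plugins_serde) end,
    {setup, Setup, Cleanup, [
        ?_assertEqual(ok, emqx_plugins_serde:delete_schema(<<"example-1">>))
    ]}.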
@ -304,7 +304,7 @@ on_query(
    }),
    Type = pgsql_query_type(TypeOrKey),
    {NameOrSQL2, Data} = proc_sql_params(TypeOrKey, NameOrSQL, Params, State),
    Res = on_sql_query(InstId, PoolName, Type, NameOrSQL2, Data),
    Res = on_sql_query(TypeOrKey, InstId, PoolName, Type, NameOrSQL2, Data),
    ?tp(postgres_bridge_connector_on_query_return, #{instance_id => InstId, result => Res}),
    handle_result(Res).

@ -337,7 +337,7 @@ on_batch_query(
        {_Statement, RowTemplate} ->
            PrepStatement = get_prepared_statement(BinKey, State),
            Rows = [render_prepare_sql_row(RowTemplate, Data) || {_Key, Data} <- BatchReq],
            case on_sql_query(InstId, PoolName, execute_batch, PrepStatement, Rows) of
            case on_sql_query(Key, InstId, PoolName, execute_batch, PrepStatement, Rows) of
                {error, _Error} = Result ->
                    handle_result(Result);
                {_Column, Results} ->

@ -386,7 +386,15 @@ get_prepared_statement(Key, #{prepares := PrepStatements}) ->
    BinKey = to_bin(Key),
    maps:get(BinKey, PrepStatements).

on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) ->
on_sql_query(Key, InstId, PoolName, Type, NameOrSQL, Data) ->
    emqx_trace:rendered_action_template(
        Key,
        #{
            statement_type => Type,
            statement_or_name => NameOrSQL,
            data => Data
        }
    ),
    try ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Data]}, no_handover) of
        {error, Reason} ->
            ?tp(
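The action key is now threaded through to on_sql_query/6 so the rendered statement can be handed to the trace subsystem before the query runs. A compact sketch of that wrap-then-execute shape (traced_query/5 and the Run fun are illustrative; only the emqx_trace call mirrors the code above):

%% Illustrative wrapper: report what is about to run for this action key,
%% then execute it via the caller-supplied Run fun.
traced_query(Key, Type, NameOrSQL, Data, Run) when is_function(Run, 1) ->
    emqx_trace:rendered_action_template(Key, #{
        statement_type => Type,
        statement_or_name => NameOrSQL,
        data => Data
    }),
    Run({Type, NameOrSQL, Data}).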
@ -81,15 +81,15 @@
    hc_workers = #{
        resource => #{},
        channel => #{
            pending => [],
            previous_status => #{}
            ongoing => #{},
            pending => []
        }
    } :: #{
        resource := #{{pid(), reference()} => true},
        channel := #{
            {pid(), reference()} => channel_id(),
            pending := [channel_id()],
            previous_status := #{channel_id() => channel_status_map()}
            ongoing := #{channel_id() => channel_status_map()},
            pending := [channel_id()]
        }
    },
    %% Callers waiting on health check

@ -446,6 +446,7 @@ init({DataIn, Opts}) ->
terminate({shutdown, removed}, _State, _Data) ->
    ok;
terminate(_Reason, _State, Data) ->
    ok = terminate_health_check_workers(Data),
    _ = maybe_stop_resource(Data),
    _ = erase_cache(Data),
    ok.

@ -555,22 +556,22 @@ handle_event(
    {keep_state_and_data, {reply, From, {ok, Channels}}};
handle_event(
    info,
    {'DOWN', Ref, process, Pid, Res},
    {'EXIT', Pid, Res},
    State0,
    Data0 = #data{hc_workers = #{resource := RHCWorkers}}
) when
    is_map_key({Pid, Ref}, RHCWorkers)
    is_map_key(Pid, RHCWorkers)
->
    handle_resource_health_check_worker_down(State0, Data0, {Pid, Ref}, Res);
    handle_resource_health_check_worker_down(State0, Data0, Pid, Res);
handle_event(
    info,
    {'DOWN', Ref, process, Pid, Res},
    {'EXIT', Pid, Res},
    _State,
    Data0 = #data{hc_workers = #{channel := CHCWorkers}}
) when
    is_map_key({Pid, Ref}, CHCWorkers)
    is_map_key(Pid, CHCWorkers)
->
    handle_channel_health_check_worker_down(Data0, {Pid, Ref}, Res);
    handle_channel_health_check_worker_down(Data0, Pid, Res);
% Ignore all other events
handle_event(EventType, EventData, State, Data) ->
    ?SLOG(
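With the switch from spawn_monitor/3 to spawn_link/3, worker termination now arrives as {'EXIT', Pid, Reason} info messages keyed by pid alone rather than 'DOWN' messages keyed by {Pid, Ref}. A standalone sketch of that pattern, assuming the owner traps exits (module and function names are made up):

-module(exit_reaper_example).
-export([run/0]).

%% Sketch: the parent traps exits, so a linked worker's termination is
%% delivered as an {'EXIT', Pid, Reason} message instead of killing the parent.
run() ->
    process_flag(trap_exit, true),
    Pid = spawn_link(fun() -> exit(done) end),
    Workers = #{Pid => true},
    receive
        {'EXIT', P, Reason} when is_map_key(P, Workers) ->
            {worker_down, P, Reason}
    after 1000 ->
        timeout
    end.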
@ -634,6 +635,7 @@ health_check_actions(Data) ->
handle_remove_event(From, ClearMetrics, Data) ->
    %% stop the buffer workers first, brutal_kill, so it should be fast
    ok = emqx_resource_buffer_worker_sup:stop_workers(Data#data.id, Data#data.opts),
    ok = terminate_health_check_workers(Data),
    %% now stop the resource, this can be slow
    _ = stop_resource(Data),
    case ClearMetrics of

@ -793,6 +795,35 @@ safe_call_remove_channel(_ResId, _Mod, undefined = State, _ChannelID) ->
safe_call_remove_channel(ResId, Mod, State, ChannelID) ->
    emqx_resource:call_remove_channel(ResId, Mod, State, ChannelID).

%% For cases where we need to terminate and there are running health checks.
terminate_health_check_workers(Data) ->
    #data{
        hc_workers = #{resource := RHCWorkers, channel := CHCWorkers},
        hc_pending_callers = #{resource := RPending, channel := CPending}
    } = Data,
    maps:foreach(
        fun(Pid, _) ->
            exit(Pid, kill)
        end,
        RHCWorkers
    ),
    maps:foreach(
        fun
            (Pid, _) when is_pid(Pid) ->
                exit(Pid, kill);
            (_, _) ->
                ok
        end,
        CHCWorkers
    ),
    Pending = lists:flatten([RPending, maps:values(CPending)]),
    lists:foreach(
        fun(From) ->
            gen_statem:reply(From, {error, resource_shutting_down})
        end,
        Pending
    ).

make_test_id() ->
    RandId = iolist_to_binary(emqx_utils:gen_id(16)),
    <<?TEST_ID_PREFIX, RandId/binary>>.

@ -916,14 +947,14 @@ start_resource_health_check(#data{hc_workers = #{resource := HCWorkers}}) when
    keep_state_and_data;
start_resource_health_check(#data{} = Data0) ->
    #data{hc_workers = HCWorkers0 = #{resource := RHCWorkers0}} = Data0,
    WorkerRef = {_Pid, _Ref} = spawn_resource_health_check_worker(Data0),
    HCWorkers = HCWorkers0#{resource := RHCWorkers0#{WorkerRef => true}},
    WorkerPid = spawn_resource_health_check_worker(Data0),
    HCWorkers = HCWorkers0#{resource := RHCWorkers0#{WorkerPid => true}},
    Data = Data0#data{hc_workers = HCWorkers},
    {keep_state, Data}.

-spec spawn_resource_health_check_worker(data()) -> {pid(), reference()}.
-spec spawn_resource_health_check_worker(data()) -> pid().
spawn_resource_health_check_worker(#data{} = Data) ->
    spawn_monitor(?MODULE, worker_resource_health_check, [Data]).
    spawn_link(?MODULE, worker_resource_health_check, [Data]).

%% separated so it can be spec'ed and placate dialyzer tantrums...
-spec worker_resource_health_check(data()) -> no_return().

@ -1008,12 +1039,12 @@ handle_manual_channel_health_check(
    #data{
        added_channels = Channels,
        hc_pending_callers = #{channel := CPending0} = Pending0,
        hc_workers = #{channel := #{previous_status := PreviousStatus}}
        hc_workers = #{channel := #{ongoing := Ongoing}}
    } = Data0,
    ChannelId
) when
    is_map_key(ChannelId, Channels),
    is_map_key(ChannelId, PreviousStatus)
    is_map_key(ChannelId, Ongoing)
->
    %% Ongoing health check.
    CPending = maps:update_with(

@ -1158,65 +1189,66 @@ resource_not_connected_channel_error_msg(ResourceStatus, ChannelId, Data1) ->
%% `?status_connected'.
-spec trigger_health_check_for_added_channels(data()) -> data().
trigger_health_check_for_added_channels(Data0 = #data{hc_workers = HCWorkers0}) ->
    #{channel := CHCWorkers0} = HCWorkers0,
    PreviousStatus = maps:from_list([
        {ChannelId, OldStatus}
        || {ChannelId, OldStatus} <- maps:to_list(Data0#data.added_channels),
    #{
        channel := CHCWorkers0 =
            #{
                pending := CPending0,
                ongoing := Ongoing0
            }
    } = HCWorkers0,
    NewOngoing = maps:filter(
        fun(ChannelId, OldStatus) ->
            not is_map_key(ChannelId, Ongoing0) and
                channel_status_is_channel_added(OldStatus)
    ]),
    ChannelsToCheck = maps:keys(PreviousStatus),
        end,
        Data0#data.added_channels
    ),
    ChannelsToCheck = maps:keys(NewOngoing),
    case ChannelsToCheck of
        [] ->
            %% Nothing to do.
            Data0;
        [ChannelId | Rest] ->
            %% Shooting one check at a time. We could increase concurrency in the future.
            CHCWorkers = CHCWorkers0#{pending := Rest, previous_status := PreviousStatus},
            CHCWorkers = CHCWorkers0#{
                pending := CPending0 ++ Rest,
                ongoing := maps:merge(Ongoing0, NewOngoing)
            },
            Data1 = Data0#data{hc_workers = HCWorkers0#{channel := CHCWorkers}},
            start_channel_health_check(Data1, ChannelId)
    end.

-spec continue_channel_health_check_connected(data()) -> data().
continue_channel_health_check_connected(Data0) ->
-spec continue_channel_health_check_connected(channel_id(), channel_status_map(), data()) -> data().
continue_channel_health_check_connected(ChannelId, OldStatus, Data0) ->
    #data{hc_workers = HCWorkers0} = Data0,
    #{channel := #{previous_status := PreviousStatus} = CHCWorkers0} = HCWorkers0,
    CHCWorkers = CHCWorkers0#{previous_status := #{}},
    #{channel := CHCWorkers0} = HCWorkers0,
    CHCWorkers = emqx_utils_maps:deep_remove([ongoing, ChannelId], CHCWorkers0),
    Data1 = Data0#data{hc_workers = HCWorkers0#{channel := CHCWorkers}},
    %% Remove the added channels with a status different from connected or connecting
    CheckedChannels = [
        {ChannelId, NewStatus}
        || {ChannelId, NewStatus} <- maps:to_list(Data0#data.added_channels),
        is_map_key(ChannelId, PreviousStatus)
    ],
    ChannelsToRemove = [
        ChannelId
        || {ChannelId, NewStatus} <- CheckedChannels,
        not channel_status_is_channel_added(NewStatus)
    ],
    NewStatus = maps:get(ChannelId, Data0#data.added_channels),
    ChannelsToRemove = [ChannelId || not channel_status_is_channel_added(NewStatus)],
    Data = remove_channels_in_list(ChannelsToRemove, Data1, true),
    %% Raise/clear alarms
    lists:foreach(
        fun
            ({ID, #{status := ?status_connected}}) ->
                _ = maybe_clear_alarm(ID);
            ({ID, NewStatus}) ->
                OldStatus = maps:get(ID, PreviousStatus),
                _ = maybe_alarm(NewStatus, ID, NewStatus, OldStatus)
    case NewStatus of
        #{status := ?status_connected} ->
            _ = maybe_clear_alarm(ChannelId),
            ok;
        _ ->
            _ = maybe_alarm(NewStatus, ChannelId, NewStatus, OldStatus),
            ok
    end,
        CheckedChannels
    ),
    Data.

-spec start_channel_health_check(data(), channel_id()) -> data().
start_channel_health_check(#data{} = Data0, ChannelId) ->
    #data{hc_workers = HCWorkers0 = #{channel := CHCWorkers0}} = Data0,
    WorkerRef = {_Pid, _Ref} = spawn_channel_health_check_worker(Data0, ChannelId),
    HCWorkers = HCWorkers0#{channel := CHCWorkers0#{WorkerRef => ChannelId}},
    WorkerPid = spawn_channel_health_check_worker(Data0, ChannelId),
    HCWorkers = HCWorkers0#{channel := CHCWorkers0#{WorkerPid => ChannelId}},
    Data0#data{hc_workers = HCWorkers}.

-spec spawn_channel_health_check_worker(data(), channel_id()) -> {pid(), reference()}.
-spec spawn_channel_health_check_worker(data(), channel_id()) -> pid().
spawn_channel_health_check_worker(#data{} = Data, ChannelId) ->
    spawn_monitor(?MODULE, worker_channel_health_check, [Data, ChannelId]).
    spawn_link(?MODULE, worker_channel_health_check, [Data, ChannelId]).

%% separated so it can be spec'ed and placate dialyzer tantrums...
-spec worker_channel_health_check(data(), channel_id()) -> no_return().

@ -1240,19 +1272,24 @@ handle_channel_health_check_worker_down(Data0, WorkerRef, ExitResult) ->
            %% `emqx_resource:call_channel_health_check' catches all exceptions.
            AddedChannels = maps:put(ChannelId, NewStatus, AddedChannels0)
    end,
    #{ongoing := Ongoing0} = CHCWorkers1,
    {PreviousChanStatus, Ongoing1} = maps:take(ChannelId, Ongoing0),
    CHCWorkers2 = CHCWorkers1#{ongoing := Ongoing1},
    CHCWorkers3 = emqx_utils_maps:deep_remove([ongoing, ChannelId], CHCWorkers2),
    Data1 = Data0#data{added_channels = AddedChannels},
    {Replies, Data2} = reply_pending_channel_health_check_callers(ChannelId, NewStatus, Data1),
    case CHCWorkers1 of
        #{pending := [NextChannelId | Rest]} ->
            CHCWorkers = CHCWorkers1#{pending := Rest},
            CHCWorkers = CHCWorkers3#{pending := Rest},
            HCWorkers = HCWorkers0#{channel := CHCWorkers},
            Data3 = Data2#data{hc_workers = HCWorkers},
            Data = start_channel_health_check(Data3, NextChannelId),
            Data4 = continue_channel_health_check_connected(ChannelId, PreviousChanStatus, Data3),
            Data = start_channel_health_check(Data4, NextChannelId),
            {keep_state, update_state(Data, Data0), Replies};
        #{pending := []} ->
            HCWorkers = HCWorkers0#{channel := CHCWorkers1},
            HCWorkers = HCWorkers0#{channel := CHCWorkers3},
            Data3 = Data2#data{hc_workers = HCWorkers},
            Data = continue_channel_health_check_connected(Data3),
            Data = continue_channel_health_check_connected(ChannelId, PreviousChanStatus, Data3),
            {keep_state, update_state(Data, Data0), Replies}
    end.

@ -1308,7 +1345,7 @@ remove_runtime_data(#data{} = Data0) ->
    Data0#data{
        hc_workers = #{
            resource => #{},
            channel => #{pending => [], previous_status => #{}}
            channel => #{pending => [], ongoing => #{}}
        },
        hc_pending_callers = #{
            resource => [],
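The channel health-check bookkeeping above keeps a pending queue plus an ongoing map keyed by channel id, starting one check at a time. A toy sketch of that queue discipline, independent of the resource manager (module and function names are made up):

-module(one_at_a_time_example).
-export([start_next/1, complete/2]).

%% Pop one pending item into the ongoing map; completing an item removes it
%% and immediately starts the next pending one, if any.
start_next(#{pending := [Next | Rest], ongoing := Ongoing} = St) ->
    {started, Next, St#{pending := Rest, ongoing := Ongoing#{Next => in_progress}}};
start_next(#{pending := [], ongoing := _} = St) ->
    {idle, St}.

complete(Item, #{ongoing := Ongoing} = St) ->
    start_next(St#{ongoing := maps:remove(Item, Ongoing)}).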
@ -132,8 +132,6 @@

%% String Funcs
-export([
    coalesce/1,
    coalesce/2,
    lower/1,
    ltrim/1,
    reverse/1,

@ -759,10 +757,6 @@ is_array(_) -> false.
%% String Funcs
%%------------------------------------------------------------------------------

coalesce(List) -> emqx_variform_bif:coalesce(List).

coalesce(A, B) -> emqx_variform_bif:coalesce(A, B).

lower(S) -> emqx_variform_bif:lower(S).

ltrim(S) -> emqx_variform_bif:ltrim(S).
@ -18,6 +18,7 @@

-include("rule_engine.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx/include/emqx_trace.hrl").
-include_lib("emqx_resource/include/emqx_resource_errors.hrl").

-export([

@ -141,21 +142,23 @@ apply_rule(Rule = #{id := RuleID}, Columns, Envs) ->

set_process_trace_metadata(RuleID, #{clientid := ClientID} = Columns) ->
    logger:update_process_metadata(#{
        clientid => ClientID
    }),
    set_process_trace_metadata(RuleID, maps:remove(clientid, Columns));
        clientid => ClientID,
        rule_id => RuleID,
        rule_trigger_time => rule_trigger_time(Columns)
    });
set_process_trace_metadata(RuleID, Columns) ->
    EventTimestamp =
    logger:update_process_metadata(#{
        rule_id => RuleID,
        rule_trigger_time => rule_trigger_time(Columns)
    }).

rule_trigger_time(Columns) ->
    case Columns of
        #{timestamp := Timestamp} ->
            Timestamp;
        _ ->
            erlang:system_time(millisecond)
    end,
    logger:update_process_metadata(#{
        rule_id => RuleID,
        rule_trigger_time => EventTimestamp
    }).
    end.

reset_process_trace_metadata(#{clientid := _ClientID}) ->
    Meta = logger:get_process_metadata(),

@ -722,7 +725,7 @@ inc_action_metrics(TraceCtx, Result) ->

do_inc_action_metrics(
    #{rule_id := RuleId, action_id := ActId} = TraceContext,
    {error, {unrecoverable_error, {action_stopped_after_template_rendering, Explanation}} = _Reason}
    {error, ?EMQX_TRACE_STOP_ACTION(Explanation) = _Reason}
) ->
    TraceContext1 = maps:remove(action_id, TraceContext),
    trace_action(
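The rule runtime now stamps rule_id and rule_trigger_time into the logger process metadata, falling back to the current system time when the event carries no timestamp column. A small standalone sketch of how such metadata rides along on every log event emitted by the process (the rule id value is made up):

-module(rule_trace_metadata_example).
-export([run/0]).

%% Metadata set on the process is merged into the metadata of every log
%% event that this process emits afterwards.
run() ->
    logger:update_process_metadata(#{
        rule_id => <<"example_rule">>,
        rule_trigger_time => erlang:system_time(millisecond)
    }),
    logger:info("applying example rule"),
    maps:with([rule_id, rule_trigger_time], logger:get_process_metadata()).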
@ -6,6 +6,7 @@

-include_lib("emqx/include/types.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-include_lib("erlcloud/include/erlcloud_aws.hrl").

-export([

@ -133,7 +134,13 @@ start_multipart(
    Headers = join_headers(BaseHeaders, maps:get(headers, UploadOpts, undefined)),
    case erlcloud_s3:start_multipart(Bucket, ECKey, ECOpts, Headers, AwsConfig) of
        {ok, Props} ->
            {ok, response_property('uploadId', Props)};
            UploadId = response_property('uploadId', Props),
            ?tp(s3_client_multipart_started, #{
                bucket => Bucket,
                key => Key,
                upload_id => UploadId
            }),
            {ok, UploadId};
        {error, Reason} ->
            ?SLOG(debug, #{msg => "start_multipart_fail", key => Key, reason => Reason}),
            {error, Reason}

@ -177,6 +184,11 @@ complete_multipart(
    )
    of
        ok ->
            ?tp(s3_client_multipart_completed, #{
                bucket => Bucket,
                key => Key,
                upload_id => UploadId
            }),
            ok;
        {error, Reason} ->
            ?SLOG(debug, #{msg => "complete_multipart_fail", key => Key, reason => Reason}),

@ -193,6 +205,11 @@ abort_multipart(
) ->
    case erlcloud_s3:abort_multipart(Bucket, erlcloud_key(Key), UploadId, [], Headers, AwsConfig) of
        ok ->
            ?tp(s3_client_multipart_aborted, #{
                bucket => Bucket,
                key => Key,
                upload_id => UploadId
            }),
            ok;
        {error, Reason} ->
            ?SLOG(debug, #{msg => "abort_multipart_fail", key => Key, reason => Reason}),
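The new ?tp trace points make the multipart start/complete/abort transitions observable from tests. A rough sketch of how a test could assert on the started event with the usual snabbkaffe macros (the check/1 wrapper and the RunUploadFun argument are illustrative):

-module(s3_multipart_trace_example).
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("stdlib/include/assert.hrl").
-export([check/1]).

%% Run a fun that is expected to start a multipart upload, then assert that
%% the s3_client_multipart_started trace point was emitted with an upload id.
check(RunUploadFun) when is_function(RunUploadFun, 0) ->
    ?check_trace(
        RunUploadFun(),
        fun(_Result, Trace) ->
            ?assertMatch(
                [#{upload_id := _} | _],
                ?of_kind(s3_client_multipart_started, Trace)
            )
        end
    ).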