Merge pull request #12023 from emqx/master

chore: sync master to release-54
zhongwencool committed 2023-11-27 09:31:47 +08:00 (committed by GitHub)
commit b5a00ec6b2
178 changed files with 7786 additions and 1836 deletions

View File

@ -61,10 +61,6 @@ body:
# paste output here
$ uname -a
# paste output here
# On Windows:
C:\> wmic os get Caption, Version, BuildNumber, OSArchitecture
# paste output here
```
</details>

View File

@ -1,9 +1,6 @@
Fixes <issue-or-jira-number>
<!-- Make sure to target release-52 branch if this PR is intended to fix the issues for the release candidate. -->
## Summary
copilot:summary
<!-- Make sure to target release-5[0-9] branch if this PR is intended to fix the issues for the release candidate. -->
## PR Checklist
Please convert it to a draft if any of the following conditions are not met. Reviewers may skip over it until all the items are checked:

View File

@ -65,58 +65,6 @@ on:
default: '5.2-3'
jobs:
windows:
runs-on: windows-2019
if: inputs.profile == 'emqx'
strategy:
fail-fast: false
matrix:
profile: # for now only CE for windows
- emqx
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
- uses: ilammy/msvc-dev-cmd@v1.12.0
- uses: erlef/setup-beam@v1.16.0
with:
otp-version: 25.3.2
- name: build
env:
PYTHON: python
DIAGNOSTIC: 1
run: |
# ensure crypto app (openssl)
erl -eval "erlang:display(crypto:info_lib())" -s init stop
make ${{ matrix.profile }}-tgz
- name: run emqx
timeout-minutes: 5
run: |
$ErrorActionPreference = "Stop"
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx start
Start-Sleep -s 10
$pingOutput = ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx ping
if ($pingOutput -eq 'pong') {
echo "EMQX started OK"
} else {
echo "Failed to ping EMQX $pingOutput"
Exit 1
}
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx stop
echo "EMQX stopped"
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx install
echo "EMQX installed"
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall
echo "EMQX uninstalled"
- uses: actions/upload-artifact@v3
if: success()
with:
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
retention-days: 7
mac:
strategy:
fail-fast: false
@ -126,9 +74,9 @@ jobs:
otp:
- ${{ inputs.otp_vsn }}
os:
- macos-11
- macos-12
- macos-12-arm64
- macos-13
runs-on: ${{ matrix.os }}
steps:
- uses: emqx/self-hosted-cleanup-action@v1.0.3

View File

@ -130,59 +130,3 @@ jobs:
with:
payload: |
{"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}
windows:
if: github.repository_owner == 'emqx'
runs-on: windows-2019
strategy:
fail-fast: false
matrix:
profile:
- emqx
otp:
- 25.3.2
steps:
- uses: actions/checkout@v3
- uses: ilammy/msvc-dev-cmd@v1.12.0
- uses: erlef/setup-beam@v1.16.0
with:
otp-version: ${{ matrix.otp }}
- name: build
env:
PYTHON: python
DIAGNOSTIC: 1
run: |
# ensure crypto app (openssl)
erl -eval "erlang:display(crypto:info_lib())" -s init stop
make ${{ matrix.profile }}-tgz
- name: run emqx
timeout-minutes: 5
run: |
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx start
Start-Sleep -s 10
$pingOutput = ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx ping
if ($pingOutput -eq 'pong') {
echo "EMQX started OK"
} else {
echo "Failed to ping EMQX $pingOutput"
Exit 1
}
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx stop
echo "EMQX stopped"
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx install
echo "EMQX installed"
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall
echo "EMQX uninstalled"
- uses: actions/upload-artifact@v3
with:
name: windows
path: _packages/${{ matrix.profile }}/*
retention-days: 7
- name: Send notification to Slack
uses: slackapi/slack-github-action@v1.23.0
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
with:
payload: |
{"text": "Scheduled build of ${{ matrix.profile }} package for Windows failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"}

View File

@ -21,7 +21,7 @@ endif
# Dashboard version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.5.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.3.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.3.2-beta.1
PROFILE ?= emqx
REL_PROFILES := emqx emqx-enterprise

View File

@ -32,10 +32,10 @@
%% `apps/emqx/src/bpapi/README.md'
%% Opensource edition
-define(EMQX_RELEASE_CE, "5.3.1").
-define(EMQX_RELEASE_CE, "5.3.2").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.3.1").
-define(EMQX_RELEASE_EE, "5.3.2-alpha.1").
%% The HTTP API version
-define(EMQX_API_VERSION, "5.0").

View File

@ -11,12 +11,6 @@
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("emqx/src/emqx_persistent_session_ds.hrl").
-define(DEFAULT_KEYSPACE, default).
-define(DS_SHARD_ID, <<"local">>).
-define(DS_SHARD, {?DEFAULT_KEYSPACE, ?DS_SHARD_ID}).
-import(emqx_common_test_helpers, [on_exit/1]).
%%------------------------------------------------------------------------------
@ -85,19 +79,13 @@ cluster(#{n := N}) ->
app_specs() ->
[
emqx_durable_storage,
{emqx, "persistent_session_store = {ds = true}"}
{emqx, "session_persistence = {enable = true}"}
].
get_mqtt_port(Node, Type) ->
{_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]),
Port.
get_all_iterator_ids(Node) ->
Fn = fun(K, _V, Acc) -> [K | Acc] end,
erpc:call(Node, fun() ->
emqx_ds_storage_layer:foldl_iterator_prefix(?DS_SHARD, <<>>, Fn, [])
end).
wait_nodeup(Node) ->
?retry(
_Sleep0 = 500,
@ -233,9 +221,8 @@ t_session_subscription_idempotency(Config) ->
end,
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
SubTopicFilterWords = emqx_topic:words(SubTopicFilter),
?assertMatch(
{ok, #{}, #{SubTopicFilterWords := #{}}},
#{subscriptions := #{SubTopicFilter := #{}}},
erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId])
)
end
@ -308,7 +295,7 @@ t_session_unsubscription_idempotency(Config) ->
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
?assertMatch(
{ok, #{}, Subs = #{}} when map_size(Subs) =:= 0,
#{subscriptions := Subs = #{}} when map_size(Subs) =:= 0,
erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId])
),
ok
@ -370,18 +357,12 @@ do_t_session_discard(Params) ->
_Attempts0 = 50,
true = map_size(emqx_persistent_session_ds:list_all_streams()) > 0
),
?retry(
_Sleep0 = 100,
_Attempts0 = 50,
true = map_size(emqx_persistent_session_ds:list_all_iterators()) > 0
),
ok = emqtt:stop(Client0),
?tp(notice, "disconnected", #{}),
?tp(notice, "reconnecting", #{}),
%% we still have iterators and streams
%% we still have streams
?assert(map_size(emqx_persistent_session_ds:list_all_streams()) > 0),
?assert(map_size(emqx_persistent_session_ds:list_all_iterators()) > 0),
Client1 = start_client(ReconnectOpts),
{ok, _} = emqtt:connect(Client1),
?assertEqual([], emqtt:subscriptions(Client1)),
@ -394,7 +375,7 @@ do_t_session_discard(Params) ->
?assertEqual(#{}, emqx_persistent_session_ds:list_all_subscriptions()),
?assertEqual([], emqx_persistent_session_ds_router:topics()),
?assertEqual(#{}, emqx_persistent_session_ds:list_all_streams()),
?assertEqual(#{}, emqx_persistent_session_ds:list_all_iterators()),
?assertEqual(#{}, emqx_persistent_session_ds:list_all_pubranges()),
ok = emqtt:stop(Client1),
?tp(notice, "disconnected", #{}),

View File

@ -43,6 +43,7 @@
{emqx_mgmt_trace,2}.
{emqx_node_rebalance,1}.
{emqx_node_rebalance,2}.
{emqx_node_rebalance,3}.
{emqx_node_rebalance_api,1}.
{emqx_node_rebalance_api,2}.
{emqx_node_rebalance_evacuation,1}.

View File

@ -1216,8 +1216,10 @@ handle_info(
{ok, Channel3} -> {ok, ?REPLY_EVENT(disconnected), Channel3};
Shutdown -> Shutdown
end;
handle_info({sock_closed, Reason}, Channel = #channel{conn_state = disconnected}) ->
?SLOG(error, #{msg => "unexpected_sock_close", reason => Reason}),
handle_info({sock_closed, _Reason}, Channel = #channel{conn_state = disconnected}) ->
%% This can happen as a race:
%% EMQX closes the socket and marks the channel 'disconnected', but a
%% 'tcp_closed' or 'ssl_closed' message is already in the process mailbox.
{ok, Channel};
handle_info(clean_authz_cache, Channel) ->
ok = emqx_authz_cache:empty_authz_cache(),

View File

@ -552,13 +552,13 @@ handle_msg({quic, Data, _Stream, #{len := Len}}, State) when is_binary(Data) ->
inc_counter(incoming_bytes, Len),
ok = emqx_metrics:inc('bytes.received', Len),
when_bytes_in(Len, Data, State);
handle_msg(check_cache, #state{limiter_buffer = Cache} = State) ->
case queue:peek(Cache) of
handle_msg(check_limiter_buffer, #state{limiter_buffer = Buffer} = State) ->
case queue:peek(Buffer) of
empty ->
activate_socket(State);
handle_info(activate_socket, State);
{value, #pending_req{need = Needs, data = Data, next = Next}} ->
State2 = State#state{limiter_buffer = queue:drop(Cache)},
check_limiter(Needs, Data, Next, [check_cache], State2)
State2 = State#state{limiter_buffer = queue:drop(Buffer)},
check_limiter(Needs, Data, Next, [check_limiter_buffer], State2)
end;
handle_msg(
{incoming, Packet = ?CONNECT_PACKET(ConnPkt)},
@ -1036,13 +1036,13 @@ check_limiter(
Data,
WhenOk,
_Msgs,
#state{limiter_buffer = Cache} = State
#state{limiter_buffer = Buffer} = State
) ->
%% if there has a retry timer,
%% cache the operation and execute it after the retry is over
%% the maximum length of the cache queue is equal to the active_n
%% Buffer the operation and execute it after the retry is over
%% the maximum length of the buffer queue is equal to the active_n
New = #pending_req{need = Needs, data = Data, next = WhenOk},
{ok, State#state{limiter_buffer = queue:in(New, Cache)}}.
{ok, State#state{limiter_buffer = queue:in(New, Buffer)}}.
%% try to perform a retry
-spec retry_limiter(state()) -> _.
@ -1053,7 +1053,7 @@ retry_limiter(#state{limiter = Limiter} = State) ->
{ok, Limiter2} ->
Next(
Data,
[check_cache],
[check_limiter_buffer],
State#state{
limiter = Limiter2,
limiter_timer = undefined
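
The buffered-retry flow above can be read in isolation. Below is a minimal, self-contained sketch of the pattern (hypothetical module and function names; the real code threads the buffer through #state{} and re-checks the limiter for each request rather than calling the continuation directly):

-module(limiter_buffer_sketch).
-export([new/0, defer/4, drain/1]).

-record(pending_req, {need, data, next}).

%% Empty buffer, as in `limiter_buffer = queue:new()'.
new() ->
    queue:new().

%% While a retry timer is active, remember the operation instead of
%% running it (cf. the last `check_limiter' clause above).
defer(Need, Data, Next, Buffer) ->
    queue:in(#pending_req{need = Need, data = Data, next = Next}, Buffer).

%% On `check_limiter_buffer': run buffered operations oldest-first,
%% mirroring the peek/drop steps in `handle_msg' above.
drain(Buffer) ->
    case queue:peek(Buffer) of
        empty ->
            ok;
        {value, #pending_req{need = Need, data = Data, next = Next}} ->
            _ = Next(Need, Data),
            drain(queue:drop(Buffer))
    end.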

View File

@ -301,7 +301,9 @@ update_expiry(Msg) ->
Msg.
%% @doc Message to PUBLISH Packet.
-spec to_packet(emqx_types:packet_id(), emqx_types:message()) ->
%%
%% When QoS is 0, the packet id must be `undefined'.
-spec to_packet(emqx_types:packet_id() | undefined, emqx_types:message()) ->
emqx_types:packet().
to_packet(
PacketId,

View File

@ -19,7 +19,7 @@
-include("emqx.hrl").
-export([init/0]).
-export([is_store_enabled/0]).
-export([is_persistence_enabled/0, force_ds/0]).
%% Message persistence
-export([
@ -28,9 +28,8 @@
-define(PERSISTENT_MESSAGE_DB, emqx_persistent_message).
%% FIXME
-define(WHEN_ENABLED(DO),
case is_store_enabled() of
case is_persistence_enabled() of
true -> DO;
false -> {skipped, disabled}
end
@ -40,18 +39,40 @@
init() ->
?WHEN_ENABLED(begin
ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{
backend => builtin,
storage => {emqx_ds_storage_bitfield_lts, #{}}
}),
Backend = storage_backend(),
ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, Backend),
ok = emqx_persistent_session_ds_router:init_tables(),
ok = emqx_persistent_session_ds:create_tables(),
ok
end).
-spec is_store_enabled() -> boolean().
is_store_enabled() ->
emqx_config:get([persistent_session_store, ds]).
-spec is_persistence_enabled() -> boolean().
is_persistence_enabled() ->
emqx_config:get([session_persistence, enable]).
-spec storage_backend() -> emqx_ds:create_db_opts().
storage_backend() ->
storage_backend(emqx_config:get([session_persistence, storage])).
%% Dev-only option: force all messages to go through
%% `emqx_persistent_session_ds':
-spec force_ds() -> boolean().
force_ds() ->
emqx_config:get([session_persistence, force_persistence]).
storage_backend(#{
builtin := #{enable := true, n_shards := NShards, replication_factor := ReplicationFactor}
}) ->
#{
backend => builtin,
storage => {emqx_ds_storage_bitfield_lts, #{}},
n_shards => NShards,
replication_factor => ReplicationFactor
};
storage_backend(#{
fdb := #{enable := true} = FDBConfig
}) ->
FDBConfig#{backend => fdb}.
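
As a worked example (values are the schema defaults, not output from a real node), the builtin branch maps checked config onto emqx_ds options like so:

%% storage_backend(#{builtin => #{enable => true,
%%                                n_shards => 16,
%%                                replication_factor => 3}})
%% =:= #{backend => builtin,
%%       storage => {emqx_ds_storage_bitfield_lts, #{}},
%%       n_shards => 16,
%%       replication_factor => 3}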
%%--------------------------------------------------------------------

View File

@ -19,14 +19,15 @@
-module(emqx_persistent_message_ds_replayer).
%% API:
-export([new/0, next_packet_id/1, replay/2, commit_offset/3, poll/3, n_inflight/1]).
-export([new/0, open/1, next_packet_id/1, replay/1, commit_offset/3, poll/3, n_inflight/1]).
%% internal exports:
-export([]).
-export_type([inflight/0]).
-export_type([inflight/0, seqno/0]).
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_utils/include/emqx_message.hrl").
-include("emqx_persistent_session_ds.hrl").
-ifdef(TEST).
@ -41,19 +42,11 @@
%% Note: sequence numbers are monotonic; they don't wrap around:
-type seqno() :: non_neg_integer().
-record(range, {
stream :: emqx_ds:stream(),
first :: seqno(),
last :: seqno(),
iterator_next :: emqx_ds:iterator() | undefined
}).
-type range() :: #range{}.
-record(inflight, {
next_seqno = 0 :: seqno(),
acked_seqno = 0 :: seqno(),
offset_ranges = [] :: [range()]
next_seqno = 1 :: seqno(),
acked_until = 1 :: seqno(),
%% Ranges are sorted in ascending order of their sequence numbers.
offset_ranges = [] :: [ds_pubrange()]
}).
-opaque inflight() :: #inflight{}.
@ -66,34 +59,37 @@
new() ->
#inflight{}.
-spec open(emqx_persistent_session_ds:id()) -> inflight().
open(SessionId) ->
Ranges = ro_transaction(fun() -> get_ranges(SessionId) end),
{AckedUntil, NextSeqno} = compute_inflight_range(Ranges),
#inflight{
acked_until = AckedUntil,
next_seqno = NextSeqno,
offset_ranges = Ranges
}.
-spec next_packet_id(inflight()) -> {emqx_types:packet_id(), inflight()}.
next_packet_id(Inflight0 = #inflight{next_seqno = LastSeqNo}) ->
Inflight = Inflight0#inflight{next_seqno = LastSeqNo + 1},
case LastSeqNo rem 16#10000 of
0 ->
%% We skip sequence numbers that lead to PacketId = 0 to
%% simplify math. Note: it leads to occasional gaps in the
%% sequence numbers.
next_packet_id(Inflight);
PacketId ->
{PacketId, Inflight}
end.
next_packet_id(Inflight0 = #inflight{next_seqno = LastSeqno}) ->
Inflight = Inflight0#inflight{next_seqno = next_seqno(LastSeqno)},
{seqno_to_packet_id(LastSeqno), Inflight}.
-spec n_inflight(inflight()) -> non_neg_integer().
n_inflight(#inflight{next_seqno = NextSeqNo, acked_seqno = AckedSeqno}) ->
%% NOTE: this function assumes that gaps in the sequence ID occur
%% _only_ when the packet ID wraps:
case AckedSeqno >= ((NextSeqNo bsr 16) bsl 16) of
true ->
NextSeqNo - AckedSeqno;
false ->
NextSeqNo - AckedSeqno - 1
end.
n_inflight(#inflight{next_seqno = NextSeqno, acked_until = AckedUntil}) ->
range_size(AckedUntil, NextSeqno).
-spec replay(emqx_persistent_session_ds:id(), inflight()) ->
emqx_session:replies().
replay(_SessionId, _Inflight = #inflight{offset_ranges = _Ranges}) ->
[].
-spec replay(inflight()) ->
{emqx_session:replies(), inflight()}.
replay(Inflight0 = #inflight{acked_until = AckedUntil, offset_ranges = Ranges0}) ->
{Ranges, Replies} = lists:mapfoldr(
fun(Range, Acc) ->
replay_range(Range, AckedUntil, Acc)
end,
[],
Ranges0
),
Inflight = Inflight0#inflight{offset_ranges = Ranges},
{Replies, Inflight}.
-spec commit_offset(emqx_persistent_session_ds:id(), emqx_types:packet_id(), inflight()) ->
{_IsValidOffset :: boolean(), inflight()}.
@ -101,47 +97,34 @@ commit_offset(
SessionId,
PacketId,
Inflight0 = #inflight{
acked_seqno = AckedSeqno0, next_seqno = NextSeqNo, offset_ranges = Ranges0
acked_until = AckedUntil, next_seqno = NextSeqno
}
) ->
AckedSeqno =
case packet_id_to_seqno(NextSeqNo, PacketId) of
N when N > AckedSeqno0; AckedSeqno0 =:= 0 ->
N;
case packet_id_to_seqno(NextSeqno, PacketId) of
Seqno when Seqno >= AckedUntil andalso Seqno < NextSeqno ->
%% TODO
%% We do not preserve `acked_until` in the database. Instead, we discard
%% fully acked ranges from the database. In effect, this means that the
%% most recent `acked_until` the client has sent may be lost in case of a
%% crash or client loss.
Inflight1 = Inflight0#inflight{acked_until = next_seqno(Seqno)},
Inflight = discard_acked(SessionId, Inflight1),
{true, Inflight};
OutOfRange ->
?SLOG(warning, #{
msg => "out-of-order_ack",
prev_seqno => AckedSeqno0,
acked_until => AckedUntil,
acked_seqno => OutOfRange,
next_seqno => NextSeqNo,
next_seqno => NextSeqno,
packet_id => PacketId
}),
AckedSeqno0
end,
Ranges = lists:filter(
fun(#range{stream = Stream, last = LastSeqno, iterator_next = ItNext}) ->
case LastSeqno =< AckedSeqno of
true ->
%% This range has been fully
%% acked. Remove it and replace saved
%% iterator with the trailing iterator.
update_iterator(SessionId, Stream, ItNext),
false;
false ->
%% This range still has unacked
%% messages:
true
end
end,
Ranges0
),
Inflight = Inflight0#inflight{acked_seqno = AckedSeqno, offset_ranges = Ranges},
{true, Inflight}.
{false, Inflight0}
end.
-spec poll(emqx_persistent_session_ds:id(), inflight(), pos_integer()) ->
{emqx_session:replies(), inflight()}.
poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff ->
#inflight{next_seqno = NextSeqNo0, acked_seqno = AckedSeqno} =
#inflight{next_seqno = NextSeqNo0, acked_until = AckedSeqno} =
Inflight0,
FetchThreshold = max(1, WindowSize div 2),
FreeSpace = AckedSeqno + WindowSize - NextSeqNo0,
@ -153,6 +136,7 @@ poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff
%% client get stuck even?
{[], Inflight0};
true ->
%% TODO: Wrap this in `mria:async_dirty/2`?
Streams = shuffle(get_streams(SessionId)),
fetch(SessionId, Inflight0, Streams, FreeSpace, [])
end.
@ -165,75 +149,192 @@ poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff
%% Internal functions
%%================================================================================
fetch(_SessionId, Inflight, _Streams = [], _N, Acc) ->
{lists:reverse(Acc), Inflight};
fetch(_SessionId, Inflight, _Streams, 0, Acc) ->
{lists:reverse(Acc), Inflight};
fetch(SessionId, Inflight0, [Stream | Streams], N, Publishes0) ->
#inflight{next_seqno = FirstSeqNo, offset_ranges = Ranges0} = Inflight0,
ItBegin = get_last_iterator(SessionId, Stream, Ranges0),
{ok, ItEnd, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N),
{NMessages, Publishes, Inflight1} =
lists:foldl(
fun(Msg, {N0, PubAcc0, InflightAcc0}) ->
{PacketId, InflightAcc} = next_packet_id(InflightAcc0),
PubAcc = [{PacketId, Msg} | PubAcc0],
{N0 + 1, PubAcc, InflightAcc}
end,
{0, Publishes0, Inflight0},
Messages
compute_inflight_range([]) ->
{1, 1};
compute_inflight_range(Ranges) ->
_RangeLast = #ds_pubrange{until = LastSeqno} = lists:last(Ranges),
RangesUnacked = lists:dropwhile(
fun(#ds_pubrange{type = T}) -> T == checkpoint end,
Ranges
),
#inflight{next_seqno = LastSeqNo} = Inflight1,
case NMessages > 0 of
true ->
Range = #range{
first = FirstSeqNo,
last = LastSeqNo - 1,
stream = Stream,
iterator_next = ItEnd
case RangesUnacked of
[#ds_pubrange{id = {_, AckedUntil}} | _] ->
{AckedUntil, LastSeqno};
[] ->
{LastSeqno, LastSeqno}
end.
-spec get_ranges(emqx_persistent_session_ds:id()) -> [ds_pubrange()].
get_ranges(SessionId) ->
Pat = erlang:make_tuple(
record_info(size, ds_pubrange),
'_',
[{1, ds_pubrange}, {#ds_pubrange.id, {SessionId, '_'}}]
),
mnesia:match_object(?SESSION_PUBRANGE_TAB, Pat, read).
fetch(SessionId, Inflight0, [DSStream | Streams], N, Acc) when N > 0 ->
#inflight{next_seqno = FirstSeqno, offset_ranges = Ranges} = Inflight0,
ItBegin = get_last_iterator(DSStream, Ranges),
{ok, ItEnd, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N),
case Messages of
[] ->
fetch(SessionId, Inflight0, Streams, N, Acc);
_ ->
{Publishes, UntilSeqno} = publish(FirstSeqno, Messages, _PreserveQoS0 = true),
Size = range_size(FirstSeqno, UntilSeqno),
%% We need to preserve the iterator pointing to the beginning of the
%% range, so that we can replay it if needed.
Range0 = #ds_pubrange{
id = {SessionId, FirstSeqno},
type = inflight,
until = UntilSeqno,
stream = DSStream#ds_stream.ref,
iterator = ItBegin
},
Inflight = Inflight1#inflight{offset_ranges = Ranges0 ++ [Range]},
fetch(SessionId, Inflight, Streams, N - NMessages, Publishes);
false ->
fetch(SessionId, Inflight1, Streams, N, Publishes)
end.
ok = preserve_range(Range0),
%% ...Yet we need to keep the iterator pointing past the end of the
%% range, so that we can pick up where we left off: it will become
%% `ItBegin` of the next range for this stream.
Range = Range0#ds_pubrange{iterator = ItEnd},
Inflight = Inflight0#inflight{
next_seqno = UntilSeqno,
offset_ranges = Ranges ++ [Range]
},
fetch(SessionId, Inflight, Streams, N - Size, [Publishes | Acc])
end;
fetch(_SessionId, Inflight, _Streams, _N, Acc) ->
Publishes = lists:append(lists:reverse(Acc)),
{Publishes, Inflight}.
-spec update_iterator(emqx_persistent_session_ds:id(), emqx_ds:stream(), emqx_ds:iterator()) -> ok.
update_iterator(DSSessionId, Stream, Iterator) ->
%% Workaround: we convert `Stream' to a binary before attempting to store it in
%% mnesia(rocksdb) because of a bug in `mnesia_rocksdb' when trying to do
%% `mnesia:dirty_all_keys' later.
StreamBin = term_to_binary(Stream),
mria:dirty_write(?SESSION_ITER_TAB, #ds_iter{id = {DSSessionId, StreamBin}, iter = Iterator}).
discard_acked(
SessionId,
Inflight0 = #inflight{acked_until = AckedUntil, offset_ranges = Ranges0}
) ->
%% TODO: This could be kept and incrementally updated in the inflight state.
Checkpoints = find_checkpoints(Ranges0),
%% TODO: Wrap this in `mria:async_dirty/2`?
Ranges = discard_acked_ranges(SessionId, AckedUntil, Checkpoints, Ranges0),
Inflight0#inflight{offset_ranges = Ranges}.
get_last_iterator(SessionId, Stream, Ranges) ->
case lists:keyfind(Stream, #range.stream, lists:reverse(Ranges)) of
false ->
get_iterator(SessionId, Stream);
#range{iterator_next = Next} ->
Next
end.
-spec get_iterator(emqx_persistent_session_ds:id(), emqx_ds:stream()) -> emqx_ds:iterator().
get_iterator(DSSessionId, Stream) ->
%% See comment in `update_iterator'.
StreamBin = term_to_binary(Stream),
Id = {DSSessionId, StreamBin},
[#ds_iter{iter = It}] = mnesia:dirty_read(?SESSION_ITER_TAB, Id),
It.
-spec get_streams(emqx_persistent_session_ds:id()) -> [emqx_ds:stream()].
get_streams(SessionId) ->
lists:map(
fun(#ds_stream{stream = Stream}) ->
Stream
find_checkpoints(Ranges) ->
lists:foldl(
fun(#ds_pubrange{stream = StreamRef, until = Until}, Acc) ->
%% For each stream, remember the last range over this stream.
Acc#{StreamRef => Until}
end,
mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId)
#{},
Ranges
).
discard_acked_ranges(
SessionId,
AckedUntil,
Checkpoints,
[Range = #ds_pubrange{until = Until, stream = StreamRef} | Rest]
) when Until =< AckedUntil ->
%% This range has been fully acked.
%% Either discard it completely, or preserve the iterator for the next range
%% over this stream (i.e. a checkpoint).
RangeKept =
case maps:get(StreamRef, Checkpoints) of
CP when CP > Until ->
discard_range(Range),
[];
Until ->
[checkpoint_range(Range)]
end,
%% Since we're (intentionally) not using transactions here, it's important to
%% issue database writes in the same order in which ranges are stored: from
%% the oldest to the newest. This is also why we need to compute which ranges
%% should become checkpoints before we start writing anything.
RangeKept ++ discard_acked_ranges(SessionId, AckedUntil, Checkpoints, Rest);
discard_acked_ranges(_SessionId, _AckedUntil, _Checkpoints, Ranges) ->
%% The rest of ranges (if any) still have unacked messages.
Ranges.
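
A worked example of the rules above, with hypothetical stream refs s1/s2 and acked_until = 9:

%%   R1 = #ds_pubrange{id = {SId, 1}, until = 4,  stream = s1, ...}
%%   R2 = #ds_pubrange{id = {SId, 4}, until = 9,  stream = s1, ...}
%%   R3 = #ds_pubrange{id = {SId, 9}, until = 12, stream = s2, ...}
%% find_checkpoints/1 yields #{s1 => 9, s2 => 12}. R1 is fully acked and
%% s1 has a later range (9 > 4), so R1 is discarded outright. R2 is fully
%% acked and is the last range over s1 (until =:= 9), so it is kept as a
%% checkpoint. R3 is not fully acked (12 > 9) and stays inflight.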
replay_range(
Range0 = #ds_pubrange{type = inflight, id = {_, First}, until = Until, iterator = It},
AckedUntil,
Acc
) ->
Size = range_size(First, Until),
FirstUnacked = max(First, AckedUntil),
{ok, ItNext, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, It, Size),
MessagesUnacked =
case FirstUnacked of
First ->
Messages;
_ ->
lists:nthtail(range_size(First, FirstUnacked), Messages)
end,
MessagesReplay = [emqx_message:set_flag(dup, true, Msg) || Msg <- MessagesUnacked],
%% Asserting that range is consistent with the message storage state.
{Replies, Until} = publish(FirstUnacked, MessagesReplay, _PreserveQoS0 = false),
%% Again, we need to keep the iterator pointing past the end of the
%% range, so that we can pick up where we left off.
Range = Range0#ds_pubrange{iterator = ItNext},
{Range, Replies ++ Acc};
replay_range(Range0 = #ds_pubrange{type = checkpoint}, _AckedUntil, Acc) ->
{Range0, Acc}.
publish(FirstSeqNo, Messages, PreserveQos0) ->
do_publish(FirstSeqNo, Messages, PreserveQos0, []).
do_publish(SeqNo, [], _, Acc) ->
{lists:reverse(Acc), SeqNo};
do_publish(SeqNo, [#message{qos = 0} | Messages], false, Acc) ->
do_publish(SeqNo, Messages, false, Acc);
do_publish(SeqNo, [#message{qos = 0} = Message | Messages], true, Acc) ->
do_publish(SeqNo, Messages, true, [{undefined, Message} | Acc]);
do_publish(SeqNo, [Message | Messages], PreserveQos0, Acc) ->
PacketId = seqno_to_packet_id(SeqNo),
do_publish(next_seqno(SeqNo), Messages, PreserveQos0, [{PacketId, Message} | Acc]).
-spec preserve_range(ds_pubrange()) -> ok.
preserve_range(Range = #ds_pubrange{type = inflight}) ->
mria:dirty_write(?SESSION_PUBRANGE_TAB, Range).
-spec discard_range(ds_pubrange()) -> ok.
discard_range(#ds_pubrange{id = RangeId}) ->
mria:dirty_delete(?SESSION_PUBRANGE_TAB, RangeId).
-spec checkpoint_range(ds_pubrange()) -> ds_pubrange().
checkpoint_range(Range0 = #ds_pubrange{type = inflight}) ->
Range = Range0#ds_pubrange{type = checkpoint},
ok = mria:dirty_write(?SESSION_PUBRANGE_TAB, Range),
Range;
checkpoint_range(Range = #ds_pubrange{type = checkpoint}) ->
%% This range should have been checkpointed already.
Range.
get_last_iterator(DSStream = #ds_stream{ref = StreamRef}, Ranges) ->
case lists:keyfind(StreamRef, #ds_pubrange.stream, lists:reverse(Ranges)) of
false ->
DSStream#ds_stream.beginning;
#ds_pubrange{iterator = ItNext} ->
ItNext
end.
-spec get_streams(emqx_persistent_session_ds:id()) -> [ds_stream()].
get_streams(SessionId) ->
mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId).
next_seqno(Seqno) ->
NextSeqno = Seqno + 1,
case seqno_to_packet_id(NextSeqno) of
0 ->
%% We skip sequence numbers that lead to PacketId = 0 to
%% simplify math. Note: it leads to occasional gaps in the
%% sequence numbers.
NextSeqno + 1;
_ ->
NextSeqno
end.
%% Reconstruct session counter by adding most significant bits from
%% the current counter to the packet id.
-spec packet_id_to_seqno(non_neg_integer(), emqx_types:packet_id()) -> non_neg_integer().
-spec packet_id_to_seqno(_Next :: seqno(), emqx_types:packet_id()) -> seqno().
packet_id_to_seqno(NextSeqNo, PacketId) ->
Epoch = NextSeqNo bsr 16,
case packet_id_to_seqno_(Epoch, PacketId) of
@ -243,10 +344,20 @@ packet_id_to_seqno(NextSeqNo, PacketId) ->
packet_id_to_seqno_(Epoch - 1, PacketId)
end.
-spec packet_id_to_seqno_(non_neg_integer(), emqx_types:packet_id()) -> non_neg_integer().
-spec packet_id_to_seqno_(non_neg_integer(), emqx_types:packet_id()) -> seqno().
packet_id_to_seqno_(Epoch, PacketId) ->
(Epoch bsl 16) + PacketId.
-spec seqno_to_packet_id(seqno()) -> emqx_types:packet_id() | 0.
seqno_to_packet_id(Seqno) ->
Seqno rem 16#10000.
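
A short worked example of the seqno <-> packet id mapping (illustrative values, derived from the definitions above):

%% seqno_to_packet_id/1 keeps the low 16 bits:
%%   seqno_to_packet_id(1)        =:= 1
%%   seqno_to_packet_id(16#FFFF)  =:= 16#FFFF
%%   seqno_to_packet_id(16#10001) =:= 1        % same id, next epoch
%% next_seqno/1 skips seqnos whose packet id would be 0:
%%   next_seqno(16#FFFF)          =:= 16#10001 % 16#10000 is skipped
%% packet_id_to_seqno/2 restores the missing high bits from the
%% session's next seqno:
%%   packet_id_to_seqno(16#10002, 1) =:= 16#10001
%%   %% Epoch = 16#10002 bsr 16 = 1; (1 bsl 16) + 1 = 16#10001.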
range_size(FirstSeqno, UntilSeqno) ->
%% This function assumes that gaps in the sequence ID occur _only_ when the
%% packet ID wraps.
Size = UntilSeqno - FirstSeqno,
Size + (FirstSeqno bsr 16) - (UntilSeqno bsr 16).
-spec shuffle([A]) -> [A].
shuffle(L0) ->
L1 = lists:map(
@ -259,6 +370,10 @@ shuffle(L0) ->
{_, L} = lists:unzip(L2),
L.
ro_transaction(Fun) ->
{atomic, Res} = mria:ro_transaction(?DS_MRIA_SHARD, Fun),
Res.
-ifdef(TEST).
%% This test only tests boundary conditions (to make sure property-based test didn't skip them):
@ -311,4 +426,40 @@ seqno_gen(NextSeqNo) ->
Max = max(0, NextSeqNo - 1),
range(Min, Max).
range_size_test_() ->
[
?_assertEqual(0, range_size(42, 42)),
?_assertEqual(1, range_size(42, 43)),
?_assertEqual(1, range_size(16#ffff, 16#10001)),
?_assertEqual(16#ffff - 456 + 123, range_size(16#1f0000 + 456, 16#200000 + 123))
].
compute_inflight_range_test_() ->
[
?_assertEqual(
{1, 1},
compute_inflight_range([])
),
?_assertEqual(
{12, 42},
compute_inflight_range([
#ds_pubrange{id = {<<>>, 1}, until = 2, type = checkpoint},
#ds_pubrange{id = {<<>>, 4}, until = 8, type = checkpoint},
#ds_pubrange{id = {<<>>, 11}, until = 12, type = checkpoint},
#ds_pubrange{id = {<<>>, 12}, until = 13, type = inflight},
#ds_pubrange{id = {<<>>, 13}, until = 20, type = inflight},
#ds_pubrange{id = {<<>>, 20}, until = 42, type = inflight}
])
),
?_assertEqual(
{13, 13},
compute_inflight_range([
#ds_pubrange{id = {<<>>, 1}, until = 2, type = checkpoint},
#ds_pubrange{id = {<<>>, 4}, until = 8, type = checkpoint},
#ds_pubrange{id = {<<>>, 11}, until = 12, type = checkpoint},
#ds_pubrange{id = {<<>>, 12}, until = 13, type = checkpoint}
])
)
].
-endif.

View File

@ -70,24 +70,27 @@
do_ensure_all_iterators_closed/1
]).
-export([print_session/1]).
-ifdef(TEST).
-export([
session_open/1,
list_all_sessions/0,
list_all_subscriptions/0,
list_all_streams/0,
list_all_iterators/0
list_all_pubranges/0
]).
-endif.
%% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be
%% an atom, in theory (?).
-type id() :: binary().
-type topic_filter() :: emqx_ds:topic_filter().
-type topic_filter() :: emqx_types:topic().
-type topic_filter_words() :: emqx_ds:topic_filter().
-type subscription_id() :: {id(), topic_filter()}.
-type subscription() :: #{
start_time := emqx_ds:time(),
propts := map(),
props := map(),
extra := map()
}.
-type session() :: #{
@ -98,7 +101,7 @@
%% When the session should expire
expires_at := timestamp() | never,
%% Clients Subscriptions.
iterators := #{topic() => subscription()},
subscriptions := #{topic_filter() => subscription()},
%% Inflight messages
inflight := emqx_persistent_message_ds_replayer:inflight(),
%% Receive maximum
@ -108,7 +111,6 @@
}.
-type timestamp() :: emqx_utils_calendar:epoch_millisecond().
-type topic() :: emqx_types:topic().
-type clientinfo() :: emqx_types:clientinfo().
-type conninfo() :: emqx_session:conninfo().
-type replies() :: emqx_session:replies().
@ -142,7 +144,9 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo) ->
%% somehow isolate those idling not-yet-expired sessions into a separate process
%% space, and move this call back into `emqx_cm` where it belongs.
ok = emqx_cm:discard_session(ClientID),
case open_session(ClientID) of
case maps:get(clean_start, ConnInfo, false) of
false ->
case session_open(ClientID) of
Session0 = #{} ->
ensure_timers(),
ReceiveMaximum = receive_maximum(ConnInfo),
@ -150,27 +154,16 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo) ->
{true, Session, []};
false ->
false
end.
ensure_session(ClientID, ConnInfo, Conf) ->
{ok, Session, #{}} = session_ensure_new(ClientID, Conf),
ReceiveMaximum = receive_maximum(ConnInfo),
Session#{iterators => #{}, receive_maximum => ReceiveMaximum}.
open_session(ClientID) ->
case session_open(ClientID) of
{ok, Session, Subscriptions} ->
Session#{iterators => prep_subscriptions(Subscriptions)};
false ->
end;
true ->
session_drop(ClientID),
false
end.
prep_subscriptions(Subscriptions) ->
maps:fold(
fun(Topic, Subscription, Acc) -> Acc#{emqx_topic:join(Topic) => Subscription} end,
#{},
Subscriptions
).
ensure_session(ClientID, ConnInfo, Conf) ->
Session = session_ensure_new(ClientID, Conf),
ReceiveMaximum = receive_maximum(ConnInfo),
Session#{subscriptions => #{}, receive_maximum => ReceiveMaximum}.
-spec destroy(session() | clientinfo()) -> ok.
destroy(#{id := ClientID}) ->
@ -195,9 +188,9 @@ info(created_at, #{created_at := CreatedAt}) ->
CreatedAt;
info(is_persistent, #{}) ->
true;
info(subscriptions, #{iterators := Iters}) ->
info(subscriptions, #{subscriptions := Iters}) ->
maps:map(fun(_, #{props := SubOpts}) -> SubOpts end, Iters);
info(subscriptions_cnt, #{iterators := Iters}) ->
info(subscriptions_cnt, #{subscriptions := Iters}) ->
maps:size(Iters);
info(subscriptions_max, #{props := Conf}) ->
maps:get(max_subscriptions, Conf);
@ -235,51 +228,70 @@ info(await_rel_timeout, #{props := Conf}) ->
stats(Session) ->
info(?STATS_KEYS, Session).
%% Debug/troubleshooting
-spec print_session(emqx_types:client_id()) -> map() | undefined.
print_session(ClientId) ->
catch ro_transaction(
fun() ->
case mnesia:read(?SESSION_TAB, ClientId) of
[Session] ->
#{
session => Session,
streams => mnesia:read(?SESSION_STREAM_TAB, ClientId),
pubranges => session_read_pubranges(ClientId),
subscriptions => session_read_subscriptions(ClientId)
};
[] ->
undefined
end
end
).
%%--------------------------------------------------------------------
%% Client -> Broker: SUBSCRIBE / UNSUBSCRIBE
%%--------------------------------------------------------------------
-spec subscribe(topic(), emqx_types:subopts(), session()) ->
-spec subscribe(topic_filter(), emqx_types:subopts(), session()) ->
{ok, session()} | {error, emqx_types:reason_code()}.
subscribe(
TopicFilter,
SubOpts,
Session = #{id := ID, iterators := Iters}
) when is_map_key(TopicFilter, Iters) ->
Iterator = maps:get(TopicFilter, Iters),
NIterator = update_subscription(TopicFilter, Iterator, SubOpts, ID),
{ok, Session#{iterators := Iters#{TopicFilter => NIterator}}};
Session = #{id := ID, subscriptions := Subs}
) when is_map_key(TopicFilter, Subs) ->
Subscription = maps:get(TopicFilter, Subs),
NSubscription = update_subscription(TopicFilter, Subscription, SubOpts, ID),
{ok, Session#{subscriptions := Subs#{TopicFilter => NSubscription}}};
subscribe(
TopicFilter,
SubOpts,
Session = #{id := ID, iterators := Iters}
Session = #{id := ID, subscriptions := Subs}
) ->
% TODO: max_subscriptions
Iterator = add_subscription(TopicFilter, SubOpts, ID),
{ok, Session#{iterators := Iters#{TopicFilter => Iterator}}}.
Subscription = add_subscription(TopicFilter, SubOpts, ID),
{ok, Session#{subscriptions := Subs#{TopicFilter => Subscription}}}.
-spec unsubscribe(topic(), session()) ->
-spec unsubscribe(topic_filter(), session()) ->
{ok, session(), emqx_types:subopts()} | {error, emqx_types:reason_code()}.
unsubscribe(
TopicFilter,
Session = #{id := ID, iterators := Iters}
) when is_map_key(TopicFilter, Iters) ->
Iterator = maps:get(TopicFilter, Iters),
SubOpts = maps:get(props, Iterator),
Session = #{id := ID, subscriptions := Subs}
) when is_map_key(TopicFilter, Subs) ->
Subscription = maps:get(TopicFilter, Subs),
SubOpts = maps:get(props, Subscription),
ok = del_subscription(TopicFilter, ID),
{ok, Session#{iterators := maps:remove(TopicFilter, Iters)}, SubOpts};
{ok, Session#{subscriptions := maps:remove(TopicFilter, Subs)}, SubOpts};
unsubscribe(
_TopicFilter,
_Session = #{}
) ->
{error, ?RC_NO_SUBSCRIPTION_EXISTED}.
-spec get_subscription(topic(), session()) ->
-spec get_subscription(topic_filter(), session()) ->
emqx_types:subopts() | undefined.
get_subscription(TopicFilter, #{iterators := Iters}) ->
case maps:get(TopicFilter, Iters, undefined) of
Iterator = #{} ->
maps:get(props, Iterator);
get_subscription(TopicFilter, #{subscriptions := Subs}) ->
case maps:get(TopicFilter, Subs, undefined) of
Subscription = #{} ->
maps:get(props, Subscription);
undefined ->
undefined
end.
@ -289,12 +301,12 @@ get_subscription(TopicFilter, #{iterators := Iters}) ->
%%--------------------------------------------------------------------
-spec publish(emqx_types:packet_id(), emqx_types:message(), session()) ->
{ok, emqx_types:publish_result(), replies(), session()}
{ok, emqx_types:publish_result(), session()}
| {error, emqx_types:reason_code()}.
publish(_PacketId, Msg, Session) ->
%% TODO:
%% TODO: QoS2
Result = emqx_broker:publish(Msg),
{ok, Result, [], Session}.
{ok, Result, Session}.
%%--------------------------------------------------------------------
%% Client -> Broker: PUBACK
@ -353,7 +365,7 @@ pubcomp(_ClientInfo, _PacketId, _Session = #{}) ->
-spec deliver(clientinfo(), [emqx_types:deliver()], session()) ->
{ok, replies(), session()}.
deliver(_ClientInfo, _Delivers, Session) ->
%% TODO: QoS0 and system messages end up here.
%% TODO: system messages end up here.
{ok, [], Session}.
-spec handle_timeout(clientinfo(), _Timeout, session()) ->
@ -364,25 +376,26 @@ handle_timeout(
Session = #{id := Id, inflight := Inflight0, receive_maximum := ReceiveMaximum}
) ->
{Publishes, Inflight} = emqx_persistent_message_ds_replayer:poll(Id, Inflight0, ReceiveMaximum),
%% TODO: make these values configurable:
IdlePollInterval = emqx_config:get([session_persistence, idle_poll_interval]),
Timeout =
case Publishes of
[] ->
100;
IdlePollInterval;
[_ | _] ->
0
end,
ensure_timer(pull, Timeout),
{ok, Publishes, Session#{inflight => Inflight}};
handle_timeout(_ClientInfo, get_streams, Session = #{id := Id}) ->
renew_streams(Id),
handle_timeout(_ClientInfo, get_streams, Session) ->
renew_streams(Session),
ensure_timer(get_streams),
{ok, [], Session}.
-spec replay(clientinfo(), [], session()) ->
{ok, replies(), session()}.
replay(_ClientInfo, [], Session = #{}) ->
{ok, [], Session}.
replay(_ClientInfo, [], Session = #{inflight := Inflight0}) ->
{Replies, Inflight} = emqx_persistent_message_ds_replayer:replay(Inflight0),
{ok, Replies, Session#{inflight := Inflight}}.
%%--------------------------------------------------------------------
@ -392,14 +405,13 @@ disconnect(Session = #{}) ->
-spec terminate(Reason :: term(), session()) -> ok.
terminate(_Reason, _Session = #{}) ->
% TODO: close iterators
ok.
%%--------------------------------------------------------------------
-spec add_subscription(topic(), emqx_types:subopts(), id()) ->
-spec add_subscription(topic_filter(), emqx_types:subopts(), id()) ->
subscription().
add_subscription(TopicFilterBin, SubOpts, DSSessionID) ->
add_subscription(TopicFilter, SubOpts, DSSessionID) ->
%% N.B.: we chose to update the router before adding the subscription to the
%% session/iterator table. The reasoning for this is as follows:
%%
@ -418,8 +430,7 @@ add_subscription(TopicFilterBin, SubOpts, DSSessionID) ->
%% since it is guarded by a transaction context: we consider a subscription
%% operation to be successful if it ended up changing this table. Both router
%% and iterator information can be reconstructed from this table, if needed.
ok = emqx_persistent_session_ds_router:do_add_route(TopicFilterBin, DSSessionID),
TopicFilter = emqx_topic:words(TopicFilterBin),
ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, DSSessionID),
{ok, DSSubExt, IsNew} = session_add_subscription(
DSSessionID, TopicFilter, SubOpts
),
@ -427,20 +438,19 @@ add_subscription(TopicFilterBin, SubOpts, DSSessionID) ->
%% we'll list streams and open iterators when implementing message replay.
DSSubExt.
-spec update_subscription(topic(), subscription(), emqx_types:subopts(), id()) ->
-spec update_subscription(topic_filter(), subscription(), emqx_types:subopts(), id()) ->
subscription().
update_subscription(TopicFilterBin, DSSubExt, SubOpts, DSSessionID) ->
TopicFilter = emqx_topic:words(TopicFilterBin),
update_subscription(TopicFilter, DSSubExt, SubOpts, DSSessionID) ->
{ok, NDSSubExt, false} = session_add_subscription(
DSSessionID, TopicFilter, SubOpts
),
ok = ?tp(persistent_session_ds_iterator_updated, #{sub => DSSubExt}),
NDSSubExt.
-spec del_subscription(topic(), id()) ->
-spec del_subscription(topic_filter(), id()) ->
ok.
del_subscription(TopicFilterBin, DSSessionId) ->
TopicFilter = emqx_topic:words(TopicFilterBin),
del_subscription(TopicFilter, DSSessionId) ->
%% TODO: transaction?
?tp_span(
persistent_session_ds_subscription_delete,
#{session_id => DSSessionId},
@ -449,7 +459,7 @@ del_subscription(TopicFilterBin, DSSessionId) ->
?tp_span(
persistent_session_ds_subscription_route_delete,
#{session_id => DSSessionId},
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilterBin, DSSessionId)
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, DSSessionId)
).
%%--------------------------------------------------------------------
@ -457,10 +467,6 @@ del_subscription(TopicFilterBin, DSSessionId) ->
%%--------------------------------------------------------------------
create_tables() ->
ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{
backend => builtin,
storage => {emqx_ds_storage_bitfield_lts, #{}}
}),
ok = mria:create_table(
?SESSION_TAB,
[
@ -492,17 +498,20 @@ create_tables() ->
]
),
ok = mria:create_table(
?SESSION_ITER_TAB,
?SESSION_PUBRANGE_TAB,
[
{rlog_shard, ?DS_MRIA_SHARD},
{type, set},
{type, ordered_set},
{storage, storage()},
{record_name, ds_iter},
{attributes, record_info(fields, ds_iter)}
{record_name, ds_pubrange},
{attributes, record_info(fields, ds_pubrange)}
]
),
ok = mria:wait_for_tables([
?SESSION_TAB, ?SESSION_SUBSCRIPTIONS_TAB, ?SESSION_STREAM_TAB, ?SESSION_ITER_TAB
?SESSION_TAB,
?SESSION_SUBSCRIPTIONS_TAB,
?SESSION_STREAM_TAB,
?SESSION_PUBRANGE_TAB
]),
ok.
@ -522,27 +531,34 @@ storage() ->
%% Note: session API doesn't handle session takeovers, it's the job of
%% the broker.
-spec session_open(id()) ->
{ok, session(), #{topic() => subscription()}} | false.
session() | false.
session_open(SessionId) ->
transaction(fun() ->
ro_transaction(fun() ->
case mnesia:read(?SESSION_TAB, SessionId, write) of
[Record = #session{}] ->
Session = export_session(Record),
DSSubs = session_read_subscriptions(SessionId),
Subscriptions = export_subscriptions(DSSubs),
{ok, Session, Subscriptions};
Inflight = emqx_persistent_message_ds_replayer:open(SessionId),
Session#{
subscriptions => Subscriptions,
inflight => Inflight
};
[] ->
false
end
end).
-spec session_ensure_new(id(), _Props :: map()) ->
{ok, session(), #{topic() => subscription()}}.
session().
session_ensure_new(SessionId, Props) ->
transaction(fun() ->
ok = session_drop_subscriptions(SessionId),
Session = export_session(session_create(SessionId, Props)),
{ok, Session, #{}}
Session#{
subscriptions => #{},
inflight => emqx_persistent_message_ds_replayer:new()
}
end).
session_create(SessionId, Props) ->
@ -550,8 +566,7 @@ session_create(SessionId, Props) ->
id = SessionId,
created_at = erlang:system_time(millisecond),
expires_at = never,
props = Props,
inflight = emqx_persistent_message_ds_replayer:new()
props = Props
},
ok = mnesia:write(?SESSION_TAB, Session, write),
Session.
@ -562,19 +577,18 @@ session_create(SessionId, Props) ->
session_drop(DSSessionId) ->
transaction(fun() ->
ok = session_drop_subscriptions(DSSessionId),
ok = session_drop_iterators(DSSessionId),
ok = session_drop_pubranges(DSSessionId),
ok = session_drop_streams(DSSessionId),
ok = mnesia:delete(?SESSION_TAB, DSSessionId, write)
end).
-spec session_drop_subscriptions(id()) -> ok.
session_drop_subscriptions(DSSessionId) ->
Subscriptions = session_read_subscriptions(DSSessionId),
Subscriptions = session_read_subscriptions(DSSessionId, write),
lists:foreach(
fun(#ds_sub{id = DSSubId} = DSSub) ->
TopicFilter = subscription_id_to_topic_filter(DSSubId),
TopicFilterBin = emqx_topic:join(TopicFilter),
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilterBin, DSSessionId),
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, DSSessionId),
ok = session_del_subscription(DSSub)
end,
Subscriptions
@ -633,13 +647,27 @@ session_del_subscription(DSSessionId, TopicFilter) ->
session_del_subscription(#ds_sub{id = DSSubId}) ->
mnesia:delete(?SESSION_SUBSCRIPTIONS_TAB, DSSubId, write).
session_read_subscriptions(DSSessionId) ->
session_read_subscriptions(DSSessionID) ->
session_read_subscriptions(DSSessionID, read).
session_read_subscriptions(DSSessionId, LockKind) ->
MS = ets:fun2ms(
fun(Sub = #ds_sub{id = {Sess, _}}) when Sess =:= DSSessionId ->
Sub
end
),
mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, read).
mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, LockKind).
session_read_pubranges(DSSessionID) ->
session_read_pubranges(DSSessionID, read).
session_read_pubranges(DSSessionId, LockKind) ->
MS = ets:fun2ms(
fun(#ds_pubrange{id = {Sess, First}}) when Sess =:= DSSessionId ->
{DSSessionId, First}
end
),
mnesia:select(?SESSION_PUBRANGE_TAB, MS, LockKind).
-spec new_subscription_id(id(), topic_filter()) -> {subscription_id(), integer()}.
new_subscription_id(DSSessionId, TopicFilter) ->
@ -677,77 +705,77 @@ do_ensure_all_iterators_closed(_DSSessionID) ->
%% Reading batches
%%--------------------------------------------------------------------
-spec renew_streams(id()) -> ok.
renew_streams(DSSessionId) ->
Subscriptions = ro_transaction(fun() -> session_read_subscriptions(DSSessionId) end),
ExistingStreams = ro_transaction(fun() -> mnesia:read(?SESSION_STREAM_TAB, DSSessionId) end),
lists:foreach(
fun(#ds_sub{id = {_, TopicFilter}, start_time = StartTime}) ->
renew_streams(DSSessionId, ExistingStreams, TopicFilter, StartTime)
-spec renew_streams(session()) -> ok.
renew_streams(#{id := SessionId, subscriptions := Subscriptions}) ->
transaction(fun() ->
ExistingStreams = mnesia:read(?SESSION_STREAM_TAB, SessionId, write),
maps:fold(
fun(TopicFilter, #{start_time := StartTime}, Streams) ->
TopicFilterWords = emqx_topic:words(TopicFilter),
renew_topic_streams(SessionId, TopicFilterWords, StartTime, Streams)
end,
ExistingStreams,
Subscriptions
)
end),
ok.
-spec renew_topic_streams(id(), topic_filter_words(), emqx_ds:time(), _Acc :: [ds_stream()]) -> ok.
renew_topic_streams(DSSessionId, TopicFilter, StartTime, ExistingStreams) ->
TopicStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime),
lists:foldl(
fun({Rank, Stream}, Streams) ->
case lists:keymember(Stream, #ds_stream.stream, Streams) of
true ->
Streams;
false ->
StreamRef = length(Streams) + 1,
DSStream = session_store_stream(
DSSessionId,
StreamRef,
Stream,
Rank,
TopicFilter,
StartTime
),
[DSStream | Streams]
end
end,
ExistingStreams,
TopicStreams
).
-spec renew_streams(id(), [ds_stream()], emqx_ds:topic_filter(), emqx_ds:time()) -> ok.
renew_streams(DSSessionId, ExistingStreams, TopicFilter, StartTime) ->
AllStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime),
transaction(
fun() ->
lists:foreach(
fun({Rank, Stream}) ->
Rec = #ds_stream{
session = DSSessionId,
topic_filter = TopicFilter,
stream = Stream,
rank = Rank
},
case lists:member(Rec, ExistingStreams) of
true ->
ok;
false ->
mnesia:write(?SESSION_STREAM_TAB, Rec, write),
{ok, Iterator} = emqx_ds:make_iterator(
?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime
session_store_stream(DSSessionId, StreamRef, Stream, Rank, TopicFilter, StartTime) ->
{ok, ItBegin} = emqx_ds:make_iterator(
?PERSISTENT_MESSAGE_DB,
Stream,
TopicFilter,
StartTime
),
%% Workaround: we convert `Stream' to a binary before
%% attempting to store it in mnesia(rocksdb) because of a bug
%% in `mnesia_rocksdb' when trying to do
%% `mnesia:dirty_all_keys' later.
StreamBin = term_to_binary(Stream),
IterRec = #ds_iter{id = {DSSessionId, StreamBin}, iter = Iterator},
mnesia:write(?SESSION_ITER_TAB, IterRec, write)
end
end,
AllStreams
)
end
).
DSStream = #ds_stream{
session = DSSessionId,
ref = StreamRef,
stream = Stream,
rank = Rank,
beginning = ItBegin
},
mnesia:write(?SESSION_STREAM_TAB, DSStream, write),
DSStream.
%% must be called inside a transaction
-spec session_drop_streams(id()) -> ok.
session_drop_streams(DSSessionId) ->
MS = ets:fun2ms(
fun(#ds_stream{session = DSSessionId0}) when DSSessionId0 =:= DSSessionId ->
DSSessionId0
end
),
StreamIDs = mnesia:select(?SESSION_STREAM_TAB, MS, write),
lists:foreach(fun(Key) -> mnesia:delete(?SESSION_STREAM_TAB, Key, write) end, StreamIDs).
mnesia:delete(?SESSION_STREAM_TAB, DSSessionId, write).
%% must be called inside a transaction
-spec session_drop_iterators(id()) -> ok.
session_drop_iterators(DSSessionId) ->
MS = ets:fun2ms(
fun(#ds_iter{id = {DSSessionId0, StreamBin}}) when DSSessionId0 =:= DSSessionId ->
StreamBin
end
),
StreamBins = mnesia:select(?SESSION_ITER_TAB, MS, write),
-spec session_drop_pubranges(id()) -> ok.
session_drop_pubranges(DSSessionId) ->
RangeIds = session_read_pubranges(DSSessionId, write),
lists:foreach(
fun(StreamBin) ->
mnesia:delete(?SESSION_ITER_TAB, {DSSessionId, StreamBin}, write)
fun(RangeId) ->
mnesia:delete(?SESSION_PUBRANGE_TAB, RangeId, write)
end,
StreamBins
RangeIds
).
%%--------------------------------------------------------------------------------
@ -772,7 +800,7 @@ export_subscriptions(DSSubs) ->
).
export_session(#session{} = Record) ->
export_record(Record, #session.id, [id, created_at, expires_at, inflight, props], #{}).
export_record(Record, #session.id, [id, created_at, expires_at, props], #{}).
export_subscription(#ds_sub{} = Record) ->
export_record(Record, #ds_sub.start_time, [start_time, props, extra], #{}).
@ -808,10 +836,7 @@ receive_maximum(ConnInfo) ->
list_all_sessions() ->
DSSessionIds = mnesia:dirty_all_keys(?SESSION_TAB),
Sessions = lists:map(
fun(SessionID) ->
{ok, Session, Subscriptions} = session_open(SessionID),
{SessionID, #{session => Session, subscriptions => Subscriptions}}
end,
fun(SessionID) -> {SessionID, session_open(SessionID)} end,
DSSessionIds
),
maps:from_list(Sessions).
@ -850,16 +875,18 @@ list_all_streams() ->
),
maps:from_list(DSStreams).
list_all_iterators() ->
DSIterIds = mnesia:dirty_all_keys(?SESSION_ITER_TAB),
DSIters = lists:map(
fun(DSIterId) ->
[Record] = mnesia:dirty_read(?SESSION_ITER_TAB, DSIterId),
{DSIterId, export_record(Record, #ds_iter.id, [id, iter], #{})}
end,
DSIterIds
list_all_pubranges() ->
DSPubranges = mnesia:dirty_match_object(?SESSION_PUBRANGE_TAB, #ds_pubrange{_ = '_'}),
lists:foldl(
fun(Record = #ds_pubrange{id = {SessionId, First}}, Acc) ->
Range = export_record(
Record, #ds_pubrange.until, [until, stream, type, iterator], #{first => First}
),
maps:from_list(DSIters).
maps:put(SessionId, maps:get(SessionId, Acc, []) ++ [Range], Acc)
end,
#{},
DSPubranges
).
%% ifdef(TEST)
-endif.

View File

@ -21,7 +21,7 @@
-define(SESSION_TAB, emqx_ds_session).
-define(SESSION_SUBSCRIPTIONS_TAB, emqx_ds_session_subscriptions).
-define(SESSION_STREAM_TAB, emqx_ds_stream_tab).
-define(SESSION_ITER_TAB, emqx_ds_iter_tab).
-define(SESSION_PUBRANGE_TAB, emqx_ds_pubrange_tab).
-define(DS_MRIA_SHARD, emqx_ds_session_shard).
-record(ds_sub, {
@ -34,17 +34,39 @@
-record(ds_stream, {
session :: emqx_persistent_session_ds:id(),
topic_filter :: emqx_ds:topic_filter(),
ref :: _StreamRef,
stream :: emqx_ds:stream(),
rank :: emqx_ds:stream_rank()
rank :: emqx_ds:stream_rank(),
beginning :: emqx_ds:iterator()
}).
-type ds_stream() :: #ds_stream{}.
-type ds_stream_bin() :: binary().
-record(ds_iter, {
id :: {emqx_persistent_session_ds:id(), ds_stream_bin()},
iter :: emqx_ds:iterator()
-record(ds_pubrange, {
id :: {
%% What session this range belongs to.
_Session :: emqx_persistent_session_ds:id(),
%% Where this range starts.
_First :: emqx_persistent_message_ds_replayer:seqno()
},
%% Where this range ends: the first seqno that is not included in the range.
until :: emqx_persistent_message_ds_replayer:seqno(),
%% Which stream this range is over.
stream :: _StreamRef,
%% Type of a range:
%% * Inflight range is a range of yet unacked messages from this stream.
%% * Checkpoint range was already acked, its purpose is to keep track of the
%% very last iterator for this stream.
type :: inflight | checkpoint,
%% Meaning of this depends on the type of the range:
%% * For inflight range, this is the iterator pointing to the first message in
%% the range.
%% * For checkpoint range, this is the iterator pointing right past the last
%% message in the range.
iterator :: emqx_ds:iterator(),
%% Reserved for future use.
misc = #{} :: map()
}).
-type ds_pubrange() :: #ds_pubrange{}.
-record(session, {
%% same as clientid
@ -52,7 +74,6 @@
%% creation time
created_at :: _Millisecond :: non_neg_integer(),
expires_at = never :: _Millisecond :: non_neg_integer() | never,
inflight :: emqx_persistent_message_ds_replayer:inflight(),
%% for future usage
props = #{} :: map()
}).

View File

@ -294,7 +294,19 @@ roots(low) ->
{"persistent_session_store",
sc(
ref("persistent_session_store"),
#{importance => ?IMPORTANCE_HIDDEN}
#{
%% NOTE
%% Due to some quirks in interaction between `emqx_config` and
%% `hocon_tconf`, schema roots cannot currently be deprecated.
importance => ?IMPORTANCE_HIDDEN
}
)},
{"session_persistence",
sc(
ref("session_persistence"),
#{
importance => ?IMPORTANCE_HIDDEN
}
)},
{"trace",
sc(
@ -309,11 +321,12 @@ roots(low) ->
].
fields("persistent_session_store") ->
Deprecated = #{deprecated => {since, "5.4.0"}},
[
{"enabled",
sc(
boolean(),
#{
Deprecated#{
default => false,
%% TODO(5.2): change field name to 'enable' and keep 'enabled' as an alias
aliases => [enable],
@ -323,7 +336,7 @@ fields("persistent_session_store") ->
{"ds",
sc(
boolean(),
#{
Deprecated#{
default => false,
importance => ?IMPORTANCE_HIDDEN
}
@ -331,7 +344,7 @@ fields("persistent_session_store") ->
{"on_disc",
sc(
boolean(),
#{
Deprecated#{
default => true,
desc => ?DESC(persistent_store_on_disc)
}
@ -339,7 +352,7 @@ fields("persistent_session_store") ->
{"ram_cache",
sc(
boolean(),
#{
Deprecated#{
default => false,
desc => ?DESC(persistent_store_ram_cache)
}
@ -347,7 +360,7 @@ fields("persistent_session_store") ->
{"backend",
sc(
hoconsc:union([ref("persistent_session_builtin")]),
#{
Deprecated#{
default => #{
<<"type">> => <<"builtin">>,
<<"session">> =>
@ -363,7 +376,7 @@ fields("persistent_session_store") ->
{"max_retain_undelivered",
sc(
duration(),
#{
Deprecated#{
default => <<"1h">>,
desc => ?DESC(persistent_session_store_max_retain_undelivered)
}
@ -371,7 +384,7 @@ fields("persistent_session_store") ->
{"message_gc_interval",
sc(
duration(),
#{
Deprecated#{
default => <<"1h">>,
desc => ?DESC(persistent_session_store_message_gc_interval)
}
@ -379,7 +392,7 @@ fields("persistent_session_store") ->
{"session_message_gc_interval",
sc(
duration(),
#{
Deprecated#{
default => <<"1m">>,
desc => ?DESC(persistent_session_store_session_message_gc_interval)
}
@ -1740,6 +1753,78 @@ fields("trace") ->
importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(fields_trace_payload_encode)
})}
];
fields("session_persistence") ->
[
{"enable",
sc(
boolean(), #{
desc => ?DESC(session_persistence_enable),
default => false
}
)},
{"storage",
sc(
ref("session_storage_backend"), #{
desc => ?DESC(session_persistence_storage),
validator => fun validate_backend_enabled/1,
default => #{
<<"builtin">> => #{}
}
}
)},
{"idle_poll_interval",
sc(
timeout_duration(),
#{
default => <<"100ms">>,
desc => ?DESC(session_ds_idle_poll_interval)
}
)},
{"force_persistence",
sc(
boolean(),
#{
default => false,
%% Only for testing, shall remain hidden
importance => ?IMPORTANCE_HIDDEN
}
)}
];
fields("session_storage_backend") ->
[
{"builtin",
sc(ref("session_storage_backend_builtin"), #{
desc => ?DESC(session_storage_backend_builtin),
required => {false, recursively}
})}
] ++ emqx_schema_hooks:injection_point('session_persistence.storage_backends', []);
fields("session_storage_backend_builtin") ->
[
{"enable",
sc(
boolean(),
#{
desc => ?DESC(session_storage_backend_enable),
default => true
}
)},
{"n_shards",
sc(
pos_integer(),
#{
desc => ?DESC(session_builtin_n_shards),
default => 16
}
)},
{"replication_factor",
sc(
pos_integer(),
#{
default => 3,
importance => ?IMPORTANCE_HIDDEN
}
)}
].
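
Taken together, the new schema admits a session persistence config along these lines (a sketch; every value except `enable = true` is the schema default shown above):

session_persistence {
  enable = true
  idle_poll_interval = "100ms"
  storage {
    builtin {
      enable = true
      n_shards = 16
      replication_factor = 3
    }
  }
}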
mqtt_listener(Bind) ->
@ -1992,6 +2077,8 @@ desc("ocsp") ->
"Per listener OCSP Stapling configuration.";
desc("crl_cache") ->
"Global CRL cache options.";
desc("session_persistence") ->
"Settings governing durable sessions persistence.";
desc(_) ->
undefined.
@ -2014,6 +2101,17 @@ ensure_list(V) ->
filter(Opts) ->
[{K, V} || {K, V} <- Opts, V =/= undefined].
validate_backend_enabled(Config) ->
Enabled = maps:filter(fun(_, #{<<"enable">> := E}) -> E end, Config),
case maps:to_list(Enabled) of
[{_Type, _BackendConfig}] ->
ok;
_Conflicts = [_ | _] ->
{error, multiple_enabled_backends};
_None = [] ->
{error, no_enabled_backend}
end.
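
For illustration, the validator behaves as follows on a few hypothetical raw config maps (each backend section carries its own enable flag):

%% validate_backend_enabled(#{<<"builtin">> => #{<<"enable">> => true}})
%%   =:= ok
%% validate_backend_enabled(#{<<"builtin">> => #{<<"enable">> => true},
%%                            <<"fdb">> => #{<<"enable">> => true}})
%%   =:= {error, multiple_enabled_backends}
%% validate_backend_enabled(#{<<"builtin">> => #{<<"enable">> => false}})
%%   =:= {error, no_enabled_backend}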
%% @private This function defines the SSL opts which are commonly used by
%% SSL listener and client.
-spec common_ssl_opts_schema(map(), server | client) -> hocon_schema:field_schema().

View File

@ -617,21 +617,27 @@ maybe_mock_impl_mod(_) ->
-spec choose_impl_mod(conninfo()) -> module().
choose_impl_mod(#{expiry_interval := EI}) ->
hd(choose_impl_candidates(EI, emqx_persistent_message:is_store_enabled())).
hd(choose_impl_candidates(EI, emqx_persistent_message:is_persistence_enabled())).
-spec choose_impl_candidates(conninfo()) -> [module()].
choose_impl_candidates(#{expiry_interval := EI}) ->
choose_impl_candidates(EI, emqx_persistent_message:is_store_enabled()).
choose_impl_candidates(EI, emqx_persistent_message:is_persistence_enabled()).
choose_impl_candidates(_, _IsPSStoreEnabled = false) ->
[emqx_session_mem];
choose_impl_candidates(0, _IsPSStoreEnabled = true) ->
case emqx_persistent_message:force_ds() of
false ->
%% NOTE
%% If ExpiryInterval is 0, the natural choice is `emqx_session_mem`. Yet we still
%% need to look the existing session up in the `emqx_persistent_session_ds` store
%% first, because previous connection may have set ExpiryInterval to a non-zero
%% value.
%% If ExpiryInterval is 0, the natural choice is
%% `emqx_session_mem'. Yet we still need to look the
%% existing session up in the `emqx_persistent_session_ds'
%% store first, because previous connection may have set
%% ExpiryInterval to a non-zero value.
[emqx_session_mem, emqx_persistent_session_ds];
true ->
[emqx_persistent_session_ds]
end;
choose_impl_candidates(EI, _IsPSStoreEnabled = true) when EI > 0 ->
[emqx_persistent_session_ds].
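%% Candidate selection summarized:
%%   persistence disabled          -> [emqx_session_mem]
%%   EI = 0, force_ds() =:= false  -> [emqx_session_mem, emqx_persistent_session_ds]
%%   EI = 0, force_ds() =:= true   -> [emqx_persistent_session_ds]
%%   EI > 0, persistence enabled   -> [emqx_persistent_session_ds]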

View File

@ -94,7 +94,7 @@
limiter :: container(),
%% cache operation when overload
limiter_cache :: queue:queue(cache()),
limiter_buffer :: queue:queue(cache()),
%% limiter timers
limiter_timer :: undefined | reference()
@ -326,7 +326,7 @@ websocket_init([Req, Opts]) ->
zone = Zone,
listener = {Type, Listener},
limiter_timer = undefined,
limiter_cache = queue:new()
limiter_buffer = queue:new()
},
hibernate};
{denny, Reason} ->
@ -462,13 +462,13 @@ websocket_info(
State
) ->
return(retry_limiter(State));
websocket_info(check_cache, #state{limiter_cache = Cache} = State) ->
case queue:peek(Cache) of
websocket_info(check_limiter_buffer, #state{limiter_buffer = Buffer} = State) ->
case queue:peek(Buffer) of
empty ->
return(enqueue({active, true}, State#state{sockstate = running}));
{value, #cache{need = Needs, data = Data, next = Next}} ->
State2 = State#state{limiter_cache = queue:drop(Cache)},
return(check_limiter(Needs, Data, Next, [check_cache], State2))
State2 = State#state{limiter_buffer = queue:drop(Buffer)},
return(check_limiter(Needs, Data, Next, [check_limiter_buffer], State2))
end;
websocket_info({timeout, TRef, Msg}, State) when is_reference(TRef) ->
handle_timeout(TRef, Msg, State);
@ -630,10 +630,10 @@ check_limiter(
Data,
WhenOk,
_Msgs,
#state{limiter_cache = Cache} = State
#state{limiter_buffer = Buffer} = State
) ->
New = #cache{need = Needs, data = Data, next = WhenOk},
State#state{limiter_cache = queue:in(New, Cache)}.
State#state{limiter_buffer = queue:in(New, Buffer)}.
-spec retry_limiter(state()) -> state().
retry_limiter(#state{limiter = Limiter} = State) ->
@ -644,7 +644,7 @@ retry_limiter(#state{limiter = Limiter} = State) ->
{ok, Limiter2} ->
Next(
Data,
[check_cache],
[check_limiter_buffer],
State#state{
limiter = Limiter2,
limiter_timer = undefined

View File

@ -233,6 +233,56 @@ t_session_subscription_iterators(Config) ->
),
ok.
t_qos0(Config) ->
Sub = connect(<<?MODULE_STRING "1">>, true, 30),
Pub = connect(<<?MODULE_STRING "2">>, true, 0),
try
{ok, _, [1]} = emqtt:subscribe(Sub, <<"t/#">>, qos1),
Messages = [
{<<"t/1">>, <<"1">>, 0},
{<<"t/1">>, <<"2">>, 1},
{<<"t/1">>, <<"3">>, 0}
],
[emqtt:publish(Pub, Topic, Payload, Qos) || {Topic, Payload, Qos} <- Messages],
?assertMatch(
[
#{qos := 0, topic := <<"t/1">>, payload := <<"1">>},
#{qos := 1, topic := <<"t/1">>, payload := <<"2">>},
#{qos := 0, topic := <<"t/1">>, payload := <<"3">>}
],
receive_messages(3)
)
after
emqtt:stop(Sub),
emqtt:stop(Pub)
end.
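%% Note: the QoS asserted above is the effective delivery QoS, i.e.
%% min(publish QoS, subscription QoS). The subscription is QoS 1, so QoS 0
%% publishes stay QoS 0 while the QoS 1 publish is delivered at QoS 1.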
t_publish_as_persistent(Config) ->
Sub = connect(<<?MODULE_STRING "1">>, true, 30),
Pub = connect(<<?MODULE_STRING "2">>, true, 30),
try
{ok, _, [1]} = emqtt:subscribe(Sub, <<"t/#">>, qos1),
Messages = [
{<<"t/1">>, <<"1">>, 0},
{<<"t/1">>, <<"2">>, 1},
{<<"t/1">>, <<"3">>, 2}
],
[emqtt:publish(Pub, Topic, Payload, Qos) || {Topic, Payload, Qos} <- Messages],
?assertMatch(
[
#{qos := 0, topic := <<"t/1">>, payload := <<"1">>},
#{qos := 1, topic := <<"t/1">>, payload := <<"2">>}
%% TODO: QoS 2
%% #{qos := 2, topic := <<"t/1">>, payload := <<"3">>}
],
receive_messages(3)
)
after
emqtt:stop(Sub),
emqtt:stop(Pub)
end.
%%
connect(ClientId, CleanStart, EI) ->
@ -273,7 +323,7 @@ consume(It) ->
end.
receive_messages(Count) ->
receive_messages(Count, []).
lists:reverse(receive_messages(Count, [])).
receive_messages(0, Msgs) ->
Msgs;
@ -291,7 +341,7 @@ publish(Node, Message) ->
app_specs() ->
[
emqx_durable_storage,
{emqx, "persistent_session_store {ds = true}"}
{emqx, "session_persistence {enable = true}"}
].
cluster() ->
@ -307,4 +357,6 @@ get_mqtt_port(Node, Type) ->
clear_db() ->
ok = emqx_ds:drop_db(?PERSISTENT_MESSAGE_DB),
mria:stop(),
ok = mnesia:delete_schema([node()]),
ok.

View File

@ -35,8 +35,8 @@ all() ->
% NOTE
% Tests are disabled while the existing session persistence impl is being
% phased out.
{group, persistent_store_disabled},
{group, persistent_store_ds}
{group, persistence_disabled},
{group, persistence_enabled}
].
%% A persistent session can be resumed in two ways:
@ -54,24 +54,24 @@ groups() ->
TCs = emqx_common_test_helpers:all(?MODULE),
TCsNonGeneric = [t_choose_impl],
[
{persistent_store_disabled, [{group, no_kill_connection_process}]},
{persistent_store_ds, [{group, no_kill_connection_process}]},
{persistence_disabled, [{group, no_kill_connection_process}]},
{persistence_enabled, [{group, no_kill_connection_process}]},
{no_kill_connection_process, [], [{group, tcp}, {group, quic}, {group, ws}]},
{tcp, [], TCs},
{quic, [], TCs -- TCsNonGeneric},
{ws, [], TCs -- TCsNonGeneric}
].
init_per_group(persistent_store_disabled, Config) ->
init_per_group(persistence_disabled, Config) ->
[
{emqx_config, "persistent_session_store { enabled = false }"},
{persistent_store, false}
{emqx_config, "session_persistence { enable = false }"},
{persistence, false}
| Config
];
init_per_group(persistent_store_ds, Config) ->
init_per_group(persistence_enabled, Config) ->
[
{emqx_config, "persistent_session_store { ds = true }"},
{persistent_store, ds}
{emqx_config, "session_persistence { enable = true }"},
{persistence, ds}
| Config
];
init_per_group(Group, Config) when Group == tcp ->
@ -181,18 +181,23 @@ client_info(Key, Client) ->
maps:get(Key, maps:from_list(emqtt:info(Client)), undefined).
receive_messages(Count) ->
receive_messages(Count, []).
receive_messages(Count, 15000).
receive_messages(0, Msgs) ->
Msgs;
receive_messages(Count, Msgs) ->
receive_messages(Count, Timeout) ->
Deadline = erlang:monotonic_time(millisecond) + Timeout,
receive_message_loop(Count, Deadline).
receive_message_loop(0, _Deadline) ->
[];
receive_message_loop(Count, Deadline) ->
Timeout = max(0, Deadline - erlang:monotonic_time(millisecond)),
receive
{publish, Msg} ->
receive_messages(Count - 1, [Msg | Msgs]);
[Msg | receive_message_loop(Count - 1, Deadline)];
_Other ->
receive_messages(Count, Msgs)
after 15000 ->
Msgs
receive_message_loop(Count, Deadline)
after Timeout ->
[]
end.
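%% Note: recomputing Timeout from the absolute Deadline on every iteration
%% bounds the total wait to the caller-supplied timeout, however many
%% non-publish messages arrive and are skipped in between.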
maybe_kill_connection_process(ClientId, Config) ->
@ -229,16 +234,28 @@ wait_for_cm_unregister(ClientId, N) ->
wait_for_cm_unregister(ClientId, N - 1)
end.
publish(Topic, Payloads) ->
publish(Topic, Payloads, false, 2).
messages(Topic, Payloads) ->
messages(Topic, Payloads, ?QOS_2).
publish(Topic, Payloads, WaitForUnregister, QoS) ->
Fun = fun(Client, Payload) ->
{ok, _} = emqtt:publish(Client, Topic, Payload, QoS)
messages(Topic, Payloads, QoS) ->
[#mqtt_msg{topic = Topic, payload = P, qos = QoS} || P <- Payloads].
publish(Topic, Payload) ->
publish(Topic, Payload, ?QOS_2).
publish(Topic, Payload, QoS) ->
publish_many(messages(Topic, [Payload], QoS)).
publish_many(Messages) ->
publish_many(Messages, false).
publish_many(Messages, WaitForUnregister) ->
Fun = fun(Client, Message) ->
{ok, _} = emqtt:publish(Client, Message)
end,
do_publish(Payloads, Fun, WaitForUnregister).
do_publish(Messages, Fun, WaitForUnregister).
do_publish(Payloads = [_ | _], PublishFun, WaitForUnregister) ->
do_publish(Messages = [_ | _], PublishFun, WaitForUnregister) ->
%% Publish from another process to avoid connection confusion.
{Pid, Ref} =
spawn_monitor(
@ -252,7 +269,7 @@ do_publish(Payloads = [_ | _], PublishFun, WaitForUnregister) ->
{port, 1883}
]),
{ok, _} = emqtt:connect(Client),
lists:foreach(fun(Payload) -> PublishFun(Client, Payload) end, Payloads),
lists:foreach(fun(Message) -> PublishFun(Client, Message) end, Messages),
ok = emqtt:disconnect(Client),
%% Snabbkaffe sometimes fails unless all processes are gone.
case WaitForUnregister of
@ -277,9 +294,7 @@ do_publish(Payloads = [_ | _], PublishFun, WaitForUnregister) ->
receive
{'DOWN', Ref, process, Pid, normal} -> ok;
{'DOWN', Ref, process, Pid, What} -> error({failed_publish, What})
end;
do_publish(Payload, PublishFun, WaitForUnregister) ->
do_publish([Payload], PublishFun, WaitForUnregister).
end.
%%--------------------------------------------------------------------
%% Test Cases
@ -297,7 +312,7 @@ t_choose_impl(Config) ->
{ok, _} = emqtt:ConnFun(Client),
[ChanPid] = emqx_cm:lookup_channels(ClientId),
?assertEqual(
case ?config(persistent_store, Config) of
case ?config(persistence, Config) of
false -> emqx_session_mem;
ds -> emqx_persistent_session_ds
end,
@ -494,7 +509,7 @@ t_process_dies_session_expires(Config) ->
maybe_kill_connection_process(ClientId, Config),
ok = publish(Topic, [Payload]),
ok = publish(Topic, Payload),
timer:sleep(1100),
@ -535,7 +550,7 @@ t_publish_while_client_is_gone_qos1(Config) ->
ok = emqtt:disconnect(Client1),
maybe_kill_connection_process(ClientId, Config),
ok = publish(Topic, [Payload1, Payload2], false, 1),
ok = publish_many(messages(Topic, [Payload1, Payload2], ?QOS_1)),
{ok, Client2} = emqtt:start_link([
{proto_ver, v5},
@ -547,7 +562,7 @@ t_publish_while_client_is_gone_qos1(Config) ->
{ok, _} = emqtt:ConnFun(Client2),
Msgs = receive_messages(2),
?assertMatch([_, _], Msgs),
[Msg2, Msg1] = Msgs,
[Msg1, Msg2] = Msgs,
?assertEqual({ok, iolist_to_binary(Payload1)}, maps:find(payload, Msg1)),
?assertEqual({ok, 1}, maps:find(qos, Msg1)),
?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg2)),
@ -555,6 +570,137 @@ t_publish_while_client_is_gone_qos1(Config) ->
ok = emqtt:disconnect(Client2).
t_publish_many_while_client_is_gone_qos1(Config) ->
%% A persistent session should receive all of the still unacked messages
%% for its subscriptions after the client dies or reconnects, in addition
%% to new messages that were published while the client was gone. The order
%% of the messages should be consistent across reconnects.
ClientId = ?config(client_id, Config),
ConnFun = ?config(conn_fun, Config),
{ok, Client1} = emqtt:start_link([
{proto_ver, v5},
{clientid, ClientId},
{properties, #{'Session-Expiry-Interval' => 30}},
{clean_start, true},
{auto_ack, false}
| Config
]),
{ok, _} = emqtt:ConnFun(Client1),
STopics = [
<<"t/+/foo">>,
<<"msg/feed/#">>,
<<"loc/+/+/+">>
],
[{ok, _, [?QOS_1]} = emqtt:subscribe(Client1, ST, ?QOS_1) || ST <- STopics],
Pubs1 = [
#mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M1">>, qos = 1},
#mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M2">>, qos = 1},
#mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M3">>, qos = 1},
#mqtt_msg{topic = <<"loc/1/2/42">>, payload = <<"M4">>, qos = 1},
#mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M5">>, qos = 1},
#mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M6">>, qos = 1},
#mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M7">>, qos = 1}
],
ok = publish_many(Pubs1),
NPubs1 = length(Pubs1),
Msgs1 = receive_messages(NPubs1),
NMsgs1 = length(Msgs1),
?assertEqual(NPubs1, NMsgs1),
ct:pal("Msgs1 = ~p", [Msgs1]),
%% TODO
%% This assertion doesn't currently hold because `emqx_ds` doesn't enforce
%% strict ordering that reflects the client's publishing order. Instead,
%% per-topic ordering is guaranteed for each client. In fact, this violates
%% the MQTT specification, but we deemed it acceptable for now.
%% ?assertMatch([
%% #{payload := <<"M1">>},
%% #{payload := <<"M2">>},
%% #{payload := <<"M3">>},
%% #{payload := <<"M4">>},
%% #{payload := <<"M5">>},
%% #{payload := <<"M6">>},
%% #{payload := <<"M7">>}
%% ], Msgs1),
?assertEqual(
get_topicwise_order(Pubs1),
get_topicwise_order(Msgs1),
Msgs1
),
NAcked = 4,
[ok = emqtt:puback(Client1, PktId) || #{packet_id := PktId} <- lists:sublist(Msgs1, NAcked)],
%% Ensure that PUBACKs are propagated to the channel.
pong = emqtt:ping(Client1),
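%% (The ping round trip doubles as a synchronization barrier: packets on a
%% single MQTT connection are processed in order, so receiving pong implies
%% the PUBACKs sent before the PINGREQ have already been handled.)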
ok = emqtt:disconnect(Client1),
maybe_kill_connection_process(ClientId, Config),
Pubs2 = [
#mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M8">>, qos = 1},
#mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M9">>, qos = 1},
#mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M10">>, qos = 1},
#mqtt_msg{topic = <<"msg/feed/friend">>, payload = <<"M11">>, qos = 1},
#mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M12">>, qos = 1}
],
ok = publish_many(Pubs2),
NPubs2 = length(Pubs2),
{ok, Client2} = emqtt:start_link([
{proto_ver, v5},
{clientid, ClientId},
{properties, #{'Session-Expiry-Interval' => 30}},
{clean_start, false},
{auto_ack, false}
| Config
]),
{ok, _} = emqtt:ConnFun(Client2),
%% Try to receive _at most_ `NPubs` messages.
%% There shouldn't be that many unacked messages in the replay anyway,
%% but it's an easy number to pick.
NPubs = NPubs1 + NPubs2,
Msgs2 = receive_messages(NPubs, _Timeout = 2000),
NMsgs2 = length(Msgs2),
ct:pal("Msgs2 = ~p", [Msgs2]),
?assert(NMsgs2 < NPubs, Msgs2),
?assert(NMsgs2 > NPubs2, Msgs2),
?assert(NMsgs2 >= NPubs - NAcked, Msgs2),
NSame = NMsgs2 - NPubs2,
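%% The first NSame messages are expected to be redeliveries of the unacked
%% tail of Msgs1 (DUP flag set); the messages after them are fresh publishes.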
?assert(
lists:all(fun(#{dup := Dup}) -> Dup end, lists:sublist(Msgs2, NSame))
),
?assertNot(
lists:all(fun(#{dup := Dup}) -> Dup end, lists:nthtail(NSame, Msgs2))
),
?assertEqual(
[maps:with([packet_id, topic, payload], M) || M <- lists:nthtail(NMsgs1 - NSame, Msgs1)],
[maps:with([packet_id, topic, payload], M) || M <- lists:sublist(Msgs2, NSame)]
),
ok = emqtt:disconnect(Client2).
get_topicwise_order(Msgs) ->
maps:groups_from_list(fun get_msgpub_topic/1, fun get_msgpub_payload/1, Msgs).
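%% NOTE: maps:groups_from_list/3 requires OTP 25 or newer.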
get_msgpub_topic(#mqtt_msg{topic = Topic}) ->
Topic;
get_msgpub_topic(#{topic := Topic}) ->
Topic.
get_msgpub_payload(#mqtt_msg{payload = Payload}) ->
Payload;
get_msgpub_payload(#{payload := Payload}) ->
Payload.
t_publish_while_client_is_gone(init, Config) -> skip_ds_tc(Config);
t_publish_while_client_is_gone('end', _Config) -> ok.
t_publish_while_client_is_gone(Config) ->
@ -579,7 +725,7 @@ t_publish_while_client_is_gone(Config) ->
ok = emqtt:disconnect(Client1),
maybe_kill_connection_process(ClientId, Config),
ok = publish(Topic, [Payload1, Payload2]),
ok = publish_many(messages(Topic, [Payload1, Payload2])),
{ok, Client2} = emqtt:start_link([
{proto_ver, v5},
@ -591,7 +737,7 @@ t_publish_while_client_is_gone(Config) ->
{ok, _} = emqtt:ConnFun(Client2),
Msgs = receive_messages(2),
?assertMatch([_, _], Msgs),
[Msg2, Msg1] = Msgs,
[Msg1, Msg2] = Msgs,
?assertEqual({ok, iolist_to_binary(Payload1)}, maps:find(payload, Msg1)),
?assertEqual({ok, 2}, maps:find(qos, Msg1)),
?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg2)),
@ -599,9 +745,6 @@ t_publish_while_client_is_gone(Config) ->
ok = emqtt:disconnect(Client2).
%% TODO: don't skip after QoS2 support is added to DS.
t_clean_start_drops_subscriptions(init, Config) -> skip_ds_tc(Config);
t_clean_start_drops_subscriptions('end', _Config) -> ok.
t_clean_start_drops_subscriptions(Config) ->
%% 1. A persistent session is started and disconnected.
%% 2. While disconnected, a message is published and persisted.
@ -627,13 +770,13 @@ t_clean_start_drops_subscriptions(Config) ->
| Config
]),
{ok, _} = emqtt:ConnFun(Client1),
{ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2),
{ok, _, [1]} = emqtt:subscribe(Client1, STopic, qos1),
ok = emqtt:disconnect(Client1),
maybe_kill_connection_process(ClientId, Config),
%% 2.
ok = publish(Topic, Payload1),
ok = publish(Topic, Payload1, ?QOS_1),
%% 3.
{ok, Client2} = emqtt:start_link([
@ -645,9 +788,10 @@ t_clean_start_drops_subscriptions(Config) ->
]),
{ok, _} = emqtt:ConnFun(Client2),
?assertEqual(0, client_info(session_present, Client2)),
{ok, _, [2]} = emqtt:subscribe(Client2, STopic, qos2),
{ok, _, [1]} = emqtt:subscribe(Client2, STopic, qos1),
ok = publish(Topic, Payload2),
timer:sleep(100),
ok = publish(Topic, Payload2, ?QOS_1),
[Msg1] = receive_messages(1),
?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg1)),
@ -664,7 +808,7 @@ t_clean_start_drops_subscriptions(Config) ->
]),
{ok, _} = emqtt:ConnFun(Client3),
ok = publish(Topic, Payload3),
ok = publish(Topic, Payload3, ?QOS_1),
[Msg2] = receive_messages(1),
?assertEqual({ok, iolist_to_binary(Payload3)}, maps:find(payload, Msg2)),
@ -732,7 +876,7 @@ t_multiple_subscription_matches(Config) ->
ok = emqtt:disconnect(Client2).
skip_ds_tc(Config) ->
case ?config(persistent_store, Config) of
case ?config(persistence, Config) of
ds ->
{skip, "Testcase not yet supported under 'emqx_persistent_session_ds' implementation"};
_ ->

View File

@ -38,7 +38,7 @@ init_per_suite(Config) ->
AppSpecs = [
emqx_durable_storage,
{emqx, #{
config => #{persistent_session_store => #{ds => true}},
config => #{session_persistence => #{enable => true}},
override_env => [{boot_modules, [broker]}]
}}
],

View File

@ -1111,10 +1111,7 @@ list_users(ChainName, AuthenticatorID, QueryString) ->
{error, page_limit_invalid} ->
{400, #{code => <<"INVALID_PARAMETER">>, message => <<"page_limit_invalid">>}};
{error, Reason} ->
{400, #{
code => <<"INVALID_PARAMETER">>,
message => list_to_binary(io_lib:format("Reason ~p", [Reason]))
}};
serialize_error({user_error, Reason});
Result ->
{200, Result}
end.
@ -1176,6 +1173,16 @@ serialize_error({user_error, not_found}) ->
code => <<"NOT_FOUND">>,
message => binfmt("User not found", [])
}};
serialize_error({user_error, {not_found, {chain, ?GLOBAL}}}) ->
{404, #{
code => <<"NOT_FOUND">>,
message => <<"Authenticator not found in the 'global' scope">>
}};
serialize_error({user_error, {not_found, {chain, Name}}}) ->
{400, #{
code => <<"BAD_REQUEST">>,
message => binfmt("No authentication has been created for listener ~p", [Name])
}};
serialize_error({user_error, already_exist}) ->
{409, #{
code => <<"ALREADY_EXISTS">>,

View File

@ -67,7 +67,7 @@
-define(SALT_ROUNDS_MIN, 5).
-define(SALT_ROUNDS_MAX, 10).
namespace() -> "authn-hash".
namespace() -> "authn_hash".
roots() -> [pbkdf2, bcrypt, bcrypt_rw, bcrypt_rw_api, simple].
fields(bcrypt_rw) ->

View File

@ -22,6 +22,7 @@
-behaviour(emqx_authz_schema).
-export([
namespace/0,
type/0,
fields/1,
desc/1,
@ -30,6 +31,8 @@
select_union_member/1
]).
namespace() -> "authz".
type() -> ?AUTHZ_TYPE.
fields(file) ->

View File

@ -435,6 +435,19 @@ test_authenticator_position(PathPrefix) ->
PathPrefix ++ [?CONF_NS]
).
t_authenticator_users_not_found(_) ->
GlobalUser = #{user_id => <<"global_user">>, password => <<"p1">>},
{ok, 404, _} = request(
get,
uri([?CONF_NS, "password_based:built_in_database", "users"])
),
{ok, 404, _} = request(
post,
uri([?CONF_NS, "password_based:built_in_database", "users"]),
GlobalUser
),
ok.
%% listener authn api is not supported since 5.1.0
%% Don't support listener switch to global chain.
ignore_switch_to_global_chain(_) ->

View File

@ -54,7 +54,7 @@ t_check_schema(_Config) ->
?assertThrow(
#{
path := "authentication.1.password_hash_algorithm.name",
matched_type := "authn:builtin_db/authn-hash:simple",
matched_type := "authn:builtin_db/authn_hash:simple",
reason := unable_to_convert_to_enum_symbol
},
Check(ConfigNotOk)

View File

@ -22,18 +22,47 @@
-export([
action_type_to_connector_type/1,
action_type_to_bridge_v1_type/1,
action_type_to_bridge_v1_type/2,
bridge_v1_type_to_action_type/1,
is_action_type/1,
registered_schema_modules/0
registered_schema_modules/0,
connector_action_config_to_bridge_v1_config/3,
has_custom_connector_action_config_to_bridge_v1_config/1,
bridge_v1_config_to_connector_config/2,
has_custom_bridge_v1_config_to_connector_config/1,
bridge_v1_config_to_action_config/3,
has_custom_bridge_v1_config_to_action_config/1,
transform_bridge_v1_config_to_action_config/4
]).
-callback bridge_v1_type_name() -> atom().
-callback bridge_v1_type_name() ->
atom()
| {
fun(({ActionConfig :: map(), ConnectorConfig :: map()}) -> Type :: atom()),
TypeList :: [atom()]
}.
-callback action_type_name() -> atom().
-callback connector_type_name() -> atom().
-callback schema_module() -> atom().
%% Define this if the automatic config downgrade is not enough for the bridge.
-callback connector_action_config_to_bridge_v1_config(
ConnectorConfig :: map(), ActionConfig :: map()
) -> map().
%% Define this if the automatic config upgrade is not enough for the connector.
-callback bridge_v1_config_to_connector_config(BridgeV1Config :: map()) -> map().
%% Define this if the automatic config upgrade is not enough for the bridge.
%% If you want to make use of the automatic config upgrade, you can call
%% emqx_action_info:transform_bridge_v1_config_to_action_config/4 in your
%% implementation and adjust the result as needed.
-callback bridge_v1_config_to_action_config(BridgeV1Config :: map(), ConnectorName :: binary()) ->
map().
-optional_callbacks([bridge_v1_type_name/0]).
-optional_callbacks([
bridge_v1_type_name/0,
connector_action_config_to_bridge_v1_config/2,
bridge_v1_config_to_connector_config/1,
bridge_v1_config_to_action_config/2
]).
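%% A minimal sketch (module names are hypothetical) of an action info module
%% that implements only the mandatory callbacks of this behaviour:
%%
%%   -module(my_example_action_info).
%%   -behaviour(emqx_action_info).
%%   -export([action_type_name/0, connector_type_name/0, schema_module/0]).
%%
%%   action_type_name() -> my_example.
%%   connector_type_name() -> my_example.
%%   schema_module() -> my_example_action_schema.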
%% ====================================================================
%% Hardcoded list of info modules for actions
@ -44,9 +73,15 @@
-if(?EMQX_RELEASE_EDITION == ee).
hard_coded_action_info_modules_ee() ->
[
emqx_bridge_kafka_action_info,
emqx_bridge_azure_event_hub_action_info,
emqx_bridge_syskeeper_action_info
emqx_bridge_confluent_producer_action_info,
emqx_bridge_gcp_pubsub_producer_action_info,
emqx_bridge_kafka_action_info,
emqx_bridge_matrix_action_info,
emqx_bridge_mongodb_action_info,
emqx_bridge_pgsql_action_info,
emqx_bridge_syskeeper_action_info,
emqx_bridge_timescale_action_info
].
-else.
hard_coded_action_info_modules_ee() ->
@ -83,16 +118,30 @@ bridge_v1_type_to_action_type(Type) ->
ActionType -> ActionType
end.
action_type_to_bridge_v1_type(Bin) when is_binary(Bin) ->
action_type_to_bridge_v1_type(binary_to_existing_atom(Bin));
action_type_to_bridge_v1_type(Type) ->
action_type_to_bridge_v1_type(Bin, Conf) when is_binary(Bin) ->
action_type_to_bridge_v1_type(binary_to_existing_atom(Bin), Conf);
action_type_to_bridge_v1_type(ActionType, ActionConf) ->
ActionInfoMap = info_map(),
ActionTypeToBridgeV1Type = maps:get(action_type_to_bridge_v1_type, ActionInfoMap),
case maps:get(Type, ActionTypeToBridgeV1Type, undefined) of
undefined -> Type;
BridgeV1Type -> BridgeV1Type
case maps:get(ActionType, ActionTypeToBridgeV1Type, undefined) of
undefined ->
ActionType;
BridgeV1TypeFun when is_function(BridgeV1TypeFun) ->
case get_confs(ActionType, ActionConf) of
{ConnectorConfig, ActionConfig} -> BridgeV1TypeFun({ConnectorConfig, ActionConfig});
undefined -> ActionType
end;
BridgeV1Type ->
BridgeV1Type
end.
get_confs(ActionType, #{<<"connector">> := ConnectorName} = ActionConfig) ->
ConnectorType = action_type_to_connector_type(ActionType),
ConnectorConfig = emqx_conf:get_raw([connectors, ConnectorType, ConnectorName]),
{ConnectorConfig, ActionConfig};
get_confs(_, _) ->
undefined.
%% This function should return true for all inputs that are bridge V1 types for
%% bridges that have been refactored to bridge V2s, and for all bridge V2
%% types. For everything else the function should return false.
@ -111,10 +160,49 @@ registered_schema_modules() ->
Schemas = maps:get(action_type_to_schema_module, InfoMap),
maps:to_list(Schemas).
has_custom_connector_action_config_to_bridge_v1_config(ActionOrBridgeType) ->
Module = get_action_info_module(ActionOrBridgeType),
erlang:function_exported(Module, connector_action_config_to_bridge_v1_config, 2).
connector_action_config_to_bridge_v1_config(ActionOrBridgeType, ConnectorConfig, ActionConfig) ->
Module = get_action_info_module(ActionOrBridgeType),
%% should only be called if defined
Module:connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig).
has_custom_bridge_v1_config_to_connector_config(ActionOrBridgeType) ->
Module = get_action_info_module(ActionOrBridgeType),
erlang:function_exported(Module, bridge_v1_config_to_connector_config, 1).
bridge_v1_config_to_connector_config(ActionOrBridgeType, BridgeV1Config) ->
Module = get_action_info_module(ActionOrBridgeType),
%% should only be called if defined
Module:bridge_v1_config_to_connector_config(BridgeV1Config).
has_custom_bridge_v1_config_to_action_config(ActionOrBridgeType) ->
Module = get_action_info_module(ActionOrBridgeType),
erlang:function_exported(Module, bridge_v1_config_to_action_config, 2).
bridge_v1_config_to_action_config(ActionOrBridgeType, BridgeV1Config, ConnectorName) ->
Module = get_action_info_module(ActionOrBridgeType),
%% should only be called if defined
Module:bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName).
transform_bridge_v1_config_to_action_config(
BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName
) ->
emqx_connector_schema:transform_bridge_v1_config_to_action_config(
BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName
).
%% ====================================================================
%% Internal functions for building the info map and accessing it
%% ====================================================================
get_action_info_module(ActionOrBridgeType) ->
InfoMap = info_map(),
ActionInfoModuleMap = maps:get(action_type_to_info_module, InfoMap),
maps:get(ActionOrBridgeType, ActionInfoModuleMap, undefined).
internal_emqx_action_persistent_term_info_key() ->
?FUNCTION_NAME.
@ -149,9 +237,9 @@ action_info_modules() ->
lists:usort(lists:flatten(ActionInfoModules) ++ hard_coded_action_info_modules()).
action_info_modules(App) ->
case application:get_env(App, emqx_action_info_module) of
{ok, Module} ->
[Module];
case application:get_env(App, emqx_action_info_modules) of
{ok, Modules} ->
Modules;
_ ->
[]
end.
@ -162,39 +250,64 @@ initial_info_map() ->
bridge_v1_type_to_action_type => #{},
action_type_to_bridge_v1_type => #{},
action_type_to_connector_type => #{},
action_type_to_schema_module => #{}
action_type_to_schema_module => #{},
action_type_to_info_module => #{}
}.
get_info_map(Module) ->
%% Force the module to get loaded
_ = code:ensure_loaded(Module),
ActionType = Module:action_type_name(),
BridgeV1Type =
{BridgeV1TypeOrFun, BridgeV1Types} =
case erlang:function_exported(Module, bridge_v1_type_name, 0) of
true ->
Module:bridge_v1_type_name();
case Module:bridge_v1_type_name() of
{_BridgeV1TypeFun, _BridgeV1Types} = BridgeV1TypeTuple ->
BridgeV1TypeTuple;
BridgeV1Type0 ->
{BridgeV1Type0, [BridgeV1Type0]}
end;
false ->
Module:action_type_name()
{ActionType, [ActionType]}
end,
#{
action_type_names => #{
ActionType => true,
BridgeV1Type => true
},
bridge_v1_type_to_action_type => #{
BridgeV1Type => ActionType,
action_type_names =>
lists:foldl(
fun(BridgeV1Type, M) ->
M#{BridgeV1Type => true}
end,
#{ActionType => true},
BridgeV1Types
),
bridge_v1_type_to_action_type =>
lists:foldl(
fun(BridgeV1Type, M) ->
%% Alias the bridge V1 type to the action type
ActionType => ActionType
},
M#{BridgeV1Type => ActionType}
end,
#{ActionType => ActionType},
BridgeV1Types
),
action_type_to_bridge_v1_type => #{
ActionType => BridgeV1Type
},
action_type_to_connector_type => #{
ActionType => Module:connector_type_name(),
%% Alias the bridge V1 type to the action type
BridgeV1Type => Module:connector_type_name()
ActionType => BridgeV1TypeOrFun
},
action_type_to_connector_type =>
lists:foldl(
fun(BridgeV1Type, M) ->
M#{BridgeV1Type => Module:connector_type_name()}
end,
#{ActionType => Module:connector_type_name()},
BridgeV1Types
),
action_type_to_schema_module => #{
ActionType => Module:schema_module()
}
},
action_type_to_info_module =>
lists:foldl(
fun(BridgeV1Type, M) ->
M#{BridgeV1Type => Module}
end,
#{ActionType => Module},
BridgeV1Types
)
}.

View File

@ -55,7 +55,6 @@
]).
-export([config_key_path/0]).
-export([validate_bridge_name/1]).
%% exported for `emqx_telemetry'
-export([get_basic_usage_info/0]).
@ -238,9 +237,15 @@ send_to_matched_egress_bridges_loop(Topic, Msg, [Id | Ids]) ->
send_to_matched_egress_bridges_loop(Topic, Msg, Ids).
send_message(BridgeId, Message) ->
{BridgeType, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId),
ResId = emqx_bridge_resource:resource_id(BridgeType, BridgeName),
send_message(BridgeType, BridgeName, ResId, Message, #{}).
{BridgeV1Type, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId),
case emqx_bridge_v2:is_bridge_v2_type(BridgeV1Type) of
true ->
ActionType = emqx_action_info:bridge_v1_type_to_action_type(BridgeV1Type),
emqx_bridge_v2:send_message(ActionType, BridgeName, Message, #{});
false ->
ResId = emqx_bridge_resource:resource_id(BridgeV1Type, BridgeName),
send_message(BridgeV1Type, BridgeName, ResId, Message, #{})
end.
send_message(BridgeType, BridgeName, ResId, Message, QueryOpts0) ->
case emqx:get_config([?ROOT_KEY, BridgeType, BridgeName], not_found) of
@ -269,7 +274,12 @@ config_key_path() ->
pre_config_update([?ROOT_KEY], RawConf, RawConf) ->
{ok, RawConf};
pre_config_update([?ROOT_KEY], NewConf, _RawConf) ->
{ok, convert_certs(NewConf)}.
case multi_validate_bridge_names(NewConf) of
ok ->
{ok, convert_certs(NewConf)};
Error ->
Error
end.
post_config_update([?ROOT_KEY], _Req, NewConf, OldConf, _AppEnv) ->
#{added := Added, removed := Removed, changed := Updated} =
@ -310,7 +320,6 @@ list() ->
BridgeV2Bridges =
emqx_bridge_v2:bridge_v1_list_and_transform(),
BridgeV1Bridges ++ BridgeV2Bridges.
%%BridgeV2Bridges = emqx_bridge_v2:list().
lookup(Id) ->
{Type, Name} = emqx_bridge_resource:parse_bridge_id(Id),
@ -374,8 +383,8 @@ disable_enable(Action, BridgeType0, BridgeName) when
)
end.
create(BridgeType0, BridgeName, RawConf) ->
BridgeType = upgrade_type(BridgeType0),
create(BridgeV1Type, BridgeName, RawConf) ->
BridgeType = upgrade_type(BridgeV1Type),
?SLOG(debug, #{
bridge_action => create,
bridge_type => BridgeType,
@ -384,7 +393,7 @@ create(BridgeType0, BridgeName, RawConf) ->
}),
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:bridge_v1_split_config_and_create(BridgeType, BridgeName, RawConf);
emqx_bridge_v2:bridge_v1_split_config_and_create(BridgeV1Type, BridgeName, RawConf);
false ->
emqx_conf:update(
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
@ -405,7 +414,7 @@ remove(BridgeType0, BridgeName) ->
}),
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_bridge_v2:remove(BridgeType, BridgeName);
emqx_bridge_v2:bridge_v1_remove(BridgeType0, BridgeName);
false ->
remove_v1(BridgeType, BridgeName)
end.
@ -658,17 +667,13 @@ get_basic_usage_info() ->
InitialAcc
end.
validate_bridge_name(BridgeName0) ->
BridgeName = to_bin(BridgeName0),
case re:run(BridgeName, ?MAP_KEY_RE, [{capture, none}]) of
match ->
ok;
nomatch ->
{error, #{
kind => validation_error,
reason => bad_bridge_name,
value => BridgeName
}}
validate_bridge_name(BridgeName) ->
try
_ = emqx_resource:validate_name(to_bin(BridgeName)),
ok
catch
throw:Error ->
{error, Error}
end.
to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
@ -676,3 +681,31 @@ to_bin(B) when is_binary(B) -> B.
upgrade_type(Type) ->
emqx_bridge_lib:upgrade_type(Type).
multi_validate_bridge_names(Conf) ->
BridgeTypeAndNames =
[
{Type, Name}
|| {Type, NameToConf} <- maps:to_list(Conf),
{Name, _Conf} <- maps:to_list(NameToConf)
],
BadBridges =
lists:filtermap(
fun({Type, Name}) ->
case validate_bridge_name(Name) of
ok -> false;
_Error -> {true, #{type => Type, name => Name}}
end
end,
BridgeTypeAndNames
),
case BadBridges of
[] ->
ok;
[_ | _] ->
{error, #{
kind => validation_error,
reason => bad_bridge_names,
bad_bridges => BadBridges
}}
end.

View File

@ -900,14 +900,14 @@ format_resource(
case emqx_bridge_v2:is_bridge_v2_type(Type) of
true ->
%% The defaults are already filled in
downgrade_raw_conf(Type, RawConf);
RawConf;
false ->
fill_defaults(Type, RawConf)
end,
redact(
maps:merge(
RawConfFull#{
type => downgrade_type(Type),
type => downgrade_type(Type, RawConf),
name => maps:get(<<"name">>, RawConf, BridgeName),
node => Node
},
@ -1162,21 +1162,5 @@ non_compat_bridge_msg() ->
upgrade_type(Type) ->
emqx_bridge_lib:upgrade_type(Type).
downgrade_type(Type) ->
emqx_bridge_lib:downgrade_type(Type).
%% TODO: move it to callback
downgrade_raw_conf(kafka_producer, RawConf) ->
rename(<<"parameters">>, <<"kafka">>, RawConf);
downgrade_raw_conf(azure_event_hub_producer, RawConf) ->
rename(<<"parameters">>, <<"kafka">>, RawConf);
downgrade_raw_conf(_Type, RawConf) ->
RawConf.
rename(OldKey, NewKey, Map) ->
case maps:find(OldKey, Map) of
{ok, Value} ->
maps:remove(OldKey, maps:put(NewKey, Value, Map));
error ->
Map
end.
downgrade_type(Type, Conf) ->
emqx_bridge_lib:downgrade_type(Type, Conf).

View File

@ -63,7 +63,7 @@ pre_config_update(_, {Oper, _Type, _Name}, OldConfig) ->
%% to save the 'enable' to the config files
{ok, OldConfig#{<<"enable">> => operation_to_enable(Oper)}};
pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
case validate_bridge_name(Path) of
case validate_bridge_name_in_config(Path) of
ok ->
case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of
{error, Reason} ->
@ -104,11 +104,23 @@ post_config_update([bridges, BridgeType, BridgeName], _Req, NewConf, OldConf, _A
operation_to_enable(disable) -> false;
operation_to_enable(enable) -> true.
validate_bridge_name(Path) ->
validate_bridge_name_in_config(Path) ->
[RootKey] = emqx_bridge:config_key_path(),
case Path of
[RootKey, _BridgeType, BridgeName] ->
emqx_bridge:validate_bridge_name(BridgeName);
validate_bridge_name(BridgeName);
_ ->
ok
end.
to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
to_bin(B) when is_binary(B) -> B.
validate_bridge_name(BridgeName) ->
try
_ = emqx_resource:validate_name(to_bin(BridgeName)),
ok
catch
throw:Error ->
{error, Error}
end.

View File

@ -18,7 +18,7 @@
-export([
maybe_withdraw_rule_action/3,
upgrade_type/1,
downgrade_type/1
downgrade_type/2
]).
%% @doc A bridge can be used as a rule action.
@ -61,23 +61,31 @@ upgrade_type(Type) when is_list(Type) ->
atom_to_list(emqx_bridge_v2:bridge_v1_type_to_bridge_v2_type(list_to_binary(Type))).
%% @doc Kafka producer bridge type renamed from 'kafka' to 'kafka_producer' since 5.3.1
downgrade_type(Type) when is_atom(Type) ->
emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type);
downgrade_type(Type) when is_binary(Type) ->
atom_to_binary(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type));
downgrade_type(Type) when is_list(Type) ->
atom_to_list(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(list_to_binary(Type))).
downgrade_type(Type, Conf) when is_atom(Type) ->
emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type, Conf);
downgrade_type(Type, Conf) when is_binary(Type) ->
atom_to_binary(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type, Conf));
downgrade_type(Type, Conf) when is_list(Type) ->
atom_to_list(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(list_to_binary(Type), Conf)).
%% A rule might be referencing an old version bridge type name
%% i.e. 'kafka' instead of 'kafka_producer' so we need to try both
external_ids(Type, Name) ->
case downgrade_type(Type) of
case downgrade_type(Type, get_conf(Type, Name)) of
Type ->
[external_id(Type, Name)];
Type0 ->
[external_id(Type0, Name), external_id(Type, Name)]
end.
get_conf(BridgeType, BridgeName) ->
case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of
true ->
emqx_conf:get_raw([actions, BridgeType, BridgeName]);
false ->
undefined
end.
%% Creates the external id for the bridge_v2 that is used by the rule actions
%% to refer to the bridge_v2
external_id(BridgeType, BridgeName) ->

View File

@ -55,6 +55,7 @@
disable_enable/3,
health_check/2,
send_message/4,
query/4,
start/2,
reset_metrics/2,
create_dry_run/2,
@ -111,12 +112,14 @@
bridge_v1_create_dry_run/2,
bridge_v1_type_to_bridge_v2_type/1,
%% Exception from the naming convention:
bridge_v2_type_to_bridge_v1_type/1,
bridge_v2_type_to_bridge_v1_type/2,
bridge_v1_id_to_connector_resource_id/1,
bridge_v1_enable_disable/3,
bridge_v1_restart/2,
bridge_v1_stop/2,
bridge_v1_start/2
bridge_v1_start/2,
%% For test cases only
bridge_v1_remove/2
]).
%%====================================================================
@ -202,33 +205,36 @@ lookup(Type, Name) ->
%% The connector should always exist
%% ... but, in theory, there might be no channels associated to it when we try
%% to delete the connector, and then this reference will become dangling...
InstanceData =
ConnectorData =
case emqx_resource:get_instance(ConnectorId) of
{ok, _, Data} ->
Data;
{error, not_found} ->
#{}
end,
%% Find the Bridge V2 status from the InstanceData
Channels = maps:get(added_channels, InstanceData, #{}),
%% Find the Bridge V2 status from the ConnectorData
ConnectorStatus = maps:get(status, ConnectorData, undefined),
Channels = maps:get(added_channels, ConnectorData, #{}),
BridgeV2Id = id(Type, Name, BridgeConnector),
ChannelStatus = maps:get(BridgeV2Id, Channels, undefined),
{DisplayBridgeV2Status, ErrorMsg} =
case ChannelStatus of
#{status := connected} ->
{connected, <<"">>};
#{status := Status, error := undefined} ->
case {ChannelStatus, ConnectorStatus} of
{#{status := ?status_connected}, _} ->
{?status_connected, <<"">>};
{#{error := resource_not_operational}, ?status_connecting} ->
{?status_connecting, <<"Not installed">>};
{#{status := Status, error := undefined}, _} ->
{Status, <<"Unknown reason">>};
#{status := Status, error := Error} ->
{#{status := Status, error := Error}, _} ->
{Status, emqx_utils:readable_error_msg(Error)};
undefined ->
{disconnected, <<"Pending installation">>}
{undefined, _} ->
{?status_disconnected, <<"Not installed">>}
end,
{ok, #{
type => bin(Type),
name => bin(Name),
raw_config => RawConf,
resource_data => InstanceData,
resource_data => ConnectorData,
status => DisplayBridgeV2Status,
error => ErrorMsg
}}
@ -407,10 +413,10 @@ uninstall_bridge_v2(
CreationOpts = emqx_resource:fetch_creation_opts(Config),
ok = emqx_resource_buffer_worker_sup:stop_workers(BridgeV2Id, CreationOpts),
ok = emqx_resource:clear_metrics(BridgeV2Id),
case combine_connector_and_bridge_v2_config(BridgeV2Type, BridgeName, Config) of
case validate_referenced_connectors(BridgeV2Type, ConnectorName, BridgeName) of
{error, _} ->
ok;
_CombinedConfig ->
ok ->
%% Deinstall from connector
ConnectorId = emqx_connector_resource:resource_id(
connector_type(BridgeV2Type), ConnectorName
@ -544,25 +550,25 @@ get_query_mode(BridgeV2Type, Config) ->
ResourceType = emqx_connector_resource:connector_to_resource_type(ConnectorType),
emqx_resource:query_mode(ResourceType, Config, CreationOpts).
-spec send_message(bridge_v2_type(), bridge_v2_name(), Message :: term(), QueryOpts :: map()) ->
-spec query(bridge_v2_type(), bridge_v2_name(), Message :: term(), QueryOpts :: map()) ->
term() | {error, term()}.
send_message(BridgeType, BridgeName, Message, QueryOpts0) ->
query(BridgeType, BridgeName, Message, QueryOpts0) ->
case lookup_conf(BridgeType, BridgeName) of
#{enable := true} = Config0 ->
Config = combine_connector_and_bridge_v2_config(BridgeType, BridgeName, Config0),
do_send_msg_with_enabled_config(BridgeType, BridgeName, Message, QueryOpts0, Config);
do_query_with_enabled_config(BridgeType, BridgeName, Message, QueryOpts0, Config);
#{enable := false} ->
{error, bridge_stopped};
_Error ->
{error, bridge_not_found}
end.
do_send_msg_with_enabled_config(
do_query_with_enabled_config(
_BridgeType, _BridgeName, _Message, _QueryOpts0, {error, Reason} = Error
) ->
?SLOG(error, Reason),
Error;
do_send_msg_with_enabled_config(
do_query_with_enabled_config(
BridgeType, BridgeName, Message, QueryOpts0, Config
) ->
QueryMode = get_query_mode(BridgeType, Config),
@ -576,7 +582,17 @@ do_send_msg_with_enabled_config(
}
),
BridgeV2Id = id(BridgeType, BridgeName),
emqx_resource:query(BridgeV2Id, {BridgeV2Id, Message}, QueryOpts).
case Message of
{send_message, Msg} ->
emqx_resource:query(BridgeV2Id, {BridgeV2Id, Msg}, QueryOpts);
Msg ->
emqx_resource:query(BridgeV2Id, Msg, QueryOpts)
end.
-spec send_message(bridge_v2_type(), bridge_v2_name(), Message :: term(), QueryOpts :: map()) ->
term() | {error, term()}.
send_message(BridgeType, BridgeName, Message, QueryOpts0) ->
query(BridgeType, BridgeName, {send_message, Message}, QueryOpts0).
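%% Note: send_message/4 is a thin wrapper around query/4 that tags the
%% request as {send_message, Msg}, which query/4 unwraps into the
%% {BridgeV2Id, Msg} shape; untagged requests are passed on to
%% emqx_resource:query/3 as-is.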
-spec health_check(BridgeType :: term(), BridgeName :: term()) ->
#{status := emqx_resource:resource_status(), error := term()} | {error, Reason :: term()}.
@ -782,7 +798,8 @@ parse_id(Id) ->
end.
get_channels_for_connector(ConnectorId) ->
{ConnectorType, ConnectorName} = emqx_connector_resource:parse_connector_id(ConnectorId),
try emqx_connector_resource:parse_connector_id(ConnectorId) of
{ConnectorType, ConnectorName} ->
RootConf = maps:keys(emqx:get_config([?ROOT_KEY], #{})),
RelevantBridgeV2Types = [
Type
@ -792,7 +809,13 @@ get_channels_for_connector(ConnectorId) ->
lists:flatten([
get_channels_for_connector(ConnectorName, BridgeV2Type)
|| BridgeV2Type <- RelevantBridgeV2Types
]).
])
catch
_:_ ->
%% ConnectorId is not a valid connector id so we assume the connector
%% has no channels (e.g. it is a connector for authn or authz)
[]
end.
get_channels_for_connector(ConnectorName, BridgeV2Type) ->
BridgeV2s = emqx:get_config([?ROOT_KEY, BridgeV2Type], #{}),
@ -1050,8 +1073,8 @@ bridge_v1_is_valid(BridgeV1Type, BridgeName) ->
bridge_v1_type_to_bridge_v2_type(Type) ->
emqx_action_info:bridge_v1_type_to_action_type(Type).
bridge_v2_type_to_bridge_v1_type(Type) ->
emqx_action_info:action_type_to_bridge_v1_type(Type).
bridge_v2_type_to_bridge_v1_type(ActionType, ActionConf) ->
emqx_action_info:action_type_to_bridge_v1_type(ActionType, ActionConf).
is_bridge_v2_type(Type) ->
emqx_action_info:is_action_type(Type).
@ -1060,33 +1083,38 @@ bridge_v1_list_and_transform() ->
Bridges = list_with_lookup_fun(fun bridge_v1_lookup_and_transform/2),
[B || B <- Bridges, B =/= not_bridge_v1_compatible_error()].
bridge_v1_lookup_and_transform(BridgeV1Type, Name) ->
bridge_v1_lookup_and_transform(ActionType, Name) ->
case lookup(ActionType, Name) of
{ok, #{raw_config := #{<<"connector">> := ConnectorName} = RawConfig} = ActionConfig} ->
BridgeV1Type = ?MODULE:bridge_v2_type_to_bridge_v1_type(ActionType, RawConfig),
case ?MODULE:bridge_v1_is_valid(BridgeV1Type, Name) of
true ->
Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
case lookup(Type, Name) of
{ok, #{raw_config := #{<<"connector">> := ConnectorName}} = BridgeV2} ->
ConnectorType = connector_type(Type),
ConnectorType = connector_type(ActionType),
case emqx_connector:lookup(ConnectorType, ConnectorName) of
{ok, Connector} ->
bridge_v1_lookup_and_transform_helper(
BridgeV1Type, Name, Type, BridgeV2, ConnectorType, Connector
BridgeV1Type,
Name,
ActionType,
ActionConfig,
ConnectorType,
Connector
);
Error ->
Error
end;
Error ->
Error
end;
false ->
not_bridge_v1_compatible_error()
end;
Error ->
Error
end.
not_bridge_v1_compatible_error() ->
{error, not_bridge_v1_compatible}.
bridge_v1_lookup_and_transform_helper(
BridgeV1Type, BridgeName, BridgeV2Type, BridgeV2, ConnectorType, Connector
BridgeV1Type, BridgeName, ActionType, Action, ConnectorType, Connector
) ->
ConnectorRawConfig1 = maps:get(raw_config, Connector),
ConnectorRawConfig2 = fill_defaults(
@ -1095,19 +1123,33 @@ bridge_v1_lookup_and_transform_helper(
<<"connectors">>,
emqx_connector_schema
),
BridgeV2RawConfig1 = maps:get(raw_config, BridgeV2),
BridgeV2RawConfig2 = fill_defaults(
BridgeV2Type,
BridgeV2RawConfig1,
ActionRawConfig1 = maps:get(raw_config, Action),
ActionRawConfig2 = fill_defaults(
ActionType,
ActionRawConfig1,
<<"actions">>,
emqx_bridge_v2_schema
),
BridgeV1Config1 = maps:remove(<<"connector">>, BridgeV2RawConfig2),
BridgeV1Config2 = maps:merge(BridgeV1Config1, ConnectorRawConfig2),
BridgeV1Tmp = maps:put(raw_config, BridgeV1Config2, BridgeV2),
BridgeV1ConfigFinal =
case
emqx_action_info:has_custom_connector_action_config_to_bridge_v1_config(BridgeV1Type)
of
false ->
BridgeV1Config1 = maps:remove(<<"connector">>, ActionRawConfig2),
%% Move parameters to the top level
ParametersMap = maps:get(<<"parameters">>, BridgeV1Config1, #{}),
BridgeV1Config2 = maps:remove(<<"parameters">>, BridgeV1Config1),
BridgeV1Config3 = emqx_utils_maps:deep_merge(BridgeV1Config2, ParametersMap),
emqx_utils_maps:deep_merge(ConnectorRawConfig2, BridgeV1Config3);
true ->
emqx_action_info:connector_action_config_to_bridge_v1_config(
BridgeV1Type, ConnectorRawConfig2, ActionRawConfig2
)
end,
BridgeV1Tmp = maps:put(raw_config, BridgeV1ConfigFinal, Action),
BridgeV1 = maps:remove(status, BridgeV1Tmp),
BridgeV2Status = maps:get(status, BridgeV2, undefined),
BridgeV2Error = maps:get(error, BridgeV2, undefined),
BridgeV2Status = maps:get(status, Action, undefined),
BridgeV2Error = maps:get(error, Action, undefined),
ResourceData1 = maps:get(resource_data, BridgeV1, #{}),
%% Replace id in resource data
BridgeV1Id = <<"bridge:", (bin(BridgeV1Type))/binary, ":", (bin(BridgeName))/binary>>,
@ -1222,6 +1264,8 @@ split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf, PreviousR
#{bin(BridgeV2Type) => #{bin(BridgeName) => PreviousRawConf}},
PreviousRawConf =/= undefined
),
%% [FIXME] this will loop through all connector types; instead, pass the
%% connector type and handle just that one
Output = emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2(
FakeGlobalConfig
),
@ -1301,6 +1345,34 @@ bridge_v1_create_dry_run(BridgeType, RawConfig0) ->
} = split_and_validate_bridge_v1_config(BridgeType, TmpName, RawConf, PreviousRawConf),
create_dry_run_helper(BridgeV2Type, ConnectorRawConf, BridgeV2RawConf).
%% Only called by test cases (may create broken references)
bridge_v1_remove(BridgeV1Type, BridgeName) ->
ActionType = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
bridge_v1_remove(
ActionType,
BridgeName,
lookup_conf(ActionType, BridgeName)
).
bridge_v1_remove(
ActionType,
Name,
#{connector := ConnectorName}
) ->
case remove(ActionType, Name) of
ok ->
ConnectorType = connector_type(ActionType),
emqx_connector:remove(ConnectorType, ConnectorName);
Error ->
Error
end;
bridge_v1_remove(
_ActionType,
_Name,
Error
) ->
Error.
bridge_v1_check_deps_and_remove(BridgeV1Type, BridgeName, RemoveDeps) ->
BridgeV2Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type),
bridge_v1_check_deps_and_remove(

View File

@ -791,6 +791,8 @@ do_create_or_update_bridge(BridgeType, BridgeName, Conf, HttpStatusCode) ->
PreOrPostConfigUpdate =:= pre_config_update;
PreOrPostConfigUpdate =:= post_config_update
->
?BAD_REQUEST(map_to_json(redact(Reason)));
{error, Reason} when is_map(Reason) ->
?BAD_REQUEST(map_to_json(redact(Reason)))
end.

View File

@ -82,9 +82,7 @@ schema_modules() ->
].
examples(Method) ->
ActionExamples = emqx_bridge_v2_schema:examples(Method),
RegisteredExamples = registered_examples(Method),
maps:merge(ActionExamples, RegisteredExamples).
registered_examples(Method).
registered_examples(Method) ->
MergeFun =

View File

@ -39,6 +39,13 @@
]).
-export([types/0, types_sc/0]).
-export([resource_opts_fields/0, resource_opts_fields/1]).
-export([
make_producer_action_schema/1,
make_consumer_action_schema/1,
top_level_common_action_keys/0
]).
-export_type([action_type/0]).
@ -116,7 +123,9 @@ roots() ->
end.
fields(actions) ->
registered_schema_fields().
registered_schema_fields();
fields(resource_opts) ->
emqx_resource_schema:create_opts(_Overrides = []).
registered_schema_fields() ->
[
@ -126,6 +135,8 @@ registered_schema_fields() ->
desc(actions) ->
?DESC("desc_bridges_v2");
desc(resource_opts) ->
?DESC(emqx_resource_schema, "resource_opts");
desc(_) ->
undefined.
@ -137,6 +148,31 @@ types() ->
types_sc() ->
hoconsc:enum(types()).
resource_opts_fields() ->
resource_opts_fields(_Overrides = []).
resource_opts_fields(Overrides) ->
ActionROFields = [
batch_size,
batch_time,
buffer_mode,
buffer_seg_bytes,
health_check_interval,
inflight_window,
max_buffer_bytes,
metrics_flush_interval,
query_mode,
request_ttl,
resume_interval,
start_after_created,
start_timeout,
worker_pool_size
],
lists:filter(
fun({Key, _Sc}) -> lists:member(Key, ActionROFields) end,
emqx_resource_schema:create_opts(Overrides)
).
examples(Method) ->
MergeFun =
fun(Example, Examples) ->
@ -150,6 +186,42 @@ examples(Method) ->
SchemaModules = [Mod || {_, Mod} <- emqx_action_info:registered_schema_modules()],
lists:foldl(Fun, #{}, SchemaModules).
top_level_common_action_keys() ->
[
<<"connector">>,
<<"description">>,
<<"enable">>,
<<"local_topic">>,
<<"parameters">>,
<<"resource_opts">>
].
%%======================================================================================
%% Helper functions for making HOCON Schema
%%======================================================================================
make_producer_action_schema(ActionParametersRef) ->
[
{local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})}
| make_consumer_action_schema(ActionParametersRef)
].
make_consumer_action_schema(ActionParametersRef) ->
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{connector,
mk(binary(), #{
desc => ?DESC(emqx_connector_schema, "connector_field"), required => true
})},
{description, emqx_schema:description_schema()},
{parameters, ActionParametersRef},
{resource_opts,
mk(ref(?MODULE, resource_opts), #{
default => #{},
desc => ?DESC(emqx_resource_schema, "resource_opts")
})}
].
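%% A usage sketch (module and field names are hypothetical): a bridge schema
%% module would typically define its action fields as
%%
%%   fields(my_action) ->
%%       emqx_bridge_v2_schema:make_producer_action_schema(
%%           hoconsc:mk(hoconsc:ref(?MODULE, action_parameters), #{
%%               required => true, desc => ?DESC(action_parameters)
%%           })
%%       ).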
-ifdef(TEST).
-include_lib("hocon/include/hocon_types.hrl").
schema_homogeneous_test() ->
@ -169,7 +241,7 @@ schema_homogeneous_test() ->
is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) ->
Fields = Module:fields(TypeName),
ExpectedFieldNames = common_field_names(),
ExpectedFieldNames = lists:map(fun binary_to_atom/1, top_level_common_action_keys()),
MissingFileds = lists:filter(
fun(Name) -> lists:keyfind(Name, 1, Fields) =:= false end, ExpectedFieldNames
),
@ -184,9 +256,4 @@ is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) ->
}}
end.
common_field_names() ->
[
enable, description, local_topic, connector, resource_opts, parameters
].
-endif.

View File

@ -199,13 +199,41 @@ t_create_with_bad_name(_Config) ->
?assertMatch(
{error,
{pre_config_update, emqx_bridge_app, #{
reason := bad_bridge_name,
reason := <<"Invalid name format.", _/binary>>,
kind := validation_error
}}},
emqx:update_config(Path, Conf)
),
ok.
t_create_with_bad_name_root(_Config) ->
BadBridgeName = <<"test_哈哈">>,
BridgeConf = #{
<<"bridge_mode">> => false,
<<"clean_start">> => true,
<<"keepalive">> => <<"60s">>,
<<"proto_ver">> => <<"v4">>,
<<"server">> => <<"127.0.0.1:1883">>,
<<"ssl">> =>
#{
%% needed to trigger pre_config_update
<<"certfile">> => cert_file("certfile"),
<<"enable">> => true
}
},
Conf = #{<<"mqtt">> => #{BadBridgeName => BridgeConf}},
Path = [bridges],
?assertMatch(
{error,
{pre_config_update, _ConfigHandlerMod, #{
kind := validation_error,
reason := bad_bridge_names,
bad_bridges := [#{type := <<"mqtt">>, name := BadBridgeName}]
}}},
emqx:update_config(Path, Conf)
),
ok.
data_file(Name) ->
Dir = code:lib_dir(emqx_bridge, test),
{ok, Bin} = file:read_file(filename:join([Dir, "data", Name])),

View File

@ -1362,7 +1362,13 @@ t_create_with_bad_name(Config) ->
Config
),
Msg = emqx_utils_json:decode(Msg0, [return_maps]),
?assertMatch(#{<<"reason">> := <<"bad_bridge_name">>}, Msg),
?assertMatch(
#{
<<"kind">> := <<"validation_error">>,
<<"reason">> := <<"Invalid name format.", _/binary>>
},
Msg
),
ok.
validate_resource_request_ttl(single, Timeout, Name) ->

View File

@ -120,6 +120,22 @@ create_bridge(Config, Overrides) ->
ct:pal("creating bridge with config: ~p", [BridgeConfig]),
emqx_bridge:create(BridgeType, BridgeName, BridgeConfig).
list_bridges_api() ->
Params = [],
Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Opts = #{return_all => true},
ct:pal("listing bridges (via http)"),
Res =
case emqx_mgmt_api_test_util:request_api(get, Path, "", AuthHeader, Params, Opts) of
{ok, {Status, Headers, Body0}} ->
{ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}};
Error ->
Error
end,
ct:pal("list bridge result: ~p", [Res]),
Res.
create_bridge_api(Config) ->
create_bridge_api(Config, _Overrides = #{}).

View File

@ -60,15 +60,7 @@ init_per_testcase(_TestCase, Config) ->
ets:new(fun_table_name(), [named_table, public]),
%% Create a fake connector
{ok, _} = emqx_connector:create(con_type(), con_name(), con_config()),
[
{mocked_mods, [
emqx_connector_schema,
emqx_connector_resource,
emqx_bridge_v2
]}
| Config
].
Config.
end_per_testcase(_TestCase, _Config) ->
ets:delete(fun_table_name()),
@ -150,7 +142,8 @@ con_schema() ->
fields("connector") ->
[
{enable, hoconsc:mk(any(), #{})},
{resource_opts, hoconsc:mk(map(), #{})}
{resource_opts, hoconsc:mk(map(), #{})},
{ssl, hoconsc:ref(ssl)}
];
fields("api_post") ->
[
@ -159,7 +152,9 @@ fields("api_post") ->
{type, hoconsc:mk(bridge_type(), #{})},
{send_to, hoconsc:mk(atom(), #{})}
| fields("connector")
].
];
fields(ssl) ->
emqx_schema:client_ssl_opts_schema(#{required => false}).
con_config() ->
#{
@ -806,3 +801,27 @@ t_scenario_2(Config) ->
?assert(is_rule_enabled(RuleId2)),
ok.
t_create_with_bad_name(_Config) ->
BadBridgeName = <<"test_哈哈">>,
%% Note: must contain SSL options to trigger bug.
Cacertfile = emqx_common_test_helpers:app_path(
emqx,
filename:join(["etc", "certs", "cacert.pem"])
),
Opts = #{
name => BadBridgeName,
overrides => #{
<<"ssl">> =>
#{<<"cacertfile">> => Cacertfile}
}
},
{error,
{{_, 400, _}, _, #{
<<"code">> := <<"BAD_REQUEST">>,
<<"message">> := #{
<<"kind">> := <<"validation_error">>,
<<"reason">> := <<"Invalid name format.", _/binary>>
}
}}} = create_bridge_http_api_v1(Opts),
ok.

View File

@ -20,6 +20,7 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-import(emqx_common_test_helpers, [on_exit/1]).
@ -43,7 +44,7 @@ con_schema() ->
{
con_type(),
hoconsc:mk(
hoconsc:map(name, typerefl:map()),
hoconsc:map(name, hoconsc:ref(?MODULE, connector_config)),
#{
desc => <<"Test Connector Config">>,
required => false
@ -52,6 +53,15 @@ con_schema() ->
}
].
fields(connector_config) ->
[
{enable, hoconsc:mk(typerefl:boolean(), #{})},
{resource_opts, hoconsc:mk(typerefl:map(), #{})},
{on_start_fun, hoconsc:mk(typerefl:binary(), #{})},
{on_get_status_fun, hoconsc:mk(typerefl:binary(), #{})},
{on_add_channel_fun, hoconsc:mk(typerefl:binary(), #{})}
].
con_config() ->
#{
<<"enable">> => true,
@ -112,6 +122,7 @@ setup_mocks() ->
catch meck:new(emqx_connector_schema, MeckOpts),
meck:expect(emqx_connector_schema, fields, 1, con_schema()),
meck:expect(emqx_connector_schema, connector_type_to_bridge_types, 1, [con_type()]),
catch meck:new(emqx_connector_resource, MeckOpts),
meck:expect(emqx_connector_resource, connector_to_resource_type, 1, con_mod()),
@ -159,15 +170,7 @@ init_per_testcase(_TestCase, Config) ->
ets:new(fun_table_name(), [named_table, public]),
%% Create a fake connector
{ok, _} = emqx_connector:create(con_type(), con_name(), con_config()),
[
{mocked_mods, [
emqx_connector_schema,
emqx_connector_resource,
emqx_bridge_v2
]}
| Config
].
Config.
end_per_testcase(_TestCase, _Config) ->
ets:delete(fun_table_name()),
@ -846,6 +849,51 @@ t_start_operation_when_on_add_channel_gives_error(_Config) ->
),
ok.
t_lookup_status_when_connecting(_Config) ->
ResponseETS = ets:new(response_ets, [public]),
ets:insert(ResponseETS, {on_get_status_value, ?status_connecting}),
OnGetStatusFun = wrap_fun(fun() ->
ets:lookup_element(ResponseETS, on_get_status_value, 2)
end),
ConnectorConfig = emqx_utils_maps:deep_merge(con_config(), #{
<<"on_get_status_fun">> => OnGetStatusFun,
<<"resource_opts">> => #{<<"start_timeout">> => 100}
}),
ConnectorName = ?FUNCTION_NAME,
ct:pal("connector config:\n ~p", [ConnectorConfig]),
{ok, _} = emqx_connector:create(con_type(), ConnectorName, ConnectorConfig),
ActionName = my_test_action,
ChanStatusFun = wrap_fun(fun() -> ?status_disconnected end),
ActionConfig = (bridge_config())#{
<<"on_get_channel_status_fun">> => ChanStatusFun,
<<"connector">> => atom_to_binary(ConnectorName)
},
ct:pal("action config:\n ~p", [ActionConfig]),
{ok, _} = emqx_bridge_v2:create(bridge_type(), ActionName, ActionConfig),
%% Top-level status is connecting if the connector status is connecting, but the
%% channel is not yet installed. `resource_data.added_channels.$channel_id.status'
%% contains the true internal status.
{ok, Res} = emqx_bridge_v2:lookup(bridge_type(), ActionName),
?assertMatch(
#{
%% This is the action's public status
status := ?status_connecting,
resource_data :=
#{
%% This is the connector's status
status := ?status_connecting
}
},
Res
),
#{resource_data := #{added_channels := Channels}} = Res,
[{_Id, ChannelData}] = maps:to_list(Channels),
?assertMatch(#{status := ?status_disconnected}, ChannelData),
ok.
%% Helper Functions
wait_until(Fun) ->

View File

@ -587,7 +587,7 @@ t_broken_bridge_config(Config) ->
<<"type">> := ?BRIDGE_TYPE,
<<"connector">> := <<"does_not_exist">>,
<<"status">> := <<"disconnected">>,
<<"error">> := <<"Pending installation">>
<<"error">> := <<"Not installed">>
}
]},
request_json(get, uri([?ROOT]), Config)
@ -640,7 +640,7 @@ t_fix_broken_bridge_config(Config) ->
<<"type">> := ?BRIDGE_TYPE,
<<"connector">> := <<"does_not_exist">>,
<<"status">> := <<"disconnected">>,
<<"error">> := <<"Pending installation">>
<<"error">> := <<"Not installed">>
}
]},
request_json(get, uri([?ROOT]), Config)
@ -1021,6 +1021,26 @@ t_action_types(Config) ->
?assert(lists:all(fun is_binary/1, Types), #{types => Types}),
ok.
t_bad_name(Config) ->
Name = <<"_bad_name">>,
Res = request_json(
post,
uri([?ROOT]),
?KAFKA_BRIDGE(Name),
Config
),
?assertMatch({ok, 400, #{<<"message">> := _}}, Res),
{ok, 400, #{<<"message">> := Msg0}} = Res,
Msg = emqx_utils_json:decode(Msg0, [return_maps]),
?assertMatch(
#{
<<"kind">> := <<"validation_error">>,
<<"reason">> := <<"Invalid name format.", _/binary>>
},
Msg
),
ok.
%%% helpers
listen_on_random_port() ->
SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}],

View File

@ -43,8 +43,8 @@ on_start(
) ->
Fun = emqx_bridge_v2_SUITE:unwrap_fun(FunRef),
Fun(Conf);
on_start(_InstId, _Config) ->
{ok, #{}}.
on_start(_InstId, Config) ->
{ok, Config}.
on_add_channel(
_InstId,

View File

@ -139,6 +139,7 @@ create_bridge(Config, Overrides) ->
ConnectorName = ?config(connector_name, Config),
ConnectorType = ?config(connector_type, Config),
ConnectorConfig = ?config(connector_config, Config),
ct:pal("creating connector with config: ~p", [ConnectorConfig]),
{ok, _} =
emqx_connector:create(ConnectorType, ConnectorName, ConnectorConfig),
@ -312,6 +313,25 @@ create_rule_and_action_http(BridgeType, RuleTopic, Config, Opts) ->
Error
end.
api_spec_schemas(Root) ->
Method = get,
Path = emqx_mgmt_api_test_util:api_path(["schemas", Root]),
Params = [],
AuthHeader = [],
Opts = #{return_all => true},
case emqx_mgmt_api_test_util:request_api(Method, Path, "", AuthHeader, Params, Opts) of
{ok, {{_, 200, _}, _, Res0}} ->
#{<<"components">> := #{<<"schemas">> := Schemas}} =
emqx_utils_json:decode(Res0, [return_maps]),
Schemas
end.
bridges_api_spec_schemas() ->
api_spec_schemas("bridges").
actions_api_spec_schemas() ->
api_spec_schemas("actions").
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
@ -532,12 +552,17 @@ t_on_get_status(Config, Opts) ->
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
case ProxyHost of
undefined ->
ok;
_ ->
emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
ct:sleep(500),
?retry(
_Interval0 = 200,
_Attempts0 = 10,
?assertEqual({ok, FailureStatus}, emqx_resource_manager:health_check(ResourceId))
_Interval0 = 100,
_Attempts0 = 20,
?assertEqual(
{ok, FailureStatus}, emqx_resource_manager:health_check(ResourceId)
)
)
end),
%% Check that it recovers itself.
@ -545,5 +570,6 @@ t_on_get_status(Config, Opts) ->
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId))
),
)
end,
ok.

View File

@ -0,0 +1,41 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_tests).
-include_lib("eunit/include/eunit.hrl").
resource_opts_union_connector_actions_test() ->
%% The purpose of this test is to ensure we have split `resource_opts' fields
consciously between connector and actions, in particular when/if we introduce new
%% fields there.
AllROFields = non_deprecated_fields(emqx_resource_schema:create_opts([])),
ActionROFields = non_deprecated_fields(emqx_bridge_v2_schema:resource_opts_fields()),
ConnectorROFields = non_deprecated_fields(emqx_connector_schema:resource_opts_fields()),
UnionROFields = lists:usort(ConnectorROFields ++ ActionROFields),
?assertEqual(
lists:usort(AllROFields),
UnionROFields,
#{
missing_fields => AllROFields -- UnionROFields,
unexpected_fields => UnionROFields -- AllROFields,
action_fields => ActionROFields,
connector_fields => ConnectorROFields
}
),
ok.
non_deprecated_fields(Fields) ->
[K || {K, Schema} <- Fields, not hocon_schema:is_deprecated(Schema)].

View File

@ -9,7 +9,7 @@
telemetry,
wolff
]},
{env, []},
{env, [{emqx_action_info_modules, [emqx_bridge_azure_event_hub_action_info]}]},
{modules, []},
{links, []}
]}.

View File

@ -17,7 +17,7 @@
desc/1
]).
%% emqx_bridge_enterprise "unofficial" API
%% `emqx_bridge_v2_schema' "unofficial" API
-export([
bridge_v2_examples/1,
conn_bridge_examples/1,
@ -126,7 +126,7 @@ fields(action) ->
fields(actions) ->
Fields =
override(
emqx_bridge_kafka:producer_opts(),
emqx_bridge_kafka:producer_opts(action),
bridge_v2_overrides()
) ++
[

View File

@ -10,7 +10,9 @@
bridge_v1_type_name/0,
action_type_name/0,
connector_type_name/0,
schema_module/0
schema_module/0,
connector_action_config_to_bridge_v1_config/2,
bridge_v1_config_to_action_config/2
]).
bridge_v1_type_name() -> azure_event_hub_producer.
@ -20,3 +22,11 @@ action_type_name() -> azure_event_hub_producer.
connector_type_name() -> azure_event_hub_producer.
schema_module() -> emqx_bridge_azure_event_hub.
connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) ->
emqx_bridge_kafka_action_info:connector_action_config_to_bridge_v1_config(
ConnectorConfig, ActionConfig
).
bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) ->
emqx_bridge_kafka_action_info:bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName).

View File

@ -272,6 +272,22 @@ make_message() ->
timestamp => Time
}.
bridge_api_spec_props_for_get() ->
#{
<<"bridge_azure_event_hub.get_producer">> :=
#{<<"properties">> := Props}
} =
emqx_bridge_v2_testlib:bridges_api_spec_schemas(),
Props.
action_api_spec_props_for_get() ->
#{
<<"bridge_azure_event_hub.get_bridge_v2">> :=
#{<<"properties">> := Props}
} =
emqx_bridge_v2_testlib:actions_api_spec_schemas(),
Props.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
@ -341,3 +357,23 @@ t_same_name_azure_kafka_bridges(Config) ->
end
),
ok.
t_parameters_key_api_spec(_Config) ->
BridgeProps = bridge_api_spec_props_for_get(),
?assert(is_map_key(<<"kafka">>, BridgeProps), #{bridge_props => BridgeProps}),
?assertNot(is_map_key(<<"parameters">>, BridgeProps), #{bridge_props => BridgeProps}),
ActionProps = action_api_spec_props_for_get(),
?assertNot(is_map_key(<<"kafka">>, ActionProps), #{action_props => ActionProps}),
?assert(is_map_key(<<"parameters">>, ActionProps), #{action_props => ActionProps}),
ok.
t_http_api_get(Config) ->
?assertMatch({ok, _}, emqx_bridge_v2_testlib:create_bridge(Config)),
%% v1 api; no mangling of configs; has `kafka' top level config key
?assertMatch(
{ok, {{_, 200, _}, _, [#{<<"kafka">> := _}]}},
emqx_bridge_testlib:list_bridges_api()
),
ok.

View File

@ -0,0 +1,94 @@
Business Source License 1.1
Licensor: Hangzhou EMQ Technologies Co., Ltd.
Licensed Work: EMQX Enterprise Edition
The Licensed Work is (c) 2023
Hangzhou EMQ Technologies Co., Ltd.
Additional Use Grant: Students and educators are granted right to copy,
modify, and create derivative work for research
or education.
Change Date: 2027-02-01
Change License: Apache License, Version 2.0
For information about alternative licensing arrangements for the Software,
please contact Licensor: https://www.emqx.com/en/contact
Notice
The Business Source License (this document, or the “License”) is not an Open
Source license. However, the Licensed Work will eventually be made available
under an Open Source License, as stated in this License.
License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
“Business Source License” is a trademark of MariaDB Corporation Ab.
-----------------------------------------------------------------------------
Business Source License 1.1
Terms
The Licensor hereby grants you the right to copy, modify, create derivative
works, redistribute, and make non-production use of the Licensed Work. The
Licensor may make an Additional Use Grant, above, permitting limited
production use.
Effective on the Change Date, or the fourth anniversary of the first publicly
available distribution of a specific version of the Licensed Work under this
License, whichever comes first, the Licensor hereby grants you rights under
the terms of the Change License, and the rights granted in the paragraph
above terminate.
If your use of the Licensed Work does not comply with the requirements
currently in effect as described in this License, you must purchase a
commercial license from the Licensor, its affiliated entities, or authorized
resellers, or you must refrain from using the Licensed Work.
All copies of the original and modified Licensed Work, and derivative works
of the Licensed Work, are subject to this License. This License applies
separately for each version of the Licensed Work and the Change Date may vary
for each version of the Licensed Work released by Licensor.
You must conspicuously display this License on each original or modified copy
of the Licensed Work. If you receive the Licensed Work in original or
modified form from a third party, the terms and conditions set forth in this
License apply to your use of that work.
Any use of the Licensed Work in violation of this License will automatically
terminate your rights under this License for the current and all other
versions of the Licensed Work.
This License does not grant you any right in any trademark or logo of
Licensor or its affiliates (provided that you may use a trademark or logo of
Licensor as expressly required by this License).
TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON
AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS,
EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND
TITLE.
MariaDB hereby grants you permission to use this License's text to license
your works, and to refer to it using the trademark “Business Source License”,
as long as you comply with the Covenants of Licensor below.
Covenants of Licensor
In consideration of the right to use this License's text and the “Business
Source License” name and trademark, Licensor covenants to MariaDB, and to all
other recipients of the licensed work to be provided by Licensor:
1. To specify as the Change License the GPL Version 2.0 or any later version,
or a license that is compatible with GPL Version 2.0 or a later version,
where “compatible” means that software provided under the Change License can
be included in a program with software provided under GPL Version 2.0 or a
later version. Licensor may specify additional Change Licenses without
limitation.
2. To either: (a) specify an additional grant of rights to use that does not
impose any additional restriction on the right granted in this License, as
the Additional Use Grant; or (b) insert the text “None”.
3. To specify a Change Date.
4. Not to modify this License in any other way.

View File

@ -0,0 +1,27 @@
# Confluent Data Integration Bridge
This application houses the Confluent Producer data integration bridge for EMQX Enterprise
Edition. It provides the means to connect to Confluent and publish messages to it via the
Kafka protocol.
Currently, our Kafka Producer library (`wolff`) has its own `replayq` buffering
implementation, so this bridge does not require buffer workers from `emqx_resource`. It
implements the connection management and interaction without the need for a separate
connector app, since it's not used by the authentication and authorization applications.
# Documentation links
For more information about Kafka interface for Confluent, please see [the official
docs](https://docs.confluent.io/cloud/current/overview.html).
# Configurations
Please see [Ingest Data into Confluent](https://docs.emqx.com/en/enterprise/v5.3/data-integration/data-bridge-confluent.html) for more detailed info.
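For orientation, a minimal producer action configuration might look like the sketch below
(assembled from the field names used by the test suites in this commit; see the linked
documentation for the authoritative reference):

```
actions.confluent_producer.my_producer {
  enable = true
  connector = my_connector
  parameters {
    topic = test
    message {
      key = "${.clientid}"
      value = "${.}"
    }
  }
  local_topic = "t/confluent"
}
```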
# Contributing
Please see our [contributing.md](../../CONTRIBUTING.md).
# License
EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt).

View File

@ -0,0 +1,2 @@
toxiproxy
kafka

View File

@ -0,0 +1,15 @@
%% -*- mode: erlang; -*-
{erl_opts, [debug_info]}.
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.8.0"}}}
, {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}
, {snappyer, "1.2.9"}
, {emqx_connector, {path, "../../apps/emqx_connector"}}
, {emqx_resource, {path, "../../apps/emqx_resource"}}
, {emqx_bridge, {path, "../../apps/emqx_bridge"}}
]}.
{shell, [
{apps, [emqx_bridge_confluent]}
]}.

View File

@ -0,0 +1,15 @@
{application, emqx_bridge_confluent, [
{description, "EMQX Enterprise Confluent Connector and Action"},
{vsn, "0.1.0"},
{registered, []},
{applications, [
kernel,
stdlib,
emqx_resource,
telemetry,
wolff
]},
{env, [{emqx_action_info_modules, [emqx_bridge_confluent_producer_action_info]}]},
{modules, []},
{links, []}
]}.

View File

@ -0,0 +1,406 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_confluent_producer).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-behaviour(hocon_schema).
-behaviour(emqx_connector_resource).
%% `hocon_schema' API
-export([
namespace/0,
roots/0,
fields/1,
desc/1
]).
%% emqx_bridge_enterprise "unofficial" API
-export([
bridge_v2_examples/1,
connector_examples/1
]).
%% emqx_connector_resource behaviour callbacks
-export([connector_config/1]).
-export([host_opts/0]).
-import(hoconsc, [mk/2, enum/1, ref/2]).
-define(CONFLUENT_CONNECTOR_TYPE, confluent_producer).
-define(CONFLUENT_CONNECTOR_TYPE_BIN, <<"confluent_producer">>).
%%-------------------------------------------------------------------------------------------------
%% `hocon_schema' API
%%-------------------------------------------------------------------------------------------------
namespace() -> "confluent".
roots() -> ["config_producer"].
fields("put_connector") ->
Fields = override(
emqx_bridge_kafka:fields("put_connector"),
connector_overrides()
),
override_documentations(Fields);
fields("get_connector") ->
emqx_bridge_schema:status_fields() ++
fields("post_connector");
fields("post_connector") ->
Fields = override(
emqx_bridge_kafka:fields("post_connector"),
connector_overrides()
),
override_documentations(Fields);
fields("put_bridge_v2") ->
Fields = override(
emqx_bridge_kafka:fields("put_bridge_v2"),
bridge_v2_overrides()
),
override_documentations(Fields);
fields("get_bridge_v2") ->
emqx_bridge_schema:status_fields() ++
fields("post_bridge_v2");
fields("post_bridge_v2") ->
Fields = override(
emqx_bridge_kafka:fields("post_bridge_v2"),
bridge_v2_overrides()
),
override_documentations(Fields);
fields("config_bridge_v2") ->
fields(actions);
fields("config_connector") ->
Fields = override(
emqx_bridge_kafka:fields("config_connector"),
connector_overrides()
),
override_documentations(Fields);
fields(auth_username_password) ->
Fields = override(
emqx_bridge_kafka:fields(auth_username_password),
auth_overrides()
),
override_documentations(Fields);
fields(ssl_client_opts) ->
Fields = override(
emqx_bridge_kafka:ssl_client_opts_fields(),
ssl_overrides()
),
override_documentations(Fields);
fields(producer_kafka_opts) ->
Fields = override(
emqx_bridge_kafka:fields(producer_kafka_opts),
kafka_producer_overrides()
),
override_documentations(Fields);
fields(kafka_message) ->
Fields0 = emqx_bridge_kafka:fields(kafka_message),
Fields = proplists:delete(timestamp, Fields0),
override_documentations(Fields);
fields(action) ->
{confluent_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_confluent_producer, actions)),
#{
desc => <<"Confluent Actions Config">>,
required => false
}
)};
fields(actions) ->
Fields =
override(
emqx_bridge_kafka:producer_opts(action),
bridge_v2_overrides()
) ++
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{connector,
mk(binary(), #{
desc => ?DESC(emqx_connector_schema, "connector_field"), required => true
})},
{description, emqx_schema:description_schema()}
],
override_documentations(Fields);
fields(Method) ->
Fields = emqx_bridge_kafka:fields(Method),
override_documentations(Fields).
desc("config") ->
?DESC("desc_config");
desc("config_connector") ->
?DESC("desc_config");
desc("get_" ++ Type) when Type == "connector"; Type == "bridge_v2" ->
["Configuration for Confluent using `GET` method."];
desc("put_" ++ Type) when Type == "connector"; Type == "bridge_v2" ->
["Configuration for Confluent using `PUT` method."];
desc("post_" ++ Type) when Type == "connector"; Type == "bridge_v2" ->
["Configuration for Confluent using `POST` method."];
desc(Name) ->
lists:member(Name, struct_names()) orelse throw({missing_desc, Name}),
?DESC(Name).
struct_names() ->
[
auth_username_password,
kafka_message,
producer_kafka_opts,
actions,
ssl_client_opts
].
bridge_v2_examples(Method) ->
[
#{
?CONFLUENT_CONNECTOR_TYPE_BIN => #{
summary => <<"Confluent Action">>,
value => values({Method, bridge_v2})
}
}
].
connector_examples(Method) ->
[
#{
?CONFLUENT_CONNECTOR_TYPE_BIN => #{
summary => <<"Confluent Connector">>,
value => values({Method, connector})
}
}
].
values({get, ConfluentType}) ->
maps:merge(
#{
status => <<"connected">>,
node_status => [
#{
node => <<"emqx@localhost">>,
status => <<"connected">>
}
]
},
values({post, ConfluentType})
);
values({post, bridge_v2}) ->
maps:merge(
values(action),
#{
enable => true,
connector => <<"my_confluent_producer_connector">>,
name => <<"my_confluent_producer_action">>,
type => ?CONFLUENT_CONNECTOR_TYPE_BIN
}
);
values({post, connector}) ->
maps:merge(
values(common_config),
#{
name => <<"my_confluent_producer_connector">>,
type => ?CONFLUENT_CONNECTOR_TYPE_BIN,
ssl => #{
enable => true,
server_name_indication => <<"auto">>,
verify => <<"verify_none">>,
versions => [<<"tlsv1.3">>, <<"tlsv1.2">>]
}
}
);
values({put, connector}) ->
values(common_config);
values({put, bridge_v2}) ->
maps:merge(
values(action),
#{
enable => true,
connector => <<"my_confluent_producer_connector">>
}
);
values(common_config) ->
#{
authentication => #{
password => <<"******">>
},
bootstrap_hosts => <<"xyz.sa-east1.gcp.confluent.cloud:9092">>,
connect_timeout => <<"5s">>,
enable => true,
metadata_request_timeout => <<"4s">>,
min_metadata_refresh_interval => <<"3s">>,
socket_opts => #{
sndbuf => <<"1024KB">>,
recbuf => <<"1024KB">>,
nodelay => true,
tcp_keepalive => <<"none">>
}
};
values(action) ->
#{
parameters => #{
topic => <<"topic">>,
message => #{
key => <<"${.clientid}">>,
value => <<"${.}">>
},
max_batch_bytes => <<"896KB">>,
partition_strategy => <<"random">>,
required_acks => <<"all_isr">>,
partition_count_refresh_interval => <<"60s">>,
kafka_headers => <<"${.pub_props}">>,
kafka_ext_headers => [
#{
kafka_ext_header_key => <<"clientid">>,
kafka_ext_header_value => <<"${clientid}">>
},
#{
kafka_ext_header_key => <<"topic">>,
kafka_ext_header_value => <<"${topic}">>
}
],
kafka_header_value_encode_mode => none,
max_inflight => 10,
buffer => #{
mode => <<"hybrid">>,
per_partition_limit => <<"2GB">>,
segment_bytes => <<"100MB">>,
memory_overload_protection => true
}
},
local_topic => <<"mqtt/local/topic">>
}.
%%-------------------------------------------------------------------------------------------------
%% `emqx_connector_resource' API
%%-------------------------------------------------------------------------------------------------
connector_config(Config) ->
%% Default port for Confluent is 9092
BootstrapHosts0 = maps:get(bootstrap_hosts, Config),
BootstrapHosts = emqx_schema:parse_servers(
BootstrapHosts0,
?MODULE:host_opts()
),
Config#{bootstrap_hosts := BootstrapHosts}.
%%-------------------------------------------------------------------------------------------------
%% Internal fns
%%-------------------------------------------------------------------------------------------------
ref(Name) ->
hoconsc:ref(?MODULE, Name).
connector_overrides() ->
#{
authentication =>
mk(
ref(auth_username_password),
#{
default => #{},
required => true,
desc => ?DESC("authentication")
}
),
bootstrap_hosts =>
mk(
binary(),
#{
required => true,
validator => emqx_schema:servers_validator(
host_opts(), _Required = true
)
}
),
ssl => mk(
ref(ssl_client_opts),
#{
required => true,
default => #{<<"enable">> => true}
}
),
type => mk(
?CONFLUENT_CONNECTOR_TYPE,
#{
required => true,
desc => ?DESC("connector_type")
}
)
}.
bridge_v2_overrides() ->
#{
parameters =>
mk(ref(producer_kafka_opts), #{
required => true,
validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1
}),
ssl => mk(ref(ssl_client_opts), #{
default => #{
<<"enable">> => true,
<<"verify">> => <<"verify_none">>
}
}),
type => mk(
?CONFLUENT_CONNECTOR_TYPE,
#{
required => true,
desc => ?DESC("bridge_v2_type")
}
)
}.
auth_overrides() ->
#{
mechanism =>
mk(plain, #{
required => true,
default => plain,
importance => ?IMPORTANCE_HIDDEN
}),
username => mk(binary(), #{required => true}),
password => emqx_connector_schema_lib:password_field(#{required => true})
}.
%% Kafka has SSL disabled by default
%% Confluent must use SSL
ssl_overrides() ->
#{
"enable" => mk(true, #{default => true, importance => ?IMPORTANCE_HIDDEN}),
"verify" => mk(verify_none, #{default => verify_none, importance => ?IMPORTANCE_HIDDEN})
}.
kafka_producer_overrides() ->
#{
message => mk(ref(kafka_message), #{})
}.
override_documentations(Fields) ->
lists:map(
fun({Name, Sc}) ->
case hocon_schema:field_schema(Sc, desc) of
?DESC(emqx_bridge_kafka, Key) ->
%% to please dialyzer...
Override = #{type => hocon_schema:field_schema(Sc, type), desc => ?DESC(Key)},
{Name, hocon_schema:override(Sc, Override)};
_ ->
{Name, Sc}
end
end,
Fields
).
override(Fields, Overrides) ->
lists:map(
fun({Name, Sc}) ->
case maps:find(Name, Overrides) of
{ok, Override} ->
{Name, hocon_schema:override(Sc, Override)};
error ->
{Name, Sc}
end
end,
Fields
).
host_opts() ->
#{default_port => 9092}.

View File

@ -0,0 +1,19 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_confluent_producer_action_info).
-behaviour(emqx_action_info).
-export([
action_type_name/0,
connector_type_name/0,
schema_module/0
]).
action_type_name() -> confluent_producer.
connector_type_name() -> confluent_producer.
schema_module() -> emqx_bridge_confluent_producer.

View File

@ -0,0 +1,343 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_confluent_producer_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(BRIDGE_TYPE, confluent_producer).
-define(BRIDGE_TYPE_BIN, <<"confluent_producer">>).
-define(CONNECTOR_TYPE, confluent_producer).
-define(CONNECTOR_TYPE_BIN, <<"confluent_producer">>).
-define(KAFKA_BRIDGE_TYPE, kafka_producer).
-import(emqx_common_test_helpers, [on_exit/1]).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
KafkaHost = os:getenv("KAFKA_SASL_SSL_HOST", "toxiproxy.emqx.net"),
KafkaPort = list_to_integer(os:getenv("KAFKA_SASL_SSL_PORT", "9295")),
ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"),
ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
ProxyName = "kafka_sasl_ssl",
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of
true ->
Apps = emqx_cth_suite:start(
[
emqx_conf,
emqx,
emqx_management,
emqx_resource,
emqx_bridge_confluent,
emqx_bridge,
emqx_rule_engine,
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
],
#{work_dir => ?config(priv_dir, Config)}
),
{ok, Api} = emqx_common_test_http:create_default_app(),
[
{tc_apps, Apps},
{api, Api},
{proxy_name, ProxyName},
{proxy_host, ProxyHost},
{proxy_port, ProxyPort},
{kafka_host, KafkaHost},
{kafka_port, KafkaPort}
| Config
];
false ->
case os:getenv("IS_CI") of
"yes" ->
throw(no_kafka);
_ ->
{skip, no_kafka}
end
end.
end_per_suite(Config) ->
Apps = ?config(tc_apps, Config),
emqx_cth_suite:stop(Apps),
ok.
init_per_testcase(TestCase, Config) ->
common_init_per_testcase(TestCase, Config).
common_init_per_testcase(TestCase, Config) ->
ct:timetrap(timer:seconds(60)),
emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(),
emqx_config:delete_override_conf_files(),
UniqueNum = integer_to_binary(erlang:unique_integer()),
Name = iolist_to_binary([atom_to_binary(TestCase), UniqueNum]),
KafkaHost = ?config(kafka_host, Config),
KafkaPort = ?config(kafka_port, Config),
KafkaTopic = Name,
ConnectorConfig = connector_config(Name, KafkaHost, KafkaPort),
{BridgeConfig, ExtraConfig} = bridge_config(Name, Name, KafkaTopic),
ensure_topic(Config, KafkaTopic, _Opts = #{}),
ok = snabbkaffe:start_trace(),
ExtraConfig ++
[
{connector_type, ?CONNECTOR_TYPE},
{connector_name, Name},
{connector_config, ConnectorConfig},
{bridge_type, ?BRIDGE_TYPE},
{bridge_name, Name},
{bridge_config, BridgeConfig}
| Config
].
end_per_testcase(_Testcase, Config) ->
case proplists:get_bool(skip_does_not_apply, Config) of
true ->
ok;
false ->
ProxyHost = ?config(proxy_host, Config),
ProxyPort = ?config(proxy_port, Config),
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(),
emqx_common_test_helpers:call_janitor(60_000),
ok = snabbkaffe:stop(),
ok
end.
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
connector_config(Name, KafkaHost, KafkaPort) ->
InnerConfigMap0 =
#{
<<"enable">> => true,
<<"bootstrap_hosts">> => iolist_to_binary([KafkaHost, ":", integer_to_binary(KafkaPort)]),
<<"authentication">> =>
#{
<<"mechanism">> => <<"plain">>,
<<"username">> => <<"emqxuser">>,
<<"password">> => <<"password">>
},
<<"connect_timeout">> => <<"5s">>,
<<"socket_opts">> =>
#{
<<"nodelay">> => true,
<<"recbuf">> => <<"1024KB">>,
<<"sndbuf">> => <<"1024KB">>,
<<"tcp_keepalive">> => <<"none">>
},
<<"ssl">> =>
#{
<<"cacertfile">> => shared_secret(client_cacertfile),
<<"certfile">> => shared_secret(client_certfile),
<<"keyfile">> => shared_secret(client_keyfile),
<<"ciphers">> => [],
<<"depth">> => 10,
<<"enable">> => true,
<<"hibernate_after">> => <<"5s">>,
<<"log_level">> => <<"notice">>,
<<"reuse_sessions">> => true,
<<"secure_renegotiate">> => true,
<<"server_name_indication">> => <<"disable">>,
%% currently, it seems our CI kafka certs fail peer verification
<<"verify">> => <<"verify_none">>,
<<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
}
},
InnerConfigMap = serde_roundtrip(InnerConfigMap0),
parse_and_check_connector_config(InnerConfigMap, Name).
parse_and_check_connector_config(InnerConfigMap, Name) ->
TypeBin = ?CONNECTOR_TYPE_BIN,
RawConf = #{<<"connectors">> => #{TypeBin => #{Name => InnerConfigMap}}},
#{<<"connectors">> := #{TypeBin := #{Name := Config}}} =
hocon_tconf:check_plain(emqx_connector_schema, RawConf, #{
required => false, atom_key => false
}),
ct:pal("parsed config: ~p", [Config]),
InnerConfigMap.
bridge_config(Name, ConnectorId, KafkaTopic) ->
InnerConfigMap0 =
#{
<<"enable">> => true,
<<"connector">> => ConnectorId,
<<"parameters">> =>
#{
<<"buffer">> =>
#{
<<"memory_overload_protection">> => true,
<<"mode">> => <<"memory">>,
<<"per_partition_limit">> => <<"2GB">>,
<<"segment_bytes">> => <<"100MB">>
},
<<"compression">> => <<"no_compression">>,
<<"kafka_header_value_encode_mode">> => <<"none">>,
<<"max_batch_bytes">> => <<"896KB">>,
<<"max_inflight">> => <<"10">>,
<<"message">> =>
#{
<<"key">> => <<"${.clientid}">>,
<<"value">> => <<"${.}">>
},
<<"partition_count_refresh_interval">> => <<"60s">>,
<<"partition_strategy">> => <<"random">>,
<<"query_mode">> => <<"async">>,
<<"required_acks">> => <<"all_isr">>,
<<"sync_query_timeout">> => <<"5s">>,
<<"topic">> => KafkaTopic
},
<<"local_topic">> => <<"t/confluent">>
},
InnerConfigMap = serde_roundtrip(InnerConfigMap0),
ExtraConfig =
[{kafka_topic, KafkaTopic}],
{parse_and_check_bridge_config(InnerConfigMap, Name), ExtraConfig}.
%% check it serializes correctly
serde_roundtrip(InnerConfigMap0) ->
IOList = hocon_pp:do(InnerConfigMap0, #{}),
{ok, InnerConfigMap} = hocon:binary(IOList),
InnerConfigMap.
parse_and_check_bridge_config(InnerConfigMap, Name) ->
TypeBin = ?BRIDGE_TYPE_BIN,
RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}},
hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}),
InnerConfigMap.
shared_secret_path() ->
os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret").
shared_secret(client_keyfile) ->
filename:join([shared_secret_path(), "client.key"]);
shared_secret(client_certfile) ->
filename:join([shared_secret_path(), "client.crt"]);
shared_secret(client_cacertfile) ->
filename:join([shared_secret_path(), "ca.crt"]);
shared_secret(rig_keytab) ->
filename:join([shared_secret_path(), "rig.keytab"]).
ensure_topic(Config, KafkaTopic, Opts) ->
KafkaHost = ?config(kafka_host, Config),
KafkaPort = ?config(kafka_port, Config),
NumPartitions = maps:get(num_partitions, Opts, 3),
Endpoints = [{KafkaHost, KafkaPort}],
TopicConfigs = [
#{
name => KafkaTopic,
num_partitions => NumPartitions,
replication_factor => 1,
assignments => [],
configs => []
}
],
RequestConfig = #{timeout => 5_000},
ConnConfig =
#{
ssl => emqx_tls_lib:to_client_opts(
#{
keyfile => shared_secret(client_keyfile),
certfile => shared_secret(client_certfile),
cacertfile => shared_secret(client_cacertfile),
verify => verify_none,
enable => true
}
),
sasl => {plain, <<"emqxuser">>, <<"password">>}
},
case brod:create_topics(Endpoints, TopicConfigs, RequestConfig, ConnConfig) of
ok -> ok;
{error, topic_already_exists} -> ok
end.
make_message() ->
Time = erlang:unique_integer(),
BinTime = integer_to_binary(Time),
Payload = emqx_guid:to_hexstr(emqx_guid:gen()),
#{
clientid => BinTime,
payload => Payload,
timestamp => Time
}.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_start_stop(Config) ->
emqx_bridge_v2_testlib:t_start_stop(Config, kafka_producer_stopped),
ok.
t_create_via_http(Config) ->
emqx_bridge_v2_testlib:t_create_via_http(Config),
ok.
t_on_get_status(Config) ->
emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}),
ok.
t_sync_query(Config) ->
ok = emqx_bridge_v2_testlib:t_sync_query(
Config,
fun make_message/0,
fun(Res) -> ?assertEqual(ok, Res) end,
emqx_bridge_kafka_impl_producer_sync_query
),
ok.
t_same_name_confluent_kafka_bridges(Config) ->
BridgeName = ?config(bridge_name, Config),
TracePoint = emqx_bridge_kafka_impl_producer_sync_query,
%% creates the Confluent bridge and checks that it's working
ok = emqx_bridge_v2_testlib:t_sync_query(
Config,
fun make_message/0,
fun(Res) -> ?assertEqual(ok, Res) end,
TracePoint
),
%% then create a Kafka bridge with same name and delete it after creation
ConfigKafka0 = lists:keyreplace(bridge_type, 1, Config, {bridge_type, ?KAFKA_BRIDGE_TYPE}),
ConfigKafka = lists:keyreplace(
connector_type, 1, ConfigKafka0, {connector_type, ?KAFKA_BRIDGE_TYPE}
),
ok = emqx_bridge_v2_testlib:t_create_via_http(ConfigKafka),
ConfluentResourceId = emqx_bridge_v2_testlib:resource_id(Config),
KafkaResourceId = emqx_bridge_v2_testlib:resource_id(ConfigKafka),
%% check that both bridges are healthy
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ConfluentResourceId)),
?assertEqual({ok, connected}, emqx_resource_manager:health_check(KafkaResourceId)),
?assertMatch(
{{ok, _}, {ok, _}},
?wait_async_action(
emqx_connector:disable_enable(disable, ?KAFKA_BRIDGE_TYPE, BridgeName),
#{?snk_kind := kafka_producer_stopped},
5_000
)
),
%% check that the Confluent bridge is still working
?check_trace(
begin
BridgeId = emqx_bridge_v2_testlib:bridge_id(Config),
Message = {BridgeId, make_message()},
?assertEqual(ok, emqx_resource:simple_sync_query(ConfluentResourceId, Message)),
ok
end,
fun(Trace) ->
?assertMatch([#{instance_id := ConfluentResourceId}], ?of_kind(TracePoint, Trace))
end
),
ok.

View File

@ -0,0 +1,179 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_confluent_tests).
-include_lib("eunit/include/eunit.hrl").
%%===========================================================================
%% Data Section
%%===========================================================================
%% erlfmt-ignore
confluent_producer_action_hocon() ->
"""
actions.confluent_producer.my_producer {
enable = true
connector = my_connector
parameters {
buffer {
memory_overload_protection = false
mode = memory
per_partition_limit = 2GB
segment_bytes = 100MB
}
compression = no_compression
kafka_header_value_encode_mode = none
max_batch_bytes = 896KB
max_inflight = 10
message {
key = \"${.clientid}\"
value = \"${.}\"
}
partition_count_refresh_interval = 60s
partition_strategy = random
query_mode = async
required_acks = all_isr
sync_query_timeout = 5s
topic = test
}
local_topic = \"t/confluent\"
}
""".
confluent_producer_connector_hocon() ->
""
"\n"
"connectors.confluent_producer.my_producer {\n"
" enable = true\n"
" authentication {\n"
" username = \"user\"\n"
" password = \"xxx\"\n"
" }\n"
" bootstrap_hosts = \"xyz.sa-east1.gcp.confluent.cloud:9092\"\n"
" connect_timeout = 5s\n"
" metadata_request_timeout = 5s\n"
" min_metadata_refresh_interval = 3s\n"
" socket_opts {\n"
" recbuf = 1024KB\n"
" sndbuf = 1024KB\n"
" tcp_keepalive = none\n"
" }\n"
"}\n"
"".
%%===========================================================================
%% Helper functions
%%===========================================================================
parse(Hocon) ->
{ok, Conf} = hocon:binary(Hocon),
Conf.
check(SchemaMod, Conf) when is_map(Conf) ->
hocon_tconf:check_plain(SchemaMod, Conf).
check_action(Conf) when is_map(Conf) ->
check(emqx_bridge_v2_schema, Conf).
check_connector(Conf) when is_map(Conf) ->
check(emqx_connector_schema, Conf).
-define(validation_error(SchemaMod, Reason, Value),
{SchemaMod, [
#{
kind := validation_error,
reason := Reason,
value := Value
}
]}
).
-define(action_validation_error(Reason, Value),
?validation_error(emqx_bridge_v2_schema, Reason, Value)
).
-define(connector_validation_error(Reason, Value),
?validation_error(emqx_connector_schema, Reason, Value)
).
-define(ok_config(RootKey, Cfg), #{
RootKey :=
#{
<<"confluent_producer">> :=
#{
<<"my_producer">> :=
Cfg
}
}
}).
-define(ok_connector_config(Cfg), ?ok_config(<<"connectors">>, Cfg)).
-define(ok_action_config(Cfg), ?ok_config(<<"actions">>, Cfg)).
%%===========================================================================
%% Test cases
%%===========================================================================
confluent_producer_connector_test_() ->
%% ensure this module is loaded when testing only this file
_ = emqx_bridge_enterprise:module_info(),
BaseConf = parse(confluent_producer_connector_hocon()),
Override = fun(Cfg) ->
emqx_utils_maps:deep_merge(
BaseConf,
#{
<<"connectors">> =>
#{
<<"confluent_producer">> =>
#{<<"my_producer">> => Cfg}
}
}
)
end,
[
{"base config",
?_assertMatch(
?ok_connector_config(
#{
<<"authentication">> := #{
<<"mechanism">> := plain
},
<<"ssl">> := #{
<<"enable">> := true,
<<"verify">> := verify_none
}
}
),
check_connector(BaseConf)
)},
{"ssl disabled",
?_assertThrow(
?connector_validation_error(#{expected := "true"}, "false"),
check_connector(Override(#{<<"ssl">> => #{<<"enable">> => <<"false">>}}))
)},
{"bad authn mechanism: scram sha256",
?_assertThrow(
?connector_validation_error(#{expected := "plain"}, "scram_sha_256"),
check_connector(
Override(#{<<"authentication">> => #{<<"mechanism">> => <<"scram_sha_256">>}})
)
)},
{"bad authn mechanism: scram sha512",
?_assertThrow(
?connector_validation_error(#{expected := "plain"}, "scram_sha_512"),
check_connector(
Override(#{<<"authentication">> => #{<<"mechanism">> => <<"scram_sha_512">>}})
)
)}
].
confluent_producer_action_test_() ->
%% ensure this module is loaded when testing only this file
_ = emqx_bridge_enterprise:module_info(),
BaseConf = parse(confluent_producer_action_hocon()),
[
{"base config",
?_assertMatch(
?ok_action_config(_),
check_action(BaseConf)
)}
].

View File

@ -8,7 +8,7 @@
emqx_resource,
ehttpc
]},
{env, []},
{env, [{emqx_action_info_modules, [emqx_bridge_gcp_pubsub_producer_action_info]}]},
{modules, []},
{links, []}
]}.

View File

@ -134,7 +134,7 @@ start(
-spec stop(resource_id()) -> ok | {error, term()}.
stop(ResourceId) ->
?tp(gcp_pubsub_stop, #{resource_id => ResourceId}),
?tp(gcp_pubsub_stop, #{instance_id => ResourceId, resource_id => ResourceId}),
?SLOG(info, #{
msg => "stopping_gcp_pubsub_bridge",
connector => ResourceId

View File

@ -8,23 +8,30 @@
-include_lib("emqx_resource/include/emqx_resource.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-type config() :: #{
attributes_template := [#{key := binary(), value := binary()}],
-type connector_config() :: #{
connect_timeout := emqx_schema:duration_ms(),
max_retries := non_neg_integer(),
resource_opts := #{request_ttl := infinity | emqx_schema:duration_ms(), any() => term()},
service_account_json := emqx_bridge_gcp_pubsub_client:service_account_json()
}.
-type action_config() :: #{
parameters := #{
attributes_template := [#{key := binary(), value := binary()}],
ordering_key_template := binary(),
payload_template := binary(),
pubsub_topic := binary(),
resource_opts := #{request_ttl := infinity | emqx_schema:duration_ms(), any() => term()},
service_account_json := emqx_bridge_gcp_pubsub_client:service_account_json(),
any() => term()
pubsub_topic := binary()
},
resource_opts := #{request_ttl := infinity | emqx_schema:duration_ms(), any() => term()}
}.
-type state() :: #{
attributes_template := #{emqx_placeholder:tmpl_token() => emqx_placeholder:tmpl_token()},
-type connector_state() :: #{
client := emqx_bridge_gcp_pubsub_client:state(),
installed_actions := #{action_resource_id() => action_state()},
project_id := emqx_bridge_gcp_pubsub_client:project_id()
}.
-type action_state() :: #{
attributes_template := #{emqx_placeholder:tmpl_token() => emqx_placeholder:tmpl_token()},
ordering_key_template := emqx_placeholder:tmpl_token(),
payload_template := emqx_placeholder:tmpl_token(),
project_id := emqx_bridge_gcp_pubsub_client:project_id(),
pubsub_topic := binary()
}.
-type headers() :: emqx_bridge_gcp_pubsub_client:headers().
@ -41,7 +48,11 @@
on_query_async/4,
on_batch_query/3,
on_batch_query_async/4,
on_get_status/2
on_get_status/2,
on_add_channel/4,
on_remove_channel/3,
on_get_channels/1,
on_get_channel_status/3
]).
-export([reply_delegator/2]).
@ -54,53 +65,45 @@ callback_mode() -> async_if_possible.
query_mode(_Config) -> async.
-spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}.
-spec on_start(connector_resource_id(), connector_config()) ->
{ok, connector_state()} | {error, term()}.
on_start(InstanceId, Config0) ->
?SLOG(info, #{
msg => "starting_gcp_pubsub_bridge",
config => Config0
}),
Config = maps:update_with(service_account_json, fun emqx_utils_maps:binary_key_map/1, Config0),
#{
attributes_template := AttributesTemplate,
ordering_key_template := OrderingKeyTemplate,
payload_template := PayloadTemplate,
pubsub_topic := PubSubTopic,
service_account_json := #{<<"project_id">> := ProjectId}
} = Config,
#{service_account_json := #{<<"project_id">> := ProjectId}} = Config,
case emqx_bridge_gcp_pubsub_client:start(InstanceId, Config) of
{ok, Client} ->
State = #{
client => Client,
attributes_template => preproc_attributes(AttributesTemplate),
ordering_key_template => emqx_placeholder:preproc_tmpl(OrderingKeyTemplate),
payload_template => emqx_placeholder:preproc_tmpl(PayloadTemplate),
project_id => ProjectId,
pubsub_topic => PubSubTopic
installed_actions => #{},
project_id => ProjectId
},
{ok, State};
Error ->
Error
end.
-spec on_stop(resource_id(), state()) -> ok | {error, term()}.
-spec on_stop(connector_resource_id(), connector_state()) -> ok | {error, term()}.
on_stop(InstanceId, _State) ->
emqx_bridge_gcp_pubsub_client:stop(InstanceId).
-spec on_get_status(resource_id(), state()) -> connected | disconnected.
-spec on_get_status(connector_resource_id(), connector_state()) -> connected | disconnected.
on_get_status(_InstanceId, #{client := Client} = _State) ->
emqx_bridge_gcp_pubsub_client:get_status(Client).
-spec on_query(
resource_id(),
{send_message, map()},
state()
connector_resource_id(),
{message_tag(), map()},
connector_state()
) ->
{ok, map()}
| {error, {recoverable_error, term()}}
| {error, term()}.
on_query(ResourceId, {send_message, Selected}, State) ->
Requests = [{send_message, Selected}],
on_query(ResourceId, {MessageTag, Selected}, State) ->
Requests = [{MessageTag, Selected}],
?TRACE(
"QUERY_SYNC",
"gcp_pubsub_received",
@ -109,24 +112,25 @@ on_query(ResourceId, {send_message, Selected}, State) ->
do_send_requests_sync(State, Requests, ResourceId).
-spec on_query_async(
resource_id(),
{send_message, map()},
connector_resource_id(),
{message_tag(), map()},
{ReplyFun :: function(), Args :: list()},
state()
connector_state()
) -> {ok, pid()} | {error, no_pool_worker_available}.
on_query_async(ResourceId, {send_message, Selected}, ReplyFunAndArgs, State) ->
Requests = [{send_message, Selected}],
on_query_async(ResourceId, {MessageTag, Selected}, ReplyFunAndArgs, State) ->
Requests = [{MessageTag, Selected}],
?TRACE(
"QUERY_ASYNC",
"gcp_pubsub_received",
#{requests => Requests, connector => ResourceId, state => State}
),
?tp(gcp_pubsub_producer_async, #{instance_id => ResourceId, requests => Requests}),
do_send_requests_async(State, Requests, ReplyFunAndArgs).
-spec on_batch_query(
resource_id(),
[{send_message, map()}],
state()
connector_resource_id(),
[{message_tag(), map()}],
connector_state()
) ->
{ok, map()}
| {error, {recoverable_error, term()}}
@ -140,10 +144,10 @@ on_batch_query(ResourceId, Requests, State) ->
do_send_requests_sync(State, Requests, ResourceId).
-spec on_batch_query_async(
resource_id(),
[{send_message, map()}],
connector_resource_id(),
[{message_tag(), map()}],
{ReplyFun :: function(), Args :: list()},
state()
connector_state()
) -> {ok, pid()} | {error, no_pool_worker_available}.
on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) ->
?TRACE(
@ -151,32 +155,92 @@ on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) ->
"gcp_pubsub_received",
#{requests => Requests, connector => ResourceId, state => State}
),
?tp(gcp_pubsub_producer_async, #{instance_id => ResourceId, requests => Requests}),
do_send_requests_async(State, Requests, ReplyFunAndArgs).
-spec on_add_channel(
connector_resource_id(),
connector_state(),
action_resource_id(),
action_config()
) ->
{ok, connector_state()}.
on_add_channel(_ConnectorResId, ConnectorState0, ActionId, ActionConfig) ->
#{installed_actions := InstalledActions0} = ConnectorState0,
ChannelState = install_channel(ActionConfig),
InstalledActions = InstalledActions0#{ActionId => ChannelState},
ConnectorState = ConnectorState0#{installed_actions := InstalledActions},
{ok, ConnectorState}.
-spec on_remove_channel(
connector_resource_id(),
connector_state(),
action_resource_id()
) ->
{ok, connector_state()}.
on_remove_channel(_ConnectorResId, ConnectorState0, ActionId) ->
#{installed_actions := InstalledActions0} = ConnectorState0,
InstalledActions = maps:remove(ActionId, InstalledActions0),
ConnectorState = ConnectorState0#{installed_actions := InstalledActions},
{ok, ConnectorState}.
-spec on_get_channels(connector_resource_id()) ->
[{action_resource_id(), action_config()}].
on_get_channels(ConnectorResId) ->
emqx_bridge_v2:get_channels_for_connector(ConnectorResId).
-spec on_get_channel_status(connector_resource_id(), action_resource_id(), connector_state()) ->
health_check_status().
on_get_channel_status(_ConnectorResId, _ChannelId, _ConnectorState) ->
%% Should we check the underlying client? Same as on_get_status?
?status_connected.
%%-------------------------------------------------------------------------------------------------
%% Helper fns
%%-------------------------------------------------------------------------------------------------
%% TODO: check if topic exists ("unhealthy target")
install_channel(ActionConfig) ->
#{
parameters := #{
attributes_template := AttributesTemplate,
ordering_key_template := OrderingKeyTemplate,
payload_template := PayloadTemplate,
pubsub_topic := PubSubTopic
}
} = ActionConfig,
#{
attributes_template => preproc_attributes(AttributesTemplate),
ordering_key_template => emqx_placeholder:preproc_tmpl(OrderingKeyTemplate),
payload_template => emqx_placeholder:preproc_tmpl(PayloadTemplate),
pubsub_topic => PubSubTopic
}.
-spec do_send_requests_sync(
state(),
[{send_message, map()}],
connector_state(),
[{message_tag(), map()}],
resource_id()
) ->
{ok, status_code(), headers()}
| {ok, status_code(), headers(), body()}
| {error, {recoverable_error, term()}}
| {error, term()}.
do_send_requests_sync(State, Requests, InstanceId) ->
#{client := Client} = State,
do_send_requests_sync(ConnectorState, Requests, InstanceId) ->
?tp(gcp_pubsub_producer_sync, #{instance_id => InstanceId, requests => Requests}),
#{client := Client} = ConnectorState,
%% is it safe to assume the tag is the same??? And not empty???
[{MessageTag, _} | _] = Requests,
#{installed_actions := InstalledActions} = ConnectorState,
ChannelState = maps:get(MessageTag, InstalledActions),
Payloads =
lists:map(
fun({send_message, Selected}) ->
encode_payload(State, Selected)
fun({_MessageTag, Selected}) ->
encode_payload(ChannelState, Selected)
end,
Requests
),
Body = to_pubsub_request(Payloads),
Path = publish_path(State),
Path = publish_path(ConnectorState, ChannelState),
Method = post,
Request = {prepared_request, {Method, Path, Body}},
Result = emqx_bridge_gcp_pubsub_client:query_sync(Request, Client),
@ -184,21 +248,25 @@ do_send_requests_sync(State, Requests, InstanceId) ->
handle_result(Result, Request, QueryMode, InstanceId).
-spec do_send_requests_async(
state(),
[{send_message, map()}],
connector_state(),
[{message_tag(), map()}],
{ReplyFun :: function(), Args :: list()}
) -> {ok, pid()} | {error, no_pool_worker_available}.
do_send_requests_async(State, Requests, ReplyFunAndArgs0) ->
#{client := Client} = State,
do_send_requests_async(ConnectorState, Requests, ReplyFunAndArgs0) ->
#{client := Client} = ConnectorState,
%% is it safe to assume the tag is the same??? And not empty???
[{MessageTag, _} | _] = Requests,
#{installed_actions := InstalledActions} = ConnectorState,
ChannelState = maps:get(MessageTag, InstalledActions),
Payloads =
lists:map(
fun({send_message, Selected}) ->
encode_payload(State, Selected)
fun({_MessageTag, Selected}) ->
encode_payload(ChannelState, Selected)
end,
Requests
),
Body = to_pubsub_request(Payloads),
Path = publish_path(State),
Path = publish_path(ConnectorState, ChannelState),
Method = post,
Request = {prepared_request, {Method, Path, Body}},
ReplyFunAndArgs = {fun ?MODULE:reply_delegator/2, [ReplyFunAndArgs0]},
@ -206,18 +274,18 @@ do_send_requests_async(State, Requests, ReplyFunAndArgs0) ->
Request, ReplyFunAndArgs, Client
).
-spec encode_payload(state(), Selected :: map()) ->
-spec encode_payload(action_state(), Selected :: map()) ->
#{
data := binary(),
attributes => #{binary() => binary()},
'orderingKey' => binary()
}.
encode_payload(State, Selected) ->
encode_payload(ActionState, Selected) ->
#{
attributes_template := AttributesTemplate,
ordering_key_template := OrderingKeyTemplate,
payload_template := PayloadTemplate
} = State,
} = ActionState,
Data = render_payload(PayloadTemplate, Selected),
OrderingKey = render_key(OrderingKeyTemplate, Selected),
Attributes = proc_attributes(AttributesTemplate, Selected),
@ -307,13 +375,8 @@ proc_attributes(AttributesTemplate, Selected) ->
to_pubsub_request(Payloads) ->
emqx_utils_json:encode(#{messages => Payloads}).
-spec publish_path(state()) -> binary().
publish_path(
_State = #{
project_id := ProjectId,
pubsub_topic := PubSubTopic
}
) ->
-spec publish_path(connector_state(), action_state()) -> binary().
publish_path(#{project_id := ProjectId}, #{pubsub_topic := PubSubTopic}) ->
<<"/v1/projects/", ProjectId/binary, "/topics/", PubSubTopic/binary, ":publish">>.
handle_result({error, Reason}, _Request, QueryMode, ResourceId) when

View File

@ -0,0 +1,46 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_gcp_pubsub_producer_action_info).
-behaviour(emqx_action_info).
-export([
bridge_v1_type_name/0,
action_type_name/0,
connector_type_name/0,
schema_module/0,
bridge_v1_config_to_action_config/2
]).
bridge_v1_type_name() -> gcp_pubsub.
action_type_name() -> gcp_pubsub_producer.
connector_type_name() -> gcp_pubsub_producer.
schema_module() -> emqx_bridge_gcp_pubsub_producer_schema.
bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) ->
CommonActionKeys = emqx_bridge_v2_schema:top_level_common_action_keys(),
ParamsKeys = producer_action_parameters_field_keys(),
Config1 = maps:with(CommonActionKeys, BridgeV1Config),
Params = maps:with(ParamsKeys, BridgeV1Config),
Config1#{
<<"connector">> => ConnectorName,
<<"parameters">> => Params
}.
%%------------------------------------------------------------------------------------------
%% Internal helper fns
%%------------------------------------------------------------------------------------------
producer_action_parameters_field_keys() ->
[
to_bin(K)
|| {K, _} <- emqx_bridge_gcp_pubsub_producer_schema:fields(action_parameters)
].
to_bin(L) when is_list(L) -> list_to_binary(L);
to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8).

View File

@ -0,0 +1,232 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_gcp_pubsub_producer_schema).
-import(hoconsc, [mk/2, ref/2]).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
%% `hocon_schema' API
-export([
namespace/0,
roots/0,
fields/1,
desc/1
]).
%% `emqx_bridge_v2_schema' "unofficial" API
-export([
bridge_v2_examples/1,
conn_bridge_examples/1,
connector_examples/1
]).
%%-------------------------------------------------------------------------------------------------
%% `hocon_schema' API
%%-------------------------------------------------------------------------------------------------
namespace() ->
"gcp_pubsub_producer".
roots() ->
[].
%%=========================================
%% Action fields
%%=========================================
fields(action) ->
{gcp_pubsub_producer,
mk(
hoconsc:map(name, ref(?MODULE, producer_action)),
#{
desc => <<"GCP PubSub Producer Action Config">>,
required => false
}
)};
fields(producer_action) ->
emqx_bridge_v2_schema:make_producer_action_schema(
mk(
ref(?MODULE, action_parameters),
#{
required => true,
desc => ?DESC(producer_action)
}
)
);
fields(action_parameters) ->
UnsupportedFields = [local_topic],
lists:filter(
fun({Key, _Schema}) -> not lists:member(Key, UnsupportedFields) end,
emqx_bridge_gcp_pubsub:fields(producer)
);
%%=========================================
%% Connector fields
%%=========================================
fields("config_connector") ->
%% FIXME
emqx_connector_schema:common_fields() ++
emqx_bridge_gcp_pubsub:fields(connector_config) ++
emqx_resource_schema:fields("resource_opts");
%%=========================================
%% HTTP API fields: action
%%=========================================
fields("get_bridge_v2") ->
emqx_bridge_schema:status_fields() ++ fields("post_bridge_v2");
fields("post_bridge_v2") ->
[type_field(), name_field() | fields("put_bridge_v2")];
fields("put_bridge_v2") ->
fields(producer_action);
%%=========================================
%% HTTP API fields: connector
%%=========================================
fields("get_connector") ->
emqx_bridge_schema:status_fields() ++ fields("post_connector");
fields("post_connector") ->
[type_field(), name_field() | fields("put_connector")];
fields("put_connector") ->
fields("config_connector").
desc("config_connector") ->
?DESC("config_connector");
desc(action_parameters) ->
?DESC(action_parameters);
desc(producer_action) ->
?DESC(producer_action);
desc(_Name) ->
undefined.
type_field() ->
{type, mk(gcp_pubsub_producer, #{required => true, desc => ?DESC("desc_type")})}.
name_field() ->
{name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
%%-------------------------------------------------------------------------------------------------
%% `emqx_bridge_v2_schema' "unofficial" API
%%-------------------------------------------------------------------------------------------------
bridge_v2_examples(Method) ->
[
#{
<<"gcp_pubsub_producer">> => #{
summary => <<"GCP PubSub Producer Action">>,
value => action_example(Method)
}
}
].
connector_examples(Method) ->
[
#{
<<"gcp_pubsub_producer">> => #{
summary => <<"GCP PubSub Producer Connector">>,
value => connector_example(Method)
}
}
].
conn_bridge_examples(Method) ->
emqx_bridge_gcp_pubsub:conn_bridge_examples(Method).
action_example(post) ->
maps:merge(
action_example(put),
#{
type => <<"gcp_pubsub_producer">>,
name => <<"my_action">>
}
);
action_example(get) ->
maps:merge(
action_example(put),
#{
status => <<"connected">>,
node_status => [
#{
node => <<"emqx@localhost">>,
status => <<"connected">>
}
]
}
);
action_example(put) ->
#{
enable => true,
connector => <<"my_connector_name">>,
description => <<"My action">>,
local_topic => <<"local/topic">>,
resource_opts =>
#{batch_size => 5},
parameters =>
#{
pubsub_topic => <<"mytopic">>,
ordering_key_template => <<"${payload.ok}">>,
payload_template => <<"${payload}">>,
attributes_template =>
[
#{
key => <<"${payload.attrs.k}">>,
value => <<"${payload.attrs.v}">>
}
]
}
}.
connector_example(get) ->
maps:merge(
connector_example(put),
#{
status => <<"connected">>,
node_status => [
#{
node => <<"emqx@localhost">>,
status => <<"connected">>
}
]
}
);
connector_example(post) ->
maps:merge(
connector_example(put),
#{
type => <<"gcp_pubsub_producer">>,
name => <<"my_connector">>
}
);
connector_example(put) ->
#{
enable => true,
connect_timeout => <<"10s">>,
pool_size => 8,
pipelining => 100,
max_retries => 2,
resource_opts => #{request_ttl => <<"60s">>},
service_account_json =>
#{
auth_provider_x509_cert_url =>
<<"https://www.googleapis.com/oauth2/v1/certs">>,
auth_uri =>
<<"https://accounts.google.com/o/oauth2/auth">>,
client_email =>
<<"test@myproject.iam.gserviceaccount.com">>,
client_id => <<"123812831923812319190">>,
client_x509_cert_url =>
<<
"https://www.googleapis.com/robot/v1/"
"metadata/x509/test%40myproject.iam.gserviceaccount.com"
>>,
private_key =>
<<
"-----BEGIN PRIVATE KEY-----\n"
"MIIEvQI..."
>>,
private_key_id => <<"kid">>,
project_id => <<"myproject">>,
token_uri =>
<<"https://oauth2.googleapis.com/token">>,
type => <<"service_account">>
}
}.

View File

@ -34,16 +34,22 @@ init_per_suite(Config) ->
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
case emqx_common_test_helpers:is_tcp_server_available(GCPEmulatorHost, GCPEmulatorPort) of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_conf]),
ok = emqx_connector_test_helpers:start_apps([
emqx_resource, emqx_bridge, emqx_rule_engine
]),
{ok, _} = application:ensure_all_started(emqx_connector),
Apps = emqx_cth_suite:start(
[
emqx,
emqx_conf,
emqx_bridge_gcp_pubsub,
emqx_bridge,
emqx_rule_engine
],
#{work_dir => emqx_cth_suite:work_dir(Config)}
),
emqx_mgmt_api_test_util:init_suite(),
HostPort = GCPEmulatorHost ++ ":" ++ GCPEmulatorPortStr,
true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort),
Client = start_control_client(),
[
{apps, Apps},
{proxy_name, ProxyName},
{proxy_host, ProxyHost},
{proxy_port, ProxyPort},
@ -62,12 +68,11 @@ init_per_suite(Config) ->
end.
end_per_suite(Config) ->
Apps = ?config(apps, Config),
Client = ?config(client, Config),
stop_control_client(Client),
emqx_mgmt_api_test_util:end_suite(),
ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]),
_ = application:stop(emqx_connector),
emqx_cth_suite:stop(Apps),
os:unsetenv("PUBSUB_EMULATOR_HOST"),
ok.
@ -1472,7 +1477,7 @@ t_pull_worker_death(Config) ->
[PullWorkerPid | _] = get_pull_worker_pids(Config),
Ref = monitor(process, PullWorkerPid),
sys:terminate(PullWorkerPid, die),
sys:terminate(PullWorkerPid, die, 20_000),
receive
{'DOWN', Ref, process, PullWorkerPid, _} ->
ok
@ -1494,10 +1499,11 @@ t_pull_worker_death(Config) ->
ok.
t_async_worker_death_mid_pull(Config) ->
ct:timetrap({seconds, 120}),
ct:timetrap({seconds, 122}),
[#{pubsub_topic := PubSubTopic}] = ?config(topic_mapping, Config),
Payload = emqx_guid:to_hexstr(emqx_guid:gen()),
?check_trace(
#{timetrap => 120_000},
begin
start_and_subscribe_mqtt(Config),
@ -1513,23 +1519,28 @@ t_async_worker_death_mid_pull(Config) ->
#{?snk_kind := gcp_pubsub_consumer_worker_reply_delegator}
),
spawn_link(fun() ->
ct:pal("will kill async workers"),
?tp_span(
kill_async_worker,
#{},
begin
%% produce a message while worker is being killed
Messages = [#{<<"data">> => Payload}],
ct:pal("publishing message"),
pubsub_publish(Config, PubSubTopic, Messages),
ct:pal("published message"),
AsyncWorkerPids = get_async_worker_pids(Config),
emqx_utils:pmap(
fun(AsyncWorkerPid) ->
Ref = monitor(process, AsyncWorkerPid),
sys:terminate(AsyncWorkerPid, die),
ct:pal("killing pid ~p", [AsyncWorkerPid]),
sys:terminate(AsyncWorkerPid, die, 20_000),
receive
{'DOWN', Ref, process, AsyncWorkerPid, _} ->
ct:pal("killed pid ~p", [AsyncWorkerPid]),
ok
after 500 -> ct:fail("async worker didn't die")
after 500 -> ct:fail("async worker ~p didn't die", [AsyncWorkerPid])
end,
ok
end,
@ -1538,7 +1549,8 @@ t_async_worker_death_mid_pull(Config) ->
ok
end
)
),
ct:pal("killed async workers")
end),
?assertMatch(

View File

@ -13,8 +13,12 @@
-include_lib("jose/include/jose_jwt.hrl").
-include_lib("jose/include/jose_jws.hrl").
-define(BRIDGE_TYPE, gcp_pubsub).
-define(BRIDGE_TYPE_BIN, <<"gcp_pubsub">>).
-define(ACTION_TYPE, gcp_pubsub_producer).
-define(ACTION_TYPE_BIN, <<"gcp_pubsub_producer">>).
-define(CONNECTOR_TYPE, gcp_pubsub_producer).
-define(CONNECTOR_TYPE_BIN, <<"gcp_pubsub_producer">>).
-define(BRIDGE_V1_TYPE, gcp_pubsub).
-define(BRIDGE_V1_TYPE_BIN, <<"gcp_pubsub">>).
-import(emqx_common_test_helpers, [on_exit/1]).
@ -141,19 +145,24 @@ end_per_testcase(_TestCase, _Config) ->
generate_config(Config0) ->
#{
name := Name,
name := ActionName,
config_string := ConfigString,
pubsub_config := PubSubConfig,
service_account_json := ServiceAccountJSON
} = gcp_pubsub_config(Config0),
ResourceId = emqx_bridge_resource:resource_id(?BRIDGE_TYPE_BIN, Name),
BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, Name),
%% FIXME
%% `emqx_bridge_resource:resource_id' requires an existing connector in the config...
ConnectorName = <<"connector_", ActionName/binary>>,
ConnectorResourceId = <<"connector:", ?CONNECTOR_TYPE_BIN/binary, ":", ConnectorName/binary>>,
ActionResourceId = emqx_bridge_v2:id(?ACTION_TYPE_BIN, ActionName, ConnectorName),
BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_V1_TYPE_BIN, ActionName),
[
{gcp_pubsub_name, Name},
{gcp_pubsub_name, ActionName},
{gcp_pubsub_config, PubSubConfig},
{gcp_pubsub_config_string, ConfigString},
{service_account_json, ServiceAccountJSON},
{resource_id, ResourceId},
{connector_resource_id, ConnectorResourceId},
{action_resource_id, ActionResourceId},
{bridge_id, BridgeId}
| Config0
].
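For orientation, a minimal sketch of the ids the helper above produces, assuming a hypothetical action named <<"my_action">>:
%% Hypothetical action name, for illustration only.
ActionName = <<"my_action">>,
ConnectorName = <<"connector_", ActionName/binary>>,
%% Mirrors the manual construction above (see the FIXME note).
ConnectorResourceId = <<"connector:gcp_pubsub_producer:", ConnectorName/binary>>.
%% => <<"connector:gcp_pubsub_producer:connector_my_action">>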
@ -168,7 +177,7 @@ delete_all_bridges() ->
).
delete_bridge(Config) ->
Type = ?BRIDGE_TYPE,
Type = ?BRIDGE_V1_TYPE,
Name = ?config(gcp_pubsub_name, Config),
ct:pal("deleting bridge ~p", [{Type, Name}]),
emqx_bridge:remove(Type, Name).
@ -177,7 +186,7 @@ create_bridge(Config) ->
create_bridge(Config, _GCPPubSubConfigOverrides = #{}).
create_bridge(Config, GCPPubSubConfigOverrides) ->
TypeBin = ?BRIDGE_TYPE_BIN,
TypeBin = ?BRIDGE_V1_TYPE_BIN,
Name = ?config(gcp_pubsub_name, Config),
GCPPubSubConfig0 = ?config(gcp_pubsub_config, Config),
GCPPubSubConfig = emqx_utils_maps:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides),
@ -190,7 +199,7 @@ create_bridge_http(Config) ->
create_bridge_http(Config, _GCPPubSubConfigOverrides = #{}).
create_bridge_http(Config, GCPPubSubConfigOverrides) ->
TypeBin = ?BRIDGE_TYPE_BIN,
TypeBin = ?BRIDGE_V1_TYPE_BIN,
Name = ?config(gcp_pubsub_name, Config),
GCPPubSubConfig0 = ?config(gcp_pubsub_config, Config),
GCPPubSubConfig = emqx_utils_maps:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides),
@ -225,7 +234,7 @@ create_bridge_http(Config, GCPPubSubConfigOverrides) ->
create_rule_and_action_http(Config) ->
GCPPubSubName = ?config(gcp_pubsub_name, Config),
BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, GCPPubSubName),
BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_V1_TYPE_BIN, GCPPubSubName),
Params = #{
enable => true,
sql => <<"SELECT * FROM \"t/topic\"">>,
@ -382,9 +391,14 @@ assert_metrics(ExpectedMetrics, ResourceId) ->
CurrentMetrics = current_metrics(ResourceId),
TelemetryTable = get(telemetry_table),
RecordedEvents = ets:tab2list(TelemetryTable),
?retry(
_Sleep0 = 300,
_Attempts = 20,
?assertEqual(ExpectedMetrics, Metrics, #{
current_metrics => CurrentMetrics, recorded_events => RecordedEvents
}),
current_metrics => CurrentMetrics,
recorded_events => RecordedEvents
})
),
ok.
assert_empty_metrics(ResourceId) ->
@ -535,8 +549,30 @@ install_telemetry_handler(TestCase) ->
end),
Tid.
mk_res_id_filter(ResourceId) ->
fun(Event) ->
case Event of
#{metadata := #{resource_id := ResId}} when ResId =:= ResourceId ->
true;
_ ->
false
end
end.
wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) ->
Events = receive_all_events(GaugeName, Timeout),
wait_until_gauge_is(#{
gauge_name => GaugeName,
expected => ExpectedValue,
timeout => Timeout
}).
wait_until_gauge_is(#{} = Opts) ->
GaugeName = maps:get(gauge_name, Opts),
ExpectedValue = maps:get(expected, Opts),
Timeout = maps:get(timeout, Opts),
MaxEvents = maps:get(max_events, Opts, 10),
FilterFn = maps:get(filter_fn, Opts, fun(_Event) -> true end),
Events = receive_all_events(GaugeName, Timeout, MaxEvents, FilterFn),
case length(Events) > 0 andalso lists:last(Events) of
#{measurements := #{gauge_set := ExpectedValue}} ->
ok;
@ -550,15 +586,36 @@ wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) ->
ct:pal("no ~p gauge events received!", [GaugeName])
end.
receive_all_events(EventName, Timeout) ->
receive_all_events(EventName, Timeout, _MaxEvents = 10, _Count = 0, _Acc = []).
receive_all_events(EventName, Timeout, MaxEvents, FilterFn) ->
receive_all_events(EventName, Timeout, MaxEvents, FilterFn, _Count = 0, _Acc = []).
receive_all_events(_EventName, _Timeout, MaxEvents, Count, Acc) when Count >= MaxEvents ->
receive_all_events(_EventName, _Timeout, MaxEvents, _FilterFn, Count, Acc) when
Count >= MaxEvents
->
lists:reverse(Acc);
receive_all_events(EventName, Timeout, MaxEvents, Count, Acc) ->
receive_all_events(EventName, Timeout, MaxEvents, FilterFn, Count, Acc) ->
receive
{telemetry, #{name := [_, _, EventName]} = Event} ->
receive_all_events(EventName, Timeout, MaxEvents, Count + 1, [Event | Acc])
case FilterFn(Event) of
true ->
receive_all_events(
EventName,
Timeout,
MaxEvents,
FilterFn,
Count + 1,
[Event | Acc]
);
false ->
receive_all_events(
EventName,
Timeout,
MaxEvents,
FilterFn,
Count,
Acc
)
end
after Timeout ->
lists:reverse(Acc)
end.
@ -597,14 +654,14 @@ wait_n_events(TelemetryTable, ResourceId, NEvents, Timeout, EventName) ->
%%------------------------------------------------------------------------------
t_publish_success(Config) ->
ResourceId = ?config(resource_id, Config),
ActionResourceId = ?config(action_resource_id, Config),
ServiceAccountJSON = ?config(service_account_json, Config),
TelemetryTable = ?config(telemetry_table, Config),
Topic = <<"t/topic">>,
?assertMatch({ok, _}, create_bridge(Config)),
{ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
assert_empty_metrics(ResourceId),
assert_empty_metrics(ActionResourceId),
Payload = <<"payload">>,
Message = emqx_message:make(Topic, Payload),
emqx:publish(Message),
@ -620,7 +677,7 @@ t_publish_success(Config) ->
DecodedMessages
),
%% to avoid test flakiness
wait_telemetry_event(TelemetryTable, success, ResourceId),
wait_telemetry_event(TelemetryTable, success, ActionResourceId),
wait_until_gauge_is(queuing, 0, 500),
wait_until_gauge_is(inflight, 0, 500),
assert_metrics(
@ -633,7 +690,7 @@ t_publish_success(Config) ->
retried => 0,
success => 1
},
ResourceId
ActionResourceId
),
ok.
@ -662,12 +719,12 @@ t_publish_success_infinity_timeout(Config) ->
ok.
t_publish_success_local_topic(Config) ->
ResourceId = ?config(resource_id, Config),
ActionResourceId = ?config(action_resource_id, Config),
ServiceAccountJSON = ?config(service_account_json, Config),
TelemetryTable = ?config(telemetry_table, Config),
LocalTopic = <<"local/topic">>,
{ok, _} = create_bridge(Config, #{<<"local_topic">> => LocalTopic}),
assert_empty_metrics(ResourceId),
assert_empty_metrics(ActionResourceId),
Payload = <<"payload">>,
Message = emqx_message:make(LocalTopic, Payload),
emqx:publish(Message),
@ -682,7 +739,7 @@ t_publish_success_local_topic(Config) ->
DecodedMessages
),
%% to avoid test flakiness
wait_telemetry_event(TelemetryTable, success, ResourceId),
wait_telemetry_event(TelemetryTable, success, ActionResourceId),
wait_until_gauge_is(queuing, 0, 500),
wait_until_gauge_is(inflight, 0, 500),
assert_metrics(
@ -695,7 +752,7 @@ t_publish_success_local_topic(Config) ->
retried => 0,
success => 1
},
ResourceId
ActionResourceId
),
ok.
@ -704,7 +761,7 @@ t_create_via_http(Config) ->
ok.
t_publish_templated(Config) ->
ResourceId = ?config(resource_id, Config),
ActionResourceId = ?config(action_resource_id, Config),
ServiceAccountJSON = ?config(service_account_json, Config),
TelemetryTable = ?config(telemetry_table, Config),
Topic = <<"t/topic">>,
@ -721,7 +778,7 @@ t_publish_templated(Config) ->
),
{ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
assert_empty_metrics(ResourceId),
assert_empty_metrics(ActionResourceId),
Payload = <<"payload">>,
Message =
emqx_message:set_header(
@ -747,7 +804,7 @@ t_publish_templated(Config) ->
DecodedMessages
),
%% to avoid test flakiness
wait_telemetry_event(TelemetryTable, success, ResourceId),
wait_telemetry_event(TelemetryTable, success, ActionResourceId),
wait_until_gauge_is(queuing, 0, 500),
wait_until_gauge_is(inflight, 0, 500),
assert_metrics(
@ -760,7 +817,7 @@ t_publish_templated(Config) ->
retried => 0,
success => 1
},
ResourceId
ActionResourceId
),
ok.
@ -774,7 +831,7 @@ t_publish_success_batch(Config) ->
end.
test_publish_success_batch(Config) ->
ResourceId = ?config(resource_id, Config),
ActionResourceId = ?config(action_resource_id, Config),
ServiceAccountJSON = ?config(service_account_json, Config),
TelemetryTable = ?config(telemetry_table, Config),
Topic = <<"t/topic">>,
@ -796,7 +853,7 @@ test_publish_success_batch(Config) ->
),
{ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
assert_empty_metrics(ResourceId),
assert_empty_metrics(ActionResourceId),
NumMessages = BatchSize * 2,
Messages = [emqx_message:make(Topic, integer_to_binary(N)) || N <- lists:seq(1, NumMessages)],
%% publish in parallel to avoid each client blocking and then
@ -822,7 +879,7 @@ test_publish_success_batch(Config) ->
wait_telemetry_event(
TelemetryTable,
success,
ResourceId,
ActionResourceId,
#{timeout => 15_000, n_events => NumMessages}
),
wait_until_gauge_is(queuing, 0, _Timeout = 400),
@ -837,7 +894,7 @@ test_publish_success_batch(Config) ->
retried => 0,
success => NumMessages
},
ResourceId
ActionResourceId
),
ok.
@ -1045,7 +1102,7 @@ t_jose_other_error(Config) ->
fun(Res, Trace) ->
?assertMatch({ok, _}, Res),
?assertMatch(
[#{error := {invalid_private_key, {unknown, error}}}],
[#{error := {invalid_private_key, {unknown, error}}} | _],
?of_kind(gcp_pubsub_connector_startup_error, Trace)
),
ok
@ -1054,7 +1111,7 @@ t_jose_other_error(Config) ->
ok.
t_publish_econnrefused(Config) ->
ResourceId = ?config(resource_id, Config),
ResourceId = ?config(connector_resource_id, Config),
%% set pipelining to 1 so that one of the 2 requests is `pending'
%% in ehttpc.
{ok, _} = create_bridge(
@ -1071,7 +1128,7 @@ t_publish_econnrefused(Config) ->
do_econnrefused_or_timeout_test(Config, econnrefused).
t_publish_timeout(Config) ->
ResourceId = ?config(resource_id, Config),
ActionResourceId = ?config(action_resource_id, Config),
%% set pipelining to 1 so that one of the 2 requests is `pending'
%% in ehttpc. We also set the batch size to 1 to ensure the
%% requests are done separately.
@ -1079,12 +1136,13 @@ t_publish_timeout(Config) ->
<<"pipelining">> => 1,
<<"resource_opts">> => #{
<<"batch_size">> => 1,
<<"resume_interval">> => <<"1s">>
<<"resume_interval">> => <<"1s">>,
<<"metrics_flush_interval">> => <<"700ms">>
}
}),
{ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
assert_empty_metrics(ResourceId),
assert_empty_metrics(ActionResourceId),
TestPid = self(),
TimeoutHandler =
fun(Req0, State) ->
@ -1107,7 +1165,8 @@ t_publish_timeout(Config) ->
do_econnrefused_or_timeout_test(Config, timeout).
do_econnrefused_or_timeout_test(Config, Error) ->
ResourceId = ?config(resource_id, Config),
ActionResourceId = ?config(action_resource_id, Config),
ConnectorResourceId = ?config(connector_resource_id, Config),
TelemetryTable = ?config(telemetry_table, Config),
Topic = <<"t/topic">>,
Payload = <<"payload">>,
@ -1156,9 +1215,9 @@ do_econnrefused_or_timeout_test(Config, Error) ->
case Error of
econnrefused ->
case ?of_kind(gcp_pubsub_request_failed, Trace) of
[#{reason := Error, connector := ResourceId} | _] ->
[#{reason := Error, connector := ConnectorResourceId} | _] ->
ok;
[#{reason := {closed, _Msg}, connector := ResourceId} | _] ->
[#{reason := {closed, _Msg}, connector := ConnectorResourceId} | _] ->
%% _Msg = "The connection was lost."
ok;
Trace0 ->
@ -1182,7 +1241,7 @@ do_econnrefused_or_timeout_test(Config, Error) ->
%% even waiting, hard to avoid flakiness... simpler to just sleep
%% a bit until stabilization.
ct:sleep(200),
CurrentMetrics = current_metrics(ResourceId),
CurrentMetrics = current_metrics(ActionResourceId),
RecordedEvents = ets:tab2list(TelemetryTable),
ct:pal("telemetry events: ~p", [RecordedEvents]),
?assertMatch(
@ -1198,7 +1257,19 @@ do_econnrefused_or_timeout_test(Config, Error) ->
CurrentMetrics
);
timeout ->
wait_until_gauge_is(inflight, 0, _Timeout = 1_000),
wait_telemetry_event(
TelemetryTable,
late_reply,
ActionResourceId,
#{timeout => 5_000, n_events => 2}
),
wait_until_gauge_is(#{
gauge_name => inflight,
expected => 0,
filter_fn => mk_res_id_filter(ActionResourceId),
timeout => 1_000,
max_events => 20
}),
wait_until_gauge_is(queuing, 0, _Timeout = 1_000),
assert_metrics(
#{
@ -1211,7 +1282,7 @@ do_econnrefused_or_timeout_test(Config, Error) ->
success => 0,
late_reply => 2
},
ResourceId
ActionResourceId
)
end,
@ -1334,7 +1405,8 @@ t_failure_no_body(Config) ->
ok.
t_unrecoverable_error(Config) ->
ResourceId = ?config(resource_id, Config),
ActionResourceId = ?config(action_resource_id, Config),
TelemetryTable = ?config(telemetry_table, Config),
TestPid = self(),
FailureNoBodyHandler =
fun(Req0, State) ->
@ -1358,7 +1430,7 @@ t_unrecoverable_error(Config) ->
ok = emqx_bridge_http_connector_test_server:set_handler(FailureNoBodyHandler),
Topic = <<"t/topic">>,
{ok, _} = create_bridge(Config),
assert_empty_metrics(ResourceId),
assert_empty_metrics(ActionResourceId),
{ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config),
on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end),
Payload = <<"payload">>,
@ -1386,6 +1458,7 @@ t_unrecoverable_error(Config) ->
%% removed, this inflight should be 1, because we retry if
%% the worker is killed.
wait_until_gauge_is(inflight, 0, _Timeout = 400),
wait_telemetry_event(TelemetryTable, failed, ActionResourceId),
assert_metrics(
#{
dropped => 0,
@ -1398,7 +1471,7 @@ t_unrecoverable_error(Config) ->
retried => 0,
success => 0
},
ResourceId
ActionResourceId
),
ok.
@ -1407,7 +1480,7 @@ t_stop(Config) ->
{ok, _} = create_bridge(Config),
?check_trace(
?wait_async_action(
emqx_bridge_resource:stop(?BRIDGE_TYPE, Name),
emqx_bridge_resource:stop(?BRIDGE_V1_TYPE, Name),
#{?snk_kind := gcp_pubsub_stop},
5_000
),
@ -1421,13 +1494,13 @@ t_stop(Config) ->
ok.
t_get_status_ok(Config) ->
ResourceId = ?config(resource_id, Config),
ResourceId = ?config(connector_resource_id, Config),
{ok, _} = create_bridge(Config),
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)),
ok.
t_get_status_no_worker(Config) ->
ResourceId = ?config(resource_id, Config),
ResourceId = ?config(connector_resource_id, Config),
{ok, _} = create_bridge(Config),
emqx_common_test_helpers:with_mock(
ehttpc,
@ -1441,7 +1514,7 @@ t_get_status_no_worker(Config) ->
ok.
t_get_status_down(Config) ->
ResourceId = ?config(resource_id, Config),
ResourceId = ?config(connector_resource_id, Config),
{ok, _} = create_bridge(Config),
emqx_common_test_helpers:with_mock(
ehttpc,
@ -1457,7 +1530,7 @@ t_get_status_down(Config) ->
ok.
t_get_status_timeout_calling_workers(Config) ->
ResourceId = ?config(resource_id, Config),
ResourceId = ?config(connector_resource_id, Config),
{ok, _} = create_bridge(Config),
emqx_common_test_helpers:with_mock(
ehttpc,
@ -1520,7 +1593,7 @@ t_on_start_ehttpc_pool_start_failure(Config) ->
),
fun(Trace) ->
?assertMatch(
[#{reason := some_error}],
[#{reason := some_error} | _],
?of_kind(gcp_pubsub_ehttpc_pool_start_failure, Trace)
),
ok
@ -1668,7 +1741,7 @@ t_attributes(Config) ->
),
%% ensure loading cluster override file doesn't mangle the attribute
%% placeholders...
#{<<"bridges">> := #{?BRIDGE_TYPE_BIN := #{Name := RawConf}}} =
#{<<"actions">> := #{?ACTION_TYPE_BIN := #{Name := RawConf}}} =
emqx_config:read_override_conf(#{override_to => cluster}),
?assertEqual(
[
@ -1689,7 +1762,7 @@ t_attributes(Config) ->
<<"value">> => <<"${.payload.value}">>
}
],
maps:get(<<"attributes_template">>, RawConf)
emqx_utils_maps:deep_get([<<"parameters">>, <<"attributes_template">>], RawConf)
),
ok
end,

View File

@ -54,7 +54,7 @@
%%=====================================================================
%% Hocon schema
namespace() -> "connector-http".
namespace() -> "connector_http".
roots() ->
fields(config).

View File

@ -12,7 +12,7 @@
brod,
brod_gssapi
]},
{env, [{emqx_action_info_module, emqx_bridge_kafka_action_info}]},
{env, [{emqx_action_info_modules, [emqx_bridge_kafka_action_info]}]},
{modules, []},
{links, []}

View File

@ -29,7 +29,7 @@
desc/1,
host_opts/0,
ssl_client_opts_fields/0,
producer_opts/0
producer_opts/1
]).
-export([
@ -112,16 +112,15 @@ values({put, connector}) ->
values({put, KafkaType}) ->
maps:merge(values(common_config), values(KafkaType));
values(bridge_v2_producer) ->
maps:merge(
#{
enable => true,
connector => <<"my_kafka_producer_connector">>,
parameters => values(producer_values),
local_topic => <<"mqtt/local/topic">>,
resource_opts => #{
health_check_interval => "32s"
}
},
values(producer)
);
};
values(common_config) ->
#{
authentication => #{
@ -143,7 +142,11 @@ values(common_config) ->
};
values(producer) ->
#{
kafka => #{
kafka => values(producer_values),
local_topic => <<"mqtt/local/topic">>
};
values(producer_values) ->
#{
topic => <<"kafka-topic">>,
message => #{
key => <<"${.clientid}">>,
@ -174,8 +177,6 @@ values(producer) ->
segment_bytes => <<"100MB">>,
memory_overload_protection => true
}
},
local_topic => <<"mqtt/local/topic">>
};
values(consumer) ->
#{
@ -261,7 +262,7 @@ fields("config_producer") ->
fields("config_consumer") ->
fields(kafka_consumer);
fields(kafka_producer) ->
connector_config_fields() ++ producer_opts();
connector_config_fields() ++ producer_opts(v1);
fields(kafka_producer_action) ->
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
@ -270,7 +271,7 @@ fields(kafka_producer_action) ->
desc => ?DESC(emqx_connector_schema, "connector_field"), required => true
})},
{description, emqx_schema:description_schema()}
] ++ producer_opts();
] ++ producer_opts(action);
fields(kafka_consumer) ->
connector_config_fields() ++ fields(consumer_opts);
fields(ssl_client_opts) ->
@ -523,7 +524,7 @@ fields(consumer_kafka_opts) ->
];
fields(resource_opts) ->
SupportedFields = [health_check_interval],
CreationOpts = emqx_resource_schema:create_opts(_Overrides = []),
CreationOpts = emqx_bridge_v2_schema:resource_opts_fields(),
lists:filter(fun({Field, _}) -> lists:member(Field, SupportedFields) end, CreationOpts);
fields(action_field) ->
{kafka_producer,
@ -599,25 +600,25 @@ connector_config_fields() ->
{ssl, mk(ref(ssl_client_opts), #{})}
].
producer_opts() ->
producer_opts(ActionOrBridgeV1) ->
[
%% Note: there's an implicit convention in `emqx_bridge' that,
%% for egress bridges with this config, the published messages
%% will be forwarded to such bridges.
{local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})},
parameters_field(),
parameters_field(ActionOrBridgeV1),
{resource_opts, mk(ref(resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})}
].
%% Since e5.3.1, we want to rename the field 'kafka' to 'parameters'
%% Hoever we need to keep it backward compatible for generated schema json (version 0.1.0)
%% However we need to keep it backward compatible for generated schema json (version 0.1.0)
%% since schema is data for the 'schemas' API.
parameters_field() ->
parameters_field(ActionOrBridgeV1) ->
{Name, Alias} =
case get(emqx_bridge_schema_version) of
<<"0.1.0">> ->
case ActionOrBridgeV1 of
v1 ->
{kafka, parameters};
_ ->
action ->
{parameters, kafka}
end,
{Name,

View File

@ -10,7 +10,9 @@
bridge_v1_type_name/0,
action_type_name/0,
connector_type_name/0,
schema_module/0
schema_module/0,
connector_action_config_to_bridge_v1_config/2,
bridge_v1_config_to_action_config/2
]).
bridge_v1_type_name() -> kafka.
@ -20,3 +22,30 @@ action_type_name() -> kafka_producer.
connector_type_name() -> kafka_producer.
schema_module() -> emqx_bridge_kafka.
connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) ->
BridgeV1Config1 = maps:remove(<<"connector">>, ActionConfig),
BridgeV1Config2 = emqx_utils_maps:deep_merge(ConnectorConfig, BridgeV1Config1),
emqx_utils_maps:rename(<<"parameters">>, <<"kafka">>, BridgeV1Config2).
bridge_v1_config_to_action_config(BridgeV1Conf, ConnectorName) ->
Config0 = emqx_action_info:transform_bridge_v1_config_to_action_config(
BridgeV1Conf, ConnectorName, schema_module(), kafka_producer
),
KafkaMap = maps:get(<<"kafka">>, BridgeV1Conf, #{}),
Config2 = emqx_utils_maps:deep_merge(Config0, #{<<"parameters">> => KafkaMap}),
maps:with(producer_action_field_keys(), Config2).
%%------------------------------------------------------------------------------------------
%% Internal helper functions
%%------------------------------------------------------------------------------------------
producer_action_field_keys() ->
[
to_bin(K)
|| {K, _} <- emqx_bridge_kafka:fields(kafka_producer_action)
].
to_bin(B) when is_binary(B) -> B;
to_bin(L) when is_list(L) -> list_to_binary(L);
to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8).
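A usage sketch of the v2-to-v1 mapping above, with illustrative values only (the bootstrap host mirrors the test default elsewhere in this PR): the `connector' key is dropped, the connector fields are merged in, and `parameters' is renamed back to the legacy `kafka' key.
ConnectorConf = #{<<"bootstrap_hosts">> => <<"kafka-1.emqx.net:9092">>},
ActionConf = #{
    <<"connector">> => <<"my_connector">>,
    <<"parameters">> => #{<<"topic">> => <<"kafka-topic">>}
},
%% The result keeps the connector fields plus the legacy `kafka' key;
%% the `connector' reference itself is gone.
#{
    <<"bootstrap_hosts">> := <<"kafka-1.emqx.net:9092">>,
    <<"kafka">> := #{<<"topic">> := <<"kafka-topic">>}
} = connector_action_config_to_bridge_v1_config(ConnectorConf, ActionConf).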

View File

@ -481,11 +481,11 @@ on_get_status(
case wolff_client_sup:find_client(ClientId) of
{ok, Pid} ->
case wolff_client:check_connectivity(Pid) of
ok -> connected;
{error, Error} -> {connecting, State, Error}
ok -> ?status_connected;
{error, Error} -> {?status_connecting, State, Error}
end;
{error, _Reason} ->
connecting
?status_connecting
end.
on_get_channel_status(
@ -499,10 +499,10 @@ on_get_channel_status(
#{kafka_topic := KafkaTopic} = maps:get(ChannelId, Channels),
try
ok = check_topic_and_leader_connections(ClientId, KafkaTopic),
connected
?status_connected
catch
throw:#{reason := restarting} ->
conneting
?status_connecting
end.
check_topic_and_leader_connections(ClientId, KafkaTopic) ->
@ -621,8 +621,13 @@ partitioner(random) -> random;
partitioner(key_dispatch) -> first_key_dispatch.
replayq_dir(BridgeType, BridgeName) ->
RawConf = emqx_conf:get_raw([actions, BridgeType, BridgeName]),
DirName = iolist_to_binary([
emqx_bridge_lib:downgrade_type(BridgeType), ":", BridgeName, ":", atom_to_list(node())
emqx_bridge_lib:downgrade_type(BridgeType, RawConf),
":",
BridgeName,
":",
atom_to_list(node())
]),
filename:join([emqx:data_dir(), "kafka", DirName]).
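For instance, assuming `downgrade_type' maps the action type back to its v1 name (kafka_producer -> kafka) and a hypothetical action my_producer on node emqx@127.0.0.1, the buffer directory would be:
%% <data_dir>/kafka/kafka:my_producer:emqx@127.0.0.1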

View File

@ -483,11 +483,10 @@ t_failed_creation_then_fix(Config) ->
{ok, {_, [KafkaMsg]}} = brod:fetch(kafka_hosts(), KafkaTopic, 0, Offset),
?assertMatch(#kafka_message{key = BinTime}, KafkaMsg),
% %% TODO: refactor those into init/end per testcase
ok = ?PRODUCER:on_stop(ResourceId, State),
?assertEqual([], supervisor:which_children(wolff_client_sup)),
?assertEqual([], supervisor:which_children(wolff_producers_sup)),
ok = emqx_bridge:remove(list_to_atom(Type), list_to_atom(Name)),
delete_all_bridges(),
?assertEqual([], supervisor:which_children(wolff_client_sup)),
?assertEqual([], supervisor:which_children(wolff_producers_sup)),
ok.
t_custom_timestamp(_Config) ->

View File

@ -25,7 +25,7 @@ kafka_producer_test() ->
<<"kafka_producer">> :=
#{
<<"myproducer">> :=
#{<<"parameters">> := #{}}
#{<<"kafka">> := #{}}
}
}
},
@ -52,7 +52,7 @@ kafka_producer_test() ->
#{
<<"myproducer">> :=
#{
<<"parameters">> := #{},
<<"kafka">> := #{},
<<"local_topic">> := <<"mqtt/local">>
}
}
@ -68,7 +68,7 @@ kafka_producer_test() ->
#{
<<"myproducer">> :=
#{
<<"parameters">> := #{},
<<"kafka">> := #{},
<<"local_topic">> := <<"mqtt/local">>
}
}
@ -166,7 +166,7 @@ message_key_dispatch_validations_test() ->
?assertThrow(
{_, [
#{
path := "bridges.kafka_producer.myproducer.parameters",
path := "bridges.kafka_producer.myproducer.kafka",
reason := "Message key cannot be empty when `key_dispatch` strategy is used"
}
]},
@ -175,7 +175,7 @@ message_key_dispatch_validations_test() ->
?assertThrow(
{_, [
#{
path := "bridges.kafka_producer.myproducer.parameters",
path := "bridges.kafka_producer.myproducer.kafka",
reason := "Message key cannot be empty when `key_dispatch` strategy is used"
}
]},

View File

@ -23,8 +23,14 @@
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("brod/include/brod.hrl").
-import(emqx_common_test_helpers, [on_exit/1]).
-define(TYPE, kafka_producer).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
all() ->
emqx_common_test_helpers:all(?MODULE).
@ -51,6 +57,151 @@ end_per_suite(Config) ->
emqx_cth_suite:stop(Apps),
ok.
init_per_testcase(_TestCase, Config) ->
Config.
end_per_testcase(_TestCase, _Config) ->
emqx_common_test_helpers:call_janitor(60_000),
ok.
%%-------------------------------------------------------------------------------------
%% Helper fns
%%-------------------------------------------------------------------------------------
check_send_message_with_bridge(BridgeName) ->
%% ######################################
%% Create Kafka message
%% ######################################
Time = erlang:unique_integer(),
BinTime = integer_to_binary(Time),
Payload = list_to_binary("payload" ++ integer_to_list(Time)),
Msg = #{
clientid => BinTime,
payload => Payload,
timestamp => Time
},
Offset = resolve_kafka_offset(),
%% ######################################
%% Send message
%% ######################################
emqx_bridge_v2:send_message(?TYPE, BridgeName, Msg, #{}),
%% ######################################
%% Check if message is sent to Kafka
%% ######################################
check_kafka_message_payload(Offset, Payload).
resolve_kafka_offset() ->
KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(),
Partition = 0,
Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(),
{ok, Offset0} = emqx_bridge_kafka_impl_producer_SUITE:resolve_kafka_offset(
Hosts, KafkaTopic, Partition
),
Offset0.
check_kafka_message_payload(Offset, ExpectedPayload) ->
KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(),
Partition = 0,
Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(),
{ok, {_, [KafkaMsg0]}} = brod:fetch(Hosts, KafkaTopic, Partition, Offset),
?assertMatch(#kafka_message{value = ExpectedPayload}, KafkaMsg0).
bridge_v2_config(ConnectorName) ->
#{
<<"connector">> => ConnectorName,
<<"enable">> => true,
<<"kafka">> => #{
<<"buffer">> => #{
<<"memory_overload_protection">> => false,
<<"mode">> => <<"memory">>,
<<"per_partition_limit">> => <<"2GB">>,
<<"segment_bytes">> => <<"100MB">>
},
<<"compression">> => <<"no_compression">>,
<<"kafka_header_value_encode_mode">> => <<"none">>,
<<"max_batch_bytes">> => <<"896KB">>,
<<"max_inflight">> => 10,
<<"message">> => #{
<<"key">> => <<"${.clientid}">>,
<<"timestamp">> => <<"${.timestamp}">>,
<<"value">> => <<"${.payload}">>
},
<<"partition_count_refresh_interval">> => <<"60s">>,
<<"partition_strategy">> => <<"random">>,
<<"query_mode">> => <<"sync">>,
<<"required_acks">> => <<"all_isr">>,
<<"sync_query_timeout">> => <<"5s">>,
<<"topic">> => emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition()
},
<<"local_topic">> => <<"kafka_t/#">>,
<<"resource_opts">> => #{
<<"health_check_interval">> => <<"15s">>
}
}.
connector_config() ->
#{
<<"authentication">> => <<"none">>,
<<"bootstrap_hosts">> => iolist_to_binary(kafka_hosts_string()),
<<"connect_timeout">> => <<"5s">>,
<<"enable">> => true,
<<"metadata_request_timeout">> => <<"5s">>,
<<"min_metadata_refresh_interval">> => <<"3s">>,
<<"socket_opts">> =>
#{
<<"recbuf">> => <<"1024KB">>,
<<"sndbuf">> => <<"1024KB">>,
<<"tcp_keepalive">> => <<"none">>
},
<<"ssl">> =>
#{
<<"ciphers">> => [],
<<"depth">> => 10,
<<"enable">> => false,
<<"hibernate_after">> => <<"5s">>,
<<"log_level">> => <<"notice">>,
<<"reuse_sessions">> => true,
<<"secure_renegotiate">> => true,
<<"verify">> => <<"verify_peer">>,
<<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
}
}.
kafka_hosts_string() ->
KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "kafka-1.emqx.net"),
KafkaPort = os:getenv("KAFKA_PLAIN_PORT", "9092"),
KafkaHost ++ ":" ++ KafkaPort.
create_connector(Name, Config) ->
Res = emqx_connector:create(?TYPE, Name, Config),
on_exit(fun() -> emqx_connector:remove(?TYPE, Name) end),
Res.
create_action(Name, Config) ->
Res = emqx_bridge_v2:create(?TYPE, Name, Config),
on_exit(fun() -> emqx_bridge_v2:remove(?TYPE, Name) end),
Res.
bridge_api_spec_props_for_get() ->
#{
<<"bridge_kafka.get_producer">> :=
#{<<"properties">> := Props}
} =
emqx_bridge_v2_testlib:bridges_api_spec_schemas(),
Props.
action_api_spec_props_for_get() ->
#{
<<"bridge_kafka.get_bridge_v2">> :=
#{<<"properties">> := Props}
} =
emqx_bridge_v2_testlib:actions_api_spec_schemas(),
Props.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_create_remove_list(_) ->
[] = emqx_bridge_v2:list(),
ConnectorConfig = connector_config(),
@ -187,106 +338,48 @@ t_unknown_topic(_Config) ->
),
ok.
check_send_message_with_bridge(BridgeName) ->
%% ######################################
%% Create Kafka message
%% ######################################
Time = erlang:unique_integer(),
BinTime = integer_to_binary(Time),
Payload = list_to_binary("payload" ++ integer_to_list(Time)),
Msg = #{
clientid => BinTime,
payload => Payload,
timestamp => Time
},
Offset = resolve_kafka_offset(),
%% ######################################
%% Send message
%% ######################################
emqx_bridge_v2:send_message(?TYPE, BridgeName, Msg, #{}),
%% ######################################
%% Check if message is sent to Kafka
%% ######################################
check_kafka_message_payload(Offset, Payload).
resolve_kafka_offset() ->
KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(),
Partition = 0,
Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(),
{ok, Offset0} = emqx_bridge_kafka_impl_producer_SUITE:resolve_kafka_offset(
Hosts, KafkaTopic, Partition
t_bad_url(_Config) ->
ConnectorName = <<"test_connector">>,
ActionName = <<"test_action">>,
ActionConfig = bridge_v2_config(<<"test_connector">>),
ConnectorConfig0 = connector_config(),
ConnectorConfig = ConnectorConfig0#{<<"bootstrap_hosts">> := <<"bad_host:9092">>},
?assertMatch({ok, _}, create_connector(ConnectorName, ConnectorConfig)),
?assertMatch({ok, _}, create_action(ActionName, ActionConfig)),
?assertMatch(
{ok, #{
resource_data :=
#{
status := connecting,
error := [#{reason := unresolvable_hostname}]
}
}},
emqx_connector:lookup(?TYPE, ConnectorName)
),
Offset0.
?assertMatch({ok, #{status := connecting}}, emqx_bridge_v2:lookup(?TYPE, ActionName)),
ok.
check_kafka_message_payload(Offset, ExpectedPayload) ->
KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(),
Partition = 0,
Hosts = emqx_bridge_kafka_impl_producer_SUITE:kafka_hosts(),
{ok, {_, [KafkaMsg0]}} = brod:fetch(Hosts, KafkaTopic, Partition, Offset),
?assertMatch(#kafka_message{value = ExpectedPayload}, KafkaMsg0).
t_parameters_key_api_spec(_Config) ->
BridgeProps = bridge_api_spec_props_for_get(),
?assert(is_map_key(<<"kafka">>, BridgeProps), #{bridge_props => BridgeProps}),
?assertNot(is_map_key(<<"parameters">>, BridgeProps), #{bridge_props => BridgeProps}),
bridge_v2_config(ConnectorName) ->
#{
<<"connector">> => ConnectorName,
<<"enable">> => true,
<<"kafka">> => #{
<<"buffer">> => #{
<<"memory_overload_protection">> => false,
<<"mode">> => <<"memory">>,
<<"per_partition_limit">> => <<"2GB">>,
<<"segment_bytes">> => <<"100MB">>
},
<<"compression">> => <<"no_compression">>,
<<"kafka_header_value_encode_mode">> => <<"none">>,
<<"max_batch_bytes">> => <<"896KB">>,
<<"max_inflight">> => 10,
<<"message">> => #{
<<"key">> => <<"${.clientid}">>,
<<"timestamp">> => <<"${.timestamp}">>,
<<"value">> => <<"${.payload}">>
},
<<"partition_count_refresh_interval">> => <<"60s">>,
<<"partition_strategy">> => <<"random">>,
<<"query_mode">> => <<"sync">>,
<<"required_acks">> => <<"all_isr">>,
<<"sync_query_timeout">> => <<"5s">>,
<<"topic">> => emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition()
},
<<"local_topic">> => <<"kafka_t/#">>,
<<"resource_opts">> => #{
<<"health_check_interval">> => <<"15s">>
}
}.
ActionProps = action_api_spec_props_for_get(),
?assertNot(is_map_key(<<"kafka">>, ActionProps), #{action_props => ActionProps}),
?assert(is_map_key(<<"parameters">>, ActionProps), #{action_props => ActionProps}),
connector_config() ->
#{
<<"authentication">> => <<"none">>,
<<"bootstrap_hosts">> => iolist_to_binary(kafka_hosts_string()),
<<"connect_timeout">> => <<"5s">>,
<<"enable">> => true,
<<"metadata_request_timeout">> => <<"5s">>,
<<"min_metadata_refresh_interval">> => <<"3s">>,
<<"socket_opts">> =>
#{
<<"recbuf">> => <<"1024KB">>,
<<"sndbuf">> => <<"1024KB">>,
<<"tcp_keepalive">> => <<"none">>
},
<<"ssl">> =>
#{
<<"ciphers">> => [],
<<"depth">> => 10,
<<"enable">> => false,
<<"hibernate_after">> => <<"5s">>,
<<"log_level">> => <<"notice">>,
<<"reuse_sessions">> => true,
<<"secure_renegotiate">> => true,
<<"verify">> => <<"verify_peer">>,
<<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>]
}
}.
ok.
kafka_hosts_string() ->
KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "kafka-1.emqx.net"),
KafkaPort = os:getenv("KAFKA_PLAIN_PORT", "9092"),
KafkaHost ++ ":" ++ KafkaPort.
t_http_api_get(_Config) ->
ConnectorName = <<"test_connector">>,
ActionName = <<"test_action">>,
ActionConfig = bridge_v2_config(<<"test_connector">>),
ConnectorConfig = connector_config(),
?assertMatch({ok, _}, create_connector(ConnectorName, ConnectorConfig)),
?assertMatch({ok, _}, create_action(ActionName, ActionConfig)),
%% v1 api; no mangling of configs; has `kafka' top level config key
?assertMatch(
{ok, {{_, 200, _}, _, [#{<<"kafka">> := _}]}},
emqx_bridge_testlib:list_bridges_api()
),
ok.

View File

@ -1,13 +1,13 @@
{application, emqx_bridge_matrix, [
{description, "EMQX Enterprise MatrixDB Bridge"},
{vsn, "0.1.2"},
{vsn, "0.1.3"},
{registered, []},
{applications, [
kernel,
stdlib,
emqx_resource
]},
{env, []},
{env, [{emqx_action_info_modules, [emqx_bridge_matrix_action_info]}]},
{modules, []},
{links, []}
]}.

View File

@ -3,6 +3,8 @@
%%--------------------------------------------------------------------
-module(emqx_bridge_matrix).
-include_lib("hocon/include/hoconsc.hrl").
-export([
conn_bridge_examples/1
]).
@ -14,6 +16,12 @@
desc/1
]).
%% Examples
-export([
bridge_v2_examples/1,
connector_examples/1
]).
%% -------------------------------------------------------------------------------------------------
%% api
@ -22,7 +30,7 @@ conn_bridge_examples(Method) ->
#{
<<"matrix">> => #{
summary => <<"Matrix Bridge">>,
value => emqx_bridge_pgsql:values(Method, matrix)
value => emqx_bridge_pgsql:values_conn_bridge_examples(Method, matrix)
}
}
].
@ -35,8 +43,55 @@ roots() -> [].
fields("post") ->
emqx_bridge_pgsql:fields("post", matrix);
fields("config_connector") ->
emqx_bridge_pgsql:fields("config_connector");
fields(action) ->
{matrix,
hoconsc:mk(
hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)),
#{
desc => <<"Matrix Action Config">>,
required => false
}
)};
fields("put_bridge_v2") ->
emqx_bridge_pgsql:fields(pgsql_action);
fields("get_bridge_v2") ->
emqx_bridge_pgsql:fields(pgsql_action);
fields("post_bridge_v2") ->
emqx_bridge_pgsql:fields(pgsql_action);
fields("put_connector") ->
emqx_bridge_pgsql:fields("config_connector");
fields("get_connector") ->
emqx_bridge_pgsql:fields("config_connector");
fields("post_connector") ->
emqx_bridge_pgsql:fields("config_connector");
fields(Method) ->
emqx_bridge_pgsql:fields(Method).
desc("config_connector") ->
?DESC(emqx_postgresql_connector_schema, "config_connector");
desc(_) ->
undefined.
%% Examples
connector_examples(Method) ->
[
#{
<<"matrix">> => #{
summary => <<"Matrix Connector">>,
value => emqx_postgresql_connector_schema:values({Method, <<"matrix">>})
}
}
].
bridge_v2_examples(Method) ->
[
#{
<<"matrix">> => #{
summary => <<"Matrix Action">>,
value => emqx_bridge_pgsql:values({Method, matrix})
}
}
].

View File

@ -0,0 +1,22 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_matrix_action_info).
-behaviour(emqx_action_info).
-export([
bridge_v1_type_name/0,
action_type_name/0,
connector_type_name/0,
schema_module/0
]).
bridge_v1_type_name() -> matrix.
action_type_name() -> matrix.
connector_type_name() -> matrix.
schema_module() -> emqx_bridge_matrix.

View File

@ -9,7 +9,7 @@
emqx_resource,
emqx_mongodb
]},
{env, []},
{env, [{emqx_action_info_modules, [emqx_bridge_mongodb_action_info]}]},
{modules, []},
{links, []}
]}.

View File

@ -12,7 +12,9 @@
%% emqx_bridge_enterprise "callbacks"
-export([
conn_bridge_examples/1
bridge_v2_examples/1,
conn_bridge_examples/1,
connector_examples/1
]).
%% hocon_schema callbacks
@ -27,10 +29,13 @@
%% hocon_schema API
%%=================================================================================================
%% [TODO] Namespace should be different depending on whether this is used for a
%% connector, an action or a legacy bridge type.
namespace() ->
"bridge_mongodb".
roots() ->
%% ???
[].
fields("config") ->
@ -44,6 +49,18 @@ fields("config") ->
#{required => true, desc => ?DESC(emqx_resource_schema, "creation_opts")}
)}
];
fields("config_connector") ->
emqx_connector_schema:common_fields() ++
[
{parameters,
mk(
hoconsc:union([
ref(emqx_mongodb, "connector_" ++ T)
|| T <- ["single", "sharded", "rs"]
]),
#{required => true, desc => ?DESC("mongodb_parameters")}
)}
] ++ emqx_mongodb:fields(mongodb);
fields("creation_opts") ->
%% so far, mongodb connector does not support batching
%% but we cannot delete this field due to compatibility reasons
@ -55,12 +72,47 @@ fields("creation_opts") ->
desc => ?DESC("batch_size")
}}
]);
fields(action) ->
{mongodb,
mk(
hoconsc:map(name, ref(?MODULE, mongodb_action)),
#{desc => <<"MongoDB Action Config">>, required => false}
)};
fields(mongodb_action) ->
emqx_bridge_v2_schema:make_producer_action_schema(
mk(ref(?MODULE, action_parameters), #{
required => true, desc => ?DESC(action_parameters)
})
);
fields(action_parameters) ->
[
{collection, mk(binary(), #{desc => ?DESC("collection"), default => <<"mqtt">>})},
{payload_template, mk(binary(), #{required => false, desc => ?DESC("payload_template")})}
];
fields(resource_opts) ->
fields("creation_opts");
fields(mongodb_rs) ->
emqx_mongodb:fields(rs) ++ fields("config");
fields(mongodb_sharded) ->
emqx_mongodb:fields(sharded) ++ fields("config");
fields(mongodb_single) ->
emqx_mongodb:fields(single) ++ fields("config");
fields("post_connector") ->
type_and_name_fields(mongodb) ++
fields("config_connector");
fields("put_connector") ->
fields("config_connector");
fields("get_connector") ->
emqx_bridge_schema:status_fields() ++
fields("post_connector");
fields("get_bridge_v2") ->
emqx_bridge_schema:status_fields() ++
fields("post_bridge_v2");
fields("post_bridge_v2") ->
type_and_name_fields(mongodb) ++
fields(mongodb_action);
fields("put_bridge_v2") ->
fields(mongodb_action);
fields("post_rs") ->
fields(mongodb_rs) ++ type_and_name_fields(mongodb_rs);
fields("post_sharded") ->
@ -86,6 +138,16 @@ fields("get_single") ->
fields(mongodb_single) ++
type_and_name_fields(mongodb_single).
bridge_v2_examples(Method) ->
[
#{
<<"mongodb">> => #{
summary => <<"MongoDB Action">>,
value => action_values(Method)
}
}
].
conn_bridge_examples(Method) ->
[
#{
@ -108,16 +170,46 @@ conn_bridge_examples(Method) ->
}
].
connector_examples(Method) ->
[
#{
<<"mongodb_rs">> => #{
summary => <<"MongoDB Replica Set Connector">>,
value => connector_values(mongodb_rs, Method)
}
},
#{
<<"mongodb_sharded">> => #{
summary => <<"MongoDB Sharded Connector">>,
value => connector_values(mongodb_sharded, Method)
}
},
#{
<<"mongodb_single">> => #{
summary => <<"MongoDB Standalone Connector">>,
value => connector_values(mongodb_single, Method)
}
}
].
desc("config_connector") ->
?DESC("desc_config");
desc("config") ->
?DESC("desc_config");
desc("creation_opts") ->
?DESC(emqx_resource_schema, "creation_opts");
desc(resource_opts) ->
?DESC(emqx_resource_schema, "resource_opts");
desc(mongodb_rs) ->
?DESC(mongodb_rs_conf);
desc(mongodb_sharded) ->
?DESC(mongodb_sharded_conf);
desc(mongodb_single) ->
?DESC(mongodb_single_conf);
desc(mongodb_action) ->
?DESC(mongodb_action);
desc(action_parameters) ->
?DESC(action_parameters);
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for MongoDB using `", string:to_upper(Method), "` method."];
desc(_) ->
@ -133,49 +225,102 @@ type_and_name_fields(MongoType) ->
{name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}
].
values(mongodb_rs = MongoType, Method) ->
TypeOpts = #{
connector_values(Type, Method) ->
lists:foldl(
fun(M1, M2) ->
maps:merge(M1, M2)
end,
#{
description => <<"My example connector">>,
parameters => mongo_type_opts(Type)
},
[
common_values(),
method_values(mongodb, Method)
]
).
action_values(Method) ->
maps:merge(
method_values(mongodb, Method),
#{
description => <<"My example action">>,
enable => true,
connector => <<"my_mongodb_connector">>,
parameters => #{
collection => <<"mycol">>
}
}
).
values(MongoType, Method) ->
maps:merge(
mongo_type_opts(MongoType),
bridge_values(MongoType, Method)
).
mongo_type_opts(mongodb_rs) ->
#{
mongo_type => <<"rs">>,
servers => <<"localhost:27017, localhost:27018">>,
w_mode => <<"safe">>,
r_mode => <<"safe">>,
replica_set_name => <<"rs">>
},
values(common, MongoType, Method, TypeOpts);
values(mongodb_sharded = MongoType, Method) ->
TypeOpts = #{
};
mongo_type_opts(mongodb_sharded) ->
#{
mongo_type => <<"sharded">>,
servers => <<"localhost:27017, localhost:27018">>,
w_mode => <<"safe">>
},
values(common, MongoType, Method, TypeOpts);
values(mongodb_single = MongoType, Method) ->
TypeOpts = #{
};
mongo_type_opts(mongodb_single) ->
#{
mongo_type => <<"single">>,
server => <<"localhost:27017">>,
w_mode => <<"safe">>
},
values(common, MongoType, Method, TypeOpts).
}.
values(common, MongoType, Method, TypeOpts) ->
MongoTypeBin = atom_to_binary(MongoType),
Common = #{
name => <<MongoTypeBin/binary, "_demo">>,
type => MongoTypeBin,
bridge_values(Type, _Method) ->
%% [FIXME] _Method makes a difference since PUT doesn't allow name and type
%% for connectors.
TypeBin = atom_to_binary(Type),
maps:merge(
#{
name => <<TypeBin/binary, "_demo">>,
type => TypeBin,
collection => <<"mycol">>
},
common_values()
).
common_values() ->
#{
enable => true,
collection => <<"mycol">>,
database => <<"mqtt">>,
srv_record => false,
pool_size => 8,
username => <<"myuser">>,
password => <<"******">>
},
MethodVals = method_values(MongoType, Method),
Vals0 = maps:merge(MethodVals, Common),
maps:merge(Vals0, TypeOpts).
}.
method_values(MongoType, _) ->
ConnectorType =
case MongoType of
mongodb_rs -> <<"rs">>;
mongodb_sharded -> <<"sharded">>;
mongodb_single -> <<"single">>
end,
#{mongo_type => ConnectorType}.
method_values(Type, post) ->
TypeBin = atom_to_binary(Type),
#{
name => <<TypeBin/binary, "_demo">>,
type => TypeBin
};
method_values(Type, get) ->
maps:merge(
method_values(Type, post),
#{
status => <<"connected">>,
node_status => [
#{
node => <<"emqx@localhost">>,
status => <<"connected">>
}
]
}
);
method_values(_Type, put) ->
#{}.
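Shape sketch, derived directly from the clauses above, of the generated example value for a POST action:
#{
    name := <<"mongodb_demo">>,
    type := <<"mongodb">>,
    description := <<"My example action">>,
    enable := true,
    connector := <<"my_mongodb_connector">>,
    parameters := #{collection := <<"mycol">>}
} = action_values(post).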

View File

@ -0,0 +1,95 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_mongodb_action_info).
-behaviour(emqx_action_info).
%% behaviour callbacks
-export([
bridge_v1_config_to_action_config/2,
bridge_v1_config_to_connector_config/1,
connector_action_config_to_bridge_v1_config/2,
action_type_name/0,
bridge_v1_type_name/0,
connector_type_name/0,
schema_module/0
]).
%% dynamic callback
-export([
bridge_v1_type_name_fun/1
]).
-import(emqx_utils_conv, [bin/1]).
-define(SCHEMA_MODULE, emqx_bridge_mongodb).
connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) ->
fix_v1_type(
maps:merge(
maps:without(
[<<"connector">>],
map_unindent(<<"parameters">>, ActionConfig)
),
map_unindent(<<"parameters">>, ConnectorConfig)
)
).
fix_v1_type(#{<<"mongo_type">> := MongoType} = Conf) ->
Conf#{<<"type">> => v1_type(MongoType)}.
bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) ->
ActionTopLevelKeys = schema_keys(mongodb_action),
ActionParametersKeys = schema_keys(action_parameters),
ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys,
ActionConfig = make_config_map(ActionKeys, ActionParametersKeys, BridgeV1Config),
ActionConfig#{<<"connector">> => ConnectorName}.
bridge_v1_config_to_connector_config(BridgeV1Config) ->
ActionTopLevelKeys = schema_keys(mongodb_action),
ActionParametersKeys = schema_keys(action_parameters),
ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys,
ConnectorTopLevelKeys = schema_keys("config_connector"),
ConnectorKeys = maps:keys(BridgeV1Config) -- (ActionKeys -- ConnectorTopLevelKeys),
ConnectorParametersKeys = ConnectorKeys -- ConnectorTopLevelKeys,
make_config_map(ConnectorKeys, ConnectorParametersKeys, BridgeV1Config).
make_config_map(PickKeys, IndentKeys, Config) ->
Conf0 = maps:with(PickKeys, Config),
map_indent(<<"parameters">>, IndentKeys, Conf0).
bridge_v1_type_name() ->
{fun ?MODULE:bridge_v1_type_name_fun/1, bridge_v1_type_names()}.
action_type_name() -> mongodb.
connector_type_name() -> mongodb.
schema_module() -> ?SCHEMA_MODULE.
bridge_v1_type_names() -> [mongodb_rs, mongodb_sharded, mongodb_single].
bridge_v1_type_name_fun({#{<<"parameters">> := #{<<"mongo_type">> := MongoType}}, _}) ->
v1_type(MongoType).
v1_type(<<"rs">>) -> mongodb_rs;
v1_type(<<"sharded">>) -> mongodb_sharded;
v1_type(<<"single">>) -> mongodb_single.
map_unindent(Key, Map) ->
maps:merge(
maps:get(Key, Map),
maps:remove(Key, Map)
).
map_indent(IndentKey, PickKeys, Map) ->
maps:put(
IndentKey,
maps:with(PickKeys, Map),
maps:without(PickKeys, Map)
).
schema_keys(Name) ->
[bin(Key) || Key <- proplists:get_keys(?SCHEMA_MODULE:fields(Name))].
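Since these two helpers do most of the config reshaping in this module, a minimal worked example with illustrative keys:
%% Unindenting pulls the nested keys up one level...
#{<<"a">> := 1, <<"b">> := 2} =
    map_unindent(<<"parameters">>, #{<<"a">> => 1, <<"parameters">> => #{<<"b">> => 2}}),
%% ...and indenting nests the picked keys back under `parameters'.
#{<<"a">> := 1, <<"parameters">> := #{<<"b">> := 2}} =
    map_indent(<<"parameters">>, [<<"b">>], #{<<"a">> => 1, <<"b">> => 2}).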

View File

@ -6,16 +6,19 @@
-behaviour(emqx_resource).
-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
%% `emqx_resource' API
-export([
on_remove_channel/3,
callback_mode/0,
on_start/2,
on_stop/2,
on_add_channel/4,
on_get_channel_status/3,
on_get_channels/1,
on_get_status/2,
on_query/3,
on_get_status/2
on_start/2,
on_stop/2
]).
%%========================================================================================
@ -24,44 +27,94 @@
callback_mode() -> emqx_mongodb:callback_mode().
on_start(InstanceId, Config) ->
on_add_channel(
_InstanceId,
#{channels := Channels} = OldState,
ChannelId,
#{parameters := Parameters} = ChannelConfig0
) ->
PayloadTemplate0 = maps:get(payload_template, Parameters, undefined),
PayloadTemplate = preprocess_template(PayloadTemplate0),
CollectionTemplateSource = maps:get(collection, Parameters),
CollectionTemplate = preprocess_template(CollectionTemplateSource),
ChannelConfig = maps:merge(
Parameters,
ChannelConfig0#{
payload_template => PayloadTemplate,
collection_template => CollectionTemplate
}
),
NewState = OldState#{channels => maps:put(ChannelId, ChannelConfig, Channels)},
{ok, NewState}.
on_get_channel_status(InstanceId, _ChannelId, State) ->
case on_get_status(InstanceId, State) of
connected ->
connected;
_ ->
connecting
end.
on_get_channels(InstanceId) ->
emqx_bridge_v2:get_channels_for_connector(InstanceId).
on_get_status(InstanceId, _State = #{connector_state := ConnectorState}) ->
emqx_mongodb:on_get_status(InstanceId, ConnectorState).
on_query(InstanceId, {Channel, Message0}, #{channels := Channels, connector_state := ConnectorState}) ->
#{
payload_template := PayloadTemplate,
collection_template := CollectionTemplate
} = ChannelState0 = maps:get(Channel, Channels),
ChannelState = ChannelState0#{
collection => emqx_placeholder:proc_tmpl(CollectionTemplate, Message0)
},
Message = render_message(PayloadTemplate, Message0),
Res = emqx_mongodb:on_query(
InstanceId,
{Channel, Message},
maps:merge(ConnectorState, ChannelState)
),
?tp(mongo_bridge_connector_on_query_return, #{instance_id => InstanceId, result => Res}),
Res;
on_query(InstanceId, Request, _State = #{connector_state := ConnectorState}) ->
emqx_mongodb:on_query(InstanceId, Request, ConnectorState).
on_remove_channel(_InstanceId, #{channels := Channels} = State, ChannelId) ->
NewState = State#{channels => maps:remove(ChannelId, Channels)},
{ok, NewState}.
on_start(InstanceId, Config0) ->
Config = config_transform(Config0),
case emqx_mongodb:on_start(InstanceId, Config) of
{ok, ConnectorState} ->
PayloadTemplate0 = maps:get(payload_template, Config, undefined),
PayloadTemplate = preprocess_template(PayloadTemplate0),
CollectionTemplateSource = maps:get(collection, Config),
CollectionTemplate = preprocess_template(CollectionTemplateSource),
State = #{
payload_template => PayloadTemplate,
collection_template => CollectionTemplate,
connector_state => ConnectorState
connector_state => ConnectorState,
channels => #{}
},
{ok, State};
Error ->
Error
end.
config_transform(#{parameters := #{mongo_type := MongoType} = Parameters} = Config) ->
maps:put(
type,
connector_type(MongoType),
maps:merge(
maps:remove(parameters, Config),
Parameters
)
).
connector_type(rs) -> mongodb_rs;
connector_type(sharded) -> mongodb_sharded;
connector_type(single) -> mongodb_single.
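A minimal sketch (illustrative fields only) of the flattening `config_transform' performs: `parameters' is hoisted to the top level and the legacy `type' is restored.
#{type := mongodb_single, server := <<"localhost:27017">>, pool_size := 8} =
    config_transform(#{
        parameters => #{mongo_type => single, server => <<"localhost:27017">>},
        pool_size => 8
    }).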
on_stop(InstanceId, _State = #{connector_state := ConnectorState}) ->
emqx_mongodb:on_stop(InstanceId, ConnectorState).
on_query(InstanceId, {send_message, Message0}, State) ->
#{
payload_template := PayloadTemplate,
collection_template := CollectionTemplate,
connector_state := ConnectorState
} = State,
NewConnectorState = ConnectorState#{
collection => emqx_placeholder:proc_tmpl(CollectionTemplate, Message0)
},
Message = render_message(PayloadTemplate, Message0),
Res = emqx_mongodb:on_query(InstanceId, {send_message, Message}, NewConnectorState),
?tp(mongo_bridge_connector_on_query_return, #{result => Res}),
Res;
on_query(InstanceId, Request, _State = #{connector_state := ConnectorState}) ->
emqx_mongodb:on_query(InstanceId, Request, ConnectorState).
on_get_status(InstanceId, _State = #{connector_state := ConnectorState}) ->
emqx_mongodb:on_get_status(InstanceId, ConnectorState).
ok = emqx_mongodb:on_stop(InstanceId, ConnectorState),
?tp(mongodb_stopped, #{instance_id => InstanceId}),
ok.
%%========================================================================================
%% Helper fns

View File

@ -132,7 +132,17 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
emqx_mgmt_api_test_util:end_suite(),
ok = emqx_common_test_helpers:stop_apps([emqx_mongodb, emqx_bridge, emqx_rule_engine, emqx_conf]),
ok = emqx_common_test_helpers:stop_apps(
[
emqx_management,
emqx_bridge_mongodb,
emqx_mongodb,
emqx_bridge,
emqx_connector,
emqx_rule_engine,
emqx_conf
]
),
ok.
init_per_testcase(_Testcase, Config) ->
@ -144,6 +154,7 @@ init_per_testcase(_Testcase, Config) ->
end_per_testcase(_Testcase, Config) ->
clear_db(Config),
delete_bridge(Config),
[] = emqx_connector:list(),
snabbkaffe:stop(),
ok.
@ -157,9 +168,17 @@ start_apps() ->
%% we want to make sure they are loaded before
%% ekka start in emqx_common_test_helpers:start_apps/1
emqx_common_test_helpers:render_and_load_app_config(emqx_conf),
ok = emqx_common_test_helpers:start_apps([
emqx_conf, emqx_rule_engine, emqx_bridge, emqx_mongodb
]).
ok = emqx_common_test_helpers:start_apps(
[
emqx_conf,
emqx_rule_engine,
emqx_connector,
emqx_bridge,
emqx_mongodb,
emqx_bridge_mongodb,
emqx_management
]
).
ensure_loaded() ->
_ = application:load(emqtt),
@ -198,6 +217,7 @@ mongo_config(MongoHost, MongoPort0, rs = Type, Config) ->
"\n w_mode = safe"
"\n use_legacy_protocol = auto"
"\n database = mqtt"
"\n mongo_type = rs"
"\n resource_opts = {"
"\n query_mode = ~s"
"\n worker_pool_size = 1"
@ -224,6 +244,7 @@ mongo_config(MongoHost, MongoPort0, sharded = Type, Config) ->
"\n w_mode = safe"
"\n use_legacy_protocol = auto"
"\n database = mqtt"
"\n mongo_type = sharded"
"\n resource_opts = {"
"\n query_mode = ~s"
"\n worker_pool_size = 1"
@ -253,6 +274,7 @@ mongo_config(MongoHost, MongoPort0, single = Type, Config) ->
"\n auth_source = ~s"
"\n username = ~s"
"\n password = \"file://~s\""
"\n mongo_type = single"
"\n resource_opts = {"
"\n query_mode = ~s"
"\n worker_pool_size = 1"
@ -290,13 +312,17 @@ create_bridge(Config, Overrides) ->
delete_bridge(Config) ->
Type = mongo_type_bin(?config(mongo_type, Config)),
Name = ?config(mongo_name, Config),
emqx_bridge:remove(Type, Name).
emqx_bridge:check_deps_and_remove(Type, Name, [connector, rule_actions]).
create_bridge_http(Params) ->
Path = emqx_mgmt_api_test_util:api_path(["bridges"]),
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of
{ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])};
case
emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, #{
return_all => true
})
of
{ok, {{_, 201, _}, _, Body}} -> {ok, emqx_utils_json:decode(Body, [return_maps])};
Error -> Error
end.
@ -564,8 +590,8 @@ t_get_status_server_selection_too_short(Config) ->
ok.
t_use_legacy_protocol_option(Config) ->
ResourceID = resource_id(Config),
{ok, _} = create_bridge(Config, #{<<"use_legacy_protocol">> => <<"true">>}),
ResourceID = resource_id(Config),
?retry(
_Interval0 = 200,
_NAttempts0 = 20,

View File

@ -0,0 +1,232 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_v2_mongodb_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-define(BRIDGE_TYPE, mongodb).
-define(BRIDGE_TYPE_BIN, <<"mongodb">>).
-define(CONNECTOR_TYPE, mongodb).
-define(CONNECTOR_TYPE_BIN, <<"mongodb">>).
-import(emqx_common_test_helpers, [on_exit/1]).
-import(emqx_utils_conv, [bin/1]).
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
MongoHost = os:getenv("MONGO_SINGLE_HOST", "mongo"),
MongoPort = list_to_integer(os:getenv("MONGO_SINGLE_PORT", "27017")),
case emqx_common_test_helpers:is_tcp_server_available(MongoHost, MongoPort) of
true ->
Apps = emqx_cth_suite:start(
[
emqx,
emqx_conf,
emqx_connector,
emqx_bridge,
emqx_bridge_mongodb,
emqx_rule_engine,
emqx_management,
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
],
#{work_dir => emqx_cth_suite:work_dir(Config)}
),
{ok, Api} = emqx_common_test_http:create_default_app(),
[
{apps, Apps},
{api, Api},
{mongo_host, MongoHost},
{mongo_port, MongoPort}
| Config
];
false ->
case os:getenv("IS_CI") of
"yes" ->
throw(no_mongo);
_ ->
{skip, no_mongo}
end
end.
end_per_suite(Config) ->
Apps = ?config(apps, Config),
emqx_cth_suite:stop(Apps),
ok.
init_per_testcase(TestCase, Config) ->
common_init_per_testcase(TestCase, Config).
common_init_per_testcase(TestCase, Config) ->
ct:timetrap(timer:seconds(60)),
emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(),
emqx_config:delete_override_conf_files(),
UniqueNum = integer_to_binary(erlang:unique_integer()),
Name = iolist_to_binary([atom_to_binary(TestCase), UniqueNum]),
AuthSource = bin(os:getenv("MONGO_AUTHSOURCE", "admin")),
Username = bin(os:getenv("MONGO_USERNAME", "")),
Password = bin(os:getenv("MONGO_PASSWORD", "")),
Passfile = filename:join(?config(priv_dir, Config), "passfile"),
ok = file:write_file(Passfile, Password),
NConfig = [
{mongo_authsource, AuthSource},
{mongo_username, Username},
{mongo_password, Password},
{mongo_passfile, Passfile}
| Config
],
ConnectorConfig = connector_config(Name, NConfig),
BridgeConfig = bridge_config(Name, Name),
ok = snabbkaffe:start_trace(),
[
{connector_type, ?CONNECTOR_TYPE},
{connector_name, Name},
{connector_config, ConnectorConfig},
{bridge_type, ?BRIDGE_TYPE},
{bridge_name, Name},
{bridge_config, BridgeConfig}
| NConfig
].
end_per_testcase(_Testcase, Config) ->
case proplists:get_bool(skip_does_not_apply, Config) of
true ->
ok;
false ->
emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(),
emqx_common_test_helpers:call_janitor(60_000),
ok = snabbkaffe:stop(),
ok
end.
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
connector_config(Name, Config) ->
MongoHost = ?config(mongo_host, Config),
MongoPort = ?config(mongo_port, Config),
AuthSource = ?config(mongo_authsource, Config),
Username = ?config(mongo_username, Config),
PassFile = ?config(mongo_passfile, Config),
InnerConfigMap0 =
#{
<<"enable">> => true,
<<"database">> => <<"mqtt">>,
<<"parameters">> =>
#{
<<"mongo_type">> => <<"single">>,
<<"server">> => iolist_to_binary([MongoHost, ":", integer_to_binary(MongoPort)]),
<<"w_mode">> => <<"safe">>
},
<<"pool_size">> => 8,
<<"srv_record">> => false,
<<"username">> => Username,
<<"password">> => iolist_to_binary(["file://", PassFile]),
<<"auth_source">> => AuthSource
},
InnerConfigMap = serde_roundtrip(InnerConfigMap0),
parse_and_check_connector_config(InnerConfigMap, Name).
parse_and_check_connector_config(InnerConfigMap, Name) ->
TypeBin = ?CONNECTOR_TYPE_BIN,
RawConf = #{<<"connectors">> => #{TypeBin => #{Name => InnerConfigMap}}},
#{<<"connectors">> := #{TypeBin := #{Name := Config}}} =
hocon_tconf:check_plain(emqx_connector_schema, RawConf, #{
required => false, atom_key => false
}),
ct:pal("parsed config: ~p", [Config]),
InnerConfigMap.
bridge_config(Name, ConnectorId) ->
InnerConfigMap0 =
#{
<<"enable">> => true,
<<"connector">> => ConnectorId,
<<"parameters">> =>
#{},
<<"local_topic">> => <<"t/aeh">>
%%,
},
InnerConfigMap = serde_roundtrip(InnerConfigMap0),
parse_and_check_bridge_config(InnerConfigMap, Name).
%% check it serializes correctly
serde_roundtrip(InnerConfigMap0) ->
IOList = hocon_pp:do(InnerConfigMap0, #{}),
{ok, InnerConfigMap} = hocon:binary(IOList),
InnerConfigMap.
parse_and_check_bridge_config(InnerConfigMap, Name) ->
TypeBin = ?BRIDGE_TYPE_BIN,
RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}},
hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}),
InnerConfigMap.
shared_secret_path() ->
os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret").
shared_secret(client_keyfile) ->
filename:join([shared_secret_path(), "client.key"]);
shared_secret(client_certfile) ->
filename:join([shared_secret_path(), "client.crt"]);
shared_secret(client_cacertfile) ->
filename:join([shared_secret_path(), "ca.crt"]);
shared_secret(rig_keytab) ->
filename:join([shared_secret_path(), "rig.keytab"]).
make_message() ->
Time = erlang:unique_integer(),
BinTime = integer_to_binary(Time),
Payload = emqx_guid:to_hexstr(emqx_guid:gen()),
#{
clientid => BinTime,
payload => Payload,
timestamp => Time
}.
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_start_stop(Config) ->
emqx_bridge_v2_testlib:t_start_stop(Config, mongodb_stopped),
ok.
t_create_via_http(Config) ->
emqx_bridge_v2_testlib:t_create_via_http(Config),
ok.
t_on_get_status(Config) ->
emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}),
ok.
t_sync_query(Config) ->
ok = emqx_bridge_v2_testlib:t_sync_query(
Config,
fun make_message/0,
fun(Res) -> ?assertEqual(ok, Res) end,
mongo_bridge_connector_on_query_return
),
ok.

View File

@ -36,7 +36,7 @@
-define(MQTT_HOST_OPTS, #{default_port => 1883}).
namespace() -> "connector-mqtt".
namespace() -> "connector_mqtt".
roots() ->
fields("config").

View File

@ -8,7 +8,7 @@
emqx_resource,
emqx_postgresql
]},
{env, []},
{env, [{emqx_action_info_modules, [emqx_bridge_pgsql_action_info]}]},
{modules, []},
{links, []}
]}.

View File

@ -1,83 +1,97 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_pgsql).
-include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("emqx_postgresql/include/emqx_postgresql.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("epgsql/include/epgsql.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
-import(hoconsc, [mk/2, enum/1, ref/2]).
-export([
conn_bridge_examples/1,
values/2,
fields/2
]).
-export([
namespace/0,
roots/0,
fields/1,
desc/1
desc/1,
fields/2
]).
-define(DEFAULT_SQL, <<
"insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) "
"values (${id}, ${topic}, ${qos}, ${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))"
>>).
%% Examples
-export([
bridge_v2_examples/1,
conn_bridge_examples/1
]).
%% -------------------------------------------------------------------------------------------------
%% api
%% Exported for timescale and matrix bridges
-export([
values/1,
values_conn_bridge_examples/2
]).
conn_bridge_examples(Method) ->
[
#{
<<"pgsql">> => #{
summary => <<"PostgreSQL Bridge">>,
value => values(Method, pgsql)
}
}
].
-define(PGSQL_HOST_OPTIONS, #{
default_port => ?PGSQL_DEFAULT_PORT
}).
values(_Method, Type) ->
#{
enable => true,
type => Type,
name => <<"foo">>,
server => <<"127.0.0.1:5432">>,
database => <<"mqtt">>,
pool_size => 8,
username => <<"root">>,
password => <<"******">>,
sql => ?DEFAULT_SQL,
local_topic => <<"local/topic/#">>,
resource_opts => #{
worker_pool_size => 8,
health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
batch_size => ?DEFAULT_BATCH_SIZE,
batch_time => ?DEFAULT_BATCH_TIME,
query_mode => async,
max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
}
}.
%% -------------------------------------------------------------------------------------------------
%% Hocon Schema Definitions
namespace() -> "bridge_pgsql".
roots() -> [].
roots() ->
[].
fields("config_connector") ->
emqx_postgresql_connector_schema:fields("config_connector");
fields(config) ->
fields("config_connector") ++
fields(action);
fields(action) ->
{pgsql,
hoconsc:mk(
hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)),
#{
desc => <<"PostgreSQL Action Config">>,
required => false
}
)};
fields(action_parameters) ->
[
{sql,
hoconsc:mk(
binary(),
#{desc => ?DESC("sql_template"), default => default_sql(), format => <<"sql">>}
)}
] ++
emqx_connector_schema_lib:prepare_statement_fields();
fields(pgsql_action) ->
emqx_bridge_v2_schema:make_producer_action_schema(
hoconsc:mk(
hoconsc:ref(?MODULE, action_parameters),
#{
required => true,
desc => ?DESC("action_parameters")
}
)
);
fields("put_bridge_v2") ->
fields(pgsql_action);
fields("get_bridge_v2") ->
fields(pgsql_action);
fields("post_bridge_v2") ->
fields(pgsql_action);
fields("config") ->
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{enable, hoconsc:mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{sql,
mk(
hoconsc:mk(
binary(),
#{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>}
#{desc => ?DESC("sql_template"), default => default_sql(), format => <<"sql">>}
)},
{local_topic,
mk(
hoconsc:mk(
binary(),
#{desc => ?DESC("local_topic"), default => undefined}
)}
@ -94,17 +108,132 @@ fields("get") ->
fields("post", Type) ->
[type_field(Type), name_field() | fields("config")].
type_field(Type) ->
{type, hoconsc:mk(hoconsc:enum([Type]), #{required => true, desc => ?DESC("desc_type")})}.
name_field() ->
{name, hoconsc:mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for PostgreSQL using `", string:to_upper(Method), "` method."];
desc(pgsql_action) ->
?DESC("pgsql_action");
desc(action_parameters) ->
?DESC("action_parameters");
desc("config_connector") ->
?DESC(emqx_postgresql_connector_schema, "config_connector");
desc(_) ->
undefined.
%% -------------------------------------------------------------------------------------------------
default_sql() ->
<<
"insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) "
"values (${id}, ${topic}, ${qos}, ${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))"
>>.
type_field(Type) ->
{type, mk(enum([Type]), #{required => true, desc => ?DESC("desc_type")})}.
%% Examples
name_field() ->
{name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.
bridge_v2_examples(Method) ->
[
#{
<<"pgsql">> => #{
summary => <<"PostgreSQL Action">>,
value => values({Method, pgsql})
}
}
].
conn_bridge_examples(Method) ->
[
#{
<<"pgsql">> => #{
summary => <<"PostgreSQL Bridge">>,
value => values_conn_bridge_examples(Method, pgsql)
}
}
].
values({get, PostgreSQLType}) ->
maps:merge(
#{
status => <<"connected">>,
node_status => [
#{
node => <<"emqx@localhost">>,
status => <<"connected">>
}
]
},
values({put, PostgreSQLType})
);
values({post, PostgreSQLType}) ->
values({put, PostgreSQLType});
values({put, PostgreSQLType}) ->
maps:merge(
#{
name => <<"my_action">>,
type => PostgreSQLType,
enable => true,
connector => <<"my_connector">>,
resource_opts => #{
batch_size => 1,
batch_time => <<"50ms">>,
inflight_window => 100,
max_buffer_bytes => <<"256MB">>,
request_ttl => <<"45s">>,
worker_pool_size => 16
}
},
values(parameters)
);
values(parameters) ->
#{
<<"parameters">> => #{
<<"sql">> =>
<<
"INSERT INTO client_events(clientid, event, created_at)"
"VALUES (\n"
" ${clientid},\n"
" ${event},\n"
" TO_TIMESTAMP((${timestamp} :: bigint))\n"
")"
>>
}
}.
values_conn_bridge_examples(get, Type) ->
maps:merge(
#{
status => <<"connected">>,
node_status => [
#{
node => <<"emqx@localhost">>,
status => <<"connected">>
}
]
},
values_conn_bridge_examples(post, Type)
);
values_conn_bridge_examples(_Method, Type) ->
#{
enable => true,
type => Type,
name => <<"foo">>,
server => <<"127.0.0.1:5432">>,
database => <<"mqtt">>,
pool_size => 8,
username => <<"root">>,
password => <<"******">>,
sql => default_sql(),
local_topic => <<"local/topic/#">>,
resource_opts => #{
worker_pool_size => 8,
health_check_interval => ?HEALTHCHECK_INTERVAL_RAW,
batch_size => ?DEFAULT_BATCH_SIZE,
batch_time => ?DEFAULT_BATCH_TIME,
query_mode => async,
max_buffer_bytes => ?DEFAULT_BUFFER_BYTES
}
}.

View File

@ -0,0 +1,22 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_pgsql_action_info).
-behaviour(emqx_action_info).
-export([
bridge_v1_type_name/0,
action_type_name/0,
connector_type_name/0,
schema_module/0
]).
bridge_v1_type_name() -> pgsql.
action_type_name() -> pgsql.
connector_type_name() -> pgsql.
schema_module() -> emqx_bridge_pgsql.

View File

@ -114,7 +114,7 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
emqx_mgmt_api_test_util:end_suite(),
ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]),
ok = emqx_common_test_helpers:stop_apps([emqx, emqx_postgresql, emqx_conf, emqx_bridge]),
ok.
init_per_testcase(_Testcase, Config) ->
@ -147,7 +147,7 @@ common_init(Config0) ->
ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")),
emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
% Ensure enterprise bridge module is loaded
ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]),
ok = emqx_common_test_helpers:start_apps([emqx, emqx_postgresql, emqx_conf, emqx_bridge]),
_ = emqx_bridge_enterprise:module_info(),
emqx_mgmt_api_test_util:init_suite(),
% Connect to pgsql directly and create the table
@ -259,17 +259,16 @@ send_message(Config, Payload) ->
BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name),
emqx_bridge:send_message(BridgeID, Payload).
query_resource(Config, Request) ->
query_resource(Config, Msg = _Request) ->
Name = ?config(pgsql_name, Config),
BridgeType = ?config(pgsql_bridge_type, Config),
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
emqx_resource:query(ResourceID, Request, #{timeout => 1_000}).
emqx_bridge_v2:query(BridgeType, Name, Msg, #{timeout => 1_000}).
query_resource_sync(Config, Request) ->
Name = ?config(pgsql_name, Config),
BridgeType = ?config(pgsql_bridge_type, Config),
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
emqx_resource_buffer_worker:simple_sync_query(ResourceID, Request).
ActionId = emqx_bridge_v2:id(BridgeType, Name),
emqx_resource_buffer_worker:simple_sync_query(ActionId, Request).
query_resource_async(Config, Request) ->
query_resource_async(Config, Request, _Opts = #{}).
@ -279,9 +278,8 @@ query_resource_async(Config, Request, Opts) ->
BridgeType = ?config(pgsql_bridge_type, Config),
Ref = alias([reply]),
AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end,
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
Timeout = maps:get(timeout, Opts, 500),
Return = emqx_resource:query(ResourceID, Request, #{
Return = emqx_bridge_v2:query(BridgeType, Name, Request, #{
timeout => Timeout,
async_reply_fun => {AsyncReplyFun, []}
}),
@ -441,13 +439,12 @@ t_get_status(Config) ->
Name = ?config(pgsql_name, Config),
BridgeType = ?config(pgsql_bridge_type, Config),
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)),
?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name)),
emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
?assertMatch(
{ok, Status} when Status =:= disconnected orelse Status =:= connecting,
emqx_resource_manager:health_check(ResourceID)
#{status := Status} when Status =:= disconnected orelse Status =:= connecting,
emqx_bridge_v2:health_check(BridgeType, Name)
)
end),
ok.
@ -655,7 +652,7 @@ t_nasty_sql_string(Config) ->
t_missing_table(Config) ->
Name = ?config(pgsql_name, Config),
BridgeType = ?config(pgsql_bridge_type, Config),
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
% ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
?check_trace(
begin
@ -665,21 +662,20 @@ t_missing_table(Config) ->
_Sleep = 1_000,
_Attempts = 20,
?assertMatch(
{ok, Status} when Status == connecting orelse Status == disconnected,
emqx_resource_manager:health_check(ResourceID)
#{status := Status} when Status == connecting orelse Status == disconnected,
emqx_bridge_v2:health_check(BridgeType, Name)
)
),
Val = integer_to_binary(erlang:unique_integer()),
SentData = #{payload => Val, timestamp => 1668602148000},
Timeout = 1000,
?assertMatch(
{error, {resource_error, #{reason := unhealthy_target}}},
query_resource(Config, {send_message, SentData, [], Timeout})
query_resource(Config, {send_message, SentData})
),
ok
end,
fun(Trace) ->
?assertMatch([_], ?of_kind(pgsql_undefined_table, Trace)),
?assertMatch([_ | _], ?of_kind(pgsql_undefined_table, Trace)),
ok
end
),
@ -689,7 +685,7 @@ t_missing_table(Config) ->
t_table_removed(Config) ->
Name = ?config(pgsql_name, Config),
BridgeType = ?config(pgsql_bridge_type, Config),
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
%%ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
?check_trace(
begin
connect_and_create_table(Config),
@ -697,13 +693,14 @@ t_table_removed(Config) ->
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID))
?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name))
),
connect_and_drop_table(Config),
Val = integer_to_binary(erlang:unique_integer()),
SentData = #{payload => Val, timestamp => 1668602148000},
case query_resource_sync(Config, {send_message, SentData, []}) of
{error, {unrecoverable_error, {error, error, <<"42P01">>, undefined_table, _, _}}} ->
ActionId = emqx_bridge_v2:id(BridgeType, Name),
case query_resource_sync(Config, {ActionId, SentData}) of
{error, {unrecoverable_error, _}} ->
ok;
?RESOURCE_ERROR_M(not_connected, _) ->
ok;
@ -720,7 +717,6 @@ t_table_removed(Config) ->
t_concurrent_health_checks(Config) ->
Name = ?config(pgsql_name, Config),
BridgeType = ?config(pgsql_bridge_type, Config),
ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),
?check_trace(
begin
connect_and_create_table(Config),
@ -728,11 +724,13 @@ t_concurrent_health_checks(Config) ->
?retry(
_Sleep = 1_000,
_Attempts = 20,
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID))
?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name))
),
emqx_utils:pmap(
fun(_) ->
?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID))
?assertMatch(
#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name)
)
end,
lists:seq(1, 20)
),

View File

@ -10,8 +10,11 @@
%% Test cases
%%===========================================================================
atoms() ->
[my_producer].
pulsar_producer_validations_test() ->
Name = list_to_atom("my_producer"),
Name = hd(atoms()),
Conf0 = pulsar_producer_hocon(),
Conf1 =
Conf0 ++

View File

@ -1,8 +1,9 @@
{application, emqx_bridge_timescale, [
{description, "EMQX Enterprise TimescaleDB Bridge"},
{vsn, "0.1.2"},
{vsn, "0.1.3"},
{registered, []},
{applications, [kernel, stdlib, emqx_resource]},
{env, [{emqx_action_info_module, emqx_bridge_timescale_action_info}]},
{env, []},
{modules, []},
{links, []}

View File

@ -3,6 +3,8 @@
%%--------------------------------------------------------------------
-module(emqx_bridge_timescale).
-include_lib("hocon/include/hoconsc.hrl").
-export([
conn_bridge_examples/1
]).
@ -14,6 +16,12 @@
desc/1
]).
%% Examples
-export([
bridge_v2_examples/1,
connector_examples/1
]).
%% -------------------------------------------------------------------------------------------------
%% api
@ -22,7 +30,7 @@ conn_bridge_examples(Method) ->
#{
<<"timescale">> => #{
summary => <<"Timescale Bridge">>,
value => emqx_bridge_pgsql:values(Method, timescale)
value => emqx_bridge_pgsql:values_conn_bridge_examples(Method, timescale)
}
}
].
@ -35,8 +43,55 @@ roots() -> [].
fields("post") ->
emqx_bridge_pgsql:fields("post", timescale);
fields("config_connector") ->
emqx_bridge_pgsql:fields("config_connector");
fields(action) ->
{timescale,
hoconsc:mk(
hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)),
#{
desc => <<"Timescale Action Config">>,
required => false
}
)};
fields("put_bridge_v2") ->
emqx_bridge_pgsql:fields(pgsql_action);
fields("get_bridge_v2") ->
emqx_bridge_pgsql:fields(pgsql_action);
fields("post_bridge_v2") ->
emqx_bridge_pgsql:fields(pgsql_action);
fields("put_connector") ->
emqx_bridge_pgsql:fields("config_connector");
fields("get_connector") ->
emqx_bridge_pgsql:fields("config_connector");
fields("post_connector") ->
emqx_bridge_pgsql:fields("config_connector");
fields(Method) ->
emqx_bridge_pgsql:fields(Method).
desc("config_connector") ->
?DESC(emqx_postgresql_connector_schema, "config_connector");
desc(_) ->
undefined.
%% Examples
connector_examples(Method) ->
[
#{
<<"timescale">> => #{
summary => <<"Timescale Connector">>,
value => emqx_postgresql_connector_schema:values({Method, <<"timescale">>})
}
}
].
bridge_v2_examples(Method) ->
[
#{
<<"timescale">> => #{
summary => <<"Timescale Action">>,
value => emqx_bridge_pgsql:values({Method, timescale})
}
}
].

View File

@ -0,0 +1,22 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_bridge_timescale_action_info).
-behaviour(emqx_action_info).
-export([
bridge_v1_type_name/0,
action_type_name/0,
connector_type_name/0,
schema_module/0
]).
bridge_v1_type_name() -> timescale.
action_type_name() -> timescale.
connector_type_name() -> timescale.
schema_module() -> emqx_bridge_timescale.

View File

@ -28,7 +28,7 @@
-export([remove/2, remove/3]).
-export([tombstone/2]).
-export([reset/2, reset/3]).
-export([dump_schema/2]).
-export([dump_schema/2, reformat_schema_dump/1]).
-export([schema_module/0]).
%% TODO: move to emqx_dashboard when we stop building api schema at build time
@ -180,9 +180,263 @@ gen_schema_json(Dir, SchemaModule, Lang) ->
include_importance_up_from => IncludeImportance,
desc_resolver => make_desc_resolver(Lang)
},
JsonMap = hocon_schema_json:gen(SchemaModule, Opts),
IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]),
ok = file:write_file(SchemaJsonFile, IoData).
StructsJsonArray = hocon_schema_json:gen(SchemaModule, Opts),
IoData = emqx_utils_json:encode(StructsJsonArray, [pretty, force_utf8]),
ok = file:write_file(SchemaJsonFile, IoData),
ok = gen_preformat_md_json_files(Dir, StructsJsonArray, Lang).
gen_preformat_md_json_files(Dir, StructsJsonArray, Lang) ->
NestedStruct = reformat_schema_dump(StructsJsonArray),
%% write to files
NestedJsonFile = filename:join([Dir, "schema-v2-" ++ Lang ++ ".json"]),
io:format(user, "===< Generating: ~s~n", [NestedJsonFile]),
ok = file:write_file(
NestedJsonFile, emqx_utils_json:encode(NestedStruct, [pretty, force_utf8])
),
ok.
%% @doc This function is exported for scripts/schema-dump-reformat.escript
reformat_schema_dump(StructsJsonArray0) ->
%% prepare
StructsJsonArray = deduplicate_by_full_name(StructsJsonArray0),
#{fields := RootFields} = hd(StructsJsonArray),
RootNames0 = lists:map(fun(#{name := RootName}) -> RootName end, RootFields),
RootNames = lists:map(fun to_bin/1, RootNames0),
%% reformat
[Root | FlatStructs0] = lists:map(
fun(Struct) -> gen_flat_doc(RootNames, Struct) end, StructsJsonArray
),
FlatStructs = [Root#{text => <<"root">>, hash => <<"root">>} | FlatStructs0],
gen_nested_doc(FlatStructs).
deduplicate_by_full_name(Structs) ->
deduplicate_by_full_name(Structs, #{}, []).
deduplicate_by_full_name([], _Seen, Acc) ->
lists:reverse(Acc);
deduplicate_by_full_name([#{full_name := FullName} = H | T], Seen, Acc) ->
case maps:get(FullName, Seen, false) of
false ->
deduplicate_by_full_name(T, Seen#{FullName => H}, [H | Acc]);
H ->
%% Name clash, but identical, ignore
deduplicate_by_full_name(T, Seen, Acc);
_Different ->
%% ADD NAMESPACE!
throw({duplicate_full_name, FullName})
end.
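%% Illustrative sketch (hypothetical minimal structs, not part of the
%% original change): identical duplicates collapse to the first occurrence,
%% while two different structs sharing a full name would throw
%% {duplicate_full_name, _}.
dedup_demo() ->
    A = #{full_name => "ns:a", fields => []},
    B = #{full_name => "ns:b", fields => []},
    [A, B] = deduplicate_by_full_name([A, B, A]),
    ok.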
%% Generate nested docs from the root struct.
%% Due to the fact that the same struct can be referenced by multiple fields,
%% we need to generate a unique nested doc for each reference.
%% The unique path to each type is of the following format:
%% - A path starts with either 'T-' or 'V-'. T stands for type, V stands for value.
%% - A path is a list of strings delimited by '-'.
%% - The letter S is used to separate struct name from field name.
%% - Field names, however, are NOT denoted by a leading 'F-'.
%% For example:
%% - T-root: the root struct;
%% - T-foo-S-footype: the struct named "footype" in the foo field of root struct;
%% - V-foo-S-footype-bar: the field named "bar" in the struct named "footype" in the foo field of root struct
gen_nested_doc(Structs) ->
KeyByFullName = lists:foldl(
fun(#{hash := FullName} = Struct, Acc) ->
maps:put(FullName, Struct, Acc)
end,
#{},
Structs
),
FindFn = fun(Hash) -> maps:get(Hash, KeyByFullName) end,
gen_nested_doc(hd(Structs), FindFn, []).
gen_nested_doc(#{fields := Fields} = Struct, FindFn, Path) ->
TypeAnchor = make_type_anchor(Path),
ValueAnchor = fun(FieldName) -> make_value_anchor(Path, FieldName) end,
NewFields = lists:map(
fun(#{text := Name} = Field) ->
NewField = expand_field(Field, FindFn, Path),
NewField#{hash => ValueAnchor(Name)}
end,
Fields
),
Struct#{
fields => NewFields,
hash => TypeAnchor
}.
%% Make anchor for type.
%% Start with "T-" to distinguish from value anchor.
make_type_anchor([]) ->
<<"T-root">>;
make_type_anchor(Path) ->
to_bin(["T-", lists:join("-", lists:reverse(Path))]).
%% Value anchor is used to link to the field's struct.
%% Start with "V-" to distinguish from type anchor.
make_value_anchor(Path, FieldName) ->
to_bin(["V-", join_path_hash(Path, FieldName)]).
%% Make a globally unique "hash" (the http anchor) for each struct field.
join_path_hash([], Name) ->
Name;
join_path_hash(Path, Name) ->
to_bin(lists:join("-", lists:reverse([Name | Path]))).
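%% Illustrative sketch (hypothetical names, not part of the original
%% change): path elements accumulate innermost-first, so the helpers
%% reverse them before joining. For the struct "footype" reached via the
%% root field "foo":
anchor_demo() ->
    Path = ["footype", "S", "foo"],
    <<"T-foo-S-footype">> = make_type_anchor(Path),
    <<"V-foo-S-footype-bar">> = make_value_anchor(Path, "bar"),
    ok.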
%% Expand field's struct reference to nested doc.
expand_field(#{text := Name, refs := References} = Field, FindFn, Path) ->
%% Add struct type name in path to make it unique.
NewReferences = lists:map(
fun(#{text := StructName} = Ref) ->
expand_ref(Ref, FindFn, [StructName, "S", Name | Path])
end,
References
),
Field#{refs => NewReferences};
expand_field(Field, _FindFn, _Path) ->
%% No reference, no need to expand.
Field.
expand_ref(#{hash := FullName}, FindFn, Path) ->
Struct = FindFn(FullName),
gen_nested_doc(Struct, FindFn, Path).
%% generate flat docs for each struct.
%% using references to link to other structs.
gen_flat_doc(RootNames, #{full_name := FullName, fields := Fields} = S) ->
ShortName = short_name(FullName),
case is_missing_namespace(ShortName, to_bin(FullName), RootNames) of
true ->
io:format(standard_error, "WARN: no_namespace_for: ~s~n", [FullName]);
false ->
ok
end,
#{
text => ShortName,
hash => format_hash(FullName),
doc => maps:get(desc, S, <<"">>),
fields => format_fields(Fields)
}.
format_fields([]) ->
[];
format_fields([Field | Fields]) ->
[format_field(Field) | format_fields(Fields)].
format_field(#{name := Name, aliases := Aliases, type := Type} = F) ->
L = [
{text, Name},
{type, format_type(Type)},
{refs, format_refs(Type)},
{aliases,
case Aliases of
[] -> undefined;
_ -> Aliases
end},
{default, maps:get(hocon, maps:get(default, F, #{}), undefined)},
{doc, maps:get(desc, F, undefined)}
],
maps:from_list([{K, V} || {K, V} <- L, V =/= undefined]).
format_refs(Type) ->
References = find_refs(Type),
case lists:map(fun format_ref/1, References) of
[] -> undefined;
L -> L
end.
format_ref(FullName) ->
#{text => short_name(FullName), hash => format_hash(FullName)}.
find_refs(Type) ->
lists:reverse(find_refs(Type, [])).
%% go deep into union, array, and map to find references
find_refs(#{kind := union, members := Members}, Acc) ->
lists:foldl(fun find_refs/2, Acc, Members);
find_refs(#{kind := array, elements := Elements}, Acc) ->
find_refs(Elements, Acc);
find_refs(#{kind := map, values := Values}, Acc) ->
find_refs(Values, Acc);
find_refs(#{kind := struct, name := FullName}, Acc) ->
[FullName | Acc];
find_refs(_, Acc) ->
Acc.
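%% Illustrative sketch (hypothetical type map, not part of the original
%% change): references are collected depth-first through unions, arrays
%% and maps, in their order of appearance.
find_refs_demo() ->
    Union = #{
        kind => union,
        members => [
            #{kind => array, elements => #{kind => struct, name => "ns:a"}},
            #{kind => map, name => "k", values => #{kind => struct, name => "ns:b"}}
        ]
    },
    ["ns:a", "ns:b"] = find_refs(Union),
    ok.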
format_type(#{kind := primitive, name := Name}) ->
format_primitive_type(Name);
format_type(#{kind := singleton, name := Name}) ->
to_bin(["String(\"", to_bin(Name), "\")"]);
format_type(#{kind := enum, symbols := Symbols}) ->
CommaSep = lists:join(",", lists:map(fun(S) -> to_bin(S) end, Symbols)),
to_bin(["Enum(", CommaSep, ")"]);
format_type(#{kind := array, elements := ElementsType}) ->
to_bin(["Array(", format_type(ElementsType), ")"]);
format_type(#{kind := union, members := MemberTypes} = U) ->
DN = maps:get(display_name, U, undefined),
case DN of
undefined ->
to_bin(["OneOf(", format_union_members(MemberTypes), ")"]);
Name ->
format_primitive_type(Name)
end;
format_type(#{kind := struct, name := FullName}) ->
to_bin(["Struct(", short_name(FullName), ")"]);
format_type(#{kind := map, name := Name, values := ValuesType}) ->
to_bin(["Map($", Name, "->", format_type(ValuesType), ")"]).
format_union_members(Members) ->
format_union_members(Members, []).
format_union_members([], Acc) ->
lists:join(",", lists:reverse(Acc));
format_union_members([Member | Members], Acc) ->
NewAcc = [format_type(Member) | Acc],
format_union_members(Members, NewAcc).
format_primitive_type(TypeStr) ->
Spec = emqx_conf_schema_types:readable_docgen(?MODULE, TypeStr),
to_bin(maps:get(type, Spec)).
%% All types should have a namespace to avoid name clashes.
is_missing_namespace(ShortName, FullName, RootNames) ->
case lists:member(ShortName, RootNames) of
true ->
false;
false ->
ShortName =:= FullName
end.
%% Returns the short name from a full name; full names are delimited by a colon (:).
short_name(FullName) ->
case string:split(FullName, ":") of
[_, Name] -> to_bin(Name);
_ -> to_bin(FullName)
end.
%% Returns the hash-anchor from a full name; full names are delimited by a colon (:).
format_hash(FullName) ->
case string:split(FullName, ":") of
[Namespace, Name] ->
ok = warn_bad_namespace(Namespace),
iolist_to_binary([Namespace, "__", Name]);
_ ->
iolist_to_binary(FullName)
end.
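%% Illustrative sketch (hypothetical names, not part of the original
%% change): a full name "ns:struct" splits into a short name and an
%% "ns__struct" anchor; names without a colon pass through unchanged.
name_demo() ->
    <<"config">> = short_name("emqx_bridge:config"),
    <<"emqx_bridge__config">> = format_hash("emqx_bridge:config"),
    <<"root">> = format_hash("root"),
    ok.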
%% namespace should only have letters, numbers, and underscores.
warn_bad_namespace(Namespace) ->
case re:run(Namespace, "^[a-zA-Z0-9_]+$", [{capture, none}]) of
nomatch ->
case erlang:get({bad_namespace, Namespace}) of
true ->
ok;
_ ->
erlang:put({bad_namespace, Namespace}, true),
io:format(standard_error, "WARN: bad_namespace: ~s~n", [Namespace])
end;
_ ->
ok
end.
%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time.
hotconf_schema_json() ->
@ -193,12 +447,7 @@ hotconf_schema_json() ->
bridge_schema_json() ->
Version = <<"0.1.0">>,
SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => Version},
put(emqx_bridge_schema_version, Version),
try
gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo)
after
erase(emqx_bridge_schema_version)
end.
gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo).
%% TODO: remove it and also remove hocon_md.erl and friends.
%% markdown generation from schema is a failure and we are moving to an interactive
@ -311,12 +560,7 @@ hocon_schema_to_spec(Atom, _LocalModule) when is_atom(Atom) ->
typename_to_spec(TypeStr, Module) ->
emqx_conf_schema_types:readable_dashboard(Module, TypeStr).
to_bin(List) when is_list(List) ->
case io_lib:printable_list(List) of
true -> unicode:characters_to_binary(List);
false -> List
end;
to_bin(List) when is_list(List) -> iolist_to_binary(List);
to_bin(Boolean) when is_boolean(Boolean) -> Boolean;
to_bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
to_bin(X) ->
X.
to_bin(X) -> X.

View File

@ -194,7 +194,7 @@ keys() ->
emqx_config:get_root_names() -- hidden_roots().
drop_hidden_roots(Conf) ->
lists:foldl(fun(K, Acc) -> maps:remove(K, Acc) end, Conf, hidden_roots()).
maps:without(hidden_roots(), Conf).
hidden_roots() ->
[
@ -202,6 +202,7 @@ hidden_roots() ->
<<"stats">>,
<<"broker">>,
<<"persistent_session_store">>,
<<"session_persistence">>,
<<"plugins">>,
<<"zones">>
].

View File

@ -80,8 +80,7 @@ upgrade_raw_conf(RawConf) ->
RawConf1 = emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2(RawConf),
emqx_otel_schema:upgrade_legacy_metrics(RawConf1).
%% root config should not have a namespace
namespace() -> undefined.
namespace() -> emqx.
tags() ->
[<<"EMQX">>].

View File

@ -108,18 +108,28 @@ config_key_path() ->
pre_config_update([?ROOT_KEY], RawConf, RawConf) ->
{ok, RawConf};
pre_config_update([?ROOT_KEY], NewConf, _RawConf) ->
case multi_validate_connector_names(NewConf) of
ok ->
{ok, convert_certs(NewConf)};
Error ->
Error
end;
pre_config_update(_, {_Oper, _, _}, undefined) ->
{error, connector_not_found};
pre_config_update(_, {Oper, _Type, _Name}, OldConfig) ->
%% to save the 'enable' to the config files
{ok, OldConfig#{<<"enable">> => operation_to_enable(Oper)}};
pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
case validate_connector_name_in_config(Path) of
ok ->
case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of
{error, Reason} ->
{error, Reason};
{ok, ConfNew} ->
{ok, ConfNew}
end;
Error ->
Error
end.
operation_to_enable(disable) -> false;
@ -458,3 +468,51 @@ ensure_no_channels(Configs) ->
{error, Reason, _State} ->
{error, Reason}
end.
to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
to_bin(B) when is_binary(B) -> B.
validate_connector_name(ConnectorName) ->
try
_ = emqx_resource:validate_name(to_bin(ConnectorName)),
ok
catch
throw:Error ->
{error, Error}
end.
validate_connector_name_in_config(Path) ->
case Path of
[?ROOT_KEY, _ConnectorType, ConnectorName] ->
validate_connector_name(ConnectorName);
_ ->
ok
end.
multi_validate_connector_names(Conf) ->
ConnectorTypeAndNames =
[
{Type, Name}
|| {Type, NameToConf} <- maps:to_list(Conf),
{Name, _Conf} <- maps:to_list(NameToConf)
],
BadConnectors =
lists:filtermap(
fun({Type, Name}) ->
case validate_connector_name(Name) of
ok -> false;
_Error -> {true, #{type => Type, name => Name}}
end
end,
ConnectorTypeAndNames
),
case BadConnectors of
[] ->
ok;
[_ | _] ->
{error, #{
kind => validation_error,
reason => bad_connector_names,
bad_connectors => BadConnectors
}}
end.

View File

@ -372,7 +372,7 @@ schema("/connectors_probe") ->
case emqx_connector:remove(ConnectorType, ConnectorName) of
ok ->
?NO_CONTENT;
{error, {active_channels, Channels}} ->
{error, {post_config_update, _HandlerMod, {active_channels, Channels}}} ->
?BAD_REQUEST(
{<<"Cannot delete connector while there are active channels defined for this connector">>,
Channels}

View File

@ -20,15 +20,27 @@
resource_type(Type) when is_binary(Type) ->
resource_type(binary_to_atom(Type, utf8));
resource_type(azure_event_hub_producer) ->
%% We use AEH's Kafka interface.
emqx_bridge_kafka_impl_producer;
resource_type(confluent_producer) ->
emqx_bridge_kafka_impl_producer;
resource_type(gcp_pubsub_producer) ->
emqx_bridge_gcp_pubsub_impl_producer;
resource_type(kafka_producer) ->
emqx_bridge_kafka_impl_producer;
%% We use AEH's Kafka interface.
resource_type(azure_event_hub_producer) ->
emqx_bridge_kafka_impl_producer;
resource_type(matrix) ->
emqx_postgresql;
resource_type(mongodb) ->
emqx_bridge_mongodb_connector;
resource_type(pgsql) ->
emqx_postgresql;
resource_type(syskeeper_forwarder) ->
emqx_bridge_syskeeper_connector;
resource_type(syskeeper_proxy) ->
emqx_bridge_syskeeper_proxy_server;
resource_type(timescale) ->
emqx_postgresql;
resource_type(Type) ->
error({unknown_connector_type, Type}).
@ -37,6 +49,8 @@ connector_impl_module(ConnectorType) when is_binary(ConnectorType) ->
connector_impl_module(binary_to_atom(ConnectorType, utf8));
connector_impl_module(azure_event_hub_producer) ->
emqx_bridge_azure_event_hub;
connector_impl_module(confluent_producer) ->
emqx_bridge_confluent_producer;
connector_impl_module(_ConnectorType) ->
undefined.
@ -45,6 +59,30 @@ fields(connectors) ->
connector_structs() ->
[
{azure_event_hub_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_azure_event_hub, "config_connector")),
#{
desc => <<"Azure Event Hub Connector Config">>,
required => false
}
)},
{confluent_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_confluent_producer, "config_connector")),
#{
desc => <<"Confluent Connector Config">>,
required => false
}
)},
{gcp_pubsub_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_gcp_pubsub_producer_schema, "config_connector")),
#{
desc => <<"GCP PubSub Producer Connector Config">>,
required => false
}
)},
{kafka_producer,
mk(
hoconsc:map(name, ref(emqx_bridge_kafka, "config_connector")),
@ -53,11 +91,11 @@ connector_structs() ->
required => false
}
)},
{azure_event_hub_producer,
{mongodb,
mk(
hoconsc:map(name, ref(emqx_bridge_azure_event_hub, "config_connector")),
hoconsc:map(name, ref(emqx_bridge_mongodb, "config_connector")),
#{
desc => <<"Azure Event Hub Connector Config">>,
desc => <<"MongoDB Connector Config">>,
required => false
}
)},
@ -76,6 +114,30 @@ connector_structs() ->
desc => <<"Syskeeper Proxy Connector Config">>,
required => false
}
)},
{pgsql,
mk(
hoconsc:map(name, ref(emqx_bridge_pgsql, "config_connector")),
#{
desc => <<"PostgreSQL Connector Config">>,
required => false
}
)},
{timescale,
mk(
hoconsc:map(name, ref(emqx_bridge_timescale, "config_connector")),
#{
desc => <<"Timescale Connector Config">>,
required => false
}
)},
{matrix,
mk(
hoconsc:map(name, ref(emqx_bridge_matrix, "config_connector")),
#{
desc => <<"Matrix Connector Config">>,
required => false
}
)}
].
@ -93,22 +155,40 @@ examples(Method) ->
schema_modules() ->
[
emqx_bridge_kafka,
emqx_bridge_azure_event_hub,
emqx_bridge_confluent_producer,
emqx_bridge_gcp_pubsub_producer_schema,
emqx_bridge_kafka,
emqx_bridge_matrix,
emqx_bridge_mongodb,
emqx_bridge_syskeeper_connector,
emqx_bridge_syskeeper_proxy
emqx_bridge_syskeeper_proxy,
emqx_bridge_timescale,
emqx_postgresql_connector_schema
].
api_schemas(Method) ->
[
%% We need to map the `type' field of a request (binary) to a
%% connector schema module.
api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"),
api_ref(
emqx_bridge_azure_event_hub, <<"azure_event_hub_producer">>, Method ++ "_connector"
),
api_ref(
emqx_bridge_confluent_producer, <<"confluent_producer">>, Method ++ "_connector"
),
api_ref(
emqx_bridge_gcp_pubsub_producer_schema,
<<"gcp_pubsub_producer">>,
Method ++ "_connector"
),
api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"),
api_ref(emqx_bridge_matrix, <<"matrix">>, Method ++ "_connector"),
api_ref(emqx_bridge_mongodb, <<"mongodb">>, Method ++ "_connector"),
api_ref(emqx_bridge_syskeeper_connector, <<"syskeeper_forwarder">>, Method),
api_ref(emqx_bridge_syskeeper_proxy, <<"syskeeper_proxy">>, Method)
api_ref(emqx_bridge_syskeeper_proxy, <<"syskeeper_proxy">>, Method),
api_ref(emqx_bridge_timescale, <<"timescale">>, Method ++ "_connector"),
api_ref(emqx_postgresql_connector_schema, <<"pgsql">>, Method ++ "_connector")
].
api_ref(Module, Type, Method) ->

View File

@ -22,13 +22,20 @@
-import(hoconsc, [mk/2, ref/2]).
-export([transform_bridges_v1_to_connectors_and_bridges_v2/1]).
-export([
transform_bridges_v1_to_connectors_and_bridges_v2/1,
transform_bridge_v1_config_to_action_config/4,
top_level_common_connector_keys/0
]).
-export([roots/0, fields/1, desc/1, namespace/0, tags/0]).
-export([get_response/0, put_request/0, post_request/0]).
-export([connector_type_to_bridge_types/1]).
-export([common_fields/0]).
-export([resource_opts_fields/0, resource_opts_fields/1]).
-if(?EMQX_RELEASE_EDITION == ee).
enterprise_api_schemas(Method) ->
@ -59,10 +66,16 @@ enterprise_fields_connectors() -> [].
-endif.
connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer];
connector_type_to_bridge_types(azure_event_hub_producer) -> [azure_event_hub_producer];
connector_type_to_bridge_types(confluent_producer) -> [confluent_producer];
connector_type_to_bridge_types(gcp_pubsub_producer) -> [gcp_pubsub, gcp_pubsub_producer];
connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer];
connector_type_to_bridge_types(matrix) -> [matrix];
connector_type_to_bridge_types(mongodb) -> [mongodb, mongodb_rs, mongodb_sharded, mongodb_single];
connector_type_to_bridge_types(pgsql) -> [pgsql];
connector_type_to_bridge_types(syskeeper_forwarder) -> [syskeeper_forwarder];
connector_type_to_bridge_types(syskeeper_proxy) -> [].
connector_type_to_bridge_types(syskeeper_proxy) -> [];
connector_type_to_bridge_types(timescale) -> [timescale].
actions_config_name() -> <<"actions">>.
@ -98,16 +111,25 @@ bridge_configs_to_transform(
end.
split_bridge_to_connector_and_action(
{ConnectorsMap, {BridgeType, BridgeName, BridgeConf, ConnectorFields, PreviousRawConfig}}
{ConnectorsMap, {BridgeType, BridgeName, BridgeV1Conf, ConnectorFields, PreviousRawConfig}}
) ->
ConnectorMap =
case emqx_action_info:has_custom_bridge_v1_config_to_connector_config(BridgeType) of
true ->
emqx_action_info:bridge_v1_config_to_connector_config(
BridgeType, BridgeV1Conf
);
false ->
%% We do an automatic transformation to get the connector config
%% if the callback is not defined.
%% Get connector fields from bridge config
ConnectorMap = lists:foldl(
lists:foldl(
fun({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of
case maps:is_key(to_bin(ConnectorFieldName), BridgeV1Conf) of
true ->
NewToTransform = maps:put(
to_bin(ConnectorFieldName),
maps:get(to_bin(ConnectorFieldName), BridgeConf),
maps:get(to_bin(ConnectorFieldName), BridgeV1Conf),
ToTransformSoFar
),
NewToTransform;
@ -117,34 +139,78 @@ split_bridge_to_connector_and_action(
end,
#{},
ConnectorFields
),
%% Remove connector fields from bridge config to create Action
ActionMap0 = lists:foldl(
fun
({enable, _Spec}, ToTransformSoFar) ->
%% The enable field is used in both the connector and the action
ToTransformSoFar;
({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
case maps:is_key(to_bin(ConnectorFieldName), BridgeConf) of
true ->
maps:remove(to_bin(ConnectorFieldName), ToTransformSoFar);
false ->
ToTransformSoFar
end
)
end,
BridgeConf,
ConnectorFields
),
%% Generate a connector name, if needed. Avoid doing so if there was a previous config.
ConnectorName =
case PreviousRawConfig of
#{<<"connector">> := ConnectorName0} -> ConnectorName0;
_ -> generate_connector_name(ConnectorsMap, BridgeName, 0)
end,
%% Add connector field to action map
ActionMap = maps:put(<<"connector">>, ConnectorName, ActionMap0),
ActionMap =
case emqx_action_info:has_custom_bridge_v1_config_to_action_config(BridgeType) of
true ->
emqx_action_info:bridge_v1_config_to_action_config(
BridgeType, BridgeV1Conf, ConnectorName
);
false ->
transform_bridge_v1_config_to_action_config(
BridgeV1Conf, ConnectorName, ConnectorFields
)
end,
{BridgeType, BridgeName, ActionMap, ConnectorName, ConnectorMap}.
transform_bridge_v1_config_to_action_config(
BridgeV1Conf, ConnectorName, ConnectorConfSchemaMod, ConnectorConfSchemaName
) ->
ConnectorFields = ConnectorConfSchemaMod:fields(ConnectorConfSchemaName),
transform_bridge_v1_config_to_action_config(
BridgeV1Conf, ConnectorName, ConnectorFields
).
top_level_common_connector_keys() ->
[
<<"enable">>,
<<"connector">>,
<<"local_topic">>,
<<"resource_opts">>,
<<"description">>,
<<"parameters">>
].
transform_bridge_v1_config_to_action_config(
BridgeV1Conf, ConnectorName, ConnectorFields
) ->
TopKeys = top_level_common_connector_keys(),
TopKeysMap = maps:from_keys(TopKeys, true),
%% Remove connector fields
ActionMap0 = lists:foldl(
fun
({enable, _Spec}, ToTransformSoFar) ->
%% The enable field is used in both the connector and the action
ToTransformSoFar;
({ConnectorFieldName, _Spec}, ToTransformSoFar) ->
ConnectorFieldNameBin = to_bin(ConnectorFieldName),
case
maps:is_key(ConnectorFieldNameBin, BridgeV1Conf) andalso
(not maps:is_key(ConnectorFieldNameBin, TopKeysMap))
of
true ->
maps:remove(ConnectorFieldNameBin, ToTransformSoFar);
false ->
ToTransformSoFar
end
end,
BridgeV1Conf,
ConnectorFields
),
%% Add the connector field
ActionMap1 = maps:put(<<"connector">>, ConnectorName, ActionMap0),
TopMap = maps:with(TopKeys, ActionMap1),
RestMap = maps:without(TopKeys, ActionMap1),
%% Other parameters should be stuffed into `parameters'
emqx_utils_maps:deep_merge(TopMap, #{<<"parameters">> => RestMap}).
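%% Illustrative sketch (hypothetical field names, not part of the original
%% change). Given a v1 bridge config
%%   #{<<"enable">> => true, <<"server">> => <<"localhost:5432">>,
%%     <<"sql">> => <<"insert ...">>, <<"local_topic">> => <<"t/#">>}
%% and connector fields [{server, _Sc}], the connector-only key
%% <<"server">> is dropped, <<"connector">> is added, the top-level keys
%% stay where they are, and the rest (here <<"sql">>) moves under
%% <<"parameters">>:
%%   #{<<"enable">> => true, <<"local_topic">> => <<"t/#">>,
%%     <<"connector">> => ConnectorName,
%%     <<"parameters">> => #{<<"sql">> => <<"insert ...">>}}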
generate_connector_name(ConnectorsMap, BridgeName, Attempt) ->
ConnectorNameList =
case Attempt of
@ -204,8 +270,9 @@ transform_old_style_bridges_to_connector_and_actions_of_type(
RawConfigSoFar1
),
%% Add action
ActionType = emqx_action_info:bridge_v1_type_to_action_type(to_bin(BridgeType)),
RawConfigSoFar3 = emqx_utils_maps:deep_put(
[actions_config_name(), to_bin(maybe_rename(BridgeType)), BridgeName],
[actions_config_name(), to_bin(ActionType), BridgeName],
RawConfigSoFar2,
ActionMap
),
@ -224,12 +291,6 @@ transform_bridges_v1_to_connectors_and_bridges_v2(RawConfig) ->
),
NewRawConf.
%% v1 uses 'kafka' as bridge type v2 uses 'kafka_producer'
maybe_rename(kafka) ->
kafka_producer;
maybe_rename(Name) ->
Name.
%%======================================================================================
%% HOCON Schema Callbacks
%%======================================================================================
@ -298,6 +359,30 @@ desc(connectors) ->
desc(_) ->
undefined.
common_fields() ->
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{description, emqx_schema:description_schema()}
].
resource_opts_fields() ->
resource_opts_fields(_Overrides = []).
resource_opts_fields(Overrides) ->
%% Note: these don't include buffer-related configurations because buffer workers are
%% tied to the action.
ConnectorROFields = [
health_check_interval,
query_mode,
request_ttl,
start_after_created,
start_timeout
],
lists:filter(
fun({Key, _Sc}) -> lists:member(Key, ConnectorROFields) end,
emqx_resource_schema:create_opts(Overrides)
).
%%======================================================================================
%% Helper Functions
%%======================================================================================

View File

@ -204,6 +204,71 @@ t_remove_fail(_Config) ->
),
ok.
t_create_with_bad_name_direct_path({init, Config}) ->
meck:new(emqx_connector_ee_schema, [passthrough]),
meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR),
meck:new(?CONNECTOR, [non_strict]),
meck:expect(?CONNECTOR, callback_mode, 0, async_if_possible),
meck:expect(?CONNECTOR, on_start, 2, {ok, connector_state}),
meck:expect(?CONNECTOR, on_stop, 2, ok),
meck:expect(?CONNECTOR, on_get_status, 2, connected),
Config;
t_create_with_bad_name_direct_path({'end', _Config}) ->
meck:unload(),
ok;
t_create_with_bad_name_direct_path(_Config) ->
Path = [connectors, kafka_producer, 'test_哈哈'],
ConnConfig0 = connector_config(),
%% Note: must contain SSL options to trigger original bug.
Cacertfile = emqx_common_test_helpers:app_path(
emqx,
filename:join(["etc", "certs", "cacert.pem"])
),
ConnConfig = ConnConfig0#{<<"ssl">> => #{<<"cacertfile">> => Cacertfile}},
?assertMatch(
{error,
{pre_config_update, _ConfigHandlerMod, #{
kind := validation_error,
reason := <<"Invalid name format.", _/binary>>
}}},
emqx:update_config(Path, ConnConfig)
),
ok.
t_create_with_bad_name_root_path({init, Config}) ->
meck:new(emqx_connector_ee_schema, [passthrough]),
meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR),
meck:new(?CONNECTOR, [non_strict]),
meck:expect(?CONNECTOR, callback_mode, 0, async_if_possible),
meck:expect(?CONNECTOR, on_start, 2, {ok, connector_state}),
meck:expect(?CONNECTOR, on_stop, 2, ok),
meck:expect(?CONNECTOR, on_get_status, 2, connected),
Config;
t_create_with_bad_name_root_path({'end', _Config}) ->
meck:unload(),
ok;
t_create_with_bad_name_root_path(_Config) ->
Path = [connectors],
BadConnectorName = <<"test_哈哈">>,
ConnConfig0 = connector_config(),
%% Note: must contain SSL options to trigger original bug.
Cacertfile = emqx_common_test_helpers:app_path(
emqx,
filename:join(["etc", "certs", "cacert.pem"])
),
ConnConfig = ConnConfig0#{<<"ssl">> => #{<<"cacertfile">> => Cacertfile}},
Conf = #{<<"kafka_producer">> => #{BadConnectorName => ConnConfig}},
?assertMatch(
{error,
{pre_config_update, _ConfigHandlerMod, #{
kind := validation_error,
reason := bad_connector_names,
bad_connectors := [#{type := <<"kafka_producer">>, name := BadConnectorName}]
}}},
emqx:update_config(Path, Conf)
),
ok.
%% helpers
connector_config() ->

View File

@ -25,7 +25,7 @@
-include_lib("snabbkaffe/include/test_macros.hrl").
-define(CONNECTOR_NAME, (atom_to_binary(?FUNCTION_NAME))).
-define(CONNECTOR(NAME, TYPE), #{
-define(RESOURCE(NAME, TYPE), #{
%<<"ssl">> => #{<<"enable">> => false},
<<"type">> => TYPE,
<<"name">> => NAME
@ -52,12 +52,57 @@
-define(KAFKA_CONNECTOR_BASE, ?KAFKA_CONNECTOR_BASE(?KAFKA_BOOTSTRAP_HOST)).
-define(KAFKA_CONNECTOR(Name, BootstrapHosts),
maps:merge(
?CONNECTOR(Name, ?CONNECTOR_TYPE),
?RESOURCE(Name, ?CONNECTOR_TYPE),
?KAFKA_CONNECTOR_BASE(BootstrapHosts)
)
).
-define(KAFKA_CONNECTOR(Name), ?KAFKA_CONNECTOR(Name, ?KAFKA_BOOTSTRAP_HOST)).
-define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))).
-define(BRIDGE_TYPE_STR, "kafka_producer").
-define(BRIDGE_TYPE, <<?BRIDGE_TYPE_STR>>).
-define(KAFKA_BRIDGE(Name, Connector), ?RESOURCE(Name, ?BRIDGE_TYPE)#{
<<"enable">> => true,
<<"connector">> => Connector,
<<"kafka">> => #{
<<"buffer">> => #{
<<"memory_overload_protection">> => true,
<<"mode">> => <<"hybrid">>,
<<"per_partition_limit">> => <<"2GB">>,
<<"segment_bytes">> => <<"100MB">>
},
<<"compression">> => <<"no_compression">>,
<<"kafka_ext_headers">> => [
#{
<<"kafka_ext_header_key">> => <<"clientid">>,
<<"kafka_ext_header_value">> => <<"${clientid}">>
},
#{
<<"kafka_ext_header_key">> => <<"topic">>,
<<"kafka_ext_header_value">> => <<"${topic}">>
}
],
<<"kafka_header_value_encode_mode">> => <<"none">>,
<<"kafka_headers">> => <<"${pub_props}">>,
<<"max_batch_bytes">> => <<"896KB">>,
<<"max_inflight">> => 10,
<<"message">> => #{
<<"key">> => <<"${.clientid}">>,
<<"timestamp">> => <<"${.timestamp}">>,
<<"value">> => <<"${.}">>
},
<<"partition_count_refresh_interval">> => <<"60s">>,
<<"partition_strategy">> => <<"random">>,
<<"required_acks">> => <<"all_isr">>,
<<"topic">> => <<"kafka-topic">>
},
<<"local_topic">> => <<"mqtt/local/topic">>,
<<"resource_opts">> => #{
<<"health_check_interval">> => <<"32s">>
}
}).
-define(KAFKA_BRIDGE(Name), ?KAFKA_BRIDGE(Name, ?CONNECTOR_NAME)).
%% -define(CONNECTOR_TYPE_MQTT, <<"mqtt">>).
%% -define(MQTT_CONNECTOR(SERVER, NAME), ?CONNECTOR(NAME, ?CONNECTOR_TYPE_MQTT)#{
%% <<"server">> => SERVER,
@ -105,7 +150,8 @@
emqx,
emqx_auth,
emqx_management,
{emqx_connector, "connectors {}"}
{emqx_connector, "connectors {}"},
{emqx_bridge, "actions {}"}
]).
-define(APPSPEC_DASHBOARD,
@ -128,7 +174,8 @@ all() ->
groups() ->
AllTCs = emqx_common_test_helpers:all(?MODULE),
SingleOnlyTests = [
t_connectors_probe
t_connectors_probe,
t_fail_delete_with_action
],
ClusterLaterJoinOnlyTCs = [
% t_cluster_later_join_metrics
@ -187,29 +234,38 @@ end_per_group(_, Config) ->
emqx_cth_suite:stop(?config(group_apps, Config)),
ok.
init_per_testcase(_TestCase, Config) ->
init_per_testcase(TestCase, Config) ->
case ?config(cluster_nodes, Config) of
undefined ->
init_mocks();
init_mocks(TestCase);
Nodes ->
[erpc:call(Node, ?MODULE, init_mocks, []) || Node <- Nodes]
[erpc:call(Node, ?MODULE, init_mocks, [TestCase]) || Node <- Nodes]
end,
Config.
end_per_testcase(_TestCase, Config) ->
end_per_testcase(TestCase, Config) ->
Node = ?config(node, Config),
ok = erpc:call(Node, ?MODULE, clear_resources, [TestCase]),
case ?config(cluster_nodes, Config) of
undefined ->
meck:unload();
Nodes ->
[erpc:call(Node, meck, unload, []) || Node <- Nodes]
[erpc:call(N, meck, unload, []) || N <- Nodes]
end,
Node = ?config(node, Config),
ok = emqx_common_test_helpers:call_janitor(),
ok = erpc:call(Node, fun clear_resources/0),
ok.
-define(CONNECTOR_IMPL, dummy_connector_impl).
init_mocks() ->
init_mocks(t_fail_delete_with_action) ->
init_mocks(common),
meck:expect(?CONNECTOR_IMPL, on_add_channel, 4, {ok, connector_state}),
meck:expect(?CONNECTOR_IMPL, on_remove_channel, 3, {ok, connector_state}),
meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected),
ok = meck:expect(?CONNECTOR_IMPL, on_get_channels, fun(ResId) ->
emqx_bridge_v2:get_channels_for_connector(ResId)
end),
ok;
init_mocks(_TestCase) ->
meck:new(emqx_connector_ee_schema, [passthrough, no_link]),
meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR_IMPL),
meck:new(?CONNECTOR_IMPL, [non_strict, no_link]),
@ -235,7 +291,15 @@ init_mocks() ->
),
[?CONNECTOR_IMPL, emqx_connector_ee_schema].
clear_resources() ->
clear_resources(t_fail_delete_with_action) ->
lists:foreach(
fun(#{type := Type, name := Name}) ->
ok = emqx_bridge_v2:remove(Type, Name)
end,
emqx_bridge_v2:list()
),
clear_resources(common);
clear_resources(_) ->
lists:foreach(
fun(#{type := Type, name := Name}) ->
ok = emqx_connector:remove(Type, Name)
@ -646,12 +710,85 @@ t_connectors_probe(Config) ->
request_json(
post,
uri(["connectors_probe"]),
?CONNECTOR(<<"broken_connector">>, <<"unknown_type">>),
?RESOURCE(<<"broken_connector">>, <<"unknown_type">>),
Config
)
),
ok.
t_create_with_bad_name(Config) ->
ConnectorName = <<"test_哈哈">>,
Conf0 = ?KAFKA_CONNECTOR(ConnectorName),
%% Note: must contain SSL options to trigger original bug.
Cacertfile = emqx_common_test_helpers:app_path(
emqx,
filename:join(["etc", "certs", "cacert.pem"])
),
Conf = Conf0#{<<"ssl">> => #{<<"cacertfile">> => Cacertfile}},
{ok, 400, #{
<<"code">> := <<"BAD_REQUEST">>,
<<"message">> := Msg0
}} = request_json(
post,
uri(["connectors"]),
Conf,
Config
),
Msg = emqx_utils_json:decode(Msg0, [return_maps]),
?assertMatch(#{<<"kind">> := <<"validation_error">>}, Msg),
ok.
t_fail_delete_with_action(Config) ->
Name = ?CONNECTOR_NAME,
?assertMatch(
{ok, 201, #{
<<"type">> := ?CONNECTOR_TYPE,
<<"name">> := Name,
<<"enable">> := true,
<<"status">> := <<"connected">>,
<<"node_status">> := [_ | _]
}},
request_json(
post,
uri(["connectors"]),
?KAFKA_CONNECTOR(Name),
Config
)
),
ConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, Name),
BridgeName = ?BRIDGE_NAME,
?assertMatch(
{ok, 201, #{
<<"type">> := ?BRIDGE_TYPE,
<<"name">> := BridgeName,
<<"enable">> := true,
<<"status">> := <<"connected">>,
<<"node_status">> := [_ | _],
<<"connector">> := Name,
<<"kafka">> := #{},
<<"local_topic">> := _,
<<"resource_opts">> := _
}},
request_json(
post,
uri(["actions"]),
?KAFKA_BRIDGE(?BRIDGE_NAME),
Config
)
),
%% delete the connector
?assertMatch(
{ok, 400, #{
<<"code">> := <<"BAD_REQUEST">>,
<<"message">> :=
<<"{<<\"Cannot delete connector while there are active channels",
" defined for this connector\">>,", _/binary>>
}},
request_json(delete, uri(["connectors", ConnectorID]), Config)
),
ok.
%%% helpers
listen_on_random_port() ->
SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}],

View File

@ -12,6 +12,7 @@
-behaviour(emqx_dashboard_sso).
-export([
namespace/0,
hocon_ref/0,
login_ref/0,
fields/1,
@ -43,6 +44,8 @@
%% Hocon Schema
%%------------------------------------------------------------------------------
namespace() -> "dashboard".
hocon_ref() ->
hoconsc:ref(?MODULE, saml).

Some files were not shown because too many files have changed in this diff