Merge pull request #12925 from zmstone/0424-merge-latest-master-to-release-57

0424 merge latest master to release 57
This commit is contained in:
Zaiming (Stone) Shi 2024-04-25 10:47:00 +02:00 committed by GitHub
commit 82790e6ea4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
262 changed files with 12394 additions and 2871 deletions

View File

@ -4,7 +4,7 @@ services:
greptimedb:
container_name: greptimedb
hostname: greptimedb
image: greptime/greptimedb:v0.4.4
image: greptime/greptimedb:v0.7.1
expose:
- "4000"
- "4001"

View File

@ -1,24 +1,53 @@
version: '3.9'
services:
iotdb:
container_name: iotdb
hostname: iotdb
image: apache/iotdb:1.1.0-standalone
iotdb_1_3_0:
container_name: iotdb130
hostname: iotdb130
image: apache/iotdb:1.3.0-standalone
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb
- cn_internal_address=iotdb130
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_target_config_node_list=iotdb:10710
- dn_rpc_address=iotdb
- dn_internal_address=iotdb
- cn_seed_config_node=iotdb130:10710
- dn_rpc_address=iotdb130
- dn_internal_address=iotdb130
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_target_config_node_list=iotdb:10710
- dn_seed_config_node=iotdb130:10710
# volumes:
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs
expose:
- "18080"
# IoTDB's REST interface, uncomment for local testing
# ports:
# - "18080:18080"
networks:
- emqx_bridge
iotdb_1_1_0:
container_name: iotdb110
hostname: iotdb110
image: apache/iotdb:1.1.0-standalone
restart: always
environment:
- enable_rest_service=true
- cn_internal_address=iotdb110
- cn_internal_port=10710
- cn_consensus_port=10720
- cn_target_config_node_list=iotdb110:10710
- dn_rpc_address=iotdb110
- dn_internal_address=iotdb110
- dn_rpc_port=6667
- dn_mpp_data_exchange_port=10740
- dn_schema_region_consensus_port=10750
- dn_data_region_consensus_port=10760
- dn_target_config_node_list=iotdb110:10710
# volumes:
# - ./data:/iotdb/data
# - ./logs:/iotdb/logs

View File

@ -9,3 +9,4 @@ accounts:
defaultGroupPerm: PUB|SUB
topicPerms:
- TopicTest=PUB|SUB
- Topic2=PUB|SUB

View File

@ -139,9 +139,15 @@
"enabled": true
},
{
"name": "iotdb",
"name": "iotdb110",
"listen": "0.0.0.0:18080",
"upstream": "iotdb:18080",
"upstream": "iotdb110:18080",
"enabled": true
},
{
"name": "iotdb130",
"listen": "0.0.0.0:28080",
"upstream": "iotdb130:18080",
"enabled": true
},
{

23
.github/CODEOWNERS vendored
View File

@ -1,18 +1,29 @@
## Default
* @emqx/emqx-review-board
# emqx-review-board members
## HJianBo
## id
## ieQu1
## keynslug
## qzhuyan
## savonarola
## terry-xiaoyu
## thalesmg
## zhongwencool
## zmstone
## apps
/apps/emqx/ @emqx/emqx-review-board @lafirest
/apps/emqx_connector/ @emqx/emqx-review-board
/apps/emqx_auth/ @emqx/emqx-review-board @JimMoen @savonarola
/apps/emqx_auth/ @emqx/emqx-review-board @JimMoen
/apps/emqx_connector/ @emqx/emqx-review-board @JimMoen
/apps/emqx_dashboard/ @emqx/emqx-review-board @JimMoen @lafirest
/apps/emqx_dashboard_rbac/ @emqx/emqx-review-board @lafirest
/apps/emqx_dashboard_sso/ @emqx/emqx-review-board @JimMoen @lafirest
/apps/emqx_exhook/ @emqx/emqx-review-board @JimMoen @HJianBo
/apps/emqx_ft/ @emqx/emqx-review-board @savonarola @keynslug
/apps/emqx_exhook/ @emqx/emqx-review-board @JimMoen
/apps/emqx_gateway/ @emqx/emqx-review-board @lafirest
/apps/emqx_management/ @emqx/emqx-review-board @lafirest @sstrigler
/apps/emqx_management/ @emqx/emqx-review-board @lafirest
/apps/emqx_opentelemetry @emqx/emqx-review-board @SergeTupchiy
/apps/emqx_plugins/ @emqx/emqx-review-board @JimMoen
/apps/emqx_prometheus/ @emqx/emqx-review-board @JimMoen
/apps/emqx_psk/ @emqx/emqx-review-board @lafirest
@ -20,7 +31,7 @@
/apps/emqx_rule_engine/ @emqx/emqx-review-board @kjellwinblad
/apps/emqx_slow_subs/ @emqx/emqx-review-board @lafirest
/apps/emqx_statsd/ @emqx/emqx-review-board @JimMoen
/apps/emqx_durable_storage/ @emqx/emqx-review-board @ieQu1 @keynslug
/apps/emqx_durable_storage/ @emqx/emqx-review-board @keynslug
## CI
/deploy/ @emqx/emqx-review-board @Rory-Z

View File

@ -151,7 +151,23 @@ jobs:
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
- name: build emqx packages
- name: build tgz
env:
PROFILE: ${{ matrix.profile }}
ARCH: ${{ matrix.arch }}
OS: ${{ matrix.os }}
IS_ELIXIR: ${{ matrix.with_elixir }}
BUILDER: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
BUILDER_SYSTEM: force_docker
run: |
./scripts/buildx.sh \
--profile $PROFILE \
--arch $ARCH \
--builder $BUILDER \
--elixir $IS_ELIXIR \
--pkgtype tgz
- name: build pkg
if: matrix.with_elixir == 'no'
env:
PROFILE: ${{ matrix.profile }}
ARCH: ${{ matrix.arch }}

View File

@ -24,8 +24,8 @@ jobs:
matrix:
branch:
- master
- release-55
- release-56
- release-57
language:
- cpp
- python

View File

@ -31,7 +31,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh api --method GET -f head_branch=master -f status=completed -f exclude_pull_requests=true /repos/emqx/emqx/actions/runs > runs.json
gh api --method GET -f head_sha=$(git rev-parse HEAD) -f status=completed -f exclude_pull_requests=true /repos/emqx/emqx/actions/runs > runs.json
for id in $(jq -r '.workflow_runs[] | select((."conclusion" == "failure") and (."name" != "Keep master green") and .run_attempt < 3) | .id' runs.json); do
echo "rerun https://github.com/emqx/emqx/actions/runs/$id"
gh api --method POST /repos/emqx/emqx/actions/runs/$id/rerun-failed-jobs || true

View File

@ -67,12 +67,13 @@ jobs:
BUCKET=${{ secrets.AWS_S3_BUCKET }}
OUTPUT_DIR=${{ steps.profile.outputs.s3dir }}
aws s3 cp --recursive s3://$BUCKET/$OUTPUT_DIR/${{ env.ref_name }} packages
- uses: emqx/upload-assets@8d2083b4dbe3151b0b735572eaa153b6acb647fe # 0.5.0
- uses: emqx/upload-assets@974befcf0e72a1811360a81c798855efb66b0551 # 0.5.2
env:
GITHUB_TOKEN: ${{ github.token }}
with:
asset_paths: '["packages/*"]'
tag_name: "${{ env.ref_name }}"
skip_existing: true
- name: update to emqx.io
if: startsWith(env.ref_name, 'v') && ((github.event_name == 'release' && !github.event.release.prerelease) || inputs.publish_release_artefacts)
run: |

View File

@ -47,6 +47,9 @@ jobs:
echo "_EMQX_DOCKER_IMAGE_TAG=$_EMQX_DOCKER_IMAGE_TAG" >> $GITHUB_ENV
- name: dashboard tests
working-directory: ./scripts/ui-tests
env:
EMQX_VERSION: ${{ inputs.version-emqx }}
EMQX_ENTERPRISE_VERSION: ${{ inputs.version-emqx-enterprise }}
run: |
set -eu
docker compose up --abort-on-container-exit --exit-code-from selenium

View File

@ -35,6 +35,7 @@ jobs:
shell: bash
outputs:
matrix: ${{ steps.matrix.outputs.matrix }}
skip: ${{ steps.matrix.outputs.skip }}
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
with:
@ -49,12 +50,16 @@ jobs:
changed_files="$(git diff --name-only ${BEFORE_REF} ${AFTER_REF} apps/emqx)"
if [ "$changed_files" = '' ]; then
echo "nothing changed in apps/emqx, ignored."
echo "matrix=[]" | tee -a $GITHUB_OUTPUT
echo 'matrix=[]' | tee -a $GITHUB_OUTPUT
echo 'skip=true' | tee -a $GITHUB_OUTPUT
exit 0
else
echo 'skip=false' | tee -a $GITHUB_OUTPUT
echo 'matrix=[{"type": "eunit_proper_and_static"},{"type": "1_3"},{"type": "2_3"},{"type": "3_3"}]' | tee -a $GITHUB_OUTPUT
fi
echo 'matrix=[{"type": "eunit_proper_and_static"},{"type": "1_3"},{"type": "2_3"},{"type": "3_3"}]' | tee -a $GITHUB_OUTPUT
run_emqx_app_tests:
if: needs.prepare_matrix.outputs.skip != 'true'
needs:
- prepare_matrix
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}

2
.gitignore vendored
View File

@ -76,3 +76,5 @@ rebar-git-cache.tar
.docker_image_tag
.emqx_docker_image_tags
.git/
apps/emqx_utils/src/emqx_variform_parser.erl
apps/emqx_utils/src/emqx_variform_scan.erl

View File

@ -20,8 +20,8 @@ endif
# Dashboard version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.8.0
export EMQX_EE_DASHBOARD_VERSION ?= e1.6.0
export EMQX_DASHBOARD_VERSION ?= v1.9.0-beta.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.7.0-beta.1
PROFILE ?= emqx
REL_PROFILES := emqx emqx-enterprise

View File

@ -673,7 +673,6 @@ end).
-define(SHARE, "$share").
-define(QUEUE, "$queue").
-define(SHARE(Group, Topic), emqx_topic:join([<<?SHARE>>, Group, Topic])).
-define(REDISPATCH_TO(GROUP, TOPIC), {GROUP, TOPIC}).

View File

@ -20,12 +20,17 @@
-record(?TRACE, {
name :: binary() | undefined | '_',
type :: clientid | topic | ip_address | undefined | '_',
type :: clientid | topic | ip_address | ruleid | undefined | '_',
filter ::
emqx_types:topic() | emqx_types:clientid() | emqx_trace:ip_address() | undefined | '_',
emqx_types:topic()
| emqx_types:clientid()
| emqx_trace:ip_address()
| emqx_trace:ruleid()
| undefined
| '_',
enable = true :: boolean() | '_',
payload_encode = text :: hex | text | hidden | '_',
extra = #{} :: map() | '_',
extra = #{formatter => text} :: #{formatter => text | json} | '_',
start_at :: integer() | undefined | '_',
end_at :: integer() | undefined | '_'
}).

View File

@ -44,11 +44,20 @@
).
-define(SLOG_THROTTLE(Level, Data, Meta),
case emqx_log_throttler:allow(maps:get(msg, Data)) of
case logger:allow(Level, ?MODULE) of
true ->
?SLOG(Level, Data, Meta);
(fun(#{msg := __Msg} = __Data) ->
case emqx_log_throttler:allow(__Msg) of
true ->
logger:log(Level, __Data, Meta);
false ->
?_DO_TRACE(Level, __Msg, maps:merge(__Data, Meta))
end
end)(
Data
);
false ->
?_DO_TRACE(Level, maps:get(msg, Data), maps:merge(Data, Meta))
ok
end
).

View File

@ -184,7 +184,7 @@ list_all_pubranges(Node) ->
session_open(Node, ClientId) ->
ClientInfo = #{},
ConnInfo = #{peername => {undefined, undefined}},
ConnInfo = #{peername => {undefined, undefined}, proto_name => <<"MQTT">>, proto_ver => 5},
WillMsg = undefined,
erpc:call(
Node,
@ -252,7 +252,6 @@ t_session_subscription_idempotency(Config) ->
ok
end,
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
Session = session_open(Node1, ClientId),
?assertMatch(
#{SubTopicFilter := #{}},
@ -326,7 +325,6 @@ t_session_unsubscription_idempotency(Config) ->
ok
end,
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
Session = session_open(Node1, ClientId),
?assertEqual(
#{},
@ -415,10 +413,7 @@ do_t_session_discard(Params) ->
ok
end,
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
ok
end
[]
),
ok.

View File

@ -27,8 +27,8 @@
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.1"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.1"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.2"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.3"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.42.1"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},

View File

@ -16,9 +16,14 @@
-module(emqx_config_backup).
-type ok_result() :: #{
root_key => emqx_utils_maps:config_key(),
changed => [emqx_utils_maps:config_key_path()]
}.
-type error_result() :: #{root_key => emqx_utils_maps:config_key(), reason => term()}.
-callback import_config(RawConf :: map()) ->
{ok, #{
root_key => emqx_utils_maps:config_key(),
changed => [emqx_utils_maps:config_key_path()]
}}
| {error, #{root_key => emqx_utils_maps:config_key(), reason => term()}}.
{ok, ok_result()}
| {error, error_result()}
| {results, {[ok_result()], [error_result()]}}.

View File

@ -253,8 +253,12 @@ persist_publish(Msg) ->
case emqx_persistent_message:persist(Msg) of
ok ->
[persisted];
{_SkipOrError, _Reason} ->
% TODO: log errors?
{skipped, _} ->
[];
{error, Recoverable, Reason} ->
?SLOG(debug, #{
msg => "failed_to_persist_message", is_recoverable => Recoverable, reason => Reason
}),
[]
end.

View File

@ -251,7 +251,7 @@ init(
MP -> MP
end,
ListenerId = emqx_listeners:listener_id(Type, Listener),
ClientInfo0 = set_peercert_infos(
ClientInfo = set_peercert_infos(
Peercert,
#{
zone => Zone,
@ -269,8 +269,6 @@ init(
},
Zone
),
AttrExtractionConfig = get_mqtt_conf(Zone, client_attrs_init),
ClientInfo = initialize_client_attrs_from_cert(AttrExtractionConfig, ClientInfo0, Peercert),
{NClientInfo, NConnInfo} = take_ws_cookie(ClientInfo, ConnInfo),
#channel{
conninfo = NConnInfo,
@ -1575,7 +1573,7 @@ enrich_client(ConnPkt, Channel = #channel{clientinfo = ClientInfo}) ->
fun maybe_username_as_clientid/2,
fun maybe_assign_clientid/2,
%% attr init should happen after clientid and username assign
fun maybe_set_client_initial_attr/2
fun maybe_set_client_initial_attrs/2
],
ConnPkt,
ClientInfo
@ -1587,47 +1585,6 @@ enrich_client(ConnPkt, Channel = #channel{clientinfo = ClientInfo}) ->
{error, ReasonCode, Channel#channel{clientinfo = NClientInfo}}
end.
initialize_client_attrs_from_cert(
#{
extract_from := From,
extract_regexp := Regexp,
extract_as := AttrName
},
ClientInfo,
Peercert
) when From =:= cn orelse From =:= dn ->
case extract_client_attr_from_cert(From, Regexp, Peercert) of
{ok, Value} ->
?SLOG(
debug,
#{
msg => "client_attr_init_from_cert",
extracted_as => AttrName,
extracted_value => Value
}
),
ClientInfo#{client_attrs => #{AttrName => Value}};
_ ->
ClientInfo#{client_attrs => #{}}
end;
initialize_client_attrs_from_cert(_, ClientInfo, _Peercert) ->
ClientInfo.
extract_client_attr_from_cert(cn, Regexp, Peercert) ->
CN = esockd_peercert:common_name(Peercert),
re_extract(CN, Regexp);
extract_client_attr_from_cert(dn, Regexp, Peercert) ->
DN = esockd_peercert:subject(Peercert),
re_extract(DN, Regexp).
re_extract(Str, Regexp) when is_binary(Str) ->
case re:run(Str, Regexp, [{capture, all_but_first, list}]) of
{match, [_ | _] = List} -> {ok, iolist_to_binary(List)};
_ -> nomatch
end;
re_extract(_NotStr, _Regexp) ->
ignored.
set_username(
#mqtt_packet_connect{username = Username},
ClientInfo = #{username := undefined}
@ -1668,75 +1625,50 @@ maybe_assign_clientid(#mqtt_packet_connect{clientid = <<>>}, ClientInfo) ->
maybe_assign_clientid(#mqtt_packet_connect{clientid = ClientId}, ClientInfo) ->
{ok, ClientInfo#{clientid => ClientId}}.
maybe_set_client_initial_attr(ConnPkt, #{zone := Zone} = ClientInfo0) ->
Config = get_mqtt_conf(Zone, client_attrs_init),
ClientInfo = initialize_client_attrs_from_user_property(Config, ConnPkt, ClientInfo0),
Attrs = maps:get(client_attrs, ClientInfo, #{}),
case extract_attr_from_clientinfo(Config, ClientInfo) of
{ok, Value} ->
#{extract_as := Name} = Config,
?SLOG(
debug,
#{
msg => "client_attr_init_from_clientinfo",
extracted_as => Name,
extracted_value => Value
}
),
{ok, ClientInfo#{client_attrs => Attrs#{Name => Value}}};
_ ->
{ok, ClientInfo}
end.
get_client_attrs_init_config(Zone) ->
get_mqtt_conf(Zone, client_attrs_init, []).
initialize_client_attrs_from_user_property(
#{
extract_from := user_property,
extract_as := PropertyKey
},
ConnPkt,
ClientInfo
) ->
case extract_client_attr_from_user_property(ConnPkt, PropertyKey) of
{ok, Value} ->
?SLOG(
debug,
#{
msg => "client_attr_init_from_user_property",
extracted_as => PropertyKey,
extracted_value => Value
}
),
ClientInfo#{client_attrs => #{PropertyKey => Value}};
_ ->
ClientInfo
end;
initialize_client_attrs_from_user_property(_, _ConnInfo, ClientInfo) ->
ClientInfo.
maybe_set_client_initial_attrs(ConnPkt, #{zone := Zone} = ClientInfo) ->
Inits = get_client_attrs_init_config(Zone),
UserProperty = get_user_property_as_map(ConnPkt),
{ok, initialize_client_attrs(Inits, ClientInfo#{user_property => UserProperty})}.
extract_client_attr_from_user_property(
#mqtt_packet_connect{properties = #{'User-Property' := UserProperty}}, PropertyKey
) ->
case lists:keyfind(PropertyKey, 1, UserProperty) of
{_, Value} ->
{ok, Value};
_ ->
not_found
end;
extract_client_attr_from_user_property(_ConnPkt, _PropertyKey) ->
ignored.
initialize_client_attrs(Inits, ClientInfo) ->
lists:foldl(
fun(#{expression := Variform, set_as_attr := Name}, Acc) ->
Attrs = maps:get(client_attrs, ClientInfo, #{}),
case emqx_variform:render(Variform, ClientInfo) of
{ok, Value} ->
?SLOG(
debug,
#{
msg => "client_attr_initialized",
set_as_attr => Name,
attr_value => Value
}
),
Acc#{client_attrs => Attrs#{Name => Value}};
{error, Reason} ->
?SLOG(
warning,
#{
msg => "client_attr_initialization_failed",
reason => Reason
}
),
Acc
end
end,
ClientInfo,
Inits
).
extract_attr_from_clientinfo(#{extract_from := clientid, extract_regexp := Regexp}, #{
clientid := ClientId
}) ->
re_extract(ClientId, Regexp);
extract_attr_from_clientinfo(#{extract_from := username, extract_regexp := Regexp}, #{
username := Username
}) when
Username =/= undefined
get_user_property_as_map(#mqtt_packet_connect{properties = #{'User-Property' := UserProperty}}) when
is_list(UserProperty)
->
re_extract(Username, Regexp);
extract_attr_from_clientinfo(_Config, _CLientInfo) ->
ignored.
maps:from_list(UserProperty);
get_user_property_as_map(_) ->
#{}.
fix_mountpoint(#{mountpoint := undefined} = ClientInfo) ->
ClientInfo;

View File

@ -222,7 +222,9 @@
% Messages delivered
{counter, 'messages.delivered'},
% Messages acked
{counter, 'messages.acked'}
{counter, 'messages.acked'},
% Messages persistently stored
{counter, 'messages.persisted'}
]
).
@ -718,4 +720,5 @@ reserved_idx('overload_protection.gc') -> 403;
reserved_idx('overload_protection.new_conn') -> 404;
reserved_idx('messages.validation_succeeded') -> 405;
reserved_idx('messages.validation_failed') -> 406;
reserved_idx('messages.persisted') -> 407;
reserved_idx(_) -> undefined.

View File

@ -98,7 +98,7 @@ pre_config_update(_Root, _NewConf, _OldConf) ->
%%--------------------------------------------------------------------
-spec persist(emqx_types:message()) ->
ok | {skipped, _Reason} | {error, _TODO}.
emqx_ds:store_batch_result() | {skipped, needs_no_persistence}.
persist(Msg) ->
?WHEN_ENABLED(
case needs_persistence(Msg) andalso has_subscribers(Msg) of
@ -114,6 +114,7 @@ needs_persistence(Msg) ->
-spec store_message(emqx_types:message()) -> emqx_ds:store_batch_result().
store_message(Msg) ->
emqx_metrics:inc('messages.persisted'),
emqx_ds:store_batch(?PERSISTENT_MESSAGE_DB, [Msg], #{sync => false}).
has_subscribers(#message{topic = Topic}) ->

View File

@ -75,7 +75,8 @@
%% Managment APIs:
-export([
list_client_subscriptions/1
list_client_subscriptions/1,
get_client_subscription/2
]).
%% session table operations
@ -116,15 +117,42 @@
%% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be
%% an atom, in theory (?).
-type id() :: binary().
-type topic_filter() :: emqx_types:topic().
-type topic_filter() :: emqx_types:topic() | #share{}.
%% Subscription and subscription states:
%%
%% Persistent sessions cannot simply update or delete subscriptions,
%% since subscription parameters must be exactly the same during
%% replay.
%%
%% To solve this problem, we store subscriptions in a twofold manner:
%%
%% - `subscription' is an object that holds up-to-date information
%% about the client's subscription and a reference to the latest
%% subscription state id
%%
%% - `subscription_state' is an immutable object that holds
%% information about the subcription parameters at a certain point of
%% time
%%
%% New subscription states are created whenever the client subscribes
%% to a topics, or updates an existing subscription.
%%
%% Stream replay states contain references to the subscription states.
%%
%% Outdated subscription states are discarded when they are not
%% referenced by either subscription or stream replay state objects.
-type subscription_id() :: integer().
%% This type is a result of merging
%% `emqx_persistent_session_ds_subs:subscription()' with its current
%% state.
-type subscription() :: #{
id := subscription_id(),
start_time := emqx_ds:time(),
props := map(),
deleted := boolean()
current_state := emqx_persistent_session_ds_subs:subscription_state_id(),
subopts := map()
}.
-define(TIMER_PULL, timer_pull).
@ -184,7 +212,9 @@
seqno_q2_dup,
seqno_q2_rec,
seqno_q2_next,
n_streams
n_streams,
awaiting_rel_cnt,
awaiting_rel_max
]).
%%
@ -206,7 +236,8 @@ open(#{clientid := ClientID} = ClientInfo, ConnInfo, MaybeWillMsg, Conf) ->
ok = emqx_cm:takeover_kick(ClientID),
case session_open(ClientID, ClientInfo, ConnInfo, MaybeWillMsg) of
Session0 = #{} ->
Session = Session0#{props => Conf},
Session1 = Session0#{props => Conf},
Session = do_expire(ClientInfo, Session1),
{true, ensure_timers(Session), []};
false ->
false
@ -249,7 +280,7 @@ info(is_persistent, #{}) ->
info(subscriptions, #{s := S}) ->
emqx_persistent_session_ds_subs:to_map(S);
info(subscriptions_cnt, #{s := S}) ->
emqx_topic_gbt:size(emqx_persistent_session_ds_state:get_subscriptions(S));
emqx_persistent_session_ds_state:n_subscriptions(S);
info(subscriptions_max, #{props := Conf}) ->
maps:get(max_subscriptions, Conf);
info(upgrade_qos, #{props := Conf}) ->
@ -262,21 +293,21 @@ info(inflight_max, #{inflight := Inflight}) ->
emqx_persistent_session_ds_inflight:receive_maximum(Inflight);
info(retry_interval, #{props := Conf}) ->
maps:get(retry_interval, Conf);
% info(mqueue, #sessmem{mqueue = MQueue}) ->
% MQueue;
info(mqueue_len, #{inflight := Inflight}) ->
emqx_persistent_session_ds_inflight:n_buffered(all, Inflight);
% info(mqueue_max, #sessmem{mqueue = MQueue}) ->
% emqx_mqueue:max_len(MQueue);
info(mqueue_dropped, _Session) ->
0;
%% info(next_pkt_id, #{s := S}) ->
%% {PacketId, _} = emqx_persistent_message_ds_replayer:next_packet_id(S),
%% PacketId;
% info(awaiting_rel, #sessmem{awaiting_rel = AwaitingRel}) ->
% AwaitingRel;
%% info(awaiting_rel_cnt, #{s := S}) ->
%% seqno_diff(?QOS_2, ?rec, ?committed(?QOS_2), S);
info(awaiting_rel, #{s := S}) ->
emqx_persistent_session_ds_state:fold_awaiting_rel(fun maps:put/3, #{}, S);
info(awaiting_rel_max, #{props := Conf}) ->
maps:get(max_awaiting_rel, Conf);
info(awaiting_rel_cnt, #{s := S}) ->
emqx_persistent_session_ds_state:n_awaiting_rel(S);
info(await_rel_timeout, #{props := Conf}) ->
maps:get(await_rel_timeout, Conf);
info(seqno_q1_comm, #{s := S}) ->
emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S);
info(seqno_q1_dup, #{s := S}) ->
@ -292,17 +323,7 @@ info(seqno_q2_rec, #{s := S}) ->
info(seqno_q2_next, #{s := S}) ->
emqx_persistent_session_ds_state:get_seqno(?next(?QOS_2), S);
info(n_streams, #{s := S}) ->
emqx_persistent_session_ds_state:fold_streams(
fun(_, _, Acc) -> Acc + 1 end,
0,
S
);
info(awaiting_rel_max, #{props := Conf}) ->
maps:get(max_awaiting_rel, Conf);
info(await_rel_timeout, #{props := _Conf}) ->
%% TODO: currently this setting is ignored:
%% maps:get(await_rel_timeout, Conf).
0;
emqx_persistent_session_ds_state:n_streams(S);
info({MsgsQ, _PagerParams}, _Session) when MsgsQ =:= mqueue_msgs; MsgsQ =:= inflight_msgs ->
{error, not_implemented}.
@ -337,93 +358,49 @@ print_session(ClientId) ->
-spec subscribe(topic_filter(), emqx_types:subopts(), session()) ->
{ok, session()} | {error, emqx_types:reason_code()}.
subscribe(
#share{},
_SubOpts,
_Session
) ->
%% TODO: Shared subscriptions are not supported yet:
{error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED};
subscribe(
TopicFilter,
SubOpts,
Session = #{id := ID, s := S0}
Session
) ->
case emqx_persistent_session_ds_subs:lookup(TopicFilter, S0) of
undefined ->
%% TODO: max subscriptions
%% N.B.: we chose to update the router before adding the
%% subscription to the session/iterator table. The
%% reasoning for this is as follows:
%%
%% Messages matching this topic filter should start to be
%% persisted as soon as possible to avoid missing
%% messages. If this is the first such persistent session
%% subscription, it's important to do so early on.
%%
%% This could, in turn, lead to some inconsistency: if
%% such a route gets created but the session/iterator data
%% fails to be updated accordingly, we have a dangling
%% route. To remove such dangling routes, we may have a
%% periodic GC process that removes routes that do not
%% have a matching persistent subscription. Also, route
%% operations use dirty mnesia operations, which
%% inherently have room for inconsistencies.
%%
%% In practice, we use the iterator reference table as a
%% source of truth, since it is guarded by a transaction
%% context: we consider a subscription operation to be
%% successful if it ended up changing this table. Both
%% router and iterator information can be reconstructed
%% from this table, if needed.
ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, ID),
{SubId, S1} = emqx_persistent_session_ds_state:new_id(S0),
Subscription = #{
start_time => now_ms(),
props => SubOpts,
id => SubId,
deleted => false
},
IsNew = true;
Subscription0 = #{} ->
Subscription = Subscription0#{props => SubOpts},
IsNew = false,
S1 = S0
end,
S = emqx_persistent_session_ds_subs:on_subscribe(TopicFilter, Subscription, S1),
?tp(persistent_session_ds_subscription_added, #{
topic_filter => TopicFilter, sub => Subscription, is_new => IsNew
}),
{ok, Session#{s => S}}.
case emqx_persistent_session_ds_subs:on_subscribe(TopicFilter, SubOpts, Session) of
{ok, S1} ->
S = emqx_persistent_session_ds_state:commit(S1),
{ok, Session#{s => S}};
Error = {error, _} ->
Error
end.
-spec unsubscribe(topic_filter(), session()) ->
{ok, session(), emqx_types:subopts()} | {error, emqx_types:reason_code()}.
unsubscribe(
TopicFilter,
Session = #{id := ID, s := S0}
Session = #{id := SessionId, s := S0}
) ->
case emqx_persistent_session_ds_subs:lookup(TopicFilter, S0) of
undefined ->
{error, ?RC_NO_SUBSCRIPTION_EXISTED};
Subscription = #{props := SubOpts} ->
S = do_unsubscribe(ID, TopicFilter, Subscription, S0),
{ok, Session#{s => S}, SubOpts}
case emqx_persistent_session_ds_subs:on_unsubscribe(SessionId, TopicFilter, S0) of
{ok, S1, #{id := SubId, subopts := SubOpts}} ->
S2 = emqx_persistent_session_ds_stream_scheduler:on_unsubscribe(SubId, S1),
S = emqx_persistent_session_ds_state:commit(S2),
{ok, Session#{s => S}, SubOpts};
Error = {error, _} ->
Error
end.
-spec do_unsubscribe(id(), topic_filter(), subscription(), emqx_persistent_session_ds_state:t()) ->
emqx_persistent_session_ds_state:t().
do_unsubscribe(SessionId, TopicFilter, Subscription = #{id := SubId}, S0) ->
S1 = emqx_persistent_session_ds_subs:on_unsubscribe(TopicFilter, Subscription, S0),
?tp(persistent_session_ds_subscription_delete, #{
session_id => SessionId, topic_filter => TopicFilter
}),
S = emqx_persistent_session_ds_stream_scheduler:on_unsubscribe(SubId, S1),
?tp_span(
persistent_session_ds_subscription_route_delete,
#{session_id => SessionId, topic_filter => TopicFilter},
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, SessionId)
),
S.
-spec get_subscription(topic_filter(), session()) ->
emqx_types:subopts() | undefined.
get_subscription(#share{}, _) ->
%% TODO: shared subscriptions are not supported yet:
undefined;
get_subscription(TopicFilter, #{s := S}) ->
case emqx_persistent_session_ds_subs:lookup(TopicFilter, S) of
_Subscription = #{props := SubOpts} ->
#{subopts := SubOpts} ->
SubOpts;
undefined ->
undefined
@ -436,11 +413,72 @@ get_subscription(TopicFilter, #{s := S}) ->
-spec publish(emqx_types:packet_id(), emqx_types:message(), session()) ->
{ok, emqx_types:publish_result(), session()}
| {error, emqx_types:reason_code()}.
publish(
PacketId,
Msg = #message{qos = ?QOS_2, timestamp = Ts},
Session = #{s := S0}
) ->
case is_awaiting_full(Session) of
false ->
case emqx_persistent_session_ds_state:get_awaiting_rel(PacketId, S0) of
undefined ->
Results = emqx_broker:publish(Msg),
S = emqx_persistent_session_ds_state:put_awaiting_rel(PacketId, Ts, S0),
{ok, Results, Session#{s => S}};
_Ts ->
{error, ?RC_PACKET_IDENTIFIER_IN_USE}
end;
true ->
{error, ?RC_RECEIVE_MAXIMUM_EXCEEDED}
end;
publish(_PacketId, Msg, Session) ->
%% TODO: QoS2
Result = emqx_broker:publish(Msg),
{ok, Result, Session}.
is_awaiting_full(#{s := S, props := Props}) ->
emqx_persistent_session_ds_state:n_awaiting_rel(S) >=
maps:get(max_awaiting_rel, Props, infinity).
-spec expire(emqx_types:clientinfo(), session()) ->
{ok, [], timeout(), session()} | {ok, [], session()}.
expire(ClientInfo, Session0 = #{props := Props}) ->
Session = #{s := S} = do_expire(ClientInfo, Session0),
case emqx_persistent_session_ds_state:n_awaiting_rel(S) of
0 ->
{ok, [], Session};
_ ->
AwaitRelTimeout = maps:get(await_rel_timeout, Props),
{ok, [], AwaitRelTimeout, Session}
end.
do_expire(ClientInfo, Session = #{s := S0, props := Props}) ->
%% 1. Find expired packet IDs:
Now = erlang:system_time(millisecond),
AwaitRelTimeout = maps:get(await_rel_timeout, Props),
ExpiredPacketIds =
emqx_persistent_session_ds_state:fold_awaiting_rel(
fun(PacketId, Ts, Acc) ->
Age = Now - Ts,
case Age > AwaitRelTimeout of
true ->
[PacketId | Acc];
false ->
Acc
end
end,
[],
S0
),
%% 2. Perform side effects:
_ = emqx_session_events:handle_event(ClientInfo, {expired_rel, length(ExpiredPacketIds)}),
%% 3. Update state:
S = lists:foldl(
fun emqx_persistent_session_ds_state:del_awaiting_rel/2,
S0,
ExpiredPacketIds
),
Session#{s => S}.
%%--------------------------------------------------------------------
%% Client -> Broker: PUBACK
%%--------------------------------------------------------------------
@ -477,9 +515,14 @@ pubrec(PacketId, Session0) ->
-spec pubrel(emqx_types:packet_id(), session()) ->
{ok, session()} | {error, emqx_types:reason_code()}.
pubrel(_PacketId, Session = #{}) ->
% TODO: stub
{ok, Session}.
pubrel(PacketId, Session = #{s := S0}) ->
case emqx_persistent_session_ds_state:get_awaiting_rel(PacketId, S0) of
undefined ->
{error, ?RC_PACKET_IDENTIFIER_NOT_FOUND};
_TS ->
S = emqx_persistent_session_ds_state:del_awaiting_rel(PacketId, S0),
{ok, Session#{s => S}}
end.
%%--------------------------------------------------------------------
%% Client -> Broker: PUBCOMP
@ -552,6 +595,8 @@ handle_timeout(_ClientInfo, #req_sync{from = From, ref = Ref}, Session = #{s :=
S = emqx_persistent_session_ds_state:commit(S0),
From ! Ref,
{ok, [], Session#{s => S}};
handle_timeout(ClientInfo, expire_awaiting_rel, Session) ->
expire(ClientInfo, Session);
handle_timeout(_ClientInfo, Timeout, Session) ->
?SLOG(warning, #{msg => "unknown_ds_timeout", timeout => Timeout}),
{ok, [], Session}.
@ -571,7 +616,7 @@ replay(ClientInfo, [], Session0 = #{s := S0}) ->
Session = replay_streams(Session0#{replay => Streams}, ClientInfo),
{ok, [], Session}.
replay_streams(Session0 = #{replay := [{_StreamKey, Srs0} | Rest]}, ClientInfo) ->
replay_streams(Session0 = #{replay := [{StreamKey, Srs0} | Rest]}, ClientInfo) ->
case replay_batch(Srs0, Session0, ClientInfo) of
Session = #{} ->
replay_streams(Session#{replay := Rest}, ClientInfo);
@ -579,7 +624,7 @@ replay_streams(Session0 = #{replay := [{_StreamKey, Srs0} | Rest]}, ClientInfo)
RetryTimeout = ?TIMEOUT_RETRY_REPLAY,
?SLOG(warning, #{
msg => "failed_to_fetch_replay_batch",
stream => Srs0,
stream => StreamKey,
reason => Reason,
class => recoverable,
retry_in_ms => RetryTimeout
@ -645,7 +690,7 @@ list_client_subscriptions(ClientId) ->
%% TODO: this is not the most optimal implementation, since it
%% should be possible to avoid reading extra data (streams, etc.)
case print_session(ClientId) of
Sess = #{s := #{subscriptions := Subs}} ->
Sess = #{s := #{subscriptions := Subs, subscription_states := SStates}} ->
Node =
case Sess of
#{'_alive' := {true, Pid}} ->
@ -655,8 +700,9 @@ list_client_subscriptions(ClientId) ->
end,
SubList =
maps:fold(
fun(Topic, #{props := SubProps}, Acc) ->
Elem = {Topic, SubProps},
fun(Topic, #{current_state := CS}, Acc) ->
#{subopts := SubOpts} = maps:get(CS, SStates),
Elem = {Topic, SubOpts},
[Elem | Acc]
end,
[],
@ -670,6 +716,11 @@ list_client_subscriptions(ClientId) ->
{error, not_found}
end.
-spec get_client_subscription(emqx_types:clientid(), emqx_types:topic()) ->
subscription() | undefined.
get_client_subscription(ClientId, Topic) ->
emqx_persistent_session_ds_subs:cold_get_subscription(ClientId, Topic).
%%--------------------------------------------------------------------
%% Session tables operations
%%--------------------------------------------------------------------
@ -701,7 +752,12 @@ sync(ClientId) ->
%% the broker.
-spec session_open(id(), emqx_types:clientinfo(), emqx_types:conninfo(), emqx_maybe:t(message())) ->
session() | false.
session_open(SessionId, ClientInfo, NewConnInfo, MaybeWillMsg) ->
session_open(
SessionId,
ClientInfo,
NewConnInfo = #{proto_name := ProtoName, proto_ver := ProtoVer},
MaybeWillMsg
) ->
NowMS = now_ms(),
case emqx_persistent_session_ds_state:open(SessionId) of
{ok, S0} ->
@ -720,8 +776,9 @@ session_open(SessionId, ClientInfo, NewConnInfo, MaybeWillMsg) ->
maps:get(peername, NewConnInfo), S2
),
S4 = emqx_persistent_session_ds_state:set_will_message(MaybeWillMsg, S3),
S5 = emqx_persistent_session_ds_state:set_clientinfo(ClientInfo, S4),
S = emqx_persistent_session_ds_state:commit(S5),
S5 = set_clientinfo(ClientInfo, S4),
S6 = emqx_persistent_session_ds_state:set_protocol({ProtoName, ProtoVer}, S5),
S = emqx_persistent_session_ds_state:commit(S6),
Inflight = emqx_persistent_session_ds_inflight:new(
receive_maximum(NewConnInfo)
),
@ -744,7 +801,9 @@ session_open(SessionId, ClientInfo, NewConnInfo, MaybeWillMsg) ->
emqx_session:conf()
) ->
session().
session_ensure_new(Id, ClientInfo, ConnInfo, MaybeWillMsg, Conf) ->
session_ensure_new(
Id, ClientInfo, ConnInfo = #{proto_name := ProtoName, proto_ver := ProtoVer}, MaybeWillMsg, Conf
) ->
?tp(debug, persistent_session_ds_ensure_new, #{id => Id}),
Now = now_ms(),
S0 = emqx_persistent_session_ds_state:create_new(Id),
@ -767,8 +826,9 @@ session_ensure_new(Id, ClientInfo, ConnInfo, MaybeWillMsg, Conf) ->
]
),
S5 = emqx_persistent_session_ds_state:set_will_message(MaybeWillMsg, S4),
S6 = emqx_persistent_session_ds_state:set_clientinfo(ClientInfo, S5),
S = emqx_persistent_session_ds_state:commit(S6),
S6 = set_clientinfo(ClientInfo, S5),
S7 = emqx_persistent_session_ds_state:set_protocol({ProtoName, ProtoVer}, S6),
S = emqx_persistent_session_ds_state:commit(S7),
#{
id => Id,
props => Conf,
@ -779,18 +839,12 @@ session_ensure_new(Id, ClientInfo, ConnInfo, MaybeWillMsg, Conf) ->
%% @doc Called when a client reconnects with `clean session=true' or
%% during session GC
-spec session_drop(id(), _Reason) -> ok.
session_drop(ID, Reason) ->
case emqx_persistent_session_ds_state:open(ID) of
session_drop(SessionId, Reason) ->
case emqx_persistent_session_ds_state:open(SessionId) of
{ok, S0} ->
?tp(debug, drop_persistent_session, #{client_id => ID, reason => Reason}),
_S = emqx_persistent_session_ds_subs:fold(
fun(TopicFilter, Subscription, S) ->
do_unsubscribe(ID, TopicFilter, Subscription, S)
end,
S0,
S0
),
emqx_persistent_session_ds_state:delete(ID);
?tp(debug, drop_persistent_session, #{client_id => SessionId, reason => Reason}),
emqx_persistent_session_ds_subs:on_session_drop(SessionId, S0),
emqx_persistent_session_ds_state:delete(SessionId);
undefined ->
ok
end.
@ -798,6 +852,11 @@ session_drop(ID, Reason) ->
now_ms() ->
erlang:system_time(millisecond).
set_clientinfo(ClientInfo0, S) ->
%% Remove unnecessary fields from the clientinfo:
ClientInfo = maps:without([cn, dn, auth_result], ClientInfo0),
emqx_persistent_session_ds_state:set_clientinfo(ClientInfo, S).
%%--------------------------------------------------------------------
%% RPC targets (v1)
%%--------------------------------------------------------------------
@ -867,29 +926,38 @@ new_batch({StreamKey, Srs0}, BatchSize, Session0 = #{s := S0}, ClientInfo) ->
%% TODO: Handle unrecoverable error.
?SLOG(info, #{
msg => "failed_to_fetch_batch",
stream => Srs1,
stream => StreamKey,
reason => Reason,
class => Class
}),
Session0
end.
enqueue_batch(IsReplay, BatchSize, Srs0, Session = #{inflight := Inflight0}, ClientInfo) ->
enqueue_batch(IsReplay, BatchSize, Srs0, Session = #{inflight := Inflight0, s := S}, ClientInfo) ->
#srs{
it_begin = ItBegin0,
it_end = ItEnd0,
first_seqno_qos1 = FirstSeqnoQos1,
first_seqno_qos2 = FirstSeqnoQos2
first_seqno_qos2 = FirstSeqnoQos2,
sub_state_id = SubStateId
} = Srs0,
ItBegin =
case IsReplay of
true -> ItBegin0;
false -> ItEnd0
end,
SubState = #{} = emqx_persistent_session_ds_state:get_subscription_state(SubStateId, S),
case emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, BatchSize) of
{ok, ItEnd, Messages} ->
{Inflight, LastSeqnoQos1, LastSeqnoQos2} = process_batch(
IsReplay, Session, ClientInfo, FirstSeqnoQos1, FirstSeqnoQos2, Messages, Inflight0
IsReplay,
Session,
SubState,
ClientInfo,
FirstSeqnoQos1,
FirstSeqnoQos2,
Messages,
Inflight0
),
Srs = Srs0#srs{
it_begin = ItBegin,
@ -913,27 +981,29 @@ enqueue_batch(IsReplay, BatchSize, Srs0, Session = #{inflight := Inflight0}, Cli
%% key_of_iter(#{3 := #{3 := #{5 := K}}}) ->
%% K.
process_batch(_IsReplay, _Session, _ClientInfo, LastSeqNoQos1, LastSeqNoQos2, [], Inflight) ->
process_batch(
_IsReplay, _Session, _SubState, _ClientInfo, LastSeqNoQos1, LastSeqNoQos2, [], Inflight
) ->
{Inflight, LastSeqNoQos1, LastSeqNoQos2};
process_batch(
IsReplay, Session, ClientInfo, FirstSeqNoQos1, FirstSeqNoQos2, [KV | Messages], Inflight0
IsReplay,
Session,
SubState,
ClientInfo,
FirstSeqNoQos1,
FirstSeqNoQos2,
[KV | Messages],
Inflight0
) ->
#{s := S, props := #{upgrade_qos := UpgradeQoS}} = Session,
{_DsMsgKey, Msg0 = #message{topic = Topic}} = KV,
#{s := S} = Session,
#{upgrade_qos := UpgradeQoS, subopts := SubOpts} = SubState,
{_DsMsgKey, Msg0} = KV,
Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
Dup1 = emqx_persistent_session_ds_state:get_seqno(?dup(?QOS_1), S),
Dup2 = emqx_persistent_session_ds_state:get_seqno(?dup(?QOS_2), S),
Rec = emqx_persistent_session_ds_state:get_seqno(?rec, S),
Subs = emqx_persistent_session_ds_state:get_subscriptions(S),
Msgs = [
Msg
|| SubMatch <- emqx_topic_gbt:matches(Topic, Subs, []),
Msg <- begin
#{props := SubOpts} = emqx_topic_gbt:get_record(SubMatch, Subs),
emqx_session:enrich_message(ClientInfo, Msg0, SubOpts, UpgradeQoS)
end
],
Msgs = emqx_session:enrich_message(ClientInfo, Msg0, SubOpts, UpgradeQoS),
{Inflight, LastSeqNoQos1, LastSeqNoQos2} = lists:foldl(
fun(Msg = #message{qos = Qos}, {Acc, SeqNoQos10, SeqNoQos20}) ->
case Qos of
@ -989,14 +1059,16 @@ process_batch(
Msgs
),
process_batch(
IsReplay, Session, ClientInfo, LastSeqNoQos1, LastSeqNoQos2, Messages, Inflight
IsReplay, Session, SubState, ClientInfo, LastSeqNoQos1, LastSeqNoQos2, Messages, Inflight
).
%%--------------------------------------------------------------------
%% Transient messages
%%--------------------------------------------------------------------
enqueue_transient(ClientInfo, Msg0, Session = #{s := S, props := #{upgrade_qos := UpgradeQoS}}) ->
enqueue_transient(
_ClientInfo, Msg = #message{qos = Qos}, Session = #{inflight := Inflight0, s := S0}
) ->
%% TODO: Such messages won't be retransmitted, should the session
%% reconnect before transient messages are acked.
%%
@ -1006,18 +1078,6 @@ enqueue_transient(ClientInfo, Msg0, Session = #{s := S, props := #{upgrade_qos :
%% queued messages. Since streams in this DB are exclusive to the
%% session, messages from the queue can be dropped as soon as they
%% are acked.
Subs = emqx_persistent_session_ds_state:get_subscriptions(S),
Msgs = [
Msg
|| SubMatch <- emqx_topic_gbt:matches(Msg0#message.topic, Subs, []),
Msg <- begin
#{props := SubOpts} = emqx_topic_gbt:get_record(SubMatch, Subs),
emqx_session:enrich_message(ClientInfo, Msg0, SubOpts, UpgradeQoS)
end
],
lists:foldl(fun do_enqueue_transient/2, Session, Msgs).
do_enqueue_transient(Msg = #message{qos = Qos}, Session = #{inflight := Inflight0, s := S0}) ->
case Qos of
?QOS_0 ->
S = S0,

View File

@ -65,17 +65,21 @@
last_seqno_qos2 = 0 :: emqx_persistent_session_ds:seqno(),
%% This stream belongs to an unsubscribed topic-filter, and is
%% marked for deletion:
unsubscribed = false :: boolean()
unsubscribed = false :: boolean(),
%% Reference to the subscription state:
sub_state_id :: emqx_persistent_session_ds_subs:subscription_state_id()
}).
%% Session metadata keys:
-define(created_at, created_at).
-define(last_alive_at, last_alive_at).
-define(expiry_interval, expiry_interval).
%% Unique integer used to create unique identities
%% Unique integer used to create unique identities:
-define(last_id, last_id).
%% Connection info (relevent for the dashboard):
-define(peername, peername).
-define(will_message, will_message).
-define(clientinfo, clientinfo).
-define(protocol, protocol).
-endif.

View File

@ -22,6 +22,9 @@
%% It is responsible for saving, caching, and restoring session state.
%% It is completely devoid of business logic. Not even the default
%% values should be set in this module.
%%
%% Session process MUST NOT use `cold_*' functions! They are reserved
%% for use in the management APIs.
-module(emqx_persistent_session_ds_state).
-export([create_tables/0]).
@ -33,22 +36,44 @@
-export([get_clientinfo/1, set_clientinfo/2]).
-export([get_will_message/1, set_will_message/2, clear_will_message/1, clear_will_message_now/1]).
-export([get_peername/1, set_peername/2]).
-export([get_protocol/1, set_protocol/2]).
-export([new_id/1]).
-export([get_stream/2, put_stream/3, del_stream/2, fold_streams/3]).
-export([get_stream/2, put_stream/3, del_stream/2, fold_streams/3, n_streams/1]).
-export([get_seqno/2, put_seqno/3]).
-export([get_rank/2, put_rank/3, del_rank/2, fold_ranks/3]).
-export([get_subscriptions/1, put_subscription/4, del_subscription/3]).
-export([
get_subscription_state/2,
cold_get_subscription_state/2,
fold_subscription_states/3,
put_subscription_state/3,
del_subscription_state/2
]).
-export([
get_subscription/2,
cold_get_subscription/2,
fold_subscriptions/3,
n_subscriptions/1,
put_subscription/3,
del_subscription/2
]).
-export([
get_awaiting_rel/2,
put_awaiting_rel/3,
del_awaiting_rel/2,
fold_awaiting_rel/3,
n_awaiting_rel/1
]).
-export([make_session_iterator/0, session_iterator_next/2]).
-export_type([
t/0,
metadata/0,
subscriptions/0,
seqno_type/0,
stream_key/0,
rank_key/0,
session_iterator/0
session_iterator/0,
protocol/0
]).
-include("emqx_mqtt.hrl").
@ -62,8 +87,6 @@
-type message() :: emqx_types:message().
-type subscriptions() :: emqx_topic_gbt:t(_SubId, emqx_persistent_session_ds:subscription()).
-opaque session_iterator() :: emqx_persistent_session_ds:id() | '$end_of_table'.
%% Generic key-value wrapper that is used for exporting arbitrary
@ -92,13 +115,16 @@
dirty :: #{K => dirty | del}
}.
-type protocol() :: {binary(), emqx_types:proto_ver()}.
-type metadata() ::
#{
?created_at => emqx_persistent_session_ds:timestamp(),
?last_alive_at => emqx_persistent_session_ds:timestamp(),
?expiry_interval => non_neg_integer(),
?last_id => integer(),
?peername => emqx_types:peername()
?peername => emqx_types:peername(),
?protocol => protocol()
}.
-type seqno_type() ::
@ -110,22 +136,49 @@
| ?rec
| ?committed(?QOS_2).
-define(id, id).
-define(dirty, dirty).
-define(metadata, metadata).
-define(subscriptions, subscriptions).
-define(subscription_states, subscription_states).
-define(seqnos, seqnos).
-define(streams, streams).
-define(ranks, ranks).
-define(awaiting_rel, awaiting_rel).
-opaque t() :: #{
id := emqx_persistent_session_ds:id(),
dirty := boolean(),
metadata := metadata(),
subscriptions := subscriptions(),
seqnos := pmap(seqno_type(), emqx_persistent_session_ds:seqno()),
streams := pmap(emqx_ds:stream(), emqx_persistent_session_ds:stream_state()),
ranks := pmap(term(), integer())
?id := emqx_persistent_session_ds:id(),
?dirty := boolean(),
?metadata := metadata(),
?subscriptions := pmap(
emqx_persistent_session_ds:topic_filter(), emqx_persistent_session_ds_subs:subscription()
),
?subscription_states := pmap(
emqx_persistent_session_ds_subs:subscription_state_id(),
emqx_persistent_session_ds_subs:subscription_state()
),
?seqnos := pmap(seqno_type(), emqx_persistent_session_ds:seqno()),
?streams := pmap(emqx_ds:stream(), emqx_persistent_session_ds:stream_state()),
?ranks := pmap(term(), integer()),
?awaiting_rel := pmap(emqx_types:packet_id(), _Timestamp :: integer())
}.
-define(session_tab, emqx_ds_session_tab).
-define(subscription_tab, emqx_ds_session_subscriptions).
-define(subscription_states_tab, emqx_ds_session_subscription_states).
-define(stream_tab, emqx_ds_session_streams).
-define(seqno_tab, emqx_ds_session_seqnos).
-define(rank_tab, emqx_ds_session_ranks).
-define(pmap_tables, [?stream_tab, ?seqno_tab, ?rank_tab, ?subscription_tab]).
-define(awaiting_rel_tab, emqx_ds_session_awaiting_rel).
-define(pmaps, [
{?subscriptions, ?subscription_tab},
{?subscription_states, ?subscription_states_tab},
{?streams, ?stream_tab},
{?seqnos, ?seqno_tab},
{?ranks, ?rank_tab},
{?awaiting_rel, ?awaiting_rel_tab}
]).
%% Enable this flag if you suspect some code breaks the sequence:
-ifndef(CHECK_SEQNO).
@ -152,23 +205,25 @@ create_tables() ->
{attributes, record_info(fields, kv)}
]
),
[create_kv_pmap_table(Table) || Table <- ?pmap_tables],
mria:wait_for_tables([?session_tab | ?pmap_tables]).
{_, PmapTables} = lists:unzip(?pmaps),
[create_kv_pmap_table(Table) || Table <- PmapTables],
mria:wait_for_tables([?session_tab | PmapTables]).
-spec open(emqx_persistent_session_ds:id()) -> {ok, t()} | undefined.
open(SessionId) ->
ro_transaction(fun() ->
case kv_restore(?session_tab, SessionId) of
[Metadata] ->
Rec = #{
id => SessionId,
metadata => Metadata,
subscriptions => read_subscriptions(SessionId),
streams => pmap_open(?stream_tab, SessionId),
seqnos => pmap_open(?seqno_tab, SessionId),
ranks => pmap_open(?rank_tab, SessionId),
?unset_dirty
},
Rec = update_pmaps(
fun(_Pmap, Table) ->
pmap_open(Table, SessionId)
end,
#{
id => SessionId,
metadata => Metadata,
?unset_dirty
}
),
{ok, Rec};
[] ->
undefined
@ -185,27 +240,13 @@ print_session(SessionId) ->
end.
-spec format(t()) -> map().
format(#{
metadata := Metadata,
subscriptions := SubsGBT,
streams := Streams,
seqnos := Seqnos,
ranks := Ranks
}) ->
Subs = emqx_topic_gbt:fold(
fun(Key, Sub, Acc) ->
maps:put(emqx_topic_gbt:get_topic(Key), Sub, Acc)
format(Rec) ->
update_pmaps(
fun(Pmap, _Table) ->
pmap_format(Pmap)
end,
#{},
SubsGBT
),
#{
metadata => Metadata,
subscriptions => Subs,
streams => pmap_format(Streams),
seqnos => pmap_format(Seqnos),
ranks => pmap_format(Ranks)
}.
maps:without([id, dirty], Rec)
).
-spec list_sessions() -> [emqx_persistent_session_ds:id()].
list_sessions() ->
@ -215,7 +256,7 @@ list_sessions() ->
delete(Id) ->
transaction(
fun() ->
[kv_pmap_delete(Table, Id) || Table <- ?pmap_tables],
[kv_pmap_delete(Table, Id) || {_, Table} <- ?pmaps],
mnesia:delete(?session_tab, Id, write)
end
).
@ -226,36 +267,34 @@ commit(Rec = #{dirty := false}) ->
commit(
Rec = #{
id := SessionId,
metadata := Metadata,
streams := Streams,
seqnos := SeqNos,
ranks := Ranks
metadata := Metadata
}
) ->
check_sequence(Rec),
transaction(fun() ->
kv_persist(?session_tab, SessionId, Metadata),
Rec#{
streams => pmap_commit(SessionId, Streams),
seqnos => pmap_commit(SessionId, SeqNos),
ranks => pmap_commit(SessionId, Ranks),
?unset_dirty
}
update_pmaps(
fun(Pmap, _Table) ->
pmap_commit(SessionId, Pmap)
end,
Rec#{?unset_dirty}
)
end).
-spec create_new(emqx_persistent_session_ds:id()) -> t().
create_new(SessionId) ->
transaction(fun() ->
delete(SessionId),
#{
id => SessionId,
metadata => #{},
subscriptions => emqx_topic_gbt:new(),
streams => pmap_open(?stream_tab, SessionId),
seqnos => pmap_open(?seqno_tab, SessionId),
ranks => pmap_open(?rank_tab, SessionId),
?set_dirty
}
update_pmaps(
fun(_Pmap, Table) ->
pmap_open(Table, SessionId)
end,
#{
id => SessionId,
metadata => #{},
?set_dirty
}
)
end).
%%
@ -292,6 +331,14 @@ get_peername(Rec) ->
set_peername(Val, Rec) ->
set_meta(?peername, Val, Rec).
-spec get_protocol(t()) -> protocol() | undefined.
get_protocol(Rec) ->
get_meta(?protocol, Rec).
-spec set_protocol(protocol(), t()) -> t().
set_protocol(Val, Rec) ->
set_meta(?protocol, Val, Rec).
-spec get_clientinfo(t()) -> emqx_maybe:t(emqx_types:clientinfo()).
get_clientinfo(Rec) ->
get_meta(?clientinfo, Rec).
@ -336,30 +383,65 @@ new_id(Rec) ->
%%
-spec get_subscriptions(t()) -> subscriptions().
get_subscriptions(#{subscriptions := Subs}) ->
Subs.
-spec get_subscription(emqx_persistent_session_ds:topic_filter(), t()) ->
emqx_persistent_session_ds_subs:subscription() | undefined.
get_subscription(TopicFilter, Rec) ->
gen_get(?subscriptions, TopicFilter, Rec).
-spec cold_get_subscription(emqx_persistent_session_ds:id(), emqx_types:topic()) ->
[emqx_persistent_session_ds_subs:subscription()].
cold_get_subscription(SessionId, Topic) ->
kv_pmap_read(?subscription_tab, SessionId, Topic).
-spec fold_subscriptions(fun(), Acc, t()) -> Acc.
fold_subscriptions(Fun, Acc, Rec) ->
gen_fold(?subscriptions, Fun, Acc, Rec).
-spec n_subscriptions(t()) -> non_neg_integer().
n_subscriptions(Rec) ->
gen_size(?subscriptions, Rec).
-spec put_subscription(
emqx_persistent_session_ds:topic_filter(),
_SubId,
emqx_persistent_session_ds:subscription(),
emqx_persistent_session_ds_subs:subscription(),
t()
) -> t().
put_subscription(TopicFilter, SubId, Subscription, Rec = #{id := Id, subscriptions := Subs0}) ->
%% Note: currently changes to the subscriptions are persisted immediately.
Key = {TopicFilter, SubId},
transaction(fun() -> kv_pmap_persist(?subscription_tab, Id, Key, Subscription) end),
Subs = emqx_topic_gbt:insert(TopicFilter, SubId, Subscription, Subs0),
Rec#{subscriptions => Subs}.
put_subscription(TopicFilter, Subscription, Rec) ->
gen_put(?subscriptions, TopicFilter, Subscription, Rec).
-spec del_subscription(emqx_persistent_session_ds:topic_filter(), _SubId, t()) -> t().
del_subscription(TopicFilter, SubId, Rec = #{id := Id, subscriptions := Subs0}) ->
%% Note: currently the subscriptions are persisted immediately.
Key = {TopicFilter, SubId},
transaction(fun() -> kv_pmap_delete(?subscription_tab, Id, Key) end),
Subs = emqx_topic_gbt:delete(TopicFilter, SubId, Subs0),
Rec#{subscriptions => Subs}.
-spec del_subscription(emqx_persistent_session_ds:topic_filter(), t()) -> t().
del_subscription(TopicFilter, Rec) ->
gen_del(?subscriptions, TopicFilter, Rec).
%%
-spec get_subscription_state(emqx_persistent_session_ds_subs:subscription_state_id(), t()) ->
emqx_persistent_session_ds_subs:subscription_state() | undefined.
get_subscription_state(SStateId, Rec) ->
gen_get(?subscription_states, SStateId, Rec).
-spec cold_get_subscription_state(
emqx_persistent_session_ds:id(), emqx_persistent_session_ds_subs:subscription_state_id()
) ->
[emqx_persistent_session_ds_subs:subscription_state()].
cold_get_subscription_state(SessionId, SStateId) ->
kv_pmap_read(?subscription_states_tab, SessionId, SStateId).
-spec fold_subscription_states(fun(), Acc, t()) -> Acc.
fold_subscription_states(Fun, Acc, Rec) ->
gen_fold(?subscription_states, Fun, Acc, Rec).
-spec put_subscription_state(
emqx_persistent_session_ds_subs:subscription_state_id(),
emqx_persistent_session_ds_subs:subscription_state(),
t()
) -> t().
put_subscription_state(SStateId, SState, Rec) ->
gen_put(?subscription_states, SStateId, SState, Rec).
-spec del_subscription_state(emqx_persistent_session_ds_subs:subscription_state_id(), t()) -> t().
del_subscription_state(SStateId, Rec) ->
gen_del(?subscription_states, SStateId, Rec).
%%
@ -368,29 +450,33 @@ del_subscription(TopicFilter, SubId, Rec = #{id := Id, subscriptions := Subs0})
-spec get_stream(stream_key(), t()) ->
emqx_persistent_session_ds:stream_state() | undefined.
get_stream(Key, Rec) ->
gen_get(streams, Key, Rec).
gen_get(?streams, Key, Rec).
-spec put_stream(stream_key(), emqx_persistent_session_ds:stream_state(), t()) -> t().
put_stream(Key, Val, Rec) ->
gen_put(streams, Key, Val, Rec).
gen_put(?streams, Key, Val, Rec).
-spec del_stream(stream_key(), t()) -> t().
del_stream(Key, Rec) ->
gen_del(streams, Key, Rec).
gen_del(?streams, Key, Rec).
-spec fold_streams(fun(), Acc, t()) -> Acc.
fold_streams(Fun, Acc, Rec) ->
gen_fold(streams, Fun, Acc, Rec).
gen_fold(?streams, Fun, Acc, Rec).
-spec n_streams(t()) -> non_neg_integer().
n_streams(Rec) ->
gen_size(?streams, Rec).
%%
-spec get_seqno(seqno_type(), t()) -> emqx_persistent_session_ds:seqno() | undefined.
get_seqno(Key, Rec) ->
gen_get(seqnos, Key, Rec).
gen_get(?seqnos, Key, Rec).
-spec put_seqno(seqno_type(), emqx_persistent_session_ds:seqno(), t()) -> t().
put_seqno(Key, Val, Rec) ->
gen_put(seqnos, Key, Val, Rec).
gen_put(?seqnos, Key, Val, Rec).
%%
@ -398,19 +484,43 @@ put_seqno(Key, Val, Rec) ->
-spec get_rank(rank_key(), t()) -> integer() | undefined.
get_rank(Key, Rec) ->
gen_get(ranks, Key, Rec).
gen_get(?ranks, Key, Rec).
-spec put_rank(rank_key(), integer(), t()) -> t().
put_rank(Key, Val, Rec) ->
gen_put(ranks, Key, Val, Rec).
gen_put(?ranks, Key, Val, Rec).
-spec del_rank(rank_key(), t()) -> t().
del_rank(Key, Rec) ->
gen_del(ranks, Key, Rec).
gen_del(?ranks, Key, Rec).
-spec fold_ranks(fun(), Acc, t()) -> Acc.
fold_ranks(Fun, Acc, Rec) ->
gen_fold(ranks, Fun, Acc, Rec).
gen_fold(?ranks, Fun, Acc, Rec).
%%
-spec get_awaiting_rel(emqx_types:packet_id(), t()) -> integer() | undefined.
get_awaiting_rel(Key, Rec) ->
gen_get(?awaiting_rel, Key, Rec).
-spec put_awaiting_rel(emqx_types:packet_id(), _Timestamp :: integer(), t()) -> t().
put_awaiting_rel(Key, Val, Rec) ->
gen_put(?awaiting_rel, Key, Val, Rec).
-spec del_awaiting_rel(emqx_types:packet_id(), t()) -> t().
del_awaiting_rel(Key, Rec) ->
gen_del(?awaiting_rel, Key, Rec).
-spec fold_awaiting_rel(fun(), Acc, t()) -> Acc.
fold_awaiting_rel(Fun, Acc, Rec) ->
gen_fold(?awaiting_rel, Fun, Acc, Rec).
-spec n_awaiting_rel(t()) -> non_neg_integer().
n_awaiting_rel(Rec) ->
gen_size(?awaiting_rel, Rec).
%%
-spec make_session_iterator() -> session_iterator().
make_session_iterator() ->
@ -475,16 +585,20 @@ gen_del(Field, Key, Rec) ->
Rec#{?set_dirty}
).
%%
gen_size(Field, Rec) ->
check_sequence(Rec),
pmap_size(maps:get(Field, Rec)).
read_subscriptions(SessionId) ->
Records = kv_pmap_restore(?subscription_tab, SessionId),
-spec update_pmaps(fun((pmap(_K, _V) | undefined, atom()) -> term()), map()) -> map().
update_pmaps(Fun, Map) ->
lists:foldl(
fun({{TopicFilter, SubId}, Subscription}, Acc) ->
emqx_topic_gbt:insert(TopicFilter, SubId, Subscription, Acc)
fun({MapKey, Table}, Acc) ->
OldVal = maps:get(MapKey, Map, undefined),
Val = Fun(OldVal, Table),
maps:put(MapKey, Val, Acc)
end,
emqx_topic_gbt:new(),
Records
Map,
?pmaps
).
%%
@ -547,6 +661,10 @@ pmap_commit(
pmap_format(#pmap{cache = Cache}) ->
Cache.
-spec pmap_size(pmap(_K, _V)) -> non_neg_integer().
pmap_size(#pmap{cache = Cache}) ->
maps:size(Cache).
%% Functions dealing with set tables:
kv_persist(Tab, SessionId, Val0) ->
@ -574,6 +692,14 @@ kv_pmap_persist(Tab, SessionId, Key, Val0) ->
Val = encoder(encode, Tab, Val0),
mnesia:write(Tab, #kv{k = {SessionId, Key}, v = Val}, write).
kv_pmap_read(Table, SessionId, Key) ->
lists:map(
fun(#kv{v = Val}) ->
encoder(decode, Table, Val)
end,
mnesia:dirty_read(Table, {SessionId, Key})
).
kv_pmap_restore(Table, SessionId) ->
MS = [{#kv{k = {SessionId, '$1'}, v = '$2'}, [], [{{'$1', '$2'}}]}],
Objs = mnesia:select(Table, MS, read),

View File

@ -126,9 +126,10 @@ find_new_streams(S) ->
renew_streams(S0) ->
S1 = remove_unsubscribed_streams(S0),
S2 = remove_fully_replayed_streams(S1),
S3 = update_stream_subscription_state_ids(S2),
emqx_persistent_session_ds_subs:fold(
fun
(Key, #{start_time := StartTime, id := SubId, deleted := false}, Acc) ->
(Key, #{start_time := StartTime, id := SubId, current_state := SStateId}, Acc) ->
TopicFilter = emqx_topic:words(Key),
Streams = select_streams(
SubId,
@ -137,7 +138,7 @@ renew_streams(S0) ->
),
lists:foldl(
fun(I, Acc1) ->
ensure_iterator(TopicFilter, StartTime, SubId, I, Acc1)
ensure_iterator(TopicFilter, StartTime, SubId, SStateId, I, Acc1)
end,
Acc,
Streams
@ -145,8 +146,8 @@ renew_streams(S0) ->
(_Key, _DeletedSubscription, Acc) ->
Acc
end,
S2,
S2
S3,
S3
).
-spec on_unsubscribe(
@ -201,23 +202,32 @@ is_fully_acked(Srs, S) ->
%% Internal functions
%%================================================================================
ensure_iterator(TopicFilter, StartTime, SubId, {{RankX, RankY}, Stream}, S) ->
ensure_iterator(TopicFilter, StartTime, SubId, SStateId, {{RankX, RankY}, Stream}, S) ->
Key = {SubId, Stream},
case emqx_persistent_session_ds_state:get_stream(Key, S) of
undefined ->
?SLOG(debug, #{
msg => new_stream, key => Key, stream => Stream
}),
{ok, Iterator} = emqx_ds:make_iterator(
?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime
),
NewStreamState = #srs{
rank_x = RankX,
rank_y = RankY,
it_begin = Iterator,
it_end = Iterator
},
emqx_persistent_session_ds_state:put_stream(Key, NewStreamState, S);
case emqx_ds:make_iterator(?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime) of
{ok, Iterator} ->
NewStreamState = #srs{
rank_x = RankX,
rank_y = RankY,
it_begin = Iterator,
it_end = Iterator,
sub_state_id = SStateId
},
emqx_persistent_session_ds_state:put_stream(Key, NewStreamState, S);
{error, recoverable, Reason} ->
?SLOG(warning, #{
msg => "failed_to_initialize_stream_iterator",
stream => Stream,
class => recoverable,
reason => Reason
}),
S
end;
#srs{} ->
S
end.
@ -342,6 +352,38 @@ remove_fully_replayed_streams(S0) ->
S1
).
%% @doc Update subscription state IDs for all streams that don't have unacked messages
-spec update_stream_subscription_state_ids(emqx_persistent_session_ds_state:t()) ->
emqx_persistent_session_ds_state:t().
update_stream_subscription_state_ids(S0) ->
CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S0),
CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S0),
%% Find the latest state IDs for each subscription:
LastSStateIds = emqx_persistent_session_ds_state:fold_subscriptions(
fun(_, #{id := SubId, current_state := SStateId}, Acc) ->
Acc#{SubId => SStateId}
end,
#{},
S0
),
%% Update subscription state IDs for fully acked streams:
emqx_persistent_session_ds_state:fold_streams(
fun
(_, #srs{unsubscribed = true}, S) ->
S;
(Key = {SubId, _Stream}, SRS0, S) ->
case is_fully_acked(CommQos1, CommQos2, SRS0) of
true ->
SRS = SRS0#srs{sub_state_id = maps:get(SubId, LastSStateIds)},
emqx_persistent_session_ds_state:put_stream(Key, SRS, S);
false ->
S
end
end,
S0,
S0
).
%% @doc Compare the streams by the order in which they were replayed.
compare_streams(
{_KeyA, #srs{first_seqno_qos1 = A1, first_seqno_qos2 = A2}},

View File

@ -24,14 +24,56 @@
-module(emqx_persistent_session_ds_subs).
%% API:
-export([on_subscribe/3, on_unsubscribe/3, gc/1, lookup/2, to_map/1, fold/3, fold_all/3]).
-export([
on_subscribe/3,
on_unsubscribe/3,
on_session_drop/2,
gc/1,
lookup/2,
to_map/1,
fold/3
]).
-export_type([]).
%% Management API:
-export([
cold_get_subscription/2
]).
-export_type([subscription_state_id/0, subscription/0, subscription_state/0]).
-include("emqx_persistent_session_ds.hrl").
-include("emqx_mqtt.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
%%================================================================================
%% Type declarations
%%================================================================================
-type subscription() :: #{
%% Session-unique identifier of the subscription. Other objects
%% can use it as a compact reference:
id := emqx_persistent_session_ds:subscription_id(),
%% Reference to the current subscription state:
current_state := subscription_state_id(),
%% Time when the subscription was added:
start_time := emqx_ds:time()
}.
-type subscription_state_id() :: integer().
-type subscription_state() :: #{
parent_subscription := emqx_persistent_session_ds:subscription_id(),
upgrade_qos := boolean(),
%% SubOpts:
subopts := #{
nl => _,
qos => _,
rap => _,
subid => _,
_ => _
}
}.
%%================================================================================
%% API functions
%%================================================================================
@ -39,41 +81,131 @@
%% @doc Process a new subscription
-spec on_subscribe(
emqx_persistent_session_ds:topic_filter(),
emqx_persistent_session_ds:subscription(),
emqx_persistent_session_ds_state:t()
emqx_types:subopts(),
emqx_persistent_session_ds:session()
) ->
emqx_persistent_session_ds_state:t().
on_subscribe(TopicFilter, Subscription, S) ->
emqx_persistent_session_ds_state:put_subscription(TopicFilter, [], Subscription, S).
{ok, emqx_persistent_session_ds_state:t()} | {error, ?RC_QUOTA_EXCEEDED}.
on_subscribe(TopicFilter, SubOpts, #{id := SessionId, s := S0, props := Props}) ->
#{upgrade_qos := UpgradeQoS, max_subscriptions := MaxSubscriptions} = Props,
case emqx_persistent_session_ds_state:get_subscription(TopicFilter, S0) of
undefined ->
%% This is a new subscription:
case emqx_persistent_session_ds_state:n_subscriptions(S0) < MaxSubscriptions of
true ->
ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, SessionId),
{SubId, S1} = emqx_persistent_session_ds_state:new_id(S0),
{SStateId, S2} = emqx_persistent_session_ds_state:new_id(S1),
SState = #{
parent_subscription => SubId, upgrade_qos => UpgradeQoS, subopts => SubOpts
},
S3 = emqx_persistent_session_ds_state:put_subscription_state(
SStateId, SState, S2
),
Subscription = #{
id => SubId,
current_state => SStateId,
start_time => now_ms()
},
S = emqx_persistent_session_ds_state:put_subscription(
TopicFilter, Subscription, S3
),
?tp(persistent_session_ds_subscription_added, #{
topic_filter => TopicFilter, session => SessionId
}),
{ok, S};
false ->
{error, ?RC_QUOTA_EXCEEDED}
end;
Sub0 = #{current_state := SStateId0, id := SubId} ->
SState = #{parent_subscription => SubId, upgrade_qos => UpgradeQoS, subopts => SubOpts},
case emqx_persistent_session_ds_state:get_subscription_state(SStateId0, S0) of
SState ->
%% Client resubscribed with the same parameters:
{ok, S0};
_ ->
%% Subsription parameters changed:
{SStateId, S1} = emqx_persistent_session_ds_state:new_id(S0),
S2 = emqx_persistent_session_ds_state:put_subscription_state(
SStateId, SState, S1
),
Sub = Sub0#{current_state => SStateId},
S = emqx_persistent_session_ds_state:put_subscription(TopicFilter, Sub, S2),
{ok, S}
end
end.
%% @doc Process UNSUBSCRIBE
-spec on_unsubscribe(
emqx_persistent_session_ds:id(),
emqx_persistent_session_ds:topic_filter(),
emqx_persistent_session_ds:subscription(),
emqx_persistent_session_ds_state:t()
) ->
emqx_persistent_session_ds_state:t().
on_unsubscribe(TopicFilter, Subscription0, S0) ->
%% Note: we cannot delete the subscription immediately, since its
%% metadata can be used during replay (see `process_batch'). We
%% instead mark it as deleted, and let `subscription_gc' function
%% dispatch it later:
Subscription = Subscription0#{deleted => true},
emqx_persistent_session_ds_state:put_subscription(TopicFilter, [], Subscription, S0).
{ok, emqx_persistent_session_ds_state:t(), emqx_persistent_session_ds:subscription()}
| {error, ?RC_NO_SUBSCRIPTION_EXISTED}.
on_unsubscribe(SessionId, TopicFilter, S0) ->
case lookup(TopicFilter, S0) of
undefined ->
{error, ?RC_NO_SUBSCRIPTION_EXISTED};
Subscription ->
?tp(persistent_session_ds_subscription_delete, #{
session_id => SessionId, topic_filter => TopicFilter
}),
?tp_span(
persistent_session_ds_subscription_route_delete,
#{session_id => SessionId, topic_filter => TopicFilter},
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, SessionId)
),
{ok, emqx_persistent_session_ds_state:del_subscription(TopicFilter, S0), Subscription}
end.
%% @doc Remove subscriptions that have been marked for deletion, and
%% that don't have any unacked messages:
-spec on_session_drop(emqx_persistent_session_ds:id(), emqx_persistent_session_ds_state:t()) -> ok.
on_session_drop(SessionId, S0) ->
fold(
fun(TopicFilter, _Subscription, S) ->
case on_unsubscribe(SessionId, TopicFilter, S) of
{ok, S1, _} -> S1;
_ -> S
end
end,
S0,
S0
).
%% @doc Remove subscription states that don't have a parent, and that
%% don't have any unacked messages:
-spec gc(emqx_persistent_session_ds_state:t()) -> emqx_persistent_session_ds_state:t().
gc(S0) ->
fold_all(
fun(TopicFilter, #{id := SubId, deleted := Deleted}, Acc) ->
case Deleted andalso has_no_unacked_streams(SubId, S0) of
true ->
emqx_persistent_session_ds_state:del_subscription(TopicFilter, [], Acc);
%% Create a set of subscription states IDs referenced either by a
%% subscription or a stream replay state:
AliveSet0 = emqx_persistent_session_ds_state:fold_subscriptions(
fun(_TopicFilter, #{current_state := SStateId}, Acc) ->
Acc#{SStateId => true}
end,
#{},
S0
),
AliveSet = emqx_persistent_session_ds_state:fold_streams(
fun(_StreamId, SRS = #srs{sub_state_id = SStateId}, Acc) ->
case emqx_persistent_session_ds_stream_scheduler:is_fully_acked(SRS, S0) of
false ->
Acc#{SStateId => true};
true ->
Acc
end
end,
AliveSet0,
S0
),
%% Delete dangling subscription states:
emqx_persistent_session_ds_state:fold_subscription_states(
fun(SStateId, _, S) ->
case maps:is_key(SStateId, AliveSet) of
true ->
S;
false ->
emqx_persistent_session_ds_state:del_subscription_state(SStateId, S)
end
end,
S0,
S0
).
@ -82,12 +214,16 @@ gc(S0) ->
-spec lookup(emqx_persistent_session_ds:topic_filter(), emqx_persistent_session_ds_state:t()) ->
emqx_persistent_session_ds:subscription() | undefined.
lookup(TopicFilter, S) ->
Subs = emqx_persistent_session_ds_state:get_subscriptions(S),
case emqx_topic_gbt:lookup(TopicFilter, [], Subs, undefined) of
#{deleted := true} ->
undefined;
Sub ->
Sub
case emqx_persistent_session_ds_state:get_subscription(TopicFilter, S) of
Sub = #{current_state := SStateId} ->
case emqx_persistent_session_ds_state:get_subscription_state(SStateId, S) of
#{subopts := SubOpts} ->
Sub#{subopts => SubOpts};
undefined ->
undefined
end;
undefined ->
undefined
end.
%% @doc Convert active subscriptions to a map, for information
@ -95,7 +231,7 @@ lookup(TopicFilter, S) ->
-spec to_map(emqx_persistent_session_ds_state:t()) -> map().
to_map(S) ->
fold(
fun(TopicFilter, #{props := Props}, Acc) -> Acc#{TopicFilter => Props} end,
fun(TopicFilter, _, Acc) -> Acc#{TopicFilter => lookup(TopicFilter, S)} end,
#{},
S
).
@ -107,48 +243,29 @@ to_map(S) ->
emqx_persistent_session_ds_state:t()
) ->
Acc.
fold(Fun, AccIn, S) ->
fold_all(
fun(TopicFilter, Sub = #{deleted := Deleted}, Acc) ->
case Deleted of
true -> Acc;
false -> Fun(TopicFilter, Sub, Acc)
end
end,
AccIn,
S
).
fold(Fun, Acc, S) ->
emqx_persistent_session_ds_state:fold_subscriptions(Fun, Acc, S).
%% @doc Fold over all subscriptions, including inactive ones:
-spec fold_all(
fun((emqx_types:topic(), emqx_persistent_session_ds:subscription(), Acc) -> Acc),
Acc,
emqx_persistent_session_ds_state:t()
) ->
Acc.
fold_all(Fun, AccIn, S) ->
Subs = emqx_persistent_session_ds_state:get_subscriptions(S),
emqx_topic_gbt:fold(
fun(Key, Sub, Acc) -> Fun(emqx_topic_gbt:get_topic(Key), Sub, Acc) end,
AccIn,
Subs
).
%% @doc Look up a subscription directly from storage ("cold", i.e. without
%% an open session state handle), and enrich it with the `subopts' taken
%% from its current subscription-state record.
%% Returns `undefined' when either the subscription or its referenced
%% subscription state is missing.
-spec cold_get_subscription(emqx_persistent_session_ds:id(), emqx_types:topic()) ->
    emqx_persistent_session_ds:subscription() | undefined.
cold_get_subscription(SessionId, Topic) ->
    %% NOTE(review): cold_get_* calls return a (possibly empty) list —
    %% matched here as a single-element list.
    case emqx_persistent_session_ds_state:cold_get_subscription(SessionId, Topic) of
        [Sub = #{current_state := SStateId}] ->
            case
                emqx_persistent_session_ds_state:cold_get_subscription_state(SessionId, SStateId)
            of
                [#{subopts := Subopts}] ->
                    %% Merge the subopts into the subscription map for the caller.
                    Sub#{subopts => Subopts};
                _ ->
                    undefined
            end;
        _ ->
            undefined
    end.
%%================================================================================
%% Internal functions
%%================================================================================
%% @doc Return `true' iff every stream belonging to the given subscription
%% id has been fully acked. Streams that belong to other subscriptions are
%% ignored (the accumulator is passed through unchanged).
-spec has_no_unacked_streams(
    emqx_persistent_session_ds:subscription_id(), emqx_persistent_session_ds_state:t()
) -> boolean().
has_no_unacked_streams(SubId, S) ->
    emqx_persistent_session_ds_state:fold_streams(
        fun
            %% Stream key is `{SubscriptionId, Stream}'; only consider
            %% streams of the subscription in question.
            ({SID, _Stream}, Srs, Acc) when SID =:= SubId ->
                emqx_persistent_session_ds_stream_scheduler:is_fully_acked(Srs, S) andalso Acc;
            (_StreamKey, _Srs, Acc) ->
                Acc
        end,
        true,
        S
    ).
%% Current system (wall-clock) time in milliseconds.
now_ms() ->
    erlang:system_time(millisecond).

View File

@ -61,6 +61,8 @@
}.
-type url() :: binary().
-type json_binary() :: binary().
-type template() :: binary().
-type template_str() :: string().
-typerefl_from_string({duration/0, emqx_schema, to_duration}).
-typerefl_from_string({duration_s/0, emqx_schema, to_duration_s}).
@ -78,6 +80,8 @@
-typerefl_from_string({comma_separated_atoms/0, emqx_schema, to_comma_separated_atoms}).
-typerefl_from_string({url/0, emqx_schema, to_url}).
-typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}).
-typerefl_from_string({template/0, emqx_schema, to_template}).
-typerefl_from_string({template_str/0, emqx_schema, to_template_str}).
-type parsed_server() :: #{
hostname := string(),
@ -120,7 +124,9 @@
to_erl_cipher_suite/1,
to_comma_separated_atoms/1,
to_url/1,
to_json_binary/1
to_json_binary/1,
to_template/1,
to_template_str/1
]).
-export([
@ -160,7 +166,9 @@
comma_separated_atoms/0,
url/0,
json_binary/0,
port_number/0
port_number/0,
template/0,
template_str/0
]).
-export([namespace/0, roots/0, roots/1, fields/1, desc/1, tags/0]).
@ -1734,20 +1742,38 @@ fields(durable_storage) ->
emqx_ds_schema:schema();
fields("client_attrs_init") ->
[
{extract_from,
{expression,
sc(
hoconsc:enum([clientid, username, cn, dn, user_property]),
#{desc => ?DESC("client_attrs_init_extract_from")}
typerefl:alias("string", any()),
#{
desc => ?DESC("client_attrs_init_expression"),
converter => fun compile_variform/2
}
)},
{extract_regexp, sc(binary(), #{desc => ?DESC("client_attrs_init_extract_regexp")})},
{extract_as,
{set_as_attr,
sc(binary(), #{
default => <<"alias">>,
desc => ?DESC("client_attrs_init_extract_as"),
desc => ?DESC("client_attrs_init_set_as_attr"),
validator => fun restricted_string/1
})}
].
%% @doc Hocon converter for variform expressions.
%% - `undefined' passes through unchanged.
%% - When serializing config back out (`make_serializable'), return the
%%   original source binary, or decompile an already-compiled expression.
%% - Otherwise compile the source expression; a compile failure is thrown
%%   so hocon reports it as a config validation error.
compile_variform(undefined, _Opts) ->
    undefined;
compile_variform(Expression, #{make_serializable := true}) ->
    case is_binary(Expression) of
        true ->
            %% Still the raw source form — emit as-is.
            Expression;
        false ->
            %% Already compiled — turn it back into source text.
            emqx_variform:decompile(Expression)
    end;
compile_variform(Expression, _Opts) ->
    case emqx_variform:compile(Expression) of
        {ok, Compiled} ->
            Compiled;
        {error, Reason} ->
            throw(#{expression => Expression, reason => Reason})
    end.
restricted_string(Str) ->
case emqx_utils:is_restricted_str(Str) of
true -> ok;
@ -2576,6 +2602,12 @@ to_json_binary(Str) ->
Error
end.
%% @doc Hocon converter: normalize a template given as a string/iolist
%% into a single binary. Always succeeds for valid iodata.
to_template(Str) ->
    Template = iolist_to_binary(Str),
    {ok, Template}.
%% @doc Hocon converter: normalize a template given as binary/iodata into
%% a unicode codepoint list (`string()').
%% Defect fixed: `unicode:characters_to_list/2' does not raise on
%% malformed input — it returns an `{error, Converted, Rest}' or
%% `{incomplete, Converted, Rest}' tuple. The original code wrapped such
%% tuples in `{ok, ...}', silently producing an invalid "string". Those
%% cases are now surfaced as a conversion error, matching the
%% `{ok, _} | {error, _}' convention of the sibling converters.
to_template_str(Str) ->
    case unicode:characters_to_list(Str, utf8) of
        Converted when is_list(Converted) ->
            {ok, Converted};
        {error, _Converted, _Rest} = Error ->
            {error, Error};
        {incomplete, _Converted, _Rest} = Error ->
            {error, Error}
    end.
%% @doc support the following format:
%% - 127.0.0.1:1883
%% - ::1:1883
@ -3552,9 +3584,9 @@ mqtt_general() ->
)},
{"client_attrs_init",
sc(
hoconsc:union([disabled, ref("client_attrs_init")]),
hoconsc:array(ref("client_attrs_init")),
#{
default => disabled,
default => [],
desc => ?DESC("client_attrs_init")
}
)}

View File

@ -429,6 +429,11 @@ enrich_deliver(ClientInfo, {deliver, Topic, Msg}, UpgradeQoS, Session) ->
end,
enrich_message(ClientInfo, Msg, SubOpts, UpgradeQoS).
%% Caution: updating this function _may_ break consistency of replay
%% for persistent sessions. Persistent sessions expect it to return
%% the same result during replay. If it changes the behavior between
%% releases, sessions restored from the cold storage may end up
%% replaying messages with different QoS, etc.
enrich_message(
ClientInfo = #{clientid := ClientId},
Msg = #message{from = ClientId},

View File

@ -22,6 +22,7 @@
-include("types.hrl").
-include("logger.hrl").
-include("emqx_hooks.hrl").
-include("emqx_mqtt.hrl").
-export([
start_link/0,
@ -279,7 +280,7 @@ on_client_subscribed(
clientid => ClientId,
username => Username,
protocol => Protocol,
topic => Topic,
topic => emqx_topic:maybe_format_share(Topic),
subopts => SubOpts,
ts => erlang:system_time(millisecond)
},
@ -298,7 +299,7 @@ on_client_unsubscribed(
clientid => ClientId,
username => Username,
protocol => Protocol,
topic => Topic,
topic => emqx_topic:maybe_format_share(Topic),
ts => erlang:system_time(millisecond)
},
publish(unsubscribed, Payload).

View File

@ -28,7 +28,8 @@
subscribe/3,
unsubscribe/2,
log/3,
log/4
log/4,
rendered_action_template/2
]).
-export([
@ -66,6 +67,9 @@
-export_type([ip_address/0]).
-type ip_address() :: string().
-export_type([ruleid/0]).
-type ruleid() :: binary().
publish(#message{topic = <<"$SYS/", _/binary>>}) ->
ignore;
publish(#message{from = From, topic = Topic, payload = Payload}) when
@ -83,6 +87,32 @@ unsubscribe(<<"$SYS/", _/binary>>, _SubOpts) ->
unsubscribe(Topic, SubOpts) ->
?TRACE("UNSUBSCRIBE", "unsubscribe", #{topic => Topic, sub_opts => SubOpts}).
%% @doc Trace hook called after an action's template has been rendered.
%% Emits a "QUERY_RENDER" trace event with the render result, then — if
%% the process metadata carries `stop_action_after_render' (a test-only
%% setting) — raises an unrecoverable error so the action is aborted
%% before the underlying resource is touched. Returns the trace result
%% otherwise.
rendered_action_template(ActionID, RenderResult) ->
    TraceResult = ?TRACE(
        "QUERY_RENDER",
        "action_template_rendered",
        #{
            result => RenderResult,
            action_id => ActionID
        }
    ),
    case logger:get_process_metadata() of
        #{stop_action_after_render := true} ->
            %% We throw an unrecoverable error to stop action before the
            %% resource is called/modified
            StopMsg = lists:flatten(
                io_lib:format(
                    "Action ~ts stopped after template rendering due to test setting.",
                    [ActionID]
                )
            ),
            MsgBin = unicode:characters_to_binary(StopMsg),
            error({unrecoverable_error, {action_stopped_after_template_rendering, MsgBin}});
        _ ->
            ok
    end,
    TraceResult.
log(List, Msg, Meta) ->
log(debug, List, Msg, Meta).
@ -159,8 +189,10 @@ create(Trace) ->
case mnesia:table_info(?TRACE, size) < ?MAX_SIZE of
true ->
case to_trace(Trace) of
{ok, TraceRec} -> insert_new_trace(TraceRec);
{error, Reason} -> {error, Reason}
{ok, TraceRec} ->
insert_new_trace(TraceRec);
{error, Reason} ->
{error, Reason}
end;
false ->
{error,
@ -222,7 +254,11 @@ format(Traces) ->
lists:map(
fun(Trace0 = #?TRACE{}) ->
[_ | Values] = tuple_to_list(Trace0),
maps:from_list(lists:zip(Fields, Values))
Map0 = maps:from_list(lists:zip(Fields, Values)),
Extra = maps:get(extra, Map0, #{}),
Formatter = maps:get(formatter, Extra, text),
Map1 = Map0#{formatter => Formatter},
maps:remove(extra, Map1)
end,
Traces
).
@ -368,9 +404,17 @@ start_trace(Trace) ->
type = Type,
filter = Filter,
start_at = Start,
payload_encode = PayloadEncode
payload_encode = PayloadEncode,
extra = Extra
} = Trace,
Who = #{name => Name, type => Type, filter => Filter, payload_encode => PayloadEncode},
Formatter = maps:get(formatter, Extra, text),
Who = #{
name => Name,
type => Type,
filter => Filter,
payload_encode => PayloadEncode,
formatter => Formatter
},
emqx_trace_handler:install(Who, debug, log_file(Name, Start)).
stop_trace(Finished, Started) ->
@ -517,6 +561,9 @@ to_trace(#{type := ip_address, ip_address := Filter} = Trace, Rec) ->
Error ->
Error
end;
to_trace(#{type := ruleid, ruleid := Filter} = Trace, Rec) ->
Trace0 = maps:without([type, ruleid], Trace),
to_trace(Trace0, Rec#?TRACE{type = ruleid, filter = Filter});
to_trace(#{type := Type}, _Rec) ->
{error, io_lib:format("required ~s field", [Type])};
to_trace(#{payload_encode := PayloadEncode} = Trace, Rec) ->
@ -532,6 +579,12 @@ to_trace(#{end_at := EndAt} = Trace, Rec) ->
{ok, _Sec} ->
{error, "end_at time has already passed"}
end;
to_trace(#{formatter := Formatter} = Trace, Rec) ->
Extra = Rec#?TRACE.extra,
to_trace(
maps:remove(formatter, Trace),
Rec#?TRACE{extra = Extra#{formatter => Formatter}}
);
to_trace(_, Rec) ->
{ok, Rec}.

View File

@ -27,12 +27,14 @@
install/3,
install/4,
install/5,
install/6,
uninstall/1,
uninstall/2
]).
%% For logger handler filters callbacks
-export([
filter_ruleid/2,
filter_clientid/2,
filter_topic/2,
filter_ip_address/2
@ -45,7 +47,8 @@
name := binary(),
type := clientid | topic | ip_address,
filter := emqx_types:clientid() | emqx_types:topic() | emqx_trace:ip_address(),
payload_encode := text | hidden | hex
payload_encode := text | hidden | hex,
formatter => json | text
}.
-define(CONFIG(_LogFile_), #{
@ -68,17 +71,29 @@
Type :: clientid | topic | ip_address,
Filter :: emqx_types:clientid() | emqx_types:topic() | string(),
Level :: logger:level() | all,
LogFilePath :: string()
LogFilePath :: string(),
Formatter :: text | json
) -> ok | {error, term()}.
install(Name, Type, Filter, Level, LogFile) ->
install(Name, Type, Filter, Level, LogFile, Formatter) ->
Who = #{
type => Type,
filter => ensure_bin(Filter),
name => ensure_bin(Name),
payload_encode => payload_encode()
payload_encode => payload_encode(),
formatter => Formatter
},
install(Who, Level, LogFile).
-spec install(
Name :: binary() | list(),
Type :: clientid | topic | ip_address,
Filter :: emqx_types:clientid() | emqx_types:topic() | string(),
Level :: logger:level() | all,
LogFilePath :: string()
) -> ok | {error, term()}.
install(Name, Type, Filter, Level, LogFile) ->
install(Name, Type, Filter, Level, LogFile, text).
-spec install(
Type :: clientid | topic | ip_address,
Filter :: emqx_types:clientid() | emqx_types:topic() | string(),
@ -133,9 +148,23 @@ uninstall(HandlerId) ->
running() ->
lists:foldl(fun filter_traces/2, [], emqx_logger:get_log_handlers(started)).
%% @doc Logger filter for rule-id traces: pass the event through when it
%% was produced by (or mentions) the traced rule id, otherwise stop it.
%% Matches either a single `rule_id' in the log metadata or membership in
%% the `rule_ids' map-set; events with neither key are dropped.
-spec filter_ruleid(logger:log_event(), {binary(), atom()}) -> logger:log_event() | stop.
filter_ruleid(#{meta := Meta = #{rule_id := RuleId}} = Log, {MatchId, _Name}) ->
    %% `rule_ids' may additionally list other rules this event belongs to.
    RuleIDs = maps:get(rule_ids, Meta, #{}),
    IsMatch = (RuleId =:= MatchId) orelse maps:get(MatchId, RuleIDs, false),
    filter_ret(IsMatch andalso is_trace(Meta), Log);
filter_ruleid(#{meta := Meta = #{rule_ids := RuleIDs}} = Log, {MatchId, _Name}) ->
    filter_ret(maps:get(MatchId, RuleIDs, false) andalso is_trace(Meta), Log);
filter_ruleid(_Log, _ExpectId) ->
    stop.
-spec filter_clientid(logger:log_event(), {binary(), atom()}) -> logger:log_event() | stop.
filter_clientid(#{meta := Meta = #{clientid := ClientId}} = Log, {MatchId, _Name}) ->
filter_ret(ClientId =:= MatchId andalso is_trace(Meta), Log);
ClientIDs = maps:get(client_ids, Meta, #{}),
IsMatch = (ClientId =:= MatchId) orelse maps:get(MatchId, ClientIDs, false),
filter_ret(IsMatch andalso is_trace(Meta), Log);
filter_clientid(#{meta := Meta = #{client_ids := ClientIDs}} = Log, {MatchId, _Name}) ->
filter_ret(maps:get(MatchId, ClientIDs, false) andalso is_trace(Meta), Log);
filter_clientid(_Log, _ExpectId) ->
stop.
@ -164,8 +193,14 @@ filters(#{type := clientid, filter := Filter, name := Name}) ->
filters(#{type := topic, filter := Filter, name := Name}) ->
[{topic, {fun ?MODULE:filter_topic/2, {ensure_bin(Filter), Name}}}];
filters(#{type := ip_address, filter := Filter, name := Name}) ->
[{ip_address, {fun ?MODULE:filter_ip_address/2, {ensure_list(Filter), Name}}}].
[{ip_address, {fun ?MODULE:filter_ip_address/2, {ensure_list(Filter), Name}}}];
filters(#{type := ruleid, filter := Filter, name := Name}) ->
[{ruleid, {fun ?MODULE:filter_ruleid/2, {ensure_bin(Filter), Name}}}].
formatter(#{type := _Type, payload_encode := PayloadEncode, formatter := json}) ->
{emqx_trace_json_formatter, #{
payload_encode => PayloadEncode
}};
formatter(#{type := _Type, payload_encode := PayloadEncode}) ->
{emqx_trace_formatter, #{
%% template is for ?SLOG message not ?TRACE.
@ -184,7 +219,8 @@ filter_traces(#{id := Id, level := Level, dst := Dst, filters := Filters}, Acc)
[{Type, {FilterFun, {Filter, Name}}}] when
Type =:= topic orelse
Type =:= clientid orelse
Type =:= ip_address
Type =:= ip_address orelse
Type =:= ruleid
->
[Init#{type => Type, filter => Filter, name => Name, filter_fun => FilterFun} | Acc];
_ ->

View File

@ -0,0 +1,130 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_trace_json_formatter).
-include("emqx_mqtt.hrl").
-export([format/2]).
%% logger_formatter:config/0 is not exported.
-type config() :: map().
%%%-----------------------------------------------------------------
%%% Callback Function
%%%-----------------------------------------------------------------
%% @doc Format a trace log event as a single line of JSON (newline
%% terminated). The event map is first normalized (payload/packet
%% rendering, map-sets to lists) and stamped with an RFC3339 timestamp,
%% then serialized with best-effort JSON encoding.
-spec format(LogEvent, Config) -> unicode:chardata() when
    LogEvent :: logger:log_event(),
    Config :: config().
format(
    LogMap,
    #{payload_encode := PEncode}
) ->
    %% We just make some basic transformations on the input LogMap and then do
    %% an external call to create the JSON text
    Time = emqx_utils_calendar:now_to_rfc3339(microsecond),
    LogMap1 = LogMap#{time => Time},
    LogMap2 = prepare_log_map(LogMap1, PEncode),
    [emqx_logger_jsonfmt:best_effort_json(LogMap2, [force_utf8]), "\n"].
%%%-----------------------------------------------------------------
%%% Helper Functions
%%%-----------------------------------------------------------------
%% Recursively normalize every key/value pair of a log map for JSON
%% output (see prepare_key_value/3 for the per-key rules).
prepare_log_map(LogMap, PEncode) ->
    NewKeyValuePairs = [prepare_key_value(K, V, PEncode) || {K, V} <- maps:to_list(LogMap)],
    maps:from_list(NewKeyValuePairs).
%% Normalize a single log-map entry for JSON rendering. Each known key
%% gets a best-effort transformation: formatting failures fall back to
%% the original value rather than breaking the whole log line.
%% - payload: rendered per the payload_encode setting (text/hidden/hex)
%% - packet: rendered via the MQTT packet formatter
%% - rule_ids / client_ids: map-sets converted to sorted lists
%% - action_id: parsed into an #{type, name} map under `action_info'
%% - nested maps: normalized recursively; everything else passes through
prepare_key_value(payload = K, V, PEncode) ->
    NewV =
        try
            format_payload(V, PEncode)
        catch
            _:_ ->
                %% Keep the raw value if it cannot be formatted.
                V
        end,
    {K, NewV};
prepare_key_value(packet = K, V, PEncode) ->
    NewV =
        try
            format_packet(V, PEncode)
        catch
            _:_ ->
                V
        end,
    {K, NewV};
prepare_key_value(rule_ids = K, V, _PEncode) ->
    NewV =
        try
            format_map_set_to_list(V)
        catch
            _:_ ->
                V
        end,
    {K, NewV};
prepare_key_value(client_ids = K, V, _PEncode) ->
    NewV =
        try
            format_map_set_to_list(V)
        catch
            _:_ ->
                V
        end,
    {K, NewV};
prepare_key_value(action_id = K, V, _PEncode) ->
    try
        %% Note: the key is renamed to `action_info' on success.
        {action_info, format_action_info(V)}
    catch
        _:_ ->
            {K, V}
    end;
prepare_key_value(K, V, PEncode) when is_map(V) ->
    {K, prepare_log_map(V, PEncode)};
prepare_key_value(K, V, _PEncode) ->
    {K, V}.
%% Render an MQTT packet for the trace log; absent packets become "".
format_packet(undefined, _) -> "";
format_packet(Packet, Encode) -> emqx_packet:format(Packet, Encode).
%% Render a message payload for the trace log according to the
%% payload_encode setting:
%% - undefined -> empty string; hidden -> masked
%% - text/hex within the size limit -> decoded/hex-encoded in full
%% - oversized payloads -> truncated representation with original size
format_payload(undefined, _) ->
    "";
format_payload(_, hidden) ->
    "******";
format_payload(Payload, text) when ?MAX_PAYLOAD_FORMAT_LIMIT(Payload) ->
    unicode:characters_to_list(Payload);
format_payload(Payload, hex) when ?MAX_PAYLOAD_FORMAT_LIMIT(Payload) -> binary:encode_hex(Payload);
format_payload(<<Part:?TRUNCATED_PAYLOAD_SIZE/binary, _/binary>> = Payload, Type) ->
    emqx_packet:format_truncated_payload(Part, byte_size(Payload), Type).
%% Convert a "map set" (#{Key => true}) into a sorted list of its keys.
%% Crashes (badmatch) if any value is not `true' or any key is not a
%% binary — callers rely on this to detect non-set inputs and fall back.
format_map_set_to_list(Map) ->
    Collect = fun(Key, Value, Acc) ->
        %% Assert the map-set invariants before collecting the key.
        true = Value,
        true = is_binary(Key),
        [Key | Acc]
    end,
    lists:sort(maps:fold(Collect, [], Map)).
%% Parse an action id binary of the form <<"action:Type:Name...">> into
%% #{type => Type, name => Name}. Any other shape crashes (badmatch),
%% which the caller catches and treats as "not an action id".
format_action_info(ActionId) ->
    Segments = binary:split(ActionId, <<":">>, [global]),
    [<<"action">>, Type, Name | _Rest] = Segments,
    #{
        name => Name,
        type => Type
    }.

View File

@ -395,13 +395,14 @@ t_certdn_as_alias(_) ->
test_cert_extraction_as_alias(Which) ->
%% extract the first two chars
Re = <<"^(..).*$">>,
ClientId = iolist_to_binary(["ClientIdFor_", atom_to_list(Which)]),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], #{
extract_from => Which,
extract_regexp => Re,
extract_as => <<"alias">>
}),
{ok, Compiled} = emqx_variform:compile("substr(" ++ atom_to_list(Which) ++ ",0,2)"),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], [
#{
expression => Compiled,
set_as_attr => <<"alias">>
}
]),
SslConf = emqx_common_test_helpers:client_mtls('tlsv1.2'),
{ok, Client} = emqtt:start_link([
{clientid, ClientId}, {port, 8883}, {ssl, true}, {ssl_opts, SslConf}
@ -416,10 +417,13 @@ test_cert_extraction_as_alias(Which) ->
t_client_attr_from_user_property(_Config) ->
ClientId = atom_to_binary(?FUNCTION_NAME),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], #{
extract_from => user_property,
extract_as => <<"group">>
}),
{ok, Compiled} = emqx_variform:compile("user_property.group"),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], [
#{
expression => Compiled,
set_as_attr => <<"group">>
}
]),
SslConf = emqx_common_test_helpers:client_mtls('tlsv1.3'),
{ok, Client} = emqtt:start_link([
{clientid, ClientId},

View File

@ -454,7 +454,7 @@ zone_global_defaults() ->
upgrade_qos => false,
use_username_as_clientid => false,
wildcard_subscription => true,
client_attrs_init => disabled
client_attrs_init => []
},
overload_protection =>
#{

View File

@ -150,11 +150,13 @@ t_client_attr_as_mountpoint(_Config) ->
<<"limiter">> => #{},
<<"mountpoint">> => <<"groups/${client_attrs.ns}/">>
},
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], #{
extract_from => clientid,
extract_regexp => <<"^(.+)-.+$">>,
extract_as => <<"ns">>
}),
{ok, Compiled} = emqx_variform:compile("nth(1,tokens(clientid,'-'))"),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], [
#{
expression => Compiled,
set_as_attr => <<"ns">>
}
]),
emqx_logger:set_log_level(debug),
with_listener(tcp, attr_as_moutpoint, ListenerConf, fun() ->
{ok, Client} = emqtt:start_link(#{
@ -170,7 +172,7 @@ t_client_attr_as_mountpoint(_Config) ->
?assertMatch([_], emqx_router:match_routes(MatchTopic)),
emqtt:stop(Client)
end),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], disabled),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], []),
ok.
t_current_conns_tcp(_Config) ->

View File

@ -476,7 +476,7 @@ t_replication_options(_Config) ->
resend_window := 60
}
},
emqx_ds_replication_layer_meta:get_options(?PERSISTENT_MESSAGE_DB)
emqx_ds_replication_layer_meta:db_config(?PERSISTENT_MESSAGE_DB)
),
?assertMatch(
#{
@ -584,6 +584,8 @@ message(Topic, Payload, PublishedAt) ->
id = emqx_guid:gen()
}.
on_message_dropped(#message{flags = #{sys := true}}, _Context, _Res, _TestPid) ->
ok;
on_message_dropped(Msg, Context, Res, TestPid) ->
ErrCtx = #{msg => Msg, ctx => Context, res => Res},
ct:pal("this hook should not be called.\n ~p", [ErrCtx]),

View File

@ -74,9 +74,6 @@ session_id() ->
topic() ->
oneof([<<"foo">>, <<"bar">>, <<"foo/#">>, <<"//+/#">>]).
subid() ->
oneof([[]]).
subscription() ->
oneof([#{}]).
@ -129,18 +126,25 @@ put_req() ->
{Track, Seqno},
{seqno_track(), seqno()},
{#s.seqno, put_seqno, Track, Seqno}
),
?LET(
{Topic, Subscription},
{topic(), subscription()},
{#s.subs, put_subscription, Topic, Subscription}
)
]).
get_req() ->
oneof([
{#s.streams, get_stream, stream_id()},
{#s.seqno, get_seqno, seqno_track()}
{#s.seqno, get_seqno, seqno_track()},
{#s.subs, get_subscription, topic()}
]).
del_req() ->
oneof([
{#s.streams, del_stream, stream_id()}
{#s.streams, del_stream, stream_id()},
{#s.subs, del_subscription, topic()}
]).
command(S) ->
@ -153,13 +157,6 @@ command(S) ->
{2, {call, ?MODULE, reopen, [session_id(S)]}},
{2, {call, ?MODULE, commit, [session_id(S)]}},
%% Subscriptions:
{3,
{call, ?MODULE, put_subscription, [
session_id(S), topic(), subid(), subscription()
]}},
{3, {call, ?MODULE, del_subscription, [session_id(S), topic(), subid()]}},
%% Metadata:
{3, {call, ?MODULE, put_metadata, [session_id(S), put_metadata()]}},
{3, {call, ?MODULE, get_metadata, [session_id(S), get_metadata()]}},
@ -170,7 +167,6 @@ command(S) ->
{3, {call, ?MODULE, gen_del, [session_id(S), del_req()]}},
%% Getters:
{4, {call, ?MODULE, get_subscriptions, [session_id(S)]}},
{1, {call, ?MODULE, iterate_sessions, [batch_size()]}}
]);
false ->
@ -207,19 +203,6 @@ postcondition(S, {call, ?MODULE, gen_get, [SessionId, {Idx, Fun, Key}]}, Result)
#{session_id => SessionId, key => Key, 'fun' => Fun}
),
true;
postcondition(S, {call, ?MODULE, get_subscriptions, [SessionId]}, Result) ->
#{SessionId := #s{subs = Subs}} = S,
?assertEqual(maps:size(Subs), emqx_topic_gbt:size(Result)),
maps:foreach(
fun({TopicFilter, Id}, Expected) ->
?assertEqual(
Expected,
emqx_topic_gbt:lookup(TopicFilter, Id, Result, default)
)
end,
Subs
),
true;
postcondition(_, _, _) ->
true.
@ -227,22 +210,6 @@ next_state(S, _V, {call, ?MODULE, create_new, [SessionId]}) ->
S#{SessionId => #s{}};
next_state(S, _V, {call, ?MODULE, delete, [SessionId]}) ->
maps:remove(SessionId, S);
next_state(S, _V, {call, ?MODULE, put_subscription, [SessionId, TopicFilter, SubId, Subscription]}) ->
Key = {TopicFilter, SubId},
update(
SessionId,
#s.subs,
fun(Subs) -> Subs#{Key => Subscription} end,
S
);
next_state(S, _V, {call, ?MODULE, del_subscription, [SessionId, TopicFilter, SubId]}) ->
Key = {TopicFilter, SubId},
update(
SessionId,
#s.subs,
fun(Subs) -> maps:remove(Key, Subs) end,
S
);
next_state(S, _V, {call, ?MODULE, put_metadata, [SessionId, {Key, _Fun, Val}]}) ->
update(
SessionId,
@ -296,19 +263,6 @@ reopen(SessionId) ->
{ok, S} = emqx_persistent_session_ds_state:open(SessionId),
put_state(SessionId, S).
put_subscription(SessionId, TopicFilter, SubId, Subscription) ->
S = emqx_persistent_session_ds_state:put_subscription(
TopicFilter, SubId, Subscription, get_state(SessionId)
),
put_state(SessionId, S).
del_subscription(SessionId, TopicFilter, SubId) ->
S = emqx_persistent_session_ds_state:del_subscription(TopicFilter, SubId, get_state(SessionId)),
put_state(SessionId, S).
get_subscriptions(SessionId) ->
emqx_persistent_session_ds_state:get_subscriptions(get_state(SessionId)).
put_metadata(SessionId, {_MetaKey, Fun, Value}) ->
S = apply(emqx_persistent_session_ds_state, Fun, [Value, get_state(SessionId)]),
put_state(SessionId, S).

View File

@ -1004,9 +1004,9 @@ t_different_groups_same_topic(Config) when is_list(Config) ->
GroupB = <<"bb">>,
Topic = <<"t/1">>,
SharedTopicGroupA = ?SHARE(GroupA, Topic),
SharedTopicGroupA = format_share(GroupA, Topic),
?UPDATE_SUB_QOS(C, SharedTopicGroupA, ?QOS_2),
SharedTopicGroupB = ?SHARE(GroupB, Topic),
SharedTopicGroupB = format_share(GroupB, Topic),
?UPDATE_SUB_QOS(C, SharedTopicGroupB, ?QOS_2),
?retry(
@ -1050,11 +1050,11 @@ t_different_groups_update_subopts(Config) when is_list(Config) ->
Topic = <<"t/1">>,
GroupA = <<"aa">>,
GroupB = <<"bb">>,
SharedTopicGroupA = ?SHARE(GroupA, Topic),
SharedTopicGroupB = ?SHARE(GroupB, Topic),
SharedTopicGroupA = format_share(GroupA, Topic),
SharedTopicGroupB = format_share(GroupB, Topic),
Fun = fun(Group, QoS) ->
?UPDATE_SUB_QOS(C, ?SHARE(Group, Topic), QoS),
?UPDATE_SUB_QOS(C, format_share(Group, Topic), QoS),
?assertMatch(
#{qos := QoS},
emqx_broker:get_subopts(ClientId, emqx_topic:make_shared_record(Group, Topic))
@ -1153,6 +1153,9 @@ t_queue_subscription(Config) when is_list(Config) ->
%% help functions
%%--------------------------------------------------------------------
%% Test helper: build a shared-subscription record for Group/Topic and
%% render it in its canonical display form (e.g. `$share/Group/Topic').
format_share(Group, Topic) ->
    emqx_topic:maybe_format_share(emqx_topic:make_shared_record(Group, Topic)).
kill_process(Pid) ->
kill_process(Pid, fun(_) -> erlang:exit(Pid, kill) end).

View File

@ -96,7 +96,7 @@ t_base_create_delete(_Config) ->
start_at => Now,
end_at => Now + 30 * 60,
payload_encode => text,
extra => #{}
formatter => text
}
],
?assertEqual(ExpectFormat, emqx_trace:format([TraceRec])),
@ -511,4 +511,13 @@ build_old_trace_data() ->
reload() ->
catch ok = gen_server:stop(emqx_trace),
{ok, _Pid} = emqx_trace:start_link().
case emqx_trace:start_link() of
{ok, _Pid} = Res ->
Res;
NotOKRes ->
ct:pal(
"emqx_trace:start_link() gave result: ~p\n"
"(perhaps it is already started)",
[NotOKRes]
)
end.

View File

@ -353,13 +353,13 @@ init(_Opts) ->
ok = emqx_config_handler:add_handler([listeners, '?', '?', ?CONF_ROOT], Module),
ok = hook_deny(),
{ok, #{hooked => false, providers => #{}, init_done => false},
{continue, initialize_authentication}}.
{continue, {initialize_authentication, init}}}.
handle_call(get_providers, _From, #{providers := Providers} = State) ->
reply(Providers, State);
handle_call(
{register_providers, Providers},
_From,
From,
#{providers := Reg0} = State
) ->
case lists:filter(fun({T, _}) -> maps:is_key(T, Reg0) end, Providers) of
@ -371,7 +371,7 @@ handle_call(
Reg0,
Providers
),
reply(ok, State#{providers := Reg}, initialize_authentication);
reply(ok, State#{providers := Reg}, {initialize_authentication, From});
Clashes ->
reply({error, {authentication_type_clash, Clashes}}, State)
end;
@ -447,10 +447,10 @@ handle_call(Req, _From, State) ->
?SLOG(error, #{msg => "unexpected_call", call => Req}),
{reply, ignored, State}.
handle_continue(initialize_authentication, #{init_done := true} = State) ->
handle_continue({initialize_authentication, _From}, #{init_done := true} = State) ->
{noreply, State};
handle_continue(initialize_authentication, #{providers := Providers} = State) ->
InitDone = initialize_authentication(Providers),
handle_continue({initialize_authentication, From}, #{providers := Providers} = State) ->
InitDone = initialize_authentication(Providers, From),
{noreply, maybe_hook(State#{init_done := InitDone})}.
handle_cast(Req, State) ->
@ -484,11 +484,13 @@ code_change(_OldVsn, State, _Extra) ->
%% Private functions
%%------------------------------------------------------------------------------
initialize_authentication(Providers) ->
%% Initialize authentication chains for the registered provider types.
%% Emits the `authn_chains_initialization_done' tracepoint carrying the
%% caller (`From') and the boolean result, so tests can assert which
%% registration triggered initialization. Returns whether init completed.
initialize_authentication(Providers, From) ->
    ProviderTypes = maps:keys(Providers),
    Chains = chain_configs(),
    %% Only proceed when every configured chain has a registered provider.
    HasProviders = has_providers_for_configs(Chains, ProviderTypes),
    Result = do_initialize_authentication(Providers, Chains, HasProviders),
    ?tp(info, authn_chains_initialization_done, #{from => From, result => Result}),
    Result.
do_initialize_authentication(_Providers, _Chains, _HasProviders = false) ->
false;
@ -500,7 +502,6 @@ do_initialize_authentication(Providers, Chains, _HasProviders = true) ->
Chains
),
ok = unhook_deny(),
?tp(info, authn_chains_initialization_done, #{}),
true.
initialize_chain_authentication(_Providers, _ChainName, []) ->

View File

@ -69,9 +69,10 @@ t_initialize(_Config) ->
emqx_access_control:authenticate(?CLIENTINFO)
),
Self = self(),
?assertWaitEvent(
ok = emqx_authn_test_lib:register_fake_providers([{password_based, built_in_database}]),
#{?snk_kind := authn_chains_initialization_done},
#{?snk_kind := authn_chains_initialization_done, from := {Self, _}},
100
),

View File

@ -557,12 +557,14 @@ t_publish_last_will_testament_denied_topic(_Config) ->
t_alias_prefix(_Config) ->
{ok, _} = emqx_authz:update(?CMD_REPLACE, [?SOURCE_FILE_CLIENT_ATTR]),
ExtractSuffix = <<"^.*-(.*)$">>,
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], #{
extract_from => clientid,
extract_regexp => ExtractSuffix,
extract_as => <<"alias">>
}),
%% '^.*-(.*)$': extract the suffix after the last '-'
{ok, Compiled} = emqx_variform:compile("concat(regex_extract(clientid,'^.*-(.*)$'))"),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], [
#{
expression => Compiled,
set_as_attr => <<"alias">>
}
]),
ClientId = <<"org1-name2">>,
SubTopic = <<"name2/#">>,
SubTopicNotAllowed = <<"name3/#">>,
@ -572,7 +574,7 @@ t_alias_prefix(_Config) ->
?assertMatch({ok, _, [?RC_NOT_AUTHORIZED]}, emqtt:subscribe(C, SubTopicNotAllowed)),
unlink(C),
emqtt:stop(C),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], disalbed),
emqx_config:put_zone_conf(default, [mqtt, client_attrs_init], []),
ok.
%% client is allowed by ACL to publish to its LWT topic, is connected,

View File

@ -41,6 +41,9 @@
]).
-export([clean_cache/0]).
%% For tests
-export([hard_coded_test_action_info_modules/0]).
-callback bridge_v1_type_name() ->
atom()
| {
@ -128,8 +131,13 @@ hard_coded_action_info_modules_common() ->
emqx_bridge_mqtt_pubsub_action_info
].
%% This exists so that it can be mocked for test cases: production code
%% contributes no extra modules, but tests meck this function to inject
%% fake action-info modules.
hard_coded_test_action_info_modules() -> [].
%% All known action-info modules: common + enterprise + test-injected.
%% The last call is made through ?MODULE (a remote call) on purpose, so
%% that meck-based mocking in tests takes effect.
hard_coded_action_info_modules() ->
    hard_coded_action_info_modules_common() ++
        hard_coded_action_info_modules_ee() ++
        ?MODULE:hard_coded_test_action_info_modules().
%% ====================================================================
%% API

View File

@ -1030,7 +1030,26 @@ bridge_v2_type_to_connector_type(Type) ->
import_config(RawConf) ->
%% actions structure
emqx_bridge:import_config(RawConf, <<"actions">>, ?ROOT_KEY_ACTIONS, config_key_path()).
ActionRes = emqx_bridge:import_config(
RawConf, <<"actions">>, ?ROOT_KEY_ACTIONS, config_key_path()
),
SourceRes = emqx_bridge:import_config(
RawConf, <<"sources">>, ?ROOT_KEY_SOURCES, config_key_path_sources()
),
group_import_results([ActionRes, SourceRes]).
%% Partition a list of `{ok, _} | {error, _}' import results into
%% `{results, {Oks, Errors}}', preserving the original ordering within
%% each group (foldr keeps input order).
group_import_results(ResultList) ->
    Partition = fun
        ({ok, Ok}, {OkAcc, ErrorAcc}) ->
            {[Ok | OkAcc], ErrorAcc};
        ({error, Error}, {OkAcc, ErrorAcc}) ->
            {OkAcc, [Error | ErrorAcc]}
    end,
    {results, lists:foldr(Partition, {[], []}, ResultList)}.
%%====================================================================
%% Config Update Handler API

View File

@ -1007,7 +1007,13 @@ call_operation(NodeOrAll, OperFunc, Args = [_Nodes, _ConfRootKey, BridgeType, Br
{error, not_implemented} ->
?NOT_IMPLEMENTED;
{error, timeout} ->
?BAD_REQUEST(<<"Request timeout">>);
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
?SLOG(warning, #{
msg => "bridge_bpapi_call_timeout",
bridge => BridgeId,
call => OperFunc
}),
?SERVICE_UNAVAILABLE(<<"Request timeout">>);
{error, {start_pool_failed, Name, Reason}} ->
Msg = bin(
io_lib:format("Failed to start ~p pool for reason ~p", [Name, redact(Reason)])
@ -1018,9 +1024,8 @@ call_operation(NodeOrAll, OperFunc, Args = [_Nodes, _ConfRootKey, BridgeType, Br
?SLOG(warning, #{
msg => "bridge_inconsistent_in_cluster_for_call_operation",
reason => not_found,
type => BridgeType,
name => BridgeName,
bridge => BridgeId
bridge => BridgeId,
call => OperFunc
}),
?SERVICE_UNAVAILABLE(<<"Bridge not found on remote node: ", BridgeId/binary>>);
{error, {node_not_found, Node}} ->

View File

@ -825,22 +825,53 @@ do_start_stop_bridges(Type, Config) ->
%% Connecting to this endpoint should always timeout
BadServer = iolist_to_binary(io_lib:format("localhost:~B", [ListenPort])),
BadName = <<"bad_", (atom_to_binary(Type))/binary>>,
CreateRes0 = request_json(
post,
uri(["bridges"]),
?MQTT_BRIDGE(BadServer, BadName),
Config
),
?assertMatch(
{ok, 201, #{
<<"type">> := ?BRIDGE_TYPE_MQTT,
<<"name">> := BadName,
<<"enable">> := true,
<<"server">> := BadServer,
<<"status">> := <<"connecting">>,
<<"node_status">> := [_ | _]
<<"server">> := BadServer
}},
request_json(
post,
uri(["bridges"]),
?MQTT_BRIDGE(BadServer, BadName),
Config
)
CreateRes0
),
{ok, 201, CreateRes1} = CreateRes0,
case CreateRes1 of
#{
<<"node_status">> := [
#{
<<"status">> := <<"disconnected">>,
<<"status_reason">> := <<"connack_timeout">>
},
#{<<"status">> := <<"connecting">>}
| _
],
%% `inconsistent': one node is `?status_disconnected' (because it has already
%% timed out), the other node is `?status_connecting' (started later and
%% haven't timed out yet)
<<"status">> := <<"inconsistent">>,
<<"status_reason">> := <<"connack_timeout">>
} ->
ok;
#{
<<"node_status">> := [_, _ | _],
<<"status">> := <<"disconnected">>,
<<"status_reason">> := <<"connack_timeout">>
} ->
ok;
#{
<<"node_status">> := [_],
<<"status">> := <<"connecting">>
} ->
ok;
_ ->
error({unexpected_result, CreateRes1})
end,
BadBridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_MQTT, BadName),
?assertMatch(
%% request from product: return 400 on such errors

View File

@ -705,7 +705,7 @@ t_async_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint) ->
),
receive
{result, Result} -> IsSuccessCheck(Result)
after 5_000 ->
after 8_000 ->
throw(timeout)
end,
ok.

View File

@ -2,7 +2,7 @@
{erl_opts, [debug_info]}.
{deps, [
{ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.6.1"}}},
{ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.7.0"}}},
{emqx_connector, {path, "../../apps/emqx_connector"}},
{emqx_resource, {path, "../../apps/emqx_resource"}},
{emqx_bridge, {path, "../../apps/emqx_bridge"}}

View File

@ -181,7 +181,7 @@ fields("post", Type) ->
cql_field() ->
{cql,
mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("cql_template"), default => ?DEFAULT_CQL, format => <<"sql">>}
)}.

View File

@ -581,7 +581,6 @@ t_write_failure(Config) ->
)
end),
fun(Trace0) ->
ct:pal("trace: ~p", [Trace0]),
Trace = ?of_kind(
[buffer_worker_flush_nack, buffer_worker_retry_inflight_failed], Trace0
),

View File

@ -184,8 +184,12 @@ fields("post", Type) ->
sql_field() ->
{sql,
mk(
binary(),
#{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>}
emqx_schema:template(),
#{
desc => ?DESC("sql_template"),
default => ?DEFAULT_SQL,
format => <<"sql">>
}
)}.
batch_value_separator_field() ->

View File

@ -87,6 +87,7 @@ connector_values() ->
<<"url">> => <<"http://127.0.0.1:8000">>,
<<"aws_access_key_id">> => <<"root">>,
<<"aws_secret_access_key">> => <<"******">>,
<<"region">> => <<"us-west-2">>,
<<"pool_size">> => 8,
<<"resource_opts">> =>
#{
@ -113,7 +114,8 @@ action_values() ->
<<"parameters">> =>
#{
<<"table">> => <<"mqtt_msg">>,
<<"template">> => ?DEFAULT_TEMPLATE
<<"template">> => ?DEFAULT_TEMPLATE,
<<"hash_key">> => <<"clientid">>
}
}.
@ -161,10 +163,16 @@ fields(dynamo_action) ->
fields(action_parameters) ->
Parameters =
[
{template,
{template, template_field_schema()},
{hash_key,
mk(
binary(),
#{desc => ?DESC("template"), default => ?DEFAULT_TEMPLATE}
#{desc => ?DESC("hash_key"), required => true}
)},
{range_key,
mk(
binary(),
#{desc => ?DESC("range_key"), required => false}
)}
] ++ emqx_bridge_dynamo_connector:fields(config),
lists:foldl(
@ -174,6 +182,7 @@ fields(action_parameters) ->
Parameters,
[
url,
region,
aws_access_key_id,
aws_secret_access_key,
pool_size,
@ -199,16 +208,22 @@ fields(connector_resource_opts) ->
fields("config") ->
[
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{template,
mk(
binary(),
#{desc => ?DESC("template"), default => ?DEFAULT_TEMPLATE}
)},
{template, template_field_schema()},
{local_topic,
mk(
binary(),
#{desc => ?DESC("local_topic"), default => undefined}
)},
{hash_key,
mk(
binary(),
#{desc => ?DESC("hash_key"), required => true}
)},
{range_key,
mk(
binary(),
#{desc => ?DESC("range_key"), required => false}
)},
{resource_opts,
mk(
ref(?MODULE, "creation_opts"),
@ -230,6 +245,15 @@ fields("put") ->
fields("get") ->
emqx_bridge_schema:status_fields() ++ fields("post").
%% Schema for the DynamoDB `template' field: a template value (see
%% `emqx_schema:template/0') with a default of ?DEFAULT_TEMPLATE.
%% Centralized here so the action-parameters and legacy "config"
%% field lists share one definition.
template_field_schema() ->
    mk(
        emqx_schema:template(),
        #{
            desc => ?DESC("template"),
            default => ?DEFAULT_TEMPLATE
        }
    ).
desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->

View File

@ -45,6 +45,7 @@ roots() ->
fields(config) ->
[
{url, mk(binary(), #{required => true, desc => ?DESC("url")})},
{region, mk(binary(), #{required => true, desc => ?DESC("region")})},
{table, mk(binary(), #{required => true, desc => ?DESC("table")})},
{aws_access_key_id,
mk(
@ -102,6 +103,12 @@ on_start(
pool_name => InstanceId,
installed_channels => #{}
},
case Config of
#{region := Region} ->
application:set_env(erlcloud, aws_region, to_str(Region));
_ ->
ok
end,
case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of
ok ->
{ok, State};
@ -126,12 +133,20 @@ on_add_channel(
create_channel_state(
#{parameters := Conf} = _ChannelConfig
) ->
#{
table := Table
} = Conf,
Keys = maps:with([hash_key, range_key], Conf),
Keys1 = maps:fold(
fun(K, V, Acc) ->
Acc#{K := erlang:binary_to_existing_atom(V)}
end,
Keys,
Keys
),
Base = maps:without([template, hash_key, range_key], Conf),
Base1 = maps:merge(Base, Keys1),
Templates = parse_template_from_conf(Conf),
State = #{
table => Table,
State = Base1#{
templates => Templates
},
{ok, State}.
@ -232,11 +247,16 @@ do_query(
templates := Templates
} = ChannelState,
Result =
ecpool:pick_and_do(
PoolName,
{emqx_bridge_dynamo_connector_client, query, [Table, QueryTuple, Templates]},
no_handover
),
case ensuare_dynamo_keys(Query, ChannelState) of
true ->
ecpool:pick_and_do(
PoolName,
{emqx_bridge_dynamo_connector_client, query, [Table, QueryTuple, Templates]},
no_handover
);
_ ->
{error, missing_filter_or_range_key}
end,
case Result of
{error, Reason} ->
@ -288,6 +308,25 @@ get_query_tuple([{_ChannelId, {_QueryType, _Data}} | _]) ->
get_query_tuple([InsertQuery | _]) ->
get_query_tuple(InsertQuery).
%% Check that every insert query carries the configured hash key (and
%% range key, when one is configured) so DynamoDB will not reject it.
%% Non-insert queries (payload is not a map) are always accepted.
%% NOTE(review): "ensuare" is a typo for "ensure", but the name is used
%% by callers and must stay as-is.
ensuare_dynamo_keys({_, Data} = Query, State) when is_map(Data) ->
    %% Normalize a single query into a batch of one.
    ensuare_dynamo_keys([Query], State);
ensuare_dynamo_keys([{_, Data} | _] = Queries, State) when is_map(Data) ->
    %% The map values of hash_key/range_key in State are the actual
    %% attribute names expected in each query payload.
    RequiredKeys = [Name || {_, Name} <- maps:to_list(maps:with([hash_key, range_key], State))],
    HasAllKeys = fun({_, Payload}) ->
        lists:all(fun(KeyName) -> maps:is_key(KeyName, Payload) end, RequiredKeys)
    end,
    lists:all(HasAllKeys, Queries);
%% Not an insert query: nothing to validate.
ensuare_dynamo_keys(_Query, _State) ->
    true.
connect(Opts) ->
Config = proplists:get_value(config, Opts),
{ok, _Pid} = emqx_bridge_dynamo_connector_client:start_link(Config).

View File

@ -16,6 +16,7 @@
-define(TABLE_BIN, to_bin(?TABLE)).
-define(ACCESS_KEY_ID, "root").
-define(SECRET_ACCESS_KEY, "public").
-define(REGION, "us-west-2").
-define(HOST, "dynamo").
-define(PORT, 8000).
-define(SCHEMA, "http://").
@ -177,7 +178,9 @@ dynamo_config(BridgeType, Config) ->
"bridges.~s.~s {"
"\n enable = true"
"\n url = \"http://~s:~p\""
"\n region = ~p"
"\n table = ~p"
"\n hash_key =\"clientid\""
"\n aws_access_key_id = ~p"
"\n aws_secret_access_key = ~p"
"\n resource_opts = {"
@ -191,6 +194,7 @@ dynamo_config(BridgeType, Config) ->
Name,
Host,
Port,
?REGION,
?TABLE,
?ACCESS_KEY_ID,
%% NOTE: using file-based secrets with HOCON configs
@ -210,7 +214,8 @@ action_config(Config) ->
<<"enable">> => true,
<<"parameters">> =>
#{
<<"table">> => ?TABLE
<<"table">> => ?TABLE,
<<"hash_key">> => <<"clientid">>
},
<<"resource_opts">> =>
#{
@ -234,6 +239,7 @@ connector_config(Config) ->
<<"url">> => URL,
<<"aws_access_key_id">> => ?ACCESS_KEY_ID,
<<"aws_secret_access_key">> => AccessKey,
<<"region">> => ?REGION,
<<"enable">> => true,
<<"pool_size">> => 8,
<<"resource_opts">> =>
@ -355,7 +361,7 @@ t_setup_via_config_and_publish(Config) ->
create_bridge(Config)
),
MsgId = emqx_utils:gen_id(),
SentData = #{id => MsgId, payload => ?PAYLOAD},
SentData = #{clientid => <<"clientid">>, id => MsgId, payload => ?PAYLOAD},
?check_trace(
begin
?wait_async_action(
@ -421,7 +427,7 @@ t_setup_via_http_api_and_publish(Config) ->
create_bridge_http(PgsqlConfig)
),
MsgId = emqx_utils:gen_id(),
SentData = #{id => MsgId, payload => ?PAYLOAD},
SentData = #{clientid => <<"clientid">>, id => MsgId, payload => ?PAYLOAD},
?check_trace(
begin
?wait_async_action(
@ -486,7 +492,7 @@ t_write_failure(Config) ->
#{?snk_kind := resource_connected_enter},
20_000
),
SentData = #{id => emqx_utils:gen_id(), payload => ?PAYLOAD},
SentData = #{clientid => <<"clientid">>, id => emqx_utils:gen_id(), payload => ?PAYLOAD},
emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
?assertMatch(
{error, {resource_error, #{reason := timeout}}}, send_message(Config, SentData)
@ -513,12 +519,21 @@ t_simple_query(Config) ->
ok.
%% A message that carries the hash key but none of the other expected
%% data fields passes the local key check and is rejected by DynamoDB
%% itself with a `ValidationException'.
t_missing_data(Config) ->
    ?assertMatch(
        {ok, _},
        create_bridge(Config)
    ),
    Result = send_message(Config, #{clientid => <<"clientid">>}),
    ?assertMatch({error, {<<"ValidationException">>, <<>>}}, Result),
    ok.
t_missing_hash_key(Config) ->
?assertMatch(
{ok, _},
create_bridge(Config)
),
Result = send_message(Config, #{}),
?assertMatch({error, {unrecoverable_error, {invalid_request, _}}}, Result),
?assertMatch({error, missing_filter_or_range_key}, Result),
ok.
t_bad_parameter(Config) ->
@ -543,7 +558,9 @@ t_action_create_via_http(Config) ->
emqx_bridge_v2_testlib:t_create_via_http(Config).
t_action_sync_query(Config) ->
MakeMessageFun = fun() -> #{id => <<"the_message_id">>, payload => ?PAYLOAD} end,
MakeMessageFun = fun() ->
#{clientid => <<"clientid">>, id => <<"the_message_id">>, payload => ?PAYLOAD}
end,
IsSuccessCheck = fun(Result) -> ?assertEqual({ok, []}, Result) end,
TracePoint = dynamo_connector_query_return,
emqx_bridge_v2_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck, TracePoint).

View File

@ -135,7 +135,7 @@ overwrite() ->
index() ->
{index,
?HOCON(
binary(),
emqx_schema:template(),
#{
required => true,
example => <<"${payload.index}">>,
@ -146,7 +146,7 @@ index() ->
id(Required) ->
{id,
?HOCON(
binary(),
emqx_schema:template(),
#{
required => Required,
example => <<"${payload.id}">>,
@ -157,7 +157,7 @@ id(Required) ->
doc() ->
{doc,
?HOCON(
binary(),
emqx_schema:template(),
#{
required => false,
example => <<"${payload.doc}">>,
@ -187,7 +187,7 @@ doc_as_upsert() ->
routing() ->
{routing,
?HOCON(
binary(),
emqx_schema:template(),
#{
required => false,
example => <<"${payload.routing}">>,

View File

@ -122,7 +122,7 @@ fields(producer) ->
)},
{ordering_key_template,
sc(
binary(),
emqx_schema:template(),
#{
default => <<>>,
desc => ?DESC("ordering_key_template")
@ -130,7 +130,7 @@ fields(producer) ->
)},
{payload_template,
sc(
binary(),
emqx_schema:template(),
#{
default => <<>>,
desc => ?DESC("payload_template")
@ -201,8 +201,11 @@ fields(consumer_topic_mapping) ->
{qos, mk(emqx_schema:qos(), #{default => 0, desc => ?DESC(consumer_mqtt_qos)})},
{payload_template,
mk(
string(),
#{default => <<"${.}">>, desc => ?DESC(consumer_mqtt_payload)}
emqx_schema:template(),
#{
default => <<"${.}">>,
desc => ?DESC(consumer_mqtt_payload)
}
)}
];
fields("consumer_resource_opts") ->
@ -221,14 +224,18 @@ fields("consumer_resource_opts") ->
fields(key_value_pair) ->
[
{key,
mk(binary(), #{
mk(emqx_schema:template(), #{
required => true,
validator => [
emqx_resource_validator:not_empty("Key templates must not be empty")
],
desc => ?DESC(kv_pair_key)
})},
{value, mk(binary(), #{required => true, desc => ?DESC(kv_pair_value)})}
{value,
mk(emqx_schema:template(), #{
required => true,
desc => ?DESC(kv_pair_value)
})}
];
fields("get_producer") ->
emqx_bridge_schema:status_fields() ++ fields("post_producer");

View File

@ -1929,7 +1929,6 @@ t_bad_attributes(Config) ->
ok
end,
fun(Trace) ->
ct:pal("trace:\n ~p", [Trace]),
?assertMatch(
[
#{placeholder := [<<"payload">>, <<"ok">>], value := #{}},

View File

@ -6,7 +6,7 @@
{emqx_connector, {path, "../../apps/emqx_connector"}},
{emqx_resource, {path, "../../apps/emqx_resource"}},
{emqx_bridge, {path, "../../apps/emqx_bridge"}},
{greptimedb, {git, "https://github.com/GreptimeTeam/greptimedb-client-erl", {tag, "v0.1.7"}}}
{greptimedb, {git, "https://github.com/GreptimeTeam/greptimedb-ingester-erl", {tag, "v0.1.8"}}}
]}.
{plugins, [rebar3_path_deps]}.
{project_plugins, [erlfmt]}.

View File

@ -324,7 +324,7 @@ query_by_clientid(Topic, ClientId, Config) ->
{"Content-Type", "application/x-www-form-urlencoded"}
],
Body = <<"sql=select * from \"", Topic/binary, "\" where clientid='", ClientId/binary, "'">>,
{ok, 200, _Headers, RawBody0} =
{ok, StatusCode, _Headers, RawBody0} =
ehttpc:request(
EHttpcPoolName,
post,
@ -335,7 +335,6 @@ query_by_clientid(Topic, ClientId, Config) ->
case emqx_utils_json:decode(RawBody0, [return_maps]) of
#{
<<"code">> := 0,
<<"output">> := [
#{
<<"records">> := #{
@ -344,12 +343,12 @@ query_by_clientid(Topic, ClientId, Config) ->
}
}
]
} ->
} when StatusCode >= 200 andalso StatusCode =< 300 ->
make_row(Schema, Rows);
#{
<<"code">> := Code,
<<"error">> := Error
} ->
} when StatusCode > 300 ->
GreptimedbName = ?config(greptimedb_name, Config),
Type = greptimedb_type_bin(?config(greptimedb_type, Config)),
BridgeId = emqx_bridge_resource:bridge_id(Type, GreptimedbName),
@ -367,7 +366,9 @@ query_by_clientid(Topic, ClientId, Config) ->
_ ->
%% Table not found
#{}
end
end;
Error ->
{error, Error}
end.
make_row(null, _Rows) ->
@ -910,69 +911,6 @@ t_start_exception(Config) ->
),
ok.
%% Verify behavior when the connection to GreptimeDB is cut (via the
%% toxiproxy `down' failure) while a message is sent.  In sync query
%% mode the caller sees a resource timeout and the async reply is
%% nacked; in async mode the send itself succeeds and the failure is
%% only observable in the async reply.  In both modes the resulting
%% error must be classified as recoverable so the buffer layer retries.
t_write_failure(Config) ->
    ProxyName = ?config(proxy_name, Config),
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    QueryMode = ?config(query_mode, Config),
    {ok, _} = create_bridge(Config),
    ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
    %% One field per GreptimeDB column type exercised by this suite.
    Payload = #{
        int_key => -123,
        bool => true,
        float_key => 24.5,
        uint_key => 123
    },
    SentData = #{
        <<"clientid">> => ClientId,
        <<"topic">> => atom_to_binary(?FUNCTION_NAME),
        <<"timestamp">> => erlang:system_time(millisecond),
        <<"payload">> => Payload
    },
    ?check_trace(
        emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
            case QueryMode of
                sync ->
                    ?wait_async_action(
                        ?assertMatch(
                            {error, {resource_error, #{reason := timeout}}},
                            send_message(Config, SentData)
                        ),
                        #{?snk_kind := handle_async_reply, action := nack},
                        1_000
                    );
                async ->
                    ?wait_async_action(
                        ?assertEqual(ok, send_message(Config, SentData)),
                        #{?snk_kind := handle_async_reply},
                        1_000
                    )
            end
        end),
        fun(Trace0) ->
            %% Both branches assert the same thing: the first async reply
            %% carries an error that is NOT unrecoverable (i.e. retryable).
            case QueryMode of
                sync ->
                    Trace = ?of_kind(handle_async_reply, Trace0),
                    ?assertMatch([_ | _], Trace),
                    [#{result := Result} | _] = Trace,
                    ?assert(
                        not emqx_bridge_greptimedb_connector:is_unrecoverable_error(Result),
                        #{got => Result}
                    );
                async ->
                    Trace = ?of_kind(handle_async_reply, Trace0),
                    ?assertMatch([_ | _], Trace),
                    [#{result := Result} | _] = Trace,
                    ?assert(
                        not emqx_bridge_greptimedb_connector:is_unrecoverable_error(Result),
                        #{got => Result}
                    )
            end,
            ok
        end
    ),
    ok.
t_missing_field(Config) ->
BatchSize = ?config(batch_size, Config),
IsBatch = BatchSize > 1,

View File

@ -167,13 +167,13 @@ fields(action_parameters) ->
})},
{partition_key,
mk(binary(), #{
required => false, desc => ?DESC(emqx_bridge_hstreamdb_connector, "partition_key")
mk(emqx_schema:template(), #{
required => false,
desc => ?DESC(emqx_bridge_hstreamdb_connector, "partition_key")
})},
{grpc_flush_timeout, fun grpc_flush_timeout/1},
{record_template,
mk(binary(), #{default => <<"${payload}">>, desc => ?DESC("record_template")})},
{record_template, record_template_schema()},
{aggregation_pool_size,
mk(pos_integer(), #{
default => ?DEFAULT_AGG_POOL_SIZE, desc => ?DESC("aggregation_pool_size")
@ -222,6 +222,12 @@ fields("put") ->
hstream_bridge_common_fields() ++
connector_fields().
%% Schema for the HStreamDB `record_template' field: a template value
%% defaulting to "${payload}".  Centralized so the action-parameters
%% and legacy bridge field lists share one definition.
record_template_schema() ->
    mk(emqx_schema:template(), #{
        default => <<"${payload}">>,
        desc => ?DESC("record_template")
    }).
grpc_timeout(type) -> emqx_schema:timeout_duration_ms();
grpc_timeout(desc) -> ?DESC(emqx_bridge_hstreamdb_connector, "grpc_timeout");
grpc_timeout(default) -> ?DEFAULT_GRPC_TIMEOUT_RAW;
@ -239,8 +245,7 @@ hstream_bridge_common_fields() ->
[
{direction, mk(egress, #{desc => ?DESC("config_direction"), default => egress})},
{local_topic, mk(binary(), #{desc => ?DESC("local_topic")})},
{record_template,
mk(binary(), #{default => <<"${payload}">>, desc => ?DESC("record_template")})}
{record_template, record_template_schema()}
] ++
emqx_resource_schema:fields("resource_opts").

View File

@ -128,9 +128,10 @@ fields("request") ->
desc => ?DESC("method"),
validator => fun ?MODULE:validate_method/1
})},
{path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})},
{body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})},
{headers, hoconsc:mk(map(), #{required => false, desc => ?DESC("headers")})},
{path, hoconsc:mk(emqx_schema:template(), #{required => false, desc => ?DESC("path")})},
{body, hoconsc:mk(emqx_schema:template(), #{required => false, desc => ?DESC("body")})},
{headers,
hoconsc:mk(map(), #{required => false, desc => ?DESC("headers"), is_template => true})},
{max_retries,
sc(
non_neg_integer(),
@ -315,7 +316,7 @@ on_query(InstId, {send_message, Msg}, State) ->
ClientId = maps:get(clientid, Msg, undefined),
on_query(
InstId,
{ClientId, Method, {Path, Headers, Body}, Timeout, Retry},
{undefined, ClientId, Method, {Path, Headers, Body}, Timeout, Retry},
State
)
end;
@ -345,19 +346,19 @@ on_query(
ClientId = clientid(Msg),
on_query(
InstId,
{ClientId, Method, {Path, Headers, Body}, Timeout, Retry},
{ActionId, ClientId, Method, {Path, Headers, Body}, Timeout, Retry},
State
)
end;
on_query(InstId, {Method, Request}, State) ->
%% TODO: Get retry from State
on_query(InstId, {undefined, Method, Request, 5000, _Retry = 2}, State);
on_query(InstId, {undefined, undefined, Method, Request, 5000, _Retry = 2}, State);
on_query(InstId, {Method, Request, Timeout}, State) ->
%% TODO: Get retry from State
on_query(InstId, {undefined, Method, Request, Timeout, _Retry = 2}, State);
on_query(InstId, {undefined, undefined, Method, Request, Timeout, _Retry = 2}, State);
on_query(
InstId,
{KeyOrNum, Method, Request, Timeout, Retry},
{ActionId, KeyOrNum, Method, Request, Timeout, Retry},
#{base_path := BasePath} = State
) ->
?TRACE(
@ -367,10 +368,12 @@ on_query(
request => redact_request(Request),
note => ?READACT_REQUEST_NOTE,
connector => InstId,
action_id => ActionId,
state => redact(State)
}
),
NRequest = formalize_request(Method, BasePath, Request),
trace_rendered_action_template(ActionId, Method, NRequest, Timeout),
Worker = resolve_pool_worker(State, KeyOrNum),
Result0 = ehttpc:request(
Worker,
@ -427,7 +430,7 @@ on_query_async(InstId, {send_message, Msg}, ReplyFunAndArgs, State) ->
ClientId = maps:get(clientid, Msg, undefined),
on_query_async(
InstId,
{ClientId, Method, {Path, Headers, Body}, Timeout},
{undefined, ClientId, Method, {Path, Headers, Body}, Timeout},
ReplyFunAndArgs,
State
)
@ -457,14 +460,14 @@ on_query_async(
ClientId = clientid(Msg),
on_query_async(
InstId,
{ClientId, Method, {Path, Headers, Body}, Timeout},
{ActionId, ClientId, Method, {Path, Headers, Body}, Timeout},
ReplyFunAndArgs,
State
)
end;
on_query_async(
InstId,
{KeyOrNum, Method, Request, Timeout},
{ActionId, KeyOrNum, Method, Request, Timeout},
ReplyFunAndArgs,
#{base_path := BasePath} = State
) ->
@ -480,6 +483,7 @@ on_query_async(
}
),
NRequest = formalize_request(Method, BasePath, Request),
trace_rendered_action_template(ActionId, Method, NRequest, Timeout),
MaxAttempts = maps:get(max_attempts, State, 3),
Context = #{
attempt => 1,
@ -499,6 +503,31 @@ on_query_async(
),
{ok, Worker}.
%% Record the rendered HTTP request for action tracing.  Headers are
%% redacted so secrets never reach the trace log; the `body' key is
%% included only when the request actually carries a body (3-tuple).
trace_rendered_action_template(ActionId, Method, NRequest, Timeout) ->
    Common = #{
        method => Method,
        timeout => Timeout
    },
    Details =
        case NRequest of
            {Path, Headers} ->
                Common#{
                    path => Path,
                    headers => emqx_utils_redact:redact_headers(Headers)
                };
            {Path, Headers, Body} ->
                Common#{
                    path => Path,
                    headers => emqx_utils_redact:redact_headers(Headers),
                    body => Body
                }
        end,
    emqx_trace:rendered_action_template(ActionId, Details).
resolve_pool_worker(State, undefined) ->
resolve_pool_worker(State, self());
resolve_pool_worker(#{pool_name := PoolName} = State, Key) ->

View File

@ -114,7 +114,7 @@ fields("parameters_opts") ->
[
{path,
mk(
binary(),
emqx_schema:template(),
#{
desc => ?DESC("config_path"),
required => false
@ -270,7 +270,8 @@ headers_field() ->
<<"content-type">> => <<"application/json">>,
<<"keep-alive">> => <<"timeout=5">>
},
desc => ?DESC("config_headers")
desc => ?DESC("config_headers"),
is_template => true
}
)}.
@ -287,7 +288,7 @@ method_field() ->
body_field() ->
{body,
mk(
binary(),
emqx_schema:template(),
#{
default => undefined,
desc => ?DESC("config_body")

View File

@ -30,8 +30,8 @@
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("emqx/include/asserts.hrl").
-define(BRIDGE_TYPE, <<"webhook">>).
-define(BRIDGE_NAME, atom_to_binary(?MODULE)).
-define(BRIDGE_TYPE, emqx_bridge_http_test_lib:bridge_type()).
-define(BRIDGE_NAME, emqx_bridge_http_test_lib:bridge_name()).
all() ->
emqx_common_test_helpers:all(?MODULE).
@ -73,21 +73,10 @@ suite() ->
init_per_testcase(t_bad_bridge_config, Config) ->
Config;
init_per_testcase(t_send_async_connection_timeout, Config) ->
HTTPPath = <<"/path">>,
ServerSSLOpts = false,
{ok, {HTTPPort, _Pid}} = emqx_bridge_http_connector_test_server:start_link(
_Port = random, HTTPPath, ServerSSLOpts
),
ResponseDelayMS = 500,
ok = emqx_bridge_http_connector_test_server:set_handler(
success_http_handler(#{response_delay => ResponseDelayMS})
),
[
{http_server, #{port => HTTPPort, path => HTTPPath}},
{response_delay_ms, ResponseDelayMS}
| Config
];
init_per_testcase(Case, Config) when
Case =:= t_send_async_connection_timeout orelse Case =:= t_send_get_trace_messages
->
emqx_bridge_http_test_lib:init_http_success_server(Config);
init_per_testcase(t_path_not_found, Config) ->
HTTPPath = <<"/nonexisting/path">>,
ServerSSLOpts = false,
@ -115,7 +104,9 @@ init_per_testcase(t_bridge_probes_header_atoms, Config) ->
{ok, {HTTPPort, _Pid}} = emqx_bridge_http_connector_test_server:start_link(
_Port = random, HTTPPath, ServerSSLOpts
),
ok = emqx_bridge_http_connector_test_server:set_handler(success_http_handler()),
ok = emqx_bridge_http_connector_test_server:set_handler(
emqx_bridge_http_test_lib:success_http_handler()
),
[{http_server, #{port => HTTPPort, path => HTTPPath}} | Config];
init_per_testcase(_TestCase, Config) ->
Server = start_http_server(#{response_delay_ms => 0}),
@ -126,7 +117,8 @@ end_per_testcase(TestCase, _Config) when
TestCase =:= t_too_many_requests;
TestCase =:= t_rule_action_expired;
TestCase =:= t_bridge_probes_header_atoms;
TestCase =:= t_send_async_connection_timeout
TestCase =:= t_send_async_connection_timeout;
TestCase =:= t_send_get_trace_messages
->
ok = emqx_bridge_http_connector_test_server:stop(),
persistent_term:erase({?MODULE, times_called}),
@ -250,115 +242,8 @@ get_metrics(Name) ->
Type = <<"http">>,
emqx_bridge:get_metrics(Type, Name).
bridge_async_config(#{port := Port} = Config) ->
Type = maps:get(type, Config, ?BRIDGE_TYPE),
Name = maps:get(name, Config, ?BRIDGE_NAME),
Host = maps:get(host, Config, "localhost"),
Path = maps:get(path, Config, ""),
PoolSize = maps:get(pool_size, Config, 1),
QueryMode = maps:get(query_mode, Config, "async"),
ConnectTimeout = maps:get(connect_timeout, Config, "1s"),
RequestTimeout = maps:get(request_timeout, Config, "10s"),
ResumeInterval = maps:get(resume_interval, Config, "1s"),
HealthCheckInterval = maps:get(health_check_interval, Config, "200ms"),
ResourceRequestTTL = maps:get(resource_request_ttl, Config, "infinity"),
LocalTopic =
case maps:find(local_topic, Config) of
{ok, LT} ->
lists:flatten(["local_topic = \"", LT, "\""]);
error ->
""
end,
ConfigString = io_lib:format(
"bridges.~s.~s {\n"
" url = \"http://~s:~p~s\"\n"
" connect_timeout = \"~p\"\n"
" enable = true\n"
%% local_topic
" ~s\n"
" enable_pipelining = 100\n"
" max_retries = 2\n"
" method = \"post\"\n"
" pool_size = ~p\n"
" pool_type = \"random\"\n"
" request_timeout = \"~s\"\n"
" body = \"${id}\"\n"
" resource_opts {\n"
" inflight_window = 100\n"
" health_check_interval = \"~s\"\n"
" max_buffer_bytes = \"1GB\"\n"
" query_mode = \"~s\"\n"
" request_ttl = \"~p\"\n"
" resume_interval = \"~s\"\n"
" start_after_created = \"true\"\n"
" start_timeout = \"5s\"\n"
" worker_pool_size = \"1\"\n"
" }\n"
" ssl {\n"
" enable = false\n"
" }\n"
"}\n",
[
Type,
Name,
Host,
Port,
Path,
ConnectTimeout,
LocalTopic,
PoolSize,
RequestTimeout,
HealthCheckInterval,
QueryMode,
ResourceRequestTTL,
ResumeInterval
]
),
ct:pal(ConfigString),
parse_and_check(ConfigString, Type, Name).
parse_and_check(ConfigString, BridgeType, Name) ->
{ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
#{<<"bridges">> := #{BridgeType := #{Name := RetConfig}}} = RawConf,
RetConfig.
make_bridge(Config) ->
Type = ?BRIDGE_TYPE,
Name = ?BRIDGE_NAME,
BridgeConfig = bridge_async_config(Config#{
name => Name,
type => Type
}),
{ok, _} = emqx_bridge:create(
Type,
Name,
BridgeConfig
),
emqx_bridge_resource:bridge_id(Type, Name).
%% Handler that replies immediately (no artificial response delay).
success_http_handler() ->
    success_http_handler(#{response_delay => 0}).
%% Build a cowboy handler fun that always replies 200 "hello".
%% Opts may carry `response_delay' (ms) to simulate a slow server.
%% Each received request is forwarded to the spawning test process as
%% a `{http, Headers, Body}' message so tests can assert on it.
success_http_handler(Opts) ->
    ResponseDelay = maps:get(response_delay, Opts, 0),
    %% Capture the test process now; the fun runs in a cowboy process.
    TestPid = self(),
    fun(Req0, State) ->
        {ok, Body, Req} = cowboy_req:read_body(Req0),
        Headers = cowboy_req:headers(Req),
        ct:pal("http request received: ~p", [
            #{body => Body, headers => Headers, response_delay => ResponseDelay}
        ]),
        %% Simulate a slow backend before replying, if configured.
        ResponseDelay > 0 andalso timer:sleep(ResponseDelay),
        TestPid ! {http, Headers, Body},
        Rep = cowboy_req:reply(
            200,
            #{<<"content-type">> => <<"text/plain">>},
            <<"hello">>,
            Req
        ),
        {ok, Rep, State}
    end.
emqx_bridge_http_test_lib:make_bridge(Config).
not_found_http_handler() ->
TestPid = self(),
@ -452,6 +337,102 @@ t_send_async_connection_timeout(Config) ->
receive_request_notifications(MessageIDs, ResponseDelayMS, []),
ok.
%% End-to-end check that a rule-id trace captures the full action path:
%% create a bridge + rule, start a trace on the rule id, publish one
%% matching message, then assert the rule metrics and the presence of
%% the expected trace events in the on-disk trace log.
t_send_get_trace_messages(Config) ->
    ResponseDelayMS = ?config(response_delay_ms, Config),
    #{port := Port, path := Path} = ?config(http_server, Config),
    BridgeID = make_bridge(#{
        port => Port,
        path => Path,
        pool_size => 1,
        query_mode => "async",
        connect_timeout => integer_to_list(ResponseDelayMS * 2) ++ "ms",
        request_timeout => "10s",
        resume_interval => "200ms",
        health_check_interval => "200ms",
        resource_request_ttl => "infinity"
    }),
    RuleTopic = iolist_to_binary([<<"my_rule_topic/">>, atom_to_binary(?FUNCTION_NAME)]),
    SQL = <<"SELECT payload.id as id FROM \"", RuleTopic/binary, "\"">>,
    {ok, #{<<"id">> := RuleId}} =
        emqx_bridge_testlib:create_rule_and_action_http(
            ?BRIDGE_TYPE,
            RuleTopic,
            Config,
            #{sql => SQL}
        ),
    %% ===================================
    %% Create trace for RuleId
    %% ===================================
    %% Start the window 10s in the past so already-elapsed events fall
    %% inside the trace interval.
    Now = erlang:system_time(second) - 10,
    Start = Now,
    End = Now + 60,
    TraceName = atom_to_binary(?FUNCTION_NAME),
    Trace = #{
        name => TraceName,
        type => ruleid,
        ruleid => RuleId,
        start_at => Start,
        end_at => End
    },
    emqx_trace_SUITE:reload(),
    ok = emqx_trace:clear(),
    {ok, _} = emqx_trace:create(Trace),
    %% ===================================
    ResourceId = emqx_bridge_resource:resource_id(BridgeID),
    %% Wait until the bridge is connected and the trace file exists but
    %% is still empty before publishing.
    ?retry(
        _Interval0 = 200,
        _NAttempts0 = 20,
        ?assertMatch({ok, connected}, emqx_resource_manager:health_check(ResourceId))
    ),
    ?retry(
        _Interval0 = 200,
        _NAttempts0 = 20,
        ?assertEqual(<<>>, read_rule_trace_file(TraceName, Now))
    ),
    Msg = emqx_message:make(RuleTopic, <<"{\"id\": 1}">>),
    emqx:publish(Msg),
    %% The rule must have matched exactly once and the action succeeded.
    ?retry(
        _Interval = 500,
        _NAttempts = 20,
        ?assertMatch(
            #{
                counters := #{
                    'matched' := 1,
                    'actions.failed' := 0,
                    'actions.failed.unknown' := 0,
                    'actions.success' := 1,
                    'actions.total' := 1
                }
            },
            emqx_metrics_worker:get_metrics(rule_metrics, RuleId)
        )
    ),
    ok = emqx_trace_handler_SUITE:filesync(TraceName, ruleid),
    {ok, Bin} = file:read_file(emqx_trace:log_file(TraceName, Now)),
    ?retry(
        _Interval0 = 200,
        _NAttempts0 = 20,
        begin
            %% NOTE(review): `Bin' is already bound above, so this match
            %% also asserts the log content did not change between the
            %% two reads — confirm that is intentional.
            Bin = read_rule_trace_file(TraceName, Now),
            ?assertNotEqual(nomatch, binary:match(Bin, [<<"rule_activated">>])),
            ?assertNotEqual(nomatch, binary:match(Bin, [<<"SQL_yielded_result">>])),
            ?assertNotEqual(nomatch, binary:match(Bin, [<<"bridge_action">>])),
            ?assertNotEqual(nomatch, binary:match(Bin, [<<"action_template_rendered">>])),
            ?assertNotEqual(nomatch, binary:match(Bin, [<<"QUERY_ASYNC">>]))
        end
    ),
    emqx_trace:delete(TraceName),
    ok.
%% Read back the on-disk trace log for TraceName.  Forces a trace check
%% and a handler file sync first so buffered trace events are flushed
%% to disk before the file is read.  Crashes if the file cannot be read.
read_rule_trace_file(TraceName, From) ->
    emqx_trace:check(),
    ok = emqx_trace_handler_SUITE:filesync(TraceName, ruleid),
    {ok, Bin} = file:read_file(emqx_trace:log_file(TraceName, From)),
    Bin.
t_async_free_retries(Config) ->
#{port := Port} = ?config(http_server, Config),
_BridgeID = make_bridge(#{
@ -518,7 +499,7 @@ t_async_common_retries(Config) ->
ok.
t_bad_bridge_config(_Config) ->
BridgeConfig = bridge_async_config(#{port => 12345}),
BridgeConfig = emqx_bridge_http_test_lib:bridge_async_config(#{port => 12345}),
?assertMatch(
{ok,
{{_, 201, _}, _Headers, #{
@ -540,7 +521,7 @@ t_bad_bridge_config(_Config) ->
t_start_stop(Config) ->
#{port := Port} = ?config(http_server, Config),
BridgeConfig = bridge_async_config(#{
BridgeConfig = emqx_bridge_http_test_lib:bridge_async_config(#{
type => ?BRIDGE_TYPE,
name => ?BRIDGE_NAME,
port => Port
@ -554,7 +535,7 @@ t_path_not_found(Config) ->
begin
#{port := Port, path := Path} = ?config(http_server, Config),
MQTTTopic = <<"t/webhook">>,
BridgeConfig = bridge_async_config(#{
BridgeConfig = emqx_bridge_http_test_lib:bridge_async_config(#{
type => ?BRIDGE_TYPE,
name => ?BRIDGE_NAME,
local_topic => MQTTTopic,
@ -593,7 +574,7 @@ t_too_many_requests(Config) ->
begin
#{port := Port, path := Path} = ?config(http_server, Config),
MQTTTopic = <<"t/webhook">>,
BridgeConfig = bridge_async_config(#{
BridgeConfig = emqx_bridge_http_test_lib:bridge_async_config(#{
type => ?BRIDGE_TYPE,
name => ?BRIDGE_NAME,
local_topic => MQTTTopic,
@ -633,7 +614,7 @@ t_rule_action_expired(Config) ->
?check_trace(
begin
RuleTopic = <<"t/webhook/rule">>,
BridgeConfig = bridge_async_config(#{
BridgeConfig = emqx_bridge_http_test_lib:bridge_async_config(#{
type => ?BRIDGE_TYPE,
name => ?BRIDGE_NAME,
host => "non.existent.host",
@ -689,7 +670,7 @@ t_bridge_probes_header_atoms(Config) ->
?check_trace(
begin
LocalTopic = <<"t/local/topic">>,
BridgeConfig0 = bridge_async_config(#{
BridgeConfig0 = emqx_bridge_http_test_lib:bridge_async_config(#{
type => ?BRIDGE_TYPE,
name => ?BRIDGE_NAME,
port => Port,

View File

@ -0,0 +1,161 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_http_test_lib).
-export([
bridge_type/0,
bridge_name/0,
make_bridge/1,
bridge_async_config/1,
init_http_success_server/1,
success_http_handler/0
]).
-define(BRIDGE_TYPE, bridge_type()).
-define(BRIDGE_NAME, bridge_name()).
%% The bridge type exercised by the HTTP bridge test suites.
bridge_type() ->
    <<"webhook">>.
%% Default bridge name: this helper module's name as a binary.
bridge_name() ->
    atom_to_binary(?MODULE).
%% Create the default test bridge from the given config overrides and
%% return its bridge id.  Crashes (badmatch) when creation fails.
make_bridge(Config) ->
    BridgeType = ?BRIDGE_TYPE,
    BridgeName = ?BRIDGE_NAME,
    FullConfig = bridge_async_config(Config#{type => BridgeType, name => BridgeName}),
    {ok, _} = emqx_bridge:create(BridgeType, BridgeName, FullConfig),
    emqx_bridge_resource:bridge_id(BridgeType, BridgeName).
%% @doc Render and validate a webhook bridge HOCON config.
%% `port' is mandatory; every other key of `Config' overrides a test
%% default.  Duration values are strings such as "1s";
%% `resource_request_ttl' may also be "infinity".  Returns the parsed
%% raw (binary-keyed) config map for the rendered bridge.
bridge_async_config(#{port := Port} = Config) ->
    Type = maps:get(type, Config, ?BRIDGE_TYPE),
    Name = maps:get(name, Config, ?BRIDGE_NAME),
    Host = maps:get(host, Config, "localhost"),
    Path = maps:get(path, Config, ""),
    PoolSize = maps:get(pool_size, Config, 1),
    QueryMode = maps:get(query_mode, Config, "async"),
    ConnectTimeout = maps:get(connect_timeout, Config, "1s"),
    RequestTimeout = maps:get(request_timeout, Config, "10s"),
    ResumeInterval = maps:get(resume_interval, Config, "1s"),
    HealthCheckInterval = maps:get(health_check_interval, Config, "200ms"),
    ResourceRequestTTL = maps:get(resource_request_ttl, Config, "infinity"),
    %% Optional local_topic line; omitted entirely when not configured.
    LocalTopic =
        case maps:find(local_topic, Config) of
            {ok, LT} ->
                lists:flatten(["local_topic = \"", LT, "\""]);
            error ->
                ""
        end,
    %% NOTE: `connect_timeout' and `request_ttl' use `~s' (not `~p').
    %% `~p' prints a charlist with surrounding quotes, producing e.g.
    %% `""1s""' and relying on HOCON string concatenation to parse.
    ConfigString = io_lib:format(
        "bridges.~s.~s {\n"
        " url = \"http://~s:~p~s\"\n"
        " connect_timeout = \"~s\"\n"
        " enable = true\n"
        %% local_topic
        " ~s\n"
        " enable_pipelining = 100\n"
        " max_retries = 2\n"
        " method = \"post\"\n"
        " pool_size = ~p\n"
        " pool_type = \"random\"\n"
        " request_timeout = \"~s\"\n"
        " body = \"${id}\"\n"
        " resource_opts {\n"
        " inflight_window = 100\n"
        " health_check_interval = \"~s\"\n"
        " max_buffer_bytes = \"1GB\"\n"
        " query_mode = \"~s\"\n"
        " request_ttl = \"~s\"\n"
        " resume_interval = \"~s\"\n"
        " start_after_created = \"true\"\n"
        " start_timeout = \"5s\"\n"
        " worker_pool_size = \"1\"\n"
        " }\n"
        " ssl {\n"
        " enable = false\n"
        " }\n"
        "}\n",
        [
            Type,
            Name,
            Host,
            Port,
            Path,
            ConnectTimeout,
            LocalTopic,
            PoolSize,
            RequestTimeout,
            HealthCheckInterval,
            QueryMode,
            ResourceRequestTTL,
            ResumeInterval
        ]
    ),
    %% Pass the rendered config as an argument so a `~' inside it cannot
    %% be misinterpreted as a ct:pal/1 control sequence.
    ct:pal("~s", [ConfigString]),
    parse_and_check(ConfigString, Type, Name).
%% Parse the rendered HOCON string, validate it against the bridge
%% schema (crashes on schema violation), and return the raw
%% (binary-keyed) config of the given bridge type/name.
parse_and_check(ConfigString, BridgeType, Name) ->
    {ok, RawConf} = hocon:binary(ConfigString, #{format => map}),
    hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}),
    #{<<"bridges">> := AllBridges} = RawConf,
    #{BridgeType := #{Name := BridgeConf}} = AllBridges,
    BridgeConf.
%% @doc Cowboy handler fun that replies 200 "hello" with no artificial
%% response delay.
success_http_handler() ->
success_http_handler(#{response_delay => 0}).
%% @doc Build a cowboy handler fun that reads the request body, forwards
%% `{http, Headers, Body}' to the calling test process, optionally sleeps
%% `response_delay' milliseconds, and replies 200 "hello".
success_http_handler(Opts) ->
    Delay = maps:get(response_delay, Opts, 0),
    Parent = self(),
    fun(Request0, HandlerState) ->
        {ok, Body, Request} = cowboy_req:read_body(Request0),
        ReqHeaders = cowboy_req:headers(Request),
        ct:pal("http request received: ~p", [
            #{body => Body, headers => ReqHeaders, response_delay => Delay}
        ]),
        case Delay > 0 of
            true -> timer:sleep(Delay);
            false -> ok
        end,
        Parent ! {http, ReqHeaders, Body},
        Reply = cowboy_req:reply(
            200,
            #{<<"content-type">> => <<"text/plain">>},
            <<"hello">>,
            Request
        ),
        {ok, Reply, HandlerState}
    end.
%% @doc Start the HTTP test server on a random port with a 500 ms delayed
%% success handler, and extend the CT config with the server's connection
%% details, the configured delay and the default bridge name.
init_http_success_server(Config) ->
    Path = <<"/path">>,
    NoServerSSL = false,
    {ok, {Port, _ServerPid}} = emqx_bridge_http_connector_test_server:start_link(
        _Port = random, Path, NoServerSSL
    ),
    DelayMS = 500,
    ok = emqx_bridge_http_connector_test_server:set_handler(
        success_http_handler(#{response_delay => DelayMS})
    ),
    [
        {http_server, #{port => Port, path => Path}},
        {response_delay_ms, DelayMS},
        {bridge_name, ?BRIDGE_NAME}
        | Config
    ].

View File

@ -42,7 +42,7 @@
%% api
write_syntax_type() ->
typerefl:alias("string", write_syntax()).
typerefl:alias("template", write_syntax()).
%% Examples
conn_bridge_examples(Method) ->

View File

@ -59,6 +59,9 @@
-define(DEFAULT_TIMESTAMP_TMPL, "${timestamp}").
-define(set_tag, set_tag).
-define(set_field, set_field).
-define(IS_HTTP_ERROR(STATUS_CODE),
(is_integer(STATUS_CODE) andalso
(STATUS_CODE < 200 orelse STATUS_CODE >= 300))
@ -710,8 +713,8 @@ line_to_point(
precision := Precision
} = Item
) ->
{_, EncodedTags} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Tags),
{_, EncodedFields} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Fields),
{_, EncodedTags, _} = maps:fold(fun maps_config_to_data/3, {Data, #{}, ?set_tag}, Tags),
{_, EncodedFields, _} = maps:fold(fun maps_config_to_data/3, {Data, #{}, ?set_field}, Fields),
maps:without([precision], Item#{
measurement => emqx_placeholder:proc_tmpl(Measurement, Data),
tags => EncodedTags,
@ -727,34 +730,43 @@ time_unit(ms) -> millisecond;
time_unit(us) -> microsecond;
time_unit(ns) -> nanosecond.
maps_config_to_data(K, V, {Data, Res}) ->
maps_config_to_data(K, V, {Data, Res, SetType}) ->
KTransOptions = #{return => rawlist, var_trans => fun key_filter/1},
VTransOptions = #{return => rawlist, var_trans => fun data_filter/1},
NK = emqx_placeholder:proc_tmpl(K, Data, KTransOptions),
NV = proc_quoted(V, Data, VTransOptions),
case {NK, NV} of
{[undefined], _} ->
{Data, Res};
{Data, Res, SetType};
%% undefined value in normal format [undefined] or int/uint format [undefined, <<"i">>]
{_, [undefined | _]} ->
{Data, Res};
{Data, Res, SetType};
{_, {quoted, [undefined | _]}} ->
{Data, Res};
{Data, Res, SetType};
_ ->
{Data, Res#{
list_to_binary(NK) => value_type(NV, tmpl_type(V))
}}
NRes = Res#{
list_to_binary(NK) => value_type(NV, #{
tmpl_type => tmpl_type(V), set_type => SetType
})
},
{Data, NRes, SetType}
end.
value_type([Number], #{set_type := ?set_tag}) when is_number(Number) ->
%% all `tag` values are treated as string
%% See also: https://docs.influxdata.com/influxdb/v2/reference/syntax/line-protocol/#tag-set
emqx_utils_conv:bin(Number);
value_type([Str], #{set_type := ?set_tag}) when is_binary(Str) ->
Str;
value_type({quoted, ValList}, _) ->
{string_list, ValList};
value_type([Int, <<"i">>], mixed) when is_integer(Int) ->
value_type([Int, <<"i">>], #{tmpl_type := mixed}) when is_integer(Int) ->
{int, Int};
value_type([UInt, <<"u">>], mixed) when is_integer(UInt) ->
value_type([UInt, <<"u">>], #{tmpl_type := mixed}) when is_integer(UInt) ->
{uint, UInt};
%% write `1`, `1.0`, `-1.0` all as float
%% see also: https://docs.influxdata.com/influxdb/v2.7/reference/syntax/line-protocol/#float
value_type([Number], _) when is_number(Number) ->
value_type([Number], #{set_type := ?set_field}) when is_number(Number) ->
{float, Number};
value_type([<<"t">>], _) ->
't';
@ -776,9 +788,9 @@ value_type([<<"FALSE">>], _) ->
'FALSE';
value_type([<<"False">>], _) ->
'False';
value_type([Str], variable) when is_binary(Str) ->
value_type([Str], #{tmpl_type := variable}) when is_binary(Str) ->
Str;
value_type([Str], literal) when is_binary(Str) ->
value_type([Str], #{tmpl_type := literal, set_type := ?set_field}) when is_binary(Str) ->
%% if Str is a literal string suffixed with `i` or `u`, we should convert it to int/uint.
%% otherwise, we should convert it to float.
NumStr = binary:part(Str, 0, byte_size(Str) - 1),

View File

@ -864,6 +864,53 @@ t_any_num_as_float(Config) ->
TimeReturned = pad_zero(TimeReturned0),
?assertEqual(TsStr, TimeReturned).
t_tag_set_use_literal_value(Config) ->
QueryMode = ?config(query_mode, Config),
Const = erlang:system_time(nanosecond),
ConstBin = integer_to_binary(Const),
TsStr = iolist_to_binary(
calendar:system_time_to_rfc3339(Const, [{unit, nanosecond}, {offset, "Z"}])
),
?assertMatch(
{ok, _},
create_bridge(
Config,
#{
<<"write_syntax">> =>
<<"mqtt,clientid=${clientid},tag_key1=100,tag_key2=123.4,tag_key3=66i,tag_key4=${payload.float_dp}",
" ",
"field_key1=100.1,field_key2=100i,field_key3=${payload.float_dp},bar=5i",
" ", ConstBin/binary>>
}
)
),
ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
Payload = #{
%% with decimal point
float_dp => 123.4
},
SentData = #{
<<"clientid">> => ClientId,
<<"topic">> => atom_to_binary(?FUNCTION_NAME),
<<"payload">> => Payload,
<<"timestamp">> => erlang:system_time(millisecond)
},
case QueryMode of
sync ->
?assertMatch({ok, 204, _}, send_message(Config, SentData)),
ok;
async ->
?assertMatch(ok, send_message(Config, SentData))
end,
%% sleep is still needed even in sync mode, or we would get an empty result sometimes
ct:sleep(1500),
PersistedData = query_by_clientid(ClientId, Config),
Expected = #{field_key1 => <<"100.1">>, field_key2 => <<"100">>, field_key3 => <<"123.4">>},
assert_persisted_data(ClientId, Expected, PersistedData),
TimeReturned0 = maps:get(<<"_time">>, maps:get(<<"field_key1">>, PersistedData)),
TimeReturned = pad_zero(TimeReturned0),
?assertEqual(TsStr, TimeReturned).
t_bad_timestamp(Config) ->
InfluxDBType = ?config(influxdb_type, Config),
InfluxDBName = ?config(influxdb_name, Config),

View File

@ -5,6 +5,8 @@
-ifndef(EMQX_BRIDGE_IOTDB_HRL).
-define(EMQX_BRIDGE_IOTDB_HRL, true).
-define(VSN_1_3_X, 'v1.3.x').
-define(VSN_1_2_X, 'v1.2.x').
-define(VSN_1_1_X, 'v1.1.x').
-define(VSN_1_0_X, 'v1.0.x').
-define(VSN_0_13_X, 'v0.13.x').

View File

@ -66,12 +66,7 @@ fields(action_config) ->
]
);
fields(action_resource_opts) ->
lists:filter(
fun({K, _V}) ->
not lists:member(K, unsupported_opts())
end,
emqx_bridge_v2_schema:action_resource_opts_fields()
);
emqx_bridge_v2_schema:action_resource_opts_fields();
fields(action_parameters) ->
[
{is_aligned,
@ -84,7 +79,7 @@ fields(action_parameters) ->
)},
{device_id,
mk(
binary(),
emqx_schema:template(),
#{
desc => ?DESC("config_device_id")
}
@ -114,7 +109,7 @@ fields(action_parameters_data) ->
)},
{measurement,
mk(
binary(),
emqx_schema:template(),
#{
required => true,
desc => ?DESC("config_parameters_measurement")
@ -122,7 +117,9 @@ fields(action_parameters_data) ->
)},
{data_type,
mk(
hoconsc:union([enum([text, boolean, int32, int64, float, double]), binary()]),
hoconsc:union([
enum([text, boolean, int32, int64, float, double]), emqx_schema:template()
]),
#{
required => true,
desc => ?DESC("config_parameters_data_type")
@ -130,7 +127,7 @@ fields(action_parameters_data) ->
)},
{value,
mk(
binary(),
emqx_schema:template(),
#{
required => true,
desc => ?DESC("config_parameters_value")
@ -150,7 +147,7 @@ fields("get_bridge_v2") ->
fields("config") ->
basic_config() ++ request_config();
fields("creation_opts") ->
proplists_without(unsupported_opts(), emqx_resource_schema:fields("creation_opts"));
emqx_resource_schema:fields("creation_opts");
fields(auth_basic) ->
[
{username, mk(binary(), #{required => true, desc => ?DESC("config_auth_basic_username")})},
@ -220,10 +217,10 @@ basic_config() ->
)},
{iotdb_version,
mk(
hoconsc:enum([?VSN_1_1_X, ?VSN_1_0_X, ?VSN_0_13_X]),
hoconsc:enum([?VSN_1_3_X, ?VSN_1_1_X, ?VSN_1_0_X, ?VSN_0_13_X]),
#{
desc => ?DESC("config_iotdb_version"),
default => ?VSN_1_1_X
default => ?VSN_1_3_X
}
)}
] ++ resource_creation_opts() ++
@ -268,12 +265,6 @@ resource_creation_opts() ->
)}
].
unsupported_opts() ->
[
batch_size,
batch_time
].
%%-------------------------------------------------------------------------------------------------
%% v2 examples
%%-------------------------------------------------------------------------------------------------

View File

@ -21,6 +21,8 @@
on_get_status/2,
on_query/3,
on_query_async/4,
on_batch_query/3,
on_batch_query_async/4,
on_add_channel/4,
on_remove_channel/3,
on_get_channels/1,
@ -94,7 +96,7 @@ connector_example_values() ->
name => <<"iotdb_connector">>,
type => iotdb,
enable => true,
iotdb_version => ?VSN_1_1_X,
iotdb_version => ?VSN_1_3_X,
authentication => #{
<<"username">> => <<"root">>,
<<"password">> => <<"******">>
@ -133,10 +135,10 @@ fields("connection_fields") ->
)},
{iotdb_version,
mk(
hoconsc:enum([?VSN_1_1_X, ?VSN_1_0_X, ?VSN_0_13_X]),
hoconsc:enum([?VSN_1_3_X, ?VSN_1_1_X, ?VSN_1_0_X, ?VSN_0_13_X]),
#{
desc => ?DESC(emqx_bridge_iotdb, "config_iotdb_version"),
default => ?VSN_1_1_X
default => ?VSN_1_3_X
}
)},
{authentication,
@ -280,8 +282,8 @@ on_query(
state => emqx_utils:redact(State)
}),
case try_render_message(Req, IoTDBVsn, Channels) of
{ok, IoTDBPayload} ->
case try_render_messages([Req], IoTDBVsn, Channels) of
{ok, [IoTDBPayload]} ->
handle_response(
emqx_bridge_http_connector:on_query(
InstanceId, {ChannelId, IoTDBPayload}, State
@ -306,8 +308,8 @@ on_query_async(
send_message => Req,
state => emqx_utils:redact(State)
}),
case try_render_message(Req, IoTDBVsn, Channels) of
{ok, IoTDBPayload} ->
case try_render_messages([Req], IoTDBVsn, Channels) of
{ok, [IoTDBPayload]} ->
ReplyFunAndArgs =
{
fun(Result) ->
@ -323,6 +325,71 @@ on_query_async(
Error
end.
%% @doc Batched async query entry point for the IoTDB HTTP connector.
%% Renders every request of the batch into IoTDB insert payloads (one
%% payload per `{DeviceId, IsAligned}' group, see try_render_messages)
%% and fires each payload as a separate async HTTP request.
%% NOTE(review): only the first request's channel id is inspected; this
%% assumes every request in the batch belongs to the same channel —
%% confirm against the buffer worker's batching guarantees.
on_batch_query_async(
InstId,
Requests,
Callback,
#{iotdb_version := IoTDBVsn, channels := Channels} = State
) ->
?tp(iotdb_bridge_on_batch_query_async, #{instance_id => InstId}),
%% All payloads below are sent on this channel id.
[{ChannelId, _Message} | _] = Requests,
?SLOG(debug, #{
msg => "iotdb_bridge_on_query_batch_async_called",
instance_id => InstId,
send_message => Requests,
state => emqx_utils:redact(State)
}),
case try_render_messages(Requests, IoTDBVsn, Channels) of
{ok, IoTDBPayloads} ->
%% The same reply fun is shared by every payload, so the caller's
%% callback fires once per rendered payload.
ReplyFunAndArgs =
{
fun(Result) ->
Response = handle_response(Result),
emqx_resource:apply_reply_fun(Callback, Response)
end,
[]
},
lists:map(
fun(IoTDBPayload) ->
emqx_bridge_http_connector:on_query_async(
InstId, {ChannelId, IoTDBPayload}, ReplyFunAndArgs, State
)
end,
IoTDBPayloads
);
Error ->
%% Render failure is returned unchanged to the resource layer.
Error
end.
%% @doc Batched sync query entry point for the IoTDB HTTP connector.
%% Renders the batch into IoTDB insert payloads and performs one
%% synchronous HTTP call per payload, returning the list of per-payload
%% results.
%% NOTE(review): the head matches only a singleton batch
%% (`[{ChannelId, _Message}]'); a batch with more than one request would
%% fail with `function_clause' — compare on_batch_query_async/4, which
%% accepts `[_ | _]'.  Confirm the configured batch size upstream.
on_batch_query(
InstId,
[{ChannelId, _Message}] = Requests,
#{iotdb_version := IoTDBVsn, channels := Channels} = State
) ->
?tp(iotdb_bridge_on_batch_query, #{instance_id => InstId}),
?SLOG(debug, #{
msg => "iotdb_bridge_on_batch_query_called",
instance_id => InstId,
send_message => Requests,
state => emqx_utils:redact(State)
}),
case try_render_messages(Requests, IoTDBVsn, Channels) of
{ok, IoTDBPayloads} ->
lists:map(
fun(IoTDBPayload) ->
handle_response(
emqx_bridge_http_connector:on_query(
InstId, {ChannelId, IoTDBPayload}, State
)
)
end,
IoTDBPayloads
);
Error ->
%% Render failure is returned unchanged to the resource layer.
Error
end.
on_add_channel(
InstanceId,
#{iotdb_version := Version, channels := Channels} = OldState0,
@ -342,6 +409,7 @@ on_add_channel(
Path =
case Version of
?VSN_1_1_X -> InsertTabletPathV2;
?VSN_1_3_X -> InsertTabletPathV2;
_ -> InsertTabletPathV1
end,
@ -442,14 +510,14 @@ maybe_preproc_tmpl(Value) when is_binary(Value) ->
maybe_preproc_tmpl(Value) ->
Value.
proc_data(PreProcessedData, Msg) ->
proc_data(PreProcessedData, Msg, IoTDBVsn) ->
NowNS = erlang:system_time(nanosecond),
Nows = #{
now_ms => erlang:convert_time_unit(NowNS, nanosecond, millisecond),
now_us => erlang:convert_time_unit(NowNS, nanosecond, microsecond),
now_ns => NowNS
},
proc_data(PreProcessedData, Msg, Nows, []).
proc_data(PreProcessedData, Msg, Nows, IoTDBVsn, []).
proc_data(
[
@ -463,15 +531,16 @@ proc_data(
],
Msg,
Nows,
IotDbVsn,
Acc
) ->
DataType = list_to_binary(
string:uppercase(binary_to_list(emqx_placeholder:proc_tmpl(DataType0, Msg)))
),
try
proc_data(T, Msg, Nows, [
proc_data(T, Msg, Nows, IotDbVsn, [
#{
timestamp => iot_timestamp(TimestampTkn, Msg, Nows),
timestamp => iot_timestamp(IotDbVsn, TimestampTkn, Msg, Nows),
measurement => emqx_placeholder:proc_tmpl(Measurement, Msg),
data_type => DataType,
value => proc_value(DataType, ValueTkn, Msg)
@ -485,23 +554,28 @@ proc_data(
?SLOG(debug, #{exception => Error, reason => Reason, stacktrace => Stacktrace}),
{error, invalid_data}
end;
proc_data([], _Msg, _Nows, Acc) ->
proc_data([], _Msg, _Nows, _IotDbVsn, Acc) ->
{ok, lists:reverse(Acc)}.
iot_timestamp(Timestamp, _, _) when is_integer(Timestamp) ->
iot_timestamp(_IotDbVsn, Timestamp, _, _) when is_integer(Timestamp) ->
Timestamp;
iot_timestamp(TimestampTkn, Msg, Nows) ->
iot_timestamp(emqx_placeholder:proc_tmpl(TimestampTkn, Msg), Nows).
iot_timestamp(IotDbVsn, TimestampTkn, Msg, Nows) ->
iot_timestamp(IotDbVsn, emqx_placeholder:proc_tmpl(TimestampTkn, Msg), Nows).
iot_timestamp(<<"now_us">>, #{now_us := NowUs}) ->
%% IoTDB v1.3.x does not allow writing nanosecond or microsecond timestamps,
%% so both are downgraded to milliseconds for that version.
iot_timestamp(?VSN_1_3_X, <<"now_us">>, #{now_ms := NowMs}) ->
iot_timestamp(?VSN_1_3_X, <<"now_us">>, #{now_ms := NowMs}) ->
NowMs;
iot_timestamp(?VSN_1_3_X, <<"now_ns">>, #{now_ms := NowMs}) ->
NowMs;
iot_timestamp(_IotDbVsn, <<"now_us">>, #{now_us := NowUs}) ->
NowUs;
iot_timestamp(<<"now_ns">>, #{now_ns := NowNs}) ->
iot_timestamp(_IotDbVsn, <<"now_ns">>, #{now_ns := NowNs}) ->
NowNs;
iot_timestamp(Timestamp, #{now_ms := NowMs}) when
iot_timestamp(_IotDbVsn, Timestamp, #{now_ms := NowMs}) when
Timestamp =:= <<"now">>; Timestamp =:= <<"now_ms">>; Timestamp =:= <<>>
->
NowMs;
iot_timestamp(Timestamp, _) when is_binary(Timestamp) ->
iot_timestamp(_IotDbVsn, Timestamp, _) when is_binary(Timestamp) ->
binary_to_integer(Timestamp).
proc_value(<<"TEXT">>, ValueTkn, Msg) ->
@ -526,6 +600,7 @@ replace_var(Val, _Data) ->
convert_bool(B) when is_boolean(B) -> B;
convert_bool(null) -> null;
convert_bool(undefined) -> null;
convert_bool(1) -> true;
convert_bool(0) -> false;
convert_bool(<<"1">>) -> true;
@ -568,11 +643,10 @@ convert_float(undefined) ->
make_iotdb_insert_request(DataList, IsAligned, DeviceId, IoTDBVsn) ->
InitAcc = #{timestamps => [], measurements => [], dtypes => [], values => []},
Rows = replace_dtypes(aggregate_rows(DataList, InitAcc), IoTDBVsn),
{ok,
maps:merge(Rows, #{
iotdb_field_key(is_aligned, IoTDBVsn) => IsAligned,
iotdb_field_key(device_id, IoTDBVsn) => DeviceId
})}.
maps:merge(Rows, #{
iotdb_field_key(is_aligned, IoTDBVsn) => IsAligned,
iotdb_field_key(device_id, IoTDBVsn) => DeviceId
}).
replace_dtypes(Rows0, IoTDBVsn) ->
{Types, Rows} = maps:take(dtypes, Rows0),
@ -632,18 +706,24 @@ insert_value(1, Data, [Value | Values]) ->
insert_value(Index, Data, [Value | Values]) ->
[[null | Value] | insert_value(Index - 1, Data, Values)].
iotdb_field_key(is_aligned, ?VSN_1_3_X) ->
<<"is_aligned">>;
iotdb_field_key(is_aligned, ?VSN_1_1_X) ->
<<"is_aligned">>;
iotdb_field_key(is_aligned, ?VSN_1_0_X) ->
<<"is_aligned">>;
iotdb_field_key(is_aligned, ?VSN_0_13_X) ->
<<"isAligned">>;
iotdb_field_key(device_id, ?VSN_1_3_X) ->
<<"device">>;
iotdb_field_key(device_id, ?VSN_1_1_X) ->
<<"device">>;
iotdb_field_key(device_id, ?VSN_1_0_X) ->
<<"device">>;
iotdb_field_key(device_id, ?VSN_0_13_X) ->
<<"deviceId">>;
iotdb_field_key(data_types, ?VSN_1_3_X) ->
<<"data_types">>;
iotdb_field_key(data_types, ?VSN_1_1_X) ->
<<"data_types">>;
iotdb_field_key(data_types, ?VSN_1_0_X) ->
@ -706,14 +786,37 @@ preproc_data_template(DataList) ->
DataList
).
try_render_message({ChannelId, Msg}, IoTDBVsn, Channels) ->
try_render_messages([{ChannelId, _} | _] = Msgs, IoTDBVsn, Channels) ->
case maps:find(ChannelId, Channels) of
{ok, Channel} ->
render_channel_message(Channel, IoTDBVsn, Msg);
case do_render_message(Msgs, Channel, IoTDBVsn, #{}) of
RenderMsgs when is_map(RenderMsgs) ->
{ok,
lists:map(
fun({{DeviceId, IsAligned}, DataList}) ->
make_iotdb_insert_request(DataList, IsAligned, DeviceId, IoTDBVsn)
end,
maps:to_list(RenderMsgs)
)};
Error ->
Error
end;
_ ->
{error, {unrecoverable_error, {invalid_channel_id, ChannelId}}}
end.
%% Render each `{_Tag, Msg}' request through the channel template and
%% group the resulting data rows by `{DeviceId, IsAligned}'.  New rows
%% are prepended to any rows already accumulated for the same key.
%% Stops at the first render error and returns it unchanged.
do_render_message([], _Channel, _IoTDBVsn, Grouped) ->
    Grouped;
do_render_message([{_, Message} | Rest], Channel, IoTDBVsn, Grouped) ->
    case render_channel_message(Channel, IoTDBVsn, Message) of
        {ok, DataList, DeviceId, IsAligned} ->
            GroupKey = {DeviceId, IsAligned},
            Merge = fun(Existing) -> DataList ++ Existing end,
            NewGrouped = maps:update_with(GroupKey, Merge, DataList, Grouped),
            do_render_message(Rest, Channel, IoTDBVsn, NewGrouped);
        Error ->
            Error
    end.
render_channel_message(#{is_aligned := IsAligned} = Channel, IoTDBVsn, Message) ->
Payloads = to_list(parse_payload(get_payload(Message))),
case device_id(Message, Payloads, Channel) of
@ -724,9 +827,9 @@ render_channel_message(#{is_aligned := IsAligned} = Channel, IoTDBVsn, Message)
[] ->
{error, invalid_template};
DataTemplate ->
case proc_data(DataTemplate, Message) of
case proc_data(DataTemplate, Message, IoTDBVsn) of
{ok, DataList} ->
make_iotdb_insert_request(DataList, IsAligned, DeviceId, IoTDBVsn);
{ok, DataList, DeviceId, IsAligned};
Error ->
Error
end

View File

@ -20,14 +20,16 @@
all() ->
[
{group, plain},
{group, iotdb110},
{group, iotdb130},
{group, legacy}
].
groups() ->
AllTCs = emqx_common_test_helpers:all(?MODULE),
[
{plain, AllTCs},
{iotdb110, AllTCs},
{iotdb130, AllTCs},
{legacy, AllTCs}
].
@ -37,10 +39,15 @@ init_per_suite(Config) ->
end_per_suite(Config) ->
emqx_bridge_v2_testlib:end_per_suite(Config).
init_per_group(plain = Type, Config0) ->
init_per_group(Type, Config0) when Type =:= iotdb110 orelse Type =:= iotdb130 ->
Host = os:getenv("IOTDB_PLAIN_HOST", "toxiproxy.emqx.net"),
Port = list_to_integer(os:getenv("IOTDB_PLAIN_PORT", "18080")),
ProxyName = "iotdb",
ProxyName = atom_to_list(Type),
{IotDbVersion, DefaultPort} =
case Type of
iotdb110 -> {?VSN_1_1_X, "18080"};
iotdb130 -> {?VSN_1_3_X, "28080"}
end,
Port = list_to_integer(os:getenv("IOTDB_PLAIN_PORT", DefaultPort)),
case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of
true ->
Config = emqx_bridge_v2_testlib:init_per_group(Type, ?BRIDGE_TYPE_BIN, Config0),
@ -48,7 +55,7 @@ init_per_group(plain = Type, Config0) ->
{bridge_host, Host},
{bridge_port, Port},
{proxy_name, ProxyName},
{iotdb_version, ?VSN_1_1_X},
{iotdb_version, IotDbVersion},
{iotdb_rest_prefix, <<"/rest/v2/">>}
| Config
];
@ -87,7 +94,8 @@ init_per_group(_Group, Config) ->
Config.
end_per_group(Group, Config) when
Group =:= plain;
Group =:= iotdb110;
Group =:= iotdb130;
Group =:= legacy
->
emqx_bridge_v2_testlib:end_per_group(Config),
@ -245,7 +253,9 @@ iotdb_query(Config, Query) ->
iotdb_request(Config, Path, Body, Opts).
is_success_check({ok, 200, _, Body}) ->
?assert(is_code(200, emqx_utils_json:decode(Body))).
?assert(is_code(200, emqx_utils_json:decode(Body)));
is_success_check(Other) ->
throw(Other).
%% Succeeds iff the decoded JSON response map carries a `<<"code">>' key
%% whose value is exactly (=:=) the expected code.
is_code(Expected, Decoded) ->
    case Decoded of
        #{<<"code">> := Expected} -> true;
        _ -> false
    end.
@ -359,89 +369,96 @@ t_async_query(Config) ->
t_sync_query_aggregated(Config) ->
DeviceId = iotdb_device(Config),
MS = erlang:system_time(millisecond) - 5000,
Payload = [
make_iotdb_payload(DeviceId, "temp", "INT32", "36", 1685112026290),
make_iotdb_payload(DeviceId, "temp", "INT32", 37, 1685112026291),
make_iotdb_payload(DeviceId, "temp", "INT32", 38.7, 1685112026292),
make_iotdb_payload(DeviceId, "temp", "INT32", "39", <<"1685112026293">>),
make_iotdb_payload(DeviceId, "temp", "INT64", "36", 1685112026294),
make_iotdb_payload(DeviceId, "temp", "INT64", 36, 1685112026295),
make_iotdb_payload(DeviceId, "temp", "INT64", 36.7, 1685112026296),
%% implicit 'now()' timestamp
make_iotdb_payload(DeviceId, "temp", "INT32", "40"),
make_iotdb_payload(DeviceId, "temp", "INT32", "36", MS - 7000),
make_iotdb_payload(DeviceId, "temp", "INT32", 37, MS - 6000),
make_iotdb_payload(DeviceId, "temp", "INT64", 38.7, MS - 5000),
make_iotdb_payload(DeviceId, "temp", "INT64", "39", integer_to_binary(MS - 4000)),
make_iotdb_payload(DeviceId, "temp", "INT64", "34", MS - 3000),
make_iotdb_payload(DeviceId, "temp", "INT32", 33.7, MS - 2000),
make_iotdb_payload(DeviceId, "temp", "INT32", 32, MS - 1000),
%% [FIXME] neither nanoseconds nor microseconds seem to be supported by IoTDB
(make_iotdb_payload(DeviceId, "temp", "INT32", "41"))#{timestamp => <<"now_us">>},
(make_iotdb_payload(DeviceId, "temp", "INT32", "42"))#{timestamp => <<"now_ns">>},
make_iotdb_payload(DeviceId, "weight", "FLOAT", "87.3", 1685112026290),
make_iotdb_payload(DeviceId, "weight", "FLOAT", 87.3, 1685112026291),
make_iotdb_payload(DeviceId, "weight", "FLOAT", 87, 1685112026292),
make_iotdb_payload(DeviceId, "weight", "DOUBLE", "87.3", 1685112026293),
make_iotdb_payload(DeviceId, "weight", "DOUBLE", 87.3, 1685112026294),
make_iotdb_payload(DeviceId, "weight", "DOUBLE", 87, 1685112026295),
make_iotdb_payload(DeviceId, "weight", "FLOAT", "87.3", MS - 6000),
make_iotdb_payload(DeviceId, "weight", "FLOAT", 87.3, MS - 5000),
make_iotdb_payload(DeviceId, "weight", "FLOAT", 87, MS - 4000),
make_iotdb_payload(DeviceId, "weight", "DOUBLE", "87.3", MS - 3000),
make_iotdb_payload(DeviceId, "weight", "DOUBLE", 87.3, MS - 2000),
make_iotdb_payload(DeviceId, "weight", "DOUBLE", 87, MS - 1000),
make_iotdb_payload(DeviceId, "charged", "BOOLEAN", "1", 1685112026300),
make_iotdb_payload(DeviceId, "floated", "BOOLEAN", 1, 1685112026300),
make_iotdb_payload(DeviceId, "started", "BOOLEAN", true, 1685112026300),
make_iotdb_payload(DeviceId, "stoked", "BOOLEAN", "true", 1685112026300),
make_iotdb_payload(DeviceId, "enriched", "BOOLEAN", "TRUE", 1685112026300),
make_iotdb_payload(DeviceId, "gutted", "BOOLEAN", "True", 1685112026300),
make_iotdb_payload(DeviceId, "drained", "BOOLEAN", "0", 1685112026300),
make_iotdb_payload(DeviceId, "toasted", "BOOLEAN", 0, 1685112026300),
make_iotdb_payload(DeviceId, "uncharted", "BOOLEAN", false, 1685112026300),
make_iotdb_payload(DeviceId, "dazzled", "BOOLEAN", "false", 1685112026300),
make_iotdb_payload(DeviceId, "unplugged", "BOOLEAN", "FALSE", 1685112026300),
make_iotdb_payload(DeviceId, "unraveled", "BOOLEAN", "False", 1685112026300),
make_iotdb_payload(DeviceId, "undecided", "BOOLEAN", null, 1685112026300),
make_iotdb_payload(DeviceId, "charged", "BOOLEAN", "1", MS + 1000),
make_iotdb_payload(DeviceId, "floated", "BOOLEAN", 1, MS + 1000),
make_iotdb_payload(DeviceId, "started", "BOOLEAN", true, MS + 1000),
make_iotdb_payload(DeviceId, "stoked", "BOOLEAN", "true", MS + 1000),
make_iotdb_payload(DeviceId, "enriched", "BOOLEAN", "TRUE", MS + 1000),
make_iotdb_payload(DeviceId, "gutted", "BOOLEAN", "True", MS + 1000),
make_iotdb_payload(DeviceId, "drained", "BOOLEAN", "0", MS + 1000),
make_iotdb_payload(DeviceId, "toasted", "BOOLEAN", 0, MS + 1000),
make_iotdb_payload(DeviceId, "uncharted", "BOOLEAN", false, MS + 1000),
make_iotdb_payload(DeviceId, "dazzled", "BOOLEAN", "false", MS + 1000),
make_iotdb_payload(DeviceId, "unplugged", "BOOLEAN", "FALSE", MS + 1000),
make_iotdb_payload(DeviceId, "unraveled", "BOOLEAN", "False", MS + 1000),
make_iotdb_payload(DeviceId, "undecided", "BOOLEAN", null, MS + 1000),
make_iotdb_payload(DeviceId, "foo", "TEXT", "bar", 1685112026300)
make_iotdb_payload(DeviceId, "foo", "TEXT", "bar", MS + 1000)
],
MakeMessageFun = make_message_fun(iotdb_topic(Config), Payload),
ok = emqx_bridge_v2_testlib:t_sync_query(
Config, MakeMessageFun, fun is_success_check/1, iotdb_bridge_on_query
),
%% check temp
QueryTemp = <<"select temp from ", DeviceId/binary>>,
{ok, {{_, 200, _}, _, ResultTemp}} = iotdb_query(Config, QueryTemp),
?assertMatch(
#{<<"values">> := [[36, 37, 38, 39, 36, 36, 36, 40, 41, 42]]},
emqx_utils_json:decode(ResultTemp)
),
Time = integer_to_binary(MS - 20000),
%% check weight
QueryWeight = <<"select weight from ", DeviceId/binary>>,
QueryWeight = <<"select weight from ", DeviceId/binary, " where time > ", Time/binary>>,
{ok, {{_, 200, _}, _, ResultWeight}} = iotdb_query(Config, QueryWeight),
?assertMatch(
#{<<"values">> := [[87.3, 87.3, 87.0, 87.3, 87.3, 87.0]]},
emqx_utils_json:decode(ResultWeight)
),
%% check rest ts = 1685112026300
QueryRest = <<"select * from ", DeviceId/binary, " where time = 1685112026300">>,
{ok, {{_, 200, _}, _, ResultRest}} = iotdb_query(Config, QueryRest),
#{<<"values">> := Values, <<"expressions">> := Expressions} = emqx_utils_json:decode(
ResultRest
),
Results = maps:from_list(lists:zipwith(fun(K, [V]) -> {K, V} end, Expressions, Values)),
Exp = #{
exp(DeviceId, "charged") => true,
exp(DeviceId, "floated") => true,
exp(DeviceId, "started") => true,
exp(DeviceId, "stoked") => true,
exp(DeviceId, "enriched") => true,
exp(DeviceId, "gutted") => true,
exp(DeviceId, "drained") => false,
exp(DeviceId, "toasted") => false,
exp(DeviceId, "uncharted") => false,
exp(DeviceId, "dazzled") => false,
exp(DeviceId, "unplugged") => false,
exp(DeviceId, "unraveled") => false,
exp(DeviceId, "undecided") => null,
exp(DeviceId, "foo") => <<"bar">>,
exp(DeviceId, "temp") => null,
exp(DeviceId, "weight") => null
},
?assertEqual(Exp, Results),
%% [FIXME] https://github.com/apache/iotdb/issues/12375
%% null don't seem to be supported by IoTDB insertTablet when 1.3.0
case ?config(iotdb_version, Config) of
?VSN_1_3_X ->
skip;
_ ->
%% check rest ts = MS + 1000
CheckTime = integer_to_binary(MS + 1000),
QueryRest = <<"select * from ", DeviceId/binary, " where time = ", CheckTime/binary>>,
{ok, {{_, 200, _}, _, ResultRest}} = iotdb_query(Config, QueryRest),
#{<<"values">> := Values, <<"expressions">> := Expressions} = emqx_utils_json:decode(
ResultRest
),
Results = maps:from_list(lists:zipwith(fun(K, [V]) -> {K, V} end, Expressions, Values)),
Exp = #{
exp(DeviceId, "charged") => true,
exp(DeviceId, "floated") => true,
exp(DeviceId, "started") => true,
exp(DeviceId, "stoked") => true,
exp(DeviceId, "enriched") => true,
exp(DeviceId, "gutted") => true,
exp(DeviceId, "drained") => false,
exp(DeviceId, "toasted") => false,
exp(DeviceId, "uncharted") => false,
exp(DeviceId, "dazzled") => false,
exp(DeviceId, "unplugged") => false,
exp(DeviceId, "unraveled") => false,
exp(DeviceId, "undecided") => null,
exp(DeviceId, "foo") => <<"bar">>,
exp(DeviceId, "temp") => null,
exp(DeviceId, "weight") => null
},
?assertEqual(Exp, Results),
%% check temp
QueryTemp = <<"select temp from ", DeviceId/binary, " where time > ", Time/binary>>,
{ok, {{_, 200, _}, _, ResultTemp}} = iotdb_query(Config, QueryTemp),
?assertMatch(
#{<<"values">> := [[36, 37, 38, 39, 34, 33, 32, 41]]},
emqx_utils_json:decode(ResultTemp)
)
end,
ok.
exp(Dev, M0) ->

View File

@ -389,7 +389,7 @@ fields(producer_kafka_opts) ->
)},
{kafka_headers,
mk(
binary(),
emqx_schema:template(),
#{
required => false,
validator => fun kafka_header_validator/1,
@ -462,12 +462,12 @@ fields(producer_kafka_ext_headers) ->
[
{kafka_ext_header_key,
mk(
binary(),
emqx_schema:template(),
#{required => true, desc => ?DESC(producer_kafka_ext_header_key)}
)},
{kafka_ext_header_value,
mk(
binary(),
emqx_schema:template(),
#{
required => true,
validator => fun kafka_ext_header_value_validator/1,
@ -477,11 +477,20 @@ fields(producer_kafka_ext_headers) ->
];
fields(kafka_message) ->
[
{key, mk(string(), #{default => <<"${.clientid}">>, desc => ?DESC(kafka_message_key)})},
{value, mk(string(), #{default => <<"${.}">>, desc => ?DESC(kafka_message_value)})},
{key,
mk(emqx_schema:template(), #{
default => <<"${.clientid}">>,
desc => ?DESC(kafka_message_key)
})},
{value,
mk(emqx_schema:template(), #{
default => <<"${.}">>,
desc => ?DESC(kafka_message_value)
})},
{timestamp,
mk(string(), #{
default => <<"${.timestamp}">>, desc => ?DESC(kafka_message_timestamp)
mk(emqx_schema:template(), #{
default => <<"${.timestamp}">>,
desc => ?DESC(kafka_message_timestamp)
})}
];
fields(producer_buffer) ->
@ -536,8 +545,11 @@ fields(consumer_topic_mapping) ->
{qos, mk(emqx_schema:qos(), #{default => 0, desc => ?DESC(consumer_mqtt_qos)})},
{payload_template,
mk(
string(),
#{default => <<"${.}">>, desc => ?DESC(consumer_mqtt_payload)}
emqx_schema:template(),
#{
default => <<"${.}">>,
desc => ?DESC(consumer_mqtt_payload)
}
)}
];
fields(consumer_kafka_opts) ->
@ -744,8 +756,8 @@ producer_strategy_key_validator(
producer_strategy_key_validator(emqx_utils_maps:binary_key_map(Conf));
producer_strategy_key_validator(#{
<<"partition_strategy">> := key_dispatch,
<<"message">> := #{<<"key">> := ""}
}) ->
<<"message">> := #{<<"key">> := Key}
}) when Key =:= "" orelse Key =:= <<>> ->
{error, "Message key cannot be empty when `key_dispatch` strategy is used"};
producer_strategy_key_validator(_) ->
ok.

View File

@ -357,7 +357,7 @@ kafka_consumer_hocon() ->
%% assert compatibility
bridge_schema_json_test() ->
JSON = iolist_to_binary(emqx_conf:bridge_schema_json()),
JSON = iolist_to_binary(emqx_dashboard_schema_api:bridge_schema_json()),
Map = emqx_utils_json:decode(JSON),
Path = [<<"components">>, <<"schemas">>, <<"bridge_kafka.post_producer">>, <<"properties">>],
?assertMatch(#{<<"kafka">> := _}, emqx_utils_maps:deep_get(Path, Map)).

View File

@ -150,7 +150,7 @@ fields(producer) ->
[
{payload_template,
sc(
binary(),
emqx_schema:template(),
#{
default => <<"${.}">>,
desc => ?DESC("payload_template")

View File

@ -44,8 +44,10 @@ roots() -> [].
fields("config") ->
[
{enable, mk(boolean(), #{desc => ?DESC("enable"), default => true})},
{collection, mk(binary(), #{desc => ?DESC("collection"), default => <<"mqtt">>})},
{payload_template, mk(binary(), #{required => false, desc => ?DESC("payload_template")})},
{collection,
mk(emqx_schema:template(), #{desc => ?DESC("collection"), default => <<"mqtt">>})},
{payload_template,
mk(emqx_schema:template(), #{required => false, desc => ?DESC("payload_template")})},
{resource_opts,
mk(
ref(?MODULE, "creation_opts"),

View File

@ -450,7 +450,6 @@ connect(Options) ->
options => emqx_utils:redact(Options)
}),
Name = proplists:get_value(name, Options),
WorkerId = proplists:get_value(ecpool_worker_id, Options),
ClientOpts = proplists:get_value(client_opts, Options),
case emqtt:start_link(mk_client_opts(Name, WorkerId, ClientOpts)) of
{ok, Pid} ->

View File

@ -200,7 +200,7 @@ fields("ingress_local") ->
[
{topic,
mk(
binary(),
emqx_schema:template(),
#{
validator => fun emqx_schema:non_empty_string/1,
desc => ?DESC("ingress_local_topic"),
@ -217,7 +217,7 @@ fields("ingress_local") ->
)},
{retain,
mk(
hoconsc:union([boolean(), binary()]),
hoconsc:union([boolean(), emqx_schema:template()]),
#{
default => <<"${retain}">>,
desc => ?DESC("retain")
@ -225,7 +225,7 @@ fields("ingress_local") ->
)},
{payload,
mk(
binary(),
emqx_schema:template(),
#{
default => undefined,
desc => ?DESC("payload")
@ -268,7 +268,7 @@ fields("egress_remote") ->
[
{topic,
mk(
binary(),
emqx_schema:template(),
#{
required => true,
validator => fun emqx_schema:non_empty_string/1,
@ -286,7 +286,7 @@ fields("egress_remote") ->
)},
{retain,
mk(
hoconsc:union([boolean(), binary()]),
hoconsc:union([boolean(), emqx_schema:template()]),
#{
required => false,
default => false,
@ -295,7 +295,7 @@ fields("egress_remote") ->
)},
{payload,
mk(
binary(),
emqx_schema:template(),
#{
default => undefined,
desc => ?DESC("payload")
@ -344,7 +344,7 @@ desc(_) ->
undefined.
qos() ->
hoconsc:union([emqx_schema:qos(), binary()]).
hoconsc:union([emqx_schema:qos(), emqx_schema:template()]).
parse_server(Str) ->
#{hostname := Host, port := Port} = emqx_schema:parse_server(Str, ?MQTT_HOST_OPTS),

View File

@ -117,7 +117,7 @@ fields("config") ->
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{sql,
mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>}
)},
{local_topic,

View File

@ -517,7 +517,6 @@ t_write_failure(Config) ->
ok
end,
fun(Trace0) ->
ct:pal("trace: ~p", [Trace0]),
Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
?assertMatch([#{result := {error, _}} | _], Trace),
[#{result := {error, Error}} | _] = Trace,

View File

@ -146,7 +146,7 @@ fields(action_parameters_data) ->
[
{timestamp,
mk(
binary(),
emqx_schema:template(),
#{
desc => ?DESC("config_parameters_timestamp"),
required => false
@ -154,7 +154,7 @@ fields(action_parameters_data) ->
)},
{metric,
mk(
binary(),
emqx_schema:template(),
#{
required => true,
desc => ?DESC("config_parameters_metric")
@ -162,7 +162,7 @@ fields(action_parameters_data) ->
)},
{tags,
mk(
hoconsc:union([map(), binary()]),
hoconsc:union([map(), emqx_schema:template()]),
#{
required => true,
desc => ?DESC("config_parameters_tags"),
@ -188,7 +188,7 @@ fields(action_parameters_data) ->
)},
{value,
mk(
hoconsc:union([integer(), float(), binary()]),
hoconsc:union([integer(), float(), emqx_schema:template()]),
#{
required => true,
desc => ?DESC("config_parameters_value")

View File

@ -158,7 +158,7 @@ fields(action_parameters) ->
[
{sql,
hoconsc:mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>}
)}
];
@ -177,7 +177,7 @@ fields("config") ->
)},
{sql,
hoconsc:mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>}
)},
{local_topic,

View File

@ -61,7 +61,7 @@ fields(action_parameters) ->
[
{sql,
hoconsc:mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("sql_template"), default => default_sql(), format => <<"sql">>}
)}
];

View File

@ -520,7 +520,6 @@ t_write_failure(Config) ->
)
end),
fun(Trace0) ->
ct:pal("trace: ~p", [Trace0]),
Trace = ?of_kind(buffer_worker_flush_nack, Trace0),
?assertMatch([#{result := {error, _}} | _], Trace),
[#{result := {error, Error}} | _] = Trace,

View File

@ -51,12 +51,12 @@ fields(action_parameters) ->
fields(producer_pulsar_message) ->
[
{key,
?HOCON(string(), #{
?HOCON(emqx_schema:template(), #{
default => <<"${.clientid}">>,
desc => ?DESC("producer_key_template")
})},
{value,
?HOCON(string(), #{
?HOCON(emqx_schema:template(), #{
default => <<"${.}">>,
desc => ?DESC("producer_value_template")
})}

View File

@ -1235,7 +1235,7 @@ t_resilience(Config) ->
after 1_000 -> ct:fail("producer didn't stop!")
end,
Consumed = lists:flatmap(
fun(_) -> receive_consumed(5_000) end, lists:seq(1, NumProduced)
fun(_) -> receive_consumed(10_000) end, lists:seq(1, NumProduced)
),
?assertEqual(NumProduced, length(Consumed)),
ExpectedPayloads = lists:map(fun integer_to_binary/1, lists:seq(1, NumProduced)),

View File

@ -99,7 +99,7 @@ fields(action_parameters) ->
)},
{payload_template,
hoconsc:mk(
binary(),
emqx_schema:template(),
#{
default => <<"">>,
desc => ?DESC(?CONNECTOR_SCHEMA, "payload_template")

View File

@ -52,7 +52,7 @@ init_per_group(_Group, Config) ->
common_init_per_group(Opts) ->
emqx_common_test_helpers:render_and_load_app_config(emqx_conf),
ok = emqx_common_test_helpers:start_apps([
emqx_conf, emqx_bridge, emqx_bridge_rabbitmq, emqx_rule_engine
emqx_conf, emqx_bridge, emqx_bridge_rabbitmq, emqx_rule_engine, emqx_modules
]),
ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
{ok, _} = application:ensure_all_started(emqx_connector),
@ -116,7 +116,9 @@ end_per_group(_Group, Config) ->
} = get_channel_connection(Config),
amqp_channel:call(Channel, #'queue.purge'{queue = rabbit_mq_queue()}),
emqx_mgmt_api_test_util:end_suite(),
ok = emqx_common_test_helpers:stop_apps([emqx_conf, emqx_bridge_rabbitmq, emqx_rule_engine]),
ok = emqx_common_test_helpers:stop_apps([
emqx_conf, emqx_bridge_rabbitmq, emqx_rule_engine, emqx_modules
]),
ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
_ = application:stop(emqx_connector),
_ = application:stop(emqx_bridge),

View File

@ -211,7 +211,7 @@ desc(_) ->
undefined.
command_template(type) ->
list(binary());
hoconsc:array(emqx_schema:template());
command_template(required) ->
true;
command_template(validator) ->

View File

@ -162,8 +162,13 @@ fields(action_parameters) ->
[
{template,
mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("template"), default => ?DEFAULT_TEMPLATE}
)},
{strategy,
mk(
hoconsc:union([roundrobin, binary()]),
#{desc => ?DESC("strategy"), default => roundrobin}
)}
] ++ emqx_bridge_rocketmq_connector:fields(config),
lists:foldl(
@ -173,6 +178,7 @@ fields(action_parameters) ->
Parameters,
[
servers,
namespace,
pool_size,
auto_reconnect,
access_key,
@ -205,17 +211,21 @@ fields("config") ->
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{template,
mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("template"), default => ?DEFAULT_TEMPLATE}
)},
{local_topic,
mk(
binary(),
#{desc => ?DESC("local_topic"), required => false}
)},
{strategy,
mk(
hoconsc:union([roundrobin, binary()]),
#{desc => ?DESC("strategy"), default => roundrobin}
)}
] ++ emqx_resource_schema:fields("resource_opts") ++
(emqx_bridge_rocketmq_connector:fields(config) --
emqx_connector_schema_lib:prepare_statement_fields());
emqx_bridge_rocketmq_connector:fields(config);
fields("post") ->
[type_field(), name_field() | fields("config")];
fields("put") ->

View File

@ -45,9 +45,14 @@ roots() ->
fields(config) ->
[
{servers, servers()},
{topic,
{namespace,
mk(
binary(),
#{required => false, desc => ?DESC(namespace)}
)},
{topic,
mk(
emqx_schema:template(),
#{default => <<"TopicTest">>, desc => ?DESC(topic)}
)},
{access_key,
@ -107,7 +112,7 @@ on_start(
),
ClientId = client_id(InstanceId),
ACLInfo = acl_info(AccessKey, SecretKey, SecurityToken),
ClientCfg = #{acl_info => ACLInfo},
ClientCfg = namespace(#{acl_info => ACLInfo}, Config),
State = #{
client_id => ClientId,
@ -156,10 +161,12 @@ create_channel_state(
TopicTks = emqx_placeholder:preproc_tmpl(Topic),
ProducerOpts = make_producer_opts(Conf, ACLInfo),
Templates = parse_template(Conf),
DispatchStrategy = parse_dispatch_strategy(Conf),
State = #{
topic => Topic,
topic_tokens => TopicTks,
templates => Templates,
dispatch_strategy => DispatchStrategy,
sync_timeout => SyncTimeout,
acl_info => ACLInfo,
producers_opts => ProducerOpts
@ -202,7 +209,7 @@ on_stop(InstanceId, _State) ->
({_, client_id, ClientId}) ->
destory_producers_map(ClientId),
ok = rocketmq:stop_and_delete_supervised_client(ClientId);
({_, _Topic, Producer}) ->
({_, _ProducerGroup, Producer}) ->
_ = rocketmq:stop_and_delete_supervised_producers(Producer)
end,
emqx_resource:get_allocated_resources_list(InstanceId)
@ -250,15 +257,16 @@ do_query(
#{
topic_tokens := TopicTks,
templates := Templates,
dispatch_strategy := DispatchStrategy,
sync_timeout := RequestTimeout,
producers_opts := ProducerOpts
} = maps:get(ChannelId, Channels),
TopicKey = get_topic_key(Query, TopicTks),
Data = apply_template(Query, Templates),
Data = apply_template(Query, Templates, DispatchStrategy),
Result = safe_do_produce(
InstanceId, QueryFunc, ClientId, TopicKey, Data, ProducerOpts, RequestTimeout
ChannelId, InstanceId, QueryFunc, ClientId, TopicKey, Data, ProducerOpts, RequestTimeout
),
case Result of
{error, Reason} ->
@ -284,9 +292,11 @@ do_query(
%% Extract the channel id from a single request tuple or from the head
%% of a batch of requests.
get_channel_id([Request | _]) -> get_channel_id(Request);
get_channel_id({ChanId, _}) -> ChanId.
safe_do_produce(InstanceId, QueryFunc, ClientId, TopicKey, Data, ProducerOpts, RequestTimeout) ->
safe_do_produce(
ChannelId, InstanceId, QueryFunc, ClientId, TopicKey, Data, ProducerOpts, RequestTimeout
) ->
try
Producers = get_producers(InstanceId, ClientId, TopicKey, ProducerOpts),
Producers = get_producers(ChannelId, InstanceId, ClientId, TopicKey, ProducerOpts),
produce(InstanceId, QueryFunc, Producers, Data, RequestTimeout)
catch
_Type:Reason ->
@ -315,24 +325,57 @@ parse_template([{Key, H} | T], Templates) ->
parse_template([], Templates) ->
Templates.
%% Build a fun that computes the per-message produce context for the
%% configured dispatch strategy.
parse_dispatch_strategy(#{strategy := roundrobin}) ->
    %% Round-robin dispatch needs no per-message context.
    fun(_Msg) -> #{} end;
parse_dispatch_strategy(#{strategy := KeyTemplate}) ->
    KeyTokens = emqx_placeholder:preproc_tmpl(KeyTemplate),
    fun(Msg) -> #{key => render_dispatch_key(KeyTokens, Msg)} end.

%% Render the dispatch key for one message.
render_dispatch_key(KeyTokens, Msg) ->
    case emqx_placeholder:proc_tmpl(KeyTokens, Msg) of
        <<"undefined">> ->
            %% The templated field may be absent for some event kinds
            %% (e.g. `topic' is missing in `client.disconnected').  Since
            %% the key is used for routing, substitute a random key so the
            %% load still spreads: `key_dispatch' effectively degrades to
            %% `random' dispatch when the field is absent, and stays
            %% deterministic otherwise.
            emqx_guid:gen();
        Key ->
            Key
    end.
%% Render the topic template against the message — or against the first
%% message of a batch — to obtain the concrete topic key.
get_topic_key([Request | _], TopicTks) ->
    get_topic_key(Request, TopicTks);
get_topic_key({_, Msg}, TopicTks) ->
    emqx_placeholder:proc_tmpl(TopicTks, Msg).
apply_template({Key, Msg} = _Req, Templates) ->
%% return a message data and its context,
%% {binary(), rocketmq_producers:produce_context()})
apply_template({Key, Msg} = _Req, Templates, DispatchStrategy) ->
{
case maps:get(Key, Templates, undefined) of
undefined ->
emqx_utils_json:encode(Msg);
Template ->
emqx_placeholder:proc_tmpl(Template, Msg)
end,
DispatchStrategy(Msg)
};
apply_template([{Key, _} | _] = Reqs, Templates, DispatchStrategy) ->
case maps:get(Key, Templates, undefined) of
undefined ->
emqx_utils_json:encode(Msg);
[{emqx_utils_json:encode(Msg), DispatchStrategy(Msg)} || {_, Msg} <- Reqs];
Template ->
emqx_placeholder:proc_tmpl(Template, Msg)
end;
apply_template([{Key, _} | _] = Reqs, Templates) ->
case maps:get(Key, Templates, undefined) of
undefined ->
[emqx_utils_json:encode(Msg) || {_, Msg} <- Reqs];
Template ->
[emqx_placeholder:proc_tmpl(Template, Msg) || {_, Msg} <- Reqs]
[
{emqx_placeholder:proc_tmpl(Template, Msg), DispatchStrategy(Msg)}
|| {_, Msg} <- Reqs
]
end.
client_id(ResourceId) ->
@ -377,6 +420,10 @@ acl_info(AccessKey, SecretKey, SecurityToken) when is_binary(AccessKey) ->
acl_info(_, _, _) ->
#{}.
%% Copy the (optional) namespace from the connector config into the
%% client configuration, defaulting to the empty binary when unset.
namespace(ClientCfg, Config) ->
    ClientCfg#{namespace => maps:get(namespace, Config, <<>>)}.
%% Create the (named, public) ETS table used to cache producers for this
%% client.  Read-heavy, hence {read_concurrency, true}.
create_producers_map(ClientId) ->
    TableOpts = [public, named_table, {read_concurrency, true}],
    _ = ets:new(ClientId, TableOpts),
    ok.
@ -391,16 +438,21 @@ destory_producers_map(ClientId) ->
ets:delete(Tid)
end.
get_producers(InstanceId, ClientId, Topic, ProducerOpts) ->
case ets:lookup(ClientId, Topic) of
get_producers(ChannelId, InstanceId, ClientId, Topic, ProducerOpts) ->
%% The topic needs to be included in the name since we can have multiple
%% topics per channel due to templating.
ProducerGroup = iolist_to_binary([ChannelId, "_", Topic]),
case ets:lookup(ClientId, ProducerGroup) of
[{_, Producers}] ->
Producers;
_ ->
ProducerGroup = iolist_to_binary([atom_to_list(ClientId), "_", Topic]),
%% TODO: the name needs to be an atom but this may cause atom leak so we
%% should figure out a way to avoid this
ProducerOpts2 = ProducerOpts#{name => binary_to_atom(ProducerGroup)},
{ok, Producers} = rocketmq:ensure_supervised_producers(
ClientId, ProducerGroup, Topic, ProducerOpts
ClientId, ProducerGroup, Topic, ProducerOpts2
),
ok = emqx_resource:allocate_resource(InstanceId, Topic, Producers),
ets:insert(ClientId, {Topic, Producers}),
ok = emqx_resource:allocate_resource(InstanceId, ProducerGroup, Producers),
ets:insert(ClientId, {ProducerGroup, Producers}),
Producers
end.

View File

@ -263,6 +263,60 @@ t_setup_via_http_api_and_publish(Config) ->
),
ok.
%% Create an action via the HTTP API, clone it under a new name with a
%% different topic, then check that sending through each action yields a
%% successful `rocketmq_connector_query_return' trace event.
t_setup_two_actions_via_http_api_and_publish(Config) ->
    BridgeType = ?GET_CONFIG(rocketmq_bridge_type, Config),
    Name = ?GET_CONFIG(rocketmq_name, Config),
    RocketMQConf = ?GET_CONFIG(rocketmq_config, Config),
    RocketMQConf2 = RocketMQConf#{
        <<"name">> => Name,
        <<"type">> => BridgeType
    },
    ?assertMatch(
        {ok, _},
        create_bridge_http(RocketMQConf2)
    ),
    {ok, #{raw_config := ActionConf}} = emqx_bridge_v2:lookup(actions, BridgeType, Name),
    Topic2 = <<"Topic2">>,
    %% Clone the first action's raw config, pointing the clone at a
    %% different topic.
    ActionConf2 = emqx_utils_maps:deep_force_put(
        [<<"parameters">>, <<"topic">>], ActionConf, Topic2
    ),
    Action2Name = atom_to_binary(?FUNCTION_NAME),
    {ok, _} = emqx_bridge_v2:create(BridgeType, Action2Name, ActionConf2),
    SentData = #{payload => ?PAYLOAD},
    %% Publish through the first action and wait (up to 10s) for the
    %% connector to report the query result.
    ?check_trace(
        begin
            ?wait_async_action(
                ?assertEqual(ok, send_message(Config, SentData)),
                #{?snk_kind := rocketmq_connector_query_return},
                10_000
            ),
            ok
        end,
        fun(Trace0) ->
            Trace = ?of_kind(rocketmq_connector_query_return, Trace0),
            ?assertMatch([#{result := ok}], Trace),
            ok
        end
    ),
    %% Swap the action name in Config so send_message/2 targets the clone,
    %% then repeat the publish-and-trace check for the second action.
    Config2 = proplists:delete(rocketmq_name, Config),
    Config3 = [{rocketmq_name, Action2Name} | Config2],
    ?check_trace(
        begin
            ?wait_async_action(
                ?assertEqual(ok, send_message(Config3, SentData)),
                #{?snk_kind := rocketmq_connector_query_return},
                10_000
            ),
            ok
        end,
        fun(Trace0) ->
            Trace = ?of_kind(rocketmq_connector_query_return, Trace0),
            ?assertMatch([#{result := ok}], Trace),
            ok
        end
    ),
    ok.
t_get_status(Config) ->
?assertMatch(
{ok, _},

View File

@ -77,7 +77,7 @@ fields(s3_upload_parameters) ->
[
{content,
hoconsc:mk(
string(),
emqx_schema:template(),
#{
required => false,
default => <<"${.}">>,

View File

@ -192,7 +192,7 @@ fields(action_parameters) ->
[
{sql,
mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>}
)}
];

View File

@ -112,7 +112,7 @@ fields("parameters") ->
[
{target_topic,
mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("target_topic"), default => <<"${topic}">>}
)},
{target_qos,
@ -122,7 +122,7 @@ fields("parameters") ->
)},
{template,
mk(
binary(),
emqx_schema:template(),
#{desc => ?DESC("template"), default => <<"${payload}">>}
)}
];

View File

@ -83,7 +83,7 @@ fields("config") ->
{enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})},
{sql,
mk(
binary(),
emqx_schema:template(),
#{
desc => ?DESC("sql_template"),
default => ?DEFAULT_SQL,
@ -125,7 +125,7 @@ fields(action_parameters) ->
{database, fun emqx_connector_schema_lib:database/1},
{sql,
mk(
binary(),
emqx_schema:template(),
#{
desc => ?DESC("sql_template"),
default => ?DEFAULT_SQL,

View File

@ -224,6 +224,7 @@ reset() -> gen_server:call(?MODULE, reset).
%% Run `?MODULE:trans_status/0' through the local `transaction/2' wrapper
%% and return its result (presumably the cluster commit/replication
%% status — NOTE(review): confirm against trans_status/0).
status() ->
    transaction(fun ?MODULE:trans_status/0, []).
%% DO NOT delete this on_leave_clean/0: it is still called over RPC by
%% nodes running releases earlier than v5.6.0, which expect the 0-arity
%% form.  Delegates to on_leave_clean/1 for the local node.
on_leave_clean() ->
    on_leave_clean(node()).
@ -367,7 +368,7 @@ handle_call({fast_forward_to_commit, ToTnxId}, _From, State) ->
NodeId = do_fast_forward_to_commit(ToTnxId, State),
{reply, NodeId, State, catch_up(State)};
handle_call(on_leave, _From, State) ->
{atomic, ok} = transaction(fun ?MODULE:on_leave_clean/0, []),
{atomic, ok} = transaction(fun ?MODULE:on_leave_clean/1, [node()]),
{reply, ok, State#{is_leaving := true}};
handle_call(_, _From, State) ->
{reply, ok, State, catch_up(State)}.

View File

@ -31,13 +31,6 @@
-export([dump_schema/2, reformat_schema_dump/2]).
-export([schema_module/0]).
%% TODO: move to emqx_dashboard when we stop building api schema at build time
-export([
hotconf_schema_json/0,
bridge_schema_json/0,
hocon_schema_to_spec/2
]).
%% for rpc
-export([get_node_and_config/1]).
@ -311,12 +304,22 @@ gen_flat_doc(RootNames, #{full_name := FullName, fields := Fields} = S, DescReso
false ->
ok
end,
#{
text => short_name(FullName),
hash => format_hash(FullName),
doc => maps:get(desc, S, <<"">>),
fields => format_fields(Fields, DescResolver)
}.
try
#{
text => short_name(FullName),
hash => format_hash(FullName),
doc => maps:get(desc, S, <<"">>),
fields => format_fields(Fields, DescResolver)
}
catch
throw:Reason ->
io:format(
standard_error,
"failed_to_build_doc for ~s:~n~p~n",
[FullName, Reason]
),
error(failed_to_build_doc)
end.
format_fields(Fields, DescResolver) ->
[format_field(F, DescResolver) || F <- Fields].
@ -456,17 +459,6 @@ warn_bad_namespace(Namespace) ->
ok
end.
%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time.
hotconf_schema_json() ->
SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>},
gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo).
%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time.
bridge_schema_json() ->
Version = <<"0.1.0">>,
SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => Version},
gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo).
%% @doc return the root schema module.
-spec schema_module() -> module().
schema_module() ->
@ -506,57 +498,6 @@ make_desc_resolver(Lang) ->
unicode:characters_to_binary(Desc)
end.
gen_api_schema_json_iodata(SchemaMod, SchemaInfo) ->
emqx_dashboard_swagger:gen_api_schema_json_iodata(
SchemaMod,
SchemaInfo,
fun ?MODULE:hocon_schema_to_spec/2
).
-define(TO_REF(_N_, _F_), iolist_to_binary([to_bin(_N_), ".", to_bin(_F_)])).
-define(TO_COMPONENTS_SCHEMA(_M_, _F_),
iolist_to_binary([
<<"#/components/schemas/">>,
?TO_REF(emqx_dashboard_swagger:namespace(_M_), _F_)
])
).
hocon_schema_to_spec(?R_REF(Module, StructName), _LocalModule) ->
{#{<<"$ref">> => ?TO_COMPONENTS_SCHEMA(Module, StructName)}, [{Module, StructName}]};
hocon_schema_to_spec(?REF(StructName), LocalModule) ->
{#{<<"$ref">> => ?TO_COMPONENTS_SCHEMA(LocalModule, StructName)}, [{LocalModule, StructName}]};
hocon_schema_to_spec(Type, LocalModule) when ?IS_TYPEREFL(Type) ->
{typename_to_spec(typerefl:name(Type), LocalModule), []};
hocon_schema_to_spec(?ARRAY(Item), LocalModule) ->
{Schema, Refs} = hocon_schema_to_spec(Item, LocalModule),
{#{type => array, items => Schema}, Refs};
hocon_schema_to_spec(?ENUM(Items), _LocalModule) ->
{#{type => enum, symbols => Items}, []};
hocon_schema_to_spec(?MAP(Name, Type), LocalModule) ->
{Schema, SubRefs} = hocon_schema_to_spec(Type, LocalModule),
{
#{
<<"type">> => object,
<<"properties">> => #{<<"$", (to_bin(Name))/binary>> => Schema}
},
SubRefs
};
hocon_schema_to_spec(?UNION(Types, _DisplayName), LocalModule) ->
{OneOf, Refs} = lists:foldl(
fun(Type, {Acc, RefsAcc}) ->
{Schema, SubRefs} = hocon_schema_to_spec(Type, LocalModule),
{[Schema | Acc], SubRefs ++ RefsAcc}
end,
{[], []},
hoconsc:union_members(Types)
),
{#{<<"oneOf">> => OneOf}, Refs};
hocon_schema_to_spec(Atom, _LocalModule) when is_atom(Atom) ->
{#{type => enum, symbols => [Atom]}, []}.
typename_to_spec(TypeStr, Module) ->
emqx_conf_schema_types:readable_dashboard(Module, TypeStr).
join_format(Snippets) ->
case [S || S <- Snippets, S =/= undefined] of
[] ->

View File

@ -33,8 +33,19 @@ readable(Module, TypeStr) when is_list(TypeStr) ->
%% Module is ignored so far as all types are distinguished by their names
readable(TypeStr)
catch
throw:unknown_type ->
fail(#{reason => unknown_type, type => TypeStr, module => Module})
throw:Reason ->
throw(#{
reason => Reason,
type => TypeStr,
module => Module
});
error:Reason:Stacktrace ->
throw(#{
reason => Reason,
stacktrace => Stacktrace,
type => TypeStr,
module => Module
})
end.
readable_swagger(Module, TypeStr) ->
@ -49,22 +60,28 @@ readable_docgen(Module, TypeStr) ->
get_readable(Module, TypeStr, Flavor) ->
Map = readable(Module, TypeStr),
case maps:get(Flavor, Map, undefined) of
undefined -> fail(#{reason => unknown_type, module => Module, type => TypeStr});
undefined -> throw(#{reason => unknown_type, module => Module, type => TypeStr});
Value -> Value
end.
%% Abort by logging the reason to stderr and raising.  Fails the build or
%% test run; production code should never reach this point.
-spec fail(_) -> no_return().
fail(Why) ->
    ok = io:format(standard_error, "ERROR: ~p~n", [Why]),
    error(Why).
readable("boolean()") ->
#{
swagger => #{type => boolean},
dashboard => #{type => boolean},
docgen => #{type => "Boolean"}
};
readable("template()") ->
#{
swagger => #{type => string},
dashboard => #{type => string, is_template => true},
docgen => #{type => "String", desc => ?DESC(template)}
};
readable("template_str()") ->
#{
swagger => #{type => string},
dashboard => #{type => string, is_template => true},
docgen => #{type => "String", desc => ?DESC(template)}
};
readable("binary()") ->
#{
swagger => #{type => string},

Some files were not shown because too many files have changed in this diff Show More