diff --git a/.ci/docker-compose-file/credentials.env b/.ci/docker-compose-file/credentials.env new file mode 100644 index 000000000..50cc83a3f --- /dev/null +++ b/.ci/docker-compose-file/credentials.env @@ -0,0 +1,7 @@ +MONGO_USERNAME=emqx +MONGO_PASSWORD=passw0rd +MONGO_AUTHSOURCE=admin + +# See "Environment Variables" @ https://hub.docker.com/_/mongo +MONGO_INITDB_ROOT_USERNAME=${MONGO_USERNAME} +MONGO_INITDB_ROOT_PASSWORD=${MONGO_PASSWORD} diff --git a/.ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml b/.ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml index 39f37e66c..0eae6c358 100644 --- a/.ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml +++ b/.ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml @@ -9,6 +9,9 @@ services: - emqx_bridge ports: - "27017:27017" + env_file: + - .env + - credentials.env command: --ipv6 --bind_ip_all diff --git a/.ci/docker-compose-file/docker-compose.yaml b/.ci/docker-compose-file/docker-compose.yaml index d4a44bfb0..f3943b010 100644 --- a/.ci/docker-compose-file/docker-compose.yaml +++ b/.ci/docker-compose-file/docker-compose.yaml @@ -5,6 +5,7 @@ services: container_name: erlang image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.2-3:1.14.5-25.3.2-2-ubuntu22.04} env_file: + - credentials.env - conf.env environment: GITHUB_ACTIONS: ${GITHUB_ACTIONS:-} diff --git a/.ci/docker-compose-file/redis/sentinel-tcp/sentinel-base.conf b/.ci/docker-compose-file/redis/sentinel-tcp/sentinel-base.conf index 419f2a935..c43de536b 100644 --- a/.ci/docker-compose-file/redis/sentinel-tcp/sentinel-base.conf +++ b/.ci/docker-compose-file/redis/sentinel-tcp/sentinel-base.conf @@ -1,7 +1,7 @@ sentinel resolve-hostnames yes bind :: 0.0.0.0 -sentinel monitor mymaster redis-sentinel-master 6379 1 -sentinel auth-pass mymaster public -sentinel down-after-milliseconds mymaster 10000 -sentinel failover-timeout mymaster 20000 +sentinel monitor mytcpmaster redis-sentinel-master 6379 1 +sentinel auth-pass 
mytcpmaster public +sentinel down-after-milliseconds mytcpmaster 10000 +sentinel failover-timeout mytcpmaster 20000 diff --git a/.ci/docker-compose-file/redis/sentinel-tls/sentinel-base.conf b/.ci/docker-compose-file/redis/sentinel-tls/sentinel-base.conf index 8363ae383..7ea32f805 100644 --- a/.ci/docker-compose-file/redis/sentinel-tls/sentinel-base.conf +++ b/.ci/docker-compose-file/redis/sentinel-tls/sentinel-base.conf @@ -8,7 +8,7 @@ tls-key-file /etc/certs/key.pem tls-ca-cert-file /etc/certs/cacert.pem tls-auth-clients no -sentinel monitor mymaster redis-sentinel-tls-master 6389 1 -sentinel auth-pass mymaster public -sentinel down-after-milliseconds mymaster 10000 -sentinel failover-timeout mymaster 20000 +sentinel monitor mytlsmaster redis-sentinel-tls-master 6389 1 +sentinel auth-pass mytlsmaster public +sentinel down-after-milliseconds mytlsmaster 10000 +sentinel failover-timeout mytlsmaster 20000 diff --git a/.github/ISSUE_TEMPLATE/bug-report.yaml b/.github/ISSUE_TEMPLATE/bug-report.yaml index 61f7afb6d..c79a13f21 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.yaml +++ b/.github/ISSUE_TEMPLATE/bug-report.yaml @@ -61,10 +61,6 @@ body: # paste output here $ uname -a # paste output here - - # On Windows: - C:\> wmic os get Caption, Version, BuildNumber, OSArchitecture - # paste output here ``` diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 24024f68a..74f06969a 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,9 +1,8 @@ Fixes - +Release version: v/e5.? ## Summary -copilot:summary ## PR Checklist Please convert it to a draft if any of the following conditions are not met. 
Reviewers may skip over until all the items are checked: diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 7f23bf85e..6d642ebef 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -65,58 +65,6 @@ on: default: '5.2-3' jobs: - windows: - runs-on: windows-2019 - if: inputs.profile == 'emqx' - strategy: - fail-fast: false - matrix: - profile: # for now only CE for windows - - emqx - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.inputs.ref }} - fetch-depth: 0 - - - uses: ilammy/msvc-dev-cmd@v1.12.0 - - uses: erlef/setup-beam@v1.16.0 - with: - otp-version: 25.3.2 - - name: build - env: - PYTHON: python - DIAGNOSTIC: 1 - run: | - # ensure crypto app (openssl) - erl -eval "erlang:display(crypto:info_lib())" -s init stop - make ${{ matrix.profile }}-tgz - - name: run emqx - timeout-minutes: 5 - run: | - $ErrorActionPreference = "Stop" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx start - Start-Sleep -s 10 - $pingOutput = ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx ping - if ($pingOutput = 'pong') { - echo "EMQX started OK" - } else { - echo "Failed to ping EMQX $pingOutput" - Exit 1 - } - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx stop - echo "EMQX stopped" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx install - echo "EMQX installed" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall - echo "EMQX uninstalled" - - uses: actions/upload-artifact@v3 - if: success() - with: - name: ${{ matrix.profile }} - path: _packages/${{ matrix.profile }}/ - retention-days: 7 - mac: strategy: fail-fast: false @@ -126,9 +74,9 @@ jobs: otp: - ${{ inputs.otp_vsn }} os: - - macos-11 - macos-12 - macos-12-arm64 + - macos-13 runs-on: ${{ matrix.os }} steps: - uses: emqx/self-hosted-cleanup-action@v1.0.3 diff --git a/.github/workflows/build_packages_cron.yaml b/.github/workflows/build_packages_cron.yaml index 244ffbd72..97fb9536c 100644 --- 
a/.github/workflows/build_packages_cron.yaml +++ b/.github/workflows/build_packages_cron.yaml @@ -130,59 +130,3 @@ jobs: with: payload: | {"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} - - windows: - if: github.repository_owner == 'emqx' - runs-on: windows-2019 - strategy: - fail-fast: false - matrix: - profile: - - emqx - otp: - - 25.3.2 - steps: - - uses: actions/checkout@v3 - - uses: ilammy/msvc-dev-cmd@v1.12.0 - - uses: erlef/setup-beam@v1.16.0 - with: - otp-version: ${{ matrix.otp }} - - name: build - env: - PYTHON: python - DIAGNOSTIC: 1 - run: | - # ensure crypto app (openssl) - erl -eval "erlang:display(crypto:info_lib())" -s init stop - make ${{ matrix.profile }}-tgz - - name: run emqx - timeout-minutes: 5 - run: | - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx start - Start-Sleep -s 10 - $pingOutput = ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx ping - if ($pingOutput = 'pong') { - echo "EMQX started OK" - } else { - echo "Failed to ping EMQX $pingOutput" - Exit 1 - } - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx stop - echo "EMQX stopped" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx install - echo "EMQX installed" - ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall - echo "EMQX uninstalled" - - uses: actions/upload-artifact@v3 - with: - name: windows - path: _packages/${{ matrix.profile }}/* - retention-days: 7 - - name: Send notification to Slack - uses: slackapi/slack-github-action@v1.23.0 - if: failure() - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - with: - payload: | - {"text": "Scheduled build of ${{ matrix.profile }} package for Windows failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} diff --git a/.tool-versions b/.tool-versions index a988325fa..824207a4a 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1,2 @@ -erlang 25.3.2-2 -elixir 
1.14.5-otp-25 +erlang 26.1.2-1 +elixir 1.15.7-otp-26 diff --git a/README-CN.md b/README-CN.md index 8c6f8d8c3..f989b9bed 100644 --- a/README-CN.md +++ b/README-CN.md @@ -77,7 +77,7 @@ EMQX Cloud 文档:[docs.emqx.com/zh/cloud/latest/](https://docs.emqx.com/zh/cl 优雅的跨平台 MQTT 5.0 客户端工具,提供了桌面端、命令行、Web 三种版本,帮助您更快的开发和调试 MQTT 服务和应用。 -- [车联网平台搭建从入门到精通 ](https://www.emqx.com/zh/blog/category/internet-of-vehicles) +- [车联网平台搭建从入门到精通](https://www.emqx.com/zh/blog/category/internet-of-vehicles) 结合 EMQ 在车联网领域的实践经验,从协议选择等理论知识,到平台架构设计等实战操作,分享如何搭建一个可靠、高效、符合行业场景需求的车联网平台。 diff --git a/apps/emqx/include/emqx.hrl b/apps/emqx/include/emqx.hrl index 86a64d8bb..654d96d8c 100644 --- a/apps/emqx/include/emqx.hrl +++ b/apps/emqx/include/emqx.hrl @@ -39,9 +39,6 @@ %% System topic -define(SYSTOP, <<"$SYS/">>). -%% Queue topic --define(QUEUE, <<"$queue/">>). - %%-------------------------------------------------------------------- %% alarms %%-------------------------------------------------------------------- diff --git a/apps/emqx/include/emqx_mqtt.hrl b/apps/emqx/include/emqx_mqtt.hrl index 4d0188f71..53fed0f9d 100644 --- a/apps/emqx/include/emqx_mqtt.hrl +++ b/apps/emqx/include/emqx_mqtt.hrl @@ -55,6 +55,17 @@ %% MQTT-3.1.1 and MQTT-5.0 [MQTT-4.7.3-3] -define(MAX_TOPIC_LEN, 65535). +%%-------------------------------------------------------------------- +%% MQTT Share-Sub Internal +%%-------------------------------------------------------------------- + +-record(share, {group :: emqx_types:group(), topic :: emqx_types:topic()}). + +%% guards +-define(IS_TOPIC(T), + (is_binary(T) orelse is_record(T, share)) +). + %%-------------------------------------------------------------------- %% MQTT QoS Levels %%-------------------------------------------------------------------- @@ -661,13 +672,10 @@ end). -define(PACKET(Type), #mqtt_packet{header = #mqtt_packet_header{type = Type}}). -define(SHARE, "$share"). +-define(QUEUE, "$queue"). -define(SHARE(Group, Topic), emqx_topic:join([<>, Group, Topic])). 
--define(IS_SHARE(Topic), - case Topic of - <> -> true; - _ -> false - end -). + +-define(REDISPATCH_TO(GROUP, TOPIC), {GROUP, TOPIC}). -define(SHARE_EMPTY_FILTER, share_subscription_topic_cannot_be_empty). -define(SHARE_EMPTY_GROUP, share_subscription_group_name_cannot_be_empty). diff --git a/apps/emqx/include/emqx_trace.hrl b/apps/emqx/include/emqx_trace.hrl index 62028bcc0..3f9316727 100644 --- a/apps/emqx/include/emqx_trace.hrl +++ b/apps/emqx/include/emqx_trace.hrl @@ -32,6 +32,5 @@ -define(SHARD, ?COMMON_SHARD). -define(MAX_SIZE, 30). --define(OWN_KEYS, [level, filters, filter_default, handlers]). -endif. diff --git a/apps/emqx/include/http_api.hrl b/apps/emqx/include/http_api.hrl index ba1438374..0f6372584 100644 --- a/apps/emqx/include/http_api.hrl +++ b/apps/emqx/include/http_api.hrl @@ -17,6 +17,7 @@ %% HTTP API Auth -define(BAD_USERNAME_OR_PWD, 'BAD_USERNAME_OR_PWD'). -define(BAD_API_KEY_OR_SECRET, 'BAD_API_KEY_OR_SECRET'). +-define(API_KEY_NOT_ALLOW_MSG, <<"This API Key don't have permission to access this resource">>). %% Bad Request -define(BAD_REQUEST, 'BAD_REQUEST'). diff --git a/apps/emqx/include/logger.hrl b/apps/emqx/include/logger.hrl index d803f67be..a40f9dc9c 100644 --- a/apps/emqx/include/logger.hrl +++ b/apps/emqx/include/logger.hrl @@ -40,7 +40,9 @@ end ). +-define(AUDIT_HANDLER, emqx_audit). -define(TRACE_FILTER, emqx_trace_filter). +-define(OWN_KEYS, [level, filters, filter_default, handlers]). -define(TRACE(Tag, Msg, Meta), ?TRACE(debug, Tag, Msg, Meta)). @@ -61,25 +63,35 @@ ) end). --define(AUDIT(_Level_, _From_, _Meta_), begin - case emqx_config:get([log, audit], #{enable => false}) of - #{enable := false} -> +-ifdef(EMQX_RELEASE_EDITION). + +-if(?EMQX_RELEASE_EDITION == ee). 
+ +-define(AUDIT(_LevelFun_, _MetaFun_), begin + case logger_config:get(logger, ?AUDIT_HANDLER) of + {error, {not_found, _}} -> ok; - #{enable := true, level := _AllowLevel_} -> + {ok, Handler = #{level := _AllowLevel_}} -> + _Level_ = _LevelFun_, case logger:compare_levels(_AllowLevel_, _Level_) of _R_ when _R_ == lt; _R_ == eq -> - emqx_trace:log( - _Level_, - [{emqx_audit, fun(L, _) -> L end, undefined, undefined}], - _Msg = undefined, - _Meta_#{from => _From_} - ); - gt -> + emqx_audit:log(_Level_, _MetaFun_, Handler); + _ -> ok end end end). +-else. +%% Only for compile pass, ce edition will not call it +-define(AUDIT(_L_, _M_), _ = {_L_, _M_}). +-endif. + +-else. +%% Only for compile pass, ce edition will not call it +-define(AUDIT(_L_, _M_), _ = {_L_, _M_}). +-endif. + %% print to 'user' group leader -define(ULOG(Fmt, Args), io:format(user, Fmt, Args)). -define(ELOG(Fmt, Args), io:format(standard_error, Fmt, Args)). diff --git a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl index f22a4f97e..5e1297df6 100644 --- a/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl +++ b/apps/emqx/integration_test/emqx_persistent_session_ds_SUITE.erl @@ -9,14 +9,9 @@ -include_lib("stdlib/include/assert.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("emqx/include/asserts.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). --include_lib("emqx/src/emqx_persistent_session_ds.hrl"). - --define(DEFAULT_KEYSPACE, default). --define(DS_SHARD_ID, <<"local">>). --define(DS_SHARD, {?DEFAULT_KEYSPACE, ?DS_SHARD_ID}). - -import(emqx_common_test_helpers, [on_exit/1]). 
%%------------------------------------------------------------------------------ @@ -45,7 +40,7 @@ init_per_testcase(TestCase, Config) when Cluster = cluster(#{n => 1}), ClusterOpts = #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}, NodeSpecs = emqx_cth_cluster:mk_nodespecs(Cluster, ClusterOpts), - Nodes = emqx_cth_cluster:start(Cluster, ClusterOpts), + Nodes = emqx_cth_cluster:start(NodeSpecs), [ {cluster, Cluster}, {node_specs, NodeSpecs}, @@ -53,12 +48,36 @@ init_per_testcase(TestCase, Config) when {nodes, Nodes} | Config ]; +init_per_testcase(t_session_gc = TestCase, Config) -> + Opts = #{ + n => 3, + roles => [core, core, replicant], + extra_emqx_conf => + "\n session_persistence {" + "\n last_alive_update_interval = 500ms " + "\n session_gc_interval = 2s " + "\n session_gc_batch_size = 1 " + "\n }" + }, + Cluster = cluster(Opts), + ClusterOpts = #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}, + NodeSpecs = emqx_cth_cluster:mk_nodespecs(Cluster, ClusterOpts), + Nodes = emqx_cth_cluster:start(Cluster, ClusterOpts), + [ + {cluster, Cluster}, + {node_specs, NodeSpecs}, + {cluster_opts, ClusterOpts}, + {nodes, Nodes}, + {gc_interval, timer:seconds(2)} + | Config + ]; init_per_testcase(_TestCase, Config) -> Config. 
end_per_testcase(TestCase, Config) when TestCase =:= t_session_subscription_idempotency; - TestCase =:= t_session_unsubscription_idempotency + TestCase =:= t_session_unsubscription_idempotency; + TestCase =:= t_session_gc -> Nodes = ?config(nodes, Config), emqx_common_test_helpers:call_janitor(60_000), @@ -72,32 +91,38 @@ end_per_testcase(_TestCase, _Config) -> %% Helper fns %%------------------------------------------------------------------------------ -cluster(#{n := N}) -> - Spec = #{role => core, apps => app_specs()}, +cluster(#{n := N} = Opts) -> + MkRole = fun(M) -> + case maps:get(roles, Opts, undefined) of + undefined -> + core; + Roles -> + lists:nth(M, Roles) + end + end, + MkSpec = fun(M) -> #{role => MkRole(M), apps => app_specs(Opts)} end, lists:map( fun(M) -> Name = list_to_atom("ds_SUITE" ++ integer_to_list(M)), - {Name, Spec} + {Name, MkSpec(M)} end, lists:seq(1, N) ). app_specs() -> + app_specs(_Opts = #{}). + +app_specs(Opts) -> + ExtraEMQXConf = maps:get(extra_emqx_conf, Opts, ""), [ emqx_durable_storage, - {emqx, "persistent_session_store = {ds = true}"} + {emqx, "session_persistence = {enable = true}" ++ ExtraEMQXConf} ]. get_mqtt_port(Node, Type) -> {_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]), Port. -get_all_iterator_ids(Node) -> - Fn = fun(K, _V, Acc) -> [K | Acc] end, - erpc:call(Node, fun() -> - emqx_ds_storage_layer:foldl_iterator_prefix(?DS_SHARD, <<>>, Fn, []) - end). 
- wait_nodeup(Node) -> ?retry( _Sleep0 = 500, @@ -127,33 +152,37 @@ start_client(Opts0 = #{}) -> restart_node(Node, NodeSpec) -> ?tp(will_restart_node, #{}), - ?tp(notice, "restarting node", #{node => Node}), - true = monitor_node(Node, true), - ok = erpc:call(Node, init, restart, []), - receive - {nodedown, Node} -> - ok - after 10_000 -> - ct:fail("node ~p didn't stop", [Node]) - end, - ?tp(notice, "waiting for nodeup", #{node => Node}), + emqx_cth_cluster:restart(Node, NodeSpec), wait_nodeup(Node), - wait_gen_rpc_down(NodeSpec), - ?tp(notice, "restarting apps", #{node => Node}), - Apps = maps:get(apps, NodeSpec), - ok = erpc:call(Node, emqx_cth_suite, load_apps, [Apps]), - _ = erpc:call(Node, emqx_cth_suite, start_apps, [Apps, NodeSpec]), - %% have to re-inject this so that we may stop the node succesfully at the - %% end.... - ok = emqx_cth_cluster:set_node_opts(Node, NodeSpec), - ok = snabbkaffe:forward_trace(Node), - ?tp(notice, "node restarted", #{node => Node}), ?tp(restarted_node, #{}), ok. is_persistent_connect_opts(#{properties := #{'Session-Expiry-Interval' := EI}}) -> EI > 0. +list_all_sessions(Node) -> + erpc:call(Node, emqx_persistent_session_ds, list_all_sessions, []). + +list_all_subscriptions(Node) -> + erpc:call(Node, emqx_persistent_session_ds, list_all_subscriptions, []). + +list_all_pubranges(Node) -> + erpc:call(Node, emqx_persistent_session_ds, list_all_pubranges, []). + +prop_only_cores_run_gc(CoreNodes) -> + {"only core nodes run gc", fun(Trace) -> ?MODULE:prop_only_cores_run_gc(Trace, CoreNodes) end}. +prop_only_cores_run_gc(Trace, CoreNodes) -> + GCNodes = lists:usort([ + N + || #{ + ?snk_kind := K, + ?snk_meta := #{node := N} + } <- Trace, + lists:member(K, [ds_session_gc, ds_session_gc_lock_taken]), + N =/= node() + ]), + ?assertEqual(lists:usort(CoreNodes), GCNodes). 
+ %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ @@ -233,10 +262,10 @@ t_session_subscription_idempotency(Config) -> end, fun(Trace) -> ct:pal("trace:\n ~p", [Trace]), - SubTopicFilterWords = emqx_topic:words(SubTopicFilter), + ConnInfo = #{}, ?assertMatch( - {ok, #{}, #{SubTopicFilterWords := #{}}}, - erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId]) + #{subscriptions := #{SubTopicFilter := #{}}}, + erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId, ConnInfo]) ) end ), @@ -307,9 +336,10 @@ t_session_unsubscription_idempotency(Config) -> end, fun(Trace) -> ct:pal("trace:\n ~p", [Trace]), + ConnInfo = #{}, ?assertMatch( - {ok, #{}, Subs = #{}} when map_size(Subs) =:= 0, - erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId]) + #{subscriptions := Subs = #{}} when map_size(Subs) =:= 0, + erpc:call(Node1, emqx_persistent_session_ds, session_open, [ClientId, ConnInfo]) ), ok end @@ -370,18 +400,12 @@ do_t_session_discard(Params) -> _Attempts0 = 50, true = map_size(emqx_persistent_session_ds:list_all_streams()) > 0 ), - ?retry( - _Sleep0 = 100, - _Attempts0 = 50, - true = map_size(emqx_persistent_session_ds:list_all_iterators()) > 0 - ), ok = emqtt:stop(Client0), ?tp(notice, "disconnected", #{}), ?tp(notice, "reconnecting", #{}), - %% we still have iterators and streams + %% we still have streams ?assert(map_size(emqx_persistent_session_ds:list_all_streams()) > 0), - ?assert(map_size(emqx_persistent_session_ds:list_all_iterators()) > 0), Client1 = start_client(ReconnectOpts), {ok, _} = emqtt:connect(Client1), ?assertEqual([], emqtt:subscriptions(Client1)), @@ -394,7 +418,7 @@ do_t_session_discard(Params) -> ?assertEqual(#{}, emqx_persistent_session_ds:list_all_subscriptions()), ?assertEqual([], emqx_persistent_session_ds_router:topics()), ?assertEqual(#{}, 
emqx_persistent_session_ds:list_all_streams()), - ?assertEqual(#{}, emqx_persistent_session_ds:list_all_iterators()), + ?assertEqual(#{}, emqx_persistent_session_ds:list_all_pubranges()), ok = emqtt:stop(Client1), ?tp(notice, "disconnected", #{}), @@ -406,3 +430,201 @@ do_t_session_discard(Params) -> end ), ok. + +t_session_expiration1(Config) -> + ClientId = atom_to_binary(?FUNCTION_NAME), + Opts = #{ + clientid => ClientId, + sequence => [ + {#{clean_start => false, properties => #{'Session-Expiry-Interval' => 30}}, #{}}, + {#{clean_start => false, properties => #{'Session-Expiry-Interval' => 1}}, #{}}, + {#{clean_start => false, properties => #{'Session-Expiry-Interval' => 30}}, #{}} + ] + }, + do_t_session_expiration(Config, Opts). + +t_session_expiration2(Config) -> + ClientId = atom_to_binary(?FUNCTION_NAME), + Opts = #{ + clientid => ClientId, + sequence => [ + {#{clean_start => false, properties => #{'Session-Expiry-Interval' => 30}}, #{}}, + {#{clean_start => false, properties => #{'Session-Expiry-Interval' => 30}}, #{ + 'Session-Expiry-Interval' => 1 + }}, + {#{clean_start => false, properties => #{'Session-Expiry-Interval' => 30}}, #{}} + ] + }, + do_t_session_expiration(Config, Opts). 
+ +do_t_session_expiration(_Config, Opts) -> + #{ + clientid := ClientId, + sequence := [ + {FirstConn, FirstDisconn}, + {SecondConn, SecondDisconn}, + {ThirdConn, ThirdDisconn} + ] + } = Opts, + CommonParams = #{proto_ver => v5, clientid => ClientId}, + ?check_trace( + begin + Topic = <<"some/topic">>, + Params0 = maps:merge(CommonParams, FirstConn), + Client0 = start_client(Params0), + {ok, _} = emqtt:connect(Client0), + {ok, _, [?RC_GRANTED_QOS_2]} = emqtt:subscribe(Client0, Topic, ?QOS_2), + Subs0 = emqx_persistent_session_ds:list_all_subscriptions(), + ?assertEqual(1, map_size(Subs0), #{subs => Subs0}), + Info0 = maps:from_list(emqtt:info(Client0)), + ?assertEqual(0, maps:get(session_present, Info0), #{info => Info0}), + emqtt:disconnect(Client0, ?RC_NORMAL_DISCONNECTION, FirstDisconn), + + Params1 = maps:merge(CommonParams, SecondConn), + Client1 = start_client(Params1), + {ok, _} = emqtt:connect(Client1), + Info1 = maps:from_list(emqtt:info(Client1)), + ?assertEqual(1, maps:get(session_present, Info1), #{info => Info1}), + Subs1 = emqtt:subscriptions(Client1), + ?assertEqual([], Subs1), + emqtt:disconnect(Client1, ?RC_NORMAL_DISCONNECTION, SecondDisconn), + + ct:sleep(1_500), + + Params2 = maps:merge(CommonParams, ThirdConn), + Client2 = start_client(Params2), + {ok, _} = emqtt:connect(Client2), + Info2 = maps:from_list(emqtt:info(Client2)), + ?assertEqual(0, maps:get(session_present, Info2), #{info => Info2}), + Subs2 = emqtt:subscriptions(Client2), + ?assertEqual([], Subs2), + emqtt:publish(Client2, Topic, <<"payload">>), + ?assertNotReceive({publish, #{topic := Topic}}), + %% ensure subscriptions are absent from table. + ?assertEqual(#{}, emqx_persistent_session_ds:list_all_subscriptions()), + emqtt:disconnect(Client2, ?RC_NORMAL_DISCONNECTION, ThirdDisconn), + + ok + end, + [] + ), + ok. 
+ +t_session_gc(Config) -> + GCInterval = ?config(gc_interval, Config), + [Node1, Node2, Node3] = Nodes = ?config(nodes, Config), + CoreNodes = [Node1, Node2], + [ + Port1, + Port2, + Port3 + ] = lists:map(fun(N) -> get_mqtt_port(N, tcp) end, Nodes), + CommonParams = #{ + clean_start => false, + proto_ver => v5 + }, + StartClient = fun(ClientId, Port, ExpiryInterval) -> + Params = maps:merge(CommonParams, #{ + clientid => ClientId, + port => Port, + properties => #{'Session-Expiry-Interval' => ExpiryInterval} + }), + Client = start_client(Params), + {ok, _} = emqtt:connect(Client), + Client + end, + + ?check_trace( + begin + ClientId0 = <<"session_gc0">>, + Client0 = StartClient(ClientId0, Port1, 30), + + ClientId1 = <<"session_gc1">>, + Client1 = StartClient(ClientId1, Port2, 1), + + ClientId2 = <<"session_gc2">>, + Client2 = StartClient(ClientId2, Port3, 1), + + lists:foreach( + fun(Client) -> + Topic = <<"some/topic">>, + Payload = <<"hi">>, + {ok, _, [?RC_GRANTED_QOS_1]} = emqtt:subscribe(Client, Topic, ?QOS_1), + {ok, _} = emqtt:publish(Client, Topic, Payload, ?QOS_1), + ok + end, + [Client0, Client1, Client2] + ), + + %% Clients are still alive; no session is garbage collected. + Res0 = ?block_until( + #{ + ?snk_kind := ds_session_gc, + ?snk_span := {complete, _}, + ?snk_meta := #{node := N} + } when + N =/= node(), + 3 * GCInterval + 1_000 + ), + ?assertMatch({ok, _}, Res0), + {ok, #{?snk_meta := #{time := T0}}} = Res0, + Sessions0 = list_all_sessions(Node1), + Subs0 = list_all_subscriptions(Node1), + ?assertEqual(3, map_size(Sessions0), #{sessions => Sessions0}), + ?assertEqual(3, map_size(Subs0), #{subs => Subs0}), + + %% Now we disconnect 2 of them; only those should be GC'ed. 
+ ?assertMatch( + {ok, {ok, _}}, + ?wait_async_action( + emqtt:stop(Client1), + #{?snk_kind := terminate}, + 1_000 + ) + ), + ct:pal("disconnected client1"), + ?assertMatch( + {ok, {ok, _}}, + ?wait_async_action( + emqtt:stop(Client2), + #{?snk_kind := terminate}, + 1_000 + ) + ), + ct:pal("disconnected client2"), + ?assertMatch( + {ok, _}, + ?block_until( + #{ + ?snk_kind := ds_session_gc_cleaned, + ?snk_meta := #{node := N, time := T}, + session_ids := [ClientId1] + } when + N =/= node() andalso T > T0, + 4 * GCInterval + 1_000 + ) + ), + ?assertMatch( + {ok, _}, + ?block_until( + #{ + ?snk_kind := ds_session_gc_cleaned, + ?snk_meta := #{node := N, time := T}, + session_ids := [ClientId2] + } when + N =/= node() andalso T > T0, + 4 * GCInterval + 1_000 + ) + ), + Sessions1 = list_all_sessions(Node1), + Subs1 = list_all_subscriptions(Node1), + ?assertEqual(1, map_size(Sessions1), #{sessions => Sessions1}), + ?assertEqual(1, map_size(Subs1), #{subs => Subs1}), + + ok + end, + [ + prop_only_cores_run_gc(CoreNodes) + ] + ), + ok. diff --git a/apps/emqx/priv/bpapi.versions b/apps/emqx/priv/bpapi.versions index 7042f5186..e2c726fe5 100644 --- a/apps/emqx/priv/bpapi.versions +++ b/apps/emqx/priv/bpapi.versions @@ -39,10 +39,12 @@ {emqx_mgmt_api_plugins,2}. {emqx_mgmt_cluster,1}. {emqx_mgmt_cluster,2}. +{emqx_mgmt_data_backup,1}. {emqx_mgmt_trace,1}. {emqx_mgmt_trace,2}. {emqx_node_rebalance,1}. {emqx_node_rebalance,2}. +{emqx_node_rebalance,3}. {emqx_node_rebalance_api,1}. {emqx_node_rebalance_api,2}. {emqx_node_rebalance_evacuation,1}. 
diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 71f581267..7d160e9cf 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -27,9 +27,9 @@ {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}}, {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}}, - {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.7"}}}, + {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.8"}}}, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.16"}}}, - {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.1"}}}, + {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.2.2"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.40.0"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, @@ -45,7 +45,7 @@ {meck, "0.9.2"}, {proper, "1.4.0"}, {bbmustache, "1.10.0"}, - {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.1"}}} + {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.7"}}} ]}, {extra_src_dirs, [{"test", [recursive]}, {"integration_test", [recursive]}]} @@ -55,7 +55,7 @@ {meck, "0.9.2"}, {proper, "1.4.0"}, {bbmustache, "1.10.0"}, - {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.1"}}} + {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.9.7"}}} ]}, {extra_src_dirs, [{"test", [recursive]}]} ]} diff --git a/apps/emqx/rebar.config.script b/apps/emqx/rebar.config.script index 174663e80..d68998c4c 100644 --- a/apps/emqx/rebar.config.script +++ b/apps/emqx/rebar.config.script @@ -24,7 +24,7 @@ IsQuicSupp = fun() -> end, Bcrypt = {bcrypt, {git, "https://github.com/emqx/erlang-bcrypt.git", {tag, "0.6.0"}}}, -Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.202"}}}. +Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.303"}}}. 
Dialyzer = fun(Config) -> {dialyzer, OldDialyzerConfig} = lists:keyfind(dialyzer, 1, Config), diff --git a/apps/emqx/src/bhvrs/emqx_db_backup.erl b/apps/emqx/src/bhvrs/emqx_db_backup.erl index fddbdb1d0..95a142c0e 100644 --- a/apps/emqx/src/bhvrs/emqx_db_backup.erl +++ b/apps/emqx/src/bhvrs/emqx_db_backup.erl @@ -16,4 +16,21 @@ -module(emqx_db_backup). +-type traverse_break_reason() :: over | migrate. + -callback backup_tables() -> [mria:table()]. + +%% validate the backup +%% return `ok` to traverse the next item +%% return `{ok, over}` to finish the traverse +%% return `{ok, migrate}` to call the migration callback +-callback validate_mnesia_backup(tuple()) -> + ok + | {ok, traverse_break_reason()} + | {error, term()}. + +-callback migrate_mnesia_backup(tuple()) -> {ok, tuple()} | {error, term()}. + +-optional_callbacks([validate_mnesia_backup/1, migrate_mnesia_backup/1]). + +-export_type([traverse_break_reason/0]). diff --git a/apps/emqx/src/config/emqx_config_logger.erl b/apps/emqx/src/config/emqx_config_logger.erl index c675edb52..ce74db8f0 100644 --- a/apps/emqx/src/config/emqx_config_logger.erl +++ b/apps/emqx/src/config/emqx_config_logger.erl @@ -23,8 +23,9 @@ -export([post_config_update/5]). -export([filter_audit/2]). +-include("logger.hrl"). + -define(LOG, [log]). --define(AUDIT_HANDLER, emqx_audit). add_handler() -> ok = emqx_config_handler:add_handler(?LOG, ?MODULE), @@ -95,6 +96,10 @@ update_log_handlers(NewHandlers) -> ok = application:set_env(kernel, logger, NewHandlers), ok. +%% Don't remove audit log handler here, we need record this removed action into audit log file. +%% we will remove audit log handler after audit log is record in emqx_audit:log/3. 
+update_log_handler({removed, ?AUDIT_HANDLER}) -> + ok; update_log_handler({removed, Id}) -> log_to_console("Config override: ~s is removed~n", [id_for_log(Id)]), logger:remove_handler(Id); diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index 0545f36a5..915a66f17 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -2,7 +2,7 @@ {application, emqx, [ {id, "emqx"}, {description, "EMQX Core"}, - {vsn, "5.1.14"}, + {vsn, "5.1.15"}, {modules, []}, {registered, []}, {applications, [ diff --git a/apps/emqx/src/emqx_broker.erl b/apps/emqx/src/emqx_broker.erl index 403e3757f..cc9cb98a6 100644 --- a/apps/emqx/src/emqx_broker.erl +++ b/apps/emqx/src/emqx_broker.erl @@ -118,18 +118,20 @@ create_tabs() -> %% Subscribe API %%------------------------------------------------------------------------------ --spec subscribe(emqx_types:topic()) -> ok. -subscribe(Topic) when is_binary(Topic) -> +-spec subscribe(emqx_types:topic() | emqx_types:share()) -> ok. +subscribe(Topic) when ?IS_TOPIC(Topic) -> subscribe(Topic, undefined). --spec subscribe(emqx_types:topic(), emqx_types:subid() | emqx_types:subopts()) -> ok. -subscribe(Topic, SubId) when is_binary(Topic), ?IS_SUBID(SubId) -> +-spec subscribe(emqx_types:topic() | emqx_types:share(), emqx_types:subid() | emqx_types:subopts()) -> + ok. +subscribe(Topic, SubId) when ?IS_TOPIC(Topic), ?IS_SUBID(SubId) -> subscribe(Topic, SubId, ?DEFAULT_SUBOPTS); -subscribe(Topic, SubOpts) when is_binary(Topic), is_map(SubOpts) -> +subscribe(Topic, SubOpts) when ?IS_TOPIC(Topic), is_map(SubOpts) -> subscribe(Topic, undefined, SubOpts). --spec subscribe(emqx_types:topic(), emqx_types:subid(), emqx_types:subopts()) -> ok. -subscribe(Topic, SubId, SubOpts0) when is_binary(Topic), ?IS_SUBID(SubId), is_map(SubOpts0) -> +-spec subscribe(emqx_types:topic() | emqx_types:share(), emqx_types:subid(), emqx_types:subopts()) -> + ok. 
+subscribe(Topic, SubId, SubOpts0) when ?IS_TOPIC(Topic), ?IS_SUBID(SubId), is_map(SubOpts0) -> SubOpts = maps:merge(?DEFAULT_SUBOPTS, SubOpts0), _ = emqx_trace:subscribe(Topic, SubId, SubOpts), SubPid = self(), @@ -151,13 +153,13 @@ with_subid(undefined, SubOpts) -> with_subid(SubId, SubOpts) -> maps:put(subid, SubId, SubOpts). -%% @private do_subscribe(Topic, SubPid, SubOpts) -> true = ets:insert(?SUBSCRIPTION, {SubPid, Topic}), - Group = maps:get(share, SubOpts, undefined), - do_subscribe(Group, Topic, SubPid, SubOpts). + do_subscribe2(Topic, SubPid, SubOpts). -do_subscribe(undefined, Topic, SubPid, SubOpts) -> +do_subscribe2(Topic, SubPid, SubOpts) when is_binary(Topic) -> + %% FIXME: subscribe shard bug + %% https://emqx.atlassian.net/browse/EMQX-10214 case emqx_broker_helper:get_sub_shard(SubPid, Topic) of 0 -> true = ets:insert(?SUBSCRIBER, {Topic, SubPid}), @@ -168,34 +170,40 @@ do_subscribe(undefined, Topic, SubPid, SubOpts) -> true = ets:insert(?SUBOPTION, {{Topic, SubPid}, maps:put(shard, I, SubOpts)}), call(pick({Topic, I}), {subscribe, Topic, I}) end; -%% Shared subscription -do_subscribe(Group, Topic, SubPid, SubOpts) -> +do_subscribe2(Topic = #share{group = Group, topic = RealTopic}, SubPid, SubOpts) when + is_binary(RealTopic) +-> true = ets:insert(?SUBOPTION, {{Topic, SubPid}, SubOpts}), - emqx_shared_sub:subscribe(Group, Topic, SubPid). + emqx_shared_sub:subscribe(Group, RealTopic, SubPid). %%-------------------------------------------------------------------- %% Unsubscribe API %%-------------------------------------------------------------------- --spec unsubscribe(emqx_types:topic()) -> ok. -unsubscribe(Topic) when is_binary(Topic) -> +-spec unsubscribe(emqx_types:topic() | emqx_types:share()) -> ok. 
+unsubscribe(Topic) when ?IS_TOPIC(Topic) -> SubPid = self(), case ets:lookup(?SUBOPTION, {Topic, SubPid}) of [{_, SubOpts}] -> - _ = emqx_broker_helper:reclaim_seq(Topic), _ = emqx_trace:unsubscribe(Topic, SubOpts), do_unsubscribe(Topic, SubPid, SubOpts); [] -> ok end. +-spec do_unsubscribe(emqx_types:topic() | emqx_types:share(), pid(), emqx_types:subopts()) -> + ok. do_unsubscribe(Topic, SubPid, SubOpts) -> true = ets:delete(?SUBOPTION, {Topic, SubPid}), true = ets:delete_object(?SUBSCRIPTION, {SubPid, Topic}), - Group = maps:get(share, SubOpts, undefined), - do_unsubscribe(Group, Topic, SubPid, SubOpts). + do_unsubscribe2(Topic, SubPid, SubOpts). -do_unsubscribe(undefined, Topic, SubPid, SubOpts) -> +-spec do_unsubscribe2(emqx_types:topic() | emqx_types:share(), pid(), emqx_types:subopts()) -> + ok. +do_unsubscribe2(Topic, SubPid, SubOpts) when + is_binary(Topic), is_pid(SubPid), is_map(SubOpts) +-> + _ = emqx_broker_helper:reclaim_seq(Topic), case maps:get(shard, SubOpts, 0) of 0 -> true = ets:delete_object(?SUBSCRIBER, {Topic, SubPid}), @@ -205,7 +213,9 @@ do_unsubscribe(undefined, Topic, SubPid, SubOpts) -> true = ets:delete_object(?SUBSCRIBER, {{shard, Topic, I}, SubPid}), cast(pick({Topic, I}), {unsubscribed, Topic, I}) end; -do_unsubscribe(Group, Topic, SubPid, _SubOpts) -> +do_unsubscribe2(#share{group = Group, topic = Topic}, SubPid, _SubOpts) when + is_binary(Group), is_binary(Topic), is_pid(SubPid) +-> emqx_shared_sub:unsubscribe(Group, Topic, SubPid). %%-------------------------------------------------------------------- @@ -306,7 +316,9 @@ aggre([], true, Acc) -> lists:usort(Acc). %% @doc Forward message to another node. --spec forward(node(), emqx_types:topic(), emqx_types:delivery(), RpcMode :: sync | async) -> +-spec forward( + node(), emqx_types:topic() | emqx_types:share(), emqx_types:delivery(), RpcMode :: sync | async +) -> emqx_types:deliver_result(). 
forward(Node, To, Delivery, async) -> true = emqx_broker_proto_v1:forward_async(Node, To, Delivery), @@ -329,7 +341,8 @@ forward(Node, To, Delivery, sync) -> Result end. --spec dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result(). +-spec dispatch(emqx_types:topic() | emqx_types:share(), emqx_types:delivery()) -> + emqx_types:deliver_result(). dispatch(Topic, Delivery = #delivery{}) when is_binary(Topic) -> case emqx:is_running() of true -> @@ -353,7 +366,11 @@ inc_dropped_cnt(Msg) -> end. -compile({inline, [subscribers/1]}). --spec subscribers(emqx_types:topic() | {shard, emqx_types:topic(), non_neg_integer()}) -> +-spec subscribers( + emqx_types:topic() + | emqx_types:share() + | {shard, emqx_types:topic() | emqx_types:share(), non_neg_integer()} +) -> [pid()]. subscribers(Topic) when is_binary(Topic) -> lookup_value(?SUBSCRIBER, Topic, []); @@ -372,7 +389,7 @@ subscriber_down(SubPid) -> SubOpts when is_map(SubOpts) -> _ = emqx_broker_helper:reclaim_seq(Topic), true = ets:delete(?SUBOPTION, {Topic, SubPid}), - do_unsubscribe(undefined, Topic, SubPid, SubOpts); + do_unsubscribe2(Topic, SubPid, SubOpts); undefined -> ok end @@ -386,7 +403,7 @@ subscriber_down(SubPid) -> %%-------------------------------------------------------------------- -spec subscriptions(pid() | emqx_types:subid()) -> - [{emqx_types:topic(), emqx_types:subopts()}]. + [{emqx_types:topic() | emqx_types:share(), emqx_types:subopts()}]. subscriptions(SubPid) when is_pid(SubPid) -> [ {Topic, lookup_value(?SUBOPTION, {Topic, SubPid}, #{})} @@ -400,20 +417,22 @@ subscriptions(SubId) -> [] end. --spec subscriptions_via_topic(emqx_types:topic()) -> [emqx_types:subopts()]. +-spec subscriptions_via_topic(emqx_types:topic() | emqx_types:share()) -> [emqx_types:subopts()]. subscriptions_via_topic(Topic) -> MatchSpec = [{{{Topic, '_'}, '_'}, [], ['$_']}], ets:select(?SUBOPTION, MatchSpec). --spec subscribed(pid() | emqx_types:subid(), emqx_types:topic()) -> boolean(). 
+-spec subscribed(
+    pid() | emqx_types:subid(), emqx_types:topic() | emqx_types:share()
+) -> boolean().
 subscribed(SubPid, Topic) when is_pid(SubPid) ->
     ets:member(?SUBOPTION, {Topic, SubPid});
 subscribed(SubId, Topic) when ?IS_SUBID(SubId) ->
     SubPid = emqx_broker_helper:lookup_subpid(SubId),
     ets:member(?SUBOPTION, {Topic, SubPid}).
 
--spec get_subopts(pid(), emqx_types:topic()) -> maybe(emqx_types:subopts()).
-get_subopts(SubPid, Topic) when is_pid(SubPid), is_binary(Topic) ->
+-spec get_subopts(pid(), emqx_types:topic() | emqx_types:share()) -> maybe(emqx_types:subopts()).
+get_subopts(SubPid, Topic) when is_pid(SubPid), ?IS_TOPIC(Topic) ->
     lookup_value(?SUBOPTION, {Topic, SubPid});
 get_subopts(SubId, Topic) when ?IS_SUBID(SubId) ->
     case emqx_broker_helper:lookup_subpid(SubId) of
@@ -423,7 +442,7 @@ get_subopts(SubId, Topic) when ?IS_SUBID(SubId) ->
             undefined
     end.
 
--spec set_subopts(emqx_types:topic(), emqx_types:subopts()) -> boolean().
+-spec set_subopts(emqx_types:topic() | emqx_types:share(), emqx_types:subopts()) -> boolean().
 set_subopts(Topic, NewOpts) when is_binary(Topic), is_map(NewOpts) ->
     set_subopts(self(), Topic, NewOpts).
 
@@ -437,7 +456,7 @@ set_subopts(SubPid, Topic, NewOpts) ->
             false
     end.
 
--spec topics() -> [emqx_types:topic()].
+-spec topics() -> [emqx_types:topic() | emqx_types:share()].
 topics() ->
     emqx_router:topics().
 
@@ -542,7 +561,8 @@ code_change(_OldVsn, State, _Extra) ->
 %% Internal functions
 %%--------------------------------------------------------------------
 
--spec do_dispatch(emqx_types:topic(), emqx_types:delivery()) -> emqx_types:deliver_result().
+-spec do_dispatch(emqx_types:topic() | emqx_types:share(), emqx_types:delivery()) ->
+    emqx_types:deliver_result().
 do_dispatch(Topic, #delivery{message = Msg}) ->
     DispN = lists:foldl(
         fun(Sub, N) ->
@@ -560,6 +580,8 @@ do_dispatch(Topic, #delivery{message = Msg}) ->
             {ok, DispN}
     end.
 
+%% Do not dispatch to shared subscribers here.
+%% we do it in `emqx_shared_sub.erl` with configured strategy do_dispatch(SubPid, Topic, Msg) when is_pid(SubPid) -> case erlang:is_process_alive(SubPid) of true -> diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index c2f62c840..816ab7b2b 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -423,6 +423,7 @@ handle_in( {ok, Channel} end; handle_in( + %% TODO: Why discard the Reason Code? ?PUBREC_PACKET(PacketId, _ReasonCode, Properties), Channel = #channel{clientinfo = ClientInfo, session = Session} @@ -476,60 +477,27 @@ handle_in( ok = emqx_metrics:inc('packets.pubcomp.missed'), {ok, Channel} end; -handle_in( - SubPkt = ?SUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), - Channel = #channel{clientinfo = ClientInfo} -) -> - case emqx_packet:check(SubPkt) of - ok -> - TopicFilters0 = parse_topic_filters(TopicFilters), - TopicFilters1 = enrich_subopts_subid(Properties, TopicFilters0), - TupleTopicFilters0 = check_sub_authzs(TopicFilters1, Channel), - HasAuthzDeny = lists:any( - fun({_TopicFilter, ReasonCode}) -> - ReasonCode =:= ?RC_NOT_AUTHORIZED - end, - TupleTopicFilters0 - ), - DenyAction = emqx:get_config([authorization, deny_action], ignore), - case DenyAction =:= disconnect andalso HasAuthzDeny of - true -> - handle_out(disconnect, ?RC_NOT_AUTHORIZED, Channel); - false -> - TopicFilters2 = [ - TopicFilter - || {TopicFilter, ?RC_SUCCESS} <- TupleTopicFilters0 - ], - TopicFilters3 = run_hooks( - 'client.subscribe', - [ClientInfo, Properties], - TopicFilters2 - ), - {TupleTopicFilters1, NChannel} = process_subscribe( - TopicFilters3, - Properties, - Channel - ), - TupleTopicFilters2 = - lists:foldl( - fun - ({{Topic, Opts = #{deny_subscription := true}}, _QoS}, Acc) -> - Key = {Topic, maps:without([deny_subscription], Opts)}, - lists:keyreplace(Key, 1, Acc, {Key, ?RC_UNSPECIFIED_ERROR}); - (Tuple = {Key, _Value}, Acc) -> - lists:keyreplace(Key, 1, Acc, Tuple) - end, - TupleTopicFilters0, - 
TupleTopicFilters1 - ), - ReasonCodes2 = [ - ReasonCode - || {_TopicFilter, ReasonCode} <- TupleTopicFilters2 - ], - handle_out(suback, {PacketId, ReasonCodes2}, NChannel) - end; - {error, ReasonCode} -> - handle_out(disconnect, ReasonCode, Channel) +handle_in(SubPkt = ?SUBSCRIBE_PACKET(PacketId, _Properties, _TopicFilters0), Channel0) -> + Pipe = pipeline( + [ + fun check_subscribe/2, + fun enrich_subscribe/2, + %% TODO && FIXME (EMQX-10786): mount topic before authz check. + fun check_sub_authzs/2, + fun check_sub_caps/2 + ], + SubPkt, + Channel0 + ), + case Pipe of + {ok, NPkt = ?SUBSCRIBE_PACKET(_PacketId, TFChecked), Channel} -> + {TFSubedWithNRC, NChannel} = process_subscribe(run_sub_hooks(NPkt, Channel), Channel), + ReasonCodes = gen_reason_codes(TFChecked, TFSubedWithNRC), + handle_out(suback, {PacketId, ReasonCodes}, NChannel); + {error, {disconnect, RC}, Channel} -> + %% funcs in pipeline always cause action: `disconnect` + %% And Only one ReasonCode in DISCONNECT packet + handle_out(disconnect, RC, Channel) end; handle_in( Packet = ?UNSUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), @@ -540,7 +508,7 @@ handle_in( TopicFilters1 = run_hooks( 'client.unsubscribe', [ClientInfo, Properties], - parse_topic_filters(TopicFilters) + parse_raw_topic_filters(TopicFilters) ), {ReasonCodes, NChannel} = process_unsubscribe(TopicFilters1, Properties, Channel), handle_out(unsuback, {PacketId, ReasonCodes}, NChannel); @@ -782,32 +750,14 @@ after_message_acked(ClientInfo, Msg, PubAckProps) -> %% Process Subscribe %%-------------------------------------------------------------------- --compile({inline, [process_subscribe/3]}). -process_subscribe(TopicFilters, SubProps, Channel) -> - process_subscribe(TopicFilters, SubProps, Channel, []). +process_subscribe(TopicFilters, Channel) -> + process_subscribe(TopicFilters, Channel, []). 
-process_subscribe([], _SubProps, Channel, Acc) ->
+process_subscribe([], Channel, Acc) ->
     {lists:reverse(Acc), Channel};
-process_subscribe([Topic = {TopicFilter, SubOpts} | More], SubProps, Channel, Acc) ->
-    case check_sub_caps(TopicFilter, SubOpts, Channel) of
-        ok ->
-            {ReasonCode, NChannel} = do_subscribe(
-                TopicFilter,
-                SubOpts#{sub_props => SubProps},
-                Channel
-            ),
-            process_subscribe(More, SubProps, NChannel, [{Topic, ReasonCode} | Acc]);
-        {error, ReasonCode} ->
-            ?SLOG(
-                warning,
-                #{
-                    msg => "cannot_subscribe_topic_filter",
-                    reason => emqx_reason_codes:name(ReasonCode)
-                },
-                #{topic => TopicFilter}
-            ),
-            process_subscribe(More, SubProps, Channel, [{Topic, ReasonCode} | Acc])
-    end.
+process_subscribe([Filter = {TopicFilter, SubOpts} | More], Channel, Acc) ->
+    {NReasonCode, NChannel} = do_subscribe(TopicFilter, SubOpts, Channel),
+    process_subscribe(More, NChannel, [{Filter, NReasonCode} | Acc]).
 
 do_subscribe(
     TopicFilter,
@@ -818,11 +768,13 @@ do_subscribe(
         session = Session
     }
 ) ->
+    %% TODO && FIXME (EMQX-10786): mount topic before authz check.
     NTopicFilter = emqx_mountpoint:mount(MountPoint, TopicFilter),
-    NSubOpts = enrich_subopts(maps:merge(?DEFAULT_SUBOPTS, SubOpts), Channel),
-    case emqx_session:subscribe(ClientInfo, NTopicFilter, NSubOpts, Session) of
+    case emqx_session:subscribe(ClientInfo, NTopicFilter, SubOpts, Session) of
         {ok, NSession} ->
-            {QoS, Channel#channel{session = NSession}};
+            %% TODO && FIXME (EMQX-11216): QoS as ReasonCode(max granted QoS) for now
+            RC = QoS,
+            {RC, Channel#channel{session = NSession}};
         {error, RC} ->
             ?SLOG(
                 warning,
@@ -835,6 +787,30 @@ do_subscribe(
             {RC, Channel}
     end.
 
+gen_reason_codes(TFChecked, TFSubedWitNhRC) ->
+    do_gen_reason_codes([], TFChecked, TFSubedWitNhRC).
+
+%% Initial RC is `RC_SUCCESS | RC_NOT_AUTHORIZED`, generated by check_sub_authzs/2
+%% And then TF with `RC_SUCCESS` will pass through `process_subscribe/2` and
+%% NRC should override the initial RC.
+do_gen_reason_codes(Acc, [], []) ->
+    lists:reverse(Acc);
+do_gen_reason_codes(
+    Acc,
+    [{_, ?RC_SUCCESS} | RestTF],
+    [{_, NRC} | RestWithNRC]
+) ->
+    %% will pass through `process_subscribe/2`
+    %% use NRC to override InitialRC
+    do_gen_reason_codes([NRC | Acc], RestTF, RestWithNRC);
+do_gen_reason_codes(
+    Acc,
+    [{_, InitialRC} | Rest],
+    RestWithNRC
+) ->
+    %% InitialRC is not `RC_SUCCESS`, use it.
+    do_gen_reason_codes([InitialRC | Acc], Rest, RestWithNRC).
+
 %%--------------------------------------------------------------------
 %% Process Unsubscribe
 %%--------------------------------------------------------------------
@@ -1213,13 +1189,8 @@ handle_call(Req, Channel) ->
     ok | {ok, channel()} | {shutdown, Reason :: term(), channel()}.
 handle_info({subscribe, TopicFilters}, Channel) ->
-    {_, NChannel} = lists:foldl(
-        fun({TopicFilter, SubOpts}, {_, ChannelAcc}) ->
-            do_subscribe(TopicFilter, SubOpts, ChannelAcc)
-        end,
-        {[], Channel},
-        parse_topic_filters(TopicFilters)
-    ),
+    NTopicFilters = enrich_subscribe(TopicFilters, Channel),
+    {_TopicFiltersWithRC, NChannel} = process_subscribe(NTopicFilters, Channel),
     {ok, NChannel};
 handle_info({unsubscribe, TopicFilters}, Channel) ->
     {_RC, NChannel} = process_unsubscribe(TopicFilters, #{}, Channel),
@@ -1234,12 +1205,13 @@ handle_info(
     #channel{
         conn_state = ConnState,
         clientinfo = ClientInfo,
+        conninfo = ConnInfo,
         session = Session
     }
 ) when
     ConnState =:= connected orelse ConnState =:= reauthenticating
 ->
-    {Intent, Session1} = emqx_session:disconnect(ClientInfo, Session),
+    {Intent, Session1} = emqx_session:disconnect(ClientInfo, ConnInfo, Session),
     Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(Channel)),
     Channel2 = Channel1#channel{session = Session1},
     case maybe_shutdown(Reason, Intent, Channel2) of
@@ -1351,7 +1323,8 @@ handle_timeout(
         {ok, Replies, NSession} ->
             handle_out(publish, Replies, Channel#channel{session = NSession})
     end;
-handle_timeout(_TRef, expire_session, Channel) ->
+handle_timeout(_TRef, expire_session, Channel = #channel{session = Session}) ->
+    ok = emqx_session:destroy(Session),
     shutdown(expired, Channel);
 handle_timeout(
     _TRef,
@@ -1859,49 +1832,156 @@ check_pub_caps(
 ) ->
     emqx_mqtt_caps:check_pub(Zone, #{qos => QoS, retain => Retain, topic => Topic}).
 
+%%--------------------------------------------------------------------
+%% Check Subscribe Packet
+
+check_subscribe(SubPkt, _Channel) ->
+    case emqx_packet:check(SubPkt) of
+        ok -> ok;
+        {error, RC} -> {error, {disconnect, RC}}
+    end.
+
 %%--------------------------------------------------------------------
 %% Check Sub Authorization
 
-check_sub_authzs(TopicFilters, Channel) ->
-    check_sub_authzs(TopicFilters, Channel, []).
-
 check_sub_authzs(
-    [TopicFilter = {Topic, _} | More],
-    Channel = #channel{clientinfo = ClientInfo},
-    Acc
+    ?SUBSCRIBE_PACKET(PacketId, SubProps, TopicFilters0),
+    Channel = #channel{clientinfo = ClientInfo}
 ) ->
+    CheckResult = do_check_sub_authzs(TopicFilters0, ClientInfo),
+    HasAuthzDeny = lists:any(
+        fun({{_TopicFilter, _SubOpts}, ReasonCode}) ->
+            ReasonCode =:= ?RC_NOT_AUTHORIZED
+        end,
+        CheckResult
+    ),
+    DenyAction = emqx:get_config([authorization, deny_action], ignore),
+    case DenyAction =:= disconnect andalso HasAuthzDeny of
+        true ->
+            {error, {disconnect, ?RC_NOT_AUTHORIZED}, Channel};
+        false ->
+            {ok, ?SUBSCRIBE_PACKET(PacketId, SubProps, CheckResult), Channel}
+    end.
+
+do_check_sub_authzs(TopicFilters, ClientInfo) ->
+    do_check_sub_authzs(ClientInfo, TopicFilters, []).
+
+do_check_sub_authzs(_ClientInfo, [], Acc) ->
+    lists:reverse(Acc);
+do_check_sub_authzs(ClientInfo, [TopicFilter = {Topic, _SubOpts} | More], Acc) ->
+    %% subscribe authz check only cares about the real topic filter when shared-sub
+    %% e.g.
only check <<"t/#">> for <<"$share/g/t/#">> Action = authz_action(TopicFilter), - case emqx_access_control:authorize(ClientInfo, Action, Topic) of + case + emqx_access_control:authorize( + ClientInfo, + Action, + emqx_topic:get_shared_real_topic(Topic) + ) + of + %% TODO: support maximum QoS granted + %% MQTT-3.1.1 [MQTT-3.8.4-6] and MQTT-5.0 [MQTT-3.8.4-7] + %% Not implemented yet: + %% {allow, RC} -> do_check_sub_authzs(ClientInfo, More, [{TopicFilter, RC} | Acc]); allow -> - check_sub_authzs(More, Channel, [{TopicFilter, ?RC_SUCCESS} | Acc]); + do_check_sub_authzs(ClientInfo, More, [{TopicFilter, ?RC_SUCCESS} | Acc]); deny -> - check_sub_authzs(More, Channel, [{TopicFilter, ?RC_NOT_AUTHORIZED} | Acc]) - end; -check_sub_authzs([], _Channel, Acc) -> - lists:reverse(Acc). + do_check_sub_authzs(ClientInfo, More, [{TopicFilter, ?RC_NOT_AUTHORIZED} | Acc]) + end. %%-------------------------------------------------------------------- %% Check Sub Caps -check_sub_caps(TopicFilter, SubOpts, #channel{clientinfo = ClientInfo}) -> - emqx_mqtt_caps:check_sub(ClientInfo, TopicFilter, SubOpts). +check_sub_caps( + ?SUBSCRIBE_PACKET(PacketId, SubProps, TopicFilters), + Channel = #channel{clientinfo = ClientInfo} +) -> + CheckResult = do_check_sub_caps(ClientInfo, TopicFilters), + {ok, ?SUBSCRIBE_PACKET(PacketId, SubProps, CheckResult), Channel}. + +do_check_sub_caps(ClientInfo, TopicFilters) -> + do_check_sub_caps(ClientInfo, TopicFilters, []). 
+ +do_check_sub_caps(_ClientInfo, [], Acc) -> + lists:reverse(Acc); +do_check_sub_caps(ClientInfo, [TopicFilter = {{Topic, SubOpts}, ?RC_SUCCESS} | More], Acc) -> + case emqx_mqtt_caps:check_sub(ClientInfo, Topic, SubOpts) of + ok -> + do_check_sub_caps(ClientInfo, More, [TopicFilter | Acc]); + {error, NRC} -> + ?SLOG( + warning, + #{ + msg => "cannot_subscribe_topic_filter", + reason => emqx_reason_codes:name(NRC) + }, + #{topic => Topic} + ), + do_check_sub_caps(ClientInfo, More, [{{Topic, SubOpts}, NRC} | Acc]) + end; +do_check_sub_caps(ClientInfo, [TopicFilter = {{_Topic, _SubOpts}, _OtherRC} | More], Acc) -> + do_check_sub_caps(ClientInfo, More, [TopicFilter | Acc]). %%-------------------------------------------------------------------- -%% Enrich SubId +%% Run Subscribe Hooks -enrich_subopts_subid(#{'Subscription-Identifier' := SubId}, TopicFilters) -> - [{Topic, SubOpts#{subid => SubId}} || {Topic, SubOpts} <- TopicFilters]; -enrich_subopts_subid(_Properties, TopicFilters) -> - TopicFilters. +run_sub_hooks( + ?SUBSCRIBE_PACKET(_PacketId, Properties, TopicFilters0), + _Channel = #channel{clientinfo = ClientInfo} +) -> + TopicFilters = [ + TopicFilter + || {TopicFilter, ?RC_SUCCESS} <- TopicFilters0 + ], + _NTopicFilters = run_hooks('client.subscribe', [ClientInfo, Properties], TopicFilters). %%-------------------------------------------------------------------- %% Enrich SubOpts -enrich_subopts(SubOpts, _Channel = ?IS_MQTT_V5) -> - SubOpts; -enrich_subopts(SubOpts, #channel{clientinfo = #{zone := Zone, is_bridge := IsBridge}}) -> +%% for api subscribe without sub-authz check and sub-caps check. +enrich_subscribe(TopicFilters, Channel) when is_list(TopicFilters) -> + do_enrich_subscribe(#{}, TopicFilters, Channel); +%% for mqtt clients sent subscribe packet. 
+enrich_subscribe(?SUBSCRIBE_PACKET(PacketId, Properties, TopicFilters), Channel) -> + NTopicFilters = do_enrich_subscribe(Properties, TopicFilters, Channel), + {ok, ?SUBSCRIBE_PACKET(PacketId, Properties, NTopicFilters), Channel}. + +do_enrich_subscribe(Properties, TopicFilters, Channel) -> + _NTopicFilters = run_fold( + [ + %% TODO: do try catch with reason code here + fun(TFs, _) -> parse_raw_topic_filters(TFs) end, + fun enrich_subopts_subid/2, + fun enrich_subopts_porps/2, + fun enrich_subopts_flags/2 + ], + TopicFilters, + #{sub_props => Properties, channel => Channel} + ). + +enrich_subopts_subid(TopicFilters, #{sub_props := #{'Subscription-Identifier' := SubId}}) -> + [{Topic, SubOpts#{subid => SubId}} || {Topic, SubOpts} <- TopicFilters]; +enrich_subopts_subid(TopicFilters, _State) -> + TopicFilters. + +enrich_subopts_porps(TopicFilters, #{sub_props := SubProps}) -> + [{Topic, SubOpts#{sub_props => SubProps}} || {Topic, SubOpts} <- TopicFilters]. + +enrich_subopts_flags(TopicFilters, #{channel := Channel}) -> + do_enrich_subopts_flags(TopicFilters, Channel). + +do_enrich_subopts_flags(TopicFilters, ?IS_MQTT_V5) -> + [{Topic, merge_default_subopts(SubOpts)} || {Topic, SubOpts} <- TopicFilters]; +do_enrich_subopts_flags(TopicFilters, #channel{clientinfo = #{zone := Zone, is_bridge := IsBridge}}) -> + Rap = flag(IsBridge), NL = flag(get_mqtt_conf(Zone, ignore_loop_deliver)), - SubOpts#{rap => flag(IsBridge), nl => NL}. + [ + {Topic, (merge_default_subopts(SubOpts))#{rap => Rap, nl => NL}} + || {Topic, SubOpts} <- TopicFilters + ]. + +merge_default_subopts(SubOpts) -> + maps:merge(?DEFAULT_SUBOPTS, SubOpts). %%-------------------------------------------------------------------- %% Enrich ConnAck Caps @@ -2091,8 +2171,8 @@ maybe_shutdown(Reason, _Intent = shutdown, Channel) -> %%-------------------------------------------------------------------- %% Parse Topic Filters --compile({inline, [parse_topic_filters/1]}). 
-parse_topic_filters(TopicFilters) -> +%% [{<<"$share/group/topic">>, _SubOpts = #{}} | _] +parse_raw_topic_filters(TopicFilters) -> lists:map(fun emqx_topic:parse/1, TopicFilters). %%-------------------------------------------------------------------- diff --git a/apps/emqx/src/emqx_cm.erl b/apps/emqx/src/emqx_cm.erl index 537c60876..92b95c7c3 100644 --- a/apps/emqx/src/emqx_cm.erl +++ b/apps/emqx/src/emqx_cm.erl @@ -91,6 +91,7 @@ clean_down/1, mark_channel_connected/1, mark_channel_disconnected/1, + is_channel_connected/1, get_connected_client_count/0 ]). diff --git a/apps/emqx/src/emqx_cm_sup.erl b/apps/emqx/src/emqx_cm_sup.erl index 9db73e8e4..e7420b4da 100644 --- a/apps/emqx/src/emqx_cm_sup.erl +++ b/apps/emqx/src/emqx_cm_sup.erl @@ -47,7 +47,17 @@ init([]) -> Locker = child_spec(emqx_cm_locker, 5000, worker), Registry = child_spec(emqx_cm_registry, 5000, worker), Manager = child_spec(emqx_cm, 5000, worker), - {ok, {SupFlags, [Banned, Flapping, Locker, Registry, Manager]}}. + DSSessionGCSup = child_spec(emqx_persistent_session_ds_sup, infinity, supervisor), + Children = + [ + Banned, + Flapping, + Locker, + Registry, + Manager, + DSSessionGCSup + ], + {ok, {SupFlags, Children}}. %%-------------------------------------------------------------------- %% Internal functions diff --git a/apps/emqx/src/emqx_message.erl b/apps/emqx/src/emqx_message.erl index 4ff36504d..b65c8360f 100644 --- a/apps/emqx/src/emqx_message.erl +++ b/apps/emqx/src/emqx_message.erl @@ -301,7 +301,9 @@ update_expiry(Msg) -> Msg. %% @doc Message to PUBLISH Packet. --spec to_packet(emqx_types:packet_id(), emqx_types:message()) -> +%% +%% When QoS=0 then packet id must be `undefined' +-spec to_packet(emqx_types:packet_id() | undefined, emqx_types:message()) -> emqx_types:packet(). 
to_packet( PacketId, diff --git a/apps/emqx/src/emqx_mountpoint.erl b/apps/emqx/src/emqx_mountpoint.erl index 5b5dac954..c19736690 100644 --- a/apps/emqx/src/emqx_mountpoint.erl +++ b/apps/emqx/src/emqx_mountpoint.erl @@ -17,6 +17,7 @@ -module(emqx_mountpoint). -include("emqx.hrl"). +-include("emqx_mqtt.hrl"). -include("emqx_placeholder.hrl"). -include("types.hrl"). @@ -34,38 +35,54 @@ -spec mount(maybe(mountpoint()), Any) -> Any when Any :: emqx_types:topic() + | emqx_types:share() | emqx_types:message() | emqx_types:topic_filters(). mount(undefined, Any) -> Any; -mount(MountPoint, Topic) when is_binary(Topic) -> - prefix(MountPoint, Topic); -mount(MountPoint, Msg = #message{topic = Topic}) -> - Msg#message{topic = prefix(MountPoint, Topic)}; +mount(MountPoint, Topic) when ?IS_TOPIC(Topic) -> + prefix_maybe_share(MountPoint, Topic); +mount(MountPoint, Msg = #message{topic = Topic}) when is_binary(Topic) -> + Msg#message{topic = prefix_maybe_share(MountPoint, Topic)}; mount(MountPoint, TopicFilters) when is_list(TopicFilters) -> - [{prefix(MountPoint, Topic), SubOpts} || {Topic, SubOpts} <- TopicFilters]. + [{prefix_maybe_share(MountPoint, Topic), SubOpts} || {Topic, SubOpts} <- TopicFilters]. -%% @private --compile({inline, [prefix/2]}). -prefix(MountPoint, Topic) -> - <>. +-spec prefix_maybe_share(maybe(mountpoint()), Any) -> Any when + Any :: + emqx_types:topic() + | emqx_types:share(). +prefix_maybe_share(MountPoint, Topic) when + is_binary(MountPoint) andalso is_binary(Topic) +-> + <>; +prefix_maybe_share(MountPoint, #share{group = Group, topic = Topic}) when + is_binary(MountPoint) andalso is_binary(Topic) +-> + #share{group = Group, topic = prefix_maybe_share(MountPoint, Topic)}. -spec unmount(maybe(mountpoint()), Any) -> Any when Any :: emqx_types:topic() + | emqx_types:share() | emqx_types:message(). 
unmount(undefined, Any) -> Any; -unmount(MountPoint, Topic) when is_binary(Topic) -> +unmount(MountPoint, Topic) when ?IS_TOPIC(Topic) -> + unmount_maybe_share(MountPoint, Topic); +unmount(MountPoint, Msg = #message{topic = Topic}) when is_binary(Topic) -> + Msg#message{topic = unmount_maybe_share(MountPoint, Topic)}. + +unmount_maybe_share(MountPoint, Topic) when + is_binary(MountPoint) andalso is_binary(Topic) +-> case string:prefix(Topic, MountPoint) of nomatch -> Topic; Topic1 -> Topic1 end; -unmount(MountPoint, Msg = #message{topic = Topic}) -> - case string:prefix(Topic, MountPoint) of - nomatch -> Msg; - Topic1 -> Msg#message{topic = Topic1} - end. +unmount_maybe_share(MountPoint, TopicFilter = #share{topic = Topic}) when + is_binary(MountPoint) andalso is_binary(Topic) +-> + TopicFilter#share{topic = unmount_maybe_share(MountPoint, Topic)}. -spec replvar(maybe(mountpoint()), map()) -> maybe(mountpoint()). replvar(undefined, _Vars) -> diff --git a/apps/emqx/src/emqx_mqtt_caps.erl b/apps/emqx/src/emqx_mqtt_caps.erl index 11f495dbd..5cf10691d 100644 --- a/apps/emqx/src/emqx_mqtt_caps.erl +++ b/apps/emqx/src/emqx_mqtt_caps.erl @@ -102,16 +102,19 @@ do_check_pub(_Flags, _Caps) -> -spec check_sub( emqx_types:clientinfo(), - emqx_types:topic(), + emqx_types:topic() | emqx_types:share(), emqx_types:subopts() ) -> ok_or_error(emqx_types:reason_code()). check_sub(ClientInfo = #{zone := Zone}, Topic, SubOpts) -> Caps = emqx_config:get_zone_conf(Zone, [mqtt]), Flags = #{ + %% TODO: qos check + %% (max_qos_allowed, Map) -> + %% max_qos_allowed => maps:get(max_qos_allowed, Caps, 2), topic_levels => emqx_topic:levels(Topic), is_wildcard => emqx_topic:wildcard(Topic), - is_shared => maps:is_key(share, SubOpts), + is_shared => erlang:is_record(Topic, share), is_exclusive => maps:get(is_exclusive, SubOpts, false) }, do_check_sub(Flags, Caps, ClientInfo, Topic). 
@@ -126,13 +129,19 @@ do_check_sub(#{is_shared := true}, #{shared_subscription := false}, _, _) -> {error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED}; do_check_sub(#{is_exclusive := true}, #{exclusive_subscription := false}, _, _) -> {error, ?RC_TOPIC_FILTER_INVALID}; -do_check_sub(#{is_exclusive := true}, #{exclusive_subscription := true}, ClientInfo, Topic) -> +do_check_sub(#{is_exclusive := true}, #{exclusive_subscription := true}, ClientInfo, Topic) when + is_binary(Topic) +-> case emqx_exclusive_subscription:check_subscribe(ClientInfo, Topic) of deny -> {error, ?RC_QUOTA_EXCEEDED}; _ -> ok end; +%% for max_qos_allowed +%% see: RC_GRANTED_QOS_0, RC_GRANTED_QOS_1, RC_GRANTED_QOS_2 +%% do_check_sub(_, _) -> +%% {ok, RC}; do_check_sub(_Flags, _Caps, _, _) -> ok. diff --git a/apps/emqx/src/emqx_persistent_message.erl b/apps/emqx/src/emqx_persistent_message.erl index 632ff2a27..295ddd3dc 100644 --- a/apps/emqx/src/emqx_persistent_message.erl +++ b/apps/emqx/src/emqx_persistent_message.erl @@ -19,7 +19,7 @@ -include("emqx.hrl"). -export([init/0]). --export([is_store_enabled/0]). +-export([is_persistence_enabled/0, force_ds/0]). %% Message persistence -export([ @@ -28,9 +28,8 @@ -define(PERSISTENT_MESSAGE_DB, emqx_persistent_message). -%% FIXME -define(WHEN_ENABLED(DO), - case is_store_enabled() of + case is_persistence_enabled() of true -> DO; false -> {skipped, disabled} end @@ -40,18 +39,40 @@ init() -> ?WHEN_ENABLED(begin - ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{ - backend => builtin, - storage => {emqx_ds_storage_bitfield_lts, #{}} - }), + Backend = storage_backend(), + ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, Backend), ok = emqx_persistent_session_ds_router:init_tables(), ok = emqx_persistent_session_ds:create_tables(), ok end). --spec is_store_enabled() -> boolean(). -is_store_enabled() -> - emqx_config:get([persistent_session_store, ds]). +-spec is_persistence_enabled() -> boolean(). 
+is_persistence_enabled() -> + emqx_config:get([session_persistence, enable]). + +-spec storage_backend() -> emqx_ds:create_db_opts(). +storage_backend() -> + storage_backend(emqx_config:get([session_persistence, storage])). + +%% Dev-only option: force all messages to go through +%% `emqx_persistent_session_ds': +-spec force_ds() -> boolean(). +force_ds() -> + emqx_config:get([session_persistence, force_persistence]). + +storage_backend(#{ + builtin := #{enable := true, n_shards := NShards, replication_factor := ReplicationFactor} +}) -> + #{ + backend => builtin, + storage => {emqx_ds_storage_bitfield_lts, #{}}, + n_shards => NShards, + replication_factor => ReplicationFactor + }; +storage_backend(#{ + fdb := #{enable := true} = FDBConfig +}) -> + FDBConfig#{backend => fdb}. %%-------------------------------------------------------------------- diff --git a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl index 69b6675d8..fb8170904 100644 --- a/apps/emqx/src/emqx_persistent_message_ds_replayer.erl +++ b/apps/emqx/src/emqx_persistent_message_ds_replayer.erl @@ -19,12 +19,18 @@ -module(emqx_persistent_message_ds_replayer). %% API: --export([new/0, next_packet_id/1, replay/2, commit_offset/3, poll/3, n_inflight/1]). +-export([new/0, open/1, next_packet_id/1, n_inflight/1]). + +-export([poll/4, replay/2, commit_offset/4]). + +-export([seqno_to_packet_id/1, packet_id_to_seqno/2]). + +-export([committed_until/2]). %% internal exports: -export([]). --export_type([inflight/0]). +-export_type([inflight/0, seqno/0]). -include_lib("emqx/include/logger.hrl"). -include("emqx_persistent_session_ds.hrl"). @@ -34,6 +40,13 @@ -include_lib("eunit/include/eunit.hrl"). -endif. +-define(EPOCH_SIZE, 16#10000). + +-define(ACK, 0). +-define(COMP, 1). + +-define(TRACK_FLAG(WHICH), (1 bsl WHICH)). 
+ %%================================================================================ %% Type declarations %%================================================================================ @@ -41,23 +54,23 @@ %% Note: sequence numbers are monotonic; they don't wrap around: -type seqno() :: non_neg_integer(). --record(range, { - stream :: emqx_ds:stream(), - first :: seqno(), - last :: seqno(), - iterator_next :: emqx_ds:iterator() | undefined -}). - --type range() :: #range{}. +-type track() :: ack | comp. +-type commit_type() :: rec. -record(inflight, { - next_seqno = 0 :: seqno(), - acked_seqno = 0 :: seqno(), - offset_ranges = [] :: [range()] + next_seqno = 1 :: seqno(), + commits = #{ack => 1, comp => 1, rec => 1} :: #{track() | commit_type() => seqno()}, + %% Ranges are sorted in ascending order of their sequence numbers. + offset_ranges = [] :: [ds_pubrange()] }). -opaque inflight() :: #inflight{}. +-type reply_fun() :: fun( + (seqno(), emqx_types:message()) -> + emqx_session:replies() | {_AdvanceSeqno :: false, emqx_session:replies()} +). + %%================================================================================ %% API funcions %%================================================================================ @@ -66,85 +79,98 @@ new() -> #inflight{}. +-spec open(emqx_persistent_session_ds:id()) -> inflight(). +open(SessionId) -> + {Ranges, RecUntil} = ro_transaction( + fun() -> {get_ranges(SessionId), get_committed_offset(SessionId, rec)} end + ), + {Commits, NextSeqno} = compute_inflight_range(Ranges), + #inflight{ + commits = Commits#{rec => RecUntil}, + next_seqno = NextSeqno, + offset_ranges = Ranges + }. + -spec next_packet_id(inflight()) -> {emqx_types:packet_id(), inflight()}. -next_packet_id(Inflight0 = #inflight{next_seqno = LastSeqNo}) -> - Inflight = Inflight0#inflight{next_seqno = LastSeqNo + 1}, - case LastSeqNo rem 16#10000 of - 0 -> - %% We skip sequence numbers that lead to PacketId = 0 to - %% simplify math. 
Note: it leads to occasional gaps in the - %% sequence numbers. - next_packet_id(Inflight); - PacketId -> - {PacketId, Inflight} - end. +next_packet_id(Inflight0 = #inflight{next_seqno = LastSeqno}) -> + Inflight = Inflight0#inflight{next_seqno = next_seqno(LastSeqno)}, + {seqno_to_packet_id(LastSeqno), Inflight}. -spec n_inflight(inflight()) -> non_neg_integer(). -n_inflight(#inflight{next_seqno = NextSeqNo, acked_seqno = AckedSeqno}) -> - %% NOTE: this function assumes that gaps in the sequence ID occur - %% _only_ when the packet ID wraps: - case AckedSeqno >= ((NextSeqNo bsr 16) bsl 16) of - true -> - NextSeqNo - AckedSeqno; - false -> - NextSeqNo - AckedSeqno - 1 - end. - --spec replay(emqx_persistent_session_ds:id(), inflight()) -> - emqx_session:replies(). -replay(_SessionId, _Inflight = #inflight{offset_ranges = _Ranges}) -> - []. - --spec commit_offset(emqx_persistent_session_ds:id(), emqx_types:packet_id(), inflight()) -> - {_IsValidOffset :: boolean(), inflight()}. -commit_offset( - SessionId, - PacketId, - Inflight0 = #inflight{ - acked_seqno = AckedSeqno0, next_seqno = NextSeqNo, offset_ranges = Ranges0 - } -) -> - AckedSeqno = - case packet_id_to_seqno(NextSeqNo, PacketId) of - N when N > AckedSeqno0; AckedSeqno0 =:= 0 -> +n_inflight(#inflight{offset_ranges = Ranges}) -> + %% TODO + %% This is not very efficient. Instead, we can take the maximum of + %% `range_size(AckedUntil, NextSeqno)` and `range_size(CompUntil, NextSeqno)`. + %% This won't be exact number but a pessimistic estimate, but this way we + %% will penalize clients that PUBACK QoS 1 messages but don't PUBCOMP QoS 2 + %% messages for some reason. For that to work, we need to additionally track + %% actual `AckedUntil` / `CompUntil` during `commit_offset/4`. 
+ lists:foldl( + fun + (#ds_pubrange{type = ?T_CHECKPOINT}, N) -> N; - OutOfRange -> - ?SLOG(warning, #{ - msg => "out-of-order_ack", - prev_seqno => AckedSeqno0, - acked_seqno => OutOfRange, - next_seqno => NextSeqNo, - packet_id => PacketId - }), - AckedSeqno0 + (#ds_pubrange{type = ?T_INFLIGHT, id = {_, First}, until = Until}, N) -> + N + range_size(First, Until) end, - Ranges = lists:filter( - fun(#range{stream = Stream, last = LastSeqno, iterator_next = ItNext}) -> - case LastSeqno =< AckedSeqno of - true -> - %% This range has been fully - %% acked. Remove it and replace saved - %% iterator with the trailing iterator. - update_iterator(SessionId, Stream, ItNext), - false; - false -> - %% This range still has unacked - %% messages: - true - end + 0, + Ranges + ). + +-spec replay(reply_fun(), inflight()) -> {emqx_session:replies(), inflight()}. +replay(ReplyFun, Inflight0 = #inflight{offset_ranges = Ranges0}) -> + {Ranges, Replies} = lists:mapfoldr( + fun(Range, Acc) -> + replay_range(ReplyFun, Range, Acc) end, + [], Ranges0 ), - Inflight = Inflight0#inflight{acked_seqno = AckedSeqno, offset_ranges = Ranges}, - {true, Inflight}. + Inflight = Inflight0#inflight{offset_ranges = Ranges}, + {Replies, Inflight}. --spec poll(emqx_persistent_session_ds:id(), inflight(), pos_integer()) -> +-spec commit_offset(emqx_persistent_session_ds:id(), Offset, emqx_types:packet_id(), inflight()) -> + {_IsValidOffset :: boolean(), inflight()} +when + Offset :: track() | commit_type(). +commit_offset( + SessionId, + Track, + PacketId, + Inflight0 = #inflight{commits = Commits} +) when Track == ack orelse Track == comp -> + case validate_commit(Track, PacketId, Inflight0) of + CommitUntil when is_integer(CommitUntil) -> + %% TODO + %% We do not preserve `CommitUntil` in the database. Instead, we discard + %% fully acked ranges from the database. In effect, this means that the + %% most recent `CommitUntil` the client has sent may be lost in case of a + %% crash or client loss. 
+ Inflight1 = Inflight0#inflight{commits = Commits#{Track := CommitUntil}}, + Inflight = discard_committed(SessionId, Inflight1), + {true, Inflight}; + false -> + {false, Inflight0} + end; +commit_offset( + SessionId, + CommitType = rec, + PacketId, + Inflight0 = #inflight{commits = Commits} +) -> + case validate_commit(CommitType, PacketId, Inflight0) of + CommitUntil when is_integer(CommitUntil) -> + update_committed_offset(SessionId, CommitType, CommitUntil), + Inflight = Inflight0#inflight{commits = Commits#{CommitType := CommitUntil}}, + {true, Inflight}; + false -> + {false, Inflight0} + end. + +-spec poll(reply_fun(), emqx_persistent_session_ds:id(), inflight(), pos_integer()) -> {emqx_session:replies(), inflight()}. -poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff -> - #inflight{next_seqno = NextSeqNo0, acked_seqno = AckedSeqno} = - Inflight0, +poll(ReplyFun, SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < ?EPOCH_SIZE -> FetchThreshold = max(1, WindowSize div 2), - FreeSpace = AckedSeqno + WindowSize - NextSeqNo0, + FreeSpace = WindowSize - n_inflight(Inflight0), case FreeSpace >= FetchThreshold of false -> %% TODO: this branch is meant to avoid fetching data from @@ -153,10 +179,27 @@ poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff %% client get stuck even? {[], Inflight0}; true -> + %% TODO: Wrap this in `mria:async_dirty/2`? Streams = shuffle(get_streams(SessionId)), - fetch(SessionId, Inflight0, Streams, FreeSpace, []) + fetch(ReplyFun, SessionId, Inflight0, Streams, FreeSpace, []) end. +%% Which seqno this track is committed until. +%% "Until" means this is first seqno that is _not yet committed_ for this track. +-spec committed_until(track() | commit_type(), inflight()) -> seqno(). +committed_until(Track, #inflight{commits = Commits}) -> + maps:get(Track, Commits). + +-spec seqno_to_packet_id(seqno()) -> emqx_types:packet_id() | 0. 
+seqno_to_packet_id(Seqno) -> + Seqno rem ?EPOCH_SIZE. + +%% Reconstruct session counter by adding most significant bits from +%% the current counter to the packet id. +-spec packet_id_to_seqno(emqx_types:packet_id(), inflight()) -> seqno(). +packet_id_to_seqno(PacketId, #inflight{next_seqno = NextSeqno}) -> + packet_id_to_seqno_(NextSeqno, PacketId). + %%================================================================================ %% Internal exports %%================================================================================ @@ -165,87 +208,329 @@ poll(SessionId, Inflight0, WindowSize) when WindowSize > 0, WindowSize < 16#7fff %% Internal functions %%================================================================================ -fetch(_SessionId, Inflight, _Streams = [], _N, Acc) -> - {lists:reverse(Acc), Inflight}; -fetch(_SessionId, Inflight, _Streams, 0, Acc) -> - {lists:reverse(Acc), Inflight}; -fetch(SessionId, Inflight0, [Stream | Streams], N, Publishes0) -> - #inflight{next_seqno = FirstSeqNo, offset_ranges = Ranges0} = Inflight0, - ItBegin = get_last_iterator(SessionId, Stream, Ranges0), - {ok, ItEnd, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N), - {NMessages, Publishes, Inflight1} = - lists:foldl( - fun(Msg, {N0, PubAcc0, InflightAcc0}) -> - {PacketId, InflightAcc} = next_packet_id(InflightAcc0), - PubAcc = [{PacketId, Msg} | PubAcc0], - {N0 + 1, PubAcc, InflightAcc} - end, - {0, Publishes0, Inflight0}, - Messages - ), - #inflight{next_seqno = LastSeqNo} = Inflight1, - case NMessages > 0 of - true -> - Range = #range{ - first = FirstSeqNo, - last = LastSeqNo - 1, - stream = Stream, - iterator_next = ItEnd - }, - Inflight = Inflight1#inflight{offset_ranges = Ranges0 ++ [Range]}, - fetch(SessionId, Inflight, Streams, N - NMessages, Publishes); - false -> - fetch(SessionId, Inflight1, Streams, N, Publishes) - end. 
+compute_inflight_range([]) -> + {#{ack => 1, comp => 1}, 1}; +compute_inflight_range(Ranges) -> + _RangeLast = #ds_pubrange{until = LastSeqno} = lists:last(Ranges), + AckedUntil = find_committed_until(ack, Ranges), + CompUntil = find_committed_until(comp, Ranges), + Commits = #{ + ack => emqx_maybe:define(AckedUntil, LastSeqno), + comp => emqx_maybe:define(CompUntil, LastSeqno) + }, + {Commits, LastSeqno}. --spec update_iterator(emqx_persistent_session_ds:id(), emqx_ds:stream(), emqx_ds:iterator()) -> ok. -update_iterator(DSSessionId, Stream, Iterator) -> - %% Workaround: we convert `Stream' to a binary before attempting to store it in - %% mnesia(rocksdb) because of a bug in `mnesia_rocksdb' when trying to do - %% `mnesia:dirty_all_keys' later. - StreamBin = term_to_binary(Stream), - mria:dirty_write(?SESSION_ITER_TAB, #ds_iter{id = {DSSessionId, StreamBin}, iter = Iterator}). - -get_last_iterator(SessionId, Stream, Ranges) -> - case lists:keyfind(Stream, #range.stream, lists:reverse(Ranges)) of - false -> - get_iterator(SessionId, Stream); - #range{iterator_next = Next} -> - Next - end. - --spec get_iterator(emqx_persistent_session_ds:id(), emqx_ds:stream()) -> emqx_ds:iterator(). -get_iterator(DSSessionId, Stream) -> - %% See comment in `update_iterator'. - StreamBin = term_to_binary(Stream), - Id = {DSSessionId, StreamBin}, - [#ds_iter{iter = It}] = mnesia:dirty_read(?SESSION_ITER_TAB, Id), - It. - --spec get_streams(emqx_persistent_session_ds:id()) -> [emqx_ds:stream()]. 
-get_streams(SessionId) -> - lists:map( - fun(#ds_stream{stream = Stream}) -> - Stream +find_committed_until(Track, Ranges) -> + RangesUncommitted = lists:dropwhile( + fun(Range) -> + case Range of + #ds_pubrange{type = ?T_CHECKPOINT} -> + true; + #ds_pubrange{type = ?T_INFLIGHT, tracks = Tracks} -> + not has_track(Track, Tracks) + end end, - mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId) + Ranges + ), + case RangesUncommitted of + [#ds_pubrange{id = {_, CommittedUntil}} | _] -> + CommittedUntil; + [] -> + undefined + end. + +-spec get_ranges(emqx_persistent_session_ds:id()) -> [ds_pubrange()]. +get_ranges(SessionId) -> + Pat = erlang:make_tuple( + record_info(size, ds_pubrange), + '_', + [{1, ds_pubrange}, {#ds_pubrange.id, {SessionId, '_'}}] + ), + mnesia:match_object(?SESSION_PUBRANGE_TAB, Pat, read). + +fetch(ReplyFun, SessionId, Inflight0, [DSStream | Streams], N, Acc) when N > 0 -> + #inflight{next_seqno = FirstSeqno, offset_ranges = Ranges} = Inflight0, + ItBegin = get_last_iterator(DSStream, Ranges), + {ok, ItEnd, Messages} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, ItBegin, N), + case Messages of + [] -> + fetch(ReplyFun, SessionId, Inflight0, Streams, N, Acc); + _ -> + %% We need to preserve the iterator pointing to the beginning of the + %% range, so that we can replay it if needed. + {Publishes, {UntilSeqno, Tracks}} = publish(ReplyFun, FirstSeqno, Messages), + Size = range_size(FirstSeqno, UntilSeqno), + Range0 = #ds_pubrange{ + id = {SessionId, FirstSeqno}, + type = ?T_INFLIGHT, + tracks = Tracks, + until = UntilSeqno, + stream = DSStream#ds_stream.ref, + iterator = ItBegin + }, + ok = preserve_range(Range0), + %% ...Yet we need to keep the iterator pointing past the end of the + %% range, so that we can pick up where we left off: it will become + %% `ItBegin` of the next range for this stream. 
+ Range = keep_next_iterator(ItEnd, Range0), + Inflight = Inflight0#inflight{ + next_seqno = UntilSeqno, + offset_ranges = Ranges ++ [Range] + }, + fetch(ReplyFun, SessionId, Inflight, Streams, N - Size, [Publishes | Acc]) + end; +fetch(_ReplyFun, _SessionId, Inflight, _Streams, _N, Acc) -> + Publishes = lists:append(lists:reverse(Acc)), + {Publishes, Inflight}. + +discard_committed( + SessionId, + Inflight0 = #inflight{commits = Commits, offset_ranges = Ranges0} +) -> + %% TODO: This could be kept and incrementally updated in the inflight state. + Checkpoints = find_checkpoints(Ranges0), + %% TODO: Wrap this in `mria:async_dirty/2`? + Ranges = discard_committed_ranges(SessionId, Commits, Checkpoints, Ranges0), + Inflight0#inflight{offset_ranges = Ranges}. + +find_checkpoints(Ranges) -> + lists:foldl( + fun(#ds_pubrange{stream = StreamRef, until = Until}, Acc) -> + %% For each stream, remember the last range over this stream. + Acc#{StreamRef => Until} + end, + #{}, + Ranges ). -%% Reconstruct session counter by adding most significant bits from -%% the current counter to the packet id. --spec packet_id_to_seqno(non_neg_integer(), emqx_types:packet_id()) -> non_neg_integer(). -packet_id_to_seqno(NextSeqNo, PacketId) -> - Epoch = NextSeqNo bsr 16, - case packet_id_to_seqno_(Epoch, PacketId) of - N when N =< NextSeqNo -> - N; - _ -> - packet_id_to_seqno_(Epoch - 1, PacketId) +discard_committed_ranges( + SessionId, + Commits, + Checkpoints, + Ranges = [Range = #ds_pubrange{until = Until, stream = StreamRef} | Rest] +) -> + case discard_committed_range(Commits, Range) of + discard -> + %% This range has been fully committed. + %% Either discard it completely, or preserve the iterator for the next range + %% over this stream (i.e. a checkpoint). 
+ RangeKept = + case maps:get(StreamRef, Checkpoints) of + CP when CP > Until -> + discard_range(Range), + []; + Until -> + [checkpoint_range(Range)] + end, + %% Since we're (intentionally) not using transactions here, it's important to + %% issue database writes in the same order in which ranges are stored: from + %% the oldest to the newest. This is also why we need to compute which ranges + %% should become checkpoints before we start writing anything. + RangeKept ++ discard_committed_ranges(SessionId, Commits, Checkpoints, Rest); + keep -> + %% This range has not been fully committed. + [Range | discard_committed_ranges(SessionId, Commits, Checkpoints, Rest)]; + keep_all -> + %% The rest of ranges (if any) still have uncommitted messages. + Ranges; + TracksLeft -> + %% Only some track has been committed. + %% Preserve the uncommitted tracks in the database. + RangeKept = Range#ds_pubrange{tracks = TracksLeft}, + preserve_range(restore_first_iterator(RangeKept)), + [RangeKept | discard_committed_ranges(SessionId, Commits, Checkpoints, Rest)] + end; +discard_committed_ranges(_SessionId, _Commits, _Checkpoints, []) -> + []. + +discard_committed_range(_Commits, #ds_pubrange{type = ?T_CHECKPOINT}) -> + discard; +discard_committed_range( + #{ack := AckedUntil, comp := CompUntil}, + #ds_pubrange{until = Until} +) when Until > AckedUntil andalso Until > CompUntil -> + keep_all; +discard_committed_range(Commits, #ds_pubrange{until = Until, tracks = Tracks}) -> + case discard_tracks(Commits, Until, Tracks) of + 0 -> + discard; + Tracks -> + keep; + TracksLeft -> + TracksLeft end. --spec packet_id_to_seqno_(non_neg_integer(), emqx_types:packet_id()) -> non_neg_integer(). -packet_id_to_seqno_(Epoch, PacketId) -> - (Epoch bsl 16) + PacketId. 
+discard_tracks(#{ack := AckedUntil, comp := CompUntil}, Until, Tracks) -> + TAck = + case Until > AckedUntil of + true -> ?TRACK_FLAG(?ACK) band Tracks; + false -> 0 + end, + TComp = + case Until > CompUntil of + true -> ?TRACK_FLAG(?COMP) band Tracks; + false -> 0 + end, + TAck bor TComp. + +replay_range( + ReplyFun, + Range0 = #ds_pubrange{type = ?T_INFLIGHT, id = {_, First}, until = Until, iterator = It}, + Acc +) -> + Size = range_size(First, Until), + {ok, ItNext, MessagesUnacked} = emqx_ds:next(?PERSISTENT_MESSAGE_DB, It, Size), + %% Asserting that range is consistent with the message storage state. + {Replies, {Until, _TracksInitial}} = publish(ReplyFun, First, MessagesUnacked), + %% Again, we need to keep the iterator pointing past the end of the + %% range, so that we can pick up where we left off. + Range = keep_next_iterator(ItNext, Range0), + {Range, Replies ++ Acc}; +replay_range(_ReplyFun, Range0 = #ds_pubrange{type = ?T_CHECKPOINT}, Acc) -> + {Range0, Acc}. + +validate_commit( + Track, + PacketId, + Inflight = #inflight{commits = Commits, next_seqno = NextSeqno} +) -> + Seqno = packet_id_to_seqno_(NextSeqno, PacketId), + CommittedUntil = maps:get(Track, Commits), + CommitNext = get_commit_next(Track, Inflight), + case Seqno >= CommittedUntil andalso Seqno < CommitNext of + true -> + next_seqno(Seqno); + false -> + ?SLOG(warning, #{ + msg => "out-of-order_commit", + track => Track, + packet_id => PacketId, + commit_seqno => Seqno, + committed_until => CommittedUntil, + commit_next => CommitNext + }), + false + end. + +get_commit_next(ack, #inflight{next_seqno = NextSeqno}) -> + NextSeqno; +get_commit_next(rec, #inflight{next_seqno = NextSeqno}) -> + NextSeqno; +get_commit_next(comp, #inflight{commits = Commits}) -> + maps:get(rec, Commits). 
+ +publish(ReplyFun, FirstSeqno, Messages) -> + lists:mapfoldl( + fun(Message, {Seqno, TAcc}) -> + case ReplyFun(Seqno, Message) of + {_Advance = false, Reply} -> + {Reply, {Seqno, TAcc}}; + Reply -> + NextSeqno = next_seqno(Seqno), + NextTAcc = add_msg_track(Message, TAcc), + {Reply, {NextSeqno, NextTAcc}} + end + end, + {FirstSeqno, 0}, + Messages + ). + +add_msg_track(Message, Tracks) -> + case emqx_message:qos(Message) of + 1 -> ?TRACK_FLAG(?ACK) bor Tracks; + 2 -> ?TRACK_FLAG(?COMP) bor Tracks; + _ -> Tracks + end. + +keep_next_iterator(ItNext, Range = #ds_pubrange{iterator = ItFirst, misc = Misc}) -> + Range#ds_pubrange{ + iterator = ItNext, + %% We need to keep the first iterator around, in case we need to preserve + %% this range again, updating still uncommitted tracks it's part of. + misc = Misc#{iterator_first => ItFirst} + }. + +restore_first_iterator(Range = #ds_pubrange{misc = Misc = #{iterator_first := ItFirst}}) -> + Range#ds_pubrange{ + iterator = ItFirst, + misc = maps:remove(iterator_first, Misc) + }. + +-spec preserve_range(ds_pubrange()) -> ok. +preserve_range(Range = #ds_pubrange{type = ?T_INFLIGHT}) -> + mria:dirty_write(?SESSION_PUBRANGE_TAB, Range). + +has_track(ack, Tracks) -> + (?TRACK_FLAG(?ACK) band Tracks) > 0; +has_track(comp, Tracks) -> + (?TRACK_FLAG(?COMP) band Tracks) > 0. + +-spec discard_range(ds_pubrange()) -> ok. +discard_range(#ds_pubrange{id = RangeId}) -> + mria:dirty_delete(?SESSION_PUBRANGE_TAB, RangeId). + +-spec checkpoint_range(ds_pubrange()) -> ds_pubrange(). +checkpoint_range(Range0 = #ds_pubrange{type = ?T_INFLIGHT}) -> + Range = Range0#ds_pubrange{type = ?T_CHECKPOINT, misc = #{}}, + ok = mria:dirty_write(?SESSION_PUBRANGE_TAB, Range), + Range; +checkpoint_range(Range = #ds_pubrange{type = ?T_CHECKPOINT}) -> + %% This range should have been checkpointed already. + Range. 
+ +get_last_iterator(DSStream = #ds_stream{ref = StreamRef}, Ranges) -> + case lists:keyfind(StreamRef, #ds_pubrange.stream, lists:reverse(Ranges)) of + false -> + DSStream#ds_stream.beginning; + #ds_pubrange{iterator = ItNext} -> + ItNext + end. + +-spec get_streams(emqx_persistent_session_ds:id()) -> [ds_stream()]. +get_streams(SessionId) -> + mnesia:dirty_read(?SESSION_STREAM_TAB, SessionId). + +-spec get_committed_offset(emqx_persistent_session_ds:id(), _Name) -> seqno(). +get_committed_offset(SessionId, Name) -> + case mnesia:read(?SESSION_COMMITTED_OFFSET_TAB, {SessionId, Name}) of + [] -> + 1; + [#ds_committed_offset{until = Seqno}] -> + Seqno + end. + +-spec update_committed_offset(emqx_persistent_session_ds:id(), _Name, seqno()) -> ok. +update_committed_offset(SessionId, Name, Until) -> + mria:dirty_write(?SESSION_COMMITTED_OFFSET_TAB, #ds_committed_offset{ + id = {SessionId, Name}, until = Until + }). + +next_seqno(Seqno) -> + NextSeqno = Seqno + 1, + case seqno_to_packet_id(NextSeqno) of + 0 -> + %% We skip sequence numbers that lead to PacketId = 0 to + %% simplify math. Note: it leads to occasional gaps in the + %% sequence numbers. + NextSeqno + 1; + _ -> + NextSeqno + end. + +packet_id_to_seqno_(NextSeqno, PacketId) -> + Epoch = NextSeqno bsr 16, + case (Epoch bsl 16) + PacketId of + N when N =< NextSeqno -> + N; + N -> + N - ?EPOCH_SIZE + end. + +range_size(FirstSeqno, UntilSeqno) -> + %% This function assumes that gaps in the sequence ID occur _only_ when the + %% packet ID wraps. + Size = UntilSeqno - FirstSeqno, + Size + (FirstSeqno bsr 16) - (UntilSeqno bsr 16). -spec shuffle([A]) -> [A]. shuffle(L0) -> @@ -259,24 +544,28 @@ shuffle(L0) -> {_, L} = lists:unzip(L2), L. +ro_transaction(Fun) -> + {atomic, Res} = mria:ro_transaction(?DS_MRIA_SHARD, Fun), + Res. + -ifdef(TEST). 
%% This test only tests boundary conditions (to make sure property-based test didn't skip them): packet_id_to_seqno_test() -> %% Packet ID = 1; first epoch: - ?assertEqual(1, packet_id_to_seqno(1, 1)), - ?assertEqual(1, packet_id_to_seqno(10, 1)), - ?assertEqual(1, packet_id_to_seqno(1 bsl 16 - 1, 1)), - ?assertEqual(1, packet_id_to_seqno(1 bsl 16, 1)), + ?assertEqual(1, packet_id_to_seqno_(1, 1)), + ?assertEqual(1, packet_id_to_seqno_(10, 1)), + ?assertEqual(1, packet_id_to_seqno_(1 bsl 16 - 1, 1)), + ?assertEqual(1, packet_id_to_seqno_(1 bsl 16, 1)), %% Packet ID = 1; second and 3rd epochs: - ?assertEqual(1 bsl 16 + 1, packet_id_to_seqno(1 bsl 16 + 1, 1)), - ?assertEqual(1 bsl 16 + 1, packet_id_to_seqno(2 bsl 16, 1)), - ?assertEqual(2 bsl 16 + 1, packet_id_to_seqno(2 bsl 16 + 1, 1)), + ?assertEqual(1 bsl 16 + 1, packet_id_to_seqno_(1 bsl 16 + 1, 1)), + ?assertEqual(1 bsl 16 + 1, packet_id_to_seqno_(2 bsl 16, 1)), + ?assertEqual(2 bsl 16 + 1, packet_id_to_seqno_(2 bsl 16 + 1, 1)), %% Packet ID = 16#ffff: PID = 1 bsl 16 - 1, - ?assertEqual(PID, packet_id_to_seqno(PID, PID)), - ?assertEqual(PID, packet_id_to_seqno(1 bsl 16, PID)), - ?assertEqual(1 bsl 16 + PID, packet_id_to_seqno(2 bsl 16, PID)), + ?assertEqual(PID, packet_id_to_seqno_(PID, PID)), + ?assertEqual(PID, packet_id_to_seqno_(1 bsl 16, PID)), + ?assertEqual(1 bsl 16 + PID, packet_id_to_seqno_(2 bsl 16, PID)), ok. packet_id_to_seqno_test_() -> @@ -291,8 +580,8 @@ packet_id_to_seqno_prop() -> SeqNo, seqno_gen(NextSeqNo), begin - PacketId = SeqNo rem 16#10000, - ?assertEqual(SeqNo, packet_id_to_seqno(NextSeqNo, PacketId)), + PacketId = seqno_to_packet_id(SeqNo), + ?assertEqual(SeqNo, packet_id_to_seqno_(NextSeqNo, PacketId)), true end ) @@ -311,4 +600,55 @@ seqno_gen(NextSeqNo) -> Max = max(0, NextSeqNo - 1), range(Min, Max). 
+range_size_test_() -> + [ + ?_assertEqual(0, range_size(42, 42)), + ?_assertEqual(1, range_size(42, 43)), + ?_assertEqual(1, range_size(16#ffff, 16#10001)), + ?_assertEqual(16#ffff - 456 + 123, range_size(16#1f0000 + 456, 16#200000 + 123)) + ]. + +compute_inflight_range_test_() -> + [ + ?_assertEqual( + {#{ack => 1, comp => 1}, 1}, + compute_inflight_range([]) + ), + ?_assertEqual( + {#{ack => 12, comp => 13}, 42}, + compute_inflight_range([ + #ds_pubrange{id = {<<>>, 1}, until = 2, type = ?T_CHECKPOINT}, + #ds_pubrange{id = {<<>>, 4}, until = 8, type = ?T_CHECKPOINT}, + #ds_pubrange{id = {<<>>, 11}, until = 12, type = ?T_CHECKPOINT}, + #ds_pubrange{ + id = {<<>>, 12}, + until = 13, + type = ?T_INFLIGHT, + tracks = ?TRACK_FLAG(?ACK) + }, + #ds_pubrange{ + id = {<<>>, 13}, + until = 20, + type = ?T_INFLIGHT, + tracks = ?TRACK_FLAG(?COMP) + }, + #ds_pubrange{ + id = {<<>>, 20}, + until = 42, + type = ?T_INFLIGHT, + tracks = ?TRACK_FLAG(?ACK) bor ?TRACK_FLAG(?COMP) + } + ]) + ), + ?_assertEqual( + {#{ack => 13, comp => 13}, 13}, + compute_inflight_range([ + #ds_pubrange{id = {<<>>, 1}, until = 2, type = ?T_CHECKPOINT}, + #ds_pubrange{id = {<<>>, 4}, until = 8, type = ?T_CHECKPOINT}, + #ds_pubrange{id = {<<>>, 11}, until = 12, type = ?T_CHECKPOINT}, + #ds_pubrange{id = {<<>>, 12}, until = 13, type = ?T_CHECKPOINT} + ]) + ) + ]. + -endif. diff --git a/apps/emqx/src/emqx_persistent_session_ds.erl b/apps/emqx/src/emqx_persistent_session_ds.erl index 6c0fc2dcc..9844e6d48 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.erl +++ b/apps/emqx/src/emqx_persistent_session_ds.erl @@ -56,13 +56,16 @@ deliver/3, replay/3, handle_timeout/3, - disconnect/1, + disconnect/2, terminate/2 ]). %% session table operations -export([create_tables/0]). +%% internal export used by session GC process +-export([destroy_session/1]). + %% Remove me later (satisfy checks for an unused BPAPI) -export([ do_open_iterator/3, @@ -70,24 +73,27 @@ do_ensure_all_iterators_closed/1 ]). 
+-export([print_session/1]). + -ifdef(TEST). -export([ - session_open/1, + session_open/2, list_all_sessions/0, list_all_subscriptions/0, list_all_streams/0, - list_all_iterators/0 + list_all_pubranges/0 ]). -endif. %% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be %% an atom, in theory (?). -type id() :: binary(). --type topic_filter() :: emqx_ds:topic_filter(). +-type topic_filter() :: emqx_types:topic(). +-type topic_filter_words() :: emqx_ds:topic_filter(). -type subscription_id() :: {id(), topic_filter()}. -type subscription() :: #{ start_time := emqx_ds:time(), - propts := map(), + props := map(), extra := map() }. -type session() :: #{ @@ -95,23 +101,26 @@ id := id(), %% When the session was created created_at := timestamp(), - %% When the session should expire - expires_at := timestamp() | never, + %% When the client was last considered alive + last_alive_at := timestamp(), %% Client’s Subscriptions. - iterators := #{topic() => subscription()}, + subscriptions := #{topic_filter() => subscription()}, %% Inflight messages inflight := emqx_persistent_message_ds_replayer:inflight(), %% Receive maximum receive_maximum := pos_integer(), + %% Connection Info + conninfo := emqx_types:conninfo(), %% props := map() }. -type timestamp() :: emqx_utils_calendar:epoch_millisecond(). --type topic() :: emqx_types:topic(). +-type millisecond() :: non_neg_integer(). -type clientinfo() :: emqx_types:clientinfo(). -type conninfo() :: emqx_session:conninfo(). -type replies() :: emqx_session:replies(). +-type timer() :: pull | get_streams | bump_last_alive_at. -define(STATS_KEYS, [ subscriptions_cnt, @@ -121,6 +130,12 @@ next_pkt_id ]). +-define(IS_EXPIRED(NOW_MS, LAST_ALIVE_AT, EI), + (is_number(LAST_ALIVE_AT) andalso + is_number(EI) andalso + (NOW_MS >= LAST_ALIVE_AT + EI)) +). + -export_type([id/0]). 
%% @@ -142,7 +157,7 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo) -> %% somehow isolate those idling not-yet-expired sessions into a separate process %% space, and move this call back into `emqx_cm` where it belongs. ok = emqx_cm:discard_session(ClientID), - case open_session(ClientID) of + case session_open(ClientID, ConnInfo) of Session0 = #{} -> ensure_timers(), ReceiveMaximum = receive_maximum(ConnInfo), @@ -153,24 +168,13 @@ open(#{clientid := ClientID} = _ClientInfo, ConnInfo) -> end. ensure_session(ClientID, ConnInfo, Conf) -> - {ok, Session, #{}} = session_ensure_new(ClientID, Conf), + Session = session_ensure_new(ClientID, ConnInfo, Conf), ReceiveMaximum = receive_maximum(ConnInfo), - Session#{iterators => #{}, receive_maximum => ReceiveMaximum}. - -open_session(ClientID) -> - case session_open(ClientID) of - {ok, Session, Subscriptions} -> - Session#{iterators => prep_subscriptions(Subscriptions)}; - false -> - false - end. - -prep_subscriptions(Subscriptions) -> - maps:fold( - fun(Topic, Subscription, Acc) -> Acc#{emqx_topic:join(Topic) => Subscription} end, - #{}, - Subscriptions - ). + Session#{ + conninfo => ConnInfo, + receive_maximum => ReceiveMaximum, + subscriptions => #{} + }. -spec destroy(session() | clientinfo()) -> ok. destroy(#{id := ClientID}) -> @@ -195,9 +199,9 @@ info(created_at, #{created_at := CreatedAt}) -> CreatedAt; info(is_persistent, #{}) -> true; -info(subscriptions, #{iterators := Iters}) -> +info(subscriptions, #{subscriptions := Iters}) -> maps:map(fun(_, #{props := SubOpts}) -> SubOpts end, Iters); -info(subscriptions_cnt, #{iterators := Iters}) -> +info(subscriptions_cnt, #{subscriptions := Iters}) -> maps:size(Iters); info(subscriptions_max, #{props := Conf}) -> maps:get(max_subscriptions, Conf); @@ -235,51 +239,71 @@ info(await_rel_timeout, #{props := Conf}) -> stats(Session) -> info(?STATS_KEYS, Session). +%% Debug/troubleshooting +-spec print_session(emqx_types:client_id()) -> map() | undefined. 
+print_session(ClientId) -> + catch ro_transaction( + fun() -> + case mnesia:read(?SESSION_TAB, ClientId) of + [Session] -> + #{ + session => Session, + streams => mnesia:read(?SESSION_STREAM_TAB, ClientId), + pubranges => session_read_pubranges(ClientId), + offsets => session_read_offsets(ClientId), + subscriptions => session_read_subscriptions(ClientId) + }; + [] -> + undefined + end + end + ). + %%-------------------------------------------------------------------- %% Client -> Broker: SUBSCRIBE / UNSUBSCRIBE %%-------------------------------------------------------------------- --spec subscribe(topic(), emqx_types:subopts(), session()) -> +-spec subscribe(topic_filter(), emqx_types:subopts(), session()) -> {ok, session()} | {error, emqx_types:reason_code()}. subscribe( TopicFilter, SubOpts, - Session = #{id := ID, iterators := Iters} -) when is_map_key(TopicFilter, Iters) -> - Iterator = maps:get(TopicFilter, Iters), - NIterator = update_subscription(TopicFilter, Iterator, SubOpts, ID), - {ok, Session#{iterators := Iters#{TopicFilter => NIterator}}}; + Session = #{id := ID, subscriptions := Subs} +) when is_map_key(TopicFilter, Subs) -> + Subscription = maps:get(TopicFilter, Subs), + NSubscription = update_subscription(TopicFilter, Subscription, SubOpts, ID), + {ok, Session#{subscriptions := Subs#{TopicFilter => NSubscription}}}; subscribe( TopicFilter, SubOpts, - Session = #{id := ID, iterators := Iters} + Session = #{id := ID, subscriptions := Subs} ) -> % TODO: max_subscriptions - Iterator = add_subscription(TopicFilter, SubOpts, ID), - {ok, Session#{iterators := Iters#{TopicFilter => Iterator}}}. + Subscription = add_subscription(TopicFilter, SubOpts, ID), + {ok, Session#{subscriptions := Subs#{TopicFilter => Subscription}}}. --spec unsubscribe(topic(), session()) -> +-spec unsubscribe(topic_filter(), session()) -> {ok, session(), emqx_types:subopts()} | {error, emqx_types:reason_code()}. 
unsubscribe( TopicFilter, - Session = #{id := ID, iterators := Iters} -) when is_map_key(TopicFilter, Iters) -> - Iterator = maps:get(TopicFilter, Iters), - SubOpts = maps:get(props, Iterator), + Session = #{id := ID, subscriptions := Subs} +) when is_map_key(TopicFilter, Subs) -> + Subscription = maps:get(TopicFilter, Subs), + SubOpts = maps:get(props, Subscription), ok = del_subscription(TopicFilter, ID), - {ok, Session#{iterators := maps:remove(TopicFilter, Iters)}, SubOpts}; + {ok, Session#{subscriptions := maps:remove(TopicFilter, Subs)}, SubOpts}; unsubscribe( _TopicFilter, _Session = #{} ) -> {error, ?RC_NO_SUBSCRIPTION_EXISTED}. --spec get_subscription(topic(), session()) -> +-spec get_subscription(topic_filter(), session()) -> emqx_types:subopts() | undefined. -get_subscription(TopicFilter, #{iterators := Iters}) -> - case maps:get(TopicFilter, Iters, undefined) of - Iterator = #{} -> - maps:get(props, Iterator); +get_subscription(TopicFilter, #{subscriptions := Subs}) -> + case maps:get(TopicFilter, Subs, undefined) of + Subscription = #{} -> + maps:get(props, Subscription); undefined -> undefined end. @@ -289,12 +313,12 @@ get_subscription(TopicFilter, #{iterators := Iters}) -> %%-------------------------------------------------------------------- -spec publish(emqx_types:packet_id(), emqx_types:message(), session()) -> - {ok, emqx_types:publish_result(), replies(), session()} + {ok, emqx_types:publish_result(), session()} | {error, emqx_types:reason_code()}. publish(_PacketId, Msg, Session) -> - %% TODO: + %% TODO: QoS2 Result = emqx_broker:publish(Msg), - {ok, Result, [], Session}. + {ok, Result, Session}. %%-------------------------------------------------------------------- %% Client -> Broker: PUBACK @@ -307,12 +331,13 @@ publish(_PacketId, Msg, Session) -> {ok, emqx_types:message(), replies(), session()} | {error, emqx_types:reason_code()}. 
puback(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) -> - case emqx_persistent_message_ds_replayer:commit_offset(Id, PacketId, Inflight0) of + case emqx_persistent_message_ds_replayer:commit_offset(Id, ack, PacketId, Inflight0) of {true, Inflight} -> %% TODO - Msg = #message{}, + Msg = emqx_message:make(Id, <<>>, <<>>), {ok, Msg, [], Session#{inflight => Inflight}}; {false, _} -> + %% Invalid Packet Id {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} end. @@ -323,9 +348,16 @@ puback(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) -> -spec pubrec(emqx_types:packet_id(), session()) -> {ok, emqx_types:message(), session()} | {error, emqx_types:reason_code()}. -pubrec(_PacketId, _Session = #{}) -> - % TODO: stub - {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND}. +pubrec(PacketId, Session = #{id := Id, inflight := Inflight0}) -> + case emqx_persistent_message_ds_replayer:commit_offset(Id, rec, PacketId, Inflight0) of + {true, Inflight} -> + %% TODO + Msg = emqx_message:make(Id, <<>>, <<>>), + {ok, Msg, Session#{inflight => Inflight}}; + {false, _} -> + %% Invalid Packet Id + {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} + end. %%-------------------------------------------------------------------- %% Client -> Broker: PUBREL @@ -344,16 +376,23 @@ pubrel(_PacketId, Session = #{}) -> -spec pubcomp(clientinfo(), emqx_types:packet_id(), session()) -> {ok, emqx_types:message(), replies(), session()} | {error, emqx_types:reason_code()}. -pubcomp(_ClientInfo, _PacketId, _Session = #{}) -> - % TODO: stub - {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND}. +pubcomp(_ClientInfo, PacketId, Session = #{id := Id, inflight := Inflight0}) -> + case emqx_persistent_message_ds_replayer:commit_offset(Id, comp, PacketId, Inflight0) of + {true, Inflight} -> + %% TODO + Msg = emqx_message:make(Id, <<>>, <<>>), + {ok, Msg, [], Session#{inflight => Inflight}}; + {false, _} -> + %% Invalid Packet Id + {error, ?RC_PACKET_IDENTIFIER_NOT_FOUND} + end. 
%%-------------------------------------------------------------------- -spec deliver(clientinfo(), [emqx_types:deliver()], session()) -> {ok, replies(), session()}. deliver(_ClientInfo, _Delivers, Session) -> - %% TODO: QoS0 and system messages end up here. + %% TODO: system messages end up here. {ok, [], Session}. -spec handle_timeout(clientinfo(), _Timeout, session()) -> @@ -363,43 +402,82 @@ handle_timeout( pull, Session = #{id := Id, inflight := Inflight0, receive_maximum := ReceiveMaximum} ) -> - {Publishes, Inflight} = emqx_persistent_message_ds_replayer:poll(Id, Inflight0, ReceiveMaximum), - %% TODO: make these values configurable: + {Publishes, Inflight} = emqx_persistent_message_ds_replayer:poll( + fun + (_Seqno, Message = #message{qos = ?QOS_0}) -> + {false, {undefined, Message}}; + (Seqno, Message) -> + PacketId = emqx_persistent_message_ds_replayer:seqno_to_packet_id(Seqno), + {PacketId, Message} + end, + Id, + Inflight0, + ReceiveMaximum + ), + IdlePollInterval = emqx_config:get([session_persistence, idle_poll_interval]), Timeout = case Publishes of [] -> - 100; + IdlePollInterval; [_ | _] -> 0 end, ensure_timer(pull, Timeout), - {ok, Publishes, Session#{inflight => Inflight}}; -handle_timeout(_ClientInfo, get_streams, Session = #{id := Id}) -> - renew_streams(Id), + {ok, Publishes, Session#{inflight := Inflight}}; +handle_timeout(_ClientInfo, get_streams, Session) -> + renew_streams(Session), ensure_timer(get_streams), + {ok, [], Session}; +handle_timeout(_ClientInfo, bump_last_alive_at, Session0) -> + %% Note: we take a pessimistic approach here and assume that the client will be alive + %% until the next bump timeout. With this, we avoid garbage collecting this session + %% too early in case the session/connection/node crashes earlier without having time + %% to commit the time. 
+ BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]), + EstimatedLastAliveAt = now_ms() + BumpInterval, + Session = session_set_last_alive_at_trans(Session0, EstimatedLastAliveAt), + ensure_timer(bump_last_alive_at), {ok, [], Session}. -spec replay(clientinfo(), [], session()) -> {ok, replies(), session()}. -replay(_ClientInfo, [], Session = #{}) -> - {ok, [], Session}. +replay(_ClientInfo, [], Session = #{inflight := Inflight0}) -> + AckedUntil = emqx_persistent_message_ds_replayer:committed_until(ack, Inflight0), + RecUntil = emqx_persistent_message_ds_replayer:committed_until(rec, Inflight0), + CompUntil = emqx_persistent_message_ds_replayer:committed_until(comp, Inflight0), + ReplyFun = fun + (_Seqno, #message{qos = ?QOS_0}) -> + {false, []}; + (Seqno, #message{qos = ?QOS_1}) when Seqno < AckedUntil -> + []; + (Seqno, #message{qos = ?QOS_2}) when Seqno < CompUntil -> + []; + (Seqno, #message{qos = ?QOS_2}) when Seqno < RecUntil -> + PacketId = emqx_persistent_message_ds_replayer:seqno_to_packet_id(Seqno), + {pubrel, PacketId}; + (Seqno, Message) -> + PacketId = emqx_persistent_message_ds_replayer:seqno_to_packet_id(Seqno), + {PacketId, emqx_message:set_flag(dup, true, Message)} + end, + {Replies, Inflight} = emqx_persistent_message_ds_replayer:replay(ReplyFun, Inflight0), + {ok, Replies, Session#{inflight := Inflight}}. %%-------------------------------------------------------------------- --spec disconnect(session()) -> {shutdown, session()}. -disconnect(Session = #{}) -> +-spec disconnect(session(), emqx_types:conninfo()) -> {shutdown, session()}. +disconnect(Session0, ConnInfo) -> + Session = session_set_last_alive_at_trans(Session0, ConnInfo, now_ms()), {shutdown, Session}. -spec terminate(Reason :: term(), session()) -> ok. terminate(_Reason, _Session = #{}) -> - % TODO: close iterators ok. 
%%-------------------------------------------------------------------- --spec add_subscription(topic(), emqx_types:subopts(), id()) -> +-spec add_subscription(topic_filter(), emqx_types:subopts(), id()) -> subscription(). -add_subscription(TopicFilterBin, SubOpts, DSSessionID) -> +add_subscription(TopicFilter, SubOpts, DSSessionID) -> %% N.B.: we chose to update the router before adding the subscription to the %% session/iterator table. The reasoning for this is as follows: %% @@ -418,8 +496,7 @@ add_subscription(TopicFilterBin, SubOpts, DSSessionID) -> %% since it is guarded by a transaction context: we consider a subscription %% operation to be successful if it ended up changing this table. Both router %% and iterator information can be reconstructed from this table, if needed. - ok = emqx_persistent_session_ds_router:do_add_route(TopicFilterBin, DSSessionID), - TopicFilter = emqx_topic:words(TopicFilterBin), + ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, DSSessionID), {ok, DSSubExt, IsNew} = session_add_subscription( DSSessionID, TopicFilter, SubOpts ), @@ -427,20 +504,19 @@ add_subscription(TopicFilterBin, SubOpts, DSSessionID) -> %% we'll list streams and open iterators when implementing message replay. DSSubExt. --spec update_subscription(topic(), subscription(), emqx_types:subopts(), id()) -> +-spec update_subscription(topic_filter(), subscription(), emqx_types:subopts(), id()) -> subscription(). -update_subscription(TopicFilterBin, DSSubExt, SubOpts, DSSessionID) -> - TopicFilter = emqx_topic:words(TopicFilterBin), +update_subscription(TopicFilter, DSSubExt, SubOpts, DSSessionID) -> {ok, NDSSubExt, false} = session_add_subscription( DSSessionID, TopicFilter, SubOpts ), ok = ?tp(persistent_session_ds_iterator_updated, #{sub => DSSubExt}), NDSSubExt. --spec del_subscription(topic(), id()) -> +-spec del_subscription(topic_filter(), id()) -> ok. 
-del_subscription(TopicFilterBin, DSSessionId) -> - TopicFilter = emqx_topic:words(TopicFilterBin), +del_subscription(TopicFilter, DSSessionId) -> + %% TODO: transaction? ?tp_span( persistent_session_ds_subscription_delete, #{session_id => DSSessionId}, @@ -449,7 +525,7 @@ del_subscription(TopicFilterBin, DSSessionId) -> ?tp_span( persistent_session_ds_subscription_route_delete, #{session_id => DSSessionId}, - ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilterBin, DSSessionId) + ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, DSSessionId) ). %%-------------------------------------------------------------------- @@ -457,10 +533,6 @@ del_subscription(TopicFilterBin, DSSessionId) -> %%-------------------------------------------------------------------- create_tables() -> - ok = emqx_ds:open_db(?PERSISTENT_MESSAGE_DB, #{ - backend => builtin, - storage => {emqx_ds_storage_bitfield_lts, #{}} - }), ok = mria:create_table( ?SESSION_TAB, [ @@ -492,17 +564,31 @@ create_tables() -> ] ), ok = mria:create_table( - ?SESSION_ITER_TAB, + ?SESSION_PUBRANGE_TAB, + [ + {rlog_shard, ?DS_MRIA_SHARD}, + {type, ordered_set}, + {storage, storage()}, + {record_name, ds_pubrange}, + {attributes, record_info(fields, ds_pubrange)} + ] + ), + ok = mria:create_table( + ?SESSION_COMMITTED_OFFSET_TAB, [ {rlog_shard, ?DS_MRIA_SHARD}, {type, set}, {storage, storage()}, - {record_name, ds_iter}, - {attributes, record_info(fields, ds_iter)} + {record_name, ds_committed_offset}, + {attributes, record_info(fields, ds_committed_offset)} ] ), ok = mria:wait_for_tables([ - ?SESSION_TAB, ?SESSION_SUBSCRIPTIONS_TAB, ?SESSION_STREAM_TAB, ?SESSION_ITER_TAB + ?SESSION_TAB, + ?SESSION_SUBSCRIPTIONS_TAB, + ?SESSION_STREAM_TAB, + ?SESSION_PUBRANGE_TAB, + ?SESSION_COMMITTED_OFFSET_TAB ]), ok. @@ -521,60 +607,103 @@ storage() -> %% %% Note: session API doesn't handle session takeovers, it's the job of %% the broker. 
--spec session_open(id()) -> - {ok, session(), #{topic() => subscription()}} | false. -session_open(SessionId) -> +-spec session_open(id(), emqx_types:conninfo()) -> + session() | false. +session_open(SessionId, NewConnInfo) -> + NowMS = now_ms(), transaction(fun() -> case mnesia:read(?SESSION_TAB, SessionId, write) of - [Record = #session{}] -> - Session = export_session(Record), - DSSubs = session_read_subscriptions(SessionId), - Subscriptions = export_subscriptions(DSSubs), - {ok, Session, Subscriptions}; - [] -> + [Record0 = #session{last_alive_at = LastAliveAt, conninfo = ConnInfo}] -> + EI = expiry_interval(ConnInfo), + case ?IS_EXPIRED(NowMS, LastAliveAt, EI) of + true -> + session_drop(SessionId), + false; + false -> + %% new connection being established + Record1 = Record0#session{conninfo = NewConnInfo}, + Record = session_set_last_alive_at(Record1, NowMS), + Session = export_session(Record), + DSSubs = session_read_subscriptions(SessionId), + Subscriptions = export_subscriptions(DSSubs), + Inflight = emqx_persistent_message_ds_replayer:open(SessionId), + Session#{ + conninfo => NewConnInfo, + inflight => Inflight, + subscriptions => Subscriptions + } + end; + _ -> false end end). --spec session_ensure_new(id(), _Props :: map()) -> - {ok, session(), #{topic() => subscription()}}. -session_ensure_new(SessionId, Props) -> +-spec session_ensure_new(id(), emqx_types:conninfo(), _Props :: map()) -> + session(). +session_ensure_new(SessionId, ConnInfo, Props) -> transaction(fun() -> ok = session_drop_subscriptions(SessionId), - Session = export_session(session_create(SessionId, Props)), - {ok, Session, #{}} + Session = export_session(session_create(SessionId, ConnInfo, Props)), + Session#{ + subscriptions => #{}, + inflight => emqx_persistent_message_ds_replayer:new() + } end). 
-session_create(SessionId, Props) -> +session_create(SessionId, ConnInfo, Props) -> Session = #session{ id = SessionId, - created_at = erlang:system_time(millisecond), - expires_at = never, - props = Props, - inflight = emqx_persistent_message_ds_replayer:new() + created_at = now_ms(), + last_alive_at = now_ms(), + conninfo = ConnInfo, + props = Props }, ok = mnesia:write(?SESSION_TAB, Session, write), Session. +session_set_last_alive_at_trans(Session, LastAliveAt) -> + #{conninfo := ConnInfo} = Session, + session_set_last_alive_at_trans(Session, ConnInfo, LastAliveAt). + +session_set_last_alive_at_trans(Session, NewConnInfo, LastAliveAt) -> + #{id := SessionId} = Session, + transaction(fun() -> + case mnesia:read(?SESSION_TAB, SessionId, write) of + [#session{} = SessionRecord0] -> + SessionRecord = SessionRecord0#session{conninfo = NewConnInfo}, + _ = session_set_last_alive_at(SessionRecord, LastAliveAt), + ok; + _ -> + %% log and crash? + ok + end + end), + Session#{conninfo := NewConnInfo, last_alive_at := LastAliveAt}. + +session_set_last_alive_at(SessionRecord0, LastAliveAt) -> + SessionRecord = SessionRecord0#session{last_alive_at = LastAliveAt}, + ok = mnesia:write(?SESSION_TAB, SessionRecord, write), + SessionRecord. + %% @doc Called when a client reconnects with `clean session=true' or %% during session GC -spec session_drop(id()) -> ok. session_drop(DSSessionId) -> transaction(fun() -> ok = session_drop_subscriptions(DSSessionId), - ok = session_drop_iterators(DSSessionId), + ok = session_drop_pubranges(DSSessionId), + ok = session_drop_offsets(DSSessionId), ok = session_drop_streams(DSSessionId), ok = mnesia:delete(?SESSION_TAB, DSSessionId, write) end). -spec session_drop_subscriptions(id()) -> ok. 
session_drop_subscriptions(DSSessionId) -> - Subscriptions = session_read_subscriptions(DSSessionId), + Subscriptions = session_read_subscriptions(DSSessionId, write), lists:foreach( fun(#ds_sub{id = DSSubId} = DSSub) -> TopicFilter = subscription_id_to_topic_filter(DSSubId), - TopicFilterBin = emqx_topic:join(TopicFilter), - ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilterBin, DSSessionId), + ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, DSSessionId), ok = session_del_subscription(DSSub) end, Subscriptions @@ -633,19 +762,44 @@ session_del_subscription(DSSessionId, TopicFilter) -> session_del_subscription(#ds_sub{id = DSSubId}) -> mnesia:delete(?SESSION_SUBSCRIPTIONS_TAB, DSSubId, write). -session_read_subscriptions(DSSessionId) -> +session_read_subscriptions(DSSessionID) -> + session_read_subscriptions(DSSessionID, read). + +session_read_subscriptions(DSSessionId, LockKind) -> MS = ets:fun2ms( fun(Sub = #ds_sub{id = {Sess, _}}) when Sess =:= DSSessionId -> Sub end ), - mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, read). + mnesia:select(?SESSION_SUBSCRIPTIONS_TAB, MS, LockKind). + +session_read_pubranges(DSSessionID) -> + session_read_pubranges(DSSessionID, read). + +session_read_pubranges(DSSessionId, LockKind) -> + MS = ets:fun2ms( + fun(#ds_pubrange{id = {Sess, First}}) when Sess =:= DSSessionId -> + {DSSessionId, First} + end + ), + mnesia:select(?SESSION_PUBRANGE_TAB, MS, LockKind). + +session_read_offsets(DSSessionID) -> + session_read_offsets(DSSessionID, read). + +session_read_offsets(DSSessionId, LockKind) -> + MS = ets:fun2ms( + fun(#ds_committed_offset{id = {Sess, Type}}) when Sess =:= DSSessionId -> + {DSSessionId, Type} + end + ), + mnesia:select(?SESSION_COMMITTED_OFFSET_TAB, MS, LockKind). -spec new_subscription_id(id(), topic_filter()) -> {subscription_id(), integer()}. 
new_subscription_id(DSSessionId, TopicFilter) -> %% Note: here we use _milliseconds_ to match with the timestamp %% field of `#message' record. - NowMS = erlang:system_time(millisecond), + NowMS = now_ms(), DSSubId = {DSSessionId, TopicFilter}, {DSSubId, NowMS}. @@ -653,6 +807,9 @@ new_subscription_id(DSSessionId, TopicFilter) -> subscription_id_to_topic_filter({_DSSessionId, TopicFilter}) -> TopicFilter. +now_ms() -> + erlang:system_time(millisecond). + %%-------------------------------------------------------------------- %% RPC targets (v1) %%-------------------------------------------------------------------- @@ -677,84 +834,100 @@ do_ensure_all_iterators_closed(_DSSessionID) -> %% Reading batches %%-------------------------------------------------------------------- --spec renew_streams(id()) -> ok. -renew_streams(DSSessionId) -> - Subscriptions = ro_transaction(fun() -> session_read_subscriptions(DSSessionId) end), - ExistingStreams = ro_transaction(fun() -> mnesia:read(?SESSION_STREAM_TAB, DSSessionId) end), - lists:foreach( - fun(#ds_sub{id = {_, TopicFilter}, start_time = StartTime}) -> - renew_streams(DSSessionId, ExistingStreams, TopicFilter, StartTime) +-spec renew_streams(session()) -> ok. +renew_streams(#{id := SessionId, subscriptions := Subscriptions}) -> + transaction(fun() -> + ExistingStreams = mnesia:read(?SESSION_STREAM_TAB, SessionId, write), + maps:fold( + fun(TopicFilter, #{start_time := StartTime}, Streams) -> + TopicFilterWords = emqx_topic:words(TopicFilter), + renew_topic_streams(SessionId, TopicFilterWords, StartTime, Streams) + end, + ExistingStreams, + Subscriptions + ) + end), + ok. + +-spec renew_topic_streams(id(), topic_filter_words(), emqx_ds:time(), _Acc :: [ds_stream()]) -> ok. 
+renew_topic_streams(DSSessionId, TopicFilter, StartTime, ExistingStreams) -> + TopicStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime), + lists:foldl( + fun({Rank, Stream}, Streams) -> + case lists:keymember(Stream, #ds_stream.stream, Streams) of + true -> + Streams; + false -> + StreamRef = length(Streams) + 1, + DSStream = session_store_stream( + DSSessionId, + StreamRef, + Stream, + Rank, + TopicFilter, + StartTime + ), + [DSStream | Streams] + end end, - Subscriptions + ExistingStreams, + TopicStreams ). --spec renew_streams(id(), [ds_stream()], emqx_ds:topic_filter(), emqx_ds:time()) -> ok. -renew_streams(DSSessionId, ExistingStreams, TopicFilter, StartTime) -> - AllStreams = emqx_ds:get_streams(?PERSISTENT_MESSAGE_DB, TopicFilter, StartTime), - transaction( - fun() -> - lists:foreach( - fun({Rank, Stream}) -> - Rec = #ds_stream{ - session = DSSessionId, - topic_filter = TopicFilter, - stream = Stream, - rank = Rank - }, - case lists:member(Rec, ExistingStreams) of - true -> - ok; - false -> - mnesia:write(?SESSION_STREAM_TAB, Rec, write), - {ok, Iterator} = emqx_ds:make_iterator( - ?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime - ), - %% Workaround: we convert `Stream' to a binary before - %% attempting to store it in mnesia(rocksdb) because of a bug - %% in `mnesia_rocksdb' when trying to do - %% `mnesia:dirty_all_keys' later. - StreamBin = term_to_binary(Stream), - IterRec = #ds_iter{id = {DSSessionId, StreamBin}, iter = Iterator}, - mnesia:write(?SESSION_ITER_TAB, IterRec, write) - end - end, - AllStreams - ) - end - ). +session_store_stream(DSSessionId, StreamRef, Stream, Rank, TopicFilter, StartTime) -> + {ok, ItBegin} = emqx_ds:make_iterator( + ?PERSISTENT_MESSAGE_DB, + Stream, + TopicFilter, + StartTime + ), + DSStream = #ds_stream{ + session = DSSessionId, + ref = StreamRef, + stream = Stream, + rank = Rank, + beginning = ItBegin + }, + mnesia:write(?SESSION_STREAM_TAB, DSStream, write), + DSStream. 
%% must be called inside a transaction -spec session_drop_streams(id()) -> ok. session_drop_streams(DSSessionId) -> - MS = ets:fun2ms( - fun(#ds_stream{session = DSSessionId0}) when DSSessionId0 =:= DSSessionId -> - DSSessionId0 - end - ), - StreamIDs = mnesia:select(?SESSION_STREAM_TAB, MS, write), - lists:foreach(fun(Key) -> mnesia:delete(?SESSION_STREAM_TAB, Key, write) end, StreamIDs). + mnesia:delete(?SESSION_STREAM_TAB, DSSessionId, write). %% must be called inside a transaction --spec session_drop_iterators(id()) -> ok. -session_drop_iterators(DSSessionId) -> - MS = ets:fun2ms( - fun(#ds_iter{id = {DSSessionId0, StreamBin}}) when DSSessionId0 =:= DSSessionId -> - StreamBin - end - ), - StreamBins = mnesia:select(?SESSION_ITER_TAB, MS, write), +-spec session_drop_pubranges(id()) -> ok. +session_drop_pubranges(DSSessionId) -> + RangeIds = session_read_pubranges(DSSessionId, write), lists:foreach( - fun(StreamBin) -> - mnesia:delete(?SESSION_ITER_TAB, {DSSessionId, StreamBin}, write) + fun(RangeId) -> + mnesia:delete(?SESSION_PUBRANGE_TAB, RangeId, write) end, - StreamBins + RangeIds + ). + +%% must be called inside a transaction +-spec session_drop_offsets(id()) -> ok. +session_drop_offsets(DSSessionId) -> + OffsetIds = session_read_offsets(DSSessionId, write), + lists:foreach( + fun(OffsetId) -> + mnesia:delete(?SESSION_COMMITTED_OFFSET_TAB, OffsetId, write) + end, + OffsetIds ). %%-------------------------------------------------------------------------------- transaction(Fun) -> - {atomic, Res} = mria:transaction(?DS_MRIA_SHARD, Fun), - Res. + case mnesia:is_transaction() of + true -> + Fun(); + false -> + {atomic, Res} = mria:transaction(?DS_MRIA_SHARD, Fun), + Res + end. ro_transaction(Fun) -> {atomic, Res} = mria:ro_transaction(?DS_MRIA_SHARD, Fun), @@ -772,7 +945,7 @@ export_subscriptions(DSSubs) -> ). export_session(#session{} = Record) -> - export_record(Record, #session.id, [id, created_at, expires_at, inflight, props], #{}). 
+ export_record(Record, #session.id, [id, created_at, last_alive_at, conninfo, props], #{}). export_subscription(#ds_sub{} = Record) -> export_record(Record, #ds_sub.start_time, [start_time, props, extra], #{}). @@ -786,13 +959,17 @@ export_record(_, _, [], Acc) -> %% effects. Add `CBM:init' callback to the session behavior? ensure_timers() -> ensure_timer(pull), - ensure_timer(get_streams). + ensure_timer(get_streams), + ensure_timer(bump_last_alive_at). --spec ensure_timer(pull | get_streams) -> ok. +-spec ensure_timer(timer()) -> ok. +ensure_timer(bump_last_alive_at = Type) -> + BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]), + ensure_timer(Type, BumpInterval); ensure_timer(Type) -> ensure_timer(Type, 100). --spec ensure_timer(pull | get_streams, non_neg_integer()) -> ok. +-spec ensure_timer(timer(), non_neg_integer()) -> ok. ensure_timer(Type, Timeout) -> _ = emqx_utils:start_timer(Timeout, {emqx_session, Type}), ok. @@ -804,13 +981,23 @@ receive_maximum(ConnInfo) -> %% indicates that it's optional. maps:get(receive_maximum, ConnInfo, 65_535). +-spec expiry_interval(conninfo()) -> millisecond(). +expiry_interval(ConnInfo) -> + maps:get(expiry_interval, ConnInfo, 0). + -ifdef(TEST). list_all_sessions() -> DSSessionIds = mnesia:dirty_all_keys(?SESSION_TAB), - Sessions = lists:map( + ConnInfo = #{}, + Sessions = lists:filtermap( fun(SessionID) -> - {ok, Session, Subscriptions} = session_open(SessionID), - {SessionID, #{session => Session, subscriptions => Subscriptions}} + Sess = session_open(SessionID, ConnInfo), + case Sess of + false -> + false; + _ -> + {true, {SessionID, Sess}} + end end, DSSessionIds ), @@ -850,16 +1037,18 @@ list_all_streams() -> ), maps:from_list(DSStreams). 
-list_all_iterators() -> - DSIterIds = mnesia:dirty_all_keys(?SESSION_ITER_TAB), - DSIters = lists:map( - fun(DSIterId) -> - [Record] = mnesia:dirty_read(?SESSION_ITER_TAB, DSIterId), - {DSIterId, export_record(Record, #ds_iter.id, [id, iter], #{})} +list_all_pubranges() -> + DSPubranges = mnesia:dirty_match_object(?SESSION_PUBRANGE_TAB, #ds_pubrange{_ = '_'}), + lists:foldl( + fun(Record = #ds_pubrange{id = {SessionId, First}}, Acc) -> + Range = export_record( + Record, #ds_pubrange.until, [until, stream, type, iterator], #{first => First} + ), + maps:put(SessionId, maps:get(SessionId, Acc, []) ++ [Range], Acc) end, - DSIterIds - ), - maps:from_list(DSIters). + #{}, + DSPubranges + ). %% ifdef(TEST) -endif. diff --git a/apps/emqx/src/emqx_persistent_session_ds.hrl b/apps/emqx/src/emqx_persistent_session_ds.hrl index cc995ce66..306e63b2e 100644 --- a/apps/emqx/src/emqx_persistent_session_ds.hrl +++ b/apps/emqx/src/emqx_persistent_session_ds.hrl @@ -21,9 +21,13 @@ -define(SESSION_TAB, emqx_ds_session). -define(SESSION_SUBSCRIPTIONS_TAB, emqx_ds_session_subscriptions). -define(SESSION_STREAM_TAB, emqx_ds_stream_tab). --define(SESSION_ITER_TAB, emqx_ds_iter_tab). +-define(SESSION_PUBRANGE_TAB, emqx_ds_pubrange_tab). +-define(SESSION_COMMITTED_OFFSET_TAB, emqx_ds_committed_offset_tab). -define(DS_MRIA_SHARD, emqx_ds_session_shard). +-define(T_INFLIGHT, 1). +-define(T_CHECKPOINT, 2). + -record(ds_sub, { id :: emqx_persistent_session_ds:subscription_id(), start_time :: emqx_ds:time(), @@ -34,16 +38,53 @@ -record(ds_stream, { session :: emqx_persistent_session_ds:id(), - topic_filter :: emqx_ds:topic_filter(), + ref :: _StreamRef, stream :: emqx_ds:stream(), - rank :: emqx_ds:stream_rank() + rank :: emqx_ds:stream_rank(), + beginning :: emqx_ds:iterator() }). -type ds_stream() :: #ds_stream{}. --type ds_stream_bin() :: binary(). 
--record(ds_iter, { - id :: {emqx_persistent_session_ds:id(), ds_stream_bin()}, - iter :: emqx_ds:iterator() +-record(ds_pubrange, { + id :: { + %% What session this range belongs to. + _Session :: emqx_persistent_session_ds:id(), + %% Where this range starts. + _First :: emqx_persistent_message_ds_replayer:seqno() + }, + %% Where this range ends: the first seqno that is not included in the range. + until :: emqx_persistent_message_ds_replayer:seqno(), + %% Which stream this range is over. + stream :: _StreamRef, + %% Type of a range: + %% * Inflight range is a range of yet unacked messages from this stream. + %% * Checkpoint range was already acked, its purpose is to keep track of the + %% very last iterator for this stream. + type :: ?T_INFLIGHT | ?T_CHECKPOINT, + %% What commit tracks this range is part of. + %% This is rarely stored: we only need to persist it when the range contains + %% QoS 2 messages. + tracks = 0 :: non_neg_integer(), + %% Meaning of this depends on the type of the range: + %% * For inflight range, this is the iterator pointing to the first message in + %% the range. + %% * For checkpoint range, this is the iterator pointing right past the last + %% message in the range. + iterator :: emqx_ds:iterator(), + %% Reserved for future use. + misc = #{} :: map() +}). +-type ds_pubrange() :: #ds_pubrange{}. + +-record(ds_committed_offset, { + id :: { + %% What session this marker belongs to. + _Session :: emqx_persistent_session_ds:id(), + %% Marker name. + _CommitType + }, + %% Where this marker is pointing to: the first seqno that is not marked. + until :: emqx_persistent_message_ds_replayer:seqno() }). 
-record(session, { @@ -51,8 +92,8 @@ id :: emqx_persistent_session_ds:id(), %% creation time created_at :: _Millisecond :: non_neg_integer(), - expires_at = never :: _Millisecond :: non_neg_integer() | never, - inflight :: emqx_persistent_message_ds_replayer:inflight(), + last_alive_at :: _Millisecond :: non_neg_integer(), + conninfo :: emqx_types:conninfo(), %% for future usage props = #{} :: map() }). diff --git a/apps/emqx/src/emqx_persistent_session_ds_gc_worker.erl b/apps/emqx/src/emqx_persistent_session_ds_gc_worker.erl new file mode 100644 index 000000000..bf607804f --- /dev/null +++ b/apps/emqx/src/emqx_persistent_session_ds_gc_worker.erl @@ -0,0 +1,161 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_persistent_session_ds_gc_worker). + +-behaviour(gen_server). + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("stdlib/include/qlc.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). + +-include("emqx_persistent_session_ds.hrl"). + +%% API +-export([ + start_link/0 +]). + +%% `gen_server' API +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2 +]). + +%% call/cast/info records +-record(gc, {}). 
+ +%%-------------------------------------------------------------------------------- +%% API +%%-------------------------------------------------------------------------------- + +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +%%-------------------------------------------------------------------------------- +%% `gen_server' API +%%-------------------------------------------------------------------------------- + +init(_Opts) -> + ensure_gc_timer(), + State = #{}, + {ok, State}. + +handle_call(_Call, _From, State) -> + {reply, error, State}. + +handle_cast(_Cast, State) -> + {noreply, State}. + +handle_info(#gc{}, State) -> + try_gc(), + ensure_gc_timer(), + {noreply, State}; +handle_info(_Info, State) -> + {noreply, State}. + +%%-------------------------------------------------------------------------------- +%% Internal fns +%%-------------------------------------------------------------------------------- + +ensure_gc_timer() -> + Timeout = emqx_config:get([session_persistence, session_gc_interval]), + _ = erlang:send_after(Timeout, self(), #gc{}), + ok. + +try_gc() -> + %% Only cores should run GC. + CoreNodes = mria_membership:running_core_nodelist(), + Res = global:trans( + {?MODULE, self()}, + fun() -> ?tp_span(ds_session_gc, #{}, start_gc()) end, + CoreNodes, + %% Note: we set retries to 1 here because, in rare occasions, GC might start at the + %% same time in more than one node, and each one will abort the other. By allowing + %% one retry, at least one node will (hopefully) get to enter the transaction and + %% the other will abort. If GC runs too fast, both nodes might run in sequence. + %% But, in that case, GC is clearly not too costly, and that shouldn't be a problem, + %% resource-wise. + _Retries = 1 + ), + case Res of + aborted -> + ?tp(ds_session_gc_lock_taken, #{}), + ok; + ok -> + ok + end. + +now_ms() -> + erlang:system_time(millisecond). + +start_gc() -> + do_gc(more). 
+ +zombie_session_ms() -> + NowMS = now_ms(), + GCInterval = emqx_config:get([session_persistence, session_gc_interval]), + BumpInterval = emqx_config:get([session_persistence, last_alive_update_interval]), + TimeThreshold = max(GCInterval, BumpInterval) * 3, + ets:fun2ms( + fun( + #session{ + id = DSSessionId, + last_alive_at = LastAliveAt, + conninfo = #{expiry_interval := EI} + } + ) when + LastAliveAt + EI + TimeThreshold =< NowMS + -> + DSSessionId + end + ). + +do_gc(more) -> + GCBatchSize = emqx_config:get([session_persistence, session_gc_batch_size]), + MS = zombie_session_ms(), + {atomic, Next} = mria:transaction(?DS_MRIA_SHARD, fun() -> + Res = mnesia:select(?SESSION_TAB, MS, GCBatchSize, write), + case Res of + '$end_of_table' -> + done; + {[], Cont} -> + %% since `GCBatchsize' is just a "recommendation" for `select', we try only + %% _once_ the continuation and then stop if it yields nothing, to avoid a + %% dead loop. + case mnesia:select(Cont) of + '$end_of_table' -> + done; + {[], _Cont} -> + done; + {DSSessionIds0, _Cont} -> + do_gc_(DSSessionIds0), + more + end; + {DSSessionIds0, _Cont} -> + do_gc_(DSSessionIds0), + more + end + end), + do_gc(Next); +do_gc(done) -> + ok. + +do_gc_(DSSessionIds) -> + lists:foreach(fun emqx_persistent_session_ds:destroy_session/1, DSSessionIds), + ?tp(ds_session_gc_cleaned, #{session_ids => DSSessionIds}), + ok. diff --git a/apps/emqx/src/emqx_persistent_session_ds_sup.erl b/apps/emqx/src/emqx_persistent_session_ds_sup.erl new file mode 100644 index 000000000..5bd620e8b --- /dev/null +++ b/apps/emqx/src/emqx_persistent_session_ds_sup.erl @@ -0,0 +1,78 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_persistent_session_ds_sup). + +-behaviour(supervisor). + +%% API +-export([ + start_link/0 +]). + +%% `supervisor' API +-export([ + init/1 +]). + +%%-------------------------------------------------------------------------------- +%% API +%%-------------------------------------------------------------------------------- + +start_link() -> + supervisor:start_link({local, ?MODULE}, ?MODULE, []). + +%%-------------------------------------------------------------------------------- +%% `supervisor' API +%%-------------------------------------------------------------------------------- + +init(Opts) -> + case emqx_persistent_message:is_persistence_enabled() of + true -> + do_init(Opts); + false -> + ignore + end. + +do_init(_Opts) -> + SupFlags = #{ + strategy => rest_for_one, + intensity => 10, + period => 2, + auto_shutdown => never + }, + CoreChildren = [ + worker(gc_worker, emqx_persistent_session_ds_gc_worker, []) + ], + Children = + case mria_rlog:role() of + core -> CoreChildren; + replicant -> [] + end, + {ok, {SupFlags, Children}}. + +%%-------------------------------------------------------------------------------- +%% Internal fns +%%-------------------------------------------------------------------------------- + +worker(Id, Mod, Args) -> + #{ + id => Id, + start => {Mod, start_link, Args}, + type => worker, + restart => permanent, + shutdown => 10_000, + significant => false + }. 
diff --git a/apps/emqx/src/emqx_quic_stream.erl b/apps/emqx/src/emqx_quic_stream.erl index 05413b0cf..63af45b1b 100644 --- a/apps/emqx/src/emqx_quic_stream.erl +++ b/apps/emqx/src/emqx_quic_stream.erl @@ -184,7 +184,7 @@ peer_send_aborted(Stream, ErrorCode, S) -> -spec peer_send_shutdown(stream_handle(), undefined, cb_data()) -> cb_ret(). peer_send_shutdown(Stream, undefined, S) -> - ok = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL, 0), + _ = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL, 0), {ok, S}. -spec send_complete(stream_handle(), boolean(), cb_data()) -> cb_ret(). diff --git a/apps/emqx/src/emqx_reason_codes.erl b/apps/emqx/src/emqx_reason_codes.erl index 77a8c1be2..543a62216 100644 --- a/apps/emqx/src/emqx_reason_codes.erl +++ b/apps/emqx/src/emqx_reason_codes.erl @@ -177,6 +177,7 @@ compat(connack, 16#9D) -> ?CONNACK_SERVER; compat(connack, 16#9F) -> ?CONNACK_SERVER; compat(suback, Code) when Code =< ?QOS_2 -> Code; compat(suback, Code) when Code >= 16#80 -> 16#80; +%% TODO: 16#80(qos0) 16#81(qos1) 16#82(qos2) for mqtt-v3.1.1 compat(unsuback, _Code) -> undefined; compat(_Other, _Code) -> undefined. diff --git a/apps/emqx/src/emqx_router.erl b/apps/emqx/src/emqx_router.erl index fd988eda1..1aebb1b21 100644 --- a/apps/emqx/src/emqx_router.erl +++ b/apps/emqx/src/emqx_router.erl @@ -52,6 +52,9 @@ lookup_routes/1 ]). +%% Topics API +-export([select/3]). + -export([print_routes/1]). -export([ @@ -59,7 +62,10 @@ foldr_routes/2 ]). --export([topics/0]). +-export([ + topics/0, + stats/1 +]). %% Exported for tests -export([has_route/2]). @@ -219,6 +225,19 @@ mria_delete_route(v2, Topic, Dest) -> mria_delete_route(v1, Topic, Dest) -> mria_delete_route_v1(Topic, Dest). +-spec select(Spec, _Limit :: pos_integer(), Continuation) -> + {[emqx_types:route()], Continuation} | '$end_of_table' +when + Spec :: {_TopicPat, _DestPat}, + Continuation :: term() | '$end_of_table'. 
+select(MatchSpec, Limit, Cont) -> + select(get_schema_vsn(), MatchSpec, Limit, Cont). + +select(v2, MatchSpec, Limit, Cont) -> + select_v2(MatchSpec, Limit, Cont); +select(v1, MatchSpec, Limit, Cont) -> + select_v1(MatchSpec, Limit, Cont). + -spec topics() -> list(emqx_types:topic()). topics() -> topics(get_schema_vsn()). @@ -228,6 +247,15 @@ topics(v2) -> topics(v1) -> list_topics_v1(). +-spec stats(n_routes) -> non_neg_integer(). +stats(Item) -> + stats(get_schema_vsn(), Item). + +stats(v2, Item) -> + get_stats_v2(Item); +stats(v1, Item) -> + get_stats_v1(Item). + %% @doc Print routes to a topic -spec print_routes(emqx_types:topic()) -> ok. print_routes(Topic) -> @@ -345,9 +373,17 @@ cleanup_routes_v1(Node) -> ] end). +select_v1({MTopic, MDest}, Limit, undefined) -> + ets:match_object(?ROUTE_TAB, #route{topic = MTopic, dest = MDest}, Limit); +select_v1(_Spec, _Limit, Cont) -> + ets:select(Cont). + list_topics_v1() -> list_route_tab_topics(). +get_stats_v1(n_routes) -> + emqx_maybe:define(ets:info(?ROUTE_TAB, size), 0). + list_route_tab_topics() -> mnesia:dirty_all_keys(?ROUTE_TAB). @@ -436,11 +472,52 @@ get_dest_node({_, Node}) -> get_dest_node(Node) -> Node. +select_v2(Spec, Limit, undefined) -> + Stream = mk_route_stream(Spec), + select_next(Limit, Stream); +select_v2(_Spec, Limit, Stream) -> + select_next(Limit, Stream). + +select_next(N, Stream) -> + case emqx_utils_stream:consume(N, Stream) of + {Routes, SRest} -> + {Routes, SRest}; + Routes -> + {Routes, '$end_of_table'} + end. + +mk_route_stream(Spec) -> + emqx_utils_stream:chain( + mk_route_stream(route, Spec), + mk_route_stream(filter, Spec) + ). 
+ +mk_route_stream(route, Spec) -> + emqx_utils_stream:ets(fun(Cont) -> select_v1(Spec, 1, Cont) end); +mk_route_stream(filter, {MTopic, MDest}) -> + emqx_utils_stream:map( + fun routeidx_to_route/1, + emqx_utils_stream:ets( + fun + (undefined) -> + MatchSpec = #routeidx{entry = emqx_trie_search:make_pat(MTopic, MDest)}, + ets:match_object(?ROUTE_TAB_FILTERS, MatchSpec, 1); + (Cont) -> + ets:match_object(Cont) + end + ) + ). + list_topics_v2() -> Pat = #routeidx{entry = '$1'}, Filters = [emqx_topic_index:get_topic(K) || [K] <- ets:match(?ROUTE_TAB_FILTERS, Pat)], list_route_tab_topics() ++ Filters. +get_stats_v2(n_routes) -> + NTopics = emqx_maybe:define(ets:info(?ROUTE_TAB, size), 0), + NWildcards = emqx_maybe:define(ets:info(?ROUTE_TAB_FILTERS, size), 0), + NTopics + NWildcards. + fold_routes_v2(FunName, FoldFun, AccIn) -> FilterFoldFun = mk_filtertab_fold_fun(FoldFun), Acc = ets:FunName(FoldFun, AccIn, ?ROUTE_TAB), @@ -449,6 +526,9 @@ fold_routes_v2(FunName, FoldFun, AccIn) -> mk_filtertab_fold_fun(FoldFun) -> fun(#routeidx{entry = K}, Acc) -> FoldFun(match_to_route(K), Acc) end. +routeidx_to_route(#routeidx{entry = M}) -> + match_to_route(M). + match_to_route(M) -> #route{topic = emqx_topic_index:get_topic(M), dest = emqx_topic_index:get_id(M)}. diff --git a/apps/emqx/src/emqx_router_helper.erl b/apps/emqx/src/emqx_router_helper.erl index b9cdbae4b..c43192d4e 100644 --- a/apps/emqx/src/emqx_router_helper.erl +++ b/apps/emqx/src/emqx_router_helper.erl @@ -190,12 +190,7 @@ code_change(_OldVsn, State, _Extra) -> %%-------------------------------------------------------------------- stats_fun() -> - case ets:info(?ROUTE_TAB, size) of - undefined -> - ok; - Size -> - emqx_stats:setstat('topics.count', 'topics.max', Size) - end. + emqx_stats:setstat('topics.count', 'topics.max', emqx_router:stats(n_routes)). cleanup_routes(Node) -> emqx_router:cleanup_routes(Node). 
diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 3ad03c4d4..f46387d3b 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -294,7 +294,19 @@ roots(low) -> {"persistent_session_store", sc( ref("persistent_session_store"), - #{importance => ?IMPORTANCE_HIDDEN} + #{ + %% NOTE + %% Due to some quirks in interaction between `emqx_config` and + %% `hocon_tconf`, schema roots cannot currently be deprecated. + importance => ?IMPORTANCE_HIDDEN + } + )}, + {"session_persistence", + sc( + ref("session_persistence"), + #{ + importance => ?IMPORTANCE_HIDDEN + } )}, {"trace", sc( @@ -309,11 +321,12 @@ roots(low) -> ]. fields("persistent_session_store") -> + Deprecated = #{deprecated => {since, "5.4.0"}}, [ {"enabled", sc( boolean(), - #{ + Deprecated#{ default => false, %% TODO(5.2): change field name to 'enable' and keep 'enabled' as an alias aliases => [enable], @@ -323,7 +336,7 @@ fields("persistent_session_store") -> {"ds", sc( boolean(), - #{ + Deprecated#{ default => false, importance => ?IMPORTANCE_HIDDEN } @@ -331,7 +344,7 @@ fields("persistent_session_store") -> {"on_disc", sc( boolean(), - #{ + Deprecated#{ default => true, desc => ?DESC(persistent_store_on_disc) } @@ -339,7 +352,7 @@ fields("persistent_session_store") -> {"ram_cache", sc( boolean(), - #{ + Deprecated#{ default => false, desc => ?DESC(persistent_store_ram_cache) } @@ -347,7 +360,7 @@ fields("persistent_session_store") -> {"backend", sc( hoconsc:union([ref("persistent_session_builtin")]), - #{ + Deprecated#{ default => #{ <<"type">> => <<"builtin">>, <<"session">> => @@ -363,7 +376,7 @@ fields("persistent_session_store") -> {"max_retain_undelivered", sc( duration(), - #{ + Deprecated#{ default => <<"1h">>, desc => ?DESC(persistent_session_store_max_retain_undelivered) } @@ -371,7 +384,7 @@ fields("persistent_session_store") -> {"message_gc_interval", sc( duration(), - #{ + Deprecated#{ default => <<"1h">>, desc => 
?DESC(persistent_session_store_message_gc_interval) } @@ -379,7 +392,7 @@ fields("persistent_session_store") -> {"session_message_gc_interval", sc( duration(), - #{ + Deprecated#{ default => <<"1m">>, desc => ?DESC(persistent_session_store_session_message_gc_interval) } @@ -1382,7 +1395,7 @@ fields("broker_routing") -> sc( hoconsc:enum([v1, v2]), #{ - default => v1, + default => v2, 'readOnly' => true, desc => ?DESC(broker_routing_storage_schema) } @@ -1740,6 +1753,103 @@ fields("trace") -> importance => ?IMPORTANCE_HIDDEN, desc => ?DESC(fields_trace_payload_encode) })} + ]; +fields("session_persistence") -> + [ + {"enable", + sc( + boolean(), #{ + desc => ?DESC(session_persistence_enable), + default => false + } + )}, + {"storage", + sc( + ref("session_storage_backend"), #{ + desc => ?DESC(session_persistence_storage), + validator => fun validate_backend_enabled/1, + default => #{ + <<"builtin">> => #{} + } + } + )}, + {"idle_poll_interval", + sc( + timeout_duration(), + #{ + default => <<"100ms">>, + desc => ?DESC(session_ds_idle_poll_interval) + } + )}, + {"last_alive_update_interval", + sc( + timeout_duration(), + #{ + default => <<"5000ms">>, + desc => ?DESC(session_ds_last_alive_update_interval) + } + )}, + {"session_gc_interval", + sc( + timeout_duration(), + #{ + default => <<"10m">>, + desc => ?DESC(session_ds_session_gc_interval) + } + )}, + {"session_gc_batch_size", + sc( + pos_integer(), + #{ + default => 100, + importance => ?IMPORTANCE_LOW, + desc => ?DESC(session_ds_session_gc_batch_size) + } + )}, + {"force_persistence", + sc( + boolean(), + #{ + default => false, + %% Only for testing, shall remain hidden + importance => ?IMPORTANCE_HIDDEN + } + )} + ]; +fields("session_storage_backend") -> + [ + {"builtin", + sc(ref("session_storage_backend_builtin"), #{ + desc => ?DESC(session_storage_backend_builtin), + required => {false, recursively} + })} + ] ++ emqx_schema_hooks:injection_point('session_persistence.storage_backends', []); 
+fields("session_storage_backend_builtin") -> + [ + {"enable", + sc( + boolean(), + #{ + desc => ?DESC(session_storage_backend_enable), + default => true + } + )}, + {"n_shards", + sc( + pos_integer(), + #{ + desc => ?DESC(session_builtin_n_shards), + default => 16 + } + )}, + {"replication_factor", + sc( + pos_integer(), + #{ + default => 3, + importance => ?IMPORTANCE_HIDDEN + } + )} ]. mqtt_listener(Bind) -> @@ -1992,6 +2102,8 @@ desc("ocsp") -> "Per listener OCSP Stapling configuration."; desc("crl_cache") -> "Global CRL cache options."; +desc("session_persistence") -> + "Settings governing durable sessions persistence."; desc(_) -> undefined. @@ -2014,6 +2126,17 @@ ensure_list(V) -> filter(Opts) -> [{K, V} || {K, V} <- Opts, V =/= undefined]. +validate_backend_enabled(Config) -> + Enabled = maps:filter(fun(_, #{<<"enable">> := E}) -> E end, Config), + case maps:to_list(Enabled) of + [{_Type, _BackendConfig}] -> + ok; + _Conflicts = [_ | _] -> + {error, multiple_enabled_backends}; + _None = [] -> + {error, no_enabled_backend} + end. + %% @private This function defines the SSL opts which are commonly used by %% SSL listener and client. -spec common_ssl_opts_schema(map(), server | client) -> hocon_schema:field_schema(). diff --git a/apps/emqx/src/emqx_schema_secret.erl b/apps/emqx/src/emqx_schema_secret.erl new file mode 100644 index 000000000..635285ce7 --- /dev/null +++ b/apps/emqx/src/emqx_schema_secret.erl @@ -0,0 +1,85 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% @doc HOCON schema that defines _secret_ concept. +-module(emqx_schema_secret). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("typerefl/include/types.hrl"). + +-export([mk/1]). + +%% HOCON Schema API +-export([convert_secret/2]). + +%% @doc Secret value. +-type t() :: binary(). + +%% @doc Source of the secret value. +%% * "file://...": file path to a file containing secret value. +%% * other binaries: secret value itself. +-type source() :: iodata(). + +-type secret() :: binary() | function(). +-reflect_type([secret/0]). + +-define(SCHEMA, #{ + required => false, + format => <<"password">>, + sensitive => true, + converter => fun ?MODULE:convert_secret/2 +}). + +-dialyzer({nowarn_function, source/1}). + +%% + +-spec mk(#{atom() => _}) -> hocon_schema:field_schema(). +mk(Overrides = #{}) -> + hoconsc:mk(secret(), maps:merge(?SCHEMA, Overrides)). + +convert_secret(undefined, #{}) -> + undefined; +convert_secret(Secret, #{make_serializable := true}) -> + unicode:characters_to_binary(source(Secret)); +convert_secret(Secret, #{}) when is_function(Secret, 0) -> + Secret; +convert_secret(Secret, #{}) when is_integer(Secret) -> + wrap(integer_to_binary(Secret)); +convert_secret(Secret, #{}) -> + try unicode:characters_to_binary(Secret) of + String when is_binary(String) -> + wrap(String); + {error, _, _} -> + throw(invalid_string) + catch + error:_ -> + throw(invalid_type) + end. + +-spec wrap(source()) -> emqx_secret:t(t()). 
+wrap(<<"file://", Filename/binary>>) -> + emqx_secret:wrap_load({file, Filename}); +wrap(Secret) -> + emqx_secret:wrap(Secret). + +-spec source(emqx_secret:t(t())) -> source(). +source(Secret) when is_function(Secret) -> + source(emqx_secret:term(Secret)); +source({file, Filename}) -> + <<"file://", Filename/binary>>; +source(Secret) -> + Secret. diff --git a/apps/emqx/src/emqx_secret.erl b/apps/emqx/src/emqx_secret.erl index 72c4f3c08..dfbfa488e 100644 --- a/apps/emqx/src/emqx_secret.erl +++ b/apps/emqx/src/emqx_secret.erl @@ -19,23 +19,52 @@ -module(emqx_secret). %% API: --export([wrap/1, unwrap/1]). +-export([wrap/1, wrap_load/1, unwrap/1, term/1]). -export_type([t/1]). -opaque t(T) :: T | fun(() -> t(T)). +%% Secret loader module. +%% Any changes related to processing of secrets should be made there. +-define(LOADER, emqx_secret_loader). + %%================================================================================ %% API funcions %%================================================================================ +%% @doc Wrap a term in a secret closure. +%% This effectively hides the term from any term formatting / printing code. +-spec wrap(T) -> t(T). wrap(Term) -> fun() -> Term end. +%% @doc Wrap a loader function call over a term in a secret closure. +%% This is slightly more flexible form of `wrap/1` with the same basic purpose. +-spec wrap_load(emqx_secret_loader:source()) -> t(_). +wrap_load(Source) -> + fun() -> + apply(?LOADER, load, [Source]) + end. + +%% @doc Unwrap a secret closure, revealing the secret. +%% This is either `Term` or `Module:Function(Term)` depending on how it was wrapped. +-spec unwrap(t(T)) -> T. unwrap(Term) when is_function(Term, 0) -> %% Handle potentially nested funs unwrap(Term()); unwrap(Term) -> Term. + +%% @doc Inspect the term wrapped in a secret closure. +-spec term(t(_)) -> _Term. 
+term(Wrap) when is_function(Wrap, 0) -> + case erlang:fun_info(Wrap, module) of + {module, ?MODULE} -> + {env, Env} = erlang:fun_info(Wrap, env), + lists:last(Env); + _ -> + error(badarg, [Wrap]) + end. diff --git a/apps/emqx/src/emqx_secret_loader.erl b/apps/emqx/src/emqx_secret_loader.erl new file mode 100644 index 000000000..2e99587bf --- /dev/null +++ b/apps/emqx/src/emqx_secret_loader.erl @@ -0,0 +1,42 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_secret_loader). + +%% API +-export([load/1]). +-export([file/1]). + +-export_type([source/0]). + +-type source() :: {file, file:filename_all()}. + +-spec load(source()) -> binary() | no_return(). +load({file, Filename}) -> + file(Filename). + +-spec file(file:filename_all()) -> binary() | no_return(). +file(Filename) -> + case file:read_file(Filename) of + {ok, Secret} -> + string:trim(Secret, trailing); + {error, Reason} -> + throw(#{ + msg => failed_to_read_secret_file, + path => Filename, + reason => emqx_utils:explain_posix(Reason) + }) + end. 
diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index 52342d7ee..108e8ec09 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -84,7 +84,7 @@ -export([ deliver/3, handle_timeout/3, - disconnect/2, + disconnect/3, terminate/3 ]). @@ -267,7 +267,7 @@ destroy(Session) -> -spec subscribe( clientinfo(), - emqx_types:topic(), + emqx_types:topic() | emqx_types:share(), emqx_types:subopts(), t() ) -> @@ -287,7 +287,7 @@ subscribe(ClientInfo, TopicFilter, SubOpts, Session) -> -spec unsubscribe( clientinfo(), - emqx_types:topic(), + emqx_types:topic() | emqx_types:share(), emqx_types:subopts(), t() ) -> @@ -418,7 +418,13 @@ enrich_delivers(ClientInfo, [D | Rest], UpgradeQoS, Session) -> end. enrich_deliver(ClientInfo, {deliver, Topic, Msg}, UpgradeQoS, Session) -> - SubOpts = ?IMPL(Session):get_subscription(Topic, Session), + SubOpts = + case Msg of + #message{headers = #{redispatch_to := ?REDISPATCH_TO(Group, T)}} -> + ?IMPL(Session):get_subscription(emqx_topic:make_shared_record(Group, T), Session); + _ -> + ?IMPL(Session):get_subscription(Topic, Session) + end, enrich_message(ClientInfo, Msg, SubOpts, UpgradeQoS). enrich_message( @@ -497,10 +503,10 @@ cancel_timer(Name, Timers) -> %%-------------------------------------------------------------------- --spec disconnect(clientinfo(), t()) -> +-spec disconnect(clientinfo(), eqmx_types:conninfo(), t()) -> {idle | shutdown, t()}. -disconnect(_ClientInfo, Session) -> - ?IMPL(Session):disconnect(Session). +disconnect(_ClientInfo, ConnInfo, Session) -> + ?IMPL(Session):disconnect(Session, ConnInfo). -spec terminate(clientinfo(), Reason :: term(), t()) -> ok. @@ -611,21 +617,27 @@ maybe_mock_impl_mod(_) -> -spec choose_impl_mod(conninfo()) -> module(). choose_impl_mod(#{expiry_interval := EI}) -> - hd(choose_impl_candidates(EI, emqx_persistent_message:is_store_enabled())). + hd(choose_impl_candidates(EI, emqx_persistent_message:is_persistence_enabled())). 
-spec choose_impl_candidates(conninfo()) -> [module()]. choose_impl_candidates(#{expiry_interval := EI}) -> - choose_impl_candidates(EI, emqx_persistent_message:is_store_enabled()). + choose_impl_candidates(EI, emqx_persistent_message:is_persistence_enabled()). choose_impl_candidates(_, _IsPSStoreEnabled = false) -> [emqx_session_mem]; choose_impl_candidates(0, _IsPSStoreEnabled = true) -> - %% NOTE - %% If ExpiryInterval is 0, the natural choice is `emqx_session_mem`. Yet we still - %% need to look the existing session up in the `emqx_persistent_session_ds` store - %% first, because previous connection may have set ExpiryInterval to a non-zero - %% value. - [emqx_session_mem, emqx_persistent_session_ds]; + case emqx_persistent_message:force_ds() of + false -> + %% NOTE + %% If ExpiryInterval is 0, the natural choice is + %% `emqx_session_mem'. Yet we still need to look the + %% existing session up in the `emqx_persistent_session_ds' + %% store first, because previous connection may have set + %% ExpiryInterval to a non-zero value. + [emqx_session_mem, emqx_persistent_session_ds]; + true -> + [emqx_persistent_session_ds] + end; choose_impl_candidates(EI, _IsPSStoreEnabled = true) when EI > 0 -> [emqx_persistent_session_ds]. diff --git a/apps/emqx/src/emqx_session_mem.erl b/apps/emqx/src/emqx_session_mem.erl index 3ea4f9f3b..178c71e12 100644 --- a/apps/emqx/src/emqx_session_mem.erl +++ b/apps/emqx/src/emqx_session_mem.erl @@ -87,7 +87,7 @@ deliver/3, replay/3, handle_timeout/3, - disconnect/1, + disconnect/2, terminate/2 ]). @@ -316,7 +316,7 @@ unsubscribe( {error, ?RC_NO_SUBSCRIPTION_EXISTED} end. --spec get_subscription(emqx_types:topic(), session()) -> +-spec get_subscription(emqx_types:topic() | emqx_types:share(), session()) -> emqx_types:subopts() | undefined. get_subscription(Topic, #session{subscriptions = Subs}) -> maps:get(Topic, Subs, undefined). @@ -725,8 +725,8 @@ append(L1, L2) -> L1 ++ L2. 
%%-------------------------------------------------------------------- --spec disconnect(session()) -> {idle, session()}. -disconnect(Session = #session{}) -> +-spec disconnect(session(), emqx_types:conninfo()) -> {idle, session()}. +disconnect(Session = #session{}, _ConnInfo) -> % TODO: isolate expiry timer / timeout handling here? {idle, Session}. diff --git a/apps/emqx/src/emqx_shared_sub.erl b/apps/emqx/src/emqx_shared_sub.erl index 84921be6b..0a6538282 100644 --- a/apps/emqx/src/emqx_shared_sub.erl +++ b/apps/emqx/src/emqx_shared_sub.erl @@ -95,7 +95,6 @@ -define(ACK, shared_sub_ack). -define(NACK(Reason), {shared_sub_nack, Reason}). -define(NO_ACK, no_ack). --define(REDISPATCH_TO(GROUP, TOPIC), {GROUP, TOPIC}). -define(SUBSCRIBER_DOWN, noproc). -type redispatch_to() :: ?REDISPATCH_TO(emqx_types:group(), emqx_types:topic()). @@ -234,19 +233,18 @@ without_group_ack(Msg) -> get_group_ack(Msg) -> emqx_message:get_header(shared_dispatch_ack, Msg, ?NO_ACK). -with_redispatch_to(#message{qos = ?QOS_0} = Msg, _Group, _Topic) -> - Msg; +%% always add `redispatch_to` header to the message +%% for QOS_0 msgs, redispatch_to is not needed and filtered out in is_redispatch_needed/1 with_redispatch_to(Msg, Group, Topic) -> emqx_message:set_headers(#{redispatch_to => ?REDISPATCH_TO(Group, Topic)}, Msg). -%% @hidden Redispatch is needed only for the messages with redispatch_to header added. -is_redispatch_needed(#message{} = Msg) -> - case get_redispatch_to(Msg) of - ?REDISPATCH_TO(_, _) -> - true; - _ -> - false - end. +%% @hidden Redispatch is needed only for the messages which not QOS_0 +is_redispatch_needed(#message{qos = ?QOS_0}) -> + false; +is_redispatch_needed(#message{headers = #{redispatch_to := ?REDISPATCH_TO(_, _)}}) -> + true; +is_redispatch_needed(#message{}) -> + false. %% @doc Redispatch shared deliveries to other members in the group. 
redispatch(Messages0) -> diff --git a/apps/emqx/src/emqx_topic.erl b/apps/emqx/src/emqx_topic.erl index 6d232c68d..76c6ef34e 100644 --- a/apps/emqx/src/emqx_topic.erl +++ b/apps/emqx/src/emqx_topic.erl @@ -36,9 +36,16 @@ parse/2 ]). +-export([ + maybe_format_share/1, + get_shared_real_topic/1, + make_shared_record/2 +]). + -type topic() :: emqx_types:topic(). -type word() :: emqx_types:word(). -type words() :: emqx_types:words(). +-type share() :: emqx_types:share(). %% Guards -define(MULTI_LEVEL_WILDCARD_NOT_LAST(C, REST), @@ -50,7 +57,9 @@ %%-------------------------------------------------------------------- %% @doc Is wildcard topic? --spec wildcard(topic() | words()) -> true | false. +-spec wildcard(topic() | share() | words()) -> true | false. +wildcard(#share{topic = Topic}) when is_binary(Topic) -> + wildcard(Topic); wildcard(Topic) when is_binary(Topic) -> wildcard(words(Topic)); wildcard([]) -> @@ -64,7 +73,7 @@ wildcard([_H | T]) -> %% @doc Match Topic name with filter. -spec match(Name, Filter) -> boolean() when - Name :: topic() | words(), + Name :: topic() | share() | words(), Filter :: topic() | words(). match(<<$$, _/binary>>, <<$+, _/binary>>) -> false; @@ -72,6 +81,10 @@ match(<<$$, _/binary>>, <<$#, _/binary>>) -> false; match(Name, Filter) when is_binary(Name), is_binary(Filter) -> match(words(Name), words(Filter)); +match(#share{} = Name, Filter) -> + match_share(Name, Filter); +match(Name, #share{} = Filter) -> + match_share(Name, Filter); match([], []) -> true; match([H | T1], [H | T2]) -> @@ -87,12 +100,29 @@ match([_H1 | _], []) -> match([], [_H | _T2]) -> false. +-spec match_share(Name, Filter) -> boolean() when + Name :: share(), + Filter :: topic() | share(). +match_share(#share{topic = Name}, Filter) when is_binary(Filter) -> + %% only match real topic filter for normal topic filter. 
+ match(words(Name), words(Filter)); +match_share(#share{group = Group, topic = Name}, #share{group = Group, topic = Filter}) -> + %% Matching real topic filter When subed same share group. + match(words(Name), words(Filter)); +match_share(#share{}, _) -> + %% Otherwise, non-matched. + false; +match_share(Name, #share{topic = Filter}) when is_binary(Name) -> + %% Only match real topic filter for normal topic_filter/topic_name. + match(Name, Filter). + -spec match_any(Name, [Filter]) -> boolean() when Name :: topic() | words(), Filter :: topic() | words(). match_any(Topic, Filters) -> lists:any(fun(Filter) -> match(Topic, Filter) end, Filters). +%% TODO: validate share topic #share{} for emqx_trace.erl %% @doc Validate topic name or filter -spec validate(topic() | {name | filter, topic()}) -> true. validate(Topic) when is_binary(Topic) -> @@ -107,7 +137,7 @@ validate(_, <<>>) -> validate(_, Topic) when is_binary(Topic) andalso (size(Topic) > ?MAX_TOPIC_LEN) -> %% MQTT-5.0 [MQTT-4.7.3-3] error(topic_too_long); -validate(filter, SharedFilter = <<"$share/", _Rest/binary>>) -> +validate(filter, SharedFilter = <>) -> validate_share(SharedFilter); validate(filter, Filter) when is_binary(Filter) -> validate2(words(Filter)); @@ -139,12 +169,12 @@ validate3(<>) when C == $#; C == $+; C == 0 -> validate3(<<_/utf8, Rest/binary>>) -> validate3(Rest). -validate_share(<<"$share/", Rest/binary>>) when +validate_share(<>) when Rest =:= <<>> orelse Rest =:= <<"/">> -> %% MQTT-5.0 [MQTT-4.8.2-1] error(?SHARE_EMPTY_FILTER); -validate_share(<<"$share/", Rest/binary>>) -> +validate_share(<>) -> case binary:split(Rest, <<"/">>) of %% MQTT-5.0 [MQTT-4.8.2-1] [<<>>, _] -> @@ -156,7 +186,7 @@ validate_share(<<"$share/", Rest/binary>>) -> validate_share(ShareName, Filter) end. 
-validate_share(_, <<"$share/", _Rest/binary>>) -> +validate_share(_, <>) -> error(?SHARE_RECURSIVELY); validate_share(ShareName, Filter) -> case binary:match(ShareName, [<<"+">>, <<"#">>]) of @@ -185,7 +215,9 @@ bin('#') -> <<"#">>; bin(B) when is_binary(B) -> B; bin(L) when is_list(L) -> list_to_binary(L). --spec levels(topic()) -> pos_integer(). +-spec levels(topic() | share()) -> pos_integer(). +levels(#share{topic = Topic}) when is_binary(Topic) -> + levels(Topic); levels(Topic) when is_binary(Topic) -> length(tokens(Topic)). @@ -197,6 +229,8 @@ tokens(Topic) -> %% @doc Split Topic Path to Words -spec words(topic()) -> words(). +words(#share{topic = Topic}) when is_binary(Topic) -> + words(Topic); words(Topic) when is_binary(Topic) -> [word(W) || W <- tokens(Topic)]. @@ -237,26 +271,29 @@ do_join(_TopicAcc, [C | Words]) when ?MULTI_LEVEL_WILDCARD_NOT_LAST(C, Words) -> do_join(TopicAcc, [Word | Words]) -> do_join(<>, Words). --spec parse(topic() | {topic(), map()}) -> {topic(), #{share => binary()}}. +-spec parse(topic() | {topic(), map()}) -> {topic() | share(), map()}. parse(TopicFilter) when is_binary(TopicFilter) -> parse(TopicFilter, #{}); parse({TopicFilter, Options}) when is_binary(TopicFilter) -> parse(TopicFilter, Options). --spec parse(topic(), map()) -> {topic(), map()}. -parse(TopicFilter = <<"$queue/", _/binary>>, #{share := _Group}) -> - error({invalid_topic_filter, TopicFilter}); -parse(TopicFilter = <<"$share/", _/binary>>, #{share := _Group}) -> - error({invalid_topic_filter, TopicFilter}); -parse(<<"$queue/", TopicFilter/binary>>, Options) -> - parse(TopicFilter, Options#{share => <<"$queue">>}); -parse(TopicFilter = <<"$share/", Rest/binary>>, Options) -> +-spec parse(topic() | share(), map()) -> {topic() | share(), map()}. 
+%% <<"$queue/[real_topic_filter]>">> equivalent to <<"$share/$queue/[real_topic_filter]">> +%% So the head of `real_topic_filter` MUST NOT be `<<$queue>>` or `<<$share>>` +parse(#share{topic = Topic = <>}, _Options) -> + error({invalid_topic_filter, Topic}); +parse(#share{topic = Topic = <>}, _Options) -> + error({invalid_topic_filter, Topic}); +parse(<>, Options) -> + parse(#share{group = <>, topic = Topic}, Options); +parse(TopicFilter = <>, Options) -> case binary:split(Rest, <<"/">>) of [_Any] -> error({invalid_topic_filter, TopicFilter}); - [ShareName, Filter] -> - case binary:match(ShareName, [<<"+">>, <<"#">>]) of - nomatch -> parse(Filter, Options#{share => ShareName}); + %% `Group` could be `$share` or `$queue` + [Group, Topic] -> + case binary:match(Group, [<<"+">>, <<"#">>]) of + nomatch -> parse(#share{group = Group, topic = Topic}, Options); _ -> error({invalid_topic_filter, TopicFilter}) end end; @@ -267,5 +304,22 @@ parse(TopicFilter = <<"$exclusive/", Topic/binary>>, Options) -> _ -> {Topic, Options#{is_exclusive => true}} end; -parse(TopicFilter, Options) -> +parse(TopicFilter, Options) when + ?IS_TOPIC(TopicFilter) +-> {TopicFilter, Options}. + +get_shared_real_topic(#share{topic = TopicFilter}) -> + TopicFilter; +get_shared_real_topic(TopicFilter) when is_binary(TopicFilter) -> + TopicFilter. + +make_shared_record(Group, Topic) -> + #share{group = Group, topic = Topic}. + +maybe_format_share(#share{group = <>, topic = Topic}) -> + join([<>, Topic]); +maybe_format_share(#share{group = Group, topic = Topic}) -> + join([<>, Group, Topic]); +maybe_format_share(Topic) -> + join([Topic]). 
diff --git a/apps/emqx/src/emqx_trace/emqx_trace.erl b/apps/emqx/src/emqx_trace/emqx_trace.erl index 99bbcc5f9..6588c99dc 100644 --- a/apps/emqx/src/emqx_trace/emqx_trace.erl +++ b/apps/emqx/src/emqx_trace/emqx_trace.erl @@ -105,7 +105,7 @@ log_filter([{Id, FilterFun, Filter, Name} | Rest], Log0) -> ignore -> ignore; Log -> - case logger_config:get(ets:whereis(logger), Id) of + case logger_config:get(logger, Id) of {ok, #{module := Module} = HandlerConfig0} -> HandlerConfig = maps:without(?OWN_KEYS, HandlerConfig0), try diff --git a/apps/emqx/src/emqx_trie_search.erl b/apps/emqx/src/emqx_trie_search.erl index c8c088b58..da37f2b21 100644 --- a/apps/emqx/src/emqx_trie_search.erl +++ b/apps/emqx/src/emqx_trie_search.erl @@ -98,7 +98,7 @@ -module(emqx_trie_search). --export([make_key/2, filter/1]). +-export([make_key/2, make_pat/2, filter/1]). -export([match/2, matches/3, get_id/1, get_topic/1]). -export_type([key/1, word/0, words/0, nextf/0, opts/0]). @@ -127,6 +127,12 @@ make_key(Topic, ID) when is_binary(Topic) -> make_key(Words, ID) when is_list(Words) -> {Words, {ID}}. +-spec make_pat(emqx_types:topic() | words() | '_', _ID | '_') -> _Pat. +make_pat(Pattern = '_', ID) -> + {Pattern, {ID}}; +make_pat(Topic, ID) -> + make_key(Topic, ID). + %% @doc Parse a topic filter into a list of words. Returns `false` if it's not a filter. -spec filter(emqx_types:topic()) -> words() | false. filter(Topic) -> diff --git a/apps/emqx/src/emqx_types.erl b/apps/emqx/src/emqx_types.erl index 504540cf6..1a4825736 100644 --- a/apps/emqx/src/emqx_types.erl +++ b/apps/emqx/src/emqx_types.erl @@ -40,6 +40,10 @@ words/0 ]). +-export_type([ + share/0 +]). + -export_type([ socktype/0, sockstate/0, @@ -136,11 +140,14 @@ -type subid() :: binary() | atom(). --type group() :: binary() | undefined. +%% '_' for match spec +-type group() :: binary() | '_'. -type topic() :: binary(). -type word() :: '' | '+' | '#' | binary(). -type words() :: list(word()). +-type share() :: #share{}. 
+ -type socktype() :: tcp | udp | ssl | proxy | atom(). -type sockstate() :: idle | running | blocked | closed. -type conninfo() :: #{ @@ -207,7 +214,6 @@ rap := 0 | 1, nl := 0 | 1, qos := qos(), - share => binary(), atom() => term() }. -type reason_code() :: 0..16#FF. diff --git a/apps/emqx/src/emqx_vm.erl b/apps/emqx/src/emqx_vm.erl index 79ad9905c..894595f72 100644 --- a/apps/emqx/src/emqx_vm.erl +++ b/apps/emqx/src/emqx_vm.erl @@ -418,6 +418,9 @@ get_otp_version() -> end. read_otp_version() -> + string:trim(do_read_otp_version()). + +do_read_otp_version() -> ReleasesDir = filename:join([code:root_dir(), "releases"]), Filename = filename:join([ReleasesDir, emqx_app:get_release(), "BUILD_INFO"]), case file:read_file(Filename) of diff --git a/apps/emqx/test/emqx_broker_SUITE.erl b/apps/emqx/test/emqx_broker_SUITE.erl index a205f6fcd..b416f1730 100644 --- a/apps/emqx/test/emqx_broker_SUITE.erl +++ b/apps/emqx/test/emqx_broker_SUITE.erl @@ -299,14 +299,19 @@ t_nosub_pub(Config) when is_list(Config) -> ?assertEqual(1, emqx_metrics:val('messages.dropped')). t_shared_subscribe({init, Config}) -> - emqx_broker:subscribe(<<"topic">>, <<"clientid">>, #{share => <<"group">>}), + emqx_broker:subscribe( + emqx_topic:make_shared_record(<<"group">>, <<"topic">>), <<"clientid">>, #{} + ), ct:sleep(100), Config; t_shared_subscribe(Config) when is_list(Config) -> emqx_broker:safe_publish(emqx_message:make(ct, <<"topic">>, <<"hello">>)), ?assert( receive - {deliver, <<"topic">>, #message{payload = <<"hello">>}} -> + {deliver, <<"topic">>, #message{ + headers = #{redispatch_to := ?REDISPATCH_TO(<<"group">>, <<"topic">>)}, + payload = <<"hello">> + }} -> true; Msg -> ct:pal("Msg: ~p", [Msg]), @@ -316,7 +321,7 @@ t_shared_subscribe(Config) when is_list(Config) -> end ); t_shared_subscribe({'end', _Config}) -> - emqx_broker:unsubscribe(<<"$share/group/topic">>). + emqx_broker:unsubscribe(emqx_topic:make_shared_record(<<"group">>, <<"topic">>)). 
t_shared_subscribe_2({init, Config}) -> Config; @@ -723,24 +728,6 @@ t_connack_auth_error(Config) when is_list(Config) -> ?assertEqual(2, emqx_metrics:val('packets.connack.auth_error')), ok. -t_handle_in_empty_client_subscribe_hook({init, Config}) -> - Hook = {?MODULE, client_subscribe_delete_all_hook, []}, - ok = emqx_hooks:put('client.subscribe', Hook, _Priority = 100), - Config; -t_handle_in_empty_client_subscribe_hook({'end', _Config}) -> - emqx_hooks:del('client.subscribe', {?MODULE, client_subscribe_delete_all_hook}), - ok; -t_handle_in_empty_client_subscribe_hook(Config) when is_list(Config) -> - {ok, C} = emqtt:start_link(), - {ok, _} = emqtt:connect(C), - try - {ok, _, RCs} = emqtt:subscribe(C, <<"t">>), - ?assertEqual([?RC_UNSPECIFIED_ERROR], RCs), - ok - after - emqtt:disconnect(C) - end. - authenticate_deny(_Credentials, _Default) -> {stop, {error, bad_username_or_password}}. @@ -800,7 +787,3 @@ recv_msgs(Count, Msgs) -> after 100 -> Msgs end. - -client_subscribe_delete_all_hook(_ClientInfo, _Username, TopicFilter) -> - EmptyFilters = [{T, Opts#{deny_subscription => true}} || {T, Opts} <- TopicFilter], - {stop, EmptyFilters}. diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index 8f6a2baaa..c6b4c0518 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -456,7 +456,7 @@ t_process_subscribe(_) -> ok = meck:expect(emqx_session, subscribe, fun(_, _, _, Session) -> {ok, Session} end), TopicFilters = [TopicFilter = {<<"+">>, ?DEFAULT_SUBOPTS}], {[{TopicFilter, ?RC_SUCCESS}], _Channel} = - emqx_channel:process_subscribe(TopicFilters, #{}, channel()). + emqx_channel:process_subscribe(TopicFilters, channel()). 
t_process_unsubscribe(_) -> ok = meck:expect(emqx_session, unsubscribe, fun(_, _, _, Session) -> {ok, Session} end), @@ -914,7 +914,13 @@ t_check_pub_alias(_) -> t_check_sub_authzs(_) -> emqx_config:put_zone_conf(default, [authorization, enable], true), TopicFilter = {<<"t">>, ?DEFAULT_SUBOPTS}, - [{TopicFilter, 0}] = emqx_channel:check_sub_authzs([TopicFilter], channel()). + SubPkt = ?SUBSCRIBE_PACKET(1, #{}, [TopicFilter]), + CheckedSubPkt = ?SUBSCRIBE_PACKET(1, #{}, [{TopicFilter, ?RC_SUCCESS}]), + Channel = channel(), + ?assertEqual( + {ok, CheckedSubPkt, Channel}, + emqx_channel:check_sub_authzs(SubPkt, Channel) + ). t_enrich_connack_caps(_) -> ok = meck:new(emqx_mqtt_caps, [passthrough, no_history]), @@ -1061,6 +1067,7 @@ clientinfo(InitProps) -> clientid => <<"clientid">>, username => <<"username">>, is_superuser => false, + is_bridge => false, mountpoint => undefined }, InitProps diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index 4671851f8..18919103c 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -753,24 +753,15 @@ start_slave(Name, Opts) when is_map(Opts) -> case SlaveMod of ct_slave -> ct:pal("~p: node data dir: ~s", [Node, NodeDataDir]), - ct_slave:start( - Node, - [ - {kill_if_fail, true}, - {monitor_master, true}, - {init_timeout, 20_000}, - {startup_timeout, 20_000}, - {erl_flags, erl_flags()}, - {env, [ - {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"}, - {"EMQX_NODE__COOKIE", Cookie}, - {"EMQX_NODE__DATA_DIR", NodeDataDir} - ]} - ] - ); + Envs = [ + {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"}, + {"EMQX_NODE__COOKIE", Cookie}, + {"EMQX_NODE__DATA_DIR", NodeDataDir} + ], + emqx_cth_peer:start(Node, erl_flags(), Envs); slave -> - Env = " -env HOCON_ENV_OVERRIDE_PREFIX EMQX_", - slave:start_link(host(), Name, ebin_path() ++ Env) + Envs = [{"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"}], + emqx_cth_peer:start(Node, ebin_path(), Envs) end end, case DoStart() 
of @@ -789,13 +780,7 @@ start_slave(Name, Opts) when is_map(Opts) -> %% Node stopping stop_slave(Node0) -> Node = node_name(Node0), - SlaveMod = get_peer_mod(Node), - erase_peer_mod(Node), - case SlaveMod:stop(Node) of - ok -> ok; - {ok, _} -> ok; - {error, not_started, _} -> ok - end. + emqx_cth_peer:stop(Node). %% EPMD starting start_epmd() -> @@ -1022,11 +1007,11 @@ set_envs(Node, Env) -> ). erl_flags() -> - %% One core and redirecting logs to master - "+S 1:1 -master " ++ atom_to_list(node()) ++ " " ++ ebin_path(). + %% One core + ["+S", "1:1"] ++ ebin_path(). ebin_path() -> - string:join(["-pa" | lists:filter(fun is_lib/1, code:get_path())], " "). + ["-pa" | lists:filter(fun is_lib/1, code:get_path())]. is_lib(Path) -> string:prefix(Path, code:lib_dir()) =:= nomatch andalso diff --git a/apps/emqx/test/emqx_common_test_http.erl b/apps/emqx/test/emqx_common_test_http.erl index 2d1128f05..30ebe409f 100644 --- a/apps/emqx/test/emqx_common_test_http.erl +++ b/apps/emqx/test/emqx_common_test_http.erl @@ -34,6 +34,9 @@ -define(DEFAULT_APP_KEY, <<"default_app_key">>). -define(DEFAULT_APP_SECRET, <<"default_app_secret">>). +%% from emqx_dashboard/include/emqx_dashboard_rbac.hrl +-define(ROLE_API_SUPERUSER, <<"administrator">>). + request_api(Method, Url, Auth) -> request_api(Method, Url, [], Auth, []). @@ -96,7 +99,8 @@ create_default_app() -> ?DEFAULT_APP_SECRET, true, ExpiredAt, - <<"default app key for test">> + <<"default app key for test">>, + ?ROLE_API_SUPERUSER ). delete_default_app() -> diff --git a/apps/emqx/test/emqx_cth_cluster.erl b/apps/emqx/test/emqx_cth_cluster.erl index a47e96251..029907f57 100644 --- a/apps/emqx/test/emqx_cth_cluster.erl +++ b/apps/emqx/test/emqx_cth_cluster.erl @@ -38,19 +38,19 @@ %% in `end_per_suite/1` or `end_per_group/2`) with the result from step 2. -module(emqx_cth_cluster). --export([start/2]). +-export([start/1, start/2, restart/2]). -export([stop/1, stop_node/1]). --export([start_bare_node/2]). 
+-export([start_bare_nodes/1, start_bare_nodes/2]). -export([share_load_module/2]). -export([node_name/1, mk_nodespecs/2]). --export([start_apps/2, set_node_opts/2]). +-export([start_apps/2]). -define(APPS_CLUSTERING, [gen_rpc, mria, ekka]). -define(TIMEOUT_NODE_START_MS, 15000). --define(TIMEOUT_APPS_START_MS, 60000). +-define(TIMEOUT_APPS_START_MS, 30000). -define(TIMEOUT_NODE_STOP_S, 15). %% @@ -109,9 +109,12 @@ when }. start(Nodes, ClusterOpts) -> NodeSpecs = mk_nodespecs(Nodes, ClusterOpts), - ct:pal("Starting cluster:\n ~p", [NodeSpecs]), + start(NodeSpecs). + +start(NodeSpecs) -> + ct:pal("(Re)starting nodes:\n ~p", [NodeSpecs]), % 1. Start bare nodes with only basic applications running - _ = emqx_utils:pmap(fun start_node_init/1, NodeSpecs, ?TIMEOUT_NODE_START_MS), + ok = start_nodes_init(NodeSpecs, ?TIMEOUT_NODE_START_MS), % 2. Start applications needed to enable clustering % Generally, this causes some applications to restart, but we deliberately don't % start them yet. @@ -121,6 +124,11 @@ start(Nodes, ClusterOpts) -> _ = emqx_utils:pmap(fun run_node_phase_apps/1, NodeSpecs, ?TIMEOUT_APPS_START_MS), [Node || #{name := Node} <- NodeSpecs]. +restart(Node, Spec) -> + ct:pal("Stopping peer node ~p", [Node]), + ok = emqx_cth_peer:stop(Node), + start([Spec#{boot_type => restart}]). + mk_nodespecs(Nodes, ClusterOpts) -> NodeSpecs = lists:zipwith( fun(N, {Name, Opts}) -> mk_init_nodespec(N, Name, Opts, ClusterOpts) end, @@ -282,8 +290,50 @@ allocate_listener_port(Type, #{base_port := BasePort}) -> allocate_listener_ports(Types, Spec) -> lists:foldl(fun maps:merge/2, #{}, [allocate_listener_port(Type, Spec) || Type <- Types]). -start_node_init(Spec = #{name := Node}) -> - Node = start_bare_node(Node, Spec), +start_nodes_init(Specs, Timeout) -> + Names = lists:map(fun(#{name := Name}) -> Name end, Specs), + Nodes = start_bare_nodes(Names, Timeout), + lists:foreach(fun node_init/1, Nodes). 
+ +start_bare_nodes(Names) -> + start_bare_nodes(Names, ?TIMEOUT_NODE_START_MS). +start_bare_nodes(Names, Timeout) -> + Args = erl_flags(), + Envs = [], + Waits = lists:map( + fun(Name) -> + WaitTag = {boot_complete, Name}, + WaitBoot = {self(), WaitTag}, + {ok, _} = emqx_cth_peer:start(Name, Args, Envs, WaitBoot), + WaitTag + end, + Names + ), + Deadline = erlang:monotonic_time() + erlang:convert_time_unit(Timeout, millisecond, nanosecond), + Nodes = wait_boot_complete(Waits, Deadline), + lists:foreach(fun(Node) -> pong = net_adm:ping(Node) end, Nodes), + Nodes. + +wait_boot_complete([], _) -> + []; +wait_boot_complete(Waits, Deadline) -> + case erlang:monotonic_time() > Deadline of + true -> + error({timeout, Waits}); + false -> + ok + end, + receive + {{boot_complete, _Name} = Wait, {started, Node, _Pid}} -> + ct:pal("~p", [Wait]), + [Node | wait_boot_complete(Waits -- [Wait], Deadline)]; + {{boot_complete, _Name}, Otherwise} -> + error({unexpected, Otherwise}) + after 100 -> + wait_boot_complete(Waits, Deadline) + end. + +node_init(Node) -> % Make it possible to call `ct:pal` and friends (if running under rebar3) _ = share_load_module(Node, cthr), % Enable snabbkaffe trace forwarding @@ -300,12 +350,6 @@ run_node_phase_apps(Spec = #{name := Node}) -> ok = start_apps(Node, Spec), ok. -set_node_opts(Node, Spec) -> - erpc:call(Node, persistent_term, put, [{?MODULE, opts}, Spec]). - -get_node_opts(Node) -> - erpc:call(Node, persistent_term, get, [{?MODULE, opts}]). - load_apps(Node, #{apps := Apps}) -> erpc:call(Node, emqx_cth_suite, load_apps, [Apps]). @@ -322,8 +366,12 @@ start_apps(Node, #{apps := Apps} = Spec) -> ok. suite_opts(Spec) -> - maps:with([work_dir], Spec). + maps:with([work_dir, boot_type], Spec). 
+maybe_join_cluster(_Node, #{boot_type := restart}) -> + %% when restart, the node should already be in the cluster + %% hence no need to (re)join + ok; maybe_join_cluster(_Node, #{role := replicant}) -> ok; maybe_join_cluster(Node, Spec) -> @@ -352,23 +400,7 @@ stop(Nodes) -> stop_node(Name) -> Node = node_name(Name), - try get_node_opts(Node) of - Opts -> - stop_node(Name, Opts) - catch - error:{erpc, _} -> - ok - end. - -stop_node(Node, #{driver := ct_slave}) -> - case ct_slave:stop(Node, [{stop_timeout, ?TIMEOUT_NODE_STOP_S}]) of - {ok, _} -> - ok; - {error, Reason, _} when Reason == not_connected; Reason == not_started -> - ok - end; -stop_node(Node, #{driver := slave}) -> - slave:stop(Node). + ok = emqx_cth_peer:stop(Node). %% Ports @@ -391,36 +423,12 @@ listener_port(BasePort, wss) -> %% --spec start_bare_node(atom(), map()) -> node(). -start_bare_node(Name, Spec = #{driver := ct_slave}) -> - {ok, Node} = ct_slave:start( - node_name(Name), - [ - {kill_if_fail, true}, - {monitor_master, true}, - {init_timeout, 20_000}, - {startup_timeout, 20_000}, - {erl_flags, erl_flags()}, - {env, []} - ] - ), - init_bare_node(Node, Spec); -start_bare_node(Name, Spec = #{driver := slave}) -> - {ok, Node} = slave:start_link(host(), Name, ebin_path()), - init_bare_node(Node, Spec). - -init_bare_node(Node, Spec) -> - pong = net_adm:ping(Node), - % Preserve node spec right on the remote node - ok = set_node_opts(Node, Spec), - Node. - erl_flags() -> - %% One core and redirecting logs to master - "+S 1:1 -master " ++ atom_to_list(node()) ++ " " ++ ebin_path(). + %% One core + ["+S", "1:1"] ++ ebin_path(). ebin_path() -> - string:join(["-pa" | lists:filter(fun is_lib/1, code:get_path())], " "). + ["-pa" | lists:filter(fun is_lib/1, code:get_path())]. 
is_lib(Path) -> string:prefix(Path, code:lib_dir()) =:= nomatch andalso diff --git a/apps/emqx/test/emqx_cth_peer.erl b/apps/emqx/test/emqx_cth_peer.erl new file mode 100644 index 000000000..8b1996cbd --- /dev/null +++ b/apps/emqx/test/emqx_cth_peer.erl @@ -0,0 +1,79 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% @doc Common Test Helper proxy module for slave -> peer migration. +%% OTP 26 has slave module deprecated, use peer instead. + +-module(emqx_cth_peer). + +-export([start/2, start/3, start/4]). +-export([start_link/2, start_link/3, start_link/4]). +-export([stop/1]). + +start(Name, Args) -> + start(Name, Args, []). + +start(Name, Args, Envs) -> + start(Name, Args, Envs, timer:seconds(20)). + +start(Name, Args, Envs, Timeout) when is_atom(Name) -> + do_start(Name, Args, Envs, Timeout, start). + +start_link(Name, Args) -> + start_link(Name, Args, []). + +start_link(Name, Args, Envs) -> + start_link(Name, Args, Envs, timer:seconds(20)). + +start_link(Name, Args, Envs, Timeout) when is_atom(Name) -> + do_start(Name, Args, Envs, Timeout, start_link). 
+ +do_start(Name0, Args, Envs, Timeout, Func) when is_atom(Name0) -> + {Name, Host} = parse_node_name(Name0), + {ok, Pid, Node} = peer:Func(#{ + name => Name, + host => Host, + args => Args, + env => Envs, + wait_boot => Timeout, + longnames => true, + shutdown => {halt, 1000} + }), + true = register(Node, Pid), + {ok, Node}. + +stop(Node) when is_atom(Node) -> + Pid = whereis(Node), + case is_pid(Pid) of + true -> + unlink(Pid), + ok = peer:stop(Pid); + false -> + ct:pal("The control process for node ~p is unexpectedly down", [Node]), + ok + end. + +parse_node_name(NodeName) -> + case string:tokens(atom_to_list(NodeName), "@") of + [Name, Host] -> + {list_to_atom(Name), Host}; + [_] -> + {NodeName, host()} + end. + +host() -> + [_Name, Host] = string:tokens(atom_to_list(node()), "@"), + Host. diff --git a/apps/emqx/test/emqx_cth_suite.erl b/apps/emqx/test/emqx_cth_suite.erl index 401d4f59d..5e91b92c9 100644 --- a/apps/emqx/test/emqx_cth_suite.erl +++ b/apps/emqx/test/emqx_cth_suite.erl @@ -453,6 +453,9 @@ stop_apps(Apps) -> %% +verify_clean_suite_state(#{boot_type := restart}) -> + %% when testing node restart, we do not need to verify clean state + ok; verify_clean_suite_state(#{work_dir := WorkDir}) -> {ok, []} = file:list_dir(WorkDir), false = emqx_schema_hooks:any_injections(), diff --git a/apps/emqx/test/emqx_metrics_worker_SUITE.erl b/apps/emqx/test/emqx_metrics_worker_SUITE.erl index 194c9cc99..784eac18e 100644 --- a/apps/emqx/test/emqx_metrics_worker_SUITE.erl +++ b/apps/emqx/test/emqx_metrics_worker_SUITE.erl @@ -53,9 +53,9 @@ t_get_metrics(_) -> ?assertMatch( #{ rate := #{ - a := #{current := 0.0, max := 0.0, last5m := 0.0}, - b := #{current := 0.0, max := 0.0, last5m := 0.0}, - c := #{current := 0.0, max := 0.0, last5m := 0.0} + a := #{current := +0.0, max := +0.0, last5m := +0.0}, + b := #{current := +0.0, max := +0.0, last5m := +0.0}, + c := #{current := +0.0, max := +0.0, last5m := +0.0} }, gauges := #{}, counters := #{ @@ -118,9 +118,9 @@
t_clear_metrics(_Config) -> ?assertMatch( #{ rate := #{ - a := #{current := 0.0, max := 0.0, last5m := 0.0}, - b := #{current := 0.0, max := 0.0, last5m := 0.0}, - c := #{current := 0.0, max := 0.0, last5m := 0.0} + a := #{current := +0.0, max := +0.0, last5m := +0.0}, + b := #{current := +0.0, max := +0.0, last5m := +0.0}, + c := #{current := +0.0, max := +0.0, last5m := +0.0} }, gauges := #{}, slides := #{}, @@ -145,7 +145,7 @@ t_clear_metrics(_Config) -> #{ counters => #{}, gauges => #{}, - rate => #{current => 0.0, last5m => 0.0, max => 0.0}, + rate => #{current => +0.0, last5m => +0.0, max => +0.0}, slides => #{} }, emqx_metrics_worker:get_metrics(?NAME, Id) @@ -160,9 +160,9 @@ t_reset_metrics(_) -> ?assertMatch( #{ rate := #{ - a := #{current := 0.0, max := 0.0, last5m := 0.0}, - b := #{current := 0.0, max := 0.0, last5m := 0.0}, - c := #{current := 0.0, max := 0.0, last5m := 0.0} + a := #{current := +0.0, max := +0.0, last5m := +0.0}, + b := #{current := +0.0, max := +0.0, last5m := +0.0}, + c := #{current := +0.0, max := +0.0, last5m := +0.0} }, gauges := #{}, counters := #{ diff --git a/apps/emqx/test/emqx_mountpoint_SUITE.erl b/apps/emqx/test/emqx_mountpoint_SUITE.erl index 6d065d521..1d9539409 100644 --- a/apps/emqx/test/emqx_mountpoint_SUITE.erl +++ b/apps/emqx/test/emqx_mountpoint_SUITE.erl @@ -29,6 +29,7 @@ ). -include_lib("emqx/include/emqx.hrl"). +-include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("eunit/include/eunit.hrl"). all() -> emqx_common_test_helpers:all(?MODULE). @@ -52,6 +53,24 @@ t_mount(_) -> mount(<<"device/1/">>, TopicFilters) ). 
+t_mount_share(_) -> + T = {TopicFilter, Opts} = emqx_topic:parse(<<"$share/group/topic">>), + TopicFilters = [T], + ?assertEqual(TopicFilter, #share{group = <<"group">>, topic = <<"topic">>}), + + ?assertEqual( + TopicFilter, + mount(undefined, TopicFilter) + ), + ?assertEqual( + #share{group = <<"group">>, topic = <<"device/1/topic">>}, + mount(<<"device/1/">>, TopicFilter) + ), + ?assertEqual( + [{#share{group = <<"group">>, topic = <<"device/1/topic">>}, Opts}], + mount(<<"device/1/">>, TopicFilters) + ). + t_unmount(_) -> Msg = emqx_message:make(<<"clientid">>, <<"device/1/topic">>, <<"payload">>), ?assertEqual(<<"topic">>, unmount(undefined, <<"topic">>)), @@ -61,6 +80,21 @@ t_unmount(_) -> ?assertEqual(<<"device/1/topic">>, unmount(<<"device/2/">>, <<"device/1/topic">>)), ?assertEqual(Msg#message{topic = <<"device/1/topic">>}, unmount(<<"device/2/">>, Msg)). +t_unmount_share(_) -> + {TopicFilter, _Opts} = emqx_topic:parse(<<"$share/group/topic">>), + MountedTopicFilter = #share{group = <<"group">>, topic = <<"device/1/topic">>}, + + ?assertEqual(TopicFilter, #share{group = <<"group">>, topic = <<"topic">>}), + + ?assertEqual( + TopicFilter, + unmount(undefined, TopicFilter) + ), + ?assertEqual( + #share{group = <<"group">>, topic = <<"topic">>}, + unmount(<<"device/1/">>, MountedTopicFilter) + ). + t_replvar(_) -> ?assertEqual(undefined, replvar(undefined, #{})), ?assertEqual( diff --git a/apps/emqx/test/emqx_mqtt_caps_SUITE.erl b/apps/emqx/test/emqx_mqtt_caps_SUITE.erl index 297ee7f7d..e97684b74 100644 --- a/apps/emqx/test/emqx_mqtt_caps_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_caps_SUITE.erl @@ -76,6 +76,8 @@ t_check_sub(_) -> ), ?assertEqual( {error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED}, - emqx_mqtt_caps:check_sub(ClientInfo, <<"topic">>, SubOpts#{share => true}) + emqx_mqtt_caps:check_sub( + ClientInfo, #share{group = <<"group">>, topic = <<"topic">>}, SubOpts + ) ), emqx_config:put([zones], OldConf). 
diff --git a/apps/emqx/test/emqx_persistent_messages_SUITE.erl b/apps/emqx/test/emqx_persistent_messages_SUITE.erl index 45cf85a05..80a83c0a4 100644 --- a/apps/emqx/test/emqx_persistent_messages_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_messages_SUITE.erl @@ -233,6 +233,55 @@ t_session_subscription_iterators(Config) -> ), ok. +t_qos0(_Config) -> + Sub = connect(<<?MODULE_STRING "1">>, true, 30), + Pub = connect(<<?MODULE_STRING "2">>, true, 0), + try + {ok, _, [1]} = emqtt:subscribe(Sub, <<"t/#">>, qos1), + + Messages = [ + {<<"t/1">>, <<"1">>, 0}, + {<<"t/1">>, <<"2">>, 1}, + {<<"t/1">>, <<"3">>, 0} + ], + [emqtt:publish(Pub, Topic, Payload, Qos) || {Topic, Payload, Qos} <- Messages], + ?assertMatch( + [ + #{qos := 0, topic := <<"t/1">>, payload := <<"1">>}, + #{qos := 1, topic := <<"t/1">>, payload := <<"2">>}, + #{qos := 0, topic := <<"t/1">>, payload := <<"3">>} + ], + receive_messages(3) + ) + after + emqtt:stop(Sub), + emqtt:stop(Pub) + end. + +t_publish_as_persistent(_Config) -> + Sub = connect(<<?MODULE_STRING "1">>, true, 30), + Pub = connect(<<?MODULE_STRING "2">>, true, 30), + try + {ok, _, [1]} = emqtt:subscribe(Sub, <<"t/#">>, qos1), + Messages = [ + {<<"t/1">>, <<"1">>, 0}, + {<<"t/1">>, <<"2">>, 1}, + {<<"t/1">>, <<"3">>, 2} + ], + [emqtt:publish(Pub, Topic, Payload, Qos) || {Topic, Payload, Qos} <- Messages], + ?assertMatch( + [ + #{qos := 0, topic := <<"t/1">>, payload := <<"1">>}, + #{qos := 1, topic := <<"t/1">>, payload := <<"2">>}, + #{qos := 2, topic := <<"t/1">>, payload := <<"3">>} + ], + receive_messages(3) + ) + after + emqtt:stop(Sub), + emqtt:stop(Pub) + end. + %% connect(ClientId, CleanStart, EI) -> @@ -273,7 +322,7 @@ consume(It) -> end. receive_messages(Count) -> - receive_messages(Count, []). + lists:reverse(receive_messages(Count, [])). receive_messages(0, Msgs) -> Msgs; @@ -291,7 +340,7 @@ publish(Node, Message) -> app_specs() -> [ emqx_durable_storage, - {emqx, "persistent_session_store {ds = true}"} + {emqx, "session_persistence {enable = true}"} ].
cluster() -> @@ -307,4 +356,6 @@ get_mqtt_port(Node, Type) -> clear_db() -> ok = emqx_ds:drop_db(?PERSISTENT_MESSAGE_DB), + mria:stop(), + ok = mnesia:delete_schema([node()]), ok. diff --git a/apps/emqx/test/emqx_persistent_session_SUITE.erl b/apps/emqx/test/emqx_persistent_session_SUITE.erl index bd7ca1c46..66bb8dcf5 100644 --- a/apps/emqx/test/emqx_persistent_session_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_session_SUITE.erl @@ -17,6 +17,7 @@ -module(emqx_persistent_session_SUITE). -include_lib("stdlib/include/assert.hrl"). +-include_lib("emqx/include/asserts.hrl"). -include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). @@ -35,8 +36,8 @@ all() -> % NOTE % Tests are disabled while existing session persistence impl is being % phased out. - {group, persistent_store_disabled}, - {group, persistent_store_ds} + {group, persistence_disabled}, + {group, persistence_enabled} ]. %% A persistent session can be resumed in two ways: @@ -53,28 +54,28 @@ all() -> groups() -> TCs = emqx_common_test_helpers:all(?MODULE), TCsNonGeneric = [t_choose_impl], + TCGroups = [{group, tcp}, {group, quic}, {group, ws}], [ - {persistent_store_disabled, [{group, no_kill_connection_process}]}, - {persistent_store_ds, [{group, no_kill_connection_process}]}, - {no_kill_connection_process, [], [{group, tcp}, {group, quic}, {group, ws}]}, + {persistence_disabled, TCGroups}, + {persistence_enabled, TCGroups}, {tcp, [], TCs}, {quic, [], TCs -- TCsNonGeneric}, {ws, [], TCs -- TCsNonGeneric} ]. 
-init_per_group(persistent_store_disabled, Config) -> +init_per_group(persistence_disabled, Config) -> [ - {emqx_config, "persistent_session_store { enabled = false }"}, - {persistent_store, false} + {emqx_config, "session_persistence { enable = false }"}, + {persistence, false} | Config ]; -init_per_group(persistent_store_ds, Config) -> +init_per_group(persistence_enabled, Config) -> [ - {emqx_config, "persistent_session_store { ds = true }"}, - {persistent_store, ds} + {emqx_config, "session_persistence { enable = true }"}, + {persistence, ds} | Config ]; -init_per_group(Group, Config) when Group == tcp -> +init_per_group(tcp, Config) -> Apps = emqx_cth_suite:start( [{emqx, ?config(emqx_config, Config)}], #{work_dir => emqx_cth_suite:work_dir(Config)} @@ -85,7 +86,7 @@ init_per_group(Group, Config) when Group == tcp -> {group_apps, Apps} | Config ]; -init_per_group(Group, Config) when Group == ws -> +init_per_group(ws, Config) -> Apps = emqx_cth_suite:start( [{emqx, ?config(emqx_config, Config)}], #{work_dir => emqx_cth_suite:work_dir(Config)} @@ -99,7 +100,7 @@ init_per_group(Group, Config) when Group == ws -> {group_apps, Apps} | Config ]; -init_per_group(Group, Config) when Group == quic -> +init_per_group(quic, Config) -> Apps = emqx_cth_suite:start( [ {emqx, @@ -118,11 +119,7 @@ init_per_group(Group, Config) when Group == quic -> {ssl, true}, {group_apps, Apps} | Config - ]; -init_per_group(no_kill_connection_process, Config) -> - [{kill_connection_process, false} | Config]; -init_per_group(kill_connection_process, Config) -> - [{kill_connection_process, true} | Config]. + ]. get_listener_port(Type, Name) -> case emqx_config:get([listeners, Type, Name, bind]) of @@ -181,64 +178,106 @@ client_info(Key, Client) -> maps:get(Key, maps:from_list(emqtt:info(Client)), undefined). receive_messages(Count) -> - receive_messages(Count, []). + receive_messages(Count, 15000). 
-receive_messages(0, Msgs) -> - Msgs; -receive_messages(Count, Msgs) -> +receive_messages(Count, Timeout) -> + Deadline = erlang:monotonic_time(millisecond) + Timeout, + receive_message_loop(Count, Deadline). + +receive_message_loop(0, _Deadline) -> + []; +receive_message_loop(Count, Deadline) -> + Timeout = max(0, Deadline - erlang:monotonic_time(millisecond)), receive {publish, Msg} -> - receive_messages(Count - 1, [Msg | Msgs]); + [Msg | receive_message_loop(Count - 1, Deadline)]; + {pubrel, Msg} -> + [{pubrel, Msg} | receive_message_loop(Count - 1, Deadline)]; _Other -> - receive_messages(Count, Msgs) - after 15000 -> - Msgs + receive_message_loop(Count, Deadline) + after Timeout -> + [] end. maybe_kill_connection_process(ClientId, Config) -> - case ?config(kill_connection_process, Config) of - true -> - case emqx_cm:lookup_channels(ClientId) of - [] -> - ok; - [ConnectionPid] -> - ?assert(is_pid(ConnectionPid)), - Ref = monitor(process, ConnectionPid), - ConnectionPid ! die_if_test, - receive - {'DOWN', Ref, process, ConnectionPid, normal} -> ok - after 3000 -> error(process_did_not_die) - end, - wait_for_cm_unregister(ClientId) - end; - false -> - ok - end. - -wait_for_cm_unregister(ClientId) -> - wait_for_cm_unregister(ClientId, 100). - -wait_for_cm_unregister(_ClientId, 0) -> - error(cm_did_not_unregister); -wait_for_cm_unregister(ClientId, N) -> + Persistence = ?config(persistence, Config), case emqx_cm:lookup_channels(ClientId) of [] -> ok; - [_] -> - timer:sleep(100), - wait_for_cm_unregister(ClientId, N - 1) + [ConnectionPid] when Persistence == ds -> + Ref = monitor(process, ConnectionPid), + ConnectionPid ! die_if_test, + ?assertReceive( + {'DOWN', Ref, process, ConnectionPid, Reason} when + Reason == normal orelse Reason == noproc, + 3000 + ), + wait_connection_process_unregistered(ClientId); + _ -> + ok end. -publish(Topic, Payloads) -> - publish(Topic, Payloads, false, 2). 
+wait_connection_process_dies(ClientId) -> + case emqx_cm:lookup_channels(ClientId) of + [] -> + ok; + [ConnectionPid] -> + Ref = monitor(process, ConnectionPid), + ?assertReceive( + {'DOWN', Ref, process, ConnectionPid, Reason} when + Reason == normal orelse Reason == noproc, + 3000 + ), + wait_connection_process_unregistered(ClientId) + end. -publish(Topic, Payloads, WaitForUnregister, QoS) -> - Fun = fun(Client, Payload) -> - {ok, _} = emqtt:publish(Client, Topic, Payload, QoS) +wait_connection_process_unregistered(ClientId) -> + ?retry( + _Timeout = 100, + _Retries = 20, + ?assertEqual([], emqx_cm:lookup_channels(ClientId)) + ). + +wait_channel_disconnected(ClientId) -> + ?retry( + _Timeout = 100, + _Retries = 20, + case emqx_cm:lookup_channels(ClientId) of + [] -> + false; + [ChanPid] -> + false = emqx_cm:is_channel_connected(ChanPid) + end + ). + +disconnect_client(ClientPid) -> + ClientId = proplists:get_value(clientid, emqtt:info(ClientPid)), + ok = emqtt:disconnect(ClientPid), + false = wait_channel_disconnected(ClientId), + ok. + +messages(Topic, Payloads) -> + messages(Topic, Payloads, ?QOS_2). + +messages(Topic, Payloads, QoS) -> + [#mqtt_msg{topic = Topic, payload = P, qos = QoS} || P <- Payloads]. + +publish(Topic, Payload) -> + publish(Topic, Payload, ?QOS_2). + +publish(Topic, Payload, QoS) -> + publish_many(messages(Topic, [Payload], QoS)). + +publish_many(Messages) -> + publish_many(Messages, false). + +publish_many(Messages, WaitForUnregister) -> + Fun = fun(Client, Message) -> + {ok, _} = emqtt:publish(Client, Message) end, - do_publish(Payloads, Fun, WaitForUnregister). + do_publish(Messages, Fun, WaitForUnregister). -do_publish(Payloads = [_ | _], PublishFun, WaitForUnregister) -> +do_publish(Messages = [_ | _], PublishFun, WaitForUnregister) -> %% Publish from another process to avoid connection confusion. 
{Pid, Ref} = spawn_monitor( @@ -252,34 +291,16 @@ do_publish(Payloads = [_ | _], PublishFun, WaitForUnregister) -> {port, 1883} ]), {ok, _} = emqtt:connect(Client), - lists:foreach(fun(Payload) -> PublishFun(Client, Payload) end, Payloads), + lists:foreach(fun(Message) -> PublishFun(Client, Message) end, Messages), ok = emqtt:disconnect(Client), %% Snabbkaffe sometimes fails unless all processes are gone. - case WaitForUnregister of - false -> - ok; - true -> - case emqx_cm:lookup_channels(ClientID) of - [] -> - ok; - [ConnectionPid] -> - ?assert(is_pid(ConnectionPid)), - Ref1 = monitor(process, ConnectionPid), - receive - {'DOWN', Ref1, process, ConnectionPid, _} -> ok - after 3000 -> error(process_did_not_die) - end, - wait_for_cm_unregister(ClientID) - end - end + WaitForUnregister andalso wait_connection_process_dies(ClientID) end ), receive {'DOWN', Ref, process, Pid, normal} -> ok; {'DOWN', Ref, process, Pid, What} -> error({failed_publish, What}) - end; -do_publish(Payload, PublishFun, WaitForUnregister) -> - do_publish([Payload], PublishFun, WaitForUnregister). + end. %%-------------------------------------------------------------------- %% Test Cases @@ -297,7 +318,7 @@ t_choose_impl(Config) -> {ok, _} = emqtt:ConnFun(Client), [ChanPid] = emqx_cm:lookup_channels(ClientId), ?assertEqual( - case ?config(persistent_store, Config) of + case ?config(persistence, Config) of false -> emqx_session_mem; ds -> emqx_persistent_session_ds end, @@ -332,8 +353,6 @@ t_connect_discards_existing_client(Config) -> end. %% [MQTT-3.1.2-23] -t_connect_session_expiry_interval(init, Config) -> skip_ds_tc(Config); -t_connect_session_expiry_interval('end', _Config) -> ok. 
t_connect_session_expiry_interval(Config) -> ConnFun = ?config(conn_fun, Config), Topic = ?config(topic, Config), @@ -341,6 +360,45 @@ t_connect_session_expiry_interval(Config) -> Payload = <<"test message">>, ClientId = ?config(client_id, Config), + {ok, Client1} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}} + | Config + ]), + {ok, _} = emqtt:ConnFun(Client1), + {ok, _, [?RC_GRANTED_QOS_1]} = emqtt:subscribe(Client1, STopic, ?QOS_1), + ok = emqtt:disconnect(Client1), + + maybe_kill_connection_process(ClientId, Config), + + publish(Topic, Payload, ?QOS_1), + + {ok, Client2} = emqtt:start_link([ + {clientid, ClientId}, + {proto_ver, v5}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false} + | Config + ]), + {ok, _} = emqtt:ConnFun(Client2), + [Msg | _] = receive_messages(1), + ?assertEqual({ok, iolist_to_binary(Topic)}, maps:find(topic, Msg)), + ?assertEqual({ok, iolist_to_binary(Payload)}, maps:find(payload, Msg)), + ?assertEqual({ok, ?QOS_1}, maps:find(qos, Msg)), + ok = emqtt:disconnect(Client2). + +%% [MQTT-3.1.2-23] +%% TODO: un-skip after QoS 2 support is implemented in DS. +t_connect_session_expiry_interval_qos2(init, Config) -> skip_ds_tc(Config); +t_connect_session_expiry_interval_qos2('end', _Config) -> ok. 
+t_connect_session_expiry_interval_qos2(Config) -> + ConnFun = ?config(conn_fun, Config), + Topic = ?config(topic, Config), + STopic = ?config(stopic, Config), + Payload = <<"test message">>, + ClientId = ?config(client_id, Config), + {ok, Client1} = emqtt:start_link([ {clientid, ClientId}, {proto_ver, v5}, @@ -423,7 +481,7 @@ t_cancel_on_disconnect(Config) -> {ok, _} = emqtt:ConnFun(Client1), ok = emqtt:disconnect(Client1, 0, #{'Session-Expiry-Interval' => 0}), - wait_for_cm_unregister(ClientId), + wait_connection_process_unregistered(ClientId), {ok, Client2} = emqtt:start_link([ {clientid, ClientId}, @@ -455,7 +513,7 @@ t_persist_on_disconnect(Config) -> %% Strangely enough, the disconnect is reported as successful by emqtt. ok = emqtt:disconnect(Client1, 0, #{'Session-Expiry-Interval' => 30}), - wait_for_cm_unregister(ClientId), + wait_connection_process_unregistered(ClientId), {ok, Client2} = emqtt:start_link([ {clientid, ClientId}, @@ -470,8 +528,6 @@ t_persist_on_disconnect(Config) -> ?assertEqual(0, client_info(session_present, Client2)), ok = emqtt:disconnect(Client2). -t_process_dies_session_expires(init, Config) -> skip_ds_tc(Config); -t_process_dies_session_expires('end', _Config) -> ok. t_process_dies_session_expires(Config) -> %% Emulate an error in the connect process, %% or that the node of the process goes down. 
@@ -494,7 +550,7 @@ t_process_dies_session_expires(Config) -> maybe_kill_connection_process(ClientId, Config), - ok = publish(Topic, [Payload]), + ok = publish(Topic, Payload), timer:sleep(1100), @@ -535,7 +591,7 @@ t_publish_while_client_is_gone_qos1(Config) -> ok = emqtt:disconnect(Client1), maybe_kill_connection_process(ClientId, Config), - ok = publish(Topic, [Payload1, Payload2], false, 1), + ok = publish_many(messages(Topic, [Payload1, Payload2], ?QOS_1)), {ok, Client2} = emqtt:start_link([ {proto_ver, v5}, @@ -547,7 +603,7 @@ t_publish_while_client_is_gone_qos1(Config) -> {ok, _} = emqtt:ConnFun(Client2), Msgs = receive_messages(2), ?assertMatch([_, _], Msgs), - [Msg2, Msg1] = Msgs, + [Msg1, Msg2] = Msgs, ?assertEqual({ok, iolist_to_binary(Payload1)}, maps:find(payload, Msg1)), ?assertEqual({ok, 1}, maps:find(qos, Msg1)), ?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg2)), @@ -555,8 +611,123 @@ t_publish_while_client_is_gone_qos1(Config) -> ok = emqtt:disconnect(Client2). -t_publish_while_client_is_gone(init, Config) -> skip_ds_tc(Config); -t_publish_while_client_is_gone('end', _Config) -> ok. +t_publish_many_while_client_is_gone_qos1(Config) -> + %% A persistent session should receive all of the still unacked messages + %% for its subscriptions after the client dies or reconnects, in addition + %% to new messages that were published while the client was gone. The order + %% of the messages should be consistent across reconnects. 
+ ClientId = ?config(client_id, Config), + ConnFun = ?config(conn_fun, Config), + {ok, Client1} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, true}, + {auto_ack, never} + | Config + ]), + {ok, _} = emqtt:ConnFun(Client1), + + STopics = [ + <<"t/+/foo">>, + <<"msg/feed/#">>, + <<"loc/+/+/+">> + ], + [{ok, _, [?QOS_1]} = emqtt:subscribe(Client1, ST, ?QOS_1) || ST <- STopics], + + Pubs1 = [ + #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M1">>, qos = 1}, + #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M2">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M3">>, qos = 1}, + #mqtt_msg{topic = <<"loc/1/2/42">>, payload = <<"M4">>, qos = 1}, + #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M5">>, qos = 1}, + #mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M6">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M7">>, qos = 1} + ], + ok = publish_many(Pubs1), + NPubs1 = length(Pubs1), + + Msgs1 = receive_messages(NPubs1), + NMsgs1 = length(Msgs1), + ?assertEqual(NPubs1, NMsgs1), + + ct:pal("Msgs1 = ~p", [Msgs1]), + + %% TODO + %% This assertion doesn't currently hold because `emqx_ds` doesn't enforce + %% strict ordering reflecting client publishing order. Instead, per-topic + %% ordering is guaranteed per each client. In fact, this violates the MQTT + %% specification, but we deemed it acceptable for now. + %% ?assertMatch([ + %% #{payload := <<"M1">>}, + %% #{payload := <<"M2">>}, + %% #{payload := <<"M3">>}, + %% #{payload := <<"M4">>}, + %% #{payload := <<"M5">>}, + %% #{payload := <<"M6">>}, + %% #{payload := <<"M7">>} + %% ], Msgs1), + + ?assertEqual( + get_topicwise_order(Pubs1), + get_topicwise_order(Msgs1) + ), + + NAcked = 4, + [ok = emqtt:puback(Client1, PktId) || #{packet_id := PktId} <- lists:sublist(Msgs1, NAcked)], + + %% Ensure that PUBACKs are propagated to the channel. 
+ pong = emqtt:ping(Client1), + + ok = disconnect_client(Client1), + maybe_kill_connection_process(ClientId, Config), + + Pubs2 = [ + #mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M8">>, qos = 1}, + #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M9">>, qos = 1}, + #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M10">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/friend">>, payload = <<"M11">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M12">>, qos = 1} + ], + ok = publish_many(Pubs2), + NPubs2 = length(Pubs2), + + {ok, Client2} = emqtt:start_link([ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {clean_start, false}, + {auto_ack, false} + | Config + ]), + {ok, _} = emqtt:ConnFun(Client2), + + %% Try to receive _at most_ `NPubs` messages. + %% There shouldn't be that much unacked messages in the replay anyway, + %% but it's an easy number to pick. + NPubs = NPubs1 + NPubs2, + Msgs2 = receive_messages(NPubs, _Timeout = 2000), + NMsgs2 = length(Msgs2), + + ct:pal("Msgs2 = ~p", [Msgs2]), + + ?assert(NMsgs2 < NPubs, Msgs2), + ?assert(NMsgs2 > NPubs2, Msgs2), + ?assert(NMsgs2 >= NPubs - NAcked, Msgs2), + NSame = NMsgs2 - NPubs2, + ?assert( + lists:all(fun(#{dup := Dup}) -> Dup end, lists:sublist(Msgs2, NSame)) + ), + ?assertNot( + lists:all(fun(#{dup := Dup}) -> Dup end, lists:nthtail(NSame, Msgs2)) + ), + ?assertEqual( + [maps:with([packet_id, topic, payload], M) || M <- lists:nthtail(NMsgs1 - NSame, Msgs1)], + [maps:with([packet_id, topic, payload], M) || M <- lists:sublist(Msgs2, NSame)] + ), + + ok = disconnect_client(Client2). + t_publish_while_client_is_gone(Config) -> %% A persistent session should receive messages in its %% subscription even if the process owning the session dies. 
@@ -579,7 +750,7 @@ t_publish_while_client_is_gone(Config) -> ok = emqtt:disconnect(Client1), maybe_kill_connection_process(ClientId, Config), - ok = publish(Topic, [Payload1, Payload2]), + ok = publish_many(messages(Topic, [Payload1, Payload2])), {ok, Client2} = emqtt:start_link([ {proto_ver, v5}, @@ -591,7 +762,7 @@ t_publish_while_client_is_gone(Config) -> {ok, _} = emqtt:ConnFun(Client2), Msgs = receive_messages(2), ?assertMatch([_, _], Msgs), - [Msg2, Msg1] = Msgs, + [Msg1, Msg2] = Msgs, ?assertEqual({ok, iolist_to_binary(Payload1)}, maps:find(payload, Msg1)), ?assertEqual({ok, 2}, maps:find(qos, Msg1)), ?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg2)), @@ -599,9 +770,157 @@ t_publish_while_client_is_gone(Config) -> ok = emqtt:disconnect(Client2). -%% TODO: don't skip after QoS2 support is added to DS. -t_clean_start_drops_subscriptions(init, Config) -> skip_ds_tc(Config); -t_clean_start_drops_subscriptions('end', _Config) -> ok. +t_publish_many_while_client_is_gone(Config) -> + %% A persistent session should receive all of the still unacked messages + %% for its subscriptions after the client dies or reconnects, in addition + %% to PUBRELs for the messages it has PUBRECed. While client must send + %% PUBACKs and PUBRECs in order, those orders are independent of each other. 
+ ClientId = ?config(client_id, Config), + ConnFun = ?config(conn_fun, Config), + ClientOpts = [ + {proto_ver, v5}, + {clientid, ClientId}, + {properties, #{'Session-Expiry-Interval' => 30}}, + {auto_ack, never} + | Config + ], + + {ok, Client1} = emqtt:start_link([{clean_start, true} | ClientOpts]), + {ok, _} = emqtt:ConnFun(Client1), + {ok, _, [?QOS_1]} = emqtt:subscribe(Client1, <<"t/+/foo">>, ?QOS_1), + {ok, _, [?QOS_2]} = emqtt:subscribe(Client1, <<"msg/feed/#">>, ?QOS_2), + {ok, _, [?QOS_2]} = emqtt:subscribe(Client1, <<"loc/+/+/+">>, ?QOS_2), + + Pubs1 = [ + #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M1">>, qos = 1}, + #mqtt_msg{topic = <<"t/42/foo">>, payload = <<"M2">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M3">>, qos = 2}, + #mqtt_msg{topic = <<"loc/1/2/42">>, payload = <<"M4">>, qos = 2}, + #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M5">>, qos = 2}, + #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M6">>, qos = 1}, + #mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M7">>, qos = 2}, + #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M8">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/me">>, payload = <<"M9">>, qos = 2} + ], + ok = publish_many(Pubs1), + NPubs1 = length(Pubs1), + + Msgs1 = receive_messages(NPubs1), + ct:pal("Msgs1 = ~p", [Msgs1]), + NMsgs1 = length(Msgs1), + ?assertEqual(NPubs1, NMsgs1), + + ?assertEqual( + get_topicwise_order(Pubs1), + get_topicwise_order(Msgs1) + ), + + %% PUBACK every QoS 1 message. + lists:foreach( + fun(PktId) -> ok = emqtt:puback(Client1, PktId) end, + [PktId || #{qos := 1, packet_id := PktId} <- Msgs1] + ), + + %% PUBREC first `NRecs` QoS 2 messages. + NRecs = 3, + PubRecs1 = lists:sublist([PktId || #{qos := 2, packet_id := PktId} <- Msgs1], NRecs), + lists:foreach( + fun(PktId) -> ok = emqtt:pubrec(Client1, PktId) end, + PubRecs1 + ), + + %% Ensure that PUBACKs / PUBRECs are propagated to the channel. + pong = emqtt:ping(Client1), + + %% Receive PUBRELs for the sent PUBRECs. 
+ PubRels1 = receive_messages(NRecs), + ct:pal("PubRels1 = ~p", [PubRels1]), + ?assertEqual( + PubRecs1, + [PktId || {pubrel, #{packet_id := PktId}} <- PubRels1], + PubRels1 + ), + + ok = disconnect_client(Client1), + maybe_kill_connection_process(ClientId, Config), + + Pubs2 = [ + #mqtt_msg{topic = <<"loc/3/4/5">>, payload = <<"M10">>, qos = 2}, + #mqtt_msg{topic = <<"t/100/foo">>, payload = <<"M11">>, qos = 1}, + #mqtt_msg{topic = <<"msg/feed/friend">>, payload = <<"M12">>, qos = 2} + ], + ok = publish_many(Pubs2), + NPubs2 = length(Pubs2), + + {ok, Client2} = emqtt:start_link([{clean_start, false} | ClientOpts]), + {ok, _} = emqtt:ConnFun(Client2), + + %% Try to receive _at most_ `NPubs` messages. + %% There shouldn't be that much unacked messages in the replay anyway, + %% but it's an easy number to pick. + NPubs = NPubs1 + NPubs2, + Msgs2 = receive_messages(NPubs, _Timeout = 2000), + ct:pal("Msgs2 = ~p", [Msgs2]), + + %% We should again receive PUBRELs for the PUBRECs we sent earlier. + ?assertEqual( + get_msgs_essentials(PubRels1), + [get_msg_essentials(PubRel) || PubRel = {pubrel, _} <- Msgs2] + ), + + %% We should receive duplicates only for QoS 2 messages where PUBRELs were + %% not sent, in the same order as the original messages. + Msgs2Dups = [get_msg_essentials(M) || M = #{dup := true} <- Msgs2], + ?assertEqual( + Msgs2Dups, + [M || M = #{qos := 2} <- Msgs2Dups] + ), + ?assertEqual( + get_msgs_essentials(pick_respective_msgs(Msgs2Dups, Msgs1)), + Msgs2Dups + ), + + %% Now complete all yet incomplete QoS 2 message flows instead. + PubRecs2 = [PktId || #{qos := 2, packet_id := PktId} <- Msgs2], + lists:foreach( + fun(PktId) -> ok = emqtt:pubrec(Client2, PktId) end, + PubRecs2 + ), + + PubRels2 = receive_messages(length(PubRecs2)), + ct:pal("PubRels2 = ~p", [PubRels2]), + ?assertEqual( + PubRecs2, + [PktId || {pubrel, #{packet_id := PktId}} <- PubRels2], + PubRels2 + ), + + %% PUBCOMP every PUBREL. 
+ PubComps = [PktId || {pubrel, #{packet_id := PktId}} <- PubRels1 ++ PubRels2], + lists:foreach( + fun(PktId) -> ok = emqtt:pubcomp(Client2, PktId) end, + PubComps + ), + + %% Ensure that PUBCOMPs are propagated to the channel. + pong = emqtt:ping(Client2), + + ok = disconnect_client(Client2), + maybe_kill_connection_process(ClientId, Config), + + {ok, Client3} = emqtt:start_link([{clean_start, false} | ClientOpts]), + {ok, _} = emqtt:ConnFun(Client3), + + %% Only the last unacked QoS 1 message should be retransmitted. + Msgs3 = receive_messages(NPubs, _Timeout = 2000), + ct:pal("Msgs3 = ~p", [Msgs3]), + ?assertMatch( + [#{topic := <<"t/100/foo">>, payload := <<"M11">>, qos := 1, dup := true}], + Msgs3 + ), + + ok = disconnect_client(Client3). + t_clean_start_drops_subscriptions(Config) -> %% 1. A persistent session is started and disconnected. %% 2. While disconnected, a message is published and persisted. @@ -627,13 +946,13 @@ t_clean_start_drops_subscriptions(Config) -> | Config ]), {ok, _} = emqtt:ConnFun(Client1), - {ok, _, [2]} = emqtt:subscribe(Client1, STopic, qos2), + {ok, _, [1]} = emqtt:subscribe(Client1, STopic, qos1), ok = emqtt:disconnect(Client1), maybe_kill_connection_process(ClientId, Config), %% 2. - ok = publish(Topic, Payload1), + ok = publish(Topic, Payload1, ?QOS_1), %% 3. 
{ok, Client2} = emqtt:start_link([ @@ -645,12 +964,14 @@ t_clean_start_drops_subscriptions(Config) -> ]), {ok, _} = emqtt:ConnFun(Client2), ?assertEqual(0, client_info(session_present, Client2)), - {ok, _, [2]} = emqtt:subscribe(Client2, STopic, qos2), + {ok, _, [1]} = emqtt:subscribe(Client2, STopic, qos1), - ok = publish(Topic, Payload2), + timer:sleep(100), + ok = publish(Topic, Payload2, ?QOS_1), [Msg1] = receive_messages(1), ?assertEqual({ok, iolist_to_binary(Payload2)}, maps:find(payload, Msg1)), + pong = emqtt:ping(Client2), ok = emqtt:disconnect(Client2), maybe_kill_connection_process(ClientId, Config), @@ -664,10 +985,11 @@ t_clean_start_drops_subscriptions(Config) -> ]), {ok, _} = emqtt:ConnFun(Client3), - ok = publish(Topic, Payload3), + ok = publish(Topic, Payload3, ?QOS_1), [Msg2] = receive_messages(1), ?assertEqual({ok, iolist_to_binary(Payload3)}, maps:find(payload, Msg2)), + pong = emqtt:ping(Client3), ok = emqtt:disconnect(Client3). t_unsubscribe(Config) -> @@ -731,8 +1053,32 @@ t_multiple_subscription_matches(Config) -> ?assertEqual({ok, 2}, maps:find(qos, Msg2)), ok = emqtt:disconnect(Client2). +get_topicwise_order(Msgs) -> + maps:groups_from_list(fun get_msgpub_topic/1, fun get_msgpub_payload/1, Msgs). + +get_msgpub_topic(#mqtt_msg{topic = Topic}) -> + Topic; +get_msgpub_topic(#{topic := Topic}) -> + Topic. + +get_msgpub_payload(#mqtt_msg{payload = Payload}) -> + Payload; +get_msgpub_payload(#{payload := Payload}) -> + Payload. + +get_msg_essentials(Msg = #{}) -> + maps:with([packet_id, topic, payload, qos], Msg); +get_msg_essentials({pubrel, Msg}) -> + {pubrel, maps:with([packet_id, reason_code], Msg)}. + +get_msgs_essentials(Msgs) -> + [get_msg_essentials(M) || M <- Msgs]. + +pick_respective_msgs(MsgRefs, Msgs) -> + [M || M <- Msgs, Ref <- MsgRefs, maps:get(packet_id, M) =:= maps:get(packet_id, Ref)]. 
+ skip_ds_tc(Config) -> - case ?config(persistent_store, Config) of + case ?config(persistence, Config) of ds -> {skip, "Testcase not yet supported under 'emqx_persistent_session_ds' implementation"}; _ -> diff --git a/apps/emqx/test/emqx_persistent_session_ds_router_SUITE.erl b/apps/emqx/test/emqx_persistent_session_ds_router_SUITE.erl index 3e48173c3..cc50d66ee 100644 --- a/apps/emqx/test/emqx_persistent_session_ds_router_SUITE.erl +++ b/apps/emqx/test/emqx_persistent_session_ds_router_SUITE.erl @@ -38,7 +38,7 @@ init_per_suite(Config) -> AppSpecs = [ emqx_durable_storage, {emqx, #{ - config => #{persistent_session_store => #{ds => true}}, + config => #{session_persistence => #{enable => true}}, override_env => [{boot_modules, [broker]}] }} ], diff --git a/apps/emqx/test/emqx_proper_types.erl b/apps/emqx/test/emqx_proper_types.erl index 0a66b3628..6c2ad56f9 100644 --- a/apps/emqx/test/emqx_proper_types.erl +++ b/apps/emqx/test/emqx_proper_types.erl @@ -511,13 +511,7 @@ peercert() -> conn_mod() -> oneof([ emqx_connection, - emqx_ws_connection, - emqx_coap_mqtt_adapter, - emqx_sn_gateway, - emqx_lwm2m_protocol, - emqx_gbt32960_conn, - emqx_jt808_connection, - emqx_tcp_connection + emqx_ws_connection ]). 
proto_name() -> diff --git a/apps/emqx/test/emqx_quic_multistreams_SUITE.erl b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl index c5eaf4c24..b2205a659 100644 --- a/apps/emqx/test/emqx_quic_multistreams_SUITE.erl +++ b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl @@ -669,22 +669,21 @@ t_multi_streams_packet_malform(Config) -> case quicer:send(MalformStream, <<0, 0, 0, 0, 0, 0, 0, 0, 0, 0>>) of {ok, 10} -> ok; {error, cancelled} -> ok; - {error, stm_send_error, aborted} -> ok + {error, stm_send_error, aborted} -> ok; + {error, closed} -> ok end, ?assert(is_list(emqtt:info(C))), - - {error, stm_send_error, _} = + {error, closed} = snabbkaffe:retry( 10000, 10, fun() -> - {error, stm_send_error, _} = quicer:send( + {error, closed} = quicer:send( MalformStream, <<1, 2, 3, 4, 5, 6, 7, 8, 9, 0>> ) end ), - ?assert(is_list(emqtt:info(C))), ok = emqtt:disconnect(C). @@ -770,9 +769,9 @@ t_multi_streams_packet_too_large(Config) -> timeout = recv_pub(1), ?assert(is_list(emqtt:info(C))), - %% Connection could be kept - {error, stm_send_error, _} = quicer:send(via_stream(PubVia), <<1>>), - {error, stm_send_error, _} = quicer:send(via_stream(PubVia2), <<1>>), + %% Connection could be kept but data stream are closed! + {error, closed} = quicer:send(via_stream(PubVia), <<1>>), + {error, closed} = quicer:send(via_stream(PubVia2), <<1>>), %% We could send data over new stream {ok, PubVia3} = emqtt:start_data_stream(C, []), ok = emqtt:publish_async( diff --git a/apps/emqx/test/emqx_router_helper_SUITE.erl b/apps/emqx/test/emqx_router_helper_SUITE.erl index 8fe052af8..c16277884 100644 --- a/apps/emqx/test/emqx_router_helper_SUITE.erl +++ b/apps/emqx/test/emqx_router_helper_SUITE.erl @@ -80,7 +80,7 @@ t_mnesia(_) -> ct:sleep(200). 
t_cleanup_membership_mnesia_down(_Config) -> - Slave = emqx_cth_cluster:node_name(?FUNCTION_NAME), + Slave = emqx_cth_cluster:node_name(node2), emqx_router:add_route(<<"a/b/c">>, Slave), emqx_router:add_route(<<"d/e/f">>, node()), ?assertMatch([_, _], emqx_router:topics()), @@ -92,7 +92,7 @@ t_cleanup_membership_mnesia_down(_Config) -> ?assertEqual([<<"d/e/f">>], emqx_router:topics()). t_cleanup_membership_node_down(_Config) -> - Slave = emqx_cth_cluster:node_name(?FUNCTION_NAME), + Slave = emqx_cth_cluster:node_name(node3), emqx_router:add_route(<<"a/b/c">>, Slave), emqx_router:add_route(<<"d/e/f">>, node()), ?assertMatch([_, _], emqx_router:topics()), @@ -104,7 +104,7 @@ t_cleanup_membership_node_down(_Config) -> ?assertEqual([<<"d/e/f">>], emqx_router:topics()). t_cleanup_monitor_node_down(_Config) -> - Slave = emqx_cth_cluster:start_bare_node(?FUNCTION_NAME, #{driver => ct_slave}), + [Slave] = emqx_cth_cluster:start_bare_nodes([node4]), emqx_router:add_route(<<"a/b/c">>, Slave), emqx_router:add_route(<<"d/e/f">>, node()), ?assertMatch([_, _], emqx_router:topics()), diff --git a/apps/emqx/test/emqx_routing_SUITE.erl b/apps/emqx/test/emqx_routing_SUITE.erl index a54e1b4dd..c9ad63cf1 100644 --- a/apps/emqx/test/emqx_routing_SUITE.erl +++ b/apps/emqx/test/emqx_routing_SUITE.erl @@ -218,38 +218,41 @@ t_routing_schema_switch(VFrom, VTo, Config) -> ], #{work_dir => WorkDir} ), - % Verify that new nodes switched to schema v1/v2 in presence of v1/v2 routes respectively Nodes = [Node1, Node2, Node3], - ?assertEqual( - [{ok, VTo}, {ok, VTo}, {ok, VTo}], - erpc:multicall(Nodes, emqx_router, get_schema_vsn, []) - ), - % Wait for all nodes to agree on cluster state - ?retry( - 500, - 10, - ?assertMatch( - [{ok, [Node1, Node2, Node3]}], - lists:usort(erpc:multicall(Nodes, emqx, running_nodes, [])) - ) - ), - % Verify that routing works as expected - C2 = start_client(Node2), - ok = subscribe(C2, <<"a/+/d">>), - C3 = start_client(Node3), - ok = subscribe(C3, <<"d/e/f/#">>), - 
{ok, _} = publish(C1, <<"a/b/d">>, <<"hey-newbies">>), - {ok, _} = publish(C2, <<"a/b/c">>, <<"hi">>), - {ok, _} = publish(C3, <<"d/e/f/42">>, <<"hello">>), - ?assertReceive({pub, C2, #{topic := <<"a/b/d">>, payload := <<"hey-newbies">>}}), - ?assertReceive({pub, C1, #{topic := <<"a/b/c">>, payload := <<"hi">>}}), - ?assertReceive({pub, C1, #{topic := <<"d/e/f/42">>, payload := <<"hello">>}}), - ?assertReceive({pub, C3, #{topic := <<"d/e/f/42">>, payload := <<"hello">>}}), - ?assertNotReceive(_), - ok = emqtt:stop(C1), - ok = emqtt:stop(C2), - ok = emqtt:stop(C3), - ok = emqx_cth_cluster:stop(Nodes). + try + % Verify that new nodes switched to schema v1/v2 in presence of v1/v2 routes respectively + ?assertEqual( + [{ok, VTo}, {ok, VTo}, {ok, VTo}], + erpc:multicall(Nodes, emqx_router, get_schema_vsn, []) + ), + % Wait for all nodes to agree on cluster state + ?retry( + 500, + 10, + ?assertMatch( + [{ok, [Node1, Node2, Node3]}], + lists:usort(erpc:multicall(Nodes, emqx, running_nodes, [])) + ) + ), + % Verify that routing works as expected + C2 = start_client(Node2), + ok = subscribe(C2, <<"a/+/d">>), + C3 = start_client(Node3), + ok = subscribe(C3, <<"d/e/f/#">>), + {ok, _} = publish(C1, <<"a/b/d">>, <<"hey-newbies">>), + {ok, _} = publish(C2, <<"a/b/c">>, <<"hi">>), + {ok, _} = publish(C3, <<"d/e/f/42">>, <<"hello">>), + ?assertReceive({pub, C2, #{topic := <<"a/b/d">>, payload := <<"hey-newbies">>}}), + ?assertReceive({pub, C1, #{topic := <<"a/b/c">>, payload := <<"hi">>}}), + ?assertReceive({pub, C1, #{topic := <<"d/e/f/42">>, payload := <<"hello">>}}), + ?assertReceive({pub, C3, #{topic := <<"d/e/f/42">>, payload := <<"hello">>}}), + ?assertNotReceive(_), + ok = emqtt:stop(C1), + ok = emqtt:stop(C2), + ok = emqtt:stop(C3) + after + ok = emqx_cth_cluster:stop(Nodes) + end. 
%% diff --git a/apps/emqx/test/emqx_secret_tests.erl b/apps/emqx/test/emqx_secret_tests.erl new file mode 100644 index 000000000..cd6588c83 --- /dev/null +++ b/apps/emqx/test/emqx_secret_tests.erl @@ -0,0 +1,76 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_secret_tests). + +-include_lib("eunit/include/eunit.hrl"). + +wrap_unwrap_test() -> + ?assertEqual( + 42, + emqx_secret:unwrap(emqx_secret:wrap(42)) + ). + +unwrap_immediate_test() -> + ?assertEqual( + 42, + emqx_secret:unwrap(42) + ). + +wrap_unwrap_load_test_() -> + Secret = <<"foobaz">>, + { + setup, + fun() -> write_temp_file(Secret) end, + fun(Filename) -> file:delete(Filename) end, + fun(Filename) -> + ?_assertEqual( + Secret, + emqx_secret:unwrap(emqx_secret:wrap_load({file, Filename})) + ) + end + }. + +wrap_load_term_test() -> + ?assertEqual( + {file, "no/such/file/i/swear"}, + emqx_secret:term(emqx_secret:wrap_load({file, "no/such/file/i/swear"})) + ). + +wrap_unwrap_missing_file_test() -> + ?assertThrow( + #{msg := failed_to_read_secret_file, reason := "No such file or directory"}, + emqx_secret:unwrap(emqx_secret:wrap_load({file, "no/such/file/i/swear"})) + ). + +wrap_term_test() -> + ?assertEqual( + 42, + emqx_secret:term(emqx_secret:wrap(42)) + ). 
+ +external_fun_term_error_test() -> + Term = {foo, bar}, + ?assertError( + badarg, + emqx_secret:term(fun() -> Term end) + ). + +write_temp_file(Bytes) -> + Ts = erlang:system_time(millisecond), + Filename = filename:join("/tmp", ?MODULE_STRING ++ integer_to_list(-Ts)), + ok = file:write_file(Filename, Bytes), + Filename. diff --git a/apps/emqx/test/emqx_shared_sub_SUITE.erl b/apps/emqx/test/emqx_shared_sub_SUITE.erl index 4b4535cea..cc6908fb6 100644 --- a/apps/emqx/test/emqx_shared_sub_SUITE.erl +++ b/apps/emqx/test/emqx_shared_sub_SUITE.erl @@ -63,6 +63,7 @@ init_per_suite(Config) -> end, emqx_common_test_helpers:boot_modules(all), emqx_common_test_helpers:start_apps([]), + emqx_logger:set_log_level(debug), [{dist_pid, DistPid} | Config]. end_per_suite(Config) -> @@ -137,7 +138,8 @@ t_random_basic(Config) when is_list(Config) -> ClientId = <<"ClientId">>, Topic = <<"foo">>, Payload = <<"hello">>, - emqx:subscribe(Topic, #{qos => 2, share => <<"group1">>}), + Group = <<"group1">>, + emqx_broker:subscribe(emqx_topic:make_shared_record(Group, Topic), #{qos => 2}), MsgQoS2 = emqx_message:make(ClientId, 2, Topic, Payload), %% wait for the subscription to show up ct:sleep(200), @@ -402,7 +404,7 @@ t_hash(Config) when is_list(Config) -> ok = ensure_config(hash_clientid, false), test_two_messages(hash_clientid). -t_hash_clinetid(Config) when is_list(Config) -> +t_hash_clientid(Config) when is_list(Config) -> ok = ensure_config(hash_clientid, false), test_two_messages(hash_clientid). 
@@ -528,14 +530,15 @@ last_message(ExpectedPayload, Pids, Timeout) -> t_dispatch(Config) when is_list(Config) -> ok = ensure_config(random), Topic = <<"foo">>, + Group = <<"group1">>, ?assertEqual( {error, no_subscribers}, - emqx_shared_sub:dispatch(<<"group1">>, Topic, #delivery{message = #message{}}) + emqx_shared_sub:dispatch(Group, Topic, #delivery{message = #message{}}) ), - emqx:subscribe(Topic, #{qos => 2, share => <<"group1">>}), + emqx_broker:subscribe(emqx_topic:make_shared_record(Group, Topic), #{qos => 2}), ?assertEqual( {ok, 1}, - emqx_shared_sub:dispatch(<<"group1">>, Topic, #delivery{message = #message{}}) + emqx_shared_sub:dispatch(Group, Topic, #delivery{message = #message{}}) ). t_uncovered_func(Config) when is_list(Config) -> @@ -572,7 +575,7 @@ t_local(Config) when is_list(Config) -> <<"sticky_group">> => sticky }, - Node = start_slave('local_shared_sub_testtesttest', 21999), + Node = start_slave('local_shared_sub_local_1', 21999), ok = ensure_group_config(GroupConfig), ok = ensure_group_config(Node, GroupConfig), @@ -625,7 +628,7 @@ t_remote(Config) when is_list(Config) -> <<"sticky_group">> => sticky }, - Node = start_slave('remote_shared_sub_testtesttest', 21999), + Node = start_slave('remote_shared_sub_remote_1', 21999), ok = ensure_group_config(GroupConfig), ok = ensure_group_config(Node, GroupConfig), @@ -674,7 +677,7 @@ t_local_fallback(Config) when is_list(Config) -> Topic = <<"local_foo/bar">>, ClientId1 = <<"ClientId1">>, ClientId2 = <<"ClientId2">>, - Node = start_slave('local_fallback_shared_sub_test', 11888), + Node = start_slave('local_fallback_shared_sub_1', 11888), {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}]), {ok, _} = emqtt:connect(ConnPid1), @@ -991,37 +994,110 @@ t_session_kicked(Config) when is_list(Config) -> ?assertEqual([], collect_msgs(0)), ok. 
-%% FIXME: currently doesn't work -%% t_different_groups_same_topic({init, Config}) -> -%% TestName = atom_to_binary(?FUNCTION_NAME), -%% ClientId = <>, -%% {ok, C} = emqtt:start_link([{clientid, ClientId}, {proto_ver, v5}]), -%% {ok, _} = emqtt:connect(C), -%% [{client, C}, {clientid, ClientId} | Config]; -%% t_different_groups_same_topic({'end', Config}) -> -%% C = ?config(client, Config), -%% emqtt:stop(C), -%% ok; -%% t_different_groups_same_topic(Config) when is_list(Config) -> -%% C = ?config(client, Config), -%% ClientId = ?config(clientid, Config), -%% %% Subscribe and unsubscribe to both $queue and $shared topics -%% Topic = <<"t/1">>, -%% SharedTopic0 = <<"$share/aa/", Topic/binary>>, -%% SharedTopic1 = <<"$share/bb/", Topic/binary>>, -%% {ok, _, [2]} = emqtt:subscribe(C, {SharedTopic0, 2}), -%% {ok, _, [2]} = emqtt:subscribe(C, {SharedTopic1, 2}), +-define(UPDATE_SUB_QOS(ConnPid, Topic, QoS), + ?assertMatch({ok, _, [QoS]}, emqtt:subscribe(ConnPid, {Topic, QoS})) +). -%% Message0 = emqx_message:make(ClientId, _QoS = 2, Topic, <<"hi">>), -%% emqx:publish(Message0), -%% ?assertMatch([ {publish, #{payload := <<"hi">>}} -%% , {publish, #{payload := <<"hi">>}} -%% ], collect_msgs(5_000), #{routes => ets:tab2list(emqx_route)}), +t_different_groups_same_topic({init, Config}) -> + TestName = atom_to_binary(?FUNCTION_NAME), + ClientId = <>, + {ok, C} = emqtt:start_link([{clientid, ClientId}, {proto_ver, v5}]), + {ok, _} = emqtt:connect(C), + [{client, C}, {clientid, ClientId} | Config]; +t_different_groups_same_topic({'end', Config}) -> + C = ?config(client, Config), + emqtt:stop(C), + ok; +t_different_groups_same_topic(Config) when is_list(Config) -> + C = ?config(client, Config), + ClientId = ?config(clientid, Config), + %% Subscribe and unsubscribe to different group `aa` and `bb` with same topic + GroupA = <<"aa">>, + GroupB = <<"bb">>, + Topic = <<"t/1">>, -%% {ok, _, [0]} = emqtt:unsubscribe(C, SharedTopic0), -%% {ok, _, [0]} = emqtt:unsubscribe(C, 
SharedTopic1), + SharedTopicGroupA = ?SHARE(GroupA, Topic), + ?UPDATE_SUB_QOS(C, SharedTopicGroupA, ?QOS_2), + SharedTopicGroupB = ?SHARE(GroupB, Topic), + ?UPDATE_SUB_QOS(C, SharedTopicGroupB, ?QOS_2), -%% ok. + ?retry( + _Sleep0 = 100, + _Attempts0 = 50, + begin + ?assertEqual(2, length(emqx_router:match_routes(Topic))) + end + ), + + Message0 = emqx_message:make(ClientId, ?QOS_2, Topic, <<"hi">>), + emqx:publish(Message0), + ?assertMatch( + [ + {publish, #{payload := <<"hi">>}}, + {publish, #{payload := <<"hi">>}} + ], + collect_msgs(5_000), + #{routes => ets:tab2list(emqx_route)} + ), + + {ok, _, [?RC_SUCCESS]} = emqtt:unsubscribe(C, SharedTopicGroupA), + {ok, _, [?RC_SUCCESS]} = emqtt:unsubscribe(C, SharedTopicGroupB), + + ok. + +t_different_groups_update_subopts({init, Config}) -> + TestName = atom_to_binary(?FUNCTION_NAME), + ClientId = <>, + {ok, C} = emqtt:start_link([{clientid, ClientId}, {proto_ver, v5}]), + {ok, _} = emqtt:connect(C), + [{client, C}, {clientid, ClientId} | Config]; +t_different_groups_update_subopts({'end', Config}) -> + C = ?config(client, Config), + emqtt:stop(C), + ok; +t_different_groups_update_subopts(Config) when is_list(Config) -> + C = ?config(client, Config), + ClientId = ?config(clientid, Config), + %% Subscribe and unsubscribe to different group `aa` and `bb` with same topic + Topic = <<"t/1">>, + GroupA = <<"aa">>, + GroupB = <<"bb">>, + SharedTopicGroupA = ?SHARE(GroupA, Topic), + SharedTopicGroupB = ?SHARE(GroupB, Topic), + + Fun = fun(Group, QoS) -> + ?UPDATE_SUB_QOS(C, ?SHARE(Group, Topic), QoS), + ?assertMatch( + #{qos := QoS}, + emqx_broker:get_subopts(ClientId, emqx_topic:make_shared_record(Group, Topic)) + ) + end, + + [Fun(Group, QoS) || QoS <- [?QOS_0, ?QOS_1, ?QOS_2], Group <- [GroupA, GroupB]], + + ?retry( + _Sleep0 = 100, + _Attempts0 = 50, + begin + ?assertEqual(2, length(emqx_router:match_routes(Topic))) + end + ), + + Message0 = emqx_message:make(ClientId, _QoS = 2, Topic, <<"hi">>), + emqx:publish(Message0), 
+ ?assertMatch( + [ + {publish, #{payload := <<"hi">>}}, + {publish, #{payload := <<"hi">>}} + ], + collect_msgs(5_000), + #{routes => ets:tab2list(emqx_route)} + ), + + {ok, _, [?RC_SUCCESS]} = emqtt:unsubscribe(C, SharedTopicGroupA), + {ok, _, [?RC_SUCCESS]} = emqtt:unsubscribe(C, SharedTopicGroupB), + + ok. t_queue_subscription({init, Config}) -> TestName = atom_to_binary(?FUNCTION_NAME), @@ -1038,23 +1114,19 @@ t_queue_subscription({'end', Config}) -> t_queue_subscription(Config) when is_list(Config) -> C = ?config(client, Config), ClientId = ?config(clientid, Config), - %% Subscribe and unsubscribe to both $queue and $shared topics + %% Subscribe and unsubscribe to both $queue share and $share/ with same topic Topic = <<"t/1">>, QueueTopic = <<"$queue/", Topic/binary>>, SharedTopic = <<"$share/aa/", Topic/binary>>, - {ok, _, [?RC_GRANTED_QOS_2]} = emqtt:subscribe(C, {QueueTopic, 2}), - {ok, _, [?RC_GRANTED_QOS_2]} = emqtt:subscribe(C, {SharedTopic, 2}), - %% FIXME: we should actually see 2 routes, one for each group - %% ($queue and aa), but currently the latest subscription - %% overwrites the existing one. 
+ ?UPDATE_SUB_QOS(C, QueueTopic, ?QOS_2), + ?UPDATE_SUB_QOS(C, SharedTopic, ?QOS_2), + ?retry( _Sleep0 = 100, _Attempts0 = 50, begin - ct:pal("routes: ~p", [ets:tab2list(emqx_route)]), - %% FIXME: should ensure we have 2 subscriptions - [_] = emqx_router:lookup_routes(Topic) + ?assertEqual(2, length(emqx_router:match_routes(Topic))) end ), @@ -1063,37 +1135,29 @@ t_queue_subscription(Config) when is_list(Config) -> emqx:publish(Message0), ?assertMatch( [ + {publish, #{payload := <<"hi">>}}, {publish, #{payload := <<"hi">>}} - %% FIXME: should receive one message from each group - %% , {publish, #{payload := <<"hi">>}} ], - collect_msgs(5_000) + collect_msgs(5_000), + #{routes => ets:tab2list(emqx_route)} ), {ok, _, [?RC_SUCCESS]} = emqtt:unsubscribe(C, QueueTopic), - %% FIXME: return code should be success instead of 17 ("no_subscription_existed") - {ok, _, [?RC_NO_SUBSCRIPTION_EXISTED]} = emqtt:unsubscribe(C, SharedTopic), + {ok, _, [?RC_SUCCESS]} = emqtt:unsubscribe(C, SharedTopic), - %% FIXME: this should eventually be true, but currently we leak - %% the previous group subscription... - %% ?retry( - %% _Sleep0 = 100, - %% _Attempts0 = 50, - %% begin - %% ct:pal("routes: ~p", [ets:tab2list(emqx_route)]), - %% [] = emqx_router:lookup_routes(Topic) - %% end - %% ), + ?retry( + _Sleep0 = 100, + _Attempts0 = 50, + begin + ?assertEqual(0, length(emqx_router:match_routes(Topic))) + end + ), ct:sleep(500), Message1 = emqx_message:make(ClientId, _QoS = 2, Topic, <<"hello">>), emqx:publish(Message1), - %% FIXME: we should *not* receive any messages... - %% ?assertEqual([], collect_msgs(1_000), #{routes => ets:tab2list(emqx_route)}), - %% This is from the leaked group... - ?assertMatch([{publish, #{topic := Topic}}], collect_msgs(1_000), #{ - routes => ets:tab2list(emqx_route) - }), + %% we should *not* receive any messages. + ?assertEqual([], collect_msgs(1_000), #{routes => ets:tab2list(emqx_route)}), ok. @@ -1190,34 +1254,24 @@ recv_msgs(Count, Msgs) -> end. 
start_slave(Name, Port) -> - {ok, Node} = ct_slave:start( - list_to_atom(atom_to_list(Name) ++ "@" ++ host()), - [ - {kill_if_fail, true}, - {monitor_master, true}, - {init_timeout, 10000}, - {startup_timeout, 10000}, - {erl_flags, ebin_path()} - ] + {ok, Node} = emqx_cth_peer:start_link( + Name, + ebin_path() ), - pong = net_adm:ping(Node), setup_node(Node, Port), Node. stop_slave(Node) -> rpc:call(Node, mria, leave, []), - ct_slave:stop(Node). + emqx_cth_peer:stop(Node). host() -> [_, Host] = string:tokens(atom_to_list(node()), "@"), Host. ebin_path() -> - string:join(["-pa" | lists:filter(fun is_lib/1, code:get_path())], " "). - -is_lib(Path) -> - string:prefix(Path, code:lib_dir()) =:= nomatch. + ["-pa" | code:get_path()]. setup_node(Node, Port) -> EnvHandler = diff --git a/apps/emqx/test/emqx_topic_SUITE.erl b/apps/emqx/test/emqx_topic_SUITE.erl index c49c93fb2..4761ea17d 100644 --- a/apps/emqx/test/emqx_topic_SUITE.erl +++ b/apps/emqx/test/emqx_topic_SUITE.erl @@ -238,11 +238,11 @@ long_topic() -> t_parse(_) -> ?assertError( {invalid_topic_filter, <<"$queue/t">>}, - parse(<<"$queue/t">>, #{share => <<"g">>}) + parse(#share{group = <<"$queue">>, topic = <<"$queue/t">>}, #{}) ), ?assertError( {invalid_topic_filter, <<"$share/g/t">>}, - parse(<<"$share/g/t">>, #{share => <<"g">>}) + parse(#share{group = <<"g">>, topic = <<"$share/g/t">>}, #{}) ), ?assertError( {invalid_topic_filter, <<"$share/t">>}, @@ -254,8 +254,12 @@ t_parse(_) -> ), ?assertEqual({<<"a/b/+/#">>, #{}}, parse(<<"a/b/+/#">>)), ?assertEqual({<<"a/b/+/#">>, #{qos => 1}}, parse({<<"a/b/+/#">>, #{qos => 1}})), - ?assertEqual({<<"topic">>, #{share => <<"$queue">>}}, parse(<<"$queue/topic">>)), - ?assertEqual({<<"topic">>, #{share => <<"group">>}}, parse(<<"$share/group/topic">>)), + ?assertEqual( + {#share{group = <<"$queue">>, topic = <<"topic">>}, #{}}, parse(<<"$queue/topic">>) + ), + ?assertEqual( + {#share{group = <<"group">>, topic = <<"topic">>}, #{}}, parse(<<"$share/group/topic">>) + ), %% 
The '$local' and '$fastlane' topics have been deprecated. ?assertEqual({<<"$local/topic">>, #{}}, parse(<<"$local/topic">>)), ?assertEqual({<<"$local/$queue/topic">>, #{}}, parse(<<"$local/$queue/topic">>)), diff --git a/apps/emqx/test/emqx_topic_index_SUITE.erl b/apps/emqx/test/emqx_topic_index_SUITE.erl index 9df9743f1..71e508306 100644 --- a/apps/emqx/test/emqx_topic_index_SUITE.erl +++ b/apps/emqx/test/emqx_topic_index_SUITE.erl @@ -209,9 +209,6 @@ t_match_fast_forward(Config) -> M:insert(<<"a/b/1/2/3/4/5/6/7/8/9/#">>, id1, <<>>, Tab), M:insert(<<"z/y/x/+/+">>, id2, <<>>, Tab), M:insert(<<"a/b/c/+">>, id3, <<>>, Tab), - % dbg:tracer(), - % dbg:p(all, c), - % dbg:tpl({ets, next, '_'}, x), ?assertEqual(id1, id(match(M, <<"a/b/1/2/3/4/5/6/7/8/9/0">>, Tab))), ?assertEqual([id1], [id(X) || X <- matches(M, <<"a/b/1/2/3/4/5/6/7/8/9/0">>, Tab)]). diff --git a/apps/emqx_audit/BSL.txt b/apps/emqx_audit/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_audit/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_audit/README.md b/apps/emqx_audit/README.md new file mode 100644 index 000000000..48c625ed5 --- /dev/null +++ b/apps/emqx_audit/README.md @@ -0,0 +1,5 @@ +emqx_audit +===== + +Audit log for EMQX, empowers users to efficiently access the desired audit trail data +and facilitates auditing, compliance, troubleshooting, and security analysis. 
diff --git a/apps/emqx_audit/include/emqx_audit.hrl b/apps/emqx_audit/include/emqx_audit.hrl new file mode 100644 index 000000000..8304a9060 --- /dev/null +++ b/apps/emqx_audit/include/emqx_audit.hrl @@ -0,0 +1,26 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-define(AUDIT, emqx_audit). + +-record(?AUDIT, { + %% basic info + created_at, + node, + from, + source, + source_ip, + %% operation info + operation_id, + operation_type, + args, + operation_result, + failure, + %% request detail + http_method, + http_request, + http_status_code, + duration_ms, + extra +}). diff --git a/apps/emqx_audit/rebar.config b/apps/emqx_audit/rebar.config new file mode 100644 index 000000000..fac0f9b07 --- /dev/null +++ b/apps/emqx_audit/rebar.config @@ -0,0 +1,5 @@ +{erl_opts, [debug_info]}. +{deps, [ + {emqx, {path, "../emqx"}}, + {emqx_utils, {path, "../emqx_utils"}} +]}. diff --git a/apps/emqx_audit/src/emqx_audit.app.src b/apps/emqx_audit/src/emqx_audit.app.src new file mode 100644 index 000000000..96cdd11ce --- /dev/null +++ b/apps/emqx_audit/src/emqx_audit.app.src @@ -0,0 +1,10 @@ +{application, emqx_audit, [ + {description, "Audit log for EMQX"}, + {vsn, "0.1.0"}, + {registered, []}, + {mod, {emqx_audit_app, []}}, + {applications, [kernel, stdlib, emqx]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_audit/src/emqx_audit.erl b/apps/emqx_audit/src/emqx_audit.erl new file mode 100644 index 000000000..98f4a70e8 --- /dev/null +++ b/apps/emqx_audit/src/emqx_audit.erl @@ -0,0 +1,245 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_audit). + +-behaviour(gen_server). 
+ +-include_lib("emqx/include/emqx.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include("emqx_audit.hrl"). + +%% API +-export([start_link/0]). +-export([log/3]). + +-export([trans_clean_expired/2]). + +%% gen_server callbacks +-export([ + init/1, + handle_continue/2, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). + +-define(FILTER_REQ, [cert, host_info, has_sent_resp, pid, path_info, peer, ref, sock, streamid]). + +-ifdef(TEST). +-define(INTERVAL, 100). +-else. +-define(INTERVAL, 10000). +-endif. + +to_audit(#{from := cli, cmd := Cmd, args := Args, duration_ms := DurationMs}) -> + #?AUDIT{ + operation_id = <<"">>, + operation_type = atom_to_binary(Cmd), + args = Args, + operation_result = <<"">>, + failure = <<"">>, + duration_ms = DurationMs, + from = cli, + source = <<"">>, + source_ip = <<"">>, + http_status_code = <<"">>, + http_method = <<"">>, + http_request = <<"">> + }; +to_audit(#{from := From} = Log) when From =:= dashboard orelse From =:= rest_api -> + #{ + source := Source, + source_ip := SourceIp, + %% operation info + operation_id := OperationId, + operation_type := OperationType, + operation_result := OperationResult, + %% request detail + http_status_code := StatusCode, + http_method := Method, + http_request := Request, + duration_ms := DurationMs + } = Log, + #?AUDIT{ + from = From, + source = Source, + source_ip = SourceIp, + %% operation info + operation_id = OperationId, + operation_type = OperationType, + operation_result = OperationResult, + failure = maps:get(failure, Log, <<"">>), + %% request detail + http_status_code = StatusCode, + http_method = Method, + http_request = Request, + duration_ms = DurationMs, + args = <<"">> + }; +to_audit(#{from := erlang_console, function := F, args := Args}) -> + #?AUDIT{ + from = erlang_console, + source = <<"">>, + source_ip = <<"">>, + %% operation info + operation_id = <<"">>, + operation_type = <<"">>, + operation_result = <<"">>, + failure = <<"">>, + 
%% request detail + http_status_code = <<"">>, + http_method = <<"">>, + http_request = <<"">>, + duration_ms = 0, + args = iolist_to_binary(io_lib:format("~p: ~p~n", [F, Args])) + }. + +log(_Level, undefined, _Handler) -> + ok; +log(Level, Meta1, Handler) -> + Meta2 = Meta1#{time => logger:timestamp(), level => Level}, + log_to_file(Level, Meta2, Handler), + log_to_db(Meta2), + remove_handler_when_disabled(). + +remove_handler_when_disabled() -> + case emqx_config:get([log, audit, enable], false) of + true -> + ok; + false -> + _ = logger:remove_handler(?AUDIT_HANDLER), + ok + end. + +log_to_db(Log) -> + Audit0 = to_audit(Log), + Audit = Audit0#?AUDIT{ + node = node(), + created_at = erlang:system_time(microsecond) + }, + mria:dirty_write(?AUDIT, Audit). + +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +init([]) -> + ok = mria:create_table(?AUDIT, [ + {type, ordered_set}, + {rlog_shard, ?COMMON_SHARD}, + {storage, disc_copies}, + {record_name, ?AUDIT}, + {attributes, record_info(fields, ?AUDIT)} + ]), + {ok, #{}, {continue, setup}}. + +handle_continue(setup, State) -> + ok = mria:wait_for_tables([?AUDIT]), + NewState = State#{role => mria_rlog:role()}, + ?AUDIT(alert, #{ + cmd => emqx, + args => ["start"], + version => emqx_release:version(), + from => cli, + duration_ms => 0 + }), + {noreply, NewState, interval(NewState)}. + +handle_call(_Request, _From, State) -> + {reply, ignore, State, interval(State)}. + +handle_cast(_Request, State) -> + {noreply, State, interval(State)}. + +handle_info(timeout, State) -> + ExtraWait = clean_expired_logs(), + {noreply, State, interval(State) + ExtraWait}; +handle_info(_Info, State) -> + {noreply, State, interval(State)}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. 
+ +%%%=================================================================== +%%% Internal functions +%%%=================================================================== + +%% if clean_expired transaction aborted, it will be scheduled with extra 60 seconds. +clean_expired_logs() -> + MaxSize = max_size(), + Oldest = mnesia:dirty_first(?AUDIT), + CurSize = mnesia:table_info(?AUDIT, size), + case CurSize - MaxSize of + DelSize when DelSize > 0 -> + case + mria:transaction( + ?COMMON_SHARD, + fun ?MODULE:trans_clean_expired/2, + [Oldest, DelSize] + ) + of + {atomic, ok} -> + 0; + {aborted, Reason} -> + ?SLOG(error, #{ + msg => "clean_expired_audit_aborted", + reason => Reason, + delete_size => DelSize, + current_size => CurSize, + max_count => MaxSize + }), + 60000 + end; + _ -> + 0 + end. + +trans_clean_expired(Oldest, DelCount) -> + First = mnesia:first(?AUDIT), + %% Other node already clean from the oldest record. + %% ensure not delete twice, otherwise records that should not be deleted will be deleted. + case First =:= Oldest of + true -> do_clean_expired(First, DelCount); + false -> ok + end. + +do_clean_expired(_, DelSize) when DelSize =< 0 -> ok; +do_clean_expired('$end_of_table', _DelSize) -> + ok; +do_clean_expired(CurKey, DeleteSize) -> + mnesia:delete(?AUDIT, CurKey, sticky_write), + do_clean_expired(mnesia:next(?AUDIT, CurKey), DeleteSize - 1). + +max_size() -> + emqx_conf:get([log, audit, max_filter_size], 5000). + +interval(#{role := replicant}) -> hibernate; +interval(#{role := core}) -> ?INTERVAL + rand:uniform(?INTERVAL). 
+ +log_to_file(Level, Meta, #{module := Module} = Handler) -> + Log = #{level => Level, meta => Meta, msg => undefined}, + Handler1 = maps:without(?OWN_KEYS, Handler), + try + erlang:apply(Module, log, [Log, Handler1]) + catch + C:R:S -> + case logger:remove_handler(?AUDIT_HANDLER) of + ok -> + logger:internal_log( + error, {removed_failing_handler, ?AUDIT_HANDLER, C, R, S} + ); + {error, {not_found, _}} -> + ok; + {error, Reason} -> + logger:internal_log( + error, + {removed_handler_failed, ?AUDIT_HANDLER, Reason, C, R, S} + ) + end + end. diff --git a/apps/emqx_audit/src/emqx_audit_api.erl b/apps/emqx_audit/src/emqx_audit_api.erl new file mode 100644 index 000000000..a7fd8f4ad --- /dev/null +++ b/apps/emqx_audit/src/emqx_audit_api.erl @@ -0,0 +1,398 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_audit_api). + +-behaviour(minirest_api). + +%% API +-export([api_spec/0, paths/0, schema/1, namespace/0, fields/1]). +-export([audit/2]). +-export([qs2ms/2, format/1]). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include("emqx_audit.hrl"). + +-import(hoconsc, [mk/2, ref/2, array/1]). + +-define(TAGS, ["Audit"]). + +-define(AUDIT_QS_SCHEMA, [ + {<<"node">>, atom}, + {<<"from">>, atom}, + {<<"source">>, binary}, + {<<"source_ip">>, binary}, + {<<"operation_id">>, binary}, + {<<"operation_type">>, binary}, + {<<"operation_result">>, atom}, + {<<"http_status_code">>, integer}, + {<<"http_method">>, atom}, + {<<"gte_created_at">>, timestamp}, + {<<"lte_created_at">>, timestamp}, + {<<"gte_duration_ms">>, timestamp}, + {<<"lte_duration_ms">>, timestamp} +]). +-define(DISABLE_MSG, <<"Audit is disabled">>). + +namespace() -> "audit". 
+ +api_spec() -> + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). + +paths() -> + ["/audit"]. + +schema("/audit") -> + #{ + 'operationId' => audit, + get => #{ + tags => ?TAGS, + description => ?DESC(audit_get), + parameters => [ + {node, + ?HOCON(binary(), #{ + in => query, + required => false, + example => <<"emqx@127.0.0.1">>, + desc => ?DESC(filter_node) + })}, + {from, + ?HOCON(?ENUM([dashboard, rest_api, cli, erlang_console]), #{ + in => query, + required => false, + example => <<"dashboard">>, + desc => ?DESC(filter_from) + })}, + {source, + ?HOCON(binary(), #{ + in => query, + required => false, + example => <<"admin">>, + desc => ?DESC(filter_source) + })}, + {source_ip, + ?HOCON(binary(), #{ + in => query, + required => false, + example => <<"127.0.0.1">>, + desc => ?DESC(filter_source_ip) + })}, + {operation_id, + ?HOCON(binary(), #{ + in => query, + required => false, + example => <<"/rules/{id}">>, + desc => ?DESC(filter_operation_id) + })}, + {operation_type, + ?HOCON(binary(), #{ + in => query, + example => <<"rules">>, + required => false, + desc => ?DESC(filter_operation_type) + })}, + {operation_result, + ?HOCON(?ENUM([success, failure]), #{ + in => query, + example => failure, + required => false, + desc => ?DESC(filter_operation_result) + })}, + {http_status_code, + ?HOCON(integer(), #{ + in => query, + example => 200, + required => false, + desc => ?DESC(filter_http_status_code) + })}, + {http_method, + ?HOCON(?ENUM([post, put, delete]), #{ + in => query, + example => post, + required => false, + desc => ?DESC(filter_http_method) + })}, + {gte_duration_ms, + ?HOCON(integer(), #{ + in => query, + example => 0, + required => false, + desc => ?DESC(filter_gte_duration_ms) + })}, + {lte_duration_ms, + ?HOCON(integer(), #{ + in => query, + example => 1000, + required => false, + desc => ?DESC(filter_lte_duration_ms) + })}, + {gte_created_at, + ?HOCON(emqx_utils_calendar:epoch_millisecond(), #{ + in => query, + required => false, + 
example => <<"2023-10-15T00:00:00.820384+08:00">>, + desc => ?DESC(filter_gte_created_at) + })}, + {lte_created_at, + ?HOCON(emqx_utils_calendar:epoch_millisecond(), #{ + in => query, + example => <<"2023-10-16T00:00:00.820384+08:00">>, + required => false, + desc => ?DESC(filter_lte_created_at) + })}, + ref(emqx_dashboard_swagger, page), + ref(emqx_dashboard_swagger, limit) + ], + summary => <<"List audit logs">>, + responses => #{ + 200 => + emqx_dashboard_swagger:schema_with_example( + array(?REF(audit_list)), + audit_log_list_example() + ), + 400 => emqx_dashboard_swagger:error_codes( + ['BAD_REQUEST'], + ?DISABLE_MSG + ) + } + } + }. + +fields(audit_list) -> + [ + {data, mk(array(?REF(audit)), #{desc => ?DESC("audit_resp")})}, + {meta, mk(ref(emqx_dashboard_swagger, meta), #{})} + ]; +fields(audit) -> + [ + {created_at, + ?HOCON( + emqx_utils_calendar:epoch_millisecond(), + #{ + desc => "The time when the log is created" + } + )}, + {node, + ?HOCON(binary(), #{ + desc => "The node name to which the log is created" + })}, + {from, + ?HOCON(?ENUM([dashboard, rest_api, cli, erlang_console]), #{ + desc => "The source type of the log" + })}, + {source, + ?HOCON(binary(), #{ + desc => "The source of the log" + })}, + {source_ip, + ?HOCON(binary(), #{ + desc => "The source ip of the log" + })}, + {operation_id, + ?HOCON(binary(), #{ + desc => "The operation id of the log" + })}, + {operation_type, + ?HOCON(binary(), #{ + desc => "The operation type of the log" + })}, + {operation_result, + ?HOCON(?ENUM([success, failure]), #{ + desc => "The operation result of the log" + })}, + {http_status_code, + ?HOCON(integer(), #{ + desc => "The http status code of the log" + })}, + {http_method, + ?HOCON(?ENUM([post, put, delete]), #{ + desc => "The http method of the log" + })}, + {duration_ms, + ?HOCON(integer(), #{ + desc => "The duration of the log" + })}, + {args, + ?HOCON(?ARRAY(binary()), #{ + desc => "The args of the log" + })}, + {failure, + ?HOCON(?ARRAY(binary()), #{ 
+ desc => "The failure of the log" + })}, + {http_request, + ?HOCON(?REF(http_request), #{ + desc => "The http request of the log" + })} + ]; +fields(http_request) -> + [ + {bindings, ?HOCON(map(), #{})}, + {body, ?HOCON(map(), #{})}, + {headers, ?HOCON(map(), #{})}, + {method, ?HOCON(?ENUM([post, put, delete]), #{})} + ]. + +audit(get, #{query_string := QueryString}) -> + case emqx_config:get([log, audit, enable], false) of + false -> + {400, #{code => 'BAD_REQUEST', message => ?DISABLE_MSG}}; + true -> + case + emqx_mgmt_api:node_query( + node(), + ?AUDIT, + QueryString, + ?AUDIT_QS_SCHEMA, + fun ?MODULE:qs2ms/2, + fun ?MODULE:format/1 + ) + of + {error, page_limit_invalid} -> + {400, #{code => 'BAD_REQUEST', message => <<"page_limit_invalid">>}}; + {error, Node, Error} -> + Message = list_to_binary( + io_lib:format("bad rpc call ~p, Reason ~p", [Node, Error]) + ), + {500, #{code => <<"NODE_DOWN">>, message => Message}}; + Result -> + {200, Result} + end + end. + +qs2ms(_Tab, {Qs, _}) -> + #{ + match_spec => gen_match_spec(Qs, #?AUDIT{_ = '_'}, []), + fuzzy_fun => undefined + }. 
+ +gen_match_spec([], Audit, Conn) -> + [{Audit, Conn, ['$_']}]; +gen_match_spec([{node, '=:=', T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{node = T}, Conn); +gen_match_spec([{from, '=:=', T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{from = T}, Conn); +gen_match_spec([{source, '=:=', T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{source = T}, Conn); +gen_match_spec([{source_ip, '=:=', T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{source_ip = T}, Conn); +gen_match_spec([{operation_id, '=:=', T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{operation_id = T}, Conn); +gen_match_spec([{operation_type, '=:=', T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{operation_type = T}, Conn); +gen_match_spec([{operation_result, '=:=', T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{operation_result = T}, Conn); +gen_match_spec([{http_status_code, '=:=', T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{http_status_code = T}, Conn); +gen_match_spec([{http_method, '=:=', T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{http_method = T}, Conn); +gen_match_spec([{created_at, Hold, T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{created_at = '$1'}, [{'$1', Hold, T} | Conn]); +gen_match_spec([{created_at, Hold1, T1, Hold2, T2} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{created_at = '$1'}, [ + {'$1', Hold1, T1}, {'$1', Hold2, T2} | Conn + ]); +gen_match_spec([{duration_ms, Hold, T} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{duration_ms = '$2'}, [{'$2', Hold, T} | Conn]); +gen_match_spec([{duration_ms, Hold1, T1, Hold2, T2} | Qs], Audit, Conn) -> + gen_match_spec(Qs, Audit#?AUDIT{duration_ms = '$2'}, [ + {'$2', Hold1, T1}, {'$2', Hold2, T2} | Conn + ]). 
+ +format(Audit) -> + #?AUDIT{ + created_at = CreatedAt, + node = Node, + from = From, + source = Source, + source_ip = SourceIp, + operation_id = OperationId, + operation_type = OperationType, + operation_result = OperationResult, + http_status_code = HttpStatusCode, + http_method = HttpMethod, + duration_ms = DurationMs, + args = Args, + failure = Failure, + http_request = HttpRequest + } = Audit, + #{ + created_at => emqx_utils_calendar:epoch_to_rfc3339(CreatedAt, microsecond), + node => Node, + from => From, + source => Source, + source_ip => SourceIp, + operation_id => OperationId, + operation_type => OperationType, + operation_result => OperationResult, + http_status_code => HttpStatusCode, + http_method => HttpMethod, + duration_ms => DurationMs, + args => Args, + failure => Failure, + http_request => HttpRequest + }. + +audit_log_list_example() -> + #{ + data => [api_example(), cli_example()], + meta => #{ + <<"count">> => 2, + <<"hasnext">> => false, + <<"limit">> => 50, + <<"page">> => 1 + } + }. + +api_example() -> + #{ + <<"args">> => "", + <<"created_at">> => "2023-10-17T10:41:20.383993+08:00", + <<"duration_ms">> => 0, + <<"failure">> => "", + <<"from">> => "dashboard", + <<"http_method">> => "post", + <<"http_request">> => #{ + <<"bindings">> => #{}, + <<"body">> => #{ + <<"password">> => "******", + <<"username">> => "admin" + }, + <<"headers">> => #{ + <<"accept">> => "*/*", + <<"authorization">> => "******", + <<"connection">> => "keep-alive", + <<"content-length">> => "45", + <<"content-type">> => "application/json" + }, + <<"method">> => "post" + }, + <<"http_status_code">> => 200, + <<"node">> => "emqx@127.0.0.1", + <<"operation_id">> => "/login", + <<"operation_result">> => "success", + <<"operation_type">> => "login", + <<"source">> => "admin", + <<"source_ip">> => "127.0.0.1" + }. 
+ +cli_example() -> + #{ + <<"args">> => [<<"show">>, <<"log">>], + <<"created_at">> => "2023-10-17T10:45:13.100426+08:00", + <<"duration_ms">> => 7, + <<"failure">> => "", + <<"from">> => "cli", + <<"http_method">> => "", + <<"http_request">> => "", + <<"http_status_code">> => "", + <<"node">> => "emqx@127.0.0.1", + <<"operation_id">> => "", + <<"operation_result">> => "", + <<"operation_type">> => "conf", + <<"source">> => "", + <<"source_ip">> => "" + }. diff --git a/apps/emqx_audit/src/emqx_audit_app.erl b/apps/emqx_audit/src/emqx_audit_app.erl new file mode 100644 index 000000000..aa8fa1a39 --- /dev/null +++ b/apps/emqx_audit/src/emqx_audit_app.erl @@ -0,0 +1,15 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_audit_app). + +-behaviour(application). + +-export([start/2, stop/1]). + +start(_StartType, _StartArgs) -> + emqx_audit_sup:start_link(). + +stop(_State) -> + ok. diff --git a/apps/emqx_audit/src/emqx_audit_sup.erl b/apps/emqx_audit/src/emqx_audit_sup.erl new file mode 100644 index 000000000..b3a5ca985 --- /dev/null +++ b/apps/emqx_audit/src/emqx_audit_sup.erl @@ -0,0 +1,33 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_audit_sup). + +-behaviour(supervisor). + +-export([start_link/0]). + +-export([init/1]). + +-define(SERVER, ?MODULE). + +start_link() -> + supervisor:start_link({local, ?SERVER}, ?MODULE, []). 
+ +init([]) -> + SupFlags = #{ + strategy => one_for_all, + intensity => 10, + period => 10 + }, + ChildSpecs = [ + #{ + id => emqx_audit, + start => {emqx_audit, start_link, []}, + type => worker, + restart => transient, + shutdown => 1000 + } + ], + {ok, {SupFlags, ChildSpecs}}. diff --git a/apps/emqx_audit/test/emqx_audit_api_SUITE.erl b/apps/emqx_audit/test/emqx_audit_api_SUITE.erl new file mode 100644 index 000000000..50b39d240 --- /dev/null +++ b/apps/emqx_audit/test/emqx_audit_api_SUITE.erl @@ -0,0 +1,248 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_audit_api_SUITE). +-compile(export_all). +-compile(nowarn_export_all). + +-include_lib("eunit/include/eunit.hrl"). + +all() -> + [ + {group, audit, [sequence]} + ]. + +groups() -> + [ + {audit, [sequence], common_tests()} + ]. + +common_tests() -> + emqx_common_test_helpers:all(?MODULE). + +-define(CONF_DEFAULT, #{ + node => + #{ + name => "emqx1@127.0.0.1", + cookie => "emqxsecretcookie", + data_dir => "data" + }, + log => #{ + audit => + #{ + enable => true, + ignore_high_frequency_request => true, + level => info, + max_filter_size => 15, + rotation_count => 2, + rotation_size => "10MB", + time_offset => "system" + } + } +}). 
+ +init_per_suite(Config) -> + _ = application:load(emqx_conf), + emqx_config:erase_all(), + emqx_mgmt_api_test_util:init_suite([emqx_ctl, emqx_conf, emqx_audit]), + ok = emqx_common_test_helpers:load_config(emqx_enterprise_schema, ?CONF_DEFAULT), + emqx_config:save_schema_mod_and_names(emqx_enterprise_schema), + ok = emqx_config_logger:refresh_config(), + application:set_env(emqx, boot_modules, []), + emqx_conf_cli:load(), + Config. + +end_per_suite(_) -> + emqx_mgmt_api_test_util:end_suite([emqx_audit, emqx_conf, emqx_ctl]). + +t_http_api(_) -> + process_flag(trap_exit, true), + AuditPath = emqx_mgmt_api_test_util:api_path(["audit"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + {ok, Zones} = emqx_mgmt_api_configs_SUITE:get_global_zone(), + NewZones = emqx_utils_maps:deep_put([<<"mqtt">>, <<"max_qos_allowed">>], Zones, 1), + {ok, #{<<"mqtt">> := Res}} = emqx_mgmt_api_configs_SUITE:update_global_zone(NewZones), + ?assertMatch(#{<<"max_qos_allowed">> := 1}, Res), + {ok, Res1} = emqx_mgmt_api_test_util:request_api(get, AuditPath, "limit=1", AuthHeader), + ?assertMatch( + #{ + <<"data">> := [ + #{ + <<"from">> := <<"rest_api">>, + <<"operation_id">> := <<"/configs/global_zone">>, + <<"source_ip">> := <<"127.0.0.1">>, + <<"source">> := _, + <<"http_request">> := #{ + <<"method">> := <<"put">>, + <<"body">> := #{<<"mqtt">> := #{<<"max_qos_allowed">> := 1}}, + <<"bindings">> := _, + <<"headers">> := #{<<"authorization">> := <<"******">>} + }, + <<"http_status_code">> := 200, + <<"operation_result">> := <<"success">>, + <<"operation_type">> := <<"configs">> + } + ] + }, + emqx_utils_json:decode(Res1, [return_maps]) + ), + ok. 
+ +t_disabled(_) -> + Enable = [log, audit, enable], + ?assertEqual(true, emqx:get_config(Enable)), + AuditPath = emqx_mgmt_api_test_util:api_path(["audit"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + {ok, _} = emqx_mgmt_api_test_util:request_api(get, AuditPath, "limit=1", AuthHeader), + Size1 = mnesia:table_info(emqx_audit, size), + + {ok, Logs} = emqx_mgmt_api_configs_SUITE:get_config("log"), + Logs1 = emqx_utils_maps:deep_put([<<"audit">>, <<"max_filter_size">>], Logs, 100), + NewLogs = emqx_utils_maps:deep_put([<<"audit">>, <<"enable">>], Logs1, false), + {ok, _} = emqx_mgmt_api_configs_SUITE:update_config("log", NewLogs), + {ok, GetLog1} = emqx_mgmt_api_configs_SUITE:get_config("log"), + ?assertEqual(NewLogs, GetLog1), + ?assertMatch( + {error, _}, + emqx_mgmt_api_test_util:request_api(get, AuditPath, "limit=1", AuthHeader) + ), + + Size2 = mnesia:table_info(emqx_audit, size), + %% Record the audit disable action, so the size + 1 + ?assertEqual(Size1 + 1, Size2), + + {ok, Zones} = emqx_mgmt_api_configs_SUITE:get_global_zone(), + NewZones = emqx_utils_maps:deep_put([<<"mqtt">>, <<"max_topic_levels">>], Zones, 111), + {ok, #{<<"mqtt">> := Res}} = emqx_mgmt_api_configs_SUITE:update_global_zone(NewZones), + ?assertMatch(#{<<"max_topic_levels">> := 111}, Res), + Size3 = mnesia:table_info(emqx_audit, size), + %% Don't record mqtt update request. + ?assertEqual(Size2, Size3), + %% enabled again + {ok, _} = emqx_mgmt_api_configs_SUITE:update_config("log", Logs1), + {ok, GetLog2} = emqx_mgmt_api_configs_SUITE:get_config("log"), + ?assertEqual(Logs1, GetLog2), + Size4 = mnesia:table_info(emqx_audit, size), + ?assertEqual(Size3 + 1, Size4), + ok. 
+ +t_cli(_Config) -> + ok = emqx_ctl:run_command(["conf", "show", "log"]), + AuditPath = emqx_mgmt_api_test_util:api_path(["audit"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + {ok, Res} = emqx_mgmt_api_test_util:request_api(get, AuditPath, "limit=1", AuthHeader), + #{<<"data">> := Data} = emqx_utils_json:decode(Res, [return_maps]), + ?assertMatch( + [ + #{ + <<"from">> := <<"cli">>, + <<"operation_id">> := <<"">>, + <<"source_ip">> := <<"">>, + <<"operation_type">> := <<"conf">>, + <<"args">> := [<<"show">>, <<"log">>], + <<"node">> := _, + <<"source">> := <<"">>, + <<"http_request">> := <<"">> + } + ], + Data + ), + + %% check filter + {ok, Res1} = emqx_mgmt_api_test_util:request_api(get, AuditPath, "from=cli", AuthHeader), + #{<<"data">> := Data1} = emqx_utils_json:decode(Res1, [return_maps]), + ?assertEqual(Data, Data1), + {ok, Res2} = emqx_mgmt_api_test_util:request_api( + get, AuditPath, "from=erlang_console", AuthHeader + ), + ?assertMatch(#{<<"data">> := []}, emqx_utils_json:decode(Res2, [return_maps])), + ok. + +t_max_size(_Config) -> + {ok, _} = emqx:update_config([log, audit, max_filter_size], 1000), + SizeFun = + fun() -> + AuditPath = emqx_mgmt_api_test_util:api_path(["audit"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Limit = "limit=1000", + {ok, Res} = emqx_mgmt_api_test_util:request_api(get, AuditPath, Limit, AuthHeader), + #{<<"data">> := Data} = emqx_utils_json:decode(Res, [return_maps]), + erlang:length(Data) + end, + InitSize = SizeFun(), + lists:foreach( + fun(_) -> + ok = emqx_ctl:run_command(["conf", "show", "log"]) + end, + lists:duplicate(100, 1) + ), + timer:sleep(110), + Size1 = SizeFun(), + ?assert(Size1 - InitSize >= 100, {Size1, InitSize}), + {ok, _} = emqx:update_config([log, audit, max_filter_size], 10), + %% wait for clean_expired + timer:sleep(250), + ExpectSize = emqx:get_config([log, audit, max_filter_size]), + Size2 = SizeFun(), + ?assertEqual(ExpectSize, Size2, {sys:get_state(emqx_audit)}), + ok. 
+ +t_kickout_clients_without_log(_) -> + process_flag(trap_exit, true), + AuditPath = emqx_mgmt_api_test_util:api_path(["audit"]), + {ok, AuditLogs1} = emqx_mgmt_api_test_util:request_api(get, AuditPath), + kickout_clients(), + {ok, AuditLogs2} = emqx_mgmt_api_test_util:request_api(get, AuditPath), + ?assertEqual(AuditLogs1, AuditLogs2), + ok. + +kickout_clients() -> + ClientId1 = <<"client1">>, + ClientId2 = <<"client2">>, + ClientId3 = <<"client3">>, + + {ok, C1} = emqtt:start_link(#{ + clientid => ClientId1, + proto_ver => v5, + properties => #{'Session-Expiry-Interval' => 120} + }), + {ok, _} = emqtt:connect(C1), + {ok, C2} = emqtt:start_link(#{clientid => ClientId2}), + {ok, _} = emqtt:connect(C2), + {ok, C3} = emqtt:start_link(#{clientid => ClientId3}), + {ok, _} = emqtt:connect(C3), + + timer:sleep(300), + + %% get /clients + ClientsPath = emqx_mgmt_api_test_util:api_path(["clients"]), + {ok, Clients} = emqx_mgmt_api_test_util:request_api(get, ClientsPath), + ClientsResponse = emqx_utils_json:decode(Clients, [return_maps]), + ClientsMeta = maps:get(<<"meta">>, ClientsResponse), + ClientsPage = maps:get(<<"page">>, ClientsMeta), + ClientsLimit = maps:get(<<"limit">>, ClientsMeta), + ClientsCount = maps:get(<<"count">>, ClientsMeta), + ?assertEqual(ClientsPage, 1), + ?assertEqual(ClientsLimit, emqx_mgmt:default_row_limit()), + ?assertEqual(ClientsCount, 3), + + %% kickout clients + KickoutPath = emqx_mgmt_api_test_util:api_path(["clients", "kickout", "bulk"]), + KickoutBody = [ClientId1, ClientId2, ClientId3], + {ok, 204, _} = emqx_mgmt_api_test_util:request_api_with_body(post, KickoutPath, KickoutBody), + + {ok, Clients2} = emqx_mgmt_api_test_util:request_api(get, ClientsPath), + ClientsResponse2 = emqx_utils_json:decode(Clients2, [return_maps]), + ?assertMatch(#{<<"data">> := []}, ClientsResponse2). 
diff --git a/apps/emqx_auth_ldap/include/emqx_auth_ldap.hrl b/apps/emqx_auth_ldap/include/emqx_auth_ldap.hrl index 9cf6ac3c0..dcf0c07af 100644 --- a/apps/emqx_auth_ldap/include/emqx_auth_ldap.hrl +++ b/apps/emqx_auth_ldap/include/emqx_auth_ldap.hrl @@ -26,10 +26,6 @@ -define(AUTHN_BACKEND, ldap). -define(AUTHN_BACKEND_BIN, <<"ldap">>). --define(AUTHN_BACKEND_BIND, ldap_bind). --define(AUTHN_BACKEND_BIND_BIN, <<"ldap_bind">>). - -define(AUTHN_TYPE, {?AUTHN_MECHANISM, ?AUTHN_BACKEND}). --define(AUTHN_TYPE_BIND, {?AUTHN_MECHANISM, ?AUTHN_BACKEND_BIND}). -endif. diff --git a/apps/emqx_auth_ldap/src/emqx_auth_ldap.app.src b/apps/emqx_auth_ldap/src/emqx_auth_ldap.app.src index 3d4d5f467..d84d6ff81 100644 --- a/apps/emqx_auth_ldap/src/emqx_auth_ldap.app.src +++ b/apps/emqx_auth_ldap/src/emqx_auth_ldap.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_auth_ldap, [ {description, "EMQX LDAP Authentication and Authorization"}, - {vsn, "0.1.1"}, + {vsn, "0.1.2"}, {registered, []}, {mod, {emqx_auth_ldap_app, []}}, {applications, [ diff --git a/apps/emqx_auth_ldap/src/emqx_auth_ldap_app.erl b/apps/emqx_auth_ldap/src/emqx_auth_ldap_app.erl index 7d05faab9..5e7bd2bc6 100644 --- a/apps/emqx_auth_ldap/src/emqx_auth_ldap_app.erl +++ b/apps/emqx_auth_ldap/src/emqx_auth_ldap_app.erl @@ -25,12 +25,10 @@ start(_StartType, _StartArgs) -> ok = emqx_authz:register_source(?AUTHZ_TYPE, emqx_authz_ldap), ok = emqx_authn:register_provider(?AUTHN_TYPE, emqx_authn_ldap), - ok = emqx_authn:register_provider(?AUTHN_TYPE_BIND, emqx_authn_ldap_bind), {ok, Sup} = emqx_auth_ldap_sup:start_link(), {ok, Sup}. stop(_State) -> ok = emqx_authn:deregister_provider(?AUTHN_TYPE), - ok = emqx_authn:deregister_provider(?AUTHN_TYPE_BIND), ok = emqx_authz:unregister_source(?AUTHZ_TYPE), ok. 
diff --git a/apps/emqx_auth_ldap/src/emqx_authn_ldap.erl b/apps/emqx_auth_ldap/src/emqx_authn_ldap.erl index 975a7f828..acdd08f50 100644 --- a/apps/emqx_auth_ldap/src/emqx_authn_ldap.erl +++ b/apps/emqx_auth_ldap/src/emqx_authn_ldap.erl @@ -16,19 +16,10 @@ -module(emqx_authn_ldap). --include_lib("emqx_auth/include/emqx_authn.hrl"). -include_lib("emqx/include/logger.hrl"). --include_lib("eldap/include/eldap.hrl"). -behaviour(emqx_authn_provider). -%% a compatible attribute for version 4.x --define(ISENABLED_ATTR, "isEnabled"). --define(VALID_ALGORITHMS, [md5, ssha, sha, sha256, sha384, sha512]). -%% TODO -%% 1. Supports more salt algorithms, SMD5 SSHA 256/384/512 -%% 2. Supports https://datatracker.ietf.org/doc/html/rfc3112 - -export([ create/2, update/2, @@ -69,163 +60,25 @@ authenticate(#{auth_method := _}, _) -> ignore; authenticate(#{password := undefined}, _) -> {error, bad_username_or_password}; -authenticate( - #{password := Password} = Credential, - #{ - password_attribute := PasswordAttr, - is_superuser_attribute := IsSuperuserAttr, - query_timeout := Timeout, - resource_id := ResourceId - } = State -) -> - case - emqx_resource:simple_sync_query( - ResourceId, - {query, Credential, [PasswordAttr, IsSuperuserAttr, ?ISENABLED_ATTR], Timeout} - ) - of - {ok, []} -> - ignore; - {ok, [Entry]} -> - is_enabled(Password, Entry, State); - {error, Reason} -> - ?TRACE_AUTHN_PROVIDER(error, "ldap_query_failed", #{ - resource => ResourceId, - timeout => Timeout, - reason => Reason - }), - ignore +authenticate(Credential, #{method := #{type := Type}} = State) -> + case Type of + hash -> + emqx_authn_ldap_hash:authenticate(Credential, State); + bind -> + emqx_authn_ldap_bind:authenticate(Credential, State) end. 
+%% it used the deprecated config form +parse_config( + #{password_attribute := PasswordAttr, is_superuser_attribute := IsSuperuserAttr} = Config0 +) -> + Config = maps:without([password_attribute, is_superuser_attribute], Config0), + parse_config(Config#{ + method => #{ + type => hash, + password_attribute => PasswordAttr, + is_superuser_attribute => IsSuperuserAttr + } + }); parse_config(Config) -> - maps:with([query_timeout, password_attribute, is_superuser_attribute], Config). - -%% To compatible v4.x -is_enabled(Password, #eldap_entry{attributes = Attributes} = Entry, State) -> - IsEnabled = get_lower_bin_value(?ISENABLED_ATTR, Attributes, "true"), - case emqx_authn_utils:to_bool(IsEnabled) of - true -> - ensure_password(Password, Entry, State); - _ -> - {error, user_disabled} - end. - -ensure_password( - Password, - #eldap_entry{attributes = Attributes} = Entry, - #{password_attribute := PasswordAttr} = State -) -> - case get_value(PasswordAttr, Attributes) of - undefined -> - {error, no_password}; - [LDAPPassword | _] -> - extract_hash_algorithm(LDAPPassword, Password, fun try_decode_password/4, Entry, State) - end. - -%% RFC 2307 format password -%% https://datatracker.ietf.org/doc/html/rfc2307 -extract_hash_algorithm(LDAPPassword, Password, OnFail, Entry, State) -> - case - re:run( - LDAPPassword, - "{([^{}]+)}(.+)", - [{capture, all_but_first, list}, global] - ) - of - {match, [[HashTypeStr, PasswordHashStr]]} -> - case emqx_utils:safe_to_existing_atom(string:to_lower(HashTypeStr)) of - {ok, HashType} -> - PasswordHash = to_binary(PasswordHashStr), - is_valid_algorithm(HashType, PasswordHash, Password, Entry, State); - _Error -> - {error, invalid_hash_type} - end; - _ -> - OnFail(LDAPPassword, Password, Entry, State) - end. 
-
-is_valid_algorithm(HashType, PasswordHash, Password, Entry, State) ->
-    case lists:member(HashType, ?VALID_ALGORITHMS) of
-        true ->
-            verify_password(HashType, PasswordHash, Password, Entry, State);
-        _ ->
-            {error, {invalid_hash_type, HashType}}
-    end.
-
-%% this password is in LDIF format which is base64 encoding
-try_decode_password(LDAPPassword, Password, Entry, State) ->
-    case safe_base64_decode(LDAPPassword) of
-        {ok, Decode} ->
-            extract_hash_algorithm(
-                Decode,
-                Password,
-                fun(_, _, _, _) ->
-                    {error, invalid_password}
-                end,
-                Entry,
-                State
-            );
-        {error, Reason} ->
-            {error, {invalid_password, Reason}}
-    end.
-
-%% sha with salt
-%% https://www.openldap.org/faq/data/cache/347.html
-verify_password(ssha, PasswordData, Password, Entry, State) ->
-    case safe_base64_decode(PasswordData) of
-        {ok, <<PasswordHash:20/binary, Salt/binary>>} ->
-            verify_password(sha, hash, PasswordHash, Salt, suffix, Password, Entry, State);
-        {ok, _} ->
-            {error, invalid_ssha_password};
-        {error, Reason} ->
-            {error, {invalid_password, Reason}}
-    end;
-verify_password(
-    Algorithm,
-    Base64HashData,
-    Password,
-    Entry,
-    State
-) ->
-    verify_password(Algorithm, base64, Base64HashData, <<>>, disable, Password, Entry, State).
-
-verify_password(Algorithm, LDAPPasswordType, LDAPPassword, Salt, Position, Password, Entry, State) ->
-    PasswordHash = hash_password(Algorithm, Salt, Position, Password),
-    case compare_password(LDAPPasswordType, LDAPPassword, PasswordHash) of
-        true ->
-            {ok, is_superuser(Entry, State)};
-        _ ->
-            {error, bad_username_or_password}
-    end.
-
-is_superuser(Entry, #{is_superuser_attribute := Attr} = _State) ->
-    Value = get_lower_bin_value(Attr, Entry#eldap_entry.attributes, "false"),
-    #{is_superuser => emqx_authn_utils:to_bool(Value)}.
-
-safe_base64_decode(Data) ->
-    try
-        {ok, base64:decode(Data)}
-    catch
-        _:Reason ->
-            {error, {invalid_base64_data, Reason}}
-    end.
-
-get_lower_bin_value(Key, Proplists, Default) ->
-    [Value | _] = get_value(Key, Proplists, [Default]),
-    to_binary(string:to_lower(Value)).
-
-to_binary(Value) ->
-    erlang:list_to_binary(Value).
-
-hash_password(Algorithm, _Salt, disable, Password) ->
-    hash_password(Algorithm, Password);
-hash_password(Algorithm, Salt, suffix, Password) ->
-    hash_password(Algorithm, <<Password/binary, Salt/binary>>).
-
-hash_password(Algorithm, Data) ->
-    crypto:hash(Algorithm, Data).
-
-compare_password(hash, LDAPPasswordHash, PasswordHash) ->
-    emqx_passwd:compare_secure(LDAPPasswordHash, PasswordHash);
-compare_password(base64, Base64HashData, PasswordHash) ->
-    emqx_passwd:compare_secure(Base64HashData, base64:encode(PasswordHash)).
+    maps:with([query_timeout, method], Config).
diff --git a/apps/emqx_auth_ldap/src/emqx_authn_ldap_bind.erl b/apps/emqx_auth_ldap/src/emqx_authn_ldap_bind.erl
index 000d545b9..1f2af261e 100644
--- a/apps/emqx_auth_ldap/src/emqx_authn_ldap_bind.erl
+++ b/apps/emqx_auth_ldap/src/emqx_authn_ldap_bind.erl
@@ -20,32 +20,13 @@
 -include_lib("emqx/include/logger.hrl").
 -include_lib("eldap/include/eldap.hrl").
 
--behaviour(emqx_authn_provider).
-
 -export([
-    create/2,
-    update/2,
-    authenticate/2,
-    destroy/1
+    authenticate/2
 ]).
 
 %%------------------------------------------------------------------------------
 %% APIs
 %%------------------------------------------------------------------------------
-
-create(_AuthenticatorID, Config) ->
-    emqx_authn_ldap:do_create(?MODULE, Config).
-
-update(Config, State) ->
-    emqx_authn_ldap:update(Config, State).
-
-destroy(State) ->
-    emqx_authn_ldap:destroy(State).
- -authenticate(#{auth_method := _}, _) -> - ignore; -authenticate(#{password := undefined}, _) -> - {error, bad_username_or_password}; authenticate( #{password := _Password} = Credential, #{ diff --git a/apps/emqx_auth_ldap/src/emqx_authn_ldap_bind_schema.erl b/apps/emqx_auth_ldap/src/emqx_authn_ldap_bind_schema.erl deleted file mode 100644 index e5e83daa1..000000000 --- a/apps/emqx_auth_ldap/src/emqx_authn_ldap_bind_schema.erl +++ /dev/null @@ -1,66 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - --module(emqx_authn_ldap_bind_schema). - --behaviour(emqx_authn_schema). - --export([ - fields/1, - desc/1, - refs/0, - select_union_member/1, - namespace/0 -]). - --include("emqx_auth_ldap.hrl"). --include_lib("hocon/include/hoconsc.hrl"). - -namespace() -> "authn". - -refs() -> - [?R_REF(ldap_bind)]. - -select_union_member(#{ - <<"mechanism">> := ?AUTHN_MECHANISM_BIN, <<"backend">> := ?AUTHN_BACKEND_BIND_BIN -}) -> - refs(); -select_union_member(#{<<"backend">> := ?AUTHN_BACKEND_BIND_BIN}) -> - throw(#{ - reason => "unknown_mechanism", - expected => ?AUTHN_MECHANISM - }); -select_union_member(_) -> - undefined. 
- -fields(ldap_bind) -> - [ - {mechanism, emqx_authn_schema:mechanism(?AUTHN_MECHANISM)}, - {backend, emqx_authn_schema:backend(?AUTHN_BACKEND_BIND)}, - {query_timeout, fun query_timeout/1} - ] ++ - emqx_authn_schema:common_fields() ++ - emqx_ldap:fields(config) ++ emqx_ldap:fields(bind_opts). - -desc(ldap_bind) -> - ?DESC(ldap_bind); -desc(_) -> - undefined. - -query_timeout(type) -> emqx_schema:timeout_duration_ms(); -query_timeout(desc) -> ?DESC(?FUNCTION_NAME); -query_timeout(default) -> <<"5s">>; -query_timeout(_) -> undefined. diff --git a/apps/emqx_auth_ldap/src/emqx_authn_ldap_hash.erl b/apps/emqx_auth_ldap/src/emqx_authn_ldap_hash.erl new file mode 100644 index 000000000..e051e57e9 --- /dev/null +++ b/apps/emqx_auth_ldap/src/emqx_authn_ldap_hash.erl @@ -0,0 +1,197 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_authn_ldap_hash). + +-include_lib("emqx_auth/include/emqx_authn.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("eldap/include/eldap.hrl"). + +%% a compatible attribute for version 4.x +-define(ISENABLED_ATTR, "isEnabled"). +-define(VALID_ALGORITHMS, [md5, ssha, sha, sha256, sha384, sha512]). +%% TODO +%% 1. Supports more salt algorithms, SMD5 SSHA 256/384/512 +%% 2. 
Supports https://datatracker.ietf.org/doc/html/rfc3112 + +-export([ + authenticate/2 +]). + +-import(proplists, [get_value/2, get_value/3]). + +%%------------------------------------------------------------------------------ +%% APIs +%%------------------------------------------------------------------------------ +authenticate( + #{password := Password} = Credential, + #{ + method := #{ + password_attribute := PasswordAttr, + is_superuser_attribute := IsSuperuserAttr + }, + query_timeout := Timeout, + resource_id := ResourceId + } = State +) -> + case + emqx_resource:simple_sync_query( + ResourceId, + {query, Credential, [PasswordAttr, IsSuperuserAttr, ?ISENABLED_ATTR], Timeout} + ) + of + {ok, []} -> + ignore; + {ok, [Entry]} -> + is_enabled(Password, Entry, State); + {error, Reason} -> + ?TRACE_AUTHN_PROVIDER(error, "ldap_query_failed", #{ + resource => ResourceId, + timeout => Timeout, + reason => Reason + }), + ignore + end. + +%% To compatible v4.x +is_enabled(Password, #eldap_entry{attributes = Attributes} = Entry, State) -> + IsEnabled = get_lower_bin_value(?ISENABLED_ATTR, Attributes, "true"), + case emqx_authn_utils:to_bool(IsEnabled) of + true -> + ensure_password(Password, Entry, State); + _ -> + {error, user_disabled} + end. + +ensure_password( + Password, + #eldap_entry{attributes = Attributes} = Entry, + #{method := #{password_attribute := PasswordAttr}} = State +) -> + case get_value(PasswordAttr, Attributes) of + undefined -> + {error, no_password}; + [LDAPPassword | _] -> + extract_hash_algorithm(LDAPPassword, Password, fun try_decode_password/4, Entry, State) + end. 
+
+%% RFC 2307 format password
+%% https://datatracker.ietf.org/doc/html/rfc2307
+extract_hash_algorithm(LDAPPassword, Password, OnFail, Entry, State) ->
+    case
+        re:run(
+            LDAPPassword,
+            "{([^{}]+)}(.+)",
+            [{capture, all_but_first, list}, global]
+        )
+    of
+        {match, [[HashTypeStr, PasswordHashStr]]} ->
+            case emqx_utils:safe_to_existing_atom(string:to_lower(HashTypeStr)) of
+                {ok, HashType} ->
+                    PasswordHash = to_binary(PasswordHashStr),
+                    is_valid_algorithm(HashType, PasswordHash, Password, Entry, State);
+                _Error ->
+                    {error, invalid_hash_type}
+            end;
+        _ ->
+            OnFail(LDAPPassword, Password, Entry, State)
+    end.
+
+is_valid_algorithm(HashType, PasswordHash, Password, Entry, State) ->
+    case lists:member(HashType, ?VALID_ALGORITHMS) of
+        true ->
+            verify_password(HashType, PasswordHash, Password, Entry, State);
+        _ ->
+            {error, {invalid_hash_type, HashType}}
+    end.
+
+%% this password is in LDIF format which is base64 encoding
+try_decode_password(LDAPPassword, Password, Entry, State) ->
+    case safe_base64_decode(LDAPPassword) of
+        {ok, Decode} ->
+            extract_hash_algorithm(
+                Decode,
+                Password,
+                fun(_, _, _, _) ->
+                    {error, invalid_password}
+                end,
+                Entry,
+                State
+            );
+        {error, Reason} ->
+            {error, {invalid_password, Reason}}
+    end.
+
+%% sha with salt
+%% https://www.openldap.org/faq/data/cache/347.html
+verify_password(ssha, PasswordData, Password, Entry, State) ->
+    case safe_base64_decode(PasswordData) of
+        {ok, <<PasswordHash:20/binary, Salt/binary>>} ->
+            verify_password(sha, hash, PasswordHash, Salt, suffix, Password, Entry, State);
+        {ok, _} ->
+            {error, invalid_ssha_password};
+        {error, Reason} ->
+            {error, {invalid_password, Reason}}
+    end;
+verify_password(
+    Algorithm,
+    Base64HashData,
+    Password,
+    Entry,
+    State
+) ->
+    verify_password(Algorithm, base64, Base64HashData, <<>>, disable, Password, Entry, State).
+
+verify_password(Algorithm, LDAPPasswordType, LDAPPassword, Salt, Position, Password, Entry, State) ->
+    PasswordHash = hash_password(Algorithm, Salt, Position, Password),
+    case compare_password(LDAPPasswordType, LDAPPassword, PasswordHash) of
+        true ->
+            {ok, is_superuser(Entry, State)};
+        _ ->
+            {error, bad_username_or_password}
+    end.
+
+is_superuser(Entry, #{method := #{is_superuser_attribute := Attr}} = _State) ->
+    Value = get_lower_bin_value(Attr, Entry#eldap_entry.attributes, "false"),
+    #{is_superuser => emqx_authn_utils:to_bool(Value)}.
+
+safe_base64_decode(Data) ->
+    try
+        {ok, base64:decode(Data)}
+    catch
+        _:Reason ->
+            {error, {invalid_base64_data, Reason}}
+    end.
+
+get_lower_bin_value(Key, Proplists, Default) ->
+    [Value | _] = get_value(Key, Proplists, [Default]),
+    to_binary(string:to_lower(Value)).
+
+to_binary(Value) ->
+    erlang:list_to_binary(Value).
+
+hash_password(Algorithm, _Salt, disable, Password) ->
+    hash_password(Algorithm, Password);
+hash_password(Algorithm, Salt, suffix, Password) ->
+    hash_password(Algorithm, <<Password/binary, Salt/binary>>).
+
+hash_password(Algorithm, Data) ->
+    crypto:hash(Algorithm, Data).
+
+compare_password(hash, LDAPPasswordHash, PasswordHash) ->
+    emqx_passwd:compare_secure(LDAPPasswordHash, PasswordHash);
+compare_password(base64, Base64HashData, PasswordHash) ->
+    emqx_passwd:compare_secure(Base64HashData, base64:encode(PasswordHash)).
diff --git a/apps/emqx_auth_ldap/src/emqx_authn_ldap_schema.erl b/apps/emqx_auth_ldap/src/emqx_authn_ldap_schema.erl
index fe9917fa1..3190d6e14 100644
--- a/apps/emqx_auth_ldap/src/emqx_authn_ldap_schema.erl
+++ b/apps/emqx_auth_ldap/src/emqx_authn_ldap_schema.erl
@@ -32,7 +32,7 @@
 namespace() -> "authn".
 
 refs() ->
-    [?R_REF(ldap)].
+    [?R_REF(ldap), ?R_REF(ldap_deprecated)].
select_union_member(#{<<"mechanism">> := ?AUTHN_MECHANISM_BIN, <<"backend">> := ?AUTHN_BACKEND_BIN}) -> refs(); @@ -44,12 +44,34 @@ select_union_member(#{<<"backend">> := ?AUTHN_BACKEND_BIN}) -> select_union_member(_) -> undefined. +fields(ldap_deprecated) -> + common_fields() ++ + [ + {password_attribute, password_attribute()}, + {is_superuser_attribute, is_superuser_attribute()} + ]; fields(ldap) -> + common_fields() ++ + [ + {method, + ?HOCON( + hoconsc:union([?R_REF(hash_method), ?R_REF(bind_method)]), + #{desc => ?DESC(method)} + )} + ]; +fields(hash_method) -> + [ + {type, method_type(hash)}, + {password_attribute, password_attribute()}, + {is_superuser_attribute, is_superuser_attribute()} + ]; +fields(bind_method) -> + [{type, method_type(bind)}] ++ emqx_ldap:fields(bind_opts). + +common_fields() -> [ {mechanism, emqx_authn_schema:mechanism(?AUTHN_MECHANISM)}, {backend, emqx_authn_schema:backend(?AUTHN_BACKEND)}, - {password_attribute, fun password_attribute/1}, - {is_superuser_attribute, fun is_superuser_attribute/1}, {query_timeout, fun query_timeout/1} ] ++ emqx_authn_schema:common_fields() ++ @@ -57,18 +79,35 @@ fields(ldap) -> desc(ldap) -> ?DESC(ldap); +desc(ldap_deprecated) -> + ?DESC(ldap_deprecated); +desc(hash_method) -> + ?DESC(hash_method); +desc(bind_method) -> + ?DESC(bind_method); desc(_) -> undefined. -password_attribute(type) -> string(); -password_attribute(desc) -> ?DESC(?FUNCTION_NAME); -password_attribute(default) -> <<"userPassword">>; -password_attribute(_) -> undefined. +method_type(Type) -> + ?HOCON(?ENUM([Type]), #{desc => ?DESC(?FUNCTION_NAME), default => Type}). -is_superuser_attribute(type) -> string(); -is_superuser_attribute(desc) -> ?DESC(?FUNCTION_NAME); -is_superuser_attribute(default) -> <<"isSuperuser">>; -is_superuser_attribute(_) -> undefined. +password_attribute() -> + ?HOCON( + string(), + #{ + desc => ?DESC(?FUNCTION_NAME), + default => <<"userPassword">> + } + ). 
+ +is_superuser_attribute() -> + ?HOCON( + string(), + #{ + desc => ?DESC(?FUNCTION_NAME), + default => <<"isSuperuser">> + } + ). query_timeout(type) -> emqx_schema:timeout_duration_ms(); query_timeout(desc) -> ?DESC(?FUNCTION_NAME); diff --git a/apps/emqx_auth_ldap/test/emqx_authn_ldap_SUITE.erl b/apps/emqx_auth_ldap/test/emqx_authn_ldap_SUITE.erl index 63bceee85..2aa1c5c96 100644 --- a/apps/emqx_auth_ldap/test/emqx_authn_ldap_SUITE.erl +++ b/apps/emqx_auth_ldap/test/emqx_authn_ldap_SUITE.erl @@ -70,6 +70,29 @@ end_per_suite(Config) -> %% Tests %%------------------------------------------------------------------------------ +t_create_with_deprecated_cfg(_Config) -> + AuthConfig = deprecated_raw_ldap_auth_config(), + + {ok, _} = emqx:update_config( + ?PATH, + {create_authenticator, ?GLOBAL, AuthConfig} + ), + + {ok, [#{provider := emqx_authn_ldap, state := State}]} = emqx_authn_chains:list_authenticators( + ?GLOBAL + ), + ?assertMatch( + #{ + method := #{ + type := hash, + is_superuser_attribute := _, + password_attribute := "not_the_default_value" + } + }, + State + ), + emqx_authn_test_lib:delete_config(?ResourceID). + t_create(_Config) -> AuthConfig = raw_ldap_auth_config(), @@ -225,6 +248,19 @@ raw_ldap_auth_config() -> <<"pool_size">> => 8 }. +deprecated_raw_ldap_auth_config() -> + #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"ldap">>, + <<"server">> => ldap_server(), + <<"is_superuser_attribute">> => <<"isSuperuser">>, + <<"password_attribute">> => <<"not_the_default_value">>, + <<"base_dn">> => <<"uid=${username},ou=testdevice,dc=emqx,dc=io">>, + <<"username">> => <<"cn=root,dc=emqx,dc=io">>, + <<"password">> => <<"public">>, + <<"pool_size">> => 8 + }. 
+ user_seeds() -> New = fun(Username, Password, Result) -> #{ diff --git a/apps/emqx_auth_ldap/test/emqx_authn_ldap_bind_SUITE.erl b/apps/emqx_auth_ldap/test/emqx_authn_ldap_bind_SUITE.erl index 1f390264b..d2b3c371c 100644 --- a/apps/emqx_auth_ldap/test/emqx_authn_ldap_bind_SUITE.erl +++ b/apps/emqx_auth_ldap/test/emqx_authn_ldap_bind_SUITE.erl @@ -27,7 +27,7 @@ -define(LDAP_RESOURCE, <<"emqx_authn_ldap_bind_SUITE">>). -define(PATH, [authentication]). --define(ResourceID, <<"password_based:ldap_bind">>). +-define(ResourceID, <<"password_based:ldap">>). all() -> emqx_common_test_helpers:all(?MODULE). @@ -78,7 +78,7 @@ t_create(_Config) -> {create_authenticator, ?GLOBAL, AuthConfig} ), - {ok, [#{provider := emqx_authn_ldap_bind}]} = emqx_authn_chains:list_authenticators(?GLOBAL), + {ok, [#{provider := emqx_authn_ldap}]} = emqx_authn_chains:list_authenticators(?GLOBAL), emqx_authn_test_lib:delete_config(?ResourceID). t_create_invalid(_Config) -> @@ -146,10 +146,10 @@ t_destroy(_Config) -> {create_authenticator, ?GLOBAL, AuthConfig} ), - {ok, [#{provider := emqx_authn_ldap_bind, state := State}]} = + {ok, [#{provider := emqx_authn_ldap, state := State}]} = emqx_authn_chains:list_authenticators(?GLOBAL), - {ok, _} = emqx_authn_ldap_bind:authenticate( + {ok, _} = emqx_authn_ldap:authenticate( #{ username => <<"mqttuser0001">>, password => <<"mqttuser0001">> @@ -165,7 +165,7 @@ t_destroy(_Config) -> % Authenticator should not be usable anymore ?assertMatch( ignore, - emqx_authn_ldap_bind:authenticate( + emqx_authn_ldap:authenticate( #{ username => <<"mqttuser0001">>, password => <<"mqttuser0001">> @@ -199,7 +199,7 @@ t_update(_Config) -> % We update with config with correct query, provider should update and work properly {ok, _} = emqx:update_config( ?PATH, - {update_authenticator, ?GLOBAL, <<"password_based:ldap_bind">>, CorrectConfig} + {update_authenticator, ?GLOBAL, <<"password_based:ldap">>, CorrectConfig} ), {ok, _} = emqx_access_control:authenticate( @@ -218,14 
+218,17 @@ t_update(_Config) -> raw_ldap_auth_config() -> #{ <<"mechanism">> => <<"password_based">>, - <<"backend">> => <<"ldap_bind">>, + <<"backend">> => <<"ldap">>, <<"server">> => ldap_server(), <<"base_dn">> => <<"ou=testdevice,dc=emqx,dc=io">>, <<"filter">> => <<"(uid=${username})">>, <<"username">> => <<"cn=root,dc=emqx,dc=io">>, <<"password">> => <<"public">>, <<"pool_size">> => 8, - <<"bind_password">> => <<"${password}">> + <<"method">> => #{ + <<"type">> => <<"bind">>, + <<"bind_password">> => <<"${password}">> + } }. user_seeds() -> diff --git a/apps/emqx_auth_mongodb/test/emqx_authn_mongodb_SUITE.erl b/apps/emqx_auth_mongodb/test/emqx_authn_mongodb_SUITE.erl index c6623c11f..9ccad551d 100644 --- a/apps/emqx_auth_mongodb/test/emqx_authn_mongodb_SUITE.erl +++ b/apps/emqx_auth_mongodb/test/emqx_authn_mongodb_SUITE.erl @@ -278,6 +278,10 @@ raw_mongo_auth_config() -> <<"server">> => mongo_server(), <<"w_mode">> => <<"unsafe">>, + <<"auth_source">> => mongo_authsource(), + <<"username">> => mongo_username(), + <<"password">> => mongo_password(), + <<"filter">> => #{<<"username">> => <<"${username}">>}, <<"password_hash_field">> => <<"password_hash">>, <<"salt_field">> => <<"salt">>, @@ -464,9 +468,21 @@ mongo_config() -> {database, <<"mqtt">>}, {host, ?MONGO_HOST}, {port, ?MONGO_DEFAULT_PORT}, + {auth_source, mongo_authsource()}, + {login, mongo_username()}, + {password, mongo_password()}, {register, ?MONGO_CLIENT} ]. +mongo_authsource() -> + iolist_to_binary(os:getenv("MONGO_AUTHSOURCE", "admin")). + +mongo_username() -> + iolist_to_binary(os:getenv("MONGO_USERNAME", "")). + +mongo_password() -> + iolist_to_binary(os:getenv("MONGO_PASSWORD", "")). + start_apps(Apps) -> lists:foreach(fun application:ensure_all_started/1, Apps). 
diff --git a/apps/emqx_auth_mongodb/test/emqx_authz_mongodb_SUITE.erl b/apps/emqx_auth_mongodb/test/emqx_authz_mongodb_SUITE.erl index c57dce860..b19d7fba2 100644 --- a/apps/emqx_auth_mongodb/test/emqx_authz_mongodb_SUITE.erl +++ b/apps/emqx_auth_mongodb/test/emqx_authz_mongodb_SUITE.erl @@ -397,6 +397,10 @@ raw_mongo_authz_config() -> <<"collection">> => <<"acl">>, <<"server">> => mongo_server(), + <<"auth_source">> => mongo_authsource(), + <<"username">> => mongo_username(), + <<"password">> => mongo_password(), + <<"filter">> => #{<<"username">> => <<"${username}">>} }. @@ -408,9 +412,21 @@ mongo_config() -> {database, <<"mqtt">>}, {host, ?MONGO_HOST}, {port, ?MONGO_DEFAULT_PORT}, + {auth_source, mongo_authsource()}, + {login, mongo_username()}, + {password, mongo_password()}, {register, ?MONGO_CLIENT} ]. +mongo_authsource() -> + iolist_to_binary(os:getenv("MONGO_AUTHSOURCE", "admin")). + +mongo_username() -> + iolist_to_binary(os:getenv("MONGO_USERNAME", "")). + +mongo_password() -> + iolist_to_binary(os:getenv("MONGO_PASSWORD", "")). + start_apps(Apps) -> lists:foreach(fun application:ensure_all_started/1, Apps). diff --git a/apps/emqx_auth_redis/src/emqx_auth_redis.app.src b/apps/emqx_auth_redis/src/emqx_auth_redis.app.src index bd33606d3..b5669e706 100644 --- a/apps/emqx_auth_redis/src/emqx_auth_redis.app.src +++ b/apps/emqx_auth_redis/src/emqx_auth_redis.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_auth_redis, [ {description, "EMQX Redis Authentication and Authorization"}, - {vsn, "0.1.1"}, + {vsn, "0.1.2"}, {registered, []}, {mod, {emqx_auth_redis_app, []}}, {applications, [ diff --git a/apps/emqx_auth_redis/src/emqx_authn_redis_schema.erl b/apps/emqx_auth_redis/src/emqx_authn_redis_schema.erl index f3e124ca1..b72905f6b 100644 --- a/apps/emqx_auth_redis/src/emqx_authn_redis_schema.erl +++ b/apps/emqx_auth_redis/src/emqx_authn_redis_schema.erl @@ -64,12 +64,8 @@ refs(_) -> expected => "single | cluster | sentinel" }). 
-fields(redis_single) -> - common_fields() ++ emqx_redis:fields(single); -fields(redis_cluster) -> - common_fields() ++ emqx_redis:fields(cluster); -fields(redis_sentinel) -> - common_fields() ++ emqx_redis:fields(sentinel). +fields(Type) -> + common_fields() ++ emqx_redis:fields(Type). desc(redis_single) -> ?DESC(single); diff --git a/apps/emqx_auth_redis/src/emqx_authz_redis_schema.erl b/apps/emqx_auth_redis/src/emqx_authz_redis_schema.erl index 5cd084795..96949b0ea 100644 --- a/apps/emqx_auth_redis/src/emqx_authz_redis_schema.erl +++ b/apps/emqx_auth_redis/src/emqx_authz_redis_schema.erl @@ -34,17 +34,9 @@ namespace() -> "authz". type() -> ?AUTHZ_TYPE. -fields(redis_single) -> +fields(Type) -> emqx_authz_schema:authz_common_fields(?AUTHZ_TYPE) ++ - emqx_redis:fields(single) ++ - [{cmd, cmd()}]; -fields(redis_sentinel) -> - emqx_authz_schema:authz_common_fields(?AUTHZ_TYPE) ++ - emqx_redis:fields(sentinel) ++ - [{cmd, cmd()}]; -fields(redis_cluster) -> - emqx_authz_schema:authz_common_fields(?AUTHZ_TYPE) ++ - emqx_redis:fields(cluster) ++ + emqx_redis:fields(Type) ++ [{cmd, cmd()}]. desc(redis_single) -> diff --git a/apps/emqx_bridge/src/emqx_action_info.erl b/apps/emqx_bridge/src/emqx_action_info.erl index 3b5589921..f975a1c93 100644 --- a/apps/emqx_bridge/src/emqx_action_info.erl +++ b/apps/emqx_bridge/src/emqx_action_info.erl @@ -22,7 +22,7 @@ -export([ action_type_to_connector_type/1, - action_type_to_bridge_v1_type/1, + action_type_to_bridge_v1_type/2, bridge_v1_type_to_action_type/1, is_action_type/1, registered_schema_modules/0, @@ -35,7 +35,12 @@ transform_bridge_v1_config_to_action_config/4 ]). --callback bridge_v1_type_name() -> atom(). +-callback bridge_v1_type_name() -> + atom() + | { + fun(({ActionConfig :: map(), ConnectorConfig :: map()}) -> Type :: atom()), + TypeList :: [atom()] + }. -callback action_type_name() -> atom(). -callback connector_type_name() -> atom(). -callback schema_module() -> atom(). 
@@ -68,8 +73,16 @@ -if(?EMQX_RELEASE_EDITION == ee). hard_coded_action_info_modules_ee() -> [ + emqx_bridge_azure_event_hub_action_info, + emqx_bridge_confluent_producer_action_info, + emqx_bridge_gcp_pubsub_producer_action_info, emqx_bridge_kafka_action_info, - emqx_bridge_azure_event_hub_action_info + emqx_bridge_matrix_action_info, + emqx_bridge_mongodb_action_info, + emqx_bridge_pgsql_action_info, + emqx_bridge_syskeeper_action_info, + emqx_bridge_timescale_action_info, + emqx_bridge_redis_action_info ]. -else. hard_coded_action_info_modules_ee() -> @@ -106,16 +119,30 @@ bridge_v1_type_to_action_type(Type) -> ActionType -> ActionType end. -action_type_to_bridge_v1_type(Bin) when is_binary(Bin) -> - action_type_to_bridge_v1_type(binary_to_existing_atom(Bin)); -action_type_to_bridge_v1_type(Type) -> +action_type_to_bridge_v1_type(Bin, Conf) when is_binary(Bin) -> + action_type_to_bridge_v1_type(binary_to_existing_atom(Bin), Conf); +action_type_to_bridge_v1_type(ActionType, ActionConf) -> ActionInfoMap = info_map(), ActionTypeToBridgeV1Type = maps:get(action_type_to_bridge_v1_type, ActionInfoMap), - case maps:get(Type, ActionTypeToBridgeV1Type, undefined) of - undefined -> Type; - BridgeV1Type -> BridgeV1Type + case maps:get(ActionType, ActionTypeToBridgeV1Type, undefined) of + undefined -> + ActionType; + BridgeV1TypeFun when is_function(BridgeV1TypeFun) -> + case get_confs(ActionType, ActionConf) of + {ConnectorConfig, ActionConfig} -> BridgeV1TypeFun({ConnectorConfig, ActionConfig}); + undefined -> ActionType + end; + BridgeV1Type -> + BridgeV1Type end. +get_confs(ActionType, #{<<"connector">> := ConnectorName} = ActionConfig) -> + ConnectorType = action_type_to_connector_type(ActionType), + ConnectorConfig = emqx_conf:get_raw([connectors, ConnectorType, ConnectorName]), + {ConnectorConfig, ActionConfig}; +get_confs(_, _) -> + undefined. 
+ %% This function should return true for all inputs that are bridge V1 types for %% bridges that have been refactored to bridge V2s, and for all all bridge V2 %% types. For everything else the function should return false. @@ -232,37 +259,56 @@ get_info_map(Module) -> %% Force the module to get loaded _ = code:ensure_loaded(Module), ActionType = Module:action_type_name(), - BridgeV1Type = + {BridgeV1TypeOrFun, BridgeV1Types} = case erlang:function_exported(Module, bridge_v1_type_name, 0) of true -> - Module:bridge_v1_type_name(); + case Module:bridge_v1_type_name() of + {_BridgeV1TypeFun, _BridgeV1Types} = BridgeV1TypeTuple -> + BridgeV1TypeTuple; + BridgeV1Type0 -> + {BridgeV1Type0, [BridgeV1Type0]} + end; false -> - Module:action_type_name() + {ActionType, [ActionType]} end, #{ - action_type_names => #{ - ActionType => true, - BridgeV1Type => true - }, - bridge_v1_type_to_action_type => #{ - BridgeV1Type => ActionType, - %% Alias the bridge V1 type to the action type - ActionType => ActionType - }, + action_type_names => + lists:foldl( + fun(BridgeV1Type, M) -> + M#{BridgeV1Type => true} + end, + #{ActionType => true}, + BridgeV1Types + ), + bridge_v1_type_to_action_type => + lists:foldl( + fun(BridgeV1Type, M) -> + %% Alias the bridge V1 type to the action type + M#{BridgeV1Type => ActionType} + end, + #{ActionType => ActionType}, + BridgeV1Types + ), action_type_to_bridge_v1_type => #{ - ActionType => BridgeV1Type - }, - action_type_to_connector_type => #{ - ActionType => Module:connector_type_name(), - %% Alias the bridge V1 type to the action type - BridgeV1Type => Module:connector_type_name() + ActionType => BridgeV1TypeOrFun }, + action_type_to_connector_type => + lists:foldl( + fun(BridgeV1Type, M) -> + M#{BridgeV1Type => Module:connector_type_name()} + end, + #{ActionType => Module:connector_type_name()}, + BridgeV1Types + ), action_type_to_schema_module => #{ ActionType => Module:schema_module() }, - action_type_to_info_module => #{ - ActionType => 
Module, - %% Alias the bridge V1 type to the action type - BridgeV1Type => Module - } + action_type_to_info_module => + lists:foldl( + fun(BridgeV1Type, M) -> + M#{BridgeV1Type => Module} + end, + #{ActionType => Module}, + BridgeV1Types + ) }. diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index f829b12df..2aa610f24 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ {description, "EMQX bridges"}, - {vsn, "0.1.30"}, + {vsn, "0.1.31"}, {registered, [emqx_bridge_sup]}, {mod, {emqx_bridge_app, []}}, {applications, [ diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 4156a37d1..d26a44a1d 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -92,7 +92,8 @@ T == iotdb; T == kinesis_producer; T == greptimedb; - T == azure_event_hub_producer + T == azure_event_hub_producer; + T == syskeeper_forwarder ). -define(ROOT_KEY, bridges). @@ -236,9 +237,15 @@ send_to_matched_egress_bridges_loop(Topic, Msg, [Id | Ids]) -> send_to_matched_egress_bridges_loop(Topic, Msg, Ids). send_message(BridgeId, Message) -> - {BridgeType, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId), - ResId = emqx_bridge_resource:resource_id(BridgeType, BridgeName), - send_message(BridgeType, BridgeName, ResId, Message, #{}). + {BridgeV1Type, BridgeName} = emqx_bridge_resource:parse_bridge_id(BridgeId), + case emqx_bridge_v2:is_bridge_v2_type(BridgeV1Type) of + true -> + ActionType = emqx_action_info:bridge_v1_type_to_action_type(BridgeV1Type), + emqx_bridge_v2:send_message(ActionType, BridgeName, Message, #{}); + false -> + ResId = emqx_bridge_resource:resource_id(BridgeV1Type, BridgeName), + send_message(BridgeV1Type, BridgeName, ResId, Message, #{}) + end. 
send_message(BridgeType, BridgeName, ResId, Message, QueryOpts0) -> case emqx:get_config([?ROOT_KEY, BridgeType, BridgeName], not_found) of @@ -376,8 +383,8 @@ disable_enable(Action, BridgeType0, BridgeName) when ) end. -create(BridgeType0, BridgeName, RawConf) -> - BridgeType = upgrade_type(BridgeType0), +create(BridgeV1Type, BridgeName, RawConf) -> + BridgeType = upgrade_type(BridgeV1Type), ?SLOG(debug, #{ bridge_action => create, bridge_type => BridgeType, @@ -386,7 +393,7 @@ create(BridgeType0, BridgeName, RawConf) -> }), case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of true -> - emqx_bridge_v2:bridge_v1_split_config_and_create(BridgeType, BridgeName, RawConf); + emqx_bridge_v2:bridge_v1_split_config_and_create(BridgeV1Type, BridgeName, RawConf); false -> emqx_conf:update( emqx_bridge:config_key_path() ++ [BridgeType, BridgeName], @@ -407,7 +414,7 @@ remove(BridgeType0, BridgeName) -> }), case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of true -> - emqx_bridge_v2:remove(BridgeType, BridgeName); + emqx_bridge_v2:bridge_v1_remove(BridgeType0, BridgeName); false -> remove_v1(BridgeType, BridgeName) end. diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index c9c761105..b725eb740 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -908,7 +908,7 @@ format_resource( redact( maps:merge( RawConfFull#{ - type => downgrade_type(Type), + type => downgrade_type(Type, RawConf), name => maps:get(<<"name">>, RawConf, BridgeName), node => Node }, @@ -1163,5 +1163,5 @@ non_compat_bridge_msg() -> upgrade_type(Type) -> emqx_bridge_lib:upgrade_type(Type). -downgrade_type(Type) -> - emqx_bridge_lib:downgrade_type(Type). +downgrade_type(Type, Conf) -> + emqx_bridge_lib:downgrade_type(Type, Conf). 
diff --git a/apps/emqx_bridge/src/emqx_bridge_lib.erl b/apps/emqx_bridge/src/emqx_bridge_lib.erl index 4be605745..ed8e918fa 100644 --- a/apps/emqx_bridge/src/emqx_bridge_lib.erl +++ b/apps/emqx_bridge/src/emqx_bridge_lib.erl @@ -18,7 +18,7 @@ -export([ maybe_withdraw_rule_action/3, upgrade_type/1, - downgrade_type/1 + downgrade_type/2 ]). %% @doc A bridge can be used as a rule action. @@ -61,23 +61,31 @@ upgrade_type(Type) when is_list(Type) -> atom_to_list(emqx_bridge_v2:bridge_v1_type_to_bridge_v2_type(list_to_binary(Type))). %% @doc Kafka producer bridge type renamed from 'kafka' to 'kafka_bridge' since 5.3.1 -downgrade_type(Type) when is_atom(Type) -> - emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type); -downgrade_type(Type) when is_binary(Type) -> - atom_to_binary(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type)); -downgrade_type(Type) when is_list(Type) -> - atom_to_list(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(list_to_binary(Type))). +downgrade_type(Type, Conf) when is_atom(Type) -> + emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type, Conf); +downgrade_type(Type, Conf) when is_binary(Type) -> + atom_to_binary(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(Type, Conf)); +downgrade_type(Type, Conf) when is_list(Type) -> + atom_to_list(emqx_bridge_v2:bridge_v2_type_to_bridge_v1_type(list_to_binary(Type), Conf)). %% A rule might be referencing an old version bridge type name %% i.e. 'kafka' instead of 'kafka_producer' so we need to try both external_ids(Type, Name) -> - case downgrade_type(Type) of + case downgrade_type(Type, get_conf(Type, Name)) of Type -> [external_id(Type, Name)]; Type0 -> [external_id(Type0, Name), external_id(Type, Name)] end. +get_conf(BridgeType, BridgeName) -> + case emqx_bridge_v2:is_bridge_v2_type(BridgeType) of + true -> + emqx_conf:get_raw([actions, BridgeType, BridgeName]); + false -> + undefined + end. 
+ %% Creates the external id for the bridge_v2 that is used by the rule actions %% to refer to the bridge_v2 external_id(BridgeType, BridgeName) -> diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index 7f58a880c..0a870abb8 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -362,9 +362,10 @@ parse_confs(<<"iotdb">>, Name, Conf) -> authentication := #{ username := Username, - password := Password + password := Secret } } = Conf, + Password = emqx_secret:unwrap(Secret), BasicToken = base64:encode(<>), %% This version atom correspond to the macro ?VSN_1_1_X in %% emqx_bridge_iotdb.hrl. It would be better to use the macro directly, but diff --git a/apps/emqx_bridge/src/emqx_bridge_v2.erl b/apps/emqx_bridge/src/emqx_bridge_v2.erl index 0c0c0752d..97d0afb43 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2.erl @@ -55,6 +55,7 @@ disable_enable/3, health_check/2, send_message/4, + query/4, start/2, reset_metrics/2, create_dry_run/2, @@ -111,12 +112,14 @@ bridge_v1_create_dry_run/2, bridge_v1_type_to_bridge_v2_type/1, %% Exception from the naming convention: - bridge_v2_type_to_bridge_v1_type/1, + bridge_v2_type_to_bridge_v1_type/2, bridge_v1_id_to_connector_resource_id/1, bridge_v1_enable_disable/3, bridge_v1_restart/2, bridge_v1_stop/2, - bridge_v1_start/2 + bridge_v1_start/2, + %% For test cases only + bridge_v1_remove/2 ]). 
%%==================================================================== @@ -410,10 +413,10 @@ uninstall_bridge_v2( CreationOpts = emqx_resource:fetch_creation_opts(Config), ok = emqx_resource_buffer_worker_sup:stop_workers(BridgeV2Id, CreationOpts), ok = emqx_resource:clear_metrics(BridgeV2Id), - case combine_connector_and_bridge_v2_config(BridgeV2Type, BridgeName, Config) of + case validate_referenced_connectors(BridgeV2Type, ConnectorName, BridgeName) of {error, _} -> ok; - _CombinedConfig -> + ok -> %% Deinstall from connector ConnectorId = emqx_connector_resource:resource_id( connector_type(BridgeV2Type), ConnectorName @@ -547,25 +550,25 @@ get_query_mode(BridgeV2Type, Config) -> ResourceType = emqx_connector_resource:connector_to_resource_type(ConnectorType), emqx_resource:query_mode(ResourceType, Config, CreationOpts). --spec send_message(bridge_v2_type(), bridge_v2_name(), Message :: term(), QueryOpts :: map()) -> +-spec query(bridge_v2_type(), bridge_v2_name(), Message :: term(), QueryOpts :: map()) -> term() | {error, term()}. -send_message(BridgeType, BridgeName, Message, QueryOpts0) -> +query(BridgeType, BridgeName, Message, QueryOpts0) -> case lookup_conf(BridgeType, BridgeName) of #{enable := true} = Config0 -> Config = combine_connector_and_bridge_v2_config(BridgeType, BridgeName, Config0), - do_send_msg_with_enabled_config(BridgeType, BridgeName, Message, QueryOpts0, Config); + do_query_with_enabled_config(BridgeType, BridgeName, Message, QueryOpts0, Config); #{enable := false} -> {error, bridge_stopped}; _Error -> {error, bridge_not_found} end. 
-do_send_msg_with_enabled_config( +do_query_with_enabled_config( _BridgeType, _BridgeName, _Message, _QueryOpts0, {error, Reason} = Error ) -> ?SLOG(error, Reason), Error; -do_send_msg_with_enabled_config( +do_query_with_enabled_config( BridgeType, BridgeName, Message, QueryOpts0, Config ) -> QueryMode = get_query_mode(BridgeType, Config), @@ -579,7 +582,17 @@ do_send_msg_with_enabled_config( } ), BridgeV2Id = id(BridgeType, BridgeName), - emqx_resource:query(BridgeV2Id, {BridgeV2Id, Message}, QueryOpts). + case Message of + {send_message, Msg} -> + emqx_resource:query(BridgeV2Id, {BridgeV2Id, Msg}, QueryOpts); + Msg -> + emqx_resource:query(BridgeV2Id, Msg, QueryOpts) + end. + +-spec send_message(bridge_v2_type(), bridge_v2_name(), Message :: term(), QueryOpts :: map()) -> + term() | {error, term()}. +send_message(BridgeType, BridgeName, Message, QueryOpts0) -> + query(BridgeType, BridgeName, {send_message, Message}, QueryOpts0). -spec health_check(BridgeType :: term(), BridgeName :: term()) -> #{status := emqx_resource:resource_status(), error := term()} | {error, Reason :: term()}. @@ -785,17 +798,24 @@ parse_id(Id) -> end. get_channels_for_connector(ConnectorId) -> - {ConnectorType, ConnectorName} = emqx_connector_resource:parse_connector_id(ConnectorId), - RootConf = maps:keys(emqx:get_config([?ROOT_KEY], #{})), - RelevantBridgeV2Types = [ - Type - || Type <- RootConf, - connector_type(Type) =:= ConnectorType - ], - lists:flatten([ - get_channels_for_connector(ConnectorName, BridgeV2Type) - || BridgeV2Type <- RelevantBridgeV2Types - ]). 
+ try emqx_connector_resource:parse_connector_id(ConnectorId) of + {ConnectorType, ConnectorName} -> + RootConf = maps:keys(emqx:get_config([?ROOT_KEY], #{})), + RelevantBridgeV2Types = [ + Type + || Type <- RootConf, + connector_type(Type) =:= ConnectorType + ], + lists:flatten([ + get_channels_for_connector(ConnectorName, BridgeV2Type) + || BridgeV2Type <- RelevantBridgeV2Types + ]) + catch + _:_ -> + %% ConnectorId is not a valid connector id so we assume the connector + %% has no channels (e.g. it is a connector for authn or authz) + [] + end. get_channels_for_connector(ConnectorName, BridgeV2Type) -> BridgeV2s = emqx:get_config([?ROOT_KEY, BridgeV2Type], #{}), @@ -1053,8 +1073,8 @@ bridge_v1_is_valid(BridgeV1Type, BridgeName) -> bridge_v1_type_to_bridge_v2_type(Type) -> emqx_action_info:bridge_v1_type_to_action_type(Type). -bridge_v2_type_to_bridge_v1_type(Type) -> - emqx_action_info:action_type_to_bridge_v1_type(Type). +bridge_v2_type_to_bridge_v1_type(ActionType, ActionConf) -> + emqx_action_info:action_type_to_bridge_v1_type(ActionType, ActionConf). is_bridge_v2_type(Type) -> emqx_action_info:is_action_type(Type). @@ -1064,25 +1084,30 @@ bridge_v1_list_and_transform() -> [B || B <- Bridges, B =/= not_bridge_v1_compatible_error()]. 
bridge_v1_lookup_and_transform(ActionType, Name) -> - BridgeV1Type = ?MODULE:bridge_v2_type_to_bridge_v1_type(ActionType), - case ?MODULE:bridge_v1_is_valid(BridgeV1Type, Name) of - true -> - case lookup(ActionType, Name) of - {ok, #{raw_config := #{<<"connector">> := ConnectorName}} = BridgeV2} -> + case lookup(ActionType, Name) of + {ok, #{raw_config := #{<<"connector">> := ConnectorName} = RawConfig} = ActionConfig} -> + BridgeV1Type = ?MODULE:bridge_v2_type_to_bridge_v1_type(ActionType, RawConfig), + case ?MODULE:bridge_v1_is_valid(BridgeV1Type, Name) of + true -> ConnectorType = connector_type(ActionType), case emqx_connector:lookup(ConnectorType, ConnectorName) of {ok, Connector} -> bridge_v1_lookup_and_transform_helper( - BridgeV1Type, Name, ActionType, BridgeV2, ConnectorType, Connector + BridgeV1Type, + Name, + ActionType, + ActionConfig, + ConnectorType, + Connector ); Error -> Error end; - Error -> - Error + false -> + not_bridge_v1_compatible_error() end; - false -> - not_bridge_v1_compatible_error() + Error -> + Error end. not_bridge_v1_compatible_error() -> @@ -1276,6 +1301,8 @@ split_and_validate_bridge_v1_config(BridgeV1Type, BridgeName, RawConf, PreviousR #{bin(BridgeV2Type) => #{bin(BridgeName) => PreviousRawConf}}, PreviousRawConf =/= undefined ), + %% [FIXME] this will loop through all connector types, instead pass the + %% connector type and just do it for that one Output = emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2( FakeGlobalConfig ), @@ -1360,6 +1387,34 @@ bridge_v1_create_dry_run(BridgeType, RawConfig0) -> {error, Reason} end. +%% Only called by test cases (may create broken references) +bridge_v1_remove(BridgeV1Type, BridgeName) -> + ActionType = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type), + bridge_v1_remove( + ActionType, + BridgeName, + lookup_conf(ActionType, BridgeName) + ). 
+ +bridge_v1_remove( + ActionType, + Name, + #{connector := ConnectorName} +) -> + case remove(ActionType, Name) of + ok -> + ConnectorType = connector_type(ActionType), + emqx_connector:remove(ConnectorType, ConnectorName); + Error -> + Error + end; +bridge_v1_remove( + _ActionType, + _Name, + Error +) -> + Error. + bridge_v1_check_deps_and_remove(BridgeV1Type, BridgeName, RemoveDeps) -> BridgeV2Type = ?MODULE:bridge_v1_type_to_bridge_v2_type(BridgeV1Type), bridge_v1_check_deps_and_remove( diff --git a/apps/emqx_bridge/src/emqx_bridge_v2_api.erl b/apps/emqx_bridge/src/emqx_bridge_v2_api.erl index f2a51c6cb..e6fcca50a 100644 --- a/apps/emqx_bridge/src/emqx_bridge_v2_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_v2_api.erl @@ -21,6 +21,7 @@ -include_lib("hocon/include/hoconsc.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("emqx_utils/include/emqx_utils_api.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). -import(hoconsc, [mk/2, array/1, enum/1]). -import(emqx_utils, [redact/1]). @@ -37,6 +38,8 @@ -export([ '/actions'/2, '/actions/:id'/2, + '/actions/:id/metrics'/2, + '/actions/:id/metrics/reset'/2, '/actions/:id/enable/:enable'/2, '/actions/:id/:operation'/2, '/nodes/:node/actions/:id/:operation'/2, @@ -44,8 +47,8 @@ '/action_types'/2 ]). -%% BpAPI --export([lookup_from_local_node/2]). +%% BpAPI / RPC Targets +-export([lookup_from_local_node/2, get_metrics_from_local_node/2]). -define(BRIDGE_NOT_FOUND(BRIDGE_TYPE, BRIDGE_NAME), ?NOT_FOUND( @@ -80,6 +83,10 @@ paths() -> "/actions/:id/enable/:enable", "/actions/:id/:operation", "/nodes/:node/actions/:id/:operation", + %% Caveat: metrics paths must come *after* `/:operation', otherwise minirest will + %% try to match the latter first, trying to interpret `metrics' as an operation... + "/actions/:id/metrics", + "/actions/:id/metrics/reset", "/actions_probe", "/action_types" ]. 
@@ -247,6 +254,34 @@ schema("/actions/:id") -> } } }; +schema("/actions/:id/metrics") -> + #{ + 'operationId' => '/actions/:id/metrics', + get => #{ + tags => [<<"actions">>], + summary => <<"Get action metrics">>, + description => ?DESC("desc_bridge_metrics"), + parameters => [param_path_id()], + responses => #{ + 200 => emqx_bridge_schema:metrics_fields(), + 404 => error_schema('NOT_FOUND', "Action not found") + } + } + }; +schema("/actions/:id/metrics/reset") -> + #{ + 'operationId' => '/actions/:id/metrics/reset', + put => #{ + tags => [<<"actions">>], + summary => <<"Reset action metrics">>, + description => ?DESC("desc_api6"), + parameters => [param_path_id()], + responses => #{ + 204 => <<"Reset success">>, + 404 => error_schema('NOT_FOUND', "Action not found") + } + } + }; schema("/actions/:id/enable/:enable") -> #{ 'operationId' => '/actions/:id/enable/:enable', @@ -429,6 +464,19 @@ schema("/action_types") -> end ). +'/actions/:id/metrics'(get, #{bindings := #{id := Id}}) -> + ?TRY_PARSE_ID(Id, get_metrics_from_all_nodes(BridgeType, BridgeName)). + +'/actions/:id/metrics/reset'(put, #{bindings := #{id := Id}}) -> + ?TRY_PARSE_ID( + Id, + begin + ActionType = emqx_bridge_v2:bridge_v2_type_to_connector_type(BridgeType), + ok = emqx_bridge_v2:reset_metrics(ActionType, BridgeName), + ?NO_CONTENT + end + ). + '/actions/:id/enable/:enable'(put, #{bindings := #{id := Id, enable := Enable}}) -> ?TRY_PARSE_ID( Id, @@ -570,6 +618,18 @@ lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) -> ?INTERNAL_ERROR(Reason) end. +get_metrics_from_all_nodes(ActionType, ActionName) -> + Nodes = emqx:running_nodes(), + Result = maybe_unwrap( + emqx_bridge_proto_v5:v2_get_metrics_from_all_nodes(Nodes, ActionType, ActionName) + ), + case Result of + Metrics when is_list(Metrics) -> + {200, format_bridge_metrics(lists:zip(Nodes, Metrics))}; + {error, Reason} -> + ?INTERNAL_ERROR(Reason) + end. 
+ operation_func(all, start) -> v2_start_bridge_to_all_nodes; operation_func(_Node, start) -> v2_start_bridge_to_node. @@ -720,12 +780,17 @@ aggregate_status(AllStatus) -> false -> inconsistent end. +%% RPC Target lookup_from_local_node(BridgeType, BridgeName) -> case emqx_bridge_v2:lookup(BridgeType, BridgeName) of {ok, Res} -> {ok, format_resource(Res, node())}; Error -> Error end. +%% RPC Target +get_metrics_from_local_node(ActionType, ActionName) -> + format_metrics(emqx_bridge_v2:get_metrics(ActionType, ActionName)). + %% resource format_resource( #{ @@ -751,6 +816,123 @@ format_resource( ) ). +format_metrics(#{ + counters := #{ + 'dropped' := Dropped, + 'dropped.other' := DroppedOther, + 'dropped.expired' := DroppedExpired, + 'dropped.queue_full' := DroppedQueueFull, + 'dropped.resource_not_found' := DroppedResourceNotFound, + 'dropped.resource_stopped' := DroppedResourceStopped, + 'matched' := Matched, + 'retried' := Retried, + 'late_reply' := LateReply, + 'failed' := SentFailed, + 'success' := SentSucc, + 'received' := Rcvd + }, + gauges := Gauges, + rate := #{ + matched := #{current := Rate, last5m := Rate5m, max := RateMax} + } +}) -> + Queued = maps:get('queuing', Gauges, 0), + SentInflight = maps:get('inflight', Gauges, 0), + ?METRICS( + Dropped, + DroppedOther, + DroppedExpired, + DroppedQueueFull, + DroppedResourceNotFound, + DroppedResourceStopped, + Matched, + Queued, + Retried, + LateReply, + SentFailed, + SentInflight, + SentSucc, + Rate, + Rate5m, + RateMax, + Rcvd + ); +format_metrics(_Metrics) -> + %% Empty metrics: can happen when a node joins another and a + %% bridge is not yet replicated to it, so the counters map is + %% empty. + empty_metrics(). 
+ +empty_metrics() -> + ?METRICS( + _Dropped = 0, + _DroppedOther = 0, + _DroppedExpired = 0, + _DroppedQueueFull = 0, + _DroppedResourceNotFound = 0, + _DroppedResourceStopped = 0, + _Matched = 0, + _Queued = 0, + _Retried = 0, + _LateReply = 0, + _SentFailed = 0, + _SentInflight = 0, + _SentSucc = 0, + _Rate = 0, + _Rate5m = 0, + _RateMax = 0, + _Rcvd = 0 + ). + +format_bridge_metrics(Bridges) -> + NodeMetrics = lists:filtermap( + fun + ({Node, Metrics}) when is_map(Metrics) -> + {true, #{node => Node, metrics => Metrics}}; + ({Node, _}) -> + {true, #{node => Node, metrics => empty_metrics()}} + end, + Bridges + ), + #{ + metrics => aggregate_metrics(NodeMetrics), + node_metrics => NodeMetrics + }. + +aggregate_metrics(AllMetrics) -> + InitMetrics = ?EMPTY_METRICS, + lists:foldl(fun aggregate_metrics/2, InitMetrics, AllMetrics). + +aggregate_metrics( + #{ + metrics := ?metrics( + M1, M2, M3, M4, M5, M6, M7, M8, M9, M10, M11, M12, M13, M14, M15, M16, M17 + ) + }, + ?metrics( + N1, N2, N3, N4, N5, N6, N7, N8, N9, N10, N11, N12, N13, N14, N15, N16, N17 + ) +) -> + ?METRICS( + M1 + N1, + M2 + N2, + M3 + N3, + M4 + N4, + M5 + N5, + M6 + N6, + M7 + N7, + M8 + N8, + M9 + N9, + M10 + N10, + M11 + N11, + M12 + N12, + M13 + N13, + M14 + N14, + M15 + N15, + M16 + N16, + M17 + N17 + ). + format_bridge_status_and_error(Data) -> maps:fold(fun format_resource_data/3, #{}, maps:with([status, error], Data)). diff --git a/apps/emqx_bridge/src/proto/emqx_bridge_proto_v5.erl b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v5.erl index 1417615a7..75b99f0ec 100644 --- a/apps/emqx_bridge/src/proto/emqx_bridge_proto_v5.erl +++ b/apps/emqx_bridge/src/proto/emqx_bridge_proto_v5.erl @@ -34,7 +34,8 @@ v2_start_bridge_to_node/3, v2_start_bridge_to_all_nodes/3, v2_list_bridges_on_nodes/1, - v2_lookup_from_all_nodes/3 + v2_lookup_from_all_nodes/3, + v2_get_metrics_from_all_nodes/3 ]). -include_lib("emqx/include/bpapi.hrl"). 
@@ -156,6 +157,17 @@ v2_lookup_from_all_nodes(Nodes, BridgeType, BridgeName) -> ?TIMEOUT ). +-spec v2_get_metrics_from_all_nodes([node()], key(), key()) -> + emqx_rpc:erpc_multicall(). +v2_get_metrics_from_all_nodes(Nodes, ActionType, ActionName) -> + erpc:multicall( + Nodes, + emqx_bridge_v2_api, + get_metrics_from_local_node, + [ActionType, ActionName], + ?TIMEOUT + ). + -spec v2_start_bridge_to_all_nodes([node()], key(), key()) -> emqx_rpc:erpc_multicall(). v2_start_bridge_to_all_nodes(Nodes, BridgeType, BridgeName) -> diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl index b0ac870e7..2c2dde4da 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_v2_schema.erl @@ -41,6 +41,12 @@ -export([types/0, types_sc/0]). -export([resource_opts_fields/0, resource_opts_fields/1]). +-export([ + make_producer_action_schema/1, + make_consumer_action_schema/1, + top_level_common_action_keys/0 +]). + -export_type([action_type/0]). %% Should we explicitly list them here so dialyzer may be more helpful? @@ -117,7 +123,9 @@ roots() -> end. fields(actions) -> - registered_schema_fields(). + registered_schema_fields(); +fields(resource_opts) -> + emqx_resource_schema:create_opts(_Overrides = []). registered_schema_fields() -> [ @@ -127,6 +135,8 @@ registered_schema_fields() -> desc(actions) -> ?DESC("desc_bridges_v2"); +desc(resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(_) -> undefined. @@ -176,6 +186,42 @@ examples(Method) -> SchemaModules = [Mod || {_, Mod} <- emqx_action_info:registered_schema_modules()], lists:foldl(Fun, #{}, SchemaModules). +top_level_common_action_keys() -> + [ + <<"connector">>, + <<"description">>, + <<"enable">>, + <<"local_topic">>, + <<"parameters">>, + <<"resource_opts">> + ]. 
+ +%%====================================================================================== +%% Helper functions for making HOCON Schema +%%====================================================================================== + +make_producer_action_schema(ActionParametersRef) -> + [ + {local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})} + | make_consumer_action_schema(ActionParametersRef) + ]. + +make_consumer_action_schema(ActionParametersRef) -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {connector, + mk(binary(), #{ + desc => ?DESC(emqx_connector_schema, "connector_field"), required => true + })}, + {description, emqx_schema:description_schema()}, + {parameters, ActionParametersRef}, + {resource_opts, + mk(ref(?MODULE, resource_opts), #{ + default => #{}, + desc => ?DESC(emqx_resource_schema, "resource_opts") + })} + ]. + -ifdef(TEST). -include_lib("hocon/include/hocon_types.hrl"). schema_homogeneous_test() -> @@ -195,24 +241,19 @@ schema_homogeneous_test() -> is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) -> Fields = Module:fields(TypeName), - ExpectedFieldNames = common_field_names(), - MissingFileds = lists:filter( + ExpectedFieldNames = lists:map(fun binary_to_atom/1, top_level_common_action_keys()), + MissingFields = lists:filter( fun(Name) -> lists:keyfind(Name, 1, Fields) =:= false end, ExpectedFieldNames ), - case MissingFileds of + case MissingFields of [] -> false; _ -> {true, #{ - schema_modle => Module, + schema_module => Module, type_name => TypeName, - missing_fields => MissingFileds + missing_fields => MissingFields }} end. -common_field_names() -> - [ - enable, description, local_topic, connector, resource_opts, parameters - ]. - -endif. 
diff --git a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl index 7b5208f06..e88206ccd 100644 --- a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl @@ -73,16 +73,15 @@ -define(HTTP_BRIDGE(URL), ?HTTP_BRIDGE(URL, ?BRIDGE_NAME)). -define(APPSPECS, [ - emqx_conf, emqx, + emqx_conf, emqx_auth, emqx_auth_mnesia, emqx_management, emqx_connector, emqx_bridge_http, - emqx_bridge, - {emqx_rule_engine, "rule_engine { rules {} }"}, - {emqx_bridge, "bridges {}"} + {emqx_bridge, "actions {}\n bridges {}"}, + {emqx_rule_engine, "rule_engine { rules {} }"} ]). -define(APPSPEC_DASHBOARD, @@ -120,10 +119,10 @@ end_per_suite(_Config) -> ok. init_per_group(cluster = Name, Config) -> - Nodes = [NodePrimary | _] = mk_cluster(Config), + Nodes = [NodePrimary | _] = mk_cluster(Name, Config), init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]); init_per_group(cluster_later_join = Name, Config) -> - Nodes = [NodePrimary | _] = mk_cluster(Config, #{join_to => undefined}), + Nodes = [NodePrimary | _] = mk_cluster(Name, Config, #{join_to => undefined}), init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]); init_per_group(_Name, Config) -> WorkDir = emqx_cth_suite:work_dir(Config), @@ -135,10 +134,10 @@ init_api(Config) -> {ok, App} = erpc:call(APINode, emqx_common_test_http, create_default_app, []), [{api, App} | Config]. -mk_cluster(Config) -> - mk_cluster(Config, #{}). +mk_cluster(Name, Config) -> + mk_cluster(Name, Config, #{}). 
-mk_cluster(Config, Opts) -> +mk_cluster(Name, Config, Opts) -> Node1Apps = ?APPSPECS ++ [?APPSPEC_DASHBOARD], Node2Apps = ?APPSPECS, emqx_cth_cluster:start( @@ -146,7 +145,7 @@ mk_cluster(Config, Opts) -> {emqx_bridge_api_SUITE1, Opts#{role => core, apps => Node1Apps}}, {emqx_bridge_api_SUITE2, Opts#{role => core, apps => Node2Apps}} ], - #{work_dir => emqx_cth_suite:work_dir(Config)} + #{work_dir => emqx_cth_suite:work_dir(Name, Config)} ). end_per_group(Group, Config) when @@ -162,7 +161,7 @@ init_per_testcase(t_broken_bpapi_vsn, Config) -> meck:new(emqx_bpapi, [passthrough]), meck:expect(emqx_bpapi, supported_version, 1, -1), meck:expect(emqx_bpapi, supported_version, 2, -1), - init_per_testcase(commong, Config); + init_per_testcase(common, Config); init_per_testcase(t_old_bpapi_vsn, Config) -> meck:new(emqx_bpapi, [passthrough]), meck:expect(emqx_bpapi, supported_version, 1, 1), @@ -188,6 +187,18 @@ end_per_testcase(_, Config) -> ok. clear_resources() -> + lists:foreach( + fun(#{type := Type, name := Name}) -> + ok = emqx_bridge_v2:remove(Type, Name) + end, + emqx_bridge_v2:list() + ), + lists:foreach( + fun(#{type := Type, name := Name}) -> + ok = emqx_connector:remove(Type, Name) + end, + emqx_connector:list() + ), lists:foreach( fun(#{type := Type, name := Name}) -> ok = emqx_bridge:remove(Type, Name) @@ -1314,6 +1325,7 @@ t_cluster_later_join_metrics(Config) -> BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_HTTP, Name), ?check_trace( + #{timetrap => 15_000}, begin %% Create a bridge on only one of the nodes. ?assertMatch({ok, 201, _}, request_json(post, uri(["bridges"]), BridgeParams, Config)), @@ -1325,24 +1337,28 @@ t_cluster_later_join_metrics(Config) -> }}, request_json(get, uri(["bridges", BridgeID, "metrics"]), Config) ), + + ct:print("node joining cluster"), %% Now join the other node join with the api node. 
ok = erpc:call(OtherNode, ekka, join, [PrimaryNode]), - %% Check metrics; shouldn't crash even if the bridge is not - %% ready on the node that just joined the cluster. + %% Hack / workaround for the fact that `emqx_machine_boot' doesn't restart the + %% applications, in particular `emqx_conf' doesn't restart and synchronize the + %% transaction id. It's also unclear at the moment why the equivalent test in + %% `emqx_bridge_v2_api_SUITE' doesn't need this hack. + ok = erpc:call(OtherNode, application, stop, [emqx_conf]), + ok = erpc:call(OtherNode, application, start, [emqx_conf]), + ct:print("node joined cluster"), %% assert: wait for the bridge to be ready on the other node. - fun - WaitConfSync(0) -> - throw(waiting_config_sync_timeout); - WaitConfSync(N) -> - timer:sleep(1000), - case erpc:call(OtherNode, emqx_bridge, list, []) of - [] -> WaitConfSync(N - 1); - [_] -> ok - end - end( - 60 - ), + {_, {ok, _}} = + ?wait_async_action( + {emqx_cluster_rpc, OtherNode} ! wake_up, + #{?snk_kind := cluster_rpc_caught_up, ?snk_meta := #{node := OtherNode}}, + 10_000 + ), + + %% Check metrics; shouldn't crash even if the bridge is not + %% ready on the node that just joined the cluster. ?assertMatch( {ok, 200, #{ <<"metrics">> := #{<<"success">> := _}, diff --git a/apps/emqx_bridge/test/emqx_bridge_compatible_config_tests.erl b/apps/emqx_bridge/test/emqx_bridge_compatible_config_tests.erl index 91c0a23d0..b267e9bf7 100644 --- a/apps/emqx_bridge/test/emqx_bridge_compatible_config_tests.erl +++ b/apps/emqx_bridge/test/emqx_bridge_compatible_config_tests.erl @@ -126,7 +126,7 @@ check(Conf) when is_map(Conf) -> %% erlfmt-ignore %% this is config generated from v5.0.11 webhook_v5011_hocon() -> -""" +" bridges{ webhook { the_name{ @@ -143,7 +143,7 @@ bridges{ } } } -""". +". 
full_webhook_v5011_hocon() -> "" @@ -215,7 +215,7 @@ full_webhook_v5019_hocon() -> %% erlfmt-ignore %% this is a generated from v5.0.11 mqtt_v5011_hocon() -> -""" +" bridges { mqtt { bridge_one { @@ -257,12 +257,12 @@ bridges { } } } -""". +". %% erlfmt-ignore %% a more complete version mqtt_v5011_full_hocon() -> -""" +" bridges { mqtt { bridge_one { @@ -330,4 +330,4 @@ bridges { } } } -""". +". diff --git a/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl index aa564aa9c..b5c0ec9f2 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v1_compatibility_layer_SUITE.erl @@ -60,15 +60,7 @@ init_per_testcase(_TestCase, Config) -> ets:new(fun_table_name(), [named_table, public]), %% Create a fake connector {ok, _} = emqx_connector:create(con_type(), con_name(), con_config()), - [ - {mocked_mods, [ - emqx_connector_schema, - emqx_connector_resource, - - emqx_bridge_v2 - ]} - | Config - ]. + Config. end_per_testcase(_TestCase, _Config) -> ets:delete(fun_table_name()), diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl index cf58eefde..83a857b47 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_api_SUITE.erl @@ -56,6 +56,7 @@ -define(CONNECTOR(Name), ?KAFKA_CONNECTOR(Name, ?KAFKA_BOOTSTRAP_HOST)). -define(CONNECTOR, ?CONNECTOR(?CONNECTOR_NAME)). +-define(MQTT_LOCAL_TOPIC, <<"mqtt/local/topic">>). -define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))). -define(BRIDGE_TYPE_STR, "kafka_producer"). -define(BRIDGE_TYPE, <>). @@ -93,7 +94,7 @@ <<"required_acks">> => <<"all_isr">>, <<"topic">> => <<"kafka-topic">> }, - <<"local_topic">> => <<"mqtt/local/topic">>, + <<"local_topic">> => ?MQTT_LOCAL_TOPIC, <<"resource_opts">> => #{ <<"health_check_interval">> => <<"32s">> } @@ -105,48 +106,6 @@ ). 
-define(KAFKA_BRIDGE_UPDATE(Name), ?KAFKA_BRIDGE_UPDATE(Name, ?CONNECTOR_NAME)). -%% -define(BRIDGE_TYPE_MQTT, <<"mqtt">>). -%% -define(MQTT_BRIDGE(SERVER, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_MQTT)#{ -%% <<"server">> => SERVER, -%% <<"username">> => <<"user1">>, -%% <<"password">> => <<"">>, -%% <<"proto_ver">> => <<"v5">>, -%% <<"egress">> => #{ -%% <<"remote">> => #{ -%% <<"topic">> => <<"emqx/${topic}">>, -%% <<"qos">> => <<"${qos}">>, -%% <<"retain">> => false -%% } -%% } -%% }). -%% -define(MQTT_BRIDGE(SERVER), ?MQTT_BRIDGE(SERVER, <<"mqtt_egress_test_bridge">>)). - -%% -define(BRIDGE_TYPE_HTTP, <<"kafka">>). -%% -define(HTTP_BRIDGE(URL, NAME), ?BRIDGE(NAME, ?BRIDGE_TYPE_HTTP)#{ -%% <<"url">> => URL, -%% <<"local_topic">> => <<"emqx_webhook/#">>, -%% <<"method">> => <<"post">>, -%% <<"body">> => <<"${payload}">>, -%% <<"headers">> => #{ -%% % NOTE -%% % The Pascal-Case is important here. -%% % The reason is kinda ridiculous: `emqx_bridge_resource:create_dry_run/2` converts -%% % bridge config keys into atoms, and the atom 'Content-Type' exists in the ERTS -%% % when this happens (while the 'content-type' does not). -%% <<"Content-Type">> => <<"application/json">> -%% } -%% }). -%% -define(HTTP_BRIDGE(URL), ?HTTP_BRIDGE(URL, ?BRIDGE_NAME)). - -%% -define(URL(PORT, PATH), -%% list_to_binary( -%% io_lib:format( -%% "http://localhost:~s/~s", -%% [integer_to_list(PORT), PATH] -%% ) -%% ) -%% ). - -define(APPSPECS, [ emqx_conf, emqx, @@ -166,7 +125,7 @@ all() -> [ {group, single}, - %{group, cluster_later_join}, + {group, cluster_later_join}, {group, cluster} ]. -else. 
@@ -182,7 +141,7 @@ groups() -> t_fix_broken_bridge_config ], ClusterLaterJoinOnlyTCs = [ - % t_cluster_later_join_metrics + t_cluster_later_join_metrics ], [ {single, [], AllTCs -- ClusterLaterJoinOnlyTCs}, @@ -202,9 +161,9 @@ end_per_suite(_Config) -> init_per_group(cluster = Name, Config) -> Nodes = [NodePrimary | _] = mk_cluster(Name, Config), init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]); -%% init_per_group(cluster_later_join = Name, Config) -> -%% Nodes = [NodePrimary | _] = mk_cluster(Name, Config, #{join_to => undefined}), -%% init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]); +init_per_group(cluster_later_join = Name, Config) -> + Nodes = [NodePrimary | _] = mk_cluster(Name, Config, #{join_to => undefined}), + init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]); init_per_group(Name, Config) -> WorkDir = filename:join(?config(priv_dir, Config), Name), Apps = emqx_cth_suite:start(?APPSPECS ++ [?APPSPEC_DASHBOARD], #{work_dir => WorkDir}), @@ -226,7 +185,7 @@ mk_cluster(Name, Config, Opts) -> {emqx_bridge_v2_api_SUITE_1, Opts#{role => core, apps => Node1Apps}}, {emqx_bridge_v2_api_SUITE_2, Opts#{role => core, apps => Node2Apps}} ], - #{work_dir => filename:join(?config(priv_dir, Config), Name)} + #{work_dir => emqx_cth_suite:work_dir(Name, Config)} ). end_per_group(Group, Config) when @@ -1041,6 +1000,143 @@ t_bad_name(Config) -> ), ok. 
+t_metrics(Config) -> + {ok, 200, []} = request_json(get, uri([?ROOT]), Config), + + ActionName = ?BRIDGE_NAME, + ?assertMatch( + {ok, 201, _}, + request_json( + post, + uri([?ROOT]), + ?KAFKA_BRIDGE(?BRIDGE_NAME), + Config + ) + ), + + ActionID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ActionName), + + ?assertMatch( + {ok, 200, #{ + <<"metrics">> := #{<<"matched">> := 0}, + <<"node_metrics">> := [#{<<"metrics">> := #{<<"matched">> := 0}} | _] + }}, + request_json(get, uri([?ROOT, ActionID, "metrics"]), Config) + ), + + {ok, 200, Bridge} = request_json(get, uri([?ROOT, ActionID]), Config), + ?assertNot(maps:is_key(<<"metrics">>, Bridge)), + ?assertNot(maps:is_key(<<"node_metrics">>, Bridge)), + + Body = <<"my msg">>, + _ = publish_message(?MQTT_LOCAL_TOPIC, Body, Config), + + %% check for non-empty bridge metrics + ?retry( + _Sleep0 = 200, + _Retries0 = 20, + ?assertMatch( + {ok, 200, #{ + <<"metrics">> := #{<<"matched">> := 1}, + <<"node_metrics">> := [#{<<"metrics">> := #{<<"matched">> := 1}} | _] + }}, + request_json(get, uri([?ROOT, ActionID, "metrics"]), Config) + ) + ), + + %% check for absence of metrics when listing all bridges + {ok, 200, Bridges} = request_json(get, uri([?ROOT]), Config), + ?assertNotMatch( + [ + #{ + <<"metrics">> := #{}, + <<"node_metrics">> := [_ | _] + } + ], + Bridges + ), + ok. 
+ +t_reset_metrics(Config) -> + %% assert there's no bridges at first + {ok, 200, []} = request_json(get, uri([?ROOT]), Config), + + ActionName = ?BRIDGE_NAME, + ?assertMatch( + {ok, 201, _}, + request_json( + post, + uri([?ROOT]), + ?KAFKA_BRIDGE(?BRIDGE_NAME), + Config + ) + ), + ActionID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ActionName), + + Body = <<"my msg">>, + _ = publish_message(?MQTT_LOCAL_TOPIC, Body, Config), + ?retry( + _Sleep0 = 200, + _Retries0 = 20, + ?assertMatch( + {ok, 200, #{ + <<"metrics">> := #{<<"matched">> := 1}, + <<"node_metrics">> := [#{<<"metrics">> := #{}} | _] + }}, + request_json(get, uri([?ROOT, ActionID, "metrics"]), Config) + ) + ), + + {ok, 204, <<>>} = request(put, uri([?ROOT, ActionID, "metrics", "reset"]), Config), + + ?retry( + _Sleep0 = 200, + _Retries0 = 20, + ?assertMatch( + {ok, 200, #{ + <<"metrics">> := #{<<"matched">> := 0}, + <<"node_metrics">> := [#{<<"metrics">> := #{}} | _] + }}, + request_json(get, uri([?ROOT, ActionID, "metrics"]), Config) + ) + ), + + ok. + +t_cluster_later_join_metrics(Config) -> + [PrimaryNode, OtherNode | _] = ?config(cluster_nodes, Config), + Name = ?BRIDGE_NAME, + ActionParams = ?KAFKA_BRIDGE(Name), + ActionID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), + ?check_trace( + begin + %% Create a bridge on only one of the nodes. + ?assertMatch({ok, 201, _}, request_json(post, uri([?ROOT]), ActionParams, Config)), + %% Pre-condition. + ?assertMatch( + {ok, 200, #{ + <<"metrics">> := #{<<"success">> := _}, + <<"node_metrics">> := [#{<<"metrics">> := #{}} | _] + }}, + request_json(get, uri([?ROOT, ActionID, "metrics"]), Config) + ), + %% Now join the other node join with the api node. + ok = erpc:call(OtherNode, ekka, join, [PrimaryNode]), + %% Check metrics; shouldn't crash even if the bridge is not + %% ready on the node that just joined the cluster. 
+ ?assertMatch( + {ok, 200, #{ + <<"metrics">> := #{<<"success">> := _}, + <<"node_metrics">> := [#{<<"metrics">> := #{}}, #{<<"metrics">> := #{}} | _] + }}, + request_json(get, uri([?ROOT, ActionID, "metrics"]), Config) + ), + ok + end, + [] + ), + ok. + %%% helpers listen_on_random_port() -> SockOpts = [binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}], diff --git a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl index 5cb9b043f..a6b92caaa 100644 --- a/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl +++ b/apps/emqx_bridge/test/emqx_bridge_v2_testlib.erl @@ -146,6 +146,35 @@ create_bridge(Config, Overrides) -> ct:pal("creating bridge with config: ~p", [BridgeConfig]), emqx_bridge_v2:create(BridgeType, BridgeName, BridgeConfig). +maybe_json_decode(X) -> + case emqx_utils_json:safe_decode(X, [return_maps]) of + {ok, Decoded} -> Decoded; + {error, _} -> X + end. + +request(Method, Path, Params) -> + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + case emqx_mgmt_api_test_util:request_api(Method, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + Body = maybe_json_decode(Body0), + {ok, {Status, Headers, Body}}; + {error, {Status, Headers, Body0}} -> + Body = + case emqx_utils_json:safe_decode(Body0, [return_maps]) of + {ok, Decoded0 = #{<<"message">> := Msg0}} -> + Msg = maybe_json_decode(Msg0), + Decoded0#{<<"message">> := Msg}; + {ok, Decoded0} -> + Decoded0; + {error, _} -> + Body0 + end, + {error, {Status, Headers, Body}}; + Error -> + Error + end. + list_bridges_api() -> Params = [], Path = emqx_mgmt_api_test_util:api_path(["actions"]), @@ -209,6 +238,50 @@ create_bridge_api(Config, Overrides) -> ct:pal("bridge create result: ~p", [Res]), Res. +create_connector_api(Config) -> + create_connector_api(Config, _Overrides = #{}). 
+ +create_connector_api(Config, Overrides) -> + ConnectorConfig0 = ?config(connector_config, Config), + ConnectorName = ?config(connector_name, Config), + ConnectorType = ?config(connector_type, Config), + Method = post, + Path = emqx_mgmt_api_test_util:api_path(["connectors"]), + ConnectorConfig = emqx_utils_maps:deep_merge(ConnectorConfig0, Overrides), + Params = ConnectorConfig#{<<"type">> => ConnectorType, <<"name">> => ConnectorName}, + ct:pal("creating connector (http):\n ~p", [Params]), + Res = request(Method, Path, Params), + ct:pal("connector create (http) result:\n ~p", [Res]), + Res. + +create_action_api(Config) -> + create_action_api(Config, _Overrides = #{}). + +create_action_api(Config, Overrides) -> + ActionName = ?config(action_name, Config), + ActionType = ?config(action_type, Config), + ActionConfig0 = ?config(action_config, Config), + ActionConfig = emqx_utils_maps:deep_merge(ActionConfig0, Overrides), + Params = ActionConfig#{<<"type">> => ActionType, <<"name">> => ActionName}, + Method = post, + Path = emqx_mgmt_api_test_util:api_path(["actions"]), + ct:pal("creating action (http):\n ~p", [Params]), + Res = request(Method, Path, Params), + ct:pal("action create (http) result:\n ~p", [Res]), + Res. + +get_action_api(Config) -> + ActionName = ?config(action_name, Config), + ActionType = ?config(action_type, Config), + ActionId = emqx_bridge_resource:bridge_id(ActionType, ActionName), + Params = [], + Method = get, + Path = emqx_mgmt_api_test_util:api_path(["actions", ActionId]), + ct:pal("getting action (http)"), + Res = request(Method, Path, Params), + ct:pal("get action (http) result:\n ~p", [Res]), + Res. + update_bridge_api(Config) -> update_bridge_api(Config, _Overrides = #{}). 
@@ -552,18 +625,24 @@ t_on_get_status(Config, Opts) -> _Attempts = 20, ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) ), - emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> - ct:sleep(500), - ?retry( - _Interval0 = 200, - _Attempts0 = 10, - ?assertEqual({ok, FailureStatus}, emqx_resource_manager:health_check(ResourceId)) - ) - end), - %% Check that it recovers itself. - ?retry( - _Sleep = 1_000, - _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) - ), + case ProxyHost of + undefined -> + ok; + _ -> + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?retry( + _Interval0 = 100, + _Attempts0 = 20, + ?assertEqual( + {ok, FailureStatus}, emqx_resource_manager:health_check(ResourceId) + ) + ) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ) + end, ok. diff --git a/apps/emqx_bridge_azure_event_hub/rebar.config b/apps/emqx_bridge_azure_event_hub/rebar.config index efe337029..90be538b3 100644 --- a/apps/emqx_bridge_azure_event_hub/rebar.config +++ b/apps/emqx_bridge_azure_event_hub/rebar.config @@ -2,7 +2,7 @@ {erl_opts, [debug_info]}. 
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.8.0"}}} , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}} - , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}} + , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.1"}}} , {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}} , {snappyer, "1.2.9"} , {emqx_connector, {path, "../../apps/emqx_connector"}} diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src index f1c097d29..12d0890c3 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_azure_event_hub, [ {description, "EMQX Enterprise Azure Event Hub Bridge"}, - {vsn, "0.1.4"}, + {vsn, "0.1.5"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl index 553d77326..cf733ddfd 100644 --- a/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl +++ b/apps/emqx_bridge_azure_event_hub/src/emqx_bridge_azure_event_hub.erl @@ -17,7 +17,7 @@ desc/1 ]). -%% emqx_bridge_enterprise "unofficial" API +%% `emqx_bridge_v2_schema' "unofficial" API -export([ bridge_v2_examples/1, conn_bridge_examples/1, @@ -31,8 +31,8 @@ -import(hoconsc, [mk/2, enum/1, ref/2]). --define(AEH_CONNECTOR_TYPE, azure_event_hub_producer). --define(AEH_CONNECTOR_TYPE_BIN, <<"azure_event_hub_producer">>). +-define(CONNECTOR_TYPE, azure_event_hub_producer). +-define(CONNECTOR_TYPE_BIN, <<"azure_event_hub_producer">>). 
%%------------------------------------------------------------------------------------------------- %% `hocon_schema' API @@ -42,18 +42,17 @@ namespace() -> "bridge_azure_event_hub". roots() -> ["config_producer"]. -fields("put_connector") -> +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> Fields = override( - emqx_bridge_kafka:fields("put_connector"), - connector_overrides() - ), - override_documentations(Fields); -fields("get_connector") -> - emqx_bridge_schema:status_fields() ++ - fields("post_connector"); -fields("post_connector") -> - Fields = override( - emqx_bridge_kafka:fields("post_connector"), + emqx_connector_schema:api_fields( + Field, + ?CONNECTOR_TYPE, + emqx_bridge_kafka:kafka_connector_config_fields() + ), connector_overrides() ), override_documentations(Fields); @@ -170,7 +169,7 @@ struct_names() -> bridge_v2_examples(Method) -> [ #{ - ?AEH_CONNECTOR_TYPE_BIN => #{ + ?CONNECTOR_TYPE_BIN => #{ summary => <<"Azure Event Hub Action">>, value => values({Method, bridge_v2}) } @@ -180,7 +179,7 @@ bridge_v2_examples(Method) -> connector_examples(Method) -> [ #{ - ?AEH_CONNECTOR_TYPE_BIN => #{ + ?CONNECTOR_TYPE_BIN => #{ summary => <<"Azure Event Hub Connector">>, value => values({Method, connector}) } @@ -197,6 +196,20 @@ conn_bridge_examples(Method) -> } ]. 
+values({get, connector}) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ], + actions => [<<"my_action">>] + }, + values({post, connector}) + ); values({get, AEHType}) -> maps:merge( #{ @@ -217,7 +230,7 @@ values({post, bridge_v2}) -> enable => true, connector => <<"my_azure_event_hub_producer_connector">>, name => <<"my_azure_event_hub_producer_action">>, - type => ?AEH_CONNECTOR_TYPE_BIN + type => ?CONNECTOR_TYPE_BIN } ); values({post, connector}) -> @@ -225,7 +238,7 @@ values({post, connector}) -> values(common_config), #{ name => <<"my_azure_event_hub_producer_connector">>, - type => ?AEH_CONNECTOR_TYPE_BIN, + type => ?CONNECTOR_TYPE_BIN, ssl => #{ enable => true, server_name_indication => <<"auto">>, @@ -358,7 +371,7 @@ connector_overrides() -> } ), type => mk( - ?AEH_CONNECTOR_TYPE, + ?CONNECTOR_TYPE, #{ required => true, desc => ?DESC("connector_type") @@ -414,7 +427,7 @@ bridge_v2_overrides() -> }), ssl => mk(ref(ssl_client_opts), #{default => #{<<"enable">> => true}}), type => mk( - ?AEH_CONNECTOR_TYPE, + ?CONNECTOR_TYPE, #{ required => true, desc => ?DESC("bridge_v2_type") diff --git a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_tests.erl b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_tests.erl index 92d268d20..1b135d0f7 100644 --- a/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_tests.erl +++ b/apps/emqx_bridge_azure_event_hub/test/emqx_bridge_azure_event_hub_tests.erl @@ -12,7 +12,7 @@ %% erlfmt-ignore aeh_producer_hocon() -> -""" +" bridges.azure_event_hub_producer.my_producer { enable = true authentication { @@ -62,7 +62,7 @@ bridges.azure_event_hub_producer.my_producer { server_name_indication = auto } } -""". +". 
%%=========================================================================== %% Helper functions diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src index 59661d7c0..97be100d2 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_cassandra, [ {description, "EMQX Enterprise Cassandra Bridge"}, - {vsn, "0.1.5"}, + {vsn, "0.1.6"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl index afea652ef..e29dc7931 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl @@ -70,7 +70,7 @@ cassandra_db_fields() -> {keyspace, fun keyspace/1}, {pool_size, fun emqx_connector_schema_lib:pool_size/1}, {username, fun emqx_connector_schema_lib:username/1}, - {password, fun emqx_connector_schema_lib:password/1}, + {password, emqx_connector_schema_lib:password_field()}, {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} ]. 
@@ -111,14 +111,14 @@ on_start( emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION) ), - Options = [ - {nodes, Servers}, - {keyspace, Keyspace}, - {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, - {pool_size, PoolSize} - ], - Options1 = maybe_add_opt(username, Config, Options), - Options2 = maybe_add_opt(password, Config, Options1, _IsSensitive = true), + Options = + maps:to_list(maps:with([username, password], Config)) ++ + [ + {nodes, Servers}, + {keyspace, Keyspace}, + {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, + {pool_size, PoolSize} + ], SslOpts = case maps:get(enable, SSL) of @@ -131,7 +131,7 @@ on_start( [] end, State = parse_prepare_cql(Config), - case emqx_resource_pool:start(InstId, ?MODULE, Options2 ++ SslOpts) of + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})}; {error, Reason} -> @@ -387,6 +387,7 @@ conn_opts(Opts) -> conn_opts([], Acc) -> Acc; conn_opts([{password, Password} | Opts], Acc) -> + %% TODO: teach `ecql` to accept 0-arity closures as passwords. conn_opts(Opts, [{password, emqx_secret:unwrap(Password)} | Acc]); conn_opts([Opt | Opts], Acc) -> conn_opts(Opts, [Opt | Acc]). @@ -512,19 +513,3 @@ maybe_assign_type(V) when is_integer(V) -> maybe_assign_type(V) when is_float(V) -> {double, V}; maybe_assign_type(V) -> V. - -maybe_add_opt(Key, Conf, Opts) -> - maybe_add_opt(Key, Conf, Opts, _IsSensitive = false). - -maybe_add_opt(Key, Conf, Opts, IsSensitive) -> - case Conf of - #{Key := Val} -> - [{Key, maybe_wrap(IsSensitive, Val)} | Opts]; - _ -> - Opts - end. - -maybe_wrap(false = _IsSensitive, Val) -> - Val; -maybe_wrap(true, Val) -> - emqx_secret:wrap(Val). 
diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl index fcd482b47..de306e3f0 100644 --- a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl @@ -40,10 +40,9 @@ all() -> ]. groups() -> - TCs = emqx_common_test_helpers:all(?MODULE), [ - {auth, TCs}, - {noauth, TCs} + {auth, [t_lifecycle, t_start_passfile]}, + {noauth, [t_lifecycle]} ]. cassandra_servers(CassandraHost) -> @@ -115,32 +114,37 @@ end_per_testcase(_, _Config) -> t_lifecycle(Config) -> perform_lifecycle_check( - <<"emqx_connector_cassandra_SUITE">>, + <>, cassandra_config(Config) ). -show(X) -> - erlang:display(X), - X. - -show(Label, What) -> - erlang:display({Label, What}), - What. +t_start_passfile(Config) -> + ResourceID = atom_to_binary(?FUNCTION_NAME), + PasswordFilename = filename:join(?config(priv_dir, Config), "passfile"), + ok = file:write_file(PasswordFilename, ?CASSA_PASSWORD), + InitialConfig = emqx_utils_maps:deep_merge( + cassandra_config(Config), + #{ + <<"config">> => #{ + password => iolist_to_binary(["file://", PasswordFilename]) + } + } + ), + ?assertMatch( + #{status := connected}, + create_local_resource(ResourceID, check_config(InitialConfig)) + ), + ?assertEqual( + ok, + emqx_resource:remove_local(ResourceID) + ). 
perform_lifecycle_check(ResourceId, InitialConfig) -> - {ok, #{config := CheckedConfig}} = - emqx_resource:check_config(?CASSANDRA_RESOURCE_MOD, InitialConfig), - {ok, #{ + CheckedConfig = check_config(InitialConfig), + #{ state := #{pool_name := PoolName} = State, status := InitialStatus - }} = - emqx_resource:create_local( - ResourceId, - ?CONNECTOR_RESOURCE_GROUP, - ?CASSANDRA_RESOURCE_MOD, - CheckedConfig, - #{} - ), + } = create_local_resource(ResourceId, CheckedConfig), ?assertEqual(InitialStatus, connected), % Instance should match the state and status of the just started resource {ok, ?CONNECTOR_RESOURCE_GROUP, #{ @@ -191,6 +195,21 @@ perform_lifecycle_check(ResourceId, InitialConfig) -> %% utils %%-------------------------------------------------------------------- +check_config(Config) -> + {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?CASSANDRA_RESOURCE_MOD, Config), + CheckedConfig. + +create_local_resource(ResourceId, CheckedConfig) -> + {ok, Bridge} = + emqx_resource:create_local( + ResourceId, + ?CONNECTOR_RESOURCE_GROUP, + ?CASSANDRA_RESOURCE_MOD, + CheckedConfig, + #{} + ), + Bridge. 
+ cassandra_config(Config) -> Host = ?config(cassa_host, Config), AuthOpts = maps:from_list(?config(cassa_auth_opts, Config)), diff --git a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src index 4f7519440..85c035be1 100644 --- a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src +++ b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_clickhouse, [ {description, "EMQX Enterprise ClickHouse Bridge"}, - {vsn, "0.2.3"}, + {vsn, "0.2.4"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_connector.erl b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_connector.erl index 97b855ad2..8f575dd8d 100644 --- a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_connector.erl +++ b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse_connector.erl @@ -145,7 +145,7 @@ on_start( Options = [ {url, URL}, {user, maps:get(username, Config, "default")}, - {key, emqx_secret:wrap(maps:get(password, Config, "public"))}, + {key, maps:get(password, Config, emqx_secret:wrap("public"))}, {database, DB}, {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, {pool_size, PoolSize}, @@ -243,6 +243,7 @@ connect(Options) -> URL = iolist_to_binary(emqx_http_lib:normalize(proplists:get_value(url, Options))), User = proplists:get_value(user, Options), Database = proplists:get_value(database, Options), + %% TODO: teach `clickhouse` to accept 0-arity closures as passwords. 
Key = emqx_secret:unwrap(proplists:get_value(key, Options)), Pool = proplists:get_value(pool, Options), PoolSize = proplists:get_value(pool_size, Options), diff --git a/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_connector_SUITE.erl b/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_connector_SUITE.erl index 12d678e85..e1d3149db 100644 --- a/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_connector_SUITE.erl +++ b/apps/emqx_bridge_clickhouse/test/emqx_bridge_clickhouse_connector_SUITE.erl @@ -10,10 +10,12 @@ -include("emqx_connector.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("stdlib/include/assert.hrl"). +-include_lib("common_test/include/ct.hrl"). -define(APP, emqx_bridge_clickhouse). -define(CLICKHOUSE_HOST, "clickhouse"). -define(CLICKHOUSE_RESOURCE_MOD, emqx_bridge_clickhouse_connector). +-define(CLICKHOUSE_PASSWORD, "public"). %% This test SUITE requires a running clickhouse instance. If you don't want to %% bring up the whole CI infrastuctucture with the `scripts/ct/run.sh` script @@ -57,7 +59,7 @@ init_per_suite(Config) -> clickhouse:start_link([ {url, clickhouse_url()}, {user, <<"default">>}, - {key, "public"}, + {key, ?CLICKHOUSE_PASSWORD}, {pool, tmp_pool} ]), {ok, _, _} = clickhouse:query(Conn, <<"CREATE DATABASE IF NOT EXISTS mqtt">>, #{}), @@ -92,6 +94,31 @@ t_lifecycle(_Config) -> clickhouse_config() ). 
+t_start_passfile(Config) -> + ResourceID = atom_to_binary(?FUNCTION_NAME), + PasswordFilename = filename:join(?config(priv_dir, Config), "passfile"), + ok = file:write_file(PasswordFilename, <>), + InitialConfig = clickhouse_config(#{ + password => iolist_to_binary(["file://", PasswordFilename]) + }), + {ok, #{config := ResourceConfig}} = + emqx_resource:check_config(?CLICKHOUSE_RESOURCE_MOD, InitialConfig), + ?assertMatch( + {ok, #{status := connected}}, + emqx_resource:create_local( + ResourceID, + ?CONNECTOR_RESOURCE_GROUP, + ?CLICKHOUSE_RESOURCE_MOD, + ResourceConfig, + #{} + ) + ), + ?assertEqual( + ok, + emqx_resource:remove_local(ResourceID) + ), + ok. + show(X) -> erlang:display(X), X. @@ -168,12 +195,15 @@ perform_lifecycle_check(ResourceID, InitialConfig) -> % %%------------------------------------------------------------------------------ clickhouse_config() -> + clickhouse_config(#{}). + +clickhouse_config(Overrides) -> Config = #{ auto_reconnect => true, database => <<"mqtt">>, username => <<"default">>, - password => <<"public">>, + password => <>, pool_size => 8, url => iolist_to_binary( io_lib:format( @@ -186,7 +216,7 @@ clickhouse_config() -> ), connect_timeout => <<"10s">> }, - #{<<"config">> => Config}. + #{<<"config">> => maps:merge(Config, Overrides)}. test_query_no_params() -> {query, <<"SELECT 1">>}. diff --git a/apps/emqx_bridge_confluent/BSL.txt b/apps/emqx_bridge_confluent/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_confluent/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. 
+Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. 
If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. 
Not to modify this License in any other way. diff --git a/apps/emqx_bridge_confluent/README.md b/apps/emqx_bridge_confluent/README.md new file mode 100644 index 000000000..be297a14b --- /dev/null +++ b/apps/emqx_bridge_confluent/README.md @@ -0,0 +1,27 @@ +# Confluent Data Integration Bridge + +This application houses the Confluent Producer data integration bridge for EMQX Enterprise +Edition. It provides the means to connect to Confluent Producer and publish messages to +it via the Kafka protocol. + +Currently, our Kafka Producer library (`wolff`) has its own `replayq` buffering +implementation, so this bridge does not require buffer workers from `emqx_resource`. It +implements the connection management and interaction without need for a separate connector +app, since it's not used by authentication and authorization applications. + +# Documentation links + +For more information about Kafka interface for Confluent, please see [the official +docs](https://docs.confluent.io/cloud/current/overview.html). + +# Configurations + +Please see [Ingest Data into Confluent](https://docs.emqx.com/en/enterprise/v5.3/data-integration/data-bridge-confluent.html) for more detailed info. + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_confluent/docker-ct b/apps/emqx_bridge_confluent/docker-ct new file mode 100644 index 000000000..5288ee246 --- /dev/null +++ b/apps/emqx_bridge_confluent/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +kafka diff --git a/apps/emqx_bridge_confluent/rebar.config b/apps/emqx_bridge_confluent/rebar.config new file mode 100644 index 000000000..0c0c2eece --- /dev/null +++ b/apps/emqx_bridge_confluent/rebar.config @@ -0,0 +1,15 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. 
+{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.8.0"}}} + , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}} + , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.1"}}} + , {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}} + , {snappyer, "1.2.9"} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_confluent]} +]}. diff --git a/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src new file mode 100644 index 000000000..64d1dec09 --- /dev/null +++ b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent.app.src @@ -0,0 +1,15 @@ +{application, emqx_bridge_confluent, [ + {description, "EMQX Enterprise Confluent Connector and Action"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + emqx_resource, + telemetry, + wolff + ]}, + {env, [{emqx_action_info_modules, [emqx_bridge_confluent_producer_action_info]}]}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer.erl b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer.erl new file mode 100644 index 000000000..a43a8a285 --- /dev/null +++ b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer.erl @@ -0,0 +1,419 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_confluent_producer). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-behaviour(hocon_schema). +-behaviour(emqx_connector_resource). 
+ +%% `hocon_schema' API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% emqx_bridge_enterprise "unofficial" API +-export([ + bridge_v2_examples/1, + connector_examples/1 +]). + +%% emqx_connector_resource behaviour callbacks +-export([connector_config/1]). + +-export([host_opts/0]). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-define(CONNECTOR_TYPE, confluent_producer). +-define(CONNECTOR_TYPE_BIN, <<"confluent_producer">>). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> "confluent". + +roots() -> ["config_producer"]. + +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + Fields = override( + emqx_connector_schema:api_fields( + Field, + ?CONNECTOR_TYPE, + emqx_bridge_kafka:kafka_connector_config_fields() + ), + connector_overrides() + ), + override_documentations(Fields); +fields("put_bridge_v2") -> + Fields = override( + emqx_bridge_kafka:fields("put_bridge_v2"), + bridge_v2_overrides() + ), + override_documentations(Fields); +fields("get_bridge_v2") -> + emqx_bridge_schema:status_fields() ++ + fields("post_bridge_v2"); +fields("post_bridge_v2") -> + Fields = override( + emqx_bridge_kafka:fields("post_bridge_v2"), + bridge_v2_overrides() + ), + override_documentations(Fields); +fields("config_bridge_v2") -> + fields(actions); +fields("config_connector") -> + Fields = override( + emqx_bridge_kafka:fields("config_connector"), + connector_overrides() + ), + override_documentations(Fields); +fields(auth_username_password) -> + Fields = override( + emqx_bridge_kafka:fields(auth_username_password), + auth_overrides() + ), + override_documentations(Fields); +fields(ssl_client_opts) -> + Fields = override( + emqx_bridge_kafka:ssl_client_opts_fields(), + ssl_overrides() + ), + 
override_documentations(Fields); +fields(producer_kafka_opts) -> + Fields = override( + emqx_bridge_kafka:fields(producer_kafka_opts), + kafka_producer_overrides() + ), + override_documentations(Fields); +fields(kafka_message) -> + Fields0 = emqx_bridge_kafka:fields(kafka_message), + Fields = proplists:delete(timestamp, Fields0), + override_documentations(Fields); +fields(action) -> + {confluent_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_confluent_producer, actions)), + #{ + desc => <<"Confluent Actions Config">>, + required => false + } + )}; +fields(actions) -> + Fields = + override( + emqx_bridge_kafka:producer_opts(action), + bridge_v2_overrides() + ) ++ + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {connector, + mk(binary(), #{ + desc => ?DESC(emqx_connector_schema, "connector_field"), required => true + })}, + {description, emqx_schema:description_schema()} + ], + override_documentations(Fields); +fields(Method) -> + Fields = emqx_bridge_kafka:fields(Method), + override_documentations(Fields). + +desc("config") -> + ?DESC("desc_config"); +desc("config_connector") -> + ?DESC("desc_config"); +desc("get_" ++ Type) when Type == "connector"; Type == "bridge_v2" -> + ["Configuration for Confluent using `GET` method."]; +desc("put_" ++ Type) when Type == "connector"; Type == "bridge_v2" -> + ["Configuration for Confluent using `PUT` method."]; +desc("post_" ++ Type) when Type == "connector"; Type == "bridge_v2" -> + ["Configuration for Confluent using `POST` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +struct_names() -> + [ + auth_username_password, + kafka_message, + producer_kafka_opts, + actions, + ssl_client_opts + ]. + +bridge_v2_examples(Method) -> + [ + #{ + ?CONNECTOR_TYPE_BIN => #{ + summary => <<"Confluent Action">>, + value => values({Method, bridge_v2}) + } + } + ]. 
+ +connector_examples(Method) -> + [ + #{ + ?CONNECTOR_TYPE_BIN => #{ + summary => <<"Confluent Connector">>, + value => values({Method, connector}) + } + } + ]. + +values({get, connector}) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ], + actions => [<<"my_action">>] + }, + values({post, connector}) + ); +values({get, ConfluentType}) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + }, + values({post, ConfluentType}) + ); +values({post, bridge_v2}) -> + maps:merge( + values(action), + #{ + enable => true, + connector => <<"my_confluent_producer_connector">>, + name => <<"my_confluent_producer_action">>, + type => ?CONNECTOR_TYPE_BIN + } + ); +values({post, connector}) -> + maps:merge( + values(common_config), + #{ + name => <<"my_confluent_producer_connector">>, + type => ?CONNECTOR_TYPE_BIN, + ssl => #{ + enable => true, + server_name_indication => <<"auto">>, + verify => <<"verify_none">>, + versions => [<<"tlsv1.3">>, <<"tlsv1.2">>] + } + } + ); +values({put, connector}) -> + values(common_config); +values({put, bridge_v2}) -> + maps:merge( + values(action), + #{ + enable => true, + connector => <<"my_confluent_producer_connector">> + } + ); +values(common_config) -> + #{ + authentication => #{ + password => <<"******">> + }, + bootstrap_hosts => <<"xyz.sa-east1.gcp.confluent.cloud:9092">>, + connect_timeout => <<"5s">>, + enable => true, + metadata_request_timeout => <<"4s">>, + min_metadata_refresh_interval => <<"3s">>, + socket_opts => #{ + sndbuf => <<"1024KB">>, + recbuf => <<"1024KB">>, + nodelay => true, + tcp_keepalive => <<"none">> + } + }; +values(action) -> + #{ + parameters => #{ + topic => <<"topic">>, + message => #{ + key => <<"${.clientid}">>, + value => <<"${.}">> + }, + max_batch_bytes => <<"896KB">>, + partition_strategy => <<"random">>, + required_acks 
=> <<"all_isr">>, + partition_count_refresh_interval => <<"60s">>, + kafka_headers => <<"${.pub_props}">>, + kafka_ext_headers => [ + #{ + kafka_ext_header_key => <<"clientid">>, + kafka_ext_header_value => <<"${clientid}">> + }, + #{ + kafka_ext_header_key => <<"topic">>, + kafka_ext_header_value => <<"${topic}">> + } + ], + kafka_header_value_encode_mode => none, + max_inflight => 10, + buffer => #{ + mode => <<"hybrid">>, + per_partition_limit => <<"2GB">>, + segment_bytes => <<"100MB">>, + memory_overload_protection => true + } + }, + local_topic => <<"mqtt/local/topic">> + }. + +%%------------------------------------------------------------------------------------------------- +%% `emqx_connector_resource' API +%%------------------------------------------------------------------------------------------------- + +connector_config(Config) -> + %% Default port for Confluent is 9092 + BootstrapHosts0 = maps:get(bootstrap_hosts, Config), + BootstrapHosts = emqx_schema:parse_servers( + BootstrapHosts0, + ?MODULE:host_opts() + ), + Config#{bootstrap_hosts := BootstrapHosts}. + +%%------------------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------------------- + +ref(Name) -> + hoconsc:ref(?MODULE, Name). + +connector_overrides() -> + #{ + authentication => + mk( + ref(auth_username_password), + #{ + default => #{}, + required => true, + desc => ?DESC("authentication") + } + ), + bootstrap_hosts => + mk( + binary(), + #{ + required => true, + validator => emqx_schema:servers_validator( + host_opts(), _Required = true + ) + } + ), + ssl => mk( + ref(ssl_client_opts), + #{ + required => true, + default => #{<<"enable">> => true} + } + ), + type => mk( + ?CONNECTOR_TYPE, + #{ + required => true, + desc => ?DESC("connector_type") + } + ) + }. 
+ +bridge_v2_overrides() -> + #{ + parameters => + mk(ref(producer_kafka_opts), #{ + required => true, + validator => fun emqx_bridge_kafka:producer_strategy_key_validator/1 + }), + ssl => mk(ref(ssl_client_opts), #{ + default => #{ + <<"enable">> => true, + <<"verify">> => <<"verify_none">> + } + }), + type => mk( + ?CONNECTOR_TYPE, + #{ + required => true, + desc => ?DESC("bridge_v2_type") + } + ) + }. +auth_overrides() -> + #{ + mechanism => + mk(plain, #{ + required => true, + default => plain, + importance => ?IMPORTANCE_HIDDEN + }), + username => mk(binary(), #{required => true}), + password => emqx_connector_schema_lib:password_field(#{required => true}) + }. + +%% Kafka has SSL disabled by default +%% Confluent must use SSL +ssl_overrides() -> + #{ + "enable" => mk(true, #{default => true, importance => ?IMPORTANCE_HIDDEN}), + "verify" => mk(verify_none, #{default => verify_none, importance => ?IMPORTANCE_HIDDEN}) + }. + +kafka_producer_overrides() -> + #{ + message => mk(ref(kafka_message), #{}) + }. + +override_documentations(Fields) -> + lists:map( + fun({Name, Sc}) -> + case hocon_schema:field_schema(Sc, desc) of + ?DESC(emqx_bridge_kafka, Key) -> + %% to please dialyzer... + Override = #{type => hocon_schema:field_schema(Sc, type), desc => ?DESC(Key)}, + {Name, hocon_schema:override(Sc, Override)}; + _ -> + {Name, Sc} + end + end, + Fields + ). + +override(Fields, Overrides) -> + lists:map( + fun({Name, Sc}) -> + case maps:find(Name, Overrides) of + {ok, Override} -> + {Name, hocon_schema:override(Sc, Override)}; + error -> + {Name, Sc} + end + end, + Fields + ). + +host_opts() -> + #{default_port => 9092}. 
diff --git a/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer_action_info.erl b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer_action_info.erl new file mode 100644 index 000000000..f19920075 --- /dev/null +++ b/apps/emqx_bridge_confluent/src/emqx_bridge_confluent_producer_action_info.erl @@ -0,0 +1,19 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_confluent_producer_action_info). + +-behaviour(emqx_action_info). + +-export([ + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +action_type_name() -> confluent_producer. + +connector_type_name() -> confluent_producer. + +schema_module() -> emqx_bridge_confluent_producer. diff --git a/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl new file mode 100644 index 000000000..2977f72cf --- /dev/null +++ b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_producer_SUITE.erl @@ -0,0 +1,343 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_confluent_producer_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(BRIDGE_TYPE, confluent_producer). +-define(BRIDGE_TYPE_BIN, <<"confluent_producer">>). +-define(CONNECTOR_TYPE, confluent_producer). +-define(CONNECTOR_TYPE_BIN, <<"confluent_producer">>). +-define(KAFKA_BRIDGE_TYPE, kafka_producer). + +-import(emqx_common_test_helpers, [on_exit/1]). 
+ +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + KafkaHost = os:getenv("KAFKA_SASL_SSL_HOST", "toxiproxy.emqx.net"), + KafkaPort = list_to_integer(os:getenv("KAFKA_SASL_SSL_PORT", "9295")), + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + ProxyName = "kafka_sasl_ssl", + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + case emqx_common_test_helpers:is_tcp_server_available(KafkaHost, KafkaPort) of + true -> + Apps = emqx_cth_suite:start( + [ + emqx_conf, + emqx, + emqx_management, + emqx_resource, + emqx_bridge_confluent, + emqx_bridge, + emqx_rule_engine, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} + ], + #{work_dir => ?config(priv_dir, Config)} + ), + {ok, Api} = emqx_common_test_http:create_default_app(), + [ + {tc_apps, Apps}, + {api, Api}, + {proxy_name, ProxyName}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {kafka_host, KafkaHost}, + {kafka_port, KafkaPort} + | Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_kafka); + _ -> + {skip, no_kafka} + end + end. + +end_per_suite(Config) -> + Apps = ?config(tc_apps, Config), + emqx_cth_suite:stop(Apps), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). 
+ +common_init_per_testcase(TestCase, Config) -> + ct:timetrap(timer:seconds(60)), + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + emqx_config:delete_override_conf_files(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + Name = iolist_to_binary([atom_to_binary(TestCase), UniqueNum]), + KafkaHost = ?config(kafka_host, Config), + KafkaPort = ?config(kafka_port, Config), + KafkaTopic = Name, + ConnectorConfig = connector_config(Name, KafkaHost, KafkaPort), + {BridgeConfig, ExtraConfig} = bridge_config(Name, Name, KafkaTopic), + ensure_topic(Config, KafkaTopic, _Opts = #{}), + ok = snabbkaffe:start_trace(), + ExtraConfig ++ + [ + {connector_type, ?CONNECTOR_TYPE}, + {connector_name, Name}, + {connector_config, ConnectorConfig}, + {bridge_type, ?BRIDGE_TYPE}, + {bridge_name, Name}, + {bridge_config, BridgeConfig} + | Config + ]. + +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. 
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +connector_config(Name, KafkaHost, KafkaPort) -> + InnerConfigMap0 = + #{ + <<"enable">> => true, + <<"bootstrap_hosts">> => iolist_to_binary([KafkaHost, ":", integer_to_binary(KafkaPort)]), + <<"authentication">> => + #{ + <<"mechanism">> => <<"plain">>, + <<"username">> => <<"emqxuser">>, + <<"password">> => <<"password">> + }, + <<"connect_timeout">> => <<"5s">>, + <<"socket_opts">> => + #{ + <<"nodelay">> => true, + <<"recbuf">> => <<"1024KB">>, + <<"sndbuf">> => <<"1024KB">>, + <<"tcp_keepalive">> => <<"none">> + }, + <<"ssl">> => + #{ + <<"cacertfile">> => shared_secret(client_cacertfile), + <<"certfile">> => shared_secret(client_certfile), + <<"keyfile">> => shared_secret(client_keyfile), + <<"ciphers">> => [], + <<"depth">> => 10, + <<"enable">> => true, + <<"hibernate_after">> => <<"5s">>, + <<"log_level">> => <<"notice">>, + <<"reuse_sessions">> => true, + <<"secure_renegotiate">> => true, + <<"server_name_indication">> => <<"disable">>, + %% currently, it seems our CI kafka certs fail peer verification + <<"verify">> => <<"verify_none">>, + <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>] + } + }, + InnerConfigMap = serde_roundtrip(InnerConfigMap0), + parse_and_check_connector_config(InnerConfigMap, Name). + +parse_and_check_connector_config(InnerConfigMap, Name) -> + TypeBin = ?CONNECTOR_TYPE_BIN, + RawConf = #{<<"connectors">> => #{TypeBin => #{Name => InnerConfigMap}}}, + #{<<"connectors">> := #{TypeBin := #{Name := Config}}} = + hocon_tconf:check_plain(emqx_connector_schema, RawConf, #{ + required => false, atom_key => false + }), + ct:pal("parsed config: ~p", [Config]), + InnerConfigMap. 
+ +bridge_config(Name, ConnectorId, KafkaTopic) -> + InnerConfigMap0 = + #{ + <<"enable">> => true, + <<"connector">> => ConnectorId, + <<"parameters">> => + #{ + <<"buffer">> => + #{ + <<"memory_overload_protection">> => true, + <<"mode">> => <<"memory">>, + <<"per_partition_limit">> => <<"2GB">>, + <<"segment_bytes">> => <<"100MB">> + }, + <<"compression">> => <<"no_compression">>, + <<"kafka_header_value_encode_mode">> => <<"none">>, + <<"max_batch_bytes">> => <<"896KB">>, + <<"max_inflight">> => <<"10">>, + <<"message">> => + #{ + <<"key">> => <<"${.clientid}">>, + <<"value">> => <<"${.}">> + }, + <<"partition_count_refresh_interval">> => <<"60s">>, + <<"partition_strategy">> => <<"random">>, + <<"query_mode">> => <<"async">>, + <<"required_acks">> => <<"all_isr">>, + <<"sync_query_timeout">> => <<"5s">>, + <<"topic">> => KafkaTopic + }, + <<"local_topic">> => <<"t/confluent">> + %%, + }, + InnerConfigMap = serde_roundtrip(InnerConfigMap0), + ExtraConfig = + [{kafka_topic, KafkaTopic}], + {parse_and_check_bridge_config(InnerConfigMap, Name), ExtraConfig}. + +%% check it serializes correctly +serde_roundtrip(InnerConfigMap0) -> + IOList = hocon_pp:do(InnerConfigMap0, #{}), + {ok, InnerConfigMap} = hocon:binary(IOList), + InnerConfigMap. + +parse_and_check_bridge_config(InnerConfigMap, Name) -> + TypeBin = ?BRIDGE_TYPE_BIN, + RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}}, + hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}), + InnerConfigMap. + +shared_secret_path() -> + os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret"). + +shared_secret(client_keyfile) -> + filename:join([shared_secret_path(), "client.key"]); +shared_secret(client_certfile) -> + filename:join([shared_secret_path(), "client.crt"]); +shared_secret(client_cacertfile) -> + filename:join([shared_secret_path(), "ca.crt"]); +shared_secret(rig_keytab) -> + filename:join([shared_secret_path(), "rig.keytab"]). 
+ +ensure_topic(Config, KafkaTopic, Opts) -> + KafkaHost = ?config(kafka_host, Config), + KafkaPort = ?config(kafka_port, Config), + NumPartitions = maps:get(num_partitions, Opts, 3), + Endpoints = [{KafkaHost, KafkaPort}], + TopicConfigs = [ + #{ + name => KafkaTopic, + num_partitions => NumPartitions, + replication_factor => 1, + assignments => [], + configs => [] + } + ], + RequestConfig = #{timeout => 5_000}, + ConnConfig = + #{ + ssl => emqx_tls_lib:to_client_opts( + #{ + keyfile => shared_secret(client_keyfile), + certfile => shared_secret(client_certfile), + cacertfile => shared_secret(client_cacertfile), + verify => verify_none, + enable => true + } + ), + sasl => {plain, <<"emqxuser">>, <<"password">>} + }, + case brod:create_topics(Endpoints, TopicConfigs, RequestConfig, ConnConfig) of + ok -> ok; + {error, topic_already_exists} -> ok + end. + +make_message() -> + Time = erlang:unique_integer(), + BinTime = integer_to_binary(Time), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + #{ + clientid => BinTime, + payload => Payload, + timestamp => Time + }. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_start_stop(Config) -> + emqx_bridge_v2_testlib:t_start_stop(Config, kafka_producer_stopped), + ok. + +t_create_via_http(Config) -> + emqx_bridge_v2_testlib:t_create_via_http(Config), + ok. + +t_on_get_status(Config) -> + emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}), + ok. + +t_sync_query(Config) -> + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, + fun make_message/0, + fun(Res) -> ?assertEqual(ok, Res) end, + emqx_bridge_kafka_impl_producer_sync_query + ), + ok. 
+ +t_same_name_confluent_kafka_bridges(Config) -> + BridgeName = ?config(bridge_name, Config), + TracePoint = emqx_bridge_kafka_impl_producer_sync_query, + %% creates the AEH bridge and check it's working + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, + fun make_message/0, + fun(Res) -> ?assertEqual(ok, Res) end, + TracePoint + ), + + %% then create a Kafka bridge with same name and delete it after creation + ConfigKafka0 = lists:keyreplace(bridge_type, 1, Config, {bridge_type, ?KAFKA_BRIDGE_TYPE}), + ConfigKafka = lists:keyreplace( + connector_type, 1, ConfigKafka0, {connector_type, ?KAFKA_BRIDGE_TYPE} + ), + ok = emqx_bridge_v2_testlib:t_create_via_http(ConfigKafka), + + AehResourceId = emqx_bridge_v2_testlib:resource_id(Config), + KafkaResourceId = emqx_bridge_v2_testlib:resource_id(ConfigKafka), + %% check that both bridges are healthy + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(AehResourceId)), + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(KafkaResourceId)), + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_connector:disable_enable(disable, ?KAFKA_BRIDGE_TYPE, BridgeName), + #{?snk_kind := kafka_producer_stopped}, + 5_000 + ) + ), + % check that AEH bridge is still working + ?check_trace( + begin + BridgeId = emqx_bridge_v2_testlib:bridge_id(Config), + Message = {BridgeId, make_message()}, + ?assertEqual(ok, emqx_resource:simple_sync_query(AehResourceId, Message)), + ok + end, + fun(Trace) -> + ?assertMatch([#{instance_id := AehResourceId}], ?of_kind(TracePoint, Trace)) + end + ), + ok. diff --git a/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_tests.erl b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_tests.erl new file mode 100644 index 000000000..a7efebf89 --- /dev/null +++ b/apps/emqx_bridge_confluent/test/emqx_bridge_confluent_tests.erl @@ -0,0 +1,179 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. 
All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_confluent_tests). + +-include_lib("eunit/include/eunit.hrl"). + +%%=========================================================================== +%% Data Section +%%=========================================================================== + +%% erlfmt-ignore +confluent_producer_action_hocon() -> +" +actions.confluent_producer.my_producer { + enable = true + connector = my_connector + parameters { + buffer { + memory_overload_protection = false + mode = memory + per_partition_limit = 2GB + segment_bytes = 100MB + } + compression = no_compression + kafka_header_value_encode_mode = none + max_batch_bytes = 896KB + max_inflight = 10 + message { + key = \"${.clientid}\" + value = \"${.}\" + } + partition_count_refresh_interval = 60s + partition_strategy = random + query_mode = async + required_acks = all_isr + sync_query_timeout = 5s + topic = test + } + local_topic = \"t/confluent\" +} +". + +confluent_producer_connector_hocon() -> + "" + "\n" + "connectors.confluent_producer.my_producer {\n" + " enable = true\n" + " authentication {\n" + " username = \"user\"\n" + " password = \"xxx\"\n" + " }\n" + " bootstrap_hosts = \"xyz.sa-east1.gcp.confluent.cloud:9092\"\n" + " connect_timeout = 5s\n" + " metadata_request_timeout = 5s\n" + " min_metadata_refresh_interval = 3s\n" + " socket_opts {\n" + " recbuf = 1024KB\n" + " sndbuf = 1024KB\n" + " tcp_keepalive = none\n" + " }\n" + "}\n" + "". + +%%=========================================================================== +%% Helper functions +%%=========================================================================== + +parse(Hocon) -> + {ok, Conf} = hocon:binary(Hocon), + Conf. + +check(SchemaMod, Conf) when is_map(Conf) -> + hocon_tconf:check_plain(SchemaMod, Conf). + +check_action(Conf) when is_map(Conf) -> + check(emqx_bridge_v2_schema, Conf). 
+ +check_connector(Conf) when is_map(Conf) -> + check(emqx_connector_schema, Conf). + +-define(validation_error(SchemaMod, Reason, Value), + {SchemaMod, [ + #{ + kind := validation_error, + reason := Reason, + value := Value + } + ]} +). +-define(action_validation_error(Reason, Value), + ?validation_error(emqx_bridge_v2_schema, Reason, Value) +). +-define(connector_validation_error(Reason, Value), + ?validation_error(emqx_connector_schema, Reason, Value) +). + +-define(ok_config(RootKey, Cfg), #{ + RootKey := + #{ + <<"confluent_producer">> := + #{ + <<"my_producer">> := + Cfg + } + } +}). +-define(ok_connector_config(Cfg), ?ok_config(<<"connectors">>, Cfg)). +-define(ok_action_config(Cfg), ?ok_config(<<"actions">>, Cfg)). + +%%=========================================================================== +%% Test cases +%%=========================================================================== + +confluent_producer_connector_test_() -> + %% ensure this module is loaded when testing only this file + _ = emqx_bridge_enterprise:module_info(), + BaseConf = parse(confluent_producer_connector_hocon()), + Override = fun(Cfg) -> + emqx_utils_maps:deep_merge( + BaseConf, + #{ + <<"connectors">> => + #{ + <<"confluent_producer">> => + #{<<"my_producer">> => Cfg} + } + } + ) + end, + [ + {"base config", + ?_assertMatch( + ?ok_connector_config( + #{ + <<"authentication">> := #{ + <<"mechanism">> := plain + }, + <<"ssl">> := #{ + <<"enable">> := true, + <<"verify">> := verify_none + } + } + ), + check_connector(BaseConf) + )}, + {"ssl disabled", + ?_assertThrow( + ?connector_validation_error(#{expected := "true"}, "false"), + check_connector(Override(#{<<"ssl">> => #{<<"enable">> => <<"false">>}})) + )}, + {"bad authn mechanism: scram sha256", + ?_assertThrow( + ?connector_validation_error(#{expected := "plain"}, "scram_sha_256"), + check_connector( + Override(#{<<"authentication">> => #{<<"mechanism">> => <<"scram_sha_256">>}}) + ) + )}, + {"bad authn mechanism: scram 
sha512", + ?_assertThrow( + ?connector_validation_error(#{expected := "plain"}, "scram_sha_512"), + check_connector( + Override(#{<<"authentication">> => #{<<"mechanism">> => <<"scram_sha_512">>}}) + ) + )} + ]. + +confluent_producer_action_test_() -> + %% ensure this module is loaded when testing only this file + _ = emqx_bridge_enterprise:module_info(), + BaseConf = parse(confluent_producer_action_hocon()), + [ + {"base config", + ?_assertMatch( + ?ok_action_config(_), + check_action(BaseConf) + )} + ]. diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src index ed5078432..a4b372056 100644 --- a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src +++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_dynamo, [ {description, "EMQX Enterprise Dynamo Bridge"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl index 0d62845fd..9cdb8886c 100644 --- a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl +++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector.erl @@ -45,12 +45,10 @@ fields(config) -> #{required => true, desc => ?DESC("aws_access_key_id")} )}, {aws_secret_access_key, - mk( - binary(), + emqx_schema_secret:mk( #{ required => true, - desc => ?DESC("aws_secret_access_key"), - sensitive => true + desc => ?DESC("aws_secret_access_key") } )}, {pool_size, fun emqx_connector_schema_lib:pool_size/1}, @@ -89,7 +87,7 @@ on_start( host => Host, port => Port, aws_access_key_id => to_str(AccessKeyID), - aws_secret_access_key => to_str(SecretAccessKey), + aws_secret_access_key => SecretAccessKey, schema => Schema }}, {pool_size, PoolSize} @@ -182,9 +180,8 @@ do_query( end. 
connect(Opts) -> - Options = proplists:get_value(config, Opts), - {ok, _Pid} = Result = emqx_bridge_dynamo_connector_client:start_link(Options), - Result. + Config = proplists:get_value(config, Opts), + {ok, _Pid} = emqx_bridge_dynamo_connector_client:start_link(Config). parse_template(Config) -> Templates = diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector_client.erl b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector_client.erl index 1b379298f..1cb326cf7 100644 --- a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector_client.erl +++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo_connector_client.erl @@ -20,8 +20,7 @@ handle_cast/2, handle_info/2, terminate/2, - code_change/3, - format_status/2 + code_change/3 ]). -ifdef(TEST). @@ -62,11 +61,13 @@ start_link(Options) -> %% Initialize dynamodb data bridge init(#{ aws_access_key_id := AccessKeyID, - aws_secret_access_key := SecretAccessKey, + aws_secret_access_key := Secret, host := Host, port := Port, schema := Schema }) -> + %% TODO: teach `erlcloud` to to accept 0-arity closures as passwords. + SecretAccessKey = to_str(emqx_secret:unwrap(Secret)), erlcloud_ddb2:configure(AccessKeyID, SecretAccessKey, Host, Port, Schema), {ok, #{}}. @@ -101,13 +102,6 @@ terminate(_Reason, _State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. --spec format_status( - Opt :: normal | terminate, - Status :: list() -) -> Status :: term(). -format_status(_Opt, Status) -> - Status. - %%%=================================================================== %%% Internal functions %%%=================================================================== @@ -184,3 +178,8 @@ convert2binary(Value) when is_list(Value) -> unicode:characters_to_binary(Value); convert2binary(Value) when is_map(Value) -> emqx_utils_json:encode(Value). + +to_str(List) when is_list(List) -> + List; +to_str(Bin) when is_binary(Bin) -> + erlang:binary_to_list(Bin). 
diff --git a/apps/emqx_bridge_dynamo/test/emqx_bridge_dynamo_SUITE.erl b/apps/emqx_bridge_dynamo/test/emqx_bridge_dynamo_SUITE.erl
index 9490e6455..936d2d506 100644
--- a/apps/emqx_bridge_dynamo/test/emqx_bridge_dynamo_SUITE.erl
+++ b/apps/emqx_bridge_dynamo/test/emqx_bridge_dynamo_SUITE.erl
@@ -22,8 +22,6 @@
 -define(BATCH_SIZE, 10).
 -define(PAYLOAD, <<"HELLO">>).
 
--define(GET_CONFIG(KEY__, CFG__), proplists:get_value(KEY__, CFG__)).
-
 %% How to run it locally (all commands are run in $PROJ_ROOT dir):
 %% run ct in docker container
 %% run script:
@@ -84,7 +82,9 @@ end_per_group(_Group, _Config) ->
     ok.
 
 init_per_suite(Config) ->
-    Config.
+    SecretFile = filename:join(?config(priv_dir, Config), "secret"),
+    ok = file:write_file(SecretFile, <<?SECRET_ACCESS_KEY>>),
+    [{dynamo_secretfile, SecretFile} | Config].
 
 end_per_suite(_Config) ->
     emqx_mgmt_api_test_util:end_suite(),
@@ -158,32 +158,35 @@ common_init(ConfigT) ->
     end.
 
 dynamo_config(BridgeType, Config) ->
-    Port = integer_to_list(?GET_CONFIG(port, Config)),
-    Url = "http://" ++ ?GET_CONFIG(host, Config) ++ ":" ++ Port,
+    Host = ?config(host, Config),
+    Port = ?config(port, Config),
     Name = atom_to_binary(?MODULE),
-    BatchSize = ?GET_CONFIG(batch_size, Config),
-    QueryMode = ?GET_CONFIG(query_mode, Config),
+    BatchSize = ?config(batch_size, Config),
+    QueryMode = ?config(query_mode, Config),
+    SecretFile = ?config(dynamo_secretfile, Config),
     ConfigString =
         io_lib:format(
-            "bridges.~s.~s {\n"
-            "  enable = true\n"
-            "  url = ~p\n"
-            "  table = ~p\n"
-            "  aws_access_key_id = ~p\n"
-            "  aws_secret_access_key = ~p\n"
-            "  resource_opts = {\n"
-            "    request_ttl = 500ms\n"
-            "    batch_size = ~b\n"
-            "    query_mode = ~s\n"
-            "  }\n"
-            "}",
+            "bridges.~s.~s {"
+            "\n   enable = true"
+            "\n   url = \"http://~s:~p\""
+            "\n   table = ~p"
+            "\n   aws_access_key_id = ~p"
+            "\n   aws_secret_access_key = ~p"
+            "\n   resource_opts = {"
+            "\n     request_ttl = 500ms"
+            "\n     batch_size = ~b"
+            "\n     query_mode = ~s"
+            "\n   }"
+            "\n }",
            [
                BridgeType,
                Name,
-                Url,
+                Host,
+                Port,
                ?TABLE,
                ?ACCESS_KEY_ID,
-                ?SECRET_ACCESS_KEY,
+                %% NOTE: using file-based secrets with HOCON configs
+                "file://" ++ SecretFile,
                BatchSize,
                QueryMode
            ]
@@ -252,8 +255,8 @@ delete_table(_Config) ->
     erlcloud_ddb2:delete_table(?TABLE_BIN).
 
 setup_dynamo(Config) ->
-    Host = ?GET_CONFIG(host, Config),
-    Port = ?GET_CONFIG(port, Config),
+    Host = ?config(host, Config),
+    Port = ?config(port, Config),
     erlcloud_ddb2:configure(?ACCESS_KEY_ID, ?SECRET_ACCESS_KEY, Host, Port, ?SCHEMA).
 
 directly_setup_dynamo() ->
@@ -313,7 +316,9 @@ t_setup_via_http_api_and_publish(Config) ->
     PgsqlConfig0 = ?config(dynamo_config, Config),
     PgsqlConfig = PgsqlConfig0#{
         <<"name">> => Name,
-        <<"type">> => BridgeType
+        <<"type">> => BridgeType,
+        %% NOTE: using literal secret with HTTP API requests.
+        <<"aws_secret_access_key">> => <<?SECRET_ACCESS_KEY>>
     },
     ?assertMatch(
         {ok, _},
@@ -400,7 +405,7 @@ t_simple_query(Config) ->
     ),
     Request = {get_item, {<<"id">>, <<"not_exists">>}},
     Result = query_resource(Config, Request),
-    case ?GET_CONFIG(batch_size, Config) of
+    case ?config(batch_size, Config) of
         ?BATCH_SIZE ->
             ?assertMatch({error, {unrecoverable_error, {invalid_request, _}}}, Result);
         1 ->
diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src
index d4c16e13c..59a02c190 100644
--- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src
+++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src
@@ -1,6 +1,6 @@
 {application, emqx_bridge_gcp_pubsub, [
     {description, "EMQX Enterprise GCP Pub/Sub Bridge"},
-    {vsn, "0.1.10"},
+    {vsn, "0.1.11"},
     {registered, []},
     {applications, [
         kernel,
@@ -8,7 +8,7 @@
         emqx_resource,
         ehttpc
     ]},
-    {env, []},
+    {env, [{emqx_action_info_modules, [emqx_bridge_gcp_pubsub_producer_action_info]}]},
     {modules, []},
     {links, []}
 ]}.
diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_client.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_client.erl index eeceb0c43..454c0d7ea 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_client.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_client.erl @@ -134,7 +134,7 @@ start( -spec stop(resource_id()) -> ok | {error, term()}. stop(ResourceId) -> - ?tp(gcp_pubsub_stop, #{resource_id => ResourceId}), + ?tp(gcp_pubsub_stop, #{instance_id => ResourceId, resource_id => ResourceId}), ?SLOG(info, #{ msg => "stopping_gcp_pubsub_bridge", connector => ResourceId diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_producer.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_producer.erl index cd7568001..487118b3e 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_producer.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_impl_producer.erl @@ -8,23 +8,30 @@ -include_lib("emqx_resource/include/emqx_resource.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). --type config() :: #{ - attributes_template := [#{key := binary(), value := binary()}], +-type connector_config() :: #{ connect_timeout := emqx_schema:duration_ms(), max_retries := non_neg_integer(), - ordering_key_template := binary(), - payload_template := binary(), - pubsub_topic := binary(), resource_opts := #{request_ttl := infinity | emqx_schema:duration_ms(), any() => term()}, - service_account_json := emqx_bridge_gcp_pubsub_client:service_account_json(), - any() => term() + service_account_json := emqx_bridge_gcp_pubsub_client:service_account_json() }. 
--type state() :: #{ - attributes_template := #{emqx_placeholder:tmpl_token() => emqx_placeholder:tmpl_token()}, +-type action_config() :: #{ + parameters := #{ + attributes_template := [#{key := binary(), value := binary()}], + ordering_key_template := binary(), + payload_template := binary(), + pubsub_topic := binary() + }, + resource_opts := #{request_ttl := infinity | emqx_schema:duration_ms(), any() => term()} +}. +-type connector_state() :: #{ client := emqx_bridge_gcp_pubsub_client:state(), + installed_actions := #{action_resource_id() => action_state()}, + project_id := emqx_bridge_gcp_pubsub_client:project_id() +}. +-type action_state() :: #{ + attributes_template := #{emqx_placeholder:tmpl_token() => emqx_placeholder:tmpl_token()}, ordering_key_template := emqx_placeholder:tmpl_token(), payload_template := emqx_placeholder:tmpl_token(), - project_id := emqx_bridge_gcp_pubsub_client:project_id(), pubsub_topic := binary() }. -type headers() :: emqx_bridge_gcp_pubsub_client:headers(). @@ -41,7 +48,11 @@ on_query_async/4, on_batch_query/3, on_batch_query_async/4, - on_get_status/2 + on_get_status/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_get_channel_status/3 ]). -export([reply_delegator/2]). @@ -54,53 +65,45 @@ callback_mode() -> async_if_possible. query_mode(_Config) -> async. --spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}. +-spec on_start(connector_resource_id(), connector_config()) -> + {ok, connector_state()} | {error, term()}. 
on_start(InstanceId, Config0) -> ?SLOG(info, #{ msg => "starting_gcp_pubsub_bridge", config => Config0 }), Config = maps:update_with(service_account_json, fun emqx_utils_maps:binary_key_map/1, Config0), - #{ - attributes_template := AttributesTemplate, - ordering_key_template := OrderingKeyTemplate, - payload_template := PayloadTemplate, - pubsub_topic := PubSubTopic, - service_account_json := #{<<"project_id">> := ProjectId} - } = Config, + #{service_account_json := #{<<"project_id">> := ProjectId}} = Config, case emqx_bridge_gcp_pubsub_client:start(InstanceId, Config) of {ok, Client} -> State = #{ client => Client, - attributes_template => preproc_attributes(AttributesTemplate), - ordering_key_template => emqx_placeholder:preproc_tmpl(OrderingKeyTemplate), - payload_template => emqx_placeholder:preproc_tmpl(PayloadTemplate), - project_id => ProjectId, - pubsub_topic => PubSubTopic + installed_actions => #{}, + project_id => ProjectId }, {ok, State}; Error -> Error end. --spec on_stop(resource_id(), state()) -> ok | {error, term()}. +-spec on_stop(connector_resource_id(), connector_state()) -> ok | {error, term()}. on_stop(InstanceId, _State) -> emqx_bridge_gcp_pubsub_client:stop(InstanceId). --spec on_get_status(resource_id(), state()) -> connected | disconnected. +-spec on_get_status(connector_resource_id(), connector_state()) -> connected | disconnected. on_get_status(_InstanceId, #{client := Client} = _State) -> emqx_bridge_gcp_pubsub_client:get_status(Client). -spec on_query( - resource_id(), - {send_message, map()}, - state() + connector_resource_id(), + {message_tag(), map()}, + connector_state() ) -> {ok, map()} | {error, {recoverable_error, term()}} | {error, term()}. 
-on_query(ResourceId, {send_message, Selected}, State) -> - Requests = [{send_message, Selected}], +on_query(ResourceId, {MessageTag, Selected}, State) -> + Requests = [{MessageTag, Selected}], ?TRACE( "QUERY_SYNC", "gcp_pubsub_received", @@ -109,24 +112,25 @@ on_query(ResourceId, {send_message, Selected}, State) -> do_send_requests_sync(State, Requests, ResourceId). -spec on_query_async( - resource_id(), - {send_message, map()}, + connector_resource_id(), + {message_tag(), map()}, {ReplyFun :: function(), Args :: list()}, - state() + connector_state() ) -> {ok, pid()} | {error, no_pool_worker_available}. -on_query_async(ResourceId, {send_message, Selected}, ReplyFunAndArgs, State) -> - Requests = [{send_message, Selected}], +on_query_async(ResourceId, {MessageTag, Selected}, ReplyFunAndArgs, State) -> + Requests = [{MessageTag, Selected}], ?TRACE( "QUERY_ASYNC", "gcp_pubsub_received", #{requests => Requests, connector => ResourceId, state => State} ), + ?tp(gcp_pubsub_producer_async, #{instance_id => ResourceId, requests => Requests}), do_send_requests_async(State, Requests, ReplyFunAndArgs). -spec on_batch_query( - resource_id(), - [{send_message, map()}], - state() + connector_resource_id(), + [{message_tag(), map()}], + connector_state() ) -> {ok, map()} | {error, {recoverable_error, term()}} @@ -140,10 +144,10 @@ on_batch_query(ResourceId, Requests, State) -> do_send_requests_sync(State, Requests, ResourceId). -spec on_batch_query_async( - resource_id(), - [{send_message, map()}], + connector_resource_id(), + [{message_tag(), map()}], {ReplyFun :: function(), Args :: list()}, - state() + connector_state() ) -> {ok, pid()} | {error, no_pool_worker_available}. 
on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) -> ?TRACE( @@ -151,32 +155,92 @@ on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) -> "gcp_pubsub_received", #{requests => Requests, connector => ResourceId, state => State} ), + ?tp(gcp_pubsub_producer_async, #{instance_id => ResourceId, requests => Requests}), do_send_requests_async(State, Requests, ReplyFunAndArgs). +-spec on_add_channel( + connector_resource_id(), + connector_state(), + action_resource_id(), + action_config() +) -> + {ok, connector_state()}. +on_add_channel(_ConnectorResId, ConnectorState0, ActionId, ActionConfig) -> + #{installed_actions := InstalledActions0} = ConnectorState0, + ChannelState = install_channel(ActionConfig), + InstalledActions = InstalledActions0#{ActionId => ChannelState}, + ConnectorState = ConnectorState0#{installed_actions := InstalledActions}, + {ok, ConnectorState}. + +-spec on_remove_channel( + connector_resource_id(), + connector_state(), + action_resource_id() +) -> + {ok, connector_state()}. +on_remove_channel(_ConnectorResId, ConnectorState0, ActionId) -> + #{installed_actions := InstalledActions0} = ConnectorState0, + InstalledActions = maps:remove(ActionId, InstalledActions0), + ConnectorState = ConnectorState0#{installed_actions := InstalledActions}, + {ok, ConnectorState}. + +-spec on_get_channels(connector_resource_id()) -> + [{action_resource_id(), action_config()}]. +on_get_channels(ConnectorResId) -> + emqx_bridge_v2:get_channels_for_connector(ConnectorResId). + +-spec on_get_channel_status(connector_resource_id(), action_resource_id(), connector_state()) -> + health_check_status(). +on_get_channel_status(_ConnectorResId, _ChannelId, _ConnectorState) -> + %% Should we check the underlying client? Same as on_get_status? + ?status_connected. 
+ %%------------------------------------------------------------------------------------------------- %% Helper fns %%------------------------------------------------------------------------------------------------- +%% TODO: check if topic exists ("unhealthy target") +install_channel(ActionConfig) -> + #{ + parameters := #{ + attributes_template := AttributesTemplate, + ordering_key_template := OrderingKeyTemplate, + payload_template := PayloadTemplate, + pubsub_topic := PubSubTopic + } + } = ActionConfig, + #{ + attributes_template => preproc_attributes(AttributesTemplate), + ordering_key_template => emqx_placeholder:preproc_tmpl(OrderingKeyTemplate), + payload_template => emqx_placeholder:preproc_tmpl(PayloadTemplate), + pubsub_topic => PubSubTopic + }. + -spec do_send_requests_sync( - state(), - [{send_message, map()}], + connector_state(), + [{message_tag(), map()}], resource_id() ) -> {ok, status_code(), headers()} | {ok, status_code(), headers(), body()} | {error, {recoverable_error, term()}} | {error, term()}. -do_send_requests_sync(State, Requests, InstanceId) -> - #{client := Client} = State, +do_send_requests_sync(ConnectorState, Requests, InstanceId) -> + ?tp(gcp_pubsub_producer_sync, #{instance_id => InstanceId, requests => Requests}), + #{client := Client} = ConnectorState, + %% is it safe to assume the tag is the same??? And not empty??? 
+ [{MessageTag, _} | _] = Requests, + #{installed_actions := InstalledActions} = ConnectorState, + ChannelState = maps:get(MessageTag, InstalledActions), Payloads = lists:map( - fun({send_message, Selected}) -> - encode_payload(State, Selected) + fun({_MessageTag, Selected}) -> + encode_payload(ChannelState, Selected) end, Requests ), Body = to_pubsub_request(Payloads), - Path = publish_path(State), + Path = publish_path(ConnectorState, ChannelState), Method = post, Request = {prepared_request, {Method, Path, Body}}, Result = emqx_bridge_gcp_pubsub_client:query_sync(Request, Client), @@ -184,21 +248,25 @@ do_send_requests_sync(State, Requests, InstanceId) -> handle_result(Result, Request, QueryMode, InstanceId). -spec do_send_requests_async( - state(), - [{send_message, map()}], + connector_state(), + [{message_tag(), map()}], {ReplyFun :: function(), Args :: list()} ) -> {ok, pid()} | {error, no_pool_worker_available}. -do_send_requests_async(State, Requests, ReplyFunAndArgs0) -> - #{client := Client} = State, +do_send_requests_async(ConnectorState, Requests, ReplyFunAndArgs0) -> + #{client := Client} = ConnectorState, + %% is it safe to assume the tag is the same??? And not empty??? + [{MessageTag, _} | _] = Requests, + #{installed_actions := InstalledActions} = ConnectorState, + ChannelState = maps:get(MessageTag, InstalledActions), Payloads = lists:map( - fun({send_message, Selected}) -> - encode_payload(State, Selected) + fun({_MessageTag, Selected}) -> + encode_payload(ChannelState, Selected) end, Requests ), Body = to_pubsub_request(Payloads), - Path = publish_path(State), + Path = publish_path(ConnectorState, ChannelState), Method = post, Request = {prepared_request, {Method, Path, Body}}, ReplyFunAndArgs = {fun ?MODULE:reply_delegator/2, [ReplyFunAndArgs0]}, @@ -206,18 +274,18 @@ do_send_requests_async(State, Requests, ReplyFunAndArgs0) -> Request, ReplyFunAndArgs, Client ). 
--spec encode_payload(state(), Selected :: map()) -> +-spec encode_payload(action_state(), Selected :: map()) -> #{ data := binary(), attributes => #{binary() => binary()}, 'orderingKey' => binary() }. -encode_payload(State, Selected) -> +encode_payload(ActionState, Selected) -> #{ attributes_template := AttributesTemplate, ordering_key_template := OrderingKeyTemplate, payload_template := PayloadTemplate - } = State, + } = ActionState, Data = render_payload(PayloadTemplate, Selected), OrderingKey = render_key(OrderingKeyTemplate, Selected), Attributes = proc_attributes(AttributesTemplate, Selected), @@ -307,13 +375,8 @@ proc_attributes(AttributesTemplate, Selected) -> to_pubsub_request(Payloads) -> emqx_utils_json:encode(#{messages => Payloads}). --spec publish_path(state()) -> binary(). -publish_path( - _State = #{ - project_id := ProjectId, - pubsub_topic := PubSubTopic - } -) -> +-spec publish_path(connector_state(), action_state()) -> binary(). +publish_path(#{project_id := ProjectId}, #{pubsub_topic := PubSubTopic}) -> <<"/v1/projects/", ProjectId/binary, "/topics/", PubSubTopic/binary, ":publish">>. handle_result({error, Reason}, _Request, QueryMode, ResourceId) when diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_action_info.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_action_info.erl new file mode 100644 index 000000000..6b5391b09 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_action_info.erl @@ -0,0 +1,46 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_gcp_pubsub_producer_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0, + bridge_v1_config_to_action_config/2 +]). 
+ +bridge_v1_type_name() -> gcp_pubsub. + +action_type_name() -> gcp_pubsub_producer. + +connector_type_name() -> gcp_pubsub_producer. + +schema_module() -> emqx_bridge_gcp_pubsub_producer_schema. + +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + CommonActionKeys = emqx_bridge_v2_schema:top_level_common_action_keys(), + ParamsKeys = producer_action_parameters_field_keys(), + Config1 = maps:with(CommonActionKeys, BridgeV1Config), + Params = maps:with(ParamsKeys, BridgeV1Config), + Config1#{ + <<"connector">> => ConnectorName, + <<"parameters">> => Params + }. + +%%------------------------------------------------------------------------------------------ +%% Internal helper fns +%%------------------------------------------------------------------------------------------ + +producer_action_parameters_field_keys() -> + [ + to_bin(K) + || {K, _} <- emqx_bridge_gcp_pubsub_producer_schema:fields(action_parameters) + ]. + +to_bin(L) when is_list(L) -> list_to_binary(L); +to_bin(A) when is_atom(A) -> atom_to_binary(A, utf8). diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl new file mode 100644 index 000000000..a4c939d7a --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_producer_schema.erl @@ -0,0 +1,238 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_gcp_pubsub_producer_schema). + +-import(hoconsc, [mk/2, ref/2]). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +%% `hocon_schema' API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% `emqx_bridge_v2_schema' "unofficial" API +-export([ + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 +]). 
+ +-define(CONNECTOR_TYPE, gcp_pubsub_producer). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> + "gcp_pubsub_producer". + +roots() -> + []. + +%%========================================= +%% Action fields +%%========================================= +fields(action) -> + {gcp_pubsub_producer, + mk( + hoconsc:map(name, ref(?MODULE, producer_action)), + #{ + desc => <<"GCP PubSub Producer Action Config">>, + required => false + } + )}; +fields(producer_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + mk( + ref(?MODULE, action_parameters), + #{ + required => true, + desc => ?DESC(producer_action) + } + ) + ); +fields(action_parameters) -> + UnsupportedFields = [local_topic], + lists:filter( + fun({Key, _Schema}) -> not lists:member(Key, UnsupportedFields) end, + emqx_bridge_gcp_pubsub:fields(producer) + ); +%%========================================= +%% Connector fields +%%========================================= +fields("config_connector") -> + %% FIXME + emqx_connector_schema:common_fields() ++ + connector_config_fields(); +%%========================================= +%% HTTP API fields: action +%%========================================= +fields("get_bridge_v2") -> + emqx_bridge_schema:status_fields() ++ fields("post_bridge_v2"); +fields("post_bridge_v2") -> + [type_field(), name_field() | fields("put_bridge_v2")]; +fields("put_bridge_v2") -> + fields(producer_action); +%%========================================= +%% HTTP API fields: connector +%%========================================= +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, connector_config_fields()). 
+ +connector_config_fields() -> + emqx_bridge_gcp_pubsub:fields(connector_config) ++ + emqx_resource_schema:fields("resource_opts"). + +desc("config_connector") -> + ?DESC("config_connector"); +desc(action_parameters) -> + ?DESC(action_parameters); +desc(producer_action) -> + ?DESC(producer_action); +desc(_Name) -> + undefined. + +type_field() -> + {type, mk(gcp_pubsub_producer, #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. + +%%------------------------------------------------------------------------------------------------- +%% `emqx_bridge_v2_schema' "unofficial" API +%%------------------------------------------------------------------------------------------------- + +bridge_v2_examples(Method) -> + [ + #{ + <<"gcp_pubsub_producer">> => #{ + summary => <<"GCP PubSub Producer Action">>, + value => action_example(Method) + } + } + ]. + +connector_examples(Method) -> + [ + #{ + <<"gcp_pubsub_producer">> => #{ + summary => <<"GCP PubSub Producer Connector">>, + value => connector_example(Method) + } + } + ]. + +conn_bridge_examples(Method) -> + emqx_bridge_gcp_pubsub:conn_bridge_examples(Method). 
+ +action_example(post) -> + maps:merge( + action_example(put), + #{ + type => <<"gcp_pubsub_producer">>, + name => <<"my_action">> + } + ); +action_example(get) -> + maps:merge( + action_example(put), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + } + ); +action_example(put) -> + #{ + enable => true, + connector => <<"my_connector_name">>, + description => <<"My action">>, + local_topic => <<"local/topic">>, + resource_opts => + #{batch_size => 5}, + parameters => + #{ + pubsub_topic => <<"mytopic">>, + ordering_key_template => <<"${payload.ok}">>, + payload_template => <<"${payload}">>, + attributes_template => + [ + #{ + key => <<"${payload.attrs.k}">>, + value => <<"${payload.attrs.v}">> + } + ] + } + }. + +connector_example(get) -> + maps:merge( + connector_example(post), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ], + actions => [<<"my_action">>] + } + ); +connector_example(post) -> + maps:merge( + connector_example(put), + #{ + type => <<"gcp_pubsub_producer">>, + name => <<"my_connector">> + } + ); +connector_example(put) -> + #{ + enable => true, + connect_timeout => <<"10s">>, + pool_size => 8, + pipelining => 100, + max_retries => 2, + resource_opts => #{request_ttl => <<"60s">>}, + service_account_json => + #{ + auth_provider_x509_cert_url => + <<"https://www.googleapis.com/oauth2/v1/certs">>, + auth_uri => + <<"https://accounts.google.com/o/oauth2/auth">>, + client_email => + <<"test@myproject.iam.gserviceaccount.com">>, + client_id => <<"123812831923812319190">>, + client_x509_cert_url => + << + "https://www.googleapis.com/robot/v1/" + "metadata/x509/test%40myproject.iam.gserviceaccount.com" + >>, + private_key => + << + "-----BEGIN PRIVATE KEY-----\n" + "MIIEvQI..." 
+ >>, + private_key_id => <<"kid">>, + project_id => <<"myproject">>, + token_uri => + <<"https://oauth2.googleapis.com/token">>, + type => <<"service_account">> + } + }. diff --git a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_producer_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_producer_SUITE.erl index acfe3df8b..f65b80f90 100644 --- a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_producer_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_producer_SUITE.erl @@ -13,8 +13,12 @@ -include_lib("jose/include/jose_jwt.hrl"). -include_lib("jose/include/jose_jws.hrl"). --define(BRIDGE_TYPE, gcp_pubsub). --define(BRIDGE_TYPE_BIN, <<"gcp_pubsub">>). +-define(ACTION_TYPE, gcp_pubsub_producer). +-define(ACTION_TYPE_BIN, <<"gcp_pubsub_producer">>). +-define(CONNECTOR_TYPE, gcp_pubsub_producer). +-define(CONNECTOR_TYPE_BIN, <<"gcp_pubsub_producer">>). +-define(BRIDGE_V1_TYPE, gcp_pubsub). +-define(BRIDGE_V1_TYPE_BIN, <<"gcp_pubsub">>). -import(emqx_common_test_helpers, [on_exit/1]). @@ -141,19 +145,24 @@ end_per_testcase(_TestCase, _Config) -> generate_config(Config0) -> #{ - name := Name, + name := ActionName, config_string := ConfigString, pubsub_config := PubSubConfig, service_account_json := ServiceAccountJSON } = gcp_pubsub_config(Config0), - ResourceId = emqx_bridge_resource:resource_id(?BRIDGE_TYPE_BIN, Name), - BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, Name), + %% FIXME + %% `emqx_bridge_resource:resource_id' requires an existing connector in the config..... 
+ ConnectorName = <<"connector_", ActionName/binary>>, + ConnectorResourceId = <<"connector:", ?CONNECTOR_TYPE_BIN/binary, ":", ConnectorName/binary>>, + ActionResourceId = emqx_bridge_v2:id(?ACTION_TYPE_BIN, ActionName, ConnectorName), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_V1_TYPE_BIN, ActionName), [ - {gcp_pubsub_name, Name}, + {gcp_pubsub_name, ActionName}, {gcp_pubsub_config, PubSubConfig}, {gcp_pubsub_config_string, ConfigString}, {service_account_json, ServiceAccountJSON}, - {resource_id, ResourceId}, + {connector_resource_id, ConnectorResourceId}, + {action_resource_id, ActionResourceId}, {bridge_id, BridgeId} | Config0 ]. @@ -168,7 +177,7 @@ delete_all_bridges() -> ). delete_bridge(Config) -> - Type = ?BRIDGE_TYPE, + Type = ?BRIDGE_V1_TYPE, Name = ?config(gcp_pubsub_name, Config), ct:pal("deleting bridge ~p", [{Type, Name}]), emqx_bridge:remove(Type, Name). @@ -177,7 +186,7 @@ create_bridge(Config) -> create_bridge(Config, _GCPPubSubConfigOverrides = #{}). create_bridge(Config, GCPPubSubConfigOverrides) -> - TypeBin = ?BRIDGE_TYPE_BIN, + TypeBin = ?BRIDGE_V1_TYPE_BIN, Name = ?config(gcp_pubsub_name, Config), GCPPubSubConfig0 = ?config(gcp_pubsub_config, Config), GCPPubSubConfig = emqx_utils_maps:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides), @@ -190,7 +199,7 @@ create_bridge_http(Config) -> create_bridge_http(Config, _GCPPubSubConfigOverrides = #{}). 
create_bridge_http(Config, GCPPubSubConfigOverrides) -> - TypeBin = ?BRIDGE_TYPE_BIN, + TypeBin = ?BRIDGE_V1_TYPE_BIN, Name = ?config(gcp_pubsub_name, Config), GCPPubSubConfig0 = ?config(gcp_pubsub_config, Config), GCPPubSubConfig = emqx_utils_maps:deep_merge(GCPPubSubConfig0, GCPPubSubConfigOverrides), @@ -225,7 +234,7 @@ create_bridge_http(Config, GCPPubSubConfigOverrides) -> create_rule_and_action_http(Config) -> GCPPubSubName = ?config(gcp_pubsub_name, Config), - BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, GCPPubSubName), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_V1_TYPE_BIN, GCPPubSubName), Params = #{ enable => true, sql => <<"SELECT * FROM \"t/topic\"">>, @@ -382,9 +391,14 @@ assert_metrics(ExpectedMetrics, ResourceId) -> CurrentMetrics = current_metrics(ResourceId), TelemetryTable = get(telemetry_table), RecordedEvents = ets:tab2list(TelemetryTable), - ?assertEqual(ExpectedMetrics, Metrics, #{ - current_metrics => CurrentMetrics, recorded_events => RecordedEvents - }), + ?retry( + _Sleep0 = 300, + _Attempts = 20, + ?assertEqual(ExpectedMetrics, Metrics, #{ + current_metrics => CurrentMetrics, + recorded_events => RecordedEvents + }) + ), ok. assert_empty_metrics(ResourceId) -> @@ -535,8 +549,30 @@ install_telemetry_handler(TestCase) -> end), Tid. +mk_res_id_filter(ResourceId) -> + fun(Event) -> + case Event of + #{metadata := #{resource_id := ResId}} when ResId =:= ResourceId -> + true; + _ -> + false + end + end. + wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) -> - Events = receive_all_events(GaugeName, Timeout), + wait_until_gauge_is(#{ + gauge_name => GaugeName, + expected => ExpectedValue, + timeout => Timeout + }). 
+ +wait_until_gauge_is(#{} = Opts) -> + GaugeName = maps:get(gauge_name, Opts), + ExpectedValue = maps:get(expected, Opts), + Timeout = maps:get(timeout, Opts), + MaxEvents = maps:get(max_events, Opts, 10), + FilterFn = maps:get(filter_fn, Opts, fun(_Event) -> true end), + Events = receive_all_events(GaugeName, Timeout, MaxEvents, FilterFn), case length(Events) > 0 andalso lists:last(Events) of #{measurements := #{gauge_set := ExpectedValue}} -> ok; @@ -550,15 +586,36 @@ wait_until_gauge_is(GaugeName, ExpectedValue, Timeout) -> ct:pal("no ~p gauge events received!", [GaugeName]) end. -receive_all_events(EventName, Timeout) -> - receive_all_events(EventName, Timeout, _MaxEvents = 10, _Count = 0, _Acc = []). +receive_all_events(EventName, Timeout, MaxEvents, FilterFn) -> + receive_all_events(EventName, Timeout, MaxEvents, FilterFn, _Count = 0, _Acc = []). -receive_all_events(_EventName, _Timeout, MaxEvents, Count, Acc) when Count >= MaxEvents -> +receive_all_events(_EventName, _Timeout, MaxEvents, _FilterFn, Count, Acc) when + Count >= MaxEvents +-> lists:reverse(Acc); -receive_all_events(EventName, Timeout, MaxEvents, Count, Acc) -> +receive_all_events(EventName, Timeout, MaxEvents, FilterFn, Count, Acc) -> receive {telemetry, #{name := [_, _, EventName]} = Event} -> - receive_all_events(EventName, Timeout, MaxEvents, Count + 1, [Event | Acc]) + case FilterFn(Event) of + true -> + receive_all_events( + EventName, + Timeout, + MaxEvents, + FilterFn, + Count + 1, + [Event | Acc] + ); + false -> + receive_all_events( + EventName, + Timeout, + MaxEvents, + FilterFn, + Count, + Acc + ) + end after Timeout -> lists:reverse(Acc) end. 
@@ -597,14 +654,14 @@ wait_n_events(TelemetryTable, ResourceId, NEvents, Timeout, EventName) -> %%------------------------------------------------------------------------------ t_publish_success(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), ServiceAccountJSON = ?config(service_account_json, Config), TelemetryTable = ?config(telemetry_table, Config), Topic = <<"t/topic">>, ?assertMatch({ok, _}, create_bridge(Config)), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), Payload = <<"payload">>, Message = emqx_message:make(Topic, Payload), emqx:publish(Message), @@ -620,7 +677,7 @@ t_publish_success(Config) -> DecodedMessages ), %% to avoid test flakiness - wait_telemetry_event(TelemetryTable, success, ResourceId), + wait_telemetry_event(TelemetryTable, success, ActionResourceId), wait_until_gauge_is(queuing, 0, 500), wait_until_gauge_is(inflight, 0, 500), assert_metrics( @@ -633,7 +690,7 @@ t_publish_success(Config) -> retried => 0, success => 1 }, - ResourceId + ActionResourceId ), ok. @@ -662,12 +719,12 @@ t_publish_success_infinity_timeout(Config) -> ok. 
t_publish_success_local_topic(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), ServiceAccountJSON = ?config(service_account_json, Config), TelemetryTable = ?config(telemetry_table, Config), LocalTopic = <<"local/topic">>, {ok, _} = create_bridge(Config, #{<<"local_topic">> => LocalTopic}), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), Payload = <<"payload">>, Message = emqx_message:make(LocalTopic, Payload), emqx:publish(Message), @@ -682,7 +739,7 @@ t_publish_success_local_topic(Config) -> DecodedMessages ), %% to avoid test flakiness - wait_telemetry_event(TelemetryTable, success, ResourceId), + wait_telemetry_event(TelemetryTable, success, ActionResourceId), wait_until_gauge_is(queuing, 0, 500), wait_until_gauge_is(inflight, 0, 500), assert_metrics( @@ -695,7 +752,7 @@ t_publish_success_local_topic(Config) -> retried => 0, success => 1 }, - ResourceId + ActionResourceId ), ok. @@ -704,7 +761,7 @@ t_create_via_http(Config) -> ok. 
t_publish_templated(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), ServiceAccountJSON = ?config(service_account_json, Config), TelemetryTable = ?config(telemetry_table, Config), Topic = <<"t/topic">>, @@ -721,7 +778,7 @@ t_publish_templated(Config) -> ), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), Payload = <<"payload">>, Message = emqx_message:set_header( @@ -747,7 +804,7 @@ t_publish_templated(Config) -> DecodedMessages ), %% to avoid test flakiness - wait_telemetry_event(TelemetryTable, success, ResourceId), + wait_telemetry_event(TelemetryTable, success, ActionResourceId), wait_until_gauge_is(queuing, 0, 500), wait_until_gauge_is(inflight, 0, 500), assert_metrics( @@ -760,7 +817,7 @@ t_publish_templated(Config) -> retried => 0, success => 1 }, - ResourceId + ActionResourceId ), ok. @@ -774,7 +831,7 @@ t_publish_success_batch(Config) -> end. 
test_publish_success_batch(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), ServiceAccountJSON = ?config(service_account_json, Config), TelemetryTable = ?config(telemetry_table, Config), Topic = <<"t/topic">>, @@ -796,7 +853,7 @@ test_publish_success_batch(Config) -> ), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), NumMessages = BatchSize * 2, Messages = [emqx_message:make(Topic, integer_to_binary(N)) || N <- lists:seq(1, NumMessages)], %% publish in parallel to avoid each client blocking and then @@ -822,7 +879,7 @@ test_publish_success_batch(Config) -> wait_telemetry_event( TelemetryTable, success, - ResourceId, + ActionResourceId, #{timeout => 15_000, n_events => NumMessages} ), wait_until_gauge_is(queuing, 0, _Timeout = 400), @@ -837,7 +894,7 @@ test_publish_success_batch(Config) -> retried => 0, success => NumMessages }, - ResourceId + ActionResourceId ), ok. @@ -1045,7 +1102,7 @@ t_jose_other_error(Config) -> fun(Res, Trace) -> ?assertMatch({ok, _}, Res), ?assertMatch( - [#{error := {invalid_private_key, {unknown, error}}}], + [#{error := {invalid_private_key, {unknown, error}}} | _], ?of_kind(gcp_pubsub_connector_startup_error, Trace) ), ok @@ -1054,7 +1111,7 @@ t_jose_other_error(Config) -> ok. t_publish_econnrefused(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), %% set pipelining to 1 so that one of the 2 requests is `pending' %% in ehttpc. {ok, _} = create_bridge( @@ -1071,7 +1128,7 @@ t_publish_econnrefused(Config) -> do_econnrefused_or_timeout_test(Config, econnrefused). 
t_publish_timeout(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), %% set pipelining to 1 so that one of the 2 requests is `pending' %% in ehttpc. also, we set the batch size to 1 to also ensure the %% requests are done separately. @@ -1079,12 +1136,13 @@ t_publish_timeout(Config) -> <<"pipelining">> => 1, <<"resource_opts">> => #{ <<"batch_size">> => 1, - <<"resume_interval">> => <<"1s">> + <<"resume_interval">> => <<"1s">>, + <<"metrics_flush_interval">> => <<"700ms">> } }), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), TestPid = self(), TimeoutHandler = fun(Req0, State) -> @@ -1107,7 +1165,8 @@ t_publish_timeout(Config) -> do_econnrefused_or_timeout_test(Config, timeout). do_econnrefused_or_timeout_test(Config, Error) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), + ConnectorResourceId = ?config(connector_resource_id, Config), TelemetryTable = ?config(telemetry_table, Config), Topic = <<"t/topic">>, Payload = <<"payload">>, @@ -1156,9 +1215,9 @@ do_econnrefused_or_timeout_test(Config, Error) -> case Error of econnrefused -> case ?of_kind(gcp_pubsub_request_failed, Trace) of - [#{reason := Error, connector := ResourceId} | _] -> + [#{reason := Error, connector := ConnectorResourceId} | _] -> ok; - [#{reason := {closed, _Msg}, connector := ResourceId} | _] -> + [#{reason := {closed, _Msg}, connector := ConnectorResourceId} | _] -> %% _Msg = "The connection was lost." ok; Trace0 -> @@ -1182,7 +1241,7 @@ do_econnrefused_or_timeout_test(Config, Error) -> %% even waiting, hard to avoid flakiness... simpler to just sleep %% a bit until stabilization. 
ct:sleep(200), - CurrentMetrics = current_metrics(ResourceId), + CurrentMetrics = current_metrics(ActionResourceId), RecordedEvents = ets:tab2list(TelemetryTable), ct:pal("telemetry events: ~p", [RecordedEvents]), ?assertMatch( @@ -1198,7 +1257,19 @@ do_econnrefused_or_timeout_test(Config, Error) -> CurrentMetrics ); timeout -> - wait_until_gauge_is(inflight, 0, _Timeout = 1_000), + wait_telemetry_event( + TelemetryTable, + late_reply, + ActionResourceId, + #{timeout => 5_000, n_events => 2} + ), + wait_until_gauge_is(#{ + gauge_name => inflight, + expected => 0, + filter_fn => mk_res_id_filter(ActionResourceId), + timeout => 1_000, + max_events => 20 + }), wait_until_gauge_is(queuing, 0, _Timeout = 1_000), assert_metrics( #{ @@ -1211,7 +1282,7 @@ do_econnrefused_or_timeout_test(Config, Error) -> success => 0, late_reply => 2 }, - ResourceId + ActionResourceId ) end, @@ -1334,7 +1405,8 @@ t_failure_no_body(Config) -> ok. t_unrecoverable_error(Config) -> - ResourceId = ?config(resource_id, Config), + ActionResourceId = ?config(action_resource_id, Config), + TelemetryTable = ?config(telemetry_table, Config), TestPid = self(), FailureNoBodyHandler = fun(Req0, State) -> @@ -1358,7 +1430,7 @@ t_unrecoverable_error(Config) -> ok = emqx_bridge_http_connector_test_server:set_handler(FailureNoBodyHandler), Topic = <<"t/topic">>, {ok, _} = create_bridge(Config), - assert_empty_metrics(ResourceId), + assert_empty_metrics(ActionResourceId), {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), Payload = <<"payload">>, @@ -1386,6 +1458,7 @@ t_unrecoverable_error(Config) -> %% removed, this inflight should be 1, because we retry if %% the worker is killed. 
wait_until_gauge_is(inflight, 0, _Timeout = 400), + wait_telemetry_event(TelemetryTable, failed, ActionResourceId), assert_metrics( #{ dropped => 0, @@ -1398,7 +1471,7 @@ t_unrecoverable_error(Config) -> retried => 0, success => 0 }, - ResourceId + ActionResourceId ), ok. @@ -1407,7 +1480,7 @@ t_stop(Config) -> {ok, _} = create_bridge(Config), ?check_trace( ?wait_async_action( - emqx_bridge_resource:stop(?BRIDGE_TYPE, Name), + emqx_bridge_resource:stop(?BRIDGE_V1_TYPE, Name), #{?snk_kind := gcp_pubsub_stop}, 5_000 ), @@ -1421,13 +1494,13 @@ t_stop(Config) -> ok. t_get_status_ok(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), {ok, _} = create_bridge(Config), ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)), ok. t_get_status_no_worker(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), {ok, _} = create_bridge(Config), emqx_common_test_helpers:with_mock( ehttpc, @@ -1441,7 +1514,7 @@ t_get_status_no_worker(Config) -> ok. t_get_status_down(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), {ok, _} = create_bridge(Config), emqx_common_test_helpers:with_mock( ehttpc, @@ -1457,7 +1530,7 @@ t_get_status_down(Config) -> ok. t_get_status_timeout_calling_workers(Config) -> - ResourceId = ?config(resource_id, Config), + ResourceId = ?config(connector_resource_id, Config), {ok, _} = create_bridge(Config), emqx_common_test_helpers:with_mock( ehttpc, @@ -1520,7 +1593,7 @@ t_on_start_ehttpc_pool_start_failure(Config) -> ), fun(Trace) -> ?assertMatch( - [#{reason := some_error}], + [#{reason := some_error} | _], ?of_kind(gcp_pubsub_ehttpc_pool_start_failure, Trace) ), ok @@ -1668,7 +1741,7 @@ t_attributes(Config) -> ), %% ensure loading cluster override file doesn't mangle the attribute %% placeholders... 
- #{<<"bridges">> := #{?BRIDGE_TYPE_BIN := #{Name := RawConf}}} = + #{<<"actions">> := #{?ACTION_TYPE_BIN := #{Name := RawConf}}} = emqx_config:read_override_conf(#{override_to => cluster}), ?assertEqual( [ @@ -1689,7 +1762,7 @@ t_attributes(Config) -> <<"value">> => <<"${.payload.value}">> } ], - maps:get(<<"attributes_template">>, RawConf) + emqx_utils_maps:deep_get([<<"parameters">>, <<"attributes_template">>], RawConf) ), ok end, diff --git a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_tests.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_tests.erl index 885754470..de7467f62 100644 --- a/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_tests.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_tests.erl @@ -12,7 +12,7 @@ %% erlfmt-ignore gcp_pubsub_producer_hocon() -> -""" +" bridges.gcp_pubsub.my_producer { attributes_template = [ {key = \"${payload.key}\", value = fixed_value} @@ -54,7 +54,7 @@ bridges.gcp_pubsub.my_producer { type = service_account } } -""". +". 
%%=========================================================================== %% Helper functions diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src index a8a938a0b..c28c3ed92 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_greptimedb, [ {description, "EMQX GreptimeDB Bridge"}, - {vsn, "0.1.4"}, + {vsn, "0.1.5"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl index ff4ba313e..d588f7f8c 100644 --- a/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl +++ b/apps/emqx_bridge_greptimedb/src/emqx_bridge_greptimedb_connector.erl @@ -147,13 +147,7 @@ fields(greptimedb) -> [ {dbname, mk(binary(), #{required => true, desc => ?DESC("dbname")})}, {username, mk(binary(), #{desc => ?DESC("username")})}, - {password, - mk(binary(), #{ - desc => ?DESC("password"), - format => <<"password">>, - sensitive => true, - converter => fun emqx_schema:password_converter/2 - })} + {password, emqx_schema_secret:mk(#{desc => ?DESC("password")})} ] ++ emqx_connector_schema_lib:ssl_fields(). server() -> @@ -302,7 +296,8 @@ ssl_config(SSL = #{enable := true}) -> auth(#{username := Username, password := Password}) -> [ - {auth, {basic, #{username => str(Username), password => str(Password)}}} + %% TODO: teach `greptimedb` to accept 0-arity closures as passwords. + {auth, {basic, #{username => str(Username), password => emqx_secret:unwrap(Password)}}} ]; auth(_) -> []. 
diff --git a/apps/emqx_bridge_http/src/emqx_bridge_http_schema.erl b/apps/emqx_bridge_http/src/emqx_bridge_http_schema.erl index 935c8e470..6a9219c11 100644 --- a/apps/emqx_bridge_http/src/emqx_bridge_http_schema.erl +++ b/apps/emqx_bridge_http/src/emqx_bridge_http_schema.erl @@ -30,7 +30,6 @@ %%====================================================================================== %% Hocon Schema Definitions - namespace() -> "bridge_http". roots() -> []. diff --git a/apps/emqx_bridge_http/test/emqx_bridge_http_connector_tests.erl b/apps/emqx_bridge_http/test/emqx_bridge_http_connector_tests.erl index 4f5e2929c..f2de91123 100644 --- a/apps/emqx_bridge_http/test/emqx_bridge_http_connector_tests.erl +++ b/apps/emqx_bridge_http/test/emqx_bridge_http_connector_tests.erl @@ -175,7 +175,7 @@ check_atom_key(Conf) when is_map(Conf) -> %% erlfmt-ignore webhook_config_hocon() -> -""" +" bridges.webhook.a { body = \"${.}\" connect_timeout = 15s @@ -209,4 +209,4 @@ bridges.webhook.a { } url = \"http://some.host:4000/api/echo\" } -""". +". 
diff --git a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src index c6236d97c..ef288368d 100644 --- a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src +++ b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_influxdb, [ {description, "EMQX Enterprise InfluxDB Bridge"}, - {vsn, "0.1.6"}, + {vsn, "0.1.7"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_connector.erl b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_connector.erl index c8053a53d..2b4fb8d74 100644 --- a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_connector.erl +++ b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb_connector.erl @@ -192,20 +192,14 @@ fields(influxdb_api_v1) -> [ {database, mk(binary(), #{required => true, desc => ?DESC("database")})}, {username, mk(binary(), #{desc => ?DESC("username")})}, - {password, - mk(binary(), #{ - desc => ?DESC("password"), - format => <<"password">>, - sensitive => true, - converter => fun emqx_schema:password_converter/2 - })} + {password, emqx_schema_secret:mk(#{desc => ?DESC("password")})} ] ++ emqx_connector_schema_lib:ssl_fields(); fields(influxdb_api_v2) -> fields(common) ++ [ {bucket, mk(binary(), #{required => true, desc => ?DESC("bucket")})}, {org, mk(binary(), #{required => true, desc => ?DESC("org")})}, - {token, mk(binary(), #{required => true, desc => ?DESC("token")})} + {token, emqx_schema_secret:mk(#{required => true, desc => ?DESC("token")})} ] ++ emqx_connector_schema_lib:ssl_fields(). server() -> @@ -363,7 +357,8 @@ protocol_config(#{ {version, v2}, {bucket, str(Bucket)}, {org, str(Org)}, - {token, Token} + %% TODO: teach `influxdb` to accept 0-arity closures as passwords. + {token, emqx_secret:unwrap(Token)} ] ++ ssl_config(SSL). ssl_config(#{enable := false}) -> @@ -383,7 +378,8 @@ username(_) -> []. 
password(#{password := Password}) -> - [{password, str(Password)}]; + %% TODO: teach `influxdb` to accept 0-arity closures as passwords. + [{password, str(emqx_secret:unwrap(Password))}]; password(_) -> []. diff --git a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src index b79c4c2ce..42b3c165f 100644 --- a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src +++ b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge_iotdb, [ {description, "EMQX Enterprise Apache IoTDB Bridge"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {modules, [ emqx_bridge_iotdb, emqx_bridge_iotdb_impl diff --git a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl index 38dfebe97..25bafbd00 100644 --- a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl +++ b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl @@ -51,12 +51,9 @@ fields(auth_basic) -> [ {username, mk(binary(), #{required => true, desc => ?DESC("config_auth_basic_username")})}, {password, - mk(binary(), #{ + emqx_schema_secret:mk(#{ required => true, - desc => ?DESC("config_auth_basic_password"), - format => <<"password">>, - sensitive => true, - converter => fun emqx_schema:password_converter/2 + desc => ?DESC("config_auth_basic_password") })} ]. diff --git a/apps/emqx_bridge_kafka/rebar.config b/apps/emqx_bridge_kafka/rebar.config index 92e83fa04..b69ec1262 100644 --- a/apps/emqx_bridge_kafka/rebar.config +++ b/apps/emqx_bridge_kafka/rebar.config @@ -2,7 +2,7 @@ {erl_opts, [debug_info]}. 
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.8.0"}}} , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}} - , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}} + , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.1"}}} , {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}} , {snappyer, "1.2.9"} , {emqx_connector, {path, "../../apps/emqx_connector"}} diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src index da8df2ddc..1d9d5c807 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge_kafka, [ {description, "EMQX Enterprise Kafka Bridge"}, - {vsn, "0.1.12"}, + {vsn, "0.1.13"}, {registered, [emqx_bridge_kafka_consumer_sup]}, {applications, [ kernel, diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl index 8c90e0896..951fb5ef5 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.erl @@ -33,10 +33,13 @@ ]). -export([ + kafka_connector_config_fields/0, kafka_producer_converter/2, producer_strategy_key_validator/1 ]). +-define(CONNECTOR_TYPE, kafka_producer). + %% ------------------------------------------------------------------------------------------------- %% api @@ -76,6 +79,20 @@ conn_bridge_examples(Method) -> } ]. 
+values({get, connector}) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ], + actions => [<<"my_action">>] + }, + values({post, connector}) + ); values({get, KafkaType}) -> maps:merge( #{ @@ -112,16 +129,15 @@ values({put, connector}) -> values({put, KafkaType}) -> maps:merge(values(common_config), values(KafkaType)); values(bridge_v2_producer) -> - maps:merge( - #{ - enable => true, - connector => <<"my_kafka_producer_connector">>, - resource_opts => #{ - health_check_interval => "32s" - } - }, - values(producer) - ); + #{ + enable => true, + connector => <<"my_kafka_producer_connector">>, + parameters => values(producer_values), + local_topic => <<"mqtt/local/topic">>, + resource_opts => #{ + health_check_interval => "32s" + } + }; values(common_config) -> #{ authentication => #{ @@ -143,40 +159,42 @@ values(common_config) -> }; values(producer) -> #{ - kafka => #{ - topic => <<"kafka-topic">>, - message => #{ - key => <<"${.clientid}">>, - value => <<"${.}">>, - timestamp => <<"${.timestamp}">> - }, - max_batch_bytes => <<"896KB">>, - compression => <<"no_compression">>, - partition_strategy => <<"random">>, - required_acks => <<"all_isr">>, - partition_count_refresh_interval => <<"60s">>, - kafka_headers => <<"${pub_props}">>, - kafka_ext_headers => [ - #{ - kafka_ext_header_key => <<"clientid">>, - kafka_ext_header_value => <<"${clientid}">> - }, - #{ - kafka_ext_header_key => <<"topic">>, - kafka_ext_header_value => <<"${topic}">> - } - ], - kafka_header_value_encode_mode => none, - max_inflight => 10, - buffer => #{ - mode => <<"hybrid">>, - per_partition_limit => <<"2GB">>, - segment_bytes => <<"100MB">>, - memory_overload_protection => true - } - }, + kafka => values(producer_values), local_topic => <<"mqtt/local/topic">> }; +values(producer_values) -> + #{ + topic => <<"kafka-topic">>, + message => #{ + key => <<"${.clientid}">>, + value => <<"${.}">>, + 
timestamp => <<"${.timestamp}">> + }, + max_batch_bytes => <<"896KB">>, + compression => <<"no_compression">>, + partition_strategy => <<"random">>, + required_acks => <<"all_isr">>, + partition_count_refresh_interval => <<"60s">>, + kafka_headers => <<"${pub_props}">>, + kafka_ext_headers => [ + #{ + kafka_ext_header_key => <<"clientid">>, + kafka_ext_header_value => <<"${clientid}">> + }, + #{ + kafka_ext_header_key => <<"topic">>, + kafka_ext_header_value => <<"${topic}">> + } + ], + kafka_header_value_encode_mode => none, + max_inflight => 10, + buffer => #{ + mode => <<"hybrid">>, + per_partition_limit => <<"2GB">>, + segment_bytes => <<"100MB">>, + memory_overload_protection => true + } + }; values(consumer) -> #{ kafka => #{ @@ -246,6 +264,16 @@ namespace() -> "bridge_kafka". roots() -> ["config_consumer", "config_producer", "config_bridge_v2"]. +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + emqx_connector_schema:api_fields( + Field, + ?CONNECTOR_TYPE, + kafka_connector_config_fields() + ); fields("post_" ++ Type) -> [type_field(Type), name_field() | fields("config_" ++ Type)]; fields("put_" ++ Type) -> @@ -283,11 +311,9 @@ fields(auth_username_password) -> })}, {username, mk(binary(), #{required => true, desc => ?DESC(auth_sasl_username)})}, {password, - mk(binary(), #{ + emqx_connector_schema_lib:password_field(#{ required => true, - sensitive => true, - desc => ?DESC(auth_sasl_password), - converter => fun emqx_schema:password_converter/2 + desc => ?DESC(auth_sasl_password) })} ]; fields(auth_gssapi_kerberos) -> @@ -486,8 +512,7 @@ fields(consumer_opts) -> {value_encoding_mode, mk(enum([none, base64]), #{ default => none, desc => ?DESC(consumer_value_encoding_mode) - })}, - {resource_opts, mk(ref(resource_opts), #{default => #{}})} + })} ]; fields(consumer_topic_mapping) -> [ @@ -561,9 +586,11 @@ desc(Name) -> ?DESC(Name). 
connector_config_fields() -> + emqx_connector_schema:common_fields() ++ + kafka_connector_config_fields(). + +kafka_connector_config_fields() -> [ - {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, - {description, emqx_schema:description_schema()}, {bootstrap_hosts, mk( binary(), @@ -599,7 +626,7 @@ connector_config_fields() -> })}, {socket_opts, mk(ref(socket_opts), #{required => false, desc => ?DESC(socket_opts)})}, {ssl, mk(ref(ssl_client_opts), #{})} - ]. + ] ++ [resource_opts()]. producer_opts(ActionOrBridgeV1) -> [ @@ -607,12 +634,14 @@ producer_opts(ActionOrBridgeV1) -> %% for egress bridges with this config, the published messages %% will be forwarded to such bridges. {local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})}, - parameters_field(ActionOrBridgeV1), - {resource_opts, mk(ref(resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})} - ]. + parameters_field(ActionOrBridgeV1) + ] ++ [resource_opts() || ActionOrBridgeV1 =:= action]. + +resource_opts() -> + {resource_opts, mk(ref(resource_opts), #{default => #{}, desc => ?DESC(resource_opts)})}. %% Since e5.3.1, we want to rename the field 'kafka' to 'parameters' -%% Hoever we need to keep it backward compatible for generated schema json (version 0.1.0) +%% However we need to keep it backward compatible for generated schema json (version 0.1.0) %% since schema is data for the 'schemas' API. 
parameters_field(ActionOrBridgeV1) -> {Name, Alias} = diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl.erl index b3ad2ca36..eb8f36fb5 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl.erl @@ -31,8 +31,8 @@ make_client_id(BridgeType0, BridgeName0) -> sasl(none) -> undefined; -sasl(#{mechanism := Mechanism, username := Username, password := Password}) -> - {Mechanism, Username, emqx_secret:wrap(Password)}; +sasl(#{mechanism := Mechanism, username := Username, password := Secret}) -> + {Mechanism, Username, Secret}; sasl(#{ kerberos_principal := Principal, kerberos_keytab_file := KeyTabFile diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl index 89cb9a78f..24ea4d300 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl @@ -251,7 +251,7 @@ do_handle_message(Message, State) -> Payload = render(FullMessage, PayloadTemplate), MQTTTopic = render(FullMessage, MQTTTopicTemplate), MQTTMessage = emqx_message:make(ResourceId, MQTTQoS, MQTTTopic, Payload), - _ = emqx:publish(MQTTMessage), + _ = emqx_broker:safe_publish(MQTTMessage), emqx_hooks:run(Hookpoint, [FullMessage]), emqx_resource_metrics:received_inc(ResourceId), %% note: just `ack' does not commit the offset to the diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl index 84401aaa6..bf8c76bee 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_producer.erl @@ -81,11 +81,24 @@ on_start(InstId, Config) -> ClientId = InstId, emqx_resource:allocate_resource(InstId, ?kafka_client_id, ClientId), ok = ensure_client(ClientId, Hosts, 
ClientConfig), - %% Check if this is a dry run - {ok, #{ - client_id => ClientId, - installed_bridge_v2s => #{} - }}. + %% Note: we must return `{error, _}' here if the client cannot connect so that the + %% connector will immediately enter the `?status_disconnected' state, and then avoid + %% giving the impression that channels/actions may be added immediately and start + %% buffering, which won't happen if it's `?status_connecting'. That would lead to + %% data loss, since Kafka Producer uses wolff's internal buffering, which is started + %% only when its producers start. + case check_client_connectivity(ClientId) of + ok -> + {ok, #{ + client_id => ClientId, + installed_bridge_v2s => #{} + }}; + {error, {find_client, Reason}} -> + %% Race condition? Crash? We just checked it with `ensure_client'... + {error, Reason}; + {error, {connectivity, Reason}} -> + {error, Reason} + end. on_add_channel( InstId, @@ -478,14 +491,18 @@ on_get_status( _InstId, #{client_id := ClientId} = State ) -> - case wolff_client_sup:find_client(ClientId) of - {ok, Pid} -> - case wolff_client:check_connectivity(Pid) of - ok -> ?status_connected; - {error, Error} -> {?status_connecting, State, Error} - end; - {error, _Reason} -> - ?status_connecting + %% Note: we must avoid returning `?status_disconnected' here if the connector ever was + %% connected. If the connector ever connected, wolff producers might have been + %% successfully started, and returning `?status_disconnected' will make resource + %% manager try to restart the producers / connector, thus potentially dropping data + %% held in wolff producer's replayq. + case check_client_connectivity(ClientId) of + ok -> + ?status_connected; + {error, {find_client, _Error}} -> + ?status_connecting; + {error, {connectivity, Error}} -> + {?status_connecting, State, Error} + end. 
on_get_channel_status( @@ -496,13 +513,19 @@ on_get_channel_status( installed_bridge_v2s := Channels } = _State ) -> + %% Note: we must avoid returning `?status_disconnected' here. Returning + %% `?status_disconnected' will make resource manager try to restart the producers / + %% connector, thus potentially dropping data held in wolff producer's replayq. The + %% only exception is if the topic does not exist ("unhealthy target"). #{kafka_topic := KafkaTopic} = maps:get(ChannelId, Channels), try ok = check_topic_and_leader_connections(ClientId, KafkaTopic), ?status_connected catch - throw:#{reason := restarting} -> - ?status_connecting + throw:{unhealthy_target, Msg} -> + throw({unhealthy_target, Msg}); + K:E -> + {?status_connecting, {K, E}} end. check_topic_and_leader_connections(ClientId, KafkaTopic) -> @@ -524,6 +547,21 @@ check_topic_and_leader_connections(ClientId, KafkaTopic) -> }) end. +-spec check_client_connectivity(wolff:client_id()) -> + ok | {error, {connectivity | find_client, term()}}. +check_client_connectivity(ClientId) -> + case wolff_client_sup:find_client(ClientId) of + {ok, Pid} -> + case wolff_client:check_connectivity(Pid) of + ok -> + ok; + {error, Error} -> + {error, {connectivity, Error}} + end; + {error, Reason} -> + {error, {find_client, Reason}} + end. + check_if_healthy_leaders(ClientId, ClientPid, KafkaTopic) when is_pid(ClientPid) -> Leaders = case wolff_client:get_leader_connections(ClientPid, KafkaTopic) of @@ -621,8 +659,13 @@ partitioner(random) -> random; partitioner(key_dispatch) -> first_key_dispatch. replayq_dir(BridgeType, BridgeName) -> + RawConf = emqx_conf:get_raw([actions, BridgeType, BridgeName]), DirName = iolist_to_binary([ - emqx_bridge_lib:downgrade_type(BridgeType), ":", BridgeName, ":", atom_to_list(node()) + emqx_bridge_lib:downgrade_type(BridgeType, RawConf), + ":", + BridgeName, + ":", + atom_to_list(node()) ]), filename:join([emqx:data_dir(), "kafka", DirName]). 
diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl index 943f30629..4fd08c154 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl @@ -30,29 +30,41 @@ all() -> ]. groups() -> - AllTCs = emqx_common_test_helpers:all(?MODULE), - SASLAuths = [ - sasl_auth_plain, - sasl_auth_scram256, - sasl_auth_scram512, - sasl_auth_kerberos + SASLGroups = [ + {sasl_auth_plain, testcases(sasl)}, + {sasl_auth_scram256, testcases(sasl)}, + {sasl_auth_scram512, testcases(sasl)}, + {sasl_auth_kerberos, testcases(sasl_auth_kerberos)} ], - SASLAuthGroups = [{group, Type} || Type <- SASLAuths], - OnlyOnceTCs = only_once_tests(), - MatrixTCs = AllTCs -- OnlyOnceTCs, - SASLTests = [{Group, MatrixTCs} || Group <- SASLAuths], + SASLAuthGroups = [{group, Group} || {Group, _} <- SASLGroups], [ - {plain, MatrixTCs ++ OnlyOnceTCs}, - {ssl, MatrixTCs}, + {plain, testcases(plain)}, + {ssl, testcases(common)}, {sasl_plain, SASLAuthGroups}, {sasl_ssl, SASLAuthGroups} - ] ++ SASLTests. + | SASLGroups + ]. -sasl_only_tests() -> - [t_failed_creation_then_fixed]. 
- -%% tests that do not need to be run on all groups -only_once_tests() -> +testcases(all) -> + emqx_common_test_helpers:all(?MODULE); +testcases(plain) -> + %% NOTE: relevant only for a subset of SASL testcases + Exclude = [t_failed_creation_then_fixed], + testcases(all) -- Exclude; +testcases(common) -> + testcases(plain) -- testcases(once); +testcases(sasl) -> + testcases(all) -- testcases(once); +testcases(sasl_auth_kerberos) -> + %% NOTE: need a proxy to run these tests + Exclude = [ + t_failed_creation_then_fixed, + t_on_get_status, + t_receive_after_recovery + ], + testcases(sasl) -- Exclude; +testcases(once) -> + %% tests that do not need to be run on all groups [ t_begin_offset_earliest, t_bridge_rule_action_source, @@ -220,7 +232,7 @@ init_per_group(sasl_auth_kerberos, Config0) -> (KV) -> KV end, - [{has_proxy, false}, {sasl_auth_mechanism, kerberos} | Config0] + [{sasl_auth_mechanism, kerberos} | Config0] ), Config; init_per_group(_Group, Config) -> @@ -264,43 +276,6 @@ end_per_group(Group, Config) when end_per_group(_Group, _Config) -> ok. 
-init_per_testcase(TestCase, Config) when - TestCase =:= t_failed_creation_then_fixed --> - KafkaType = ?config(kafka_type, Config), - AuthMechanism = ?config(sasl_auth_mechanism, Config), - IsSASL = lists:member(KafkaType, [sasl_plain, sasl_ssl]), - case {IsSASL, AuthMechanism} of - {true, kerberos} -> - [{skip_does_not_apply, true}]; - {true, _} -> - common_init_per_testcase(TestCase, Config); - {false, _} -> - [{skip_does_not_apply, true}] - end; -init_per_testcase(TestCase, Config) when - TestCase =:= t_failed_creation_then_fixed --> - %% test with one partiton only for this case because - %% the wait probe may not be always sent to the same partition - HasProxy = proplists:get_value(has_proxy, Config, true), - case HasProxy of - false -> - [{skip_does_not_apply, true}]; - true -> - common_init_per_testcase(TestCase, [{num_partitions, 1} | Config]) - end; -init_per_testcase(TestCase, Config) when - TestCase =:= t_on_get_status; - TestCase =:= t_receive_after_recovery --> - HasProxy = proplists:get_value(has_proxy, Config, true), - case HasProxy of - false -> - [{skip_does_not_apply, true}]; - true -> - common_init_per_testcase(TestCase, Config) - end; init_per_testcase(t_cluster_group = TestCase, Config0) -> Config = emqx_utils:merge_opts(Config0, [{num_partitions, 6}]), common_init_per_testcase(TestCase, Config); @@ -393,30 +368,24 @@ common_init_per_testcase(TestCase, Config0) -> ]. 
end_per_testcase(_Testcase, Config) -> - case proplists:get_bool(skip_does_not_apply, Config) of - true -> - ok; - false -> - ProxyHost = ?config(proxy_host, Config), - ProxyPort = ?config(proxy_port, Config), - ProducersConfigs = ?config(kafka_producers, Config), - emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), - delete_all_bridges(), - #{clientid := KafkaProducerClientId, producers := ProducersMapping} = - ProducersConfigs, - lists:foreach( - fun(Producers) -> - ok = wolff:stop_and_delete_supervised_producers(Producers) - end, - maps:values(ProducersMapping) - ), - ok = wolff:stop_and_delete_supervised_client(KafkaProducerClientId), - %% in CI, apparently this needs more time since the - %% machines struggle with all the containers running... - emqx_common_test_helpers:call_janitor(60_000), - ok = snabbkaffe:stop(), - ok - end. + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + ProducersConfigs = ?config(kafka_producers, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + #{clientid := KafkaProducerClientId, producers := ProducersMapping} = + ProducersConfigs, + lists:foreach( + fun(Producers) -> + ok = wolff:stop_and_delete_supervised_producers(Producers) + end, + maps:values(ProducersMapping) + ), + ok = wolff:stop_and_delete_supervised_client(KafkaProducerClientId), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(). %%------------------------------------------------------------------------------ %% Helper fns @@ -1391,14 +1360,6 @@ t_multiple_topic_mappings(Config) -> ok. t_on_get_status(Config) -> - case proplists:get_bool(skip_does_not_apply, Config) of - true -> - ok; - false -> - do_t_on_get_status(Config) - end. 
- -do_t_on_get_status(Config) -> ProxyPort = ?config(proxy_port, Config), ProxyHost = ?config(proxy_host, Config), ProxyName = ?config(proxy_name, Config), @@ -1421,14 +1382,6 @@ do_t_on_get_status(Config) -> %% ensure that we can create and use the bridge successfully after %% creating it with bad config. t_failed_creation_then_fixed(Config) -> - case proplists:get_bool(skip_does_not_apply, Config) of - true -> - ok; - false -> - ?check_trace(do_t_failed_creation_then_fixed(Config), []) - end. - -do_t_failed_creation_then_fixed(Config) -> ct:timetrap({seconds, 180}), MQTTTopic = ?config(mqtt_topic, Config), MQTTQoS = ?config(mqtt_qos, Config), @@ -1516,14 +1469,6 @@ do_t_failed_creation_then_fixed(Config) -> %% recovering from a network partition will make the subscribers %% consume the messages produced during the down time. t_receive_after_recovery(Config) -> - case proplists:get_bool(skip_does_not_apply, Config) of - true -> - ok; - false -> - do_t_receive_after_recovery(Config) - end. - -do_t_receive_after_recovery(Config) -> ct:timetrap(120_000), ProxyPort = ?config(proxy_port, Config), ProxyHost = ?config(proxy_host, Config), diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl index b37ef00e9..09d3f78aa 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_producer_SUITE.erl @@ -28,13 +28,8 @@ ). -include_lib("eunit/include/eunit.hrl"). --include_lib("emqx/include/emqx.hrl"). --include_lib("emqx_dashboard/include/emqx_dashboard.hrl"). -define(HOST, "http://127.0.0.1:18083"). - -%% -define(API_VERSION, "v5"). - -define(BASE_PATH, "/api/v5"). 
%% NOTE: it's "kafka", but not "kafka_producer" @@ -48,13 +43,6 @@ %%------------------------------------------------------------------------------ all() -> - case code:get_object_code(cthr) of - {Module, Code, Filename} -> - {module, Module} = code:load_binary(Module, Filename, Code), - ok; - error -> - error - end, All0 = emqx_common_test_helpers:all(?MODULE), All = All0 -- matrix_cases(), Groups = lists:map(fun({G, _, _}) -> {group, G} end, groups()), @@ -105,23 +93,12 @@ init_per_suite(Config0) -> emqx_connector, emqx_bridge_kafka, emqx_bridge, - emqx_rule_engine + emqx_rule_engine, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} ], #{work_dir => emqx_cth_suite:work_dir(Config)} ), - emqx_mgmt_api_test_util:init_suite(), wait_until_kafka_is_up(), - %% Wait until bridges API is up - (fun WaitUntilRestApiUp() -> - case http_get(["bridges"]) of - {ok, 200, _Res} -> - ok; - Val -> - ct:pal("REST API for bridges not up. Wait and try again. Response: ~p", [Val]), - timer:sleep(1000), - WaitUntilRestApiUp() - end - end)(), [{apps, Apps} | Config]. 
end_per_suite(Config) -> @@ -183,6 +160,7 @@ t_query_mode_async(CtConfig) -> t_publish(matrix) -> {publish, [ [tcp, none, key_dispatch, sync], + [ssl, plain_passfile, random, sync], [ssl, scram_sha512, random, async], [ssl, kerberos, random, sync] ]}; @@ -200,9 +178,15 @@ t_publish(Config) -> end, Auth1 = case Auth of - none -> "none"; - scram_sha512 -> valid_sasl_scram512_settings(); - kerberos -> valid_sasl_kerberos_settings() + none -> + "none"; + plain_passfile -> + Passfile = filename:join(?config(priv_dir, Config), "passfile"), + valid_sasl_plain_passfile_settings(Passfile); + scram_sha512 -> + valid_sasl_scram512_settings(); + kerberos -> + valid_sasl_kerberos_settings() end, ConnCfg = #{ "bootstrap_hosts" => Hosts, @@ -499,11 +483,10 @@ t_failed_creation_then_fix(Config) -> {ok, {_, [KafkaMsg]}} = brod:fetch(kafka_hosts(), KafkaTopic, 0, Offset), ?assertMatch(#kafka_message{key = BinTime}, KafkaMsg), % %% TODO: refactor those into init/end per testcase - ok = ?PRODUCER:on_stop(ResourceId, State), - ?assertEqual([], supervisor:which_children(wolff_client_sup)), - ?assertEqual([], supervisor:which_children(wolff_producers_sup)), ok = emqx_bridge:remove(list_to_atom(Type), list_to_atom(Name)), delete_all_bridges(), + ?assertEqual([], supervisor:which_children(wolff_client_sup)), + ?assertEqual([], supervisor:which_children(wolff_producers_sup)), ok. t_custom_timestamp(_Config) -> @@ -1018,112 +1001,89 @@ hocon_config(Args, ConfigTemplateFun) -> ), Hocon. 
-%% erlfmt-ignore hocon_config_template() -> -""" -bridges.kafka.{{ bridge_name }} { - bootstrap_hosts = \"{{ kafka_hosts_string }}\" - enable = true - authentication = {{{ authentication }}} - ssl = {{{ ssl }}} - local_topic = \"{{ local_topic }}\" - kafka = { - message = { - key = \"${clientid}\" - value = \"${.payload}\" - timestamp = \"${timestamp}\" - } - buffer = { - memory_overload_protection = false - } - partition_strategy = {{ partition_strategy }} - topic = \"{{ kafka_topic }}\" - query_mode = {{ query_mode }} - } - metadata_request_timeout = 5s - min_metadata_refresh_interval = 3s - socket_opts { - nodelay = true - } - connect_timeout = 5s -} -""". + "bridges.kafka.{{ bridge_name }} {" + "\n bootstrap_hosts = \"{{ kafka_hosts_string }}\"" + "\n enable = true" + "\n authentication = {{{ authentication }}}" + "\n ssl = {{{ ssl }}}" + "\n local_topic = \"{{ local_topic }}\"" + "\n kafka = {" + "\n message = {" + "\n key = \"${clientid}\"" + "\n value = \"${.payload}\"" + "\n timestamp = \"${timestamp}\"" + "\n }" + "\n buffer = {" + "\n memory_overload_protection = false" + "\n }" + "\n partition_strategy = {{ partition_strategy }}" + "\n topic = \"{{ kafka_topic }}\"" + "\n query_mode = {{ query_mode }}" + "\n }" + "\n metadata_request_timeout = 5s" + "\n min_metadata_refresh_interval = 3s" + "\n socket_opts {" + "\n nodelay = true" + "\n }" + "\n connect_timeout = 5s" + "\n }". 
-%% erlfmt-ignore hocon_config_template_with_headers() -> -""" -bridges.kafka.{{ bridge_name }} { - bootstrap_hosts = \"{{ kafka_hosts_string }}\" - enable = true - authentication = {{{ authentication }}} - ssl = {{{ ssl }}} - local_topic = \"{{ local_topic }}\" - kafka = { - message = { - key = \"${clientid}\" - value = \"${.payload}\" - timestamp = \"${timestamp}\" - } - buffer = { - memory_overload_protection = false - } - kafka_headers = \"{{ kafka_headers }}\" - kafka_header_value_encode_mode: json - kafka_ext_headers: {{{ kafka_ext_headers }}} - partition_strategy = {{ partition_strategy }} - topic = \"{{ kafka_topic }}\" - query_mode = {{ query_mode }} - } - metadata_request_timeout = 5s - min_metadata_refresh_interval = 3s - socket_opts { - nodelay = true - } - connect_timeout = 5s -} -""". + "bridges.kafka.{{ bridge_name }} {" + "\n bootstrap_hosts = \"{{ kafka_hosts_string }}\"" + "\n enable = true" + "\n authentication = {{{ authentication }}}" + "\n ssl = {{{ ssl }}}" + "\n local_topic = \"{{ local_topic }}\"" + "\n kafka = {" + "\n message = {" + "\n key = \"${clientid}\"" + "\n value = \"${.payload}\"" + "\n timestamp = \"${timestamp}\"" + "\n }" + "\n buffer = {" + "\n memory_overload_protection = false" + "\n }" + "\n kafka_headers = \"{{ kafka_headers }}\"" + "\n kafka_header_value_encode_mode: json" + "\n kafka_ext_headers: {{{ kafka_ext_headers }}}" + "\n partition_strategy = {{ partition_strategy }}" + "\n topic = \"{{ kafka_topic }}\"" + "\n query_mode = {{ query_mode }}" + "\n }" + "\n metadata_request_timeout = 5s" + "\n min_metadata_refresh_interval = 3s" + "\n socket_opts {" + "\n nodelay = true" + "\n }" + "\n connect_timeout = 5s" + "\n }". 
-%% erlfmt-ignore hocon_config_template_authentication("none") -> "none"; hocon_config_template_authentication(#{"mechanism" := _}) -> -""" -{ - mechanism = {{ mechanism }} - password = {{ password }} - username = {{ username }} -} -"""; + "{" + "\n mechanism = {{ mechanism }}" + "\n password = \"{{ password }}\"" + "\n username = \"{{ username }}\"" + "\n }"; hocon_config_template_authentication(#{"kerberos_principal" := _}) -> -""" -{ - kerberos_principal = \"{{ kerberos_principal }}\" - kerberos_keytab_file = \"{{ kerberos_keytab_file }}\" -} -""". + "{" + "\n kerberos_principal = \"{{ kerberos_principal }}\"" + "\n kerberos_keytab_file = \"{{ kerberos_keytab_file }}\"" + "\n }". -%% erlfmt-ignore hocon_config_template_ssl(Map) when map_size(Map) =:= 0 -> -""" -{ - enable = false -} -"""; + "{ enable = false }"; hocon_config_template_ssl(#{"enable" := "false"}) -> -""" -{ - enable = false -} -"""; + "{ enable = false }"; hocon_config_template_ssl(#{"enable" := "true"}) -> -""" -{ - enable = true - cacertfile = \"{{{cacertfile}}}\" - certfile = \"{{{certfile}}}\" - keyfile = \"{{{keyfile}}}\" -} -""". + "{ enable = true" + "\n cacertfile = \"{{{cacertfile}}}\"" + "\n certfile = \"{{{certfile}}}\"" + "\n keyfile = \"{{{keyfile}}}\"" + "\n }". kafka_hosts_string(tcp, none) -> kafka_hosts_string(); @@ -1197,6 +1157,13 @@ valid_sasl_kerberos_settings() -> "kerberos_keytab_file" => shared_secret(rig_keytab) }. +valid_sasl_plain_passfile_settings(Passfile) -> + Auth = valid_sasl_plain_settings(), + ok = file:write_file(Passfile, maps:get("password", Auth)), + Auth#{ + "password" := "file://" ++ Passfile + }. + kafka_hosts() -> kpro:parse_endpoints(kafka_hosts_string()). 
diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl index 69794f2b9..64871bf6d 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_tests.erl @@ -223,144 +223,136 @@ check_atom_key(Conf) when is_map(Conf) -> %% Data section %%=========================================================================== -%% erlfmt-ignore kafka_producer_old_hocon(_WithLocalTopic = true) -> kafka_producer_old_hocon("mqtt {topic = \"mqtt/local\"}\n"); kafka_producer_old_hocon(_WithLocalTopic = false) -> kafka_producer_old_hocon("mqtt {}\n"); kafka_producer_old_hocon(MQTTConfig) when is_list(MQTTConfig) -> -""" -bridges.kafka { - myproducer { - authentication = \"none\" - bootstrap_hosts = \"toxiproxy:9292\" - connect_timeout = \"5s\" - metadata_request_timeout = \"5s\" - min_metadata_refresh_interval = \"3s\" - producer { - kafka { - buffer { - memory_overload_protection = false - mode = \"memory\" - per_partition_limit = \"2GB\" - segment_bytes = \"100MB\" - } - compression = \"no_compression\" - max_batch_bytes = \"896KB\" - max_inflight = 10 - message { - key = \"${.clientid}\" - timestamp = \"${.timestamp}\" - value = \"${.}\" - } - partition_count_refresh_interval = \"60s\" - partition_strategy = \"random\" - required_acks = \"all_isr\" - topic = \"test-topic-two-partitions\" - } -""" ++ MQTTConfig ++ -""" - } - socket_opts { - nodelay = true - recbuf = \"1024KB\" - sndbuf = \"1024KB\" - } - ssl {enable = false, verify = \"verify_peer\"} - } -} -""". 
+ [ + "bridges.kafka {" + "\n myproducer {" + "\n authentication = \"none\"" + "\n bootstrap_hosts = \"toxiproxy:9292\"" + "\n connect_timeout = \"5s\"" + "\n metadata_request_timeout = \"5s\"" + "\n min_metadata_refresh_interval = \"3s\"" + "\n producer {" + "\n kafka {" + "\n buffer {" + "\n memory_overload_protection = false" + "\n mode = \"memory\"" + "\n per_partition_limit = \"2GB\"" + "\n segment_bytes = \"100MB\"" + "\n }" + "\n compression = \"no_compression\"" + "\n max_batch_bytes = \"896KB\"" + "\n max_inflight = 10" + "\n message {" + "\n key = \"${.clientid}\"" + "\n timestamp = \"${.timestamp}\"" + "\n value = \"${.}\"" + "\n }" + "\n partition_count_refresh_interval = \"60s\"" + "\n partition_strategy = \"random\"" + "\n required_acks = \"all_isr\"" + "\n topic = \"test-topic-two-partitions\"" + "\n }", + MQTTConfig, + "\n }" + "\n socket_opts {" + "\n nodelay = true" + "\n recbuf = \"1024KB\"" + "\n sndbuf = \"1024KB\"" + "\n }" + "\n ssl {enable = false, verify = \"verify_peer\"}" + "\n }" + "\n}" + ]. 
kafka_producer_new_hocon() -> - "" - "\n" - "bridges.kafka {\n" - " myproducer {\n" - " authentication = \"none\"\n" - " bootstrap_hosts = \"toxiproxy:9292\"\n" - " connect_timeout = \"5s\"\n" - " metadata_request_timeout = \"5s\"\n" - " min_metadata_refresh_interval = \"3s\"\n" - " kafka {\n" - " buffer {\n" - " memory_overload_protection = false\n" - " mode = \"memory\"\n" - " per_partition_limit = \"2GB\"\n" - " segment_bytes = \"100MB\"\n" - " }\n" - " compression = \"no_compression\"\n" - " max_batch_bytes = \"896KB\"\n" - " max_inflight = 10\n" - " message {\n" - " key = \"${.clientid}\"\n" - " timestamp = \"${.timestamp}\"\n" - " value = \"${.}\"\n" - " }\n" - " partition_count_refresh_interval = \"60s\"\n" - " partition_strategy = \"random\"\n" - " required_acks = \"all_isr\"\n" - " topic = \"test-topic-two-partitions\"\n" - " }\n" - " local_topic = \"mqtt/local\"\n" - " socket_opts {\n" - " nodelay = true\n" - " recbuf = \"1024KB\"\n" - " sndbuf = \"1024KB\"\n" - " }\n" - " ssl {enable = false, verify = \"verify_peer\"}\n" - " resource_opts {\n" - " health_check_interval = 10s\n" - " }\n" - " }\n" - "}\n" - "". 
+ "bridges.kafka {" + "\n myproducer {" + "\n authentication = \"none\"" + "\n bootstrap_hosts = \"toxiproxy:9292\"" + "\n connect_timeout = \"5s\"" + "\n metadata_request_timeout = \"5s\"" + "\n min_metadata_refresh_interval = \"3s\"" + "\n kafka {" + "\n buffer {" + "\n memory_overload_protection = false" + "\n mode = \"memory\"" + "\n per_partition_limit = \"2GB\"" + "\n segment_bytes = \"100MB\"" + "\n }" + "\n compression = \"no_compression\"" + "\n max_batch_bytes = \"896KB\"" + "\n max_inflight = 10" + "\n message {" + "\n key = \"${.clientid}\"" + "\n timestamp = \"${.timestamp}\"" + "\n value = \"${.}\"" + "\n }" + "\n partition_count_refresh_interval = \"60s\"" + "\n partition_strategy = \"random\"" + "\n required_acks = \"all_isr\"" + "\n topic = \"test-topic-two-partitions\"" + "\n }" + "\n local_topic = \"mqtt/local\"" + "\n socket_opts {" + "\n nodelay = true" + "\n recbuf = \"1024KB\"" + "\n sndbuf = \"1024KB\"" + "\n }" + "\n ssl {enable = false, verify = \"verify_peer\"}" + "\n resource_opts {" + "\n health_check_interval = 10s" + "\n }" + "\n }" + "\n}". 
-%% erlfmt-ignore kafka_consumer_hocon() -> -""" -bridges.kafka_consumer.my_consumer { - enable = true - bootstrap_hosts = \"kafka-1.emqx.net:9292\" - connect_timeout = 5s - min_metadata_refresh_interval = 3s - metadata_request_timeout = 5s - authentication = { - mechanism = plain - username = emqxuser - password = password - } - kafka { - max_batch_bytes = 896KB - max_rejoin_attempts = 5 - offset_commit_interval_seconds = 3s - offset_reset_policy = latest - } - topic_mapping = [ - { - kafka_topic = \"kafka-topic-1\" - mqtt_topic = \"mqtt/topic/1\" - qos = 1 - payload_template = \"${.}\" - }, - { - kafka_topic = \"kafka-topic-2\" - mqtt_topic = \"mqtt/topic/2\" - qos = 2 - payload_template = \"v = ${.value}\" - } - ] - key_encoding_mode = none - value_encoding_mode = none - ssl { - enable = false - verify = verify_none - server_name_indication = \"auto\" - } - resource_opts { - health_check_interval = 10s - } -} -""". + "bridges.kafka_consumer.my_consumer {" + "\n enable = true" + "\n bootstrap_hosts = \"kafka-1.emqx.net:9292\"" + "\n connect_timeout = 5s" + "\n min_metadata_refresh_interval = 3s" + "\n metadata_request_timeout = 5s" + "\n authentication = {" + "\n mechanism = plain" + "\n username = emqxuser" + "\n password = password" + "\n }" + "\n kafka {" + "\n max_batch_bytes = 896KB" + "\n max_rejoin_attempts = 5" + "\n offset_commit_interval_seconds = 3s" + "\n offset_reset_policy = latest" + "\n }" + "\n topic_mapping = [" + "\n {" + "\n kafka_topic = \"kafka-topic-1\"" + "\n mqtt_topic = \"mqtt/topic/1\"" + "\n qos = 1" + "\n payload_template = \"${.}\"" + "\n }," + "\n {" + "\n kafka_topic = \"kafka-topic-2\"" + "\n mqtt_topic = \"mqtt/topic/2\"" + "\n qos = 2" + "\n payload_template = \"v = ${.value}\"" + "\n }" + "\n ]" + "\n key_encoding_mode = none" + "\n value_encoding_mode = none" + "\n ssl {" + "\n enable = false" + "\n verify = verify_none" + "\n server_name_indication = \"auto\"" + "\n }" + "\n resource_opts {" + "\n health_check_interval = 10s" 
+ "\n }" + "\n }". %% assert compatibility bridge_schema_json_test() -> diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl index 2ad0504b4..2913e178a 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_v2_kafka_producer_SUITE.erl @@ -22,6 +22,7 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("brod/include/brod.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). -import(emqx_common_test_helpers, [on_exit/1]). @@ -35,6 +36,14 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "toxiproxy.emqx.net"), + KafkaPort = list_to_integer(os:getenv("KAFKA_PLAIN_PORT", "9292")), + ProxyName = "kafka_plain", + DirectKafkaHost = os:getenv("KAFKA_DIRECT_PLAIN_HOST", "kafka-1.emqx.net"), + DirectKafkaPort = list_to_integer(os:getenv("KAFKA_DIRECT_PLAIN_PORT", "9092")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), Apps = emqx_cth_suite:start( [ emqx, @@ -50,17 +59,34 @@ init_per_suite(Config) -> ), {ok, _} = emqx_common_test_http:create_default_app(), emqx_bridge_kafka_impl_producer_SUITE:wait_until_kafka_is_up(), - [{apps, Apps} | Config]. + [ + {apps, Apps}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {proxy_name, ProxyName}, + {kafka_host, KafkaHost}, + {kafka_port, KafkaPort}, + {direct_kafka_host, DirectKafkaHost}, + {direct_kafka_port, DirectKafkaPort} + | Config + ]. end_per_suite(Config) -> Apps = ?config(apps, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), emqx_cth_suite:stop(Apps), ok. 
init_per_testcase(_TestCase, Config) -> Config. -end_per_testcase(_TestCase, _Config) -> +end_per_testcase(_TestCase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), emqx_common_test_helpers:call_janitor(60_000), ok. @@ -69,6 +95,13 @@ end_per_testcase(_TestCase, _Config) -> %%------------------------------------------------------------------------------------- check_send_message_with_bridge(BridgeName) -> + #{offset := Offset, payload := Payload} = send_message(BridgeName), + %% ###################################### + %% Check if message is sent to Kafka + %% ###################################### + check_kafka_message_payload(Offset, Payload). + +send_message(ActionName) -> %% ###################################### %% Create Kafka message %% ###################################### @@ -84,11 +117,8 @@ check_send_message_with_bridge(BridgeName) -> %% ###################################### %% Send message %% ###################################### - emqx_bridge_v2:send_message(?TYPE, BridgeName, Msg, #{}), - %% ###################################### - %% Check if message is sent to Kafka - %% ###################################### - check_kafka_message_payload(Offset, Payload). + emqx_bridge_v2:send_message(?TYPE, ActionName, Msg, #{}), + #{offset => Offset, payload => Payload}. resolve_kafka_offset() -> KafkaTopic = emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition(), @@ -106,6 +136,14 @@ check_kafka_message_payload(Offset, ExpectedPayload) -> {ok, {_, [KafkaMsg0]}} = brod:fetch(Hosts, KafkaTopic, Partition, Offset), ?assertMatch(#kafka_message{value = ExpectedPayload}, KafkaMsg0). +action_config(ConnectorName) -> + action_config(ConnectorName, _Overrides = #{}). 
+ +action_config(ConnectorName, Overrides) -> + Cfg0 = bridge_v2_config(ConnectorName), + Cfg1 = emqx_utils_maps:rename(<<"kafka">>, <<"parameters">>, Cfg0), + emqx_utils_maps:deep_merge(Cfg1, Overrides). + bridge_v2_config(ConnectorName) -> #{ <<"connector">> => ConnectorName, @@ -131,7 +169,9 @@ bridge_v2_config(ConnectorName) -> <<"query_mode">> => <<"sync">>, <<"required_acks">> => <<"all_isr">>, <<"sync_query_timeout">> => <<"5s">>, - <<"topic">> => emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition() + <<"topic">> => list_to_binary( + emqx_bridge_kafka_impl_producer_SUITE:test_topic_one_partition() + ) }, <<"local_topic">> => <<"kafka_t/#">>, <<"resource_opts">> => #{ @@ -140,32 +180,37 @@ bridge_v2_config(ConnectorName) -> }. connector_config() -> - #{ - <<"authentication">> => <<"none">>, - <<"bootstrap_hosts">> => iolist_to_binary(kafka_hosts_string()), - <<"connect_timeout">> => <<"5s">>, - <<"enable">> => true, - <<"metadata_request_timeout">> => <<"5s">>, - <<"min_metadata_refresh_interval">> => <<"3s">>, - <<"socket_opts">> => - #{ - <<"recbuf">> => <<"1024KB">>, - <<"sndbuf">> => <<"1024KB">>, - <<"tcp_keepalive">> => <<"none">> - }, - <<"ssl">> => - #{ - <<"ciphers">> => [], - <<"depth">> => 10, - <<"enable">> => false, - <<"hibernate_after">> => <<"5s">>, - <<"log_level">> => <<"notice">>, - <<"reuse_sessions">> => true, - <<"secure_renegotiate">> => true, - <<"verify">> => <<"verify_peer">>, - <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>] - } - }. + connector_config(_Overrides = #{}). 
+ +connector_config(Overrides) -> + Defaults = + #{ + <<"authentication">> => <<"none">>, + <<"bootstrap_hosts">> => iolist_to_binary(kafka_hosts_string()), + <<"connect_timeout">> => <<"5s">>, + <<"enable">> => true, + <<"metadata_request_timeout">> => <<"5s">>, + <<"min_metadata_refresh_interval">> => <<"3s">>, + <<"socket_opts">> => + #{ + <<"recbuf">> => <<"1024KB">>, + <<"sndbuf">> => <<"1024KB">>, + <<"tcp_keepalive">> => <<"none">> + }, + <<"ssl">> => + #{ + <<"ciphers">> => [], + <<"depth">> => 10, + <<"enable">> => false, + <<"hibernate_after">> => <<"5s">>, + <<"log_level">> => <<"notice">>, + <<"reuse_sessions">> => true, + <<"secure_renegotiate">> => true, + <<"verify">> => <<"verify_peer">>, + <<"versions">> => [<<"tlsv1.3">>, <<"tlsv1.2">>] + } + }, + emqx_utils_maps:deep_merge(Defaults, Overrides). kafka_hosts_string() -> KafkaHost = os:getenv("KAFKA_PLAIN_HOST", "kafka-1.emqx.net"), @@ -350,13 +395,13 @@ t_bad_url(_Config) -> {ok, #{ resource_data := #{ - status := connecting, + status := ?status_disconnected, error := [#{reason := unresolvable_hostname}] } }}, emqx_connector:lookup(?TYPE, ConnectorName) ), - ?assertMatch({ok, #{status := connecting}}, emqx_bridge_v2:lookup(?TYPE, ActionName)), + ?assertMatch({ok, #{status := ?status_disconnected}}, emqx_bridge_v2:lookup(?TYPE, ActionName)), ok. t_parameters_key_api_spec(_Config) -> @@ -383,3 +428,153 @@ t_http_api_get(_Config) -> emqx_bridge_testlib:list_bridges_api() ), ok. 
+ +t_create_connector_while_connection_is_down(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + KafkaHost = ?config(kafka_host, Config), + KafkaPort = ?config(kafka_port, Config), + Host = iolist_to_binary([KafkaHost, ":", integer_to_binary(KafkaPort)]), + ?check_trace( + begin + Type = ?TYPE, + ConnectorConfig = connector_config(#{ + <<"bootstrap_hosts">> => Host, + <<"resource_opts">> => + #{<<"health_check_interval">> => <<"500ms">>} + }), + ConnectorName = <<"c1">>, + ConnectorId = emqx_connector_resource:resource_id(Type, ConnectorName), + ConnectorParams = [ + {connector_config, ConnectorConfig}, + {connector_name, ConnectorName}, + {connector_type, Type} + ], + ActionName = ConnectorName, + ActionId = emqx_bridge_v2:id(?TYPE, ActionName, ConnectorName), + ActionConfig = action_config( + ConnectorName + ), + ActionParams = [ + {action_config, ActionConfig}, + {action_name, ActionName}, + {action_type, Type} + ], + Disconnected = atom_to_binary(?status_disconnected), + %% Initially, the connection cannot be stablished. Messages are not buffered, + %% hence the status is `?status_disconnected'. 
+ emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + {ok, {{_, 201, _}, _, #{<<"status">> := Disconnected}}} = + emqx_bridge_v2_testlib:create_connector_api(ConnectorParams), + {ok, {{_, 201, _}, _, #{<<"status">> := Disconnected}}} = + emqx_bridge_v2_testlib:create_action_api(ActionParams), + #{offset := Offset1} = send_message(ActionName), + #{offset := Offset2} = send_message(ActionName), + #{offset := Offset3} = send_message(ActionName), + ?assertEqual([Offset1], lists:usort([Offset1, Offset2, Offset3])), + ?assertEqual(3, emqx_resource_metrics:matched_get(ActionId)), + ?assertEqual(3, emqx_resource_metrics:failed_get(ActionId)), + ?assertEqual(0, emqx_resource_metrics:queuing_get(ActionId)), + ?assertEqual(0, emqx_resource_metrics:inflight_get(ActionId)), + ?assertEqual(0, emqx_resource_metrics:dropped_get(ActionId)), + ok + end), + %% Let the connector and action recover + Connected = atom_to_binary(?status_connected), + ?retry( + _Sleep0 = 1_100, + _Attempts0 = 10, + begin + _ = emqx_resource:health_check(ConnectorId), + _ = emqx_resource:health_check(ActionId), + ?assertMatch( + {ok, #{ + status := ?status_connected, + resource_data := + #{ + status := ?status_connected, + added_channels := + #{ + ActionId := #{ + status := ?status_connected + } + } + } + }}, + emqx_bridge_v2:lookup(Type, ActionName), + #{action_id => ActionId} + ), + ?assertMatch( + {ok, {{_, 200, _}, _, #{<<"status">> := Connected}}}, + emqx_bridge_v2_testlib:get_action_api(ActionParams) + ) + end + ), + %% Now the connection drops again; this time, status should be + %% `?status_connecting' to avoid destroying wolff_producers and their replayq + %% buffers. 
+ Connecting = atom_to_binary(?status_connecting), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?retry( + _Sleep0 = 1_100, + _Attempts0 = 10, + begin + _ = emqx_resource:health_check(ConnectorId), + _ = emqx_resource:health_check(ActionId), + ?assertMatch( + {ok, #{ + status := ?status_connecting, + resource_data := + #{ + status := ?status_connecting, + added_channels := + #{ + ActionId := #{ + status := ?status_connecting + } + } + } + }}, + emqx_bridge_v2:lookup(Type, ActionName), + #{action_id => ActionId} + ), + ?assertMatch( + {ok, {{_, 200, _}, _, #{<<"status">> := Connecting}}}, + emqx_bridge_v2_testlib:get_action_api(ActionParams) + ) + end + ), + %% This should get enqueued by wolff producers. + spawn_link(fun() -> send_message(ActionName) end), + PreviousMatched = 3, + PreviousFailed = 3, + ?retry( + _Sleep2 = 100, + _Attempts2 = 10, + ?assertEqual(PreviousMatched + 1, emqx_resource_metrics:matched_get(ActionId)) + ), + ?assertEqual(PreviousFailed, emqx_resource_metrics:failed_get(ActionId)), + ?assertEqual(1, emqx_resource_metrics:queuing_get(ActionId)), + ?assertEqual(0, emqx_resource_metrics:inflight_get(ActionId)), + ?assertEqual(0, emqx_resource_metrics:dropped_get(ActionId)), + ?assertEqual(0, emqx_resource_metrics:success_get(ActionId)), + ok + end), + ?retry( + _Sleep2 = 600, + _Attempts2 = 20, + begin + _ = emqx_resource:health_check(ConnectorId), + _ = emqx_resource:health_check(ActionId), + ?assertEqual(1, emqx_resource_metrics:success_get(ActionId), #{ + metrics => emqx_bridge_v2:get_metrics(Type, ActionName) + }), + ok + end + ), + ok + end, + [] + ), + ok. 
diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src index 6066e2495..74d7dc94f 100644 --- a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_kinesis, [ {description, "EMQX Enterprise Amazon Kinesis Bridge"}, - {vsn, "0.1.2"}, + {vsn, "0.1.3"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl index d98e7ab11..14e197113 100644 --- a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis.erl @@ -62,12 +62,10 @@ fields(connector_config) -> } )}, {aws_secret_access_key, - mk( - binary(), + emqx_schema_secret:mk( #{ required => true, - desc => ?DESC("aws_secret_access_key"), - sensitive => true + desc => ?DESC("aws_secret_access_key") } )}, {endpoint, diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl index d9dc0220f..959b539a0 100644 --- a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_connector_client.erl @@ -97,7 +97,13 @@ init(#{ partition_key => PartitionKey, stream_name => StreamName }, - New = + %% TODO: teach `erlcloud` to to accept 0-arity closures as passwords. 
+ ok = erlcloud_config:configure( + to_str(AwsAccessKey), + to_str(emqx_secret:unwrap(AwsSecretAccessKey)), + Host, + Port, + Scheme, fun(AccessKeyID, SecretAccessKey, HostAddr, HostPort, ConnectionScheme) -> Config0 = erlcloud_kinesis:new( AccessKeyID, @@ -107,9 +113,7 @@ init(#{ ConnectionScheme ++ "://" ), Config0#aws_config{retry_num = MaxRetries} - end, - erlcloud_config:configure( - to_str(AwsAccessKey), to_str(AwsSecretAccessKey), Host, Port, Scheme, New + end ), % check the connection case erlcloud_kinesis:list_streams() of diff --git a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl index 1e07ae96e..decf3e83b 100644 --- a/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl +++ b/apps/emqx_bridge_kinesis/src/emqx_bridge_kinesis_impl_producer.erl @@ -15,7 +15,7 @@ -type config() :: #{ aws_access_key_id := binary(), - aws_secret_access_key := binary(), + aws_secret_access_key := emqx_secret:t(binary()), endpoint := binary(), stream_name := binary(), partition_key := binary(), diff --git a/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl b/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl index ea926fc33..61b354ea3 100644 --- a/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl +++ b/apps/emqx_bridge_kinesis/test/emqx_bridge_kinesis_impl_producer_SUITE.erl @@ -11,10 +11,11 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). --define(PRODUCER, emqx_bridge_kinesis_impl_producer). -define(BRIDGE_TYPE, kinesis_producer). -define(BRIDGE_TYPE_BIN, <<"kinesis_producer">>). -define(KINESIS_PORT, 4566). +-define(KINESIS_ACCESS_KEY, "aws_access_key_id"). +-define(KINESIS_SECRET_KEY, "aws_secret_access_key"). -define(TOPIC, <<"t/topic">>). 
%%------------------------------------------------------------------------------ @@ -38,6 +39,8 @@ init_per_suite(Config) -> ProxyHost = os:getenv("PROXY_HOST", "toxiproxy.emqx.net"), ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), ProxyName = "kinesis", + SecretFile = filename:join(?config(priv_dir, Config), "secret"), + ok = file:write_file(SecretFile, <<?KINESIS_SECRET_KEY>>), ok = emqx_common_test_helpers:start_apps([emqx_conf]), ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]), {ok, _} = application:ensure_all_started(emqx_connector), @@ -46,6 +49,7 @@ init_per_suite(Config) -> {proxy_host, ProxyHost}, {proxy_port, ProxyPort}, {kinesis_port, ?KINESIS_PORT}, + {kinesis_secretfile, SecretFile}, {proxy_name, ProxyName} | Config ]. @@ -130,6 +134,7 @@ kinesis_config(Config) -> Scheme = proplists:get_value(connection_scheme, Config, "http"), ProxyHost = proplists:get_value(proxy_host, Config), KinesisPort = proplists:get_value(kinesis_port, Config), + SecretFile = proplists:get_value(kinesis_secretfile, Config), BatchSize = proplists:get_value(batch_size, Config, 100), BatchTime = proplists:get_value(batch_time, Config, <<"500ms">>), PayloadTemplate = proplists:get_value(payload_template, Config, "${payload}"), @@ -140,29 +145,32 @@ kinesis_config(Config) -> Name = <<(atom_to_binary(?MODULE))/binary, (GUID)/binary>>, ConfigString = io_lib:format( - "bridges.kinesis_producer.~s {\n" - " enable = true\n" - " aws_access_key_id = \"aws_access_key_id\"\n" - " aws_secret_access_key = \"aws_secret_access_key\"\n" - " endpoint = \"~s://~s:~b\"\n" - " stream_name = \"~s\"\n" - " partition_key = \"~s\"\n" - " payload_template = \"~s\"\n" - " max_retries = ~b\n" - " pool_size = 1\n" - " resource_opts = {\n" - " health_check_interval = \"3s\"\n" - " request_ttl = 30s\n" - " resume_interval = 1s\n" - " metrics_flush_interval = \"700ms\"\n" - " worker_pool_size = 1\n" - " query_mode = ~s\n" - " batch_size = ~b\n" - " batch_time = \"~s\"\n" - " 
}\n" - "}\n", + "bridges.kinesis_producer.~s {" + "\n enable = true" + "\n aws_access_key_id = ~p" + "\n aws_secret_access_key = ~p" + "\n endpoint = \"~s://~s:~b\"" + "\n stream_name = \"~s\"" + "\n partition_key = \"~s\"" + "\n payload_template = \"~s\"" + "\n max_retries = ~b" + "\n pool_size = 1" + "\n resource_opts = {" + "\n health_check_interval = \"3s\"" + "\n request_ttl = 30s" + "\n resume_interval = 1s" + "\n metrics_flush_interval = \"700ms\"" + "\n worker_pool_size = 1" + "\n query_mode = ~s" + "\n batch_size = ~b" + "\n batch_time = \"~s\"" + "\n }" + "\n }", [ Name, + ?KINESIS_ACCESS_KEY, + %% NOTE: using file-based secrets with HOCON configs. + "file://" ++ SecretFile, Scheme, ProxyHost, KinesisPort, @@ -203,9 +211,6 @@ delete_bridge(Config) -> ct:pal("deleting bridge ~p", [{Type, Name}]), emqx_bridge:remove(Type, Name). -create_bridge_http(Config) -> - create_bridge_http(Config, _KinesisConfigOverrides = #{}). - create_bridge_http(Config, KinesisConfigOverrides) -> TypeBin = ?BRIDGE_TYPE_BIN, Name = ?config(kinesis_name, Config), @@ -489,7 +494,11 @@ to_bin(Str) when is_list(Str) -> %%------------------------------------------------------------------------------ t_create_via_http(Config) -> - ?assertMatch({ok, _}, create_bridge_http(Config)), + Overrides = #{ + %% NOTE: using literal secret with HTTP API requests. + <<"aws_secret_access_key">> => <<?KINESIS_SECRET_KEY>> + }, + ?assertMatch({ok, _}, create_bridge_http(Config, Overrides)), ok. 
t_start_failed_then_fix(Config) -> diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src index 14aca1f75..479aa13df 100644 --- a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src @@ -1,13 +1,13 @@ {application, emqx_bridge_matrix, [ {description, "EMQX Enterprise MatrixDB Bridge"}, - {vsn, "0.1.2"}, + {vsn, "0.1.3"}, {registered, []}, {applications, [ kernel, stdlib, emqx_resource ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_matrix_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl index abd98adb6..4f7a1a370 100644 --- a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.erl @@ -3,6 +3,8 @@ %%-------------------------------------------------------------------- -module(emqx_bridge_matrix). +-include_lib("hocon/include/hoconsc.hrl"). + -export([ conn_bridge_examples/1 ]). @@ -14,6 +16,14 @@ desc/1 ]). +%% Examples +-export([ + bridge_v2_examples/1, + connector_examples/1 +]). + +-define(CONNECTOR_TYPE, matrix). + %% ------------------------------------------------------------------------------------------------- %% api @@ -22,7 +32,7 @@ conn_bridge_examples(Method) -> #{ <<"matrix">> => #{ summary => <<"Matrix Bridge">>, - value => emqx_bridge_pgsql:values(Method, matrix) + value => emqx_bridge_pgsql:values_conn_bridge_examples(Method, matrix) } } ]. @@ -35,8 +45,55 @@ roots() -> []. 
fields("post") -> emqx_bridge_pgsql:fields("post", matrix); +fields("config_connector") -> + emqx_bridge_pgsql:fields("config_connector"); +fields(action) -> + {matrix, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)), + #{ + desc => <<"Matrix Action Config">>, + required => false + } + )}; +fields("put_bridge_v2") -> + emqx_bridge_pgsql:fields(pgsql_action); +fields("get_bridge_v2") -> + emqx_bridge_pgsql:fields(pgsql_action); +fields("post_bridge_v2") -> + emqx_bridge_pgsql:fields(pgsql_action); +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + emqx_postgresql_connector_schema:fields({Field, ?CONNECTOR_TYPE}); fields(Method) -> emqx_bridge_pgsql:fields(Method). +desc("config_connector") -> + ?DESC(emqx_postgresql_connector_schema, "config_connector"); desc(_) -> undefined. + +%% Examples + +connector_examples(Method) -> + [ + #{ + <<"matrix">> => #{ + summary => <<"Matrix Connector">>, + value => emqx_postgresql_connector_schema:values({Method, <<"matrix">>}) + } + } + ]. + +bridge_v2_examples(Method) -> + [ + #{ + <<"matrix">> => #{ + summary => <<"Matrix Action">>, + value => emqx_bridge_pgsql:values({Method, matrix}) + } + } + ]. diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix_action_info.erl b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix_action_info.erl new file mode 100644 index 000000000..4eae13415 --- /dev/null +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_matrix_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> matrix. 
+ +action_type_name() -> matrix. + +connector_type_name() -> matrix. + +schema_module() -> emqx_bridge_matrix. diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src index 35bcc3fc4..f361d5276 100644 --- a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_mongodb, [ {description, "EMQX Enterprise MongoDB Bridge"}, - {vsn, "0.2.1"}, + {vsn, "0.2.2"}, {registered, []}, {applications, [ kernel, @@ -9,7 +9,7 @@ emqx_resource, emqx_mongodb ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_mongodb_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.erl b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.erl index b108f654f..796a4a4d1 100644 --- a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.erl +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.erl @@ -12,7 +12,9 @@ %% emqx_bridge_enterprise "callbacks" -export([ - conn_bridge_examples/1 + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 ]). %% hocon_schema callbacks @@ -23,14 +25,19 @@ desc/1 ]). +-define(CONNECTOR_TYPE, mongodb). + %%================================================================================================= %% hocon_schema API %%================================================================================================= +%% [TODO] Namespace should be different depending on whether this is used for a +%% connector, an action or a legacy bridge type. namespace() -> "bridge_mongodb". roots() -> + %% ??? []. 
fields("config") -> @@ -44,6 +51,20 @@ fields("config") -> #{required => true, desc => ?DESC(emqx_resource_schema, "creation_opts")} )} ]; +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + fields("connection_fields"); +fields("connection_fields") -> + [ + {parameters, + mk( + hoconsc:union([ + ref(emqx_mongodb, "connector_" ++ T) + || T <- ["single", "sharded", "rs"] + ]), + #{required => true, desc => ?DESC("mongodb_parameters")} + )} + ] ++ emqx_mongodb:fields(mongodb); fields("creation_opts") -> %% so far, mongodb connector does not support batching %% but we cannot delete this field due to compatibility reasons @@ -55,12 +76,45 @@ fields("creation_opts") -> desc => ?DESC("batch_size") }} ]); +fields(action) -> + {mongodb, + mk( + hoconsc:map(name, ref(?MODULE, mongodb_action)), + #{desc => <<"MongoDB Action Config">>, required => false} + )}; +fields(mongodb_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + mk(ref(?MODULE, action_parameters), #{ + required => true, desc => ?DESC(action_parameters) + }) + ); +fields(action_parameters) -> + [ + {collection, mk(binary(), #{desc => ?DESC("collection"), default => <<"mqtt">>})}, + {payload_template, mk(binary(), #{required => false, desc => ?DESC("payload_template")})} + ]; +fields(resource_opts) -> + fields("creation_opts"); fields(mongodb_rs) -> emqx_mongodb:fields(rs) ++ fields("config"); fields(mongodb_sharded) -> emqx_mongodb:fields(sharded) ++ fields("config"); fields(mongodb_single) -> emqx_mongodb:fields(single) ++ fields("config"); +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + emqx_connector_schema:api_fields(Field, ?CONNECTOR_TYPE, fields("connection_fields")); +fields("get_bridge_v2") -> + emqx_bridge_schema:status_fields() ++ + fields("post_bridge_v2"); +fields("post_bridge_v2") -> + type_and_name_fields(mongodb) ++ + fields(mongodb_action); +fields("put_bridge_v2") -> + fields(mongodb_action); 
fields("post_rs") -> fields(mongodb_rs) ++ type_and_name_fields(mongodb_rs); fields("post_sharded") -> @@ -86,6 +140,16 @@ fields("get_single") -> fields(mongodb_single) ++ type_and_name_fields(mongodb_single). +bridge_v2_examples(Method) -> + [ + #{ + <<"mongodb">> => #{ + summary => <<"MongoDB Action">>, + value => action_values(Method) + } + } + ]. + conn_bridge_examples(Method) -> [ #{ @@ -108,16 +172,46 @@ conn_bridge_examples(Method) -> } ]. +connector_examples(Method) -> + [ + #{ + <<"mongodb_rs">> => #{ + summary => <<"MongoDB Replica Set Connector">>, + value => connector_values(mongodb_rs, Method) + } + }, + #{ + <<"mongodb_sharded">> => #{ + summary => <<"MongoDB Sharded Connector">>, + value => connector_values(mongodb_sharded, Method) + } + }, + #{ + <<"mongodb_single">> => #{ + summary => <<"MongoDB Standalone Connector">>, + value => connector_values(mongodb_single, Method) + } + } + ]. + +desc("config_connector") -> + ?DESC("desc_config"); desc("config") -> ?DESC("desc_config"); desc("creation_opts") -> ?DESC(emqx_resource_schema, "creation_opts"); +desc(resource_opts) -> + ?DESC(emqx_resource_schema, "resource_opts"); desc(mongodb_rs) -> ?DESC(mongodb_rs_conf); desc(mongodb_sharded) -> ?DESC(mongodb_sharded_conf); desc(mongodb_single) -> ?DESC(mongodb_single_conf); +desc(mongodb_action) -> + ?DESC(mongodb_action); +desc(action_parameters) -> + ?DESC(action_parameters); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for MongoDB using `", string:to_upper(Method), "` method."]; desc(_) -> @@ -133,49 +227,103 @@ type_and_name_fields(MongoType) -> {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})} ]. 
-values(mongodb_rs = MongoType, Method) -> - TypeOpts = #{ +connector_values(Type, Method) -> + lists:foldl( + fun(M1, M2) -> + maps:merge(M1, M2) + end, + #{ + description => <<"My example connector">>, + parameters => mongo_type_opts(Type) + }, + [ + common_values(), + method_values(mongodb, Method) + ] + ). + +action_values(Method) -> + maps:merge( + method_values(mongodb, Method), + #{ + description => <<"My example action">>, + enable => true, + connector => <<"my_mongodb_connector">>, + parameters => #{ + collection => <<"mycol">> + } + } + ). + +values(MongoType, Method) -> + maps:merge( + mongo_type_opts(MongoType), + bridge_values(MongoType, Method) + ). + +mongo_type_opts(mongodb_rs) -> + #{ + mongo_type => <<"rs">>, servers => <<"localhost:27017, localhost:27018">>, w_mode => <<"safe">>, r_mode => <<"safe">>, replica_set_name => <<"rs">> - }, - values(common, MongoType, Method, TypeOpts); -values(mongodb_sharded = MongoType, Method) -> - TypeOpts = #{ + }; +mongo_type_opts(mongodb_sharded) -> + #{ + mongo_type => <<"sharded">>, servers => <<"localhost:27017, localhost:27018">>, w_mode => <<"safe">> - }, - values(common, MongoType, Method, TypeOpts); -values(mongodb_single = MongoType, Method) -> - TypeOpts = #{ + }; +mongo_type_opts(mongodb_single) -> + #{ + mongo_type => <<"single">>, server => <<"localhost:27017">>, w_mode => <<"safe">> - }, - values(common, MongoType, Method, TypeOpts). + }. -values(common, MongoType, Method, TypeOpts) -> - MongoTypeBin = atom_to_binary(MongoType), - Common = #{ - name => <>, - type => MongoTypeBin, +bridge_values(Type, _Method) -> + %% [FIXME] _Method makes a difference since PUT doesn't allow name and type + %% for connectors. + TypeBin = atom_to_binary(Type), + maps:merge( + #{ + name => <>, + type => TypeBin, + collection => <<"mycol">> + }, + common_values() + ). 
+ +common_values() -> + #{ enable => true, - collection => <<"mycol">>, database => <<"mqtt">>, srv_record => false, pool_size => 8, username => <<"myuser">>, password => <<"******">> - }, - MethodVals = method_values(MongoType, Method), - Vals0 = maps:merge(MethodVals, Common), - maps:merge(Vals0, TypeOpts). + }. -method_values(MongoType, _) -> - ConnectorType = - case MongoType of - mongodb_rs -> <<"rs">>; - mongodb_sharded -> <<"sharded">>; - mongodb_single -> <<"single">> - end, - #{mongo_type => ConnectorType}. +method_values(Type, post) -> + TypeBin = atom_to_binary(Type), + #{ + name => <>, + type => TypeBin + }; +method_values(Type, get) -> + maps:merge( + method_values(Type, post), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ], + actions => [<<"my_action">>] + } + ); +method_values(_Type, put) -> + #{}. diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_action_info.erl b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_action_info.erl new file mode 100644 index 000000000..8bbe5ff3a --- /dev/null +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_action_info.erl @@ -0,0 +1,95 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_mongodb_action_info). + +-behaviour(emqx_action_info). + +%% behaviour callbacks +-export([ + bridge_v1_config_to_action_config/2, + bridge_v1_config_to_connector_config/1, + connector_action_config_to_bridge_v1_config/2, + action_type_name/0, + bridge_v1_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +%% dynamic callback +-export([ + bridge_v1_type_name_fun/1 +]). + +-import(emqx_utils_conv, [bin/1]). + +-define(SCHEMA_MODULE, emqx_bridge_mongodb). 
+ +connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> + fix_v1_type( + maps:merge( + maps:without( + [<<"connector">>], + map_unindent(<<"parameters">>, ActionConfig) + ), + map_unindent(<<"parameters">>, ConnectorConfig) + ) + ). + +fix_v1_type(#{<<"mongo_type">> := MongoType} = Conf) -> + Conf#{<<"type">> => v1_type(MongoType)}. + +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + ActionTopLevelKeys = schema_keys(mongodb_action), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ActionConfig = make_config_map(ActionKeys, ActionParametersKeys, BridgeV1Config), + ActionConfig#{<<"connector">> => ConnectorName}. + +bridge_v1_config_to_connector_config(BridgeV1Config) -> + ActionTopLevelKeys = schema_keys(mongodb_action), + ActionParametersKeys = schema_keys(action_parameters), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ConnectorTopLevelKeys = schema_keys("config_connector"), + ConnectorKeys = maps:keys(BridgeV1Config) -- (ActionKeys -- ConnectorTopLevelKeys), + ConnectorParametersKeys = ConnectorKeys -- ConnectorTopLevelKeys, + make_config_map(ConnectorKeys, ConnectorParametersKeys, BridgeV1Config). + +make_config_map(PickKeys, IndentKeys, Config) -> + Conf0 = maps:with(PickKeys, Config), + map_indent(<<"parameters">>, IndentKeys, Conf0). + +bridge_v1_type_name() -> + {fun ?MODULE:bridge_v1_type_name_fun/1, bridge_v1_type_names()}. + +action_type_name() -> mongodb. + +connector_type_name() -> mongodb. + +schema_module() -> ?SCHEMA_MODULE. + +bridge_v1_type_names() -> [mongodb_rs, mongodb_sharded, mongodb_single]. + +bridge_v1_type_name_fun({#{<<"parameters">> := #{<<"mongo_type">> := MongoType}}, _}) -> + v1_type(MongoType). + +v1_type(<<"rs">>) -> mongodb_rs; +v1_type(<<"sharded">>) -> mongodb_sharded; +v1_type(<<"single">>) -> mongodb_single. 
+ +map_unindent(Key, Map) -> + maps:merge( + maps:get(Key, Map), + maps:remove(Key, Map) + ). + +map_indent(IndentKey, PickKeys, Map) -> + maps:put( + IndentKey, + maps:with(PickKeys, Map), + maps:without(PickKeys, Map) + ). + +schema_keys(Name) -> + [bin(Key) || Key <- proplists:get_keys(?SCHEMA_MODULE:fields(Name))]. diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_connector.erl b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_connector.erl index 8c004d829..d0ea93ebc 100644 --- a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_connector.erl +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb_connector.erl @@ -6,19 +6,19 @@ -behaviour(emqx_resource). --include_lib("emqx_connector/include/emqx_connector_tables.hrl"). --include_lib("emqx_resource/include/emqx_resource.hrl"). --include_lib("typerefl/include/types.hrl"). --include_lib("emqx/include/logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). %% `emqx_resource' API -export([ + on_remove_channel/3, callback_mode/0, - on_start/2, - on_stop/2, + on_add_channel/4, + on_get_channel_status/3, + on_get_channels/1, + on_get_status/2, on_query/3, - on_get_status/2 + on_start/2, + on_stop/2 ]). %%======================================================================================== @@ -27,44 +27,94 @@ callback_mode() -> emqx_mongodb:callback_mode(). 
-on_start(InstanceId, Config) -> +on_add_channel( + _InstanceId, + #{channels := Channels} = OldState, + ChannelId, + #{parameters := Parameters} = ChannelConfig0 +) -> + PayloadTemplate0 = maps:get(payload_template, Parameters, undefined), + PayloadTemplate = preprocess_template(PayloadTemplate0), + CollectionTemplateSource = maps:get(collection, Parameters), + CollectionTemplate = preprocess_template(CollectionTemplateSource), + ChannelConfig = maps:merge( + Parameters, + ChannelConfig0#{ + payload_template => PayloadTemplate, + collection_template => CollectionTemplate + } + ), + NewState = OldState#{channels => maps:put(ChannelId, ChannelConfig, Channels)}, + {ok, NewState}. + +on_get_channel_status(InstanceId, _ChannelId, State) -> + case on_get_status(InstanceId, State) of + connected -> + connected; + _ -> + connecting + end. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + +on_get_status(InstanceId, _State = #{connector_state := ConnectorState}) -> + emqx_mongodb:on_get_status(InstanceId, ConnectorState). + +on_query(InstanceId, {Channel, Message0}, #{channels := Channels, connector_state := ConnectorState}) -> + #{ + payload_template := PayloadTemplate, + collection_template := CollectionTemplate + } = ChannelState0 = maps:get(Channel, Channels), + ChannelState = ChannelState0#{ + collection => emqx_placeholder:proc_tmpl(CollectionTemplate, Message0) + }, + Message = render_message(PayloadTemplate, Message0), + Res = emqx_mongodb:on_query( + InstanceId, + {Channel, Message}, + maps:merge(ConnectorState, ChannelState) + ), + ?tp(mongo_bridge_connector_on_query_return, #{instance_id => InstanceId, result => Res}), + Res; +on_query(InstanceId, Request, _State = #{connector_state := ConnectorState}) -> + emqx_mongodb:on_query(InstanceId, Request, ConnectorState). 
+ +on_remove_channel(_InstanceId, #{channels := Channels} = State, ChannelId) -> + NewState = State#{channels => maps:remove(ChannelId, Channels)}, + {ok, NewState}. + +on_start(InstanceId, Config0) -> + Config = config_transform(Config0), case emqx_mongodb:on_start(InstanceId, Config) of {ok, ConnectorState} -> - PayloadTemplate0 = maps:get(payload_template, Config, undefined), - PayloadTemplate = preprocess_template(PayloadTemplate0), - CollectionTemplateSource = maps:get(collection, Config), - CollectionTemplate = preprocess_template(CollectionTemplateSource), State = #{ - payload_template => PayloadTemplate, - collection_template => CollectionTemplate, - connector_state => ConnectorState + connector_state => ConnectorState, + channels => #{} }, {ok, State}; Error -> Error end. +config_transform(#{parameters := #{mongo_type := MongoType} = Parameters} = Config) -> + maps:put( + type, + connector_type(MongoType), + maps:merge( + maps:remove(parameters, Config), + Parameters + ) + ). + +connector_type(rs) -> mongodb_rs; +connector_type(sharded) -> mongodb_sharded; +connector_type(single) -> mongodb_single. + on_stop(InstanceId, _State = #{connector_state := ConnectorState}) -> - emqx_mongodb:on_stop(InstanceId, ConnectorState). - -on_query(InstanceId, {send_message, Message0}, State) -> - #{ - payload_template := PayloadTemplate, - collection_template := CollectionTemplate, - connector_state := ConnectorState - } = State, - NewConnectorState = ConnectorState#{ - collection => emqx_placeholder:proc_tmpl(CollectionTemplate, Message0) - }, - Message = render_message(PayloadTemplate, Message0), - Res = emqx_mongodb:on_query(InstanceId, {send_message, Message}, NewConnectorState), - ?tp(mongo_bridge_connector_on_query_return, #{result => Res}), - Res; -on_query(InstanceId, Request, _State = #{connector_state := ConnectorState}) -> - emqx_mongodb:on_query(InstanceId, Request, ConnectorState). 
- -on_get_status(InstanceId, _State = #{connector_state := ConnectorState}) -> - emqx_mongodb:on_get_status(InstanceId, ConnectorState). + ok = emqx_mongodb:on_stop(InstanceId, ConnectorState), + ?tp(mongodb_stopped, #{instance_id => InstanceId}), + ok. %%======================================================================================== %% Helper fns diff --git a/apps/emqx_bridge_mongodb/test/emqx_bridge_mongodb_SUITE.erl b/apps/emqx_bridge_mongodb/test/emqx_bridge_mongodb_SUITE.erl index f2d0bc1c5..d87e1665f 100644 --- a/apps/emqx_bridge_mongodb/test/emqx_bridge_mongodb_SUITE.erl +++ b/apps/emqx_bridge_mongodb/test/emqx_bridge_mongodb_SUITE.erl @@ -11,6 +11,8 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-import(emqx_utils_conv, [bin/1]). + %%------------------------------------------------------------------------------ %% CT boilerplate %%------------------------------------------------------------------------------ @@ -96,14 +98,27 @@ init_per_group(Type = single, Config) -> true -> ok = start_apps(), emqx_mgmt_api_test_util:init_suite(), - {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type, Config), + %% NOTE: `mongo-single` has auth enabled, see `credentials.env`. 
+ AuthSource = bin(os:getenv("MONGO_AUTHSOURCE", "admin")), + Username = bin(os:getenv("MONGO_USERNAME", "")), + Password = bin(os:getenv("MONGO_PASSWORD", "")), + Passfile = filename:join(?config(priv_dir, Config), "passfile"), + ok = file:write_file(Passfile, Password), + NConfig = [ + {mongo_authsource, AuthSource}, + {mongo_username, Username}, + {mongo_password, Password}, + {mongo_passfile, Passfile} + | Config + ], + {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type, NConfig), [ {mongo_host, MongoHost}, {mongo_port, MongoPort}, {mongo_config, MongoConfig}, {mongo_type, Type}, {mongo_name, Name} - | Config + | NConfig ]; false -> {skip, no_mongo} @@ -117,18 +132,29 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_mgmt_api_test_util:end_suite(), - ok = emqx_common_test_helpers:stop_apps([emqx_mongodb, emqx_bridge, emqx_rule_engine, emqx_conf]), + ok = emqx_common_test_helpers:stop_apps( + [ + emqx_management, + emqx_bridge_mongodb, + emqx_mongodb, + emqx_bridge, + emqx_connector, + emqx_rule_engine, + emqx_conf + ] + ), ok. init_per_testcase(_Testcase, Config) -> - catch clear_db(Config), + clear_db(Config), delete_bridge(Config), snabbkaffe:start_trace(), Config. end_per_testcase(_Testcase, Config) -> - catch clear_db(Config), + clear_db(Config), delete_bridge(Config), + [] = emqx_connector:list(), snabbkaffe:stop(), ok. @@ -142,9 +168,17 @@ start_apps() -> %% we want to make sure they are loaded before %% ekka start in emqx_common_test_helpers:start_apps/1 emqx_common_test_helpers:render_and_load_app_config(emqx_conf), - ok = emqx_common_test_helpers:start_apps([ - emqx_conf, emqx_rule_engine, emqx_bridge, emqx_mongodb - ]). + ok = emqx_common_test_helpers:start_apps( + [ + emqx_conf, + emqx_rule_engine, + emqx_connector, + emqx_bridge, + emqx_mongodb, + emqx_bridge_mongodb, + emqx_management + ] + ). 
ensure_loaded() -> _ = application:load(emqtt), @@ -175,19 +209,20 @@ mongo_config(MongoHost, MongoPort0, rs = Type, Config) -> Name = atom_to_binary(?MODULE), ConfigString = io_lib:format( - "bridges.mongodb_rs.~s {\n" - " enable = true\n" - " collection = mycol\n" - " replica_set_name = rs0\n" - " servers = [~p]\n" - " w_mode = safe\n" - " use_legacy_protocol = auto\n" - " database = mqtt\n" - " resource_opts = {\n" - " query_mode = ~s\n" - " worker_pool_size = 1\n" - " }\n" - "}", + "bridges.mongodb_rs.~s {" + "\n enable = true" + "\n collection = mycol" + "\n replica_set_name = rs0" + "\n servers = [~p]" + "\n w_mode = safe" + "\n use_legacy_protocol = auto" + "\n database = mqtt" + "\n mongo_type = rs" + "\n resource_opts = {" + "\n query_mode = ~s" + "\n worker_pool_size = 1" + "\n }" + "\n }", [ Name, Servers, @@ -202,18 +237,19 @@ mongo_config(MongoHost, MongoPort0, sharded = Type, Config) -> Name = atom_to_binary(?MODULE), ConfigString = io_lib:format( - "bridges.mongodb_sharded.~s {\n" - " enable = true\n" - " collection = mycol\n" - " servers = [~p]\n" - " w_mode = safe\n" - " use_legacy_protocol = auto\n" - " database = mqtt\n" - " resource_opts = {\n" - " query_mode = ~s\n" - " worker_pool_size = 1\n" - " }\n" - "}", + "bridges.mongodb_sharded.~s {" + "\n enable = true" + "\n collection = mycol" + "\n servers = [~p]" + "\n w_mode = safe" + "\n use_legacy_protocol = auto" + "\n database = mqtt" + "\n mongo_type = sharded" + "\n resource_opts = {" + "\n query_mode = ~s" + "\n worker_pool_size = 1" + "\n }" + "\n }", [ Name, Servers, @@ -228,21 +264,28 @@ mongo_config(MongoHost, MongoPort0, single = Type, Config) -> Name = atom_to_binary(?MODULE), ConfigString = io_lib:format( - "bridges.mongodb_single.~s {\n" - " enable = true\n" - " collection = mycol\n" - " server = ~p\n" - " w_mode = safe\n" - " use_legacy_protocol = auto\n" - " database = mqtt\n" - " resource_opts = {\n" - " query_mode = ~s\n" - " worker_pool_size = 1\n" - " }\n" - "}", + 
"bridges.mongodb_single.~s {" + "\n enable = true" + "\n collection = mycol" + "\n server = ~p" + "\n w_mode = safe" + "\n use_legacy_protocol = auto" + "\n database = mqtt" + "\n auth_source = ~s" + "\n username = ~s" + "\n password = \"file://~s\"" + "\n mongo_type = single" + "\n resource_opts = {" + "\n query_mode = ~s" + "\n worker_pool_size = 1" + "\n }" + "\n }", [ Name, Server, + ?config(mongo_authsource, Config), + ?config(mongo_username, Config), + ?config(mongo_passfile, Config), QueryMode ] ), @@ -269,13 +312,17 @@ create_bridge(Config, Overrides) -> delete_bridge(Config) -> Type = mongo_type_bin(?config(mongo_type, Config)), Name = ?config(mongo_name, Config), - emqx_bridge:remove(Type, Name). + emqx_bridge:check_deps_and_remove(Type, Name, [connector, rule_actions]). create_bridge_http(Params) -> Path = emqx_mgmt_api_test_util:api_path(["bridges"]), AuthHeader = emqx_mgmt_api_test_util:auth_header_(), - case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of - {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + case + emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, #{ + return_all => true + }) + of + {ok, {{_, 201, _}, _, Body}} -> {ok, emqx_utils_json:decode(Body, [return_maps])}; Error -> Error end. 
@@ -284,8 +331,24 @@ clear_db(Config) -> Host = ?config(mongo_host, Config), Port = ?config(mongo_port, Config), Server = Host ++ ":" ++ integer_to_list(Port), - #{<<"database">> := Db, <<"collection">> := Collection} = ?config(mongo_config, Config), - {ok, Client} = mongo_api:connect(Type, [Server], [], [{database, Db}, {w_mode, unsafe}]), + #{ + <<"database">> := Db, + <<"collection">> := Collection + } = ?config(mongo_config, Config), + WorkerOpts = [ + {database, Db}, + {w_mode, unsafe} + | lists:flatmap( + fun + ({mongo_authsource, AS}) -> [{auth_source, AS}]; + ({mongo_username, User}) -> [{login, User}]; + ({mongo_password, Pass}) -> [{password, Pass}]; + (_) -> [] + end, + Config + ) + ], + {ok, Client} = mongo_api:connect(Type, [Server], [], WorkerOpts), {true, _} = mongo_api:delete(Client, Collection, _Selector = #{}), mongo_api:disconnect(Client). @@ -386,13 +449,21 @@ t_setup_via_config_and_publish(Config) -> ok. t_setup_via_http_api_and_publish(Config) -> - Type = mongo_type_bin(?config(mongo_type, Config)), + Type = ?config(mongo_type, Config), Name = ?config(mongo_name, Config), MongoConfig0 = ?config(mongo_config, Config), - MongoConfig = MongoConfig0#{ + MongoConfig1 = MongoConfig0#{ <<"name">> => Name, - <<"type">> => Type + <<"type">> => mongo_type_bin(Type) }, + MongoConfig = + case Type of + single -> + %% NOTE: using literal password with HTTP API requests. + MongoConfig1#{<<"password">> => ?config(mongo_password, Config)}; + _ -> + MongoConfig1 + end, ?assertMatch( {ok, _}, create_bridge_http(MongoConfig) @@ -519,8 +590,8 @@ t_get_status_server_selection_too_short(Config) -> ok. 
t_use_legacy_protocol_option(Config) -> - ResourceID = resource_id(Config), {ok, _} = create_bridge(Config, #{<<"use_legacy_protocol">> => <<"true">>}), + ResourceID = resource_id(Config), ?retry( _Interval0 = 200, _NAttempts0 = 20, diff --git a/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl b/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl new file mode 100644 index 000000000..9fd13c50b --- /dev/null +++ b/apps/emqx_bridge_mongodb/test/emqx_bridge_v2_mongodb_SUITE.erl @@ -0,0 +1,232 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_bridge_v2_mongodb_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-define(BRIDGE_TYPE, mongodb). +-define(BRIDGE_TYPE_BIN, <<"mongodb">>). +-define(CONNECTOR_TYPE, mongodb). +-define(CONNECTOR_TYPE_BIN, <<"mongodb">>). + +-import(emqx_common_test_helpers, [on_exit/1]). +-import(emqx_utils_conv, [bin/1]). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + emqx_common_test_helpers:all(?MODULE). 
+ +init_per_suite(Config) -> + MongoHost = os:getenv("MONGO_SINGLE_HOST", "mongo"), + MongoPort = list_to_integer(os:getenv("MONGO_SINGLE_PORT", "27017")), + case emqx_common_test_helpers:is_tcp_server_available(MongoHost, MongoPort) of + true -> + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_conf, + emqx_connector, + emqx_bridge, + emqx_bridge_mongodb, + emqx_rule_engine, + emqx_management, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} + ], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), + {ok, Api} = emqx_common_test_http:create_default_app(), + [ + {apps, Apps}, + {api, Api}, + {mongo_host, MongoHost}, + {mongo_port, MongoPort} + | Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_mongo); + _ -> + {skip, no_mongo} + end + end. + +end_per_suite(Config) -> + Apps = ?config(apps, Config), + emqx_cth_suite:stop(Apps), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). + +common_init_per_testcase(TestCase, Config) -> + ct:timetrap(timer:seconds(60)), + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + emqx_config:delete_override_conf_files(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + Name = iolist_to_binary([atom_to_binary(TestCase), UniqueNum]), + AuthSource = bin(os:getenv("MONGO_AUTHSOURCE", "admin")), + Username = bin(os:getenv("MONGO_USERNAME", "")), + Password = bin(os:getenv("MONGO_PASSWORD", "")), + Passfile = filename:join(?config(priv_dir, Config), "passfile"), + ok = file:write_file(Passfile, Password), + NConfig = [ + {mongo_authsource, AuthSource}, + {mongo_username, Username}, + {mongo_password, Password}, + {mongo_passfile, Passfile} + | Config + ], + ConnectorConfig = connector_config(Name, NConfig), + BridgeConfig = bridge_config(Name, Name), + ok = snabbkaffe:start_trace(), + [ + {connector_type, ?CONNECTOR_TYPE}, + {connector_name, Name}, + {connector_config, ConnectorConfig}, + {bridge_type, ?BRIDGE_TYPE}, + 
{bridge_name, Name}, + {bridge_config, BridgeConfig} + | NConfig + ]. + +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + emqx_bridge_v2_testlib:delete_all_bridges_and_connectors(), + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +connector_config(Name, Config) -> + MongoHost = ?config(mongo_host, Config), + MongoPort = ?config(mongo_port, Config), + AuthSource = ?config(mongo_authsource, Config), + Username = ?config(mongo_username, Config), + PassFile = ?config(mongo_passfile, Config), + InnerConfigMap0 = + #{ + <<"enable">> => true, + <<"database">> => <<"mqtt">>, + <<"parameters">> => + #{ + <<"mongo_type">> => <<"single">>, + <<"server">> => iolist_to_binary([MongoHost, ":", integer_to_binary(MongoPort)]), + <<"w_mode">> => <<"safe">> + }, + <<"pool_size">> => 8, + <<"srv_record">> => false, + <<"username">> => Username, + <<"password">> => iolist_to_binary(["file://", PassFile]), + <<"auth_source">> => AuthSource + }, + InnerConfigMap = serde_roundtrip(InnerConfigMap0), + parse_and_check_connector_config(InnerConfigMap, Name). + +parse_and_check_connector_config(InnerConfigMap, Name) -> + TypeBin = ?CONNECTOR_TYPE_BIN, + RawConf = #{<<"connectors">> => #{TypeBin => #{Name => InnerConfigMap}}}, + #{<<"connectors">> := #{TypeBin := #{Name := Config}}} = + hocon_tconf:check_plain(emqx_connector_schema, RawConf, #{ + required => false, atom_key => false + }), + ct:pal("parsed config: ~p", [Config]), + InnerConfigMap. 
+ +bridge_config(Name, ConnectorId) -> + InnerConfigMap0 = + #{ + <<"enable">> => true, + <<"connector">> => ConnectorId, + <<"parameters">> => + #{}, + <<"local_topic">> => <<"t/aeh">> + %%, + }, + InnerConfigMap = serde_roundtrip(InnerConfigMap0), + parse_and_check_bridge_config(InnerConfigMap, Name). + +%% check it serializes correctly +serde_roundtrip(InnerConfigMap0) -> + IOList = hocon_pp:do(InnerConfigMap0, #{}), + {ok, InnerConfigMap} = hocon:binary(IOList), + InnerConfigMap. + +parse_and_check_bridge_config(InnerConfigMap, Name) -> + TypeBin = ?BRIDGE_TYPE_BIN, + RawConf = #{<<"bridges">> => #{TypeBin => #{Name => InnerConfigMap}}}, + hocon_tconf:check_plain(emqx_bridge_v2_schema, RawConf, #{required => false, atom_key => false}), + InnerConfigMap. + +shared_secret_path() -> + os:getenv("CI_SHARED_SECRET_PATH", "/var/lib/secret"). + +shared_secret(client_keyfile) -> + filename:join([shared_secret_path(), "client.key"]); +shared_secret(client_certfile) -> + filename:join([shared_secret_path(), "client.crt"]); +shared_secret(client_cacertfile) -> + filename:join([shared_secret_path(), "ca.crt"]); +shared_secret(rig_keytab) -> + filename:join([shared_secret_path(), "rig.keytab"]). + +make_message() -> + Time = erlang:unique_integer(), + BinTime = integer_to_binary(Time), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + #{ + clientid => BinTime, + payload => Payload, + timestamp => Time + }. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_start_stop(Config) -> + emqx_bridge_v2_testlib:t_start_stop(Config, mongodb_stopped), + ok. + +t_create_via_http(Config) -> + emqx_bridge_v2_testlib:t_create_via_http(Config), + ok. + +t_on_get_status(Config) -> + emqx_bridge_v2_testlib:t_on_get_status(Config, #{failure_status => connecting}), + ok. 
+ +t_sync_query(Config) -> + ok = emqx_bridge_v2_testlib:t_sync_query( + Config, + fun make_message/0, + fun(Res) -> ?assertEqual(ok, Res) end, + mongo_bridge_connector_on_query_return + ), + ok. diff --git a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src index cbef0dda8..716626bdf 100644 --- a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src +++ b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge_mqtt, [ {description, "EMQX MQTT Broker Bridge"}, - {vsn, "0.1.5"}, + {vsn, "0.1.6"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector.erl b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector.erl index eb81c4b6e..61e9353ce 100644 --- a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector.erl +++ b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector.erl @@ -96,7 +96,7 @@ choose_ingress_pool_size( #{remote := #{topic := RemoteTopic}, pool_size := PoolSize} ) -> case emqx_topic:parse(RemoteTopic) of - {_Filter, #{share := _Name}} -> + {#share{} = _Filter, _SubOpts} -> % NOTE: this is shared subscription, many workers may subscribe PoolSize; {_Filter, #{}} when PoolSize > 1 -> @@ -326,7 +326,7 @@ mk_client_opts( ], Config ), - Options#{ + mk_client_opt_password(Options#{ hosts => [HostPort], clientid => clientid(ResourceId, ClientScope, Config), connect_timeout => 30, @@ -334,7 +334,13 @@ mk_client_opts( force_ping => true, ssl => EnableSsl, ssl_opts => maps:to_list(maps:remove(enable, Ssl)) - }. + }). + +mk_client_opt_password(Options = #{password := Secret}) -> + %% TODO: Teach `emqtt` to accept 0-arity closures as passwords. + Options#{password := emqx_secret:unwrap(Secret)}; +mk_client_opt_password(Options) -> + Options. ms_to_s(Ms) -> erlang:ceil(Ms / 1000). 
diff --git a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector_schema.erl b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector_schema.erl index f671bec71..32f9e9295 100644 --- a/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector_schema.erl +++ b/apps/emqx_bridge_mqtt/src/emqx_bridge_mqtt_connector_schema.erl @@ -99,13 +99,9 @@ fields("server_configs") -> } )}, {password, - mk( - binary(), + emqx_schema_secret:mk( #{ - format => <<"password">>, - sensitive => true, - desc => ?DESC("password"), - converter => fun emqx_schema:password_converter/2 + desc => ?DESC("password") } )}, {clean_start, diff --git a/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE.erl b/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE.erl index 986a755d5..bde546bd0 100644 --- a/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE.erl +++ b/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE.erl @@ -21,13 +21,15 @@ -import(emqx_dashboard_api_test_helpers, [request/4, uri/1]). -include("emqx/include/emqx.hrl"). +-include("emqx/include/emqx_hooks.hrl"). +-include("emqx/include/asserts.hrl"). -include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). %% output functions -export([inspect/3]). --define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>). -define(TYPE_MQTT, <<"mqtt">>). -define(BRIDGE_NAME_INGRESS, <<"ingress_mqtt_bridge">>). -define(BRIDGE_NAME_EGRESS, <<"egress_mqtt_bridge">>). @@ -38,14 +40,18 @@ -define(EGRESS_REMOTE_TOPIC, "egress_remote_topic"). -define(EGRESS_LOCAL_TOPIC, "egress_local_topic"). --define(SERVER_CONF(Username), #{ +-define(SERVER_CONF, #{ + <<"type">> => ?TYPE_MQTT, <<"server">> => <<"127.0.0.1:1883">>, - <<"username">> => Username, - <<"password">> => <<"">>, <<"proto_ver">> => <<"v4">>, <<"ssl">> => #{<<"enable">> => false} }). +-define(SERVER_CONF(Username, Password), (?SERVER_CONF)#{ + <<"username">> => Username, + <<"password">> => Password +}). 
+ -define(INGRESS_CONF, #{ <<"remote">> => #{ <<"topic">> => <>, @@ -129,43 +135,32 @@ suite() -> [{timetrap, {seconds, 30}}]. init_per_suite(Config) -> - _ = application:load(emqx_conf), - ok = emqx_common_test_helpers:start_apps( + Apps = emqx_cth_suite:start( [ - emqx_rule_engine, + emqx_conf, emqx_bridge, + emqx_rule_engine, emqx_bridge_mqtt, - emqx_dashboard + {emqx_dashboard, + "dashboard {" + "\n listeners.http { bind = 18083 }" + "\n default_username = connector_admin" + "\n default_password = public" + "\n }"} ], - fun set_special_configs/1 + #{work_dir => emqx_cth_suite:work_dir(Config)} ), - ok = emqx_common_test_helpers:load_config( - emqx_rule_engine_schema, - <<"rule_engine {rules {}}">> - ), - ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?BRIDGE_CONF_DEFAULT), - Config. + [{suite_apps, Apps} | Config]. -end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([ - emqx_dashboard, - emqx_bridge_mqtt, - emqx_bridge, - emqx_rule_engine - ]), - ok. - -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(<<"connector_admin">>); -set_special_configs(_) -> - ok. +end_per_suite(Config) -> + emqx_cth_suite:stop(?config(suite_apps, Config)). init_per_testcase(_, Config) -> - {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000), ok = snabbkaffe:start_trace(), Config. end_per_testcase(_, _Config) -> + ok = unhook_authenticate(), clear_resources(), snabbkaffe:stop(), ok. 
@@ -187,14 +182,86 @@ clear_resources() -> %%------------------------------------------------------------------------------ %% Testcases %%------------------------------------------------------------------------------ + +t_conf_bridge_authn_anonymous(_) -> + ok = hook_authenticate(), + {ok, 201, _Bridge} = request( + post, + uri(["bridges"]), + ?SERVER_CONF#{ + <<"name">> => <<"t_conf_bridge_anonymous">>, + <<"ingress">> => ?INGRESS_CONF#{<<"pool_size">> => 1} + } + ), + ?assertReceive( + {authenticate, #{username := undefined, password := undefined}} + ). + +t_conf_bridge_authn_password(_) -> + Username1 = <<"user1">>, + Password1 = <<"from-here">>, + ok = hook_authenticate(), + {ok, 201, _Bridge1} = request( + post, + uri(["bridges"]), + ?SERVER_CONF(Username1, Password1)#{ + <<"name">> => <<"t_conf_bridge_authn_password">>, + <<"ingress">> => ?INGRESS_CONF#{<<"pool_size">> => 1} + } + ), + ?assertReceive( + {authenticate, #{username := Username1, password := Password1}} + ). + +t_conf_bridge_authn_passfile(Config) -> + DataDir = ?config(data_dir, Config), + Username2 = <<"user2">>, + PasswordFilename = filename:join(DataDir, "password"), + Password2 = <<"from-there">>, + ok = hook_authenticate(), + {ok, 201, _Bridge2} = request( + post, + uri(["bridges"]), + ?SERVER_CONF(Username2, iolist_to_binary(["file://", PasswordFilename]))#{ + <<"name">> => <<"t_conf_bridge_authn_passfile">>, + <<"ingress">> => ?INGRESS_CONF#{<<"pool_size">> => 1} + } + ), + ?assertReceive( + {authenticate, #{username := Username2, password := Password2}} + ), + ?assertMatch( + {ok, 201, #{ + <<"status">> := <<"disconnected">>, + <<"status_reason">> := <<"#{msg => failed_to_read_secret_file", _/bytes>> + }}, + request_json( + post, + uri(["bridges"]), + ?SERVER_CONF(<<>>, <<"file://im/pretty/sure/theres/no/such/file">>)#{ + <<"name">> => <<"t_conf_bridge_authn_no_passfile">> + } + ) + ). 
+ +hook_authenticate() -> + emqx_hooks:add('client.authenticate', {?MODULE, authenticate, [self()]}, ?HP_HIGHEST). + +unhook_authenticate() -> + emqx_hooks:del('client.authenticate', {?MODULE, authenticate}). + +authenticate(Credential, _, TestRunnerPid) -> + _ = TestRunnerPid ! {authenticate, Credential}, + ignore. + +%%------------------------------------------------------------------------------ + t_mqtt_conn_bridge_ingress(_) -> - User1 = <<"user1">>, %% create an MQTT bridge, using POST {ok, 201, Bridge} = request( post, uri(["bridges"]), - ServerConf = ?SERVER_CONF(User1)#{ - <<"type">> => ?TYPE_MQTT, + ServerConf = ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_INGRESS, <<"ingress">> => ?INGRESS_CONF } @@ -249,7 +316,6 @@ t_mqtt_conn_bridge_ingress(_) -> ok. t_mqtt_conn_bridge_ingress_full_context(_Config) -> - User1 = <<"user1">>, IngressConf = emqx_utils_maps:deep_merge( ?INGRESS_CONF, @@ -258,8 +324,7 @@ t_mqtt_conn_bridge_ingress_full_context(_Config) -> {ok, 201, _Bridge} = request( post, uri(["bridges"]), - ?SERVER_CONF(User1)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_INGRESS, <<"ingress">> => IngressConf } @@ -297,8 +362,7 @@ t_mqtt_conn_bridge_ingress_shared_subscription(_) -> Ns = lists:seq(1, 10), BridgeName = atom_to_binary(?FUNCTION_NAME), BridgeID = create_bridge( - ?SERVER_CONF(<<>>)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => BridgeName, <<"ingress">> => #{ <<"pool_size">> => PoolSize, @@ -337,8 +401,7 @@ t_mqtt_conn_bridge_ingress_shared_subscription(_) -> t_mqtt_egress_bridge_ignores_clean_start(_) -> BridgeName = atom_to_binary(?FUNCTION_NAME), BridgeID = create_bridge( - ?SERVER_CONF(<<"user1">>)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => BridgeName, <<"egress">> => ?EGRESS_CONF, <<"clean_start">> => false @@ -366,8 +429,7 @@ t_mqtt_egress_bridge_ignores_clean_start(_) -> t_mqtt_conn_bridge_ingress_downgrades_qos_2(_) -> BridgeName = atom_to_binary(?FUNCTION_NAME), BridgeID = 
create_bridge( - ?SERVER_CONF(<<"user1">>)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => BridgeName, <<"ingress">> => emqx_utils_maps:deep_merge( ?INGRESS_CONF, @@ -392,9 +454,8 @@ t_mqtt_conn_bridge_ingress_downgrades_qos_2(_) -> ok. t_mqtt_conn_bridge_ingress_no_payload_template(_) -> - User1 = <<"user1">>, BridgeIDIngress = create_bridge( - ?SERVER_CONF(User1)#{ + ?SERVER_CONF#{ <<"type">> => ?TYPE_MQTT, <<"name">> => ?BRIDGE_NAME_INGRESS, <<"ingress">> => ?INGRESS_CONF_NO_PAYLOAD_TEMPLATE @@ -428,10 +489,8 @@ t_mqtt_conn_bridge_ingress_no_payload_template(_) -> t_mqtt_conn_bridge_egress(_) -> %% then we add a mqtt connector, using POST - User1 = <<"user1">>, BridgeIDEgress = create_bridge( - ?SERVER_CONF(User1)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_EGRESS, <<"egress">> => ?EGRESS_CONF } @@ -473,11 +532,8 @@ t_mqtt_conn_bridge_egress(_) -> t_mqtt_conn_bridge_egress_no_payload_template(_) -> %% then we add a mqtt connector, using POST - User1 = <<"user1">>, - BridgeIDEgress = create_bridge( - ?SERVER_CONF(User1)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_EGRESS, <<"egress">> => ?EGRESS_CONF_NO_PAYLOAD_TEMPLATE } @@ -520,11 +576,9 @@ t_mqtt_conn_bridge_egress_no_payload_template(_) -> ok. t_egress_custom_clientid_prefix(_Config) -> - User1 = <<"user1">>, BridgeIDEgress = create_bridge( - ?SERVER_CONF(User1)#{ + ?SERVER_CONF#{ <<"clientid_prefix">> => <<"my-custom-prefix">>, - <<"type">> => ?TYPE_MQTT, <<"name">> => ?BRIDGE_NAME_EGRESS, <<"egress">> => ?EGRESS_CONF } @@ -545,17 +599,14 @@ t_egress_custom_clientid_prefix(_Config) -> ok. 
t_mqtt_conn_bridge_ingress_and_egress(_) -> - User1 = <<"user1">>, BridgeIDIngress = create_bridge( - ?SERVER_CONF(User1)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_INGRESS, <<"ingress">> => ?INGRESS_CONF } ), BridgeIDEgress = create_bridge( - ?SERVER_CONF(User1)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_EGRESS, <<"egress">> => ?EGRESS_CONF } @@ -627,8 +678,7 @@ t_mqtt_conn_bridge_ingress_and_egress(_) -> t_ingress_mqtt_bridge_with_rules(_) -> BridgeIDIngress = create_bridge( - ?SERVER_CONF(<<"user1">>)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_INGRESS, <<"ingress">> => ?INGRESS_CONF } @@ -712,8 +762,7 @@ t_ingress_mqtt_bridge_with_rules(_) -> t_egress_mqtt_bridge_with_rules(_) -> BridgeIDEgress = create_bridge( - ?SERVER_CONF(<<"user1">>)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_EGRESS, <<"egress">> => ?EGRESS_CONF } @@ -789,10 +838,8 @@ t_egress_mqtt_bridge_with_rules(_) -> t_mqtt_conn_bridge_egress_reconnect(_) -> %% then we add a mqtt connector, using POST - User1 = <<"user1">>, BridgeIDEgress = create_bridge( - ?SERVER_CONF(User1)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_EGRESS, <<"egress">> => ?EGRESS_CONF, <<"resource_opts">> => #{ @@ -897,10 +944,8 @@ t_mqtt_conn_bridge_egress_reconnect(_) -> ok. t_mqtt_conn_bridge_egress_async_reconnect(_) -> - User1 = <<"user1">>, BridgeIDEgress = create_bridge( - ?SERVER_CONF(User1)#{ - <<"type">> => ?TYPE_MQTT, + ?SERVER_CONF#{ <<"name">> => ?BRIDGE_NAME_EGRESS, <<"egress">> => ?EGRESS_CONF, <<"resource_opts">> => #{ @@ -1018,5 +1063,9 @@ request_bridge_metrics(BridgeID) -> {ok, 200, BridgeMetrics} = request(get, uri(["bridges", BridgeID, "metrics"]), []), emqx_utils_json:decode(BridgeMetrics). +request_json(Method, Url, Body) -> + {ok, Code, Response} = request(Method, Url, Body), + {ok, Code, emqx_utils_json:decode(Response)}. 
+ request(Method, Url, Body) -> request(<<"connector_admin">>, Method, Url, Body). diff --git a/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE_data/password b/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE_data/password new file mode 100644 index 000000000..d68418fda --- /dev/null +++ b/apps/emqx_bridge_mqtt/test/emqx_bridge_mqtt_SUITE_data/password @@ -0,0 +1 @@ +from-there diff --git a/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src b/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src index 252b8ff00..b1d110d36 100644 --- a/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src +++ b/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_mysql, [ {description, "EMQX Enterprise MySQL Bridge"}, - {vsn, "0.1.2"}, + {vsn, "0.1.3"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_mysql/test/emqx_bridge_mysql_SUITE.erl b/apps/emqx_bridge_mysql/test/emqx_bridge_mysql_SUITE.erl index a34b65ede..98b957b19 100644 --- a/apps/emqx_bridge_mysql/test/emqx_bridge_mysql_SUITE.erl +++ b/apps/emqx_bridge_mysql/test/emqx_bridge_mysql_SUITE.erl @@ -21,7 +21,6 @@ "DEFAULT CHARSET=utf8MB4;" ). -define(SQL_DROP_TABLE, "DROP TABLE mqtt_test"). --define(SQL_DELETE, "DELETE from mqtt_test"). -define(SQL_SELECT, "SELECT payload FROM mqtt_test"). % DB defaults @@ -112,8 +111,8 @@ end_per_suite(_Config) -> ok. init_per_testcase(_Testcase, Config) -> + connect_and_drop_table(Config), connect_and_create_table(Config), - connect_and_clear_table(Config), delete_bridge(Config), snabbkaffe:start_trace(), Config. @@ -122,9 +121,7 @@ end_per_testcase(_Testcase, Config) -> ProxyHost = ?config(proxy_host, Config), ProxyPort = ?config(proxy_port, Config), emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), - connect_and_clear_table(Config), ok = snabbkaffe:stop(), - delete_bridge(Config), emqx_common_test_helpers:call_janitor(), ok. 
@@ -323,9 +320,6 @@ connect_and_create_table(Config) -> connect_and_drop_table(Config) -> query_direct_mysql(Config, ?SQL_DROP_TABLE). -connect_and_clear_table(Config) -> - query_direct_mysql(Config, ?SQL_DELETE). - connect_and_get_payload(Config) -> query_direct_mysql(Config, ?SQL_SELECT). @@ -777,28 +771,21 @@ t_table_removed(Config) -> Name = ?config(mysql_name, Config), BridgeType = ?config(mysql_bridge_type, Config), ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - ?check_trace( - begin - connect_and_create_table(Config), - ?assertMatch({ok, _}, create_bridge(Config)), - ?retry( - _Sleep = 1_000, - _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) - ), - connect_and_drop_table(Config), - Val = integer_to_binary(erlang:unique_integer()), - SentData = #{payload => Val, timestamp => 1668602148000}, - Timeout = 1000, - ?assertMatch( - {error, - {unrecoverable_error, - {1146, <<"42S02">>, <<"Table 'mqtt.mqtt_test' doesn't exist">>}}}, - sync_query_resource(Config, {send_message, SentData, [], Timeout}) - ), - ok - end, - [] + connect_and_create_table(Config), + ?assertMatch({ok, _}, create_bridge(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) + ), + connect_and_drop_table(Config), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{payload => Val, timestamp => 1668602148000}, + Timeout = 1000, + ?assertMatch( + {error, + {unrecoverable_error, {1146, <<"42S02">>, <<"Table 'mqtt.mqtt_test' doesn't exist">>}}}, + sync_query_resource(Config, {send_message, SentData, [], Timeout}) ), ok. 
@@ -807,38 +794,31 @@ t_nested_payload_template(Config) -> BridgeType = ?config(mysql_bridge_type, Config), ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), Value = integer_to_binary(erlang:unique_integer()), - ?check_trace( - begin - connect_and_create_table(Config), - {ok, _} = create_bridge( - Config, - #{ - <<"sql">> => - "INSERT INTO mqtt_test(payload, arrived) " - "VALUES (${payload.value}, FROM_UNIXTIME(${timestamp}/1000))" - } - ), - {ok, #{<<"from">> := [Topic]}} = create_rule_and_action_http(Config), - ?retry( - _Sleep = 1_000, - _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) - ), - %% send message via rule action - Payload = emqx_utils_json:encode(#{value => Value}), - Message = emqx_message:make(Topic, Payload), - {_, {ok, _}} = - ?wait_async_action( - emqx:publish(Message), - #{?snk_kind := mysql_connector_query_return}, - 10_000 - ), - ?assertEqual( - {ok, [<<"payload">>], [[Value]]}, - connect_and_get_payload(Config) - ), - ok - end, - [] + {ok, _} = create_bridge( + Config, + #{ + <<"sql">> => + "INSERT INTO mqtt_test(payload, arrived) " + "VALUES (${payload.value}, FROM_UNIXTIME(${timestamp}/1000))" + } + ), + {ok, #{<<"from">> := [Topic]}} = create_rule_and_action_http(Config), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) + ), + %% send message via rule action + Payload = emqx_utils_json:encode(#{value => Value}), + Message = emqx_message:make(Topic, Payload), + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Message), + #{?snk_kind := mysql_connector_query_return}, + 10_000 + ), + ?assertEqual( + {ok, [<<"payload">>], [[Value]]}, + connect_and_get_payload(Config) ), ok. 
diff --git a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl index 6b949b047..878ae2e1d 100644 --- a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl +++ b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl @@ -16,7 +16,6 @@ -define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_oracle, emqx_bridge_oracle]). -define(SID, "XE"). -define(RULE_TOPIC, "mqtt/rule"). -% -define(RULE_TOPIC_BIN, <>). %%------------------------------------------------------------------------------ %% CT boilerplate @@ -33,9 +32,6 @@ groups() -> {plain, AllTCs} ]. -only_once_tests() -> - [t_create_via_http]. - init_per_suite(Config) -> Config. diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src index 7a17652e0..fafd49f05 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_pgsql, [ {description, "EMQX Enterprise PostgreSQL Bridge"}, - {vsn, "0.1.4"}, + {vsn, "0.1.5"}, {registered, []}, {applications, [ kernel, @@ -8,7 +8,7 @@ emqx_resource, emqx_postgresql ]}, - {env, []}, + {env, [{emqx_action_info_modules, [emqx_bridge_pgsql_action_info]}]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl index bb15dfad9..4c0efe269 100644 --- a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.erl @@ -1,83 +1,97 @@ %%-------------------------------------------------------------------- %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- + -module(emqx_bridge_pgsql). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("emqx_postgresql/include/emqx_postgresql.hrl"). 
-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). -include_lib("hocon/include/hoconsc.hrl"). +-include_lib("epgsql/include/epgsql.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("emqx_resource/include/emqx_resource.hrl"). --import(hoconsc, [mk/2, enum/1, ref/2]). - --export([ - conn_bridge_examples/1, - values/2, - fields/2 -]). - -export([ namespace/0, roots/0, fields/1, - desc/1 + desc/1, + fields/2 ]). --define(DEFAULT_SQL, << - "insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) " - "values (${id}, ${topic}, ${qos}, ${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))" ->>). +%% Examples +-export([ + bridge_v2_examples/1, + conn_bridge_examples/1 +]). -%% ------------------------------------------------------------------------------------------------- -%% api +%% Exported for timescale and matrix bridges +-export([ + values/1, + values_conn_bridge_examples/2 +]). -conn_bridge_examples(Method) -> - [ - #{ - <<"pgsql">> => #{ - summary => <<"PostgreSQL Bridge">>, - value => values(Method, pgsql) - } - } - ]. +-define(PGSQL_HOST_OPTIONS, #{ + default_port => ?PGSQL_DEFAULT_PORT +}). -values(_Method, Type) -> - #{ - enable => true, - type => Type, - name => <<"foo">>, - server => <<"127.0.0.1:5432">>, - database => <<"mqtt">>, - pool_size => 8, - username => <<"root">>, - password => <<"******">>, - sql => ?DEFAULT_SQL, - local_topic => <<"local/topic/#">>, - resource_opts => #{ - worker_pool_size => 8, - health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, - batch_size => ?DEFAULT_BATCH_SIZE, - batch_time => ?DEFAULT_BATCH_TIME, - query_mode => async, - max_buffer_bytes => ?DEFAULT_BUFFER_BYTES - } - }. - -%% ------------------------------------------------------------------------------------------------- %% Hocon Schema Definitions namespace() -> "bridge_pgsql". -roots() -> []. +roots() -> + []. 
+fields("config_connector") -> + emqx_postgresql_connector_schema:fields("config_connector"); +fields(config) -> + fields("config_connector") ++ + fields(action); +fields(action) -> + {pgsql, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)), + #{ + desc => <<"PostgreSQL Action Config">>, + required => false + } + )}; +fields(action_parameters) -> + [ + {sql, + hoconsc:mk( + binary(), + #{desc => ?DESC("sql_template"), default => default_sql(), format => <<"sql">>} + )} + ] ++ + emqx_connector_schema_lib:prepare_statement_fields(); +fields(pgsql_action) -> + emqx_bridge_v2_schema:make_producer_action_schema( + hoconsc:mk( + hoconsc:ref(?MODULE, action_parameters), + #{ + required => true, + desc => ?DESC("action_parameters") + } + ) + ); +fields("put_bridge_v2") -> + fields(pgsql_action); +fields("get_bridge_v2") -> + fields(pgsql_action); +fields("post_bridge_v2") -> + fields(pgsql_action); fields("config") -> [ - {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {enable, hoconsc:mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, {sql, - mk( + hoconsc:mk( binary(), - #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + #{desc => ?DESC("sql_template"), default => default_sql(), format => <<"sql">>} )}, {local_topic, - mk( + hoconsc:mk( binary(), #{desc => ?DESC("local_topic"), default => undefined} )} @@ -94,17 +108,132 @@ fields("get") -> fields("post", Type) -> [type_field(Type), name_field() | fields("config")]. +type_field(Type) -> + {type, hoconsc:mk(hoconsc:enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, hoconsc:mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. 
+ desc("config") -> ?DESC("desc_config"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for PostgreSQL using `", string:to_upper(Method), "` method."]; +desc(pgsql_action) -> + ?DESC("pgsql_action"); +desc(action_parameters) -> + ?DESC("action_parameters"); +desc("config_connector") -> + ?DESC(emqx_postgresql_connector_schema, "config_connector"); desc(_) -> undefined. -%% ------------------------------------------------------------------------------------------------- +default_sql() -> + << + "insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) " + "values (${id}, ${topic}, ${qos}, ${payload}, TO_TIMESTAMP((${timestamp} :: bigint)/1000))" + >>. -type_field(Type) -> - {type, mk(enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. +%% Examples -name_field() -> - {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. +bridge_v2_examples(Method) -> + [ + #{ + <<"pgsql">> => #{ + summary => <<"PostgreSQL Action">>, + value => values({Method, pgsql}) + } + } + ]. + +conn_bridge_examples(Method) -> + [ + #{ + <<"pgsql">> => #{ + summary => <<"PostgreSQL Bridge">>, + value => values_conn_bridge_examples(Method, pgsql) + } + } + ]. 
+ +values({get, PostgreSQLType}) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + }, + values({put, PostgreSQLType}) + ); +values({post, PostgreSQLType}) -> + values({put, PostgreSQLType}); +values({put, PostgreSQLType}) -> + maps:merge( + #{ + name => <<"my_action">>, + type => PostgreSQLType, + enable => true, + connector => <<"my_connector">>, + resource_opts => #{ + batch_size => 1, + batch_time => <<"50ms">>, + inflight_window => 100, + max_buffer_bytes => <<"256MB">>, + request_ttl => <<"45s">>, + worker_pool_size => 16 + } + }, + values(parameters) + ); +values(parameters) -> + #{ + <<"parameters">> => #{ + <<"sql">> => + << + "INSERT INTO client_events(clientid, event, created_at)" + "VALUES (\n" + " ${clientid},\n" + " ${event},\n" + " TO_TIMESTAMP((${timestamp} :: bigint))\n" + ")" + >> + } + }. + +values_conn_bridge_examples(get, Type) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + }, + values_conn_bridge_examples(post, Type) + ); +values_conn_bridge_examples(_Method, Type) -> + #{ + enable => true, + type => Type, + name => <<"foo">>, + server => <<"127.0.0.1:5432">>, + database => <<"mqtt">>, + pool_size => 8, + username => <<"root">>, + password => <<"******">>, + sql => default_sql(), + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. 
diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl new file mode 100644 index 000000000..c702b396b --- /dev/null +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_pgsql_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> pgsql. + +action_type_name() -> pgsql. + +connector_type_name() -> pgsql. + +schema_module() -> emqx_bridge_pgsql. diff --git a/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl b/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl index 156d4bd16..58aaa7d71 100644 --- a/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl +++ b/apps/emqx_bridge_pgsql/test/emqx_bridge_pgsql_SUITE.erl @@ -114,7 +114,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_mgmt_api_test_util:end_suite(), - ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok = emqx_common_test_helpers:stop_apps([emqx, emqx_postgresql, emqx_conf, emqx_bridge]), ok. 
init_per_testcase(_Testcase, Config) -> @@ -147,7 +147,7 @@ common_init(Config0) -> ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), % Ensure enterprise bridge module is loaded - ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + ok = emqx_common_test_helpers:start_apps([emqx, emqx_postgresql, emqx_conf, emqx_bridge]), _ = emqx_bridge_enterprise:module_info(), emqx_mgmt_api_test_util:init_suite(), % Connect to pgsql directly and create the table @@ -183,31 +183,33 @@ pgsql_config(BridgeType, Config) -> end, QueryMode = ?config(query_mode, Config), TlsEnabled = ?config(enable_tls, Config), + %% NOTE: supplying password through a file here, to verify that it works. + Password = create_passfile(BridgeType, Config), ConfigString = io_lib:format( - "bridges.~s.~s {\n" - " enable = true\n" - " server = ~p\n" - " database = ~p\n" - " username = ~p\n" - " password = ~p\n" - " sql = ~p\n" - " resource_opts = {\n" - " request_ttl = 500ms\n" - " batch_size = ~b\n" - " query_mode = ~s\n" - " }\n" - " ssl = {\n" - " enable = ~w\n" - " }\n" - "}", + "bridges.~s.~s {" + "\n enable = true" + "\n server = ~p" + "\n database = ~p" + "\n username = ~p" + "\n password = ~p" + "\n sql = ~p" + "\n resource_opts = {" + "\n request_ttl = 500ms" + "\n batch_size = ~b" + "\n query_mode = ~s" + "\n }" + "\n ssl = {" + "\n enable = ~w" + "\n }" + "\n }", [ BridgeType, Name, Server, ?PGSQL_DATABASE, ?PGSQL_USERNAME, - ?PGSQL_PASSWORD, + Password, ?SQL_BRIDGE, BatchSize, QueryMode, @@ -216,6 +218,12 @@ pgsql_config(BridgeType, Config) -> ), {Name, parse_and_check(ConfigString, BridgeType, Name)}. +create_passfile(BridgeType, Config) -> + Filename = binary_to_list(BridgeType) ++ ".passfile", + Filepath = filename:join(?config(priv_dir, Config), Filename), + ok = file:write_file(Filepath, ?PGSQL_PASSWORD), + "file://" ++ Filepath. 
+ parse_and_check(ConfigString, BridgeType, Name) -> {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), @@ -251,17 +259,16 @@ send_message(Config, Payload) -> BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), emqx_bridge:send_message(BridgeID, Payload). -query_resource(Config, Request) -> +query_resource(Config, Msg = _Request) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). + emqx_bridge_v2:query(BridgeType, Name, Msg, #{timeout => 1_000}). query_resource_sync(Config, Request) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - emqx_resource_buffer_worker:simple_sync_query(ResourceID, Request). + ActionId = emqx_bridge_v2:id(BridgeType, Name), + emqx_resource_buffer_worker:simple_sync_query(ActionId, Request). query_resource_async(Config, Request) -> query_resource_async(Config, Request, _Opts = #{}). @@ -271,9 +278,8 @@ query_resource_async(Config, Request, Opts) -> BridgeType = ?config(pgsql_bridge_type, Config), Ref = alias([reply]), AsyncReplyFun = fun(Result) -> Ref ! {result, Ref, Result} end, - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), Timeout = maps:get(timeout, Opts, 500), - Return = emqx_resource:query(ResourceID, Request, #{ + Return = emqx_bridge_v2:query(BridgeType, Name, Request, #{ timeout => Timeout, async_reply_fun => {AsyncReplyFun, []} }), @@ -379,7 +385,9 @@ t_setup_via_http_api_and_publish(Config) -> QueryMode = ?config(query_mode, Config), PgsqlConfig = PgsqlConfig0#{ <<"name">> => Name, - <<"type">> => BridgeType + <<"type">> => BridgeType, + %% NOTE: using literal passwords with HTTP API requests. 
+ <<"password">> => <<?PGSQL_PASSWORD>> }, ?assertMatch( {ok, _}, @@ -431,13 +439,12 @@ t_get_status(Config) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name)), emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> ?assertMatch( - {ok, Status} when Status =:= disconnected orelse Status =:= connecting, - emqx_resource_manager:health_check(ResourceID) + #{status := Status} when Status =:= disconnected orelse Status =:= connecting, + emqx_bridge_v2:health_check(BridgeType, Name) ) end), ok. @@ -645,7 +652,7 @@ t_nasty_sql_string(Config) -> t_missing_table(Config) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + % ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), ?check_trace( begin @@ -655,21 +662,20 @@ _Sleep = 1_000, _Attempts = 20, ?assertMatch( - {ok, Status} when Status == connecting orelse Status == disconnected, - emqx_resource_manager:health_check(ResourceID) + #{status := Status} when Status == connecting orelse Status == disconnected, + emqx_bridge_v2:health_check(BridgeType, Name) ) ), Val = integer_to_binary(erlang:unique_integer()), SentData = #{payload => Val, timestamp => 1668602148000}, - Timeout = 1000, ?assertMatch( {error, {resource_error, #{reason := unhealthy_target}}}, - query_resource(Config, {send_message, SentData, [], Timeout}) + query_resource(Config, {send_message, SentData}) ), ok end, fun(Trace) -> - ?assertMatch([_], ?of_kind(pgsql_undefined_table, Trace)), + ?assertMatch([_ | _], ?of_kind(pgsql_undefined_table, Trace)), ok end ), @@ -679,7 +685,7 @@ t_table_removed(Config) -> Name =
?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + %%ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), ?check_trace( begin connect_and_create_table(Config), @@ -687,13 +693,14 @@ t_table_removed(Config) -> ?retry( _Sleep = 1_000, _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name)) ), connect_and_drop_table(Config), Val = integer_to_binary(erlang:unique_integer()), SentData = #{payload => Val, timestamp => 1668602148000}, - case query_resource_sync(Config, {send_message, SentData, []}) of - {error, {unrecoverable_error, {error, error, <<"42P01">>, undefined_table, _, _}}} -> + ActionId = emqx_bridge_v2:id(BridgeType, Name), + case query_resource_sync(Config, {ActionId, SentData}) of + {error, {unrecoverable_error, _}} -> ok; ?RESOURCE_ERROR_M(not_connected, _) -> ok; @@ -710,7 +717,6 @@ t_table_removed(Config) -> t_concurrent_health_checks(Config) -> Name = ?config(pgsql_name, Config), BridgeType = ?config(pgsql_bridge_type, Config), - ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), ?check_trace( begin connect_and_create_table(Config), @@ -718,11 +724,13 @@ t_concurrent_health_checks(Config) -> ?retry( _Sleep = 1_000, _Attempts = 20, - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) + ?assertMatch(#{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name)) ), emqx_utils:pmap( fun(_) -> - ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)) + ?assertMatch( + #{status := connected}, emqx_bridge_v2:health_check(BridgeType, Name) + ) end, lists:seq(1, 20) ), diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src index b012874f8..c9abebf8b 100644 --- 
a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_pulsar, [ {description, "EMQX Pulsar Bridge"}, - {vsn, "0.1.7"}, + {vsn, "0.1.8"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl index beb8452b2..c7b378617 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl @@ -170,21 +170,17 @@ fields(auth_basic) -> [ {username, mk(binary(), #{required => true, desc => ?DESC("auth_basic_username")})}, {password, - mk(binary(), #{ + emqx_schema_secret:mk(#{ required => true, - desc => ?DESC("auth_basic_password"), - sensitive => true, - converter => fun emqx_schema:password_converter/2 + desc => ?DESC("auth_basic_password") })} ]; fields(auth_token) -> [ {jwt, - mk(binary(), #{ + emqx_schema_secret:mk(#{ required => true, - desc => ?DESC("auth_token_jwt"), - sensitive => true, - converter => fun emqx_schema:password_converter/2 + desc => ?DESC("auth_token_jwt") })} ]; fields("get_" ++ Type) -> diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl index 33ac83ee1..fed0142c5 100644 --- a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -78,7 +78,6 @@ query_mode(_Config) -> -spec on_start(resource_id(), config()) -> {ok, state()}. 
on_start(InstanceId, Config) -> #{ - authentication := _Auth, bridge_name := BridgeName, servers := Servers0, ssl := SSL @@ -263,12 +262,14 @@ conn_opts(#{authentication := none}) -> #{}; conn_opts(#{authentication := #{username := Username, password := Password}}) -> #{ - auth_data => iolist_to_binary([Username, <<":">>, Password]), + %% TODO: teach `pulsar` to accept 0-arity closures as passwords. + auth_data => iolist_to_binary([Username, <<":">>, emqx_secret:unwrap(Password)]), auth_method_name => <<"basic">> }; conn_opts(#{authentication := #{jwt := JWT}}) -> #{ - auth_data => JWT, + %% TODO: teach `pulsar` to accept 0-arity closures as passwords. + auth_data => emqx_secret:unwrap(JWT), auth_method_name => <<"token">> }. diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_tests.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_tests.erl index 5492bb2a8..5b9c33fbb 100644 --- a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_tests.erl +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_tests.erl @@ -73,7 +73,7 @@ check_atom_key(Conf) when is_map(Conf) -> %% erlfmt-ignore pulsar_producer_hocon() -> -""" +" bridges.pulsar_producer.my_producer { enable = true servers = \"localhost:6650\" @@ -90,4 +90,4 @@ bridges.pulsar_producer.my_producer { server_name_indication = \"auto\" } } -""". +". 
diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src index 7e32b5a89..2e1ec3444 100644 --- a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_rabbitmq, [ {description, "EMQX Enterprise RabbitMQ Bridge"}, - {vsn, "0.1.6"}, + {vsn, "0.1.7"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl index 2af1c16c8..2e4074f79 100644 --- a/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl +++ b/apps/emqx_bridge_rabbitmq/src/emqx_bridge_rabbitmq_connector.erl @@ -74,7 +74,7 @@ fields(config) -> desc => ?DESC("username") } )}, - {password, fun emqx_connector_schema_lib:password_required/1}, + {password, emqx_connector_schema_lib:password_field(#{required => true})}, {pool_size, hoconsc:mk( typerefl:pos_integer(), @@ -196,7 +196,6 @@ on_start( #{ pool_size := PoolSize, payload_template := PayloadTemplate, - password := Password, delivery_mode := InitialDeliveryMode } = InitialConfig ) -> @@ -206,7 +205,6 @@ on_start( persistent -> 2 end, Config = InitialConfig#{ - password => emqx_secret:wrap(Password), delivery_mode => DeliveryMode }, ?SLOG(info, #{ @@ -242,13 +240,11 @@ on_start( ok -> {ok, State}; {error, Reason} -> - LogMessage = - #{ - msg => "rabbitmq_connector_start_failed", - error_reason => Reason, - config => emqx_utils:redact(Config) - }, - ?SLOG(info, LogMessage), + ?SLOG(info, #{ + msg => "rabbitmq_connector_start_failed", + error_reason => Reason, + config => emqx_utils:redact(Config) + }), {error, Reason} end. 
@@ -321,6 +317,7 @@ create_rabbitmq_connection_and_channel(Config) -> heartbeat := Heartbeat, wait_for_publish_confirmations := WaitForPublishConfirmations } = Config, + %% TODO: teach `amqp` to accept 0-arity closures as passwords. Password = emqx_secret:unwrap(WrappedPassword), SSLOptions = case maps:get(ssl, Config, #{}) of diff --git a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl index 106a4d67b..689c39dc5 100644 --- a/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl +++ b/apps/emqx_bridge_rabbitmq/test/emqx_bridge_rabbitmq_connector_SUITE.erl @@ -10,6 +10,7 @@ -include("emqx_connector.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("stdlib/include/assert.hrl"). +-include_lib("common_test/include/ct.hrl"). -include_lib("amqp_client/include/amqp_client.hrl"). %% This test SUITE requires a running RabbitMQ instance. If you don't want to @@ -26,6 +27,9 @@ rabbit_mq_host() -> rabbit_mq_port() -> 5672. +rabbit_mq_password() -> + <<"guest">>. + rabbit_mq_exchange() -> <<"test_exchange">>. 
@@ -45,12 +49,12 @@ init_per_suite(Config) -> ) of true -> - ok = emqx_common_test_helpers:start_apps([emqx_conf]), - ok = emqx_connector_test_helpers:start_apps([emqx_resource]), - {ok, _} = application:ensure_all_started(emqx_connector), - {ok, _} = application:ensure_all_started(amqp_client), + Apps = emqx_cth_suite:start( + [emqx_conf, emqx_connector, emqx_bridge_rabbitmq], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), ChannelConnection = setup_rabbit_mq_exchange_and_queue(), - [{channel_connection, ChannelConnection} | Config]; + [{channel_connection, ChannelConnection}, {suite_apps, Apps} | Config]; false -> case os:getenv("IS_CI") of "yes" -> @@ -106,13 +110,11 @@ end_per_suite(Config) -> connection := Connection, channel := Channel } = get_channel_connection(Config), - ok = emqx_common_test_helpers:stop_apps([emqx_conf]), - ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), - _ = application:stop(emqx_connector), %% Close the channel ok = amqp_channel:close(Channel), %% Close the connection - ok = amqp_connection:close(Connection). + ok = amqp_connection:close(Connection), + ok = emqx_cth_suite:stop(?config(suite_apps, Config)). % %%------------------------------------------------------------------------------ % %% Testcases @@ -125,23 +127,31 @@ t_lifecycle(Config) -> Config ). +t_start_passfile(Config) -> + ResourceID = atom_to_binary(?FUNCTION_NAME), + PasswordFilename = filename:join(?config(priv_dir, Config), "passfile"), + ok = file:write_file(PasswordFilename, rabbit_mq_password()), + InitialConfig = rabbitmq_config(#{ + password => iolist_to_binary(["file://", PasswordFilename]) + }), + ?assertMatch( + #{status := connected}, + create_local_resource(ResourceID, check_config(InitialConfig)) + ), + ?assertEqual( + ok, + emqx_resource:remove_local(ResourceID) + ). 
+ perform_lifecycle_check(ResourceID, InitialConfig, TestConfig) -> #{ channel := Channel } = get_channel_connection(TestConfig), - {ok, #{config := CheckedConfig}} = - emqx_resource:check_config(emqx_bridge_rabbitmq_connector, InitialConfig), - {ok, #{ + CheckedConfig = check_config(InitialConfig), + #{ state := #{poolname := PoolName} = State, status := InitialStatus - }} = - emqx_resource:create_local( - ResourceID, - ?CONNECTOR_RESOURCE_GROUP, - emqx_bridge_rabbitmq_connector, - CheckedConfig, - #{} - ), + } = create_local_resource(ResourceID, CheckedConfig), ?assertEqual(InitialStatus, connected), %% Instance should match the state and status of the just started resource {ok, ?CONNECTOR_RESOURCE_GROUP, #{ @@ -184,6 +194,21 @@ perform_lifecycle_check(ResourceID, InitialConfig, TestConfig) -> % %% Helpers % %%------------------------------------------------------------------------------ +check_config(Config) -> + {ok, #{config := CheckedConfig}} = + emqx_resource:check_config(emqx_bridge_rabbitmq_connector, Config), + CheckedConfig. + +create_local_resource(ResourceID, CheckedConfig) -> + {ok, Bridge} = emqx_resource:create_local( + ResourceID, + ?CONNECTOR_RESOURCE_GROUP, + emqx_bridge_rabbitmq_connector, + CheckedConfig, + #{} + ), + Bridge. + perform_query(PoolName, Channel) -> %% Send message to queue: ok = emqx_resource:query(PoolName, {query, test_data()}), @@ -216,16 +241,19 @@ receive_simple_test_message(Channel) -> end. rabbitmq_config() -> + rabbitmq_config(#{}). + +rabbitmq_config(Overrides) -> Config = #{ server => rabbit_mq_host(), port => 5672, username => <<"guest">>, - password => <<"guest">>, + password => rabbit_mq_password(), exchange => rabbit_mq_exchange(), routing_key => rabbit_mq_routing_key() }, - #{<<"config">> => Config}. + #{<<"config">> => maps:merge(Config, Overrides)}. test_data() -> #{<<"msg_field">> => <<"Hello">>}. 
diff --git a/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src b/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src index 5b6163969..53130d188 100644 --- a/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src +++ b/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_redis, [ {description, "EMQX Enterprise Redis Bridge"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {registered, []}, {applications, [ kernel, @@ -9,7 +9,9 @@ emqx_resource, emqx_redis ]}, - {env, []}, + {env, [ + {emqx_action_info_modules, [emqx_bridge_redis_action_info]} + ]}, {modules, []}, {links, []} ]}. diff --git a/apps/emqx_bridge_redis/src/emqx_bridge_redis.erl b/apps/emqx_bridge_redis/src/emqx_bridge_redis.erl index 1c8ee75f9..beafc8775 100644 --- a/apps/emqx_bridge_redis/src/emqx_bridge_redis.erl +++ b/apps/emqx_bridge_redis/src/emqx_bridge_redis.erl @@ -8,9 +8,9 @@ -import(hoconsc, [mk/2, enum/1, ref/1, ref/2]). --export([ - conn_bridge_examples/1 -]). +-export([conn_bridge_examples/1]). + +-export([type_name_fields/1, connector_fields/1]). -export([ namespace/0, @@ -100,6 +100,8 @@ namespace() -> "bridge_redis". roots() -> []. +fields(action_parameters) -> + [{command_template, fun command_template/1}]; fields("post_single") -> method_fields(post, redis_single); fields("post_sentinel") -> @@ -142,21 +144,13 @@ method_fields(put, ConnectorType) -> redis_bridge_common_fields(Type) -> emqx_bridge_schema:common_bridge_fields() ++ [ - {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})}, - {command_template, fun command_template/1} + {local_topic, mk(binary(), #{required => false, desc => ?DESC("desc_local_topic")})} + | fields(action_parameters) ] ++ resource_fields(Type). connector_fields(Type) -> - RedisType = bridge_type_to_redis_conn_type(Type), - emqx_redis:fields(RedisType). 
- -bridge_type_to_redis_conn_type(redis_single) -> - single; -bridge_type_to_redis_conn_type(redis_sentinel) -> - sentinel; -bridge_type_to_redis_conn_type(redis_cluster) -> - cluster. + emqx_redis:fields(Type). type_name_fields(Type) -> [ @@ -168,7 +162,7 @@ resource_fields(Type) -> [ {resource_opts, mk( - ref("creation_opts_" ++ atom_to_list(Type)), + ?R_REF("creation_opts_" ++ atom_to_list(Type)), #{ required => false, default => #{}, @@ -185,6 +179,8 @@ resource_creation_fields("redis_cluster") -> resource_creation_fields(_) -> emqx_resource_schema:fields("creation_opts"). +desc(action_parameters) -> + ?DESC("desc_action_parameters"); desc("config") -> ?DESC("desc_config"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> diff --git a/apps/emqx_bridge_redis/src/emqx_bridge_redis_action_info.erl b/apps/emqx_bridge_redis/src/emqx_bridge_redis_action_info.erl new file mode 100644 index 000000000..22ed40093 --- /dev/null +++ b/apps/emqx_bridge_redis/src/emqx_bridge_redis_action_info.erl @@ -0,0 +1,98 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_redis_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0, + bridge_v1_config_to_action_config/2, + connector_action_config_to_bridge_v1_config/2, + bridge_v1_config_to_connector_config/1, + bridge_v1_type_name_fun/1 +]). + +-import(emqx_utils_conv, [bin/1]). + +-define(SCHEMA_MODULE, emqx_bridge_redis_schema). +-import(hoconsc, [mk/2, enum/1, ref/1, ref/2]). + +action_type_name() -> redis. + +connector_type_name() -> redis. + +schema_module() -> ?SCHEMA_MODULE. 
+ +connector_action_config_to_bridge_v1_config(ConnectorConfig, ActionConfig) -> + fix_v1_type( + maps:merge( + maps:without( + [<<"connector">>], + map_unindent(<<"parameters">>, ActionConfig) + ), + map_unindent(<<"parameters">>, ConnectorConfig) + ) + ). + +bridge_v1_config_to_action_config(BridgeV1Config, ConnectorName) -> + ActionTopLevelKeys = schema_keys(?SCHEMA_MODULE:fields(redis_action)), + ActionParametersKeys = schema_keys(emqx_bridge_redis:fields(action_parameters)), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ActionConfig = make_config_map(ActionKeys, ActionParametersKeys, BridgeV1Config), + ActionConfig#{<<"connector">> => ConnectorName}. + +bridge_v1_config_to_connector_config(BridgeV1Config) -> + ActionTopLevelKeys = schema_keys(?SCHEMA_MODULE:fields(redis_action)), + ActionParametersKeys = schema_keys(emqx_bridge_redis:fields(action_parameters)), + ActionKeys = ActionTopLevelKeys ++ ActionParametersKeys, + ConnectorTopLevelKeys = schema_keys(?SCHEMA_MODULE:fields("config_connector")), + %% Need put redis_type into parameter. + %% cluster need type to filter resource_opts + ConnectorKeys = + (maps:keys(BridgeV1Config) -- (ActionKeys -- ConnectorTopLevelKeys)) ++ + [<<"redis_type">>], + ConnectorParametersKeys = ConnectorKeys -- ConnectorTopLevelKeys, + make_config_map(ConnectorKeys, ConnectorParametersKeys, BridgeV1Config). + +%%------------------------------------------------------------------------------------------ +%% Internal helper fns +%%------------------------------------------------------------------------------------------ + +bridge_v1_type_name() -> + {fun ?MODULE:bridge_v1_type_name_fun/1, bridge_v1_type_names()}. +bridge_v1_type_name_fun({#{<<"parameters">> := #{<<"redis_type">> := Type}}, _}) -> + v1_type(Type). + +fix_v1_type(#{<<"redis_type">> := RedisType} = Conf) -> + Conf#{<<"type">> => v1_type(RedisType)}. 
+ +v1_type(<<"single">>) -> redis_single; +v1_type(<<"sentinel">>) -> redis_sentinel; +v1_type(<<"cluster">>) -> redis_cluster. + +bridge_v1_type_names() -> [redis_single, redis_sentinel, redis_cluster]. + +map_unindent(Key, Map) -> + maps:merge( + maps:get(Key, Map), + maps:remove(Key, Map) + ). + +map_indent(IndentKey, PickKeys, Map) -> + maps:put( + IndentKey, + maps:with(PickKeys, Map), + maps:without(PickKeys, Map) + ). + +schema_keys(Schema) -> + [bin(Key) || {Key, _} <- Schema]. + +make_config_map(PickKeys, IndentKeys, Config) -> + Conf0 = maps:with(PickKeys, Config), + map_indent(<<"parameters">>, IndentKeys, Conf0). diff --git a/apps/emqx_bridge_redis/src/emqx_bridge_redis_connector.erl b/apps/emqx_bridge_redis/src/emqx_bridge_redis_connector.erl index 696947726..4835e8127 100644 --- a/apps/emqx_bridge_redis/src/emqx_bridge_redis_connector.erl +++ b/apps/emqx_bridge_redis/src/emqx_bridge_redis_connector.erl @@ -4,6 +4,7 @@ -module(emqx_bridge_redis_connector). -include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -behaviour(emqx_resource). @@ -11,11 +12,15 @@ %% callbacks of behaviour emqx_resource -export([ callback_mode/0, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, on_start/2, on_stop/2, on_query/3, on_batch_query/3, - on_get_status/2 + on_get_status/2, + on_get_channel_status/3 ]). %% ------------------------------------------------------------------------------------------------- @@ -24,7 +29,34 @@ callback_mode() -> always_sync. -on_start(InstId, #{command_template := CommandTemplate} = Config) -> +on_add_channel( + _InstanceId, + State = #{channels := Channels}, + ChannelId, + #{ + parameters := #{ + command_template := Template + } + } +) -> + Channels2 = Channels#{ + ChannelId => #{template => preproc_command_template(Template)} + }, + {ok, State#{channels => Channels2}}. 
+ +on_remove_channel(_InstanceId, State = #{channels := Channels}, ChannelId) -> + {ok, State#{channels => maps:remove(ChannelId, Channels)}}. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + +on_get_channel_status(_ConnectorResId, ChannelId, #{channels := Channels}) -> + case maps:is_key(ChannelId, Channels) of + true -> ?status_connected; + false -> ?status_disconnected + end. + +on_start(InstId, Config) -> case emqx_redis:on_start(InstId, Config) of {ok, RedisConnSt} -> ?tp( @@ -33,7 +65,7 @@ on_start(InstId, #{command_template := CommandTemplate} = Config) -> ), {ok, #{ conn_st => RedisConnSt, - command_template => preproc_command_template(CommandTemplate) + channels => #{} }}; {error, {start_pool_failed, _, #{type := authentication_error, reason := Reason}}} = Error -> ?tp( @@ -57,14 +89,8 @@ on_stop(InstId, undefined = _State) -> on_get_status(InstId, #{conn_st := RedisConnSt}) -> emqx_redis:on_get_status(InstId, RedisConnSt). -on_query( - InstId, - {send_message, Data}, - _State = #{ - command_template := CommandTemplate, conn_st := RedisConnSt - } -) -> - Cmd = proc_command_template(CommandTemplate, Data), +%% raw cmd without template, for CI test +on_query(InstId, {cmd, Cmd}, #{conn_st := RedisConnSt}) -> ?tp( redis_bridge_connector_cmd, #{cmd => Cmd, batch => false, mode => sync} @@ -77,45 +103,68 @@ on_query( Result; on_query( InstId, - Query, - _State = #{conn_st := RedisConnSt} + {_MessageTag, _Data} = Msg, + #{channels := Channels, conn_st := RedisConnSt} ) -> - ?tp( - redis_bridge_connector_query, - #{query => Query, batch => false, mode => sync} - ), - Result = query(InstId, Query, RedisConnSt), - ?tp( - redis_bridge_connector_send_done, - #{query => Query, batch => false, mode => sync, result => Result} - ), - Result. 
+ case try_render_message([Msg], Channels) of + {ok, [Cmd]} -> + ?tp( + redis_bridge_connector_cmd, + #{cmd => Cmd, batch => false, mode => sync} + ), + Result = query(InstId, {cmd, Cmd}, RedisConnSt), + ?tp( + redis_bridge_connector_send_done, + #{cmd => Cmd, batch => false, mode => sync, result => Result} + ), + Result; + Error -> + Error + end. on_batch_query( - InstId, BatchData, _State = #{command_template := CommandTemplate, conn_st := RedisConnSt} + InstId, BatchData, _State = #{channels := Channels, conn_st := RedisConnSt} ) -> - Cmds = process_batch_data(BatchData, CommandTemplate), - ?tp( - redis_bridge_connector_send, - #{batch_data => BatchData, batch => true, mode => sync} - ), - Result = query(InstId, {cmds, Cmds}, RedisConnSt), - ?tp( - redis_bridge_connector_send_done, - #{ - batch_data => BatchData, - batch_size => length(BatchData), - batch => true, - mode => sync, - result => Result - } - ), - Result. + case try_render_message(BatchData, Channels) of + {ok, Cmds} -> + ?tp( + redis_bridge_connector_send, + #{batch_data => BatchData, batch => true, mode => sync} + ), + Result = query(InstId, {cmds, Cmds}, RedisConnSt), + ?tp( + redis_bridge_connector_send_done, + #{ + batch_data => BatchData, + batch_size => length(BatchData), + batch => true, + mode => sync, + result => Result + } + ), + Result; + Error -> + Error + end. %% ------------------------------------------------------------------------------------------------- %% private helpers %% ------------------------------------------------------------------------------------------------- +try_render_message(Datas, Channels) -> + try_render_message(Datas, Channels, []). 
+ +try_render_message([{MessageTag, Data} | T], Channels, Acc) -> + case maps:find(MessageTag, Channels) of + {ok, #{template := Template}} -> + Msg = proc_command_template(Template, Data), + try_render_message(T, Channels, [Msg | Acc]); + _ -> + {error, {unrecoverable_error, {invalid_message_tag, MessageTag}}} + end; +try_render_message([], _Channels, Acc) -> + {ok, lists:reverse(Acc)}. + query(InstId, Query, RedisConnSt) -> case emqx_redis:on_query(InstId, Query, RedisConnSt) of {ok, _} = Ok -> Ok; @@ -123,14 +172,6 @@ query(InstId, Query, RedisConnSt) -> {error, _} = Error -> Error end. -process_batch_data(BatchData, CommandTemplate) -> - lists:map( - fun({send_message, Data}) -> - proc_command_template(CommandTemplate, Data) - end, - BatchData - ). - proc_command_template(CommandTemplate, Msg) -> lists:map( fun(ArgTks) -> diff --git a/apps/emqx_bridge_redis/src/emqx_bridge_redis_schema.erl b/apps/emqx_bridge_redis/src/emqx_bridge_redis_schema.erl new file mode 100644 index 000000000..f02bf3322 --- /dev/null +++ b/apps/emqx_bridge_redis/src/emqx_bridge_redis_schema.erl @@ -0,0 +1,276 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_redis_schema). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-define(TYPE, redis). + +%% `hocon_schema' API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1, + resource_opts_converter/2 +]). + +%% `emqx_bridge_v2_schema' "unofficial" API +-export([ + bridge_v2_examples/1, + conn_bridge_examples/1, + connector_examples/1 +]). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> + ?TYPE. 
+ +roots() -> + []. + +%%========================================= +%% Action fields +%%========================================= +fields("config_connector") -> + emqx_connector_schema:common_fields() ++ + [ + {parameters, + ?HOCON( + hoconsc:union([ + ?R_REF(emqx_redis, redis_single_connector), + ?R_REF(emqx_redis, redis_sentinel_connector), + ?R_REF(emqx_redis, redis_cluster_connector) + ]), + #{required => true, desc => ?DESC(redis_parameters)} + )} + ] ++ + emqx_redis:redis_fields() ++ + emqx_connector_schema_lib:ssl_fields(); +fields(action) -> + {?TYPE, + ?HOCON( + ?MAP(name, ?R_REF(redis_action)), + #{ + desc => <<"Redis Action Config">>, + converter => fun ?MODULE:resource_opts_converter/2, + required => false + } + )}; +fields(redis_action) -> + Schema = + emqx_bridge_v2_schema:make_producer_action_schema( + ?HOCON( + ?R_REF(emqx_bridge_redis, action_parameters), + #{ + required => true, + desc => ?DESC(producer_action) + } + ) + ), + ResOpts = + {resource_opts, + ?HOCON( + ?R_REF(resource_opts), + #{ + required => true, + desc => ?DESC(emqx_resource_schema, resource_opts) + } + )}, + RedisType = + {redis_type, + ?HOCON( + ?ENUM([single, sentinel, cluster]), + #{required => true, desc => ?DESC(redis_type)} + )}, + [RedisType | lists:keyreplace(resource_opts, 1, Schema, ResOpts)]; +fields(resource_opts) -> + emqx_resource_schema:create_opts([ + {batch_size, #{desc => ?DESC(batch_size)}}, + {batch_time, #{desc => ?DESC(batch_time)}} + ]); +%%========================================= +%% HTTP API fields +%%========================================= +fields("post_connector") -> + emqx_bridge_redis:type_name_fields(?TYPE) ++ fields("config_connector"); +fields("put_connector") -> + fields("config_connector"); +fields("get_connector") -> + emqx_bridge_schema:status_fields() ++ + fields("post_connector"); +fields("get_bridge_v2") -> + emqx_bridge_schema:status_fields() ++ fields("post_bridge_v2"); +fields("post_bridge_v2") -> + 
emqx_bridge_redis:type_name_fields(?TYPE) ++ fields("put_bridge_v2"); +fields("put_bridge_v2") -> + fields(redis_action); +fields("get_single") -> + emqx_bridge_schema:status_fields() ++ fields("put_single"); +fields("put_single") -> + fields("config_connector"); +fields("post_single") -> + emqx_bridge_redis:type_name_fields(?TYPE) ++ fields("put_single"). + +desc("config_connector") -> + ?DESC(emqx_bridge_redis, "desc_config"); +desc(redis_action) -> + ?DESC(redis_action); +desc(resource_opts) -> + ?DESC(emqx_resource_schema, resource_opts); +desc(_Name) -> + undefined. + +resource_opts_converter(undefined, _Opts) -> + undefined; +resource_opts_converter(Conf, _Opts) -> + maps:map( + fun(_Name, SubConf) -> + case SubConf of + #{<<"redis_type">> := <<"cluster">>} -> + ResOpts = maps:get(<<"resource_opts">>, SubConf, #{}), + %% cluster don't support batch + SubConf#{ + <<"resource_opts">> => + ResOpts#{<<"batch_size">> => 1, <<"batch_time">> => <<"0ms">>} + }; + _ -> + SubConf + end + end, + Conf + ). + +%%------------------------------------------------------------------------------------------------- +%% `emqx_bridge_v2_schema' "unofficial" API +%%------------------------------------------------------------------------------------------------- + +bridge_v2_examples(Method) -> + [ + #{ + <<"redis_single_producer">> => #{ + summary => <<"Redis Single Producer Action">>, + value => action_example(single, Method) + } + }, + #{ + <<"redis_sentinel_producer">> => #{ + summary => <<"Redis Sentinel Producer Action">>, + value => action_example(sentinel, Method) + } + }, + #{ + <<"redis_cluster_producer">> => #{ + summary => <<"Redis Cluster Producer Action">>, + value => action_example(cluster, Method) + } + } + ]. 
+ +connector_examples(Method) -> + [ + #{ + <<"redis_single_producer">> => #{ + summary => <<"Redis Single Producer Connector">>, + value => connector_example(single, Method) + } + }, + #{ + <<"redis_cluster_producer">> => #{ + summary => <<"Redis Cluster Producer Connector">>, + value => connector_example(cluster, Method) + } + }, + #{ + <<"redis_sentinel_producer">> => #{ + summary => <<"Redis Sentinel Producer Connector">>, + value => connector_example(sentinel, Method) + } + } + ]. + +conn_bridge_examples(Method) -> + emqx_bridge_redis:conn_bridge_examples(Method). + +action_example(RedisType, post) -> + maps:merge( + action_example(RedisType, put), + #{ + type => <<"redis">>, + name => <<"my_action">> + } + ); +action_example(RedisType, get) -> + maps:merge( + action_example(RedisType, put), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + } + ); +action_example(RedisType, put) -> + #{ + redis_type => RedisType, + enable => true, + connector => <<"my_connector_name">>, + description => <<"My action">>, + parameters => #{ + command_template => [<<"LPUSH">>, <<"MSGS">>, <<"${payload}">>] + }, + resource_opts => #{batch_size => 1} + }. + +connector_example(RedisType, get) -> + maps:merge( + connector_example(RedisType, put), + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + } + ); +connector_example(RedisType, post) -> + maps:merge( + connector_example(RedisType, put), + #{ + type => <<"redis_single_producer">>, + name => <<"my_connector">> + } + ); +connector_example(RedisType, put) -> + #{ + enable => true, + desc => <<"My redis ", (atom_to_binary(RedisType))/binary, " connector">>, + parameters => connector_parameter(RedisType), + pool_size => 8, + database => 1, + username => <<"test">>, + password => <<"******">>, + auto_reconnect => true, + ssl => #{enable => false} + }. 
+ +connector_parameter(single) -> + #{redis_type => single, server => <<"127.0.0.1:6379">>}; +connector_parameter(cluster) -> + #{redis_type => cluster, servers => <<"127.0.0.1:6379,127.0.0.2:6379">>}; +connector_parameter(sentinel) -> + #{ + redis_type => sentinel, + servers => <<"127.0.0.1:6379,127.0.0.2:6379">>, + sentinel => <<"myredismaster">> + }. diff --git a/apps/emqx_bridge_redis/test/emqx_bridge_redis_SUITE.erl b/apps/emqx_bridge_redis/test/emqx_bridge_redis_SUITE.erl index c2430c076..125d84d0f 100644 --- a/apps/emqx_bridge_redis/test/emqx_bridge_redis_SUITE.erl +++ b/apps/emqx_bridge_redis/test/emqx_bridge_redis_SUITE.erl @@ -56,6 +56,7 @@ ). all() -> [{group, transports}, {group, rest}]. +suite() -> [{timetrap, {minutes, 20}}]. groups() -> ResourceSpecificTCs = [t_create_delete_bridge], @@ -143,15 +144,19 @@ redis_checks() -> end. end_per_suite(_Config) -> - ok = delete_all_bridges(), + ok = emqx_bridge_v2_SUITE:delete_all_bridges_and_connectors(), ok = emqx_common_test_helpers:stop_apps([emqx_conf]), ok = emqx_connector_test_helpers:stop_apps([emqx_rule_engine, emqx_bridge, emqx_resource]), _ = application:stop(emqx_connector), ok. 
-init_per_testcase(_Testcase, Config) -> +init_per_testcase(Testcase, Config0) -> + emqx_logger:set_log_level(debug), ok = delete_all_rules(), - ok = delete_all_bridges(), + ok = emqx_bridge_v2_SUITE:delete_all_bridges_and_connectors(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + Name = <<(atom_to_binary(Testcase))/binary, UniqueNum/binary>>, + Config = [{bridge_name, Name} | Config0], case {?config(connector_type, Config), ?config(batch_mode, Config)} of {undefined, _} -> Config; @@ -165,7 +170,13 @@ init_per_testcase(_Testcase, Config) -> IsBatch = (BatchMode =:= batch_on), BridgeConfig0 = maps:merge(RedisConnConfig, ?COMMON_REDIS_OPTS), BridgeConfig1 = BridgeConfig0#{<<"resource_opts">> => ResourceConfig}, - [{bridge_config, BridgeConfig1}, {is_batch, IsBatch} | Config] + BridgeType = list_to_atom(atom_to_list(RedisType) ++ "_producer"), + [ + {bridge_type, BridgeType}, + {bridge_config, BridgeConfig1}, + {is_batch, IsBatch} + | Config + ] end. end_per_testcase(_Testcase, Config) -> @@ -173,10 +184,18 @@ end_per_testcase(_Testcase, Config) -> ProxyPort = ?config(proxy_port, Config), ok = snabbkaffe:stop(), emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), - ok = delete_all_bridges(). + ok = emqx_bridge_v2_SUITE:delete_all_bridges_and_connectors(). 
t_create_delete_bridge(Config) ->
-    Name = <<"mybridge">>,
+    Pid = erlang:whereis(eredis_sentinel),
+    ct:pal("t_create_delete_bridge:~p~n", [
+        #{
+            config => Config,
+            sentinel => Pid,
+            eredis_sentinel => Pid =/= undefined andalso erlang:process_info(Pid)
+        }
+    ]),
+    Name = ?config(bridge_name, Config),
     Type = ?config(connector_type, Config),
     BridgeConfig = ?config(bridge_config, Config),
     IsBatch = ?config(is_batch, Config),
@@ -184,13 +203,11 @@ t_create_delete_bridge(Config) ->
         {ok, _},
         emqx_bridge:create(Type, Name, BridgeConfig)
     ),
-    ResourceId = emqx_bridge_resource:resource_id(Type, Name),
-
     ?WAIT(
         {ok, connected},
         emqx_resource:health_check(ResourceId),
-        5
+        10
     ),
 
     RedisType = atom_to_binary(Type),
@@ -244,7 +261,7 @@ t_check_values(_Config) ->
     ).
 
 t_check_replay(Config) ->
-    Name = <<"toxic_bridge">>,
+    Name = ?config(bridge_name, Config),
     Type = <<"redis_single">>,
     Topic = <<"local_topic/test">>,
     ProxyName = "redis_single_tcp",
@@ -324,15 +341,15 @@ t_permanent_error(_Config) ->
     ),
     ok = emqx_bridge:remove(Type, Name).
 
-t_auth_username_password(_Config) ->
-    Name = <<"mybridge">>,
+t_auth_username_password(Config) ->
+    Name = ?config(bridge_name, Config),
     Type = <<"redis_single">>,
-    ResourceId = emqx_bridge_resource:resource_id(Type, Name),
     BridgeConfig = username_password_redis_bridge_config(),
     ?assertMatch(
         {ok, _},
         emqx_bridge:create(Type, Name, BridgeConfig)
     ),
+    ResourceId = emqx_bridge_resource:resource_id(Type, Name),
     ?WAIT(
         {ok, connected},
         emqx_resource:health_check(ResourceId),
@@ -340,16 +357,16 @@ t_auth_username_password(_Config) ->
     ),
     ok = emqx_bridge:remove(Type, Name).
-t_auth_error_username_password(_Config) -> - Name = <<"mybridge">>, +t_auth_error_username_password(Config) -> + Name = ?config(bridge_name, Config), Type = <<"redis_single">>, - ResourceId = emqx_bridge_resource:resource_id(Type, Name), BridgeConfig0 = username_password_redis_bridge_config(), BridgeConfig = maps:merge(BridgeConfig0, #{<<"password">> => <<"wrong_password">>}), ?assertMatch( {ok, _}, emqx_bridge:create(Type, Name, BridgeConfig) ), + ResourceId = emqx_bridge_resource:resource_id(Type, Name), ?WAIT( {ok, disconnected}, emqx_resource:health_check(ResourceId), @@ -361,16 +378,16 @@ t_auth_error_username_password(_Config) -> ), ok = emqx_bridge:remove(Type, Name). -t_auth_error_password_only(_Config) -> - Name = <<"mybridge">>, +t_auth_error_password_only(Config) -> + Name = ?config(bridge_name, Config), Type = <<"redis_single">>, - ResourceId = emqx_bridge_resource:resource_id(Type, Name), BridgeConfig0 = toxiproxy_redis_bridge_config(), BridgeConfig = maps:merge(BridgeConfig0, #{<<"password">> => <<"wrong_password">>}), ?assertMatch( {ok, _}, emqx_bridge:create(Type, Name, BridgeConfig) ), + ResourceId = emqx_bridge_resource:resource_id(Type, Name), ?assertEqual( {ok, disconnected}, emqx_resource:health_check(ResourceId) @@ -382,7 +399,7 @@ t_auth_error_password_only(_Config) -> ok = emqx_bridge:remove(Type, Name). 
t_create_disconnected(Config) -> - Name = <<"toxic_bridge">>, + Name = ?config(bridge_name, Config), Type = <<"redis_single">>, ?check_trace( @@ -450,10 +467,8 @@ check_resource_queries(ResourceId, BaseTopic, IsBatch) -> added_msgs(ResourceId, BaseTopic, Payload) -> lists:flatmap( fun(K) -> - {ok, Results} = emqx_resource:simple_sync_query( - ResourceId, - {cmd, [<<"LRANGE">>, K, <<"0">>, <<"-1">>]} - ), + Message = {cmd, [<<"LRANGE">>, K, <<"0">>, <<"-1">>]}, + {ok, Results} = emqx_resource:simple_sync_query(ResourceId, Message), [El || El <- Results, El =:= Payload] end, [format_redis_key(BaseTopic, S) || S <- lists:seq(0, ?KEYSHARDS - 1)] @@ -482,14 +497,6 @@ delete_all_rules() -> emqx_rule_engine:get_rules() ). -delete_all_bridges() -> - lists:foreach( - fun(#{name := Name, type := Type}) -> - emqx_bridge:remove(Type, Name) - end, - emqx_bridge:list() - ). - all_test_hosts() -> Confs = [ ?REDIS_TOXYPROXY_CONNECT_CONFIG @@ -554,12 +561,12 @@ redis_connect_configs() -> tcp => #{ <<"servers">> => <<"redis-sentinel:26379">>, <<"redis_type">> => <<"sentinel">>, - <<"sentinel">> => <<"mymaster">> + <<"sentinel">> => <<"mytcpmaster">> }, tls => #{ <<"servers">> => <<"redis-sentinel-tls:26380">>, <<"redis_type">> => <<"sentinel">>, - <<"sentinel">> => <<"mymaster">>, + <<"sentinel">> => <<"mytlsmaster">>, <<"ssl">> => redis_connect_ssl_opts(redis_sentinel) } }, diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src index e158a2e46..38c00e7ee 100644 --- a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src +++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_rocketmq, [ {description, "EMQX Enterprise RocketMQ Bridge"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {registered, []}, {applications, [kernel, stdlib, emqx_resource, rocketmq]}, {env, []}, diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl 
b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl index dbac88249..81045ade4 100644 --- a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl +++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq_connector.erl @@ -48,13 +48,8 @@ fields(config) -> binary(), #{default => <<>>, desc => ?DESC("access_key")} )}, - {secret_key, - mk( - binary(), - #{default => <<>>, desc => ?DESC("secret_key"), sensitive => true} - )}, - {security_token, - mk(binary(), #{default => <<>>, desc => ?DESC(security_token), sensitive => true})}, + {secret_key, emqx_schema_secret:mk(#{default => <<>>, desc => ?DESC("secret_key")})}, + {security_token, emqx_schema_secret:mk(#{default => <<>>, desc => ?DESC(security_token)})}, {sync_timeout, mk( emqx_schema:timeout_duration(), @@ -294,21 +289,19 @@ make_producer_opts( acl_info => emqx_secret:wrap(ACLInfo) }. -acl_info(<<>>, <<>>, <<>>) -> +acl_info(<<>>, _, _) -> #{}; -acl_info(AccessKey, SecretKey, <<>>) when is_binary(AccessKey), is_binary(SecretKey) -> - #{ +acl_info(AccessKey, SecretKey, SecurityToken) when is_binary(AccessKey) -> + Info = #{ access_key => AccessKey, - secret_key => SecretKey - }; -acl_info(AccessKey, SecretKey, SecurityToken) when - is_binary(AccessKey), is_binary(SecretKey), is_binary(SecurityToken) --> - #{ - access_key => AccessKey, - secret_key => SecretKey, - security_token => SecurityToken - }; + secret_key => emqx_maybe:define(emqx_secret:unwrap(SecretKey), <<>>) + }, + case emqx_maybe:define(emqx_secret:unwrap(SecurityToken), <<>>) of + <<>> -> + Info; + Token -> + Info#{security_token => Token} + end; acl_info(_, _, _) -> #{}. 
diff --git a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src
index 1664fee59..331f9c29f 100644
--- a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src
+++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src
@@ -1,6 +1,6 @@
 {application, emqx_bridge_sqlserver, [
   {description, "EMQX Enterprise SQL Server Bridge"},
-  {vsn, "0.1.4"},
+  {vsn, "0.1.5"},
   {registered, []},
   {applications, [kernel, stdlib, emqx_resource, odbc]},
   {env, []},
diff --git a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl
index 6db8c2877..a87e71e31 100644
--- a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl
+++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl
@@ -199,7 +199,7 @@ on_start(
     Options = [
         {server, to_bin(Server)},
         {username, Username},
-        {password, emqx_secret:wrap(maps:get(password, Config, ""))},
+        {password, maps:get(password, Config, emqx_secret:wrap(""))},
         {driver, Driver},
         {database, Database},
         {pool_size, PoolSize}
diff --git a/apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl b/apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl
index 101ead838..62214cb5e 100644
--- a/apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl
+++ b/apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl
@@ -130,7 +130,9 @@ end_per_group(_Group, _Config) ->
     ok.
 
 init_per_suite(Config) ->
-    Config.
+    Passfile = filename:join(?config(priv_dir, Config), "passfile"),
+    ok = file:write_file(Passfile, <<?SQL_SERVER_PASSWORD>>),
+    [{sqlserver_passfile, Passfile} | Config].
end_per_suite(_Config) ->
     emqx_mgmt_api_test_util:end_suite(),
@@ -193,7 +195,9 @@ t_setup_via_http_api_and_publish(Config) ->
     SQLServerConfig0 = ?config(sqlserver_config, Config),
     SQLServerConfig = SQLServerConfig0#{
         <<"name">> => Name,
-        <<"type">> => BridgeType
+        <<"type">> => BridgeType,
+        %% NOTE: using literal password with HTTP API requests.
+        <<"password">> => <<?SQL_SERVER_PASSWORD>>
     },
     ?assertMatch(
         {ok, _},
@@ -449,6 +453,7 @@ sqlserver_config(BridgeType, Config) ->
     Name = atom_to_binary(?MODULE),
     BatchSize = batch_size(Config),
     QueryMode = ?config(query_mode, Config),
+    Passfile = ?config(sqlserver_passfile, Config),
     ConfigString =
         io_lib:format(
             "bridges.~s.~s {\n"
@@ -472,7 +477,7 @@ sqlserver_config(BridgeType, Config) ->
             Server,
             ?SQL_SERVER_DATABASE,
             ?SQL_SERVER_USERNAME,
-            ?SQL_SERVER_PASSWORD,
+            "file://" ++ Passfile,
             ?SQL_BRIDGE,
             ?SQL_SERVER_DRIVER,
             BatchSize,
diff --git a/apps/emqx_bridge_syskeeper/BSL.txt b/apps/emqx_bridge_syskeeper/BSL.txt
new file mode 100644
index 000000000..0acc0e696
--- /dev/null
+++ b/apps/emqx_bridge_syskeeper/BSL.txt
@@ -0,0 +1,94 @@
+Business Source License 1.1
+
+Licensor: Hangzhou EMQ Technologies Co., Ltd.
+Licensed Work: EMQX Enterprise Edition
+ The Licensed Work is (c) 2023
+ Hangzhou EMQ Technologies Co., Ltd.
+Additional Use Grant: Students and educators are granted right to copy,
+ modify, and create derivative work for research
+ or education.
+Change Date: 2027-02-01
+Change License: Apache License, Version 2.0
+
+For information about alternative licensing arrangements for the Software,
+please contact Licensor: https://www.emqx.com/en/contact
+
+Notice
+
+The Business Source License (this document, or the “License”) is not an Open
+Source license. However, the Licensed Work will eventually be made available
+under an Open Source License, as stated in this License.
+
+License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved.
+“Business Source License” is a trademark of MariaDB Corporation Ab.
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_syskeeper/README.md b/apps/emqx_bridge_syskeeper/README.md new file mode 100644 index 000000000..328fd488e --- /dev/null +++ b/apps/emqx_bridge_syskeeper/README.md @@ -0,0 +1,30 @@ +# EMQX Syskeeper Bridge + +Nari Syskeeper 2000 is a one-way Physical Isolation Net Gap. + +The application is used to connect EMQX and Syskeeper. +Users can create a rule and quickly ingest IoT data to the Syskeeper by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ +# Documentation + +- Refer to [Rules engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_syskeeper/doc/protocol_v1.md b/apps/emqx_bridge_syskeeper/doc/protocol_v1.md new file mode 100644 index 000000000..ca73c300d --- /dev/null +++ b/apps/emqx_bridge_syskeeper/doc/protocol_v1.md @@ -0,0 +1,370 @@ + +# Table of Contents + +1. [Packet Format](#orgb2a43d1) +2. [Common Header](#org5ca4c69) + 1. [Types](#org240efb3) + 2. [Shared Flags](#org804fcce) +3. [Handshake Packet](#org6a73ea8) +4. [Forward Packet](#org39c753e) + 1. [Flags](#org5177d26) + 2. [Payload](#orgb29cbd7) + 1. [Message Content map structure](#org75acfe6) +5. [Heartbeat Packet](#org388b69a) + + + + +# Packet Format + + + + + + + + + + + + + + + + + + +
+  bytes  + +   0   + +   1   + +   2   + +   3   + +         5         + +     6 .. end     +
+         + +     variable length     + +   common header   + +     payload      +
+ +The length of the remaining part(common header + payload) is indicated by the Length Header of each packet + + + + +# Common Header + + + + + + + + + + + + + + + + + + + +
+  bits  + +   0   + +   1   + +   2   + +   3   + +   4   + +   5   + +   6   + +   7   +
+        + +       packet type       + +      shared flags       +
+ + + + +## Types + + + + + + + + + + + + + + + + + + + +
+    type    + +    usage    +
+     0      + +  handshake  +
+     1      + +   forward   +
+     2      + +  heartbeat  +
+ + + + +## Shared Flags + +The usage of each bit is determined by the type of packet + + + + +# Handshake Packet + + + + + + + + + + + + + +
+  bytes  + +        0        + +        1        +
+         + +  common header  + +     version     +
+ + + + +# Forward Packet + + + + + + + + + + + + + + + + + + + + + + + + + +
+  bits  + +  0  + +  1  + +  2  + +  3  + +  4  + +  5  + +  6  + +   7   + +     ...     +
+       
+       
+        +
+                
+   packet type  
+                 +
+             + +  ACK  + +            
+   payload  
+             +
+   forward flags   +
+ + + + +## Flags + + + + + + + + + + + +
+  flag  + +                    usage                    +
+  ACK   + +       This packet need a ACK response       +
+ + + + +## Payload + + + + + + + + + + + + + + + + + +
+  bytes  + +   0   + +   ..    + +   n   + +  n+1  + +  ..   + +   x   +
+         + +   Content Length    + +  Message Content  +
+ +- Content length is a variable length number. +- Message content is a list in an opaque binary format whose element is a map structure + + + + +### Message Content map structure + + { + id: "0006081CCFF3D48F03C10000058B0000", // unique message id + qos: 1, + flags: {dup: false, retain: false}, + from: "clientid", + topic: "t/1", + payload: "hello, world", + timestamp: 1697786555281 + } + + + + +# Heartbeat Packet + + + + + + + + + + + +
+  bytes  + +        0        +
+         + +  common header  +
+ diff --git a/apps/emqx_bridge_syskeeper/doc/protocol_v1.org b/apps/emqx_bridge_syskeeper/doc/protocol_v1.org new file mode 100644 index 000000000..12d0fe850 --- /dev/null +++ b/apps/emqx_bridge_syskeeper/doc/protocol_v1.org @@ -0,0 +1,80 @@ +* Packet Format + +-------+-----+-----+-----+-----+-----------------+----------------+ + | bytes | 0 | 1 | 2 | 3 | 5 | 6 .. end | + +-------+-----+-----+-----+-----+-----------------+----------------+ + | | variable length | common header | payload | + +-------+-----------------------+-----------------+----------------+ + + The length of the remaining part(common header + payload) is indicated by the Length Header of each packet + +* Common Header + +------+-----+-----+-----+-----+-----+-----+-----+-----+ + | bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | + +------+-----+-----+-----+-----+-----+-----+-----+-----+ + | | packet type | shared flags | + +------+-----------------------+-----------------------+ +** Types + +----------+-----------+ + | type | usage | + +----------+-----------+ + | 0 | handshake | + +----------+-----------+ + | 1 | forward | + +----------+-----------+ + | 2 | heartbeat | + +----------+-----------+ +** Shared Flags + The usage of each bit is determined by the type of packet +* Handshake Packet + +-------+---------------+---------------+ + | bytes | 0 | 1 | + +-------+---------------+---------------+ + | | common header | version | + +-------+---------------+---------------+ +* Forward Packet + +------+---+---+---+---+---+---+---+-----+-----------+ + | bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | ... 
| + +------+---+---+---+---+---+---+---+-----+-----------+ + | | | | ACK | | + | | packet type +-----------+-----+ payload | + | | | forward flags | | + +------+---------------+-----------------+-----------+ + +** Flags + +------+-------------------------------------------+ + | flag | usage | + +------+-------------------------------------------+ + | ACK | This packet need a ACK response | + +------+-------------------------------------------+ + +** Payload + +-------+-----+-------+-----+-----+-----+-----+ + | bytes | 0 | .. | n | n+1 | .. | x | + +-------+-----+-------+-----+-----+-----+-----+ + | | Content Length | Message Content | + +-------+-------------------+-----------------+ + + + Content length is a variable length number. + + Message content is a list in an opaque binary format whose element is a map structure + +*** Message Content map structure + +#+begin_src json + { + id: "0006081CCFF3D48F03C10000058B0000", // unique message id + qos: 1, + flags: {dup: false, retain: false}, + from: "clientid", + topic: "t/1", + payload: "hello, world", + timestamp: 1697786555281 + } +#+end_src + +* Heartbeat Packet + + +-------+---------------+ + | bytes | 0 | + +-------+---------------+ + | | common header | + +-------+---------------+ diff --git a/apps/emqx_bridge_syskeeper/docker-ct b/apps/emqx_bridge_syskeeper/docker-ct new file mode 100644 index 000000000..80f0d394b --- /dev/null +++ b/apps/emqx_bridge_syskeeper/docker-ct @@ -0,0 +1 @@ +toxiproxy diff --git a/apps/emqx_bridge_syskeeper/include/emqx_bridge_syskeeper.hrl b/apps/emqx_bridge_syskeeper/include/emqx_bridge_syskeeper.hrl new file mode 100644 index 000000000..4e14fafb0 --- /dev/null +++ b/apps/emqx_bridge_syskeeper/include/emqx_bridge_syskeeper.hrl @@ -0,0 +1,15 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- +-ifndef(EMQX_BRIDGE_SYSKEEPER). +-define(EMQX_BRIDGE_SYSKEEPER, true). + +-define(TYPE_HANDSHAKE, 0). +-define(TYPE_FORWARD, 1). +-define(TYPE_HEARTBEAT, 2). + +-type packet_type() :: handshake | forward | heartbeat. +-type packet_data() :: none | binary() | [binary()]. +-type packet_type_val() :: ?TYPE_HANDSHAKE..?TYPE_HEARTBEAT. + +-endif. diff --git a/apps/emqx_bridge_syskeeper/rebar.config b/apps/emqx_bridge_syskeeper/rebar.config new file mode 100644 index 000000000..31879d9ce --- /dev/null +++ b/apps/emqx_bridge_syskeeper/rebar.config @@ -0,0 +1,6 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. diff --git a/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper.app.src b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper.app.src new file mode 100644 index 000000000..3c7995cb7 --- /dev/null +++ b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper.app.src @@ -0,0 +1,13 @@ +{application, emqx_bridge_syskeeper, [ + {description, "EMQX Enterprise Data bridge for Syskeeper"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + emqx_resource + ]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper.erl b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper.erl new file mode 100644 index 000000000..0ccc76c9a --- /dev/null +++ b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper.erl @@ -0,0 +1,161 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_syskeeper). + +-include_lib("typerefl/include/types.hrl"). 
+-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + bridge_v2_examples/1, + values/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api +bridge_v2_examples(Method) -> + [ + #{ + <<"syskeeper_forwarder">> => #{ + summary => <<"Syskeeper Forwarder Bridge">>, + value => values(Method) + } + } + ]. + +values(get) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ] + }, + values(post) + ); +values(post) -> + maps:merge( + #{ + name => <<"syskeeper_forwarder">>, + type => <<"syskeeper_forwarder">> + }, + values(put) + ); +values(put) -> + #{ + enable => true, + connector => <<"syskeeper_forwarder">>, + parameters => #{ + target_topic => <<"${topic}">>, + target_qos => <<"-1">>, + template => <<"${payload}">> + }, + resource_opts => #{ + worker_pool_size => 16 + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "syskeeper". + +roots() -> []. 
+ +fields(action) -> + {syskeeper_forwarder, + mk( + hoconsc:map(name, ref(?MODULE, config)), + #{ + desc => <<"Syskeeper Forwarder Action Config">>, + required => false + } + )}; +fields(config) -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {description, emqx_schema:description_schema()}, + {connector, + mk(binary(), #{ + desc => ?DESC(emqx_connector_schema, "connector_field"), required => true + })}, + {parameters, + mk( + ref(?MODULE, "parameters"), + #{required => true, desc => ?DESC("parameters")} + )}, + {local_topic, mk(binary(), #{required => false, desc => ?DESC(mqtt_topic)})}, + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ]; +fields("parameters") -> + [ + {target_topic, + mk( + binary(), + #{desc => ?DESC("target_topic"), default => <<"${topic}">>} + )}, + {target_qos, + mk( + range(-1, 2), + #{desc => ?DESC("target_qos"), default => -1} + )}, + {template, + mk( + binary(), + #{desc => ?DESC("template"), default => <<"${payload}">>} + )} + ]; +fields("creation_opts") -> + emqx_resource_schema:create_opts([{request_ttl, #{default => infinity}}]); +fields("post") -> + [type_field(), name_field() | fields(config)]; +fields("post_bridge_v2") -> + fields("post"); +fields("put") -> + fields(config); +fields("put_bridge_v2") -> + fields("put"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"); +fields("get_bridge_v2") -> + fields("get"). + +desc(config) -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for Syskeeper using `", string:to_upper(Method), "` method."]; +desc("parameters") -> + ?DESC("parameters"); +desc("creation_opts" = Name) -> + emqx_resource_schema:desc(Name); +desc(_) -> + undefined. 
+ +%% ------------------------------------------------------------------------------------------------- + +type_field() -> + {type, mk(enum([syskeeper_forwarder]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_action_info.erl b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_action_info.erl new file mode 100644 index 000000000..77d3c26ce --- /dev/null +++ b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_syskeeper_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> syskeeper_forwarder. + +action_type_name() -> syskeeper_forwarder. + +connector_type_name() -> syskeeper_forwarder. + +schema_module() -> emqx_bridge_syskeeper. diff --git a/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_client.erl b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_client.erl new file mode 100644 index 000000000..18822886f --- /dev/null +++ b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_client.erl @@ -0,0 +1,180 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_syskeeper_client). + +-behaviour(gen_server). + +%% API +-export([ + start_link/1, + forward/3, + heartbeat/2 +]). 
+ +%% gen_server callbacks +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3, + format_status/2 +]). + +-include("emqx_bridge_syskeeper.hrl"). + +-type state() :: #{ + ack_mode := need_ack | no_ack, + ack_timeout := timer:time(), + socket := undefined | inet:socket(), + frame_state := emqx_bridge_syskeeper_frame:state(), + last_error := undefined | tuple() +}. + +-type send_result() :: {ok, state()} | {error, term()}. + +%% ------------------------------------------------------------------------------------------------- +%% API +forward(Pid, Msg, Timeout) -> + call(Pid, {?FUNCTION_NAME, Msg}, Timeout). + +heartbeat(Pid, Timeout) -> + ok =:= call(Pid, ?FUNCTION_NAME, Timeout). + +%% ------------------------------------------------------------------------------------------------- +%% Starts Bridge which transfer data to Syskeeper + +start_link(Options) -> + gen_server:start_link(?MODULE, Options, []). + +%% ------------------------------------------------------------------------------------------------- +%%% gen_server callbacks + +%% Initialize syskeeper client +init(#{ack_timeout := AckTimeout, ack_mode := AckMode} = Options) -> + erlang:process_flag(trap_exit, true), + connect(Options, #{ + ack_timeout => AckTimeout, + ack_mode => AckMode, + socket => undefined, + last_error => undefined, + frame_state => emqx_bridge_syskeeper_frame:make_state_with_conf(Options) + }). + +handle_call({forward, Msgs}, _From, State) -> + Result = send_packet(forward, Msgs, State), + handle_reply_result(Result, State); +handle_call(heartbeat, _From, State) -> + Result = send_ack_packet(heartbeat, none, State), + handle_reply_result(Result, State); +handle_call(_Request, _From, State) -> + {reply, ok, State}. + +handle_cast(_Request, State) -> + {noreply, State}. 
+
+handle_info({tcp_closed, _} = Reason, State) ->
+    {noreply, State#{socket := undefined, last_error := Reason}};
+handle_info({tcp_error, _, _} = Reason, State) ->
+    {noreply, State#{socket := undefined, last_error := Reason}};
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+terminate(_Reason, #{socket := Socket} = _State) ->
+    close_socket(Socket),
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+-spec format_status(
+    Opt :: normal | terminate,
+    Status :: list()
+) -> Status :: term().
+format_status(_Opt, Status) ->
+    Status.
+
+%% ------------------------------------------------------------------------------------------------
+connect(
+    #{
+        hostname := Host,
+        port := Port
+    },
+    State
+) ->
+    case
+        gen_tcp:connect(Host, Port, [
+            {active, true},
+            {mode, binary},
+            {nodelay, true}
+        ])
+    of
+        {ok, Socket} ->
+            send_ack_packet(handshake, none, State#{socket := Socket});
+        {error, Reason} ->
+            {stop, Reason}
+    end.
+
+-spec send_ack_packet(packet_type(), packet_data(), state()) -> send_result().
+send_ack_packet(Type, Data, State) ->
+    send_packet(Type, Data, State, true).
+
+-spec send_packet(packet_type(), packet_data(), state()) -> send_result().
+send_packet(Type, Data, State) ->
+    send_packet(Type, Data, State, false).
+
+-spec send_packet(packet_type(), packet_data(), state(), boolean()) -> send_result().
+send_packet(_Type, _Data, #{socket := undefined, last_error := Reason}, _Force) ->
+    {error, Reason};
+send_packet(Type, Data, #{frame_state := FrameState} = State, Force) ->
+    Packet = emqx_bridge_syskeeper_frame:encode(Type, Data, FrameState),
+    case socket_send(Packet, State) of
+        ok ->
+            wait_ack(State, Force);
+        {error, _} = Error ->
+            Error
+    end.
+
+-spec socket_send(binary() | [binary()], state()) -> ok | {error, _Reason}.
+socket_send(Bin, State) when is_binary(Bin) ->
+    socket_send([Bin], State);
+socket_send(Bins, #{socket := Socket}) ->
+    Map = fun(Data) ->
+        Len = erlang:byte_size(Data),
+        VarLen = emqx_bridge_syskeeper_frame:serialize_variable_byte_integer(Len),
+        <<VarLen/binary, Data/binary>>
+    end,
+    gen_tcp:send(Socket, lists:map(Map, Bins)).
+
+-spec wait_ack(state(), boolean()) -> send_result().
+wait_ack(#{ack_timeout := AckTimeout, ack_mode := AckMode} = State, Force) when
+    AckMode =:= need_ack; Force
+->
+    receive
+        {tcp, _Socket, <<16#FF>>} ->
+            {ok, State};
+        {tcp_closed, _} = Reason ->
+            {error, Reason};
+        {tcp_error, _, _} = Reason ->
+            {error, Reason}
+    after AckTimeout ->
+        {error, wait_ack_timeout}
+    end;
+wait_ack(State, _Force) ->
+    {ok, State}.
+
+close_socket(undefined) ->
+    ok;
+close_socket(Socket) ->
+    catch gen_tcp:close(Socket),
+    ok.
+
+call(Pid, Msg, Timeout) ->
+    gen_server:call(Pid, Msg, Timeout).
+
+handle_reply_result({ok, _}, State) ->
+    {reply, ok, State};
+handle_reply_result({error, Reason}, State) ->
+    {reply, {error, {recoverable_error, Reason}}, State#{last_error := Reason}}.
diff --git a/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_connector.erl b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_connector.erl
new file mode 100644
index 000000000..6887582b3
--- /dev/null
+++ b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_connector.erl
@@ -0,0 +1,347 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_syskeeper_connector).
+
+-behaviour(emqx_resource).
+
+-include_lib("emqx_resource/include/emqx_resource.hrl").
+-include_lib("typerefl/include/types.hrl").
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+-include_lib("hocon/include/hoconsc.hrl").
+ +-export([namespace/0, roots/0, fields/1, desc/1, connector_examples/1]). + +%% `emqx_resource' API +-export([ + callback_mode/0, + query_mode/1, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2, + on_add_channel/4, + on_remove_channel/3, + on_get_channels/1, + on_get_channel_status/3 +]). + +-export([ + connect/1 +]). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-define(CONNECTOR_TYPE, syskeeper_forwarder). +-define(SYSKEEPER_HOST_OPTIONS, #{ + default_port => 9092 +}). + +-define(EXTRA_CALL_TIMEOUT, 2000). + +%% ------------------------------------------------------------------------------------------------- +%% api +namespace() -> "syskeeper_forwarder". + +connector_examples(Method) -> + [ + #{ + <<"syskeeper_forwarder">> => #{ + summary => <<"Syskeeper Forwarder Connector">>, + value => values(Method) + } + } + ]. + +values(get) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ], + actions => [<<"my_action">>] + }, + values(post) + ); +values(post) -> + maps:merge( + #{ + name => <<"syskeeper_forwarder">>, + type => <<"syskeeper_forwarder">> + }, + values(put) + ); +values(put) -> + #{ + enable => true, + server => <<"127.0.0.1:9092">>, + ack_mode => <<"no_ack">>, + ack_timeout => <<"10s">>, + pool_size => 16 + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon schema +roots() -> + [{config, #{type => hoconsc:ref(?MODULE, config)}}]. 
+ +fields(config) -> + emqx_connector_schema:common_fields() ++ fields("connection_fields"); +fields("connection_fields") -> + [ + {server, server()}, + {ack_mode, + mk( + enum([need_ack, no_ack]), + #{desc => ?DESC(ack_mode), default => <<"no_ack">>} + )}, + {ack_timeout, + mk( + emqx_schema:timeout_duration_ms(), + #{desc => ?DESC(ack_timeout), default => <<"10s">>} + )}, + {pool_size, fun + (default) -> + 16; + (Other) -> + emqx_connector_schema_lib:pool_size(Other) + end} + ]; +fields(Field) when + Field == "get"; + Field == "post"; + Field == "put" +-> + emqx_connector_schema:api_fields( + Field ++ "_connector", ?CONNECTOR_TYPE, fields("connection_fields") + ). + +desc(config) -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for Syskeeper Proxy using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +server() -> + Meta = #{desc => ?DESC("server")}, + emqx_schema:servers_sc(Meta, ?SYSKEEPER_HOST_OPTIONS). + +%% ------------------------------------------------------------------------------------------------- +%% `emqx_resource' API + +callback_mode() -> always_sync. + +query_mode(_) -> sync. + +on_start( + InstanceId, + #{ + server := Server, + pool_size := PoolSize, + ack_timeout := AckTimeout + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_syskeeper_connector", + connector => InstanceId, + config => Config + }), + + HostCfg = emqx_schema:parse_server(Server, ?SYSKEEPER_HOST_OPTIONS), + + Options = [ + {options, + maps:merge( + HostCfg, + maps:with([ack_mode, ack_timeout], Config) + )}, + {pool_size, PoolSize} + ], + + State = #{ + pool_name => InstanceId, + ack_timeout => AckTimeout, + channels => #{} + }, + case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of + ok -> + {ok, State}; + Error -> + Error + end. 
+ +on_stop(InstanceId, _State) -> + ?SLOG(info, #{ + msg => "stopping_syskeeper_connector", + connector => InstanceId + }), + emqx_resource_pool:stop(InstanceId). + +on_query(InstanceId, {_MessageTag, _} = Query, State) -> + do_query(InstanceId, [Query], State); +on_query(_InstanceId, Query, _State) -> + {error, {unrecoverable_error, {invalid_request, Query}}}. + +%% we only support batch insert +on_batch_query(InstanceId, [{_MessageTag, _} | _] = Query, State) -> + do_query(InstanceId, Query, State); +on_batch_query(_InstanceId, Query, _State) -> + {error, {unrecoverable_error, {invalid_request, Query}}}. + +on_get_status(_InstanceId, #{pool_name := Pool, ack_timeout := AckTimeout}) -> + Health = emqx_resource_pool:health_check_workers( + Pool, {emqx_bridge_syskeeper_client, heartbeat, [AckTimeout + ?EXTRA_CALL_TIMEOUT]} + ), + status_result(Health). + +status_result(true) -> connected; +status_result(false) -> connecting; +status_result({error, _}) -> connecting. + +on_add_channel( + _InstanceId, + #{channels := Channels} = OldState, + ChannelId, + #{ + parameters := #{ + target_topic := TargetTopic, + target_qos := TargetQoS, + template := Template + } + } +) -> + case maps:is_key(ChannelId, Channels) of + true -> + {error, already_exists}; + _ -> + Channel = #{ + target_qos => TargetQoS, + target_topic => emqx_placeholder:preproc_tmpl(TargetTopic), + template => emqx_placeholder:preproc_tmpl(Template) + }, + Channels2 = Channels#{ChannelId => Channel}, + {ok, OldState#{channels => Channels2}} + end. + +on_remove_channel(_InstanceId, #{channels := Channels} = OldState, ChannelId) -> + Channels2 = maps:remove(ChannelId, Channels), + {ok, OldState#{channels => Channels2}}. + +on_get_channels(InstanceId) -> + emqx_bridge_v2:get_channels_for_connector(InstanceId). + +on_get_channel_status(_InstanceId, ChannelId, #{channels := Channels}) -> + case maps:is_key(ChannelId, Channels) of + true -> + connected; + _ -> + {error, not_exists} + end. 
+ +%% ------------------------------------------------------------------------------------------------- +%% Helper fns + +do_query( + InstanceId, + Query, + #{pool_name := PoolName, ack_timeout := AckTimeout, channels := Channels} = State +) -> + ?TRACE( + "QUERY", + "syskeeper_connector_received", + #{connector => InstanceId, query => Query, state => State} + ), + + Result = + case try_render_message(Query, Channels) of + {ok, Msg} -> + ecpool:pick_and_do( + PoolName, + {emqx_bridge_syskeeper_client, forward, [Msg, AckTimeout + ?EXTRA_CALL_TIMEOUT]}, + no_handover + ); + Error -> + Error + end, + + case Result of + {error, Reason} -> + ?tp( + syskeeper_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "syskeeper_connector_do_query_failed", + connector => InstanceId, + query => Query, + reason => Reason + }), + case Reason of + ecpool_empty -> + {error, {recoverable_error, Reason}}; + _ -> + Result + end; + _ -> + ?tp( + syskeeper_connector_query_return, + #{result => Result} + ), + Result + end. + +connect(Opts) -> + Options = proplists:get_value(options, Opts), + emqx_bridge_syskeeper_client:start_link(Options). + +try_render_message(Datas, Channels) -> + try_render_message(Datas, Channels, []). + +try_render_message([{MessageTag, Data} | T], Channels, Acc) -> + case maps:find(MessageTag, Channels) of + {ok, Channel} -> + case render_message(Data, Channel) of + {ok, Msg} -> + try_render_message(T, Channels, [Msg | Acc]); + Error -> + Error + end; + _ -> + {error, {unrecoverable_error, {invalid_message_tag, MessageTag}}} + end; +try_render_message([], _Channels, Acc) -> + {ok, lists:reverse(Acc)}. 
+ +render_message(#{id := Id, qos := QoS, clientid := From} = Data, #{ + target_qos := TargetQoS, target_topic := TargetTopicTks, template := Template +}) -> + Msg = maps:with([qos, flags, topic, payload, timestamp], Data), + Topic = emqx_placeholder:proc_tmpl(TargetTopicTks, Msg), + {ok, Msg#{ + id => emqx_guid:from_hexstr(Id), + qos := + case TargetQoS of + -1 -> + QoS; + _ -> + TargetQoS + end, + from => From, + topic := Topic, + payload := format_data(Template, Msg) + }}; +render_message(Data, _Channel) -> + {error, {unrecoverable_error, {invalid_data, Data}}}. + +format_data([], Msg) -> + emqx_utils_json:encode(Msg); +format_data(Tokens, Msg) -> + emqx_placeholder:proc_tmpl(Tokens, Msg). diff --git a/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_frame.erl b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_frame.erl new file mode 100644 index 000000000..d2f8febb9 --- /dev/null +++ b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_frame.erl @@ -0,0 +1,163 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% @doc EMQ X Bridge Sysk Frame +%%-------------------------------------------------------------------- + +-module(emqx_bridge_syskeeper_frame). + +%% API +-export([ + versions/0, + current_version/0, + make_state_with_conf/1, + make_state/1, + encode/3, + parse/2, + parse_handshake/1 +]). + +-export([ + bool2int/1, + int2bool/1, + marshaller/1, + serialize_variable_byte_integer/1, + parse_variable_byte_integer/1 +]). + +-export_type([state/0, versions/0, handshake/0, forward/0, packet/0]). + +-include("emqx_bridge_syskeeper.hrl"). + +-type state() :: #{ + handler := atom(), + version := versions(), + ack => boolean() +}. + +-type versions() :: 1. + +-type handshake() :: #{type := handshake, version := versions()}. +-type forward() :: #{type := forward, ack := boolean(), messages := list(map())}. +-type heartbeat() :: #{type := heartbeat}. 
+
+-type packet() ::
+    handshake()
+    | forward()
+    | heartbeat().
+
+-callback version() -> versions().
+-callback encode(packet_type_val(), packet_data(), state()) -> binary().
+-callback parse(packet_type(), binary(), state()) -> packet().
+
+-define(HIGHBIT, 2#10000000).
+-define(LOWBITS, 2#01111111).
+-define(MULTIPLIER_MAX, 16#200000).
+
+-export_type([packet_type/0]).
+
+%%-------------------------------------------------------------------
+%%% API
+%%-------------------------------------------------------------------
+-spec versions() -> list(versions()).
+versions() ->
+    [1].
+
+-spec current_version() -> versions().
+current_version() ->
+    1.
+
+-spec make_state_with_conf(map()) -> state().
+make_state_with_conf(#{ack_mode := Mode}) ->
+    State = make_state(current_version()),
+    State#{ack => Mode =:= need_ack}.
+
+-spec make_state(versions()) -> state().
+make_state(Version) ->
+    case lists:member(Version, versions()) of
+        true ->
+            Handler = erlang:list_to_existing_atom(
+                io_lib:format("emqx_bridge_syskeeper_frame_v~B", [Version])
+            ),
+            #{
+                handler => Handler,
+                version => Version
+            };
+        _ ->
+            erlang:throw({unsupport_version, Version})
+    end.
+
+-spec encode(packet_type(), term(), state()) -> binary().
+encode(Type, Data, #{handler := Handler} = State) ->
+    Handler:encode(packet_type_val(Type), Data, State).
+
+-spec parse(binary(), state()) -> _.
+parse(<<TypeVal:4, _:4, _/binary>> = Bin, #{handler := Handler} = State) ->
+    Type = to_packet_type(TypeVal),
+    Handler:parse(Type, Bin, State).
+
+parse_handshake(Data) ->
+    State = make_state(1),
+    parse_handshake(Data, State).
+
+parse_handshake(Data, #{version := Version} = State) ->
+    case parse(Data, State) of
+        {ok, #{type := handshake, version := Version} = Shake} ->
+            {ok, {State, Shake}};
+        {ok, #{type := handshake, version := NewVersion}} ->
+            State2 = make_state(NewVersion),
+            parse_handshake(Data, State2);
+        Error ->
+            Error
+    end.
+
+bool2int(true) ->
+    1;
+bool2int(_) ->
+    0.
+ +int2bool(1) -> + true; +int2bool(_) -> + false. + +marshaller(Item) when is_binary(Item) -> + erlang:binary_to_term(Item); +marshaller(Item) -> + erlang:term_to_binary(Item). + +serialize_variable_byte_integer(N) when N =< ?LOWBITS -> + <<0:1, N:7>>; +serialize_variable_byte_integer(N) -> + <<1:1, (N rem ?HIGHBIT):7, (serialize_variable_byte_integer(N div ?HIGHBIT))/binary>>. + +parse_variable_byte_integer(Bin) -> + parse_variable_byte_integer(Bin, 1, 0). + +%%------------------------------------------------------------------- +%%% Internal functions +%%------------------------------------------------------------------- +to_packet_type(?TYPE_HANDSHAKE) -> + handshake; +to_packet_type(?TYPE_FORWARD) -> + forward; +to_packet_type(?TYPE_HEARTBEAT) -> + heartbeat. + +packet_type_val(handshake) -> + ?TYPE_HANDSHAKE; +packet_type_val(forward) -> + ?TYPE_FORWARD; +packet_type_val(heartbeat) -> + ?TYPE_HEARTBEAT. + +parse_variable_byte_integer(<<1:1, _Len:7, _Rest/binary>>, Multiplier, _Value) when + Multiplier > ?MULTIPLIER_MAX +-> + {error, malformed_variable_byte_integer}; +parse_variable_byte_integer(<<1:1, Len:7, Rest/binary>>, Multiplier, Value) -> + parse_variable_byte_integer(Rest, Multiplier * ?HIGHBIT, Value + Len * Multiplier); +parse_variable_byte_integer(<<0:1, Len:7, Rest/binary>>, Multiplier, Value) -> + {ok, Value + Len * Multiplier, Rest}; +parse_variable_byte_integer(<<>>, _Multiplier, _Value) -> + {error, incomplete}. diff --git a/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_frame_v1.erl b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_frame_v1.erl new file mode 100644 index 000000000..200730659 --- /dev/null +++ b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_frame_v1.erl @@ -0,0 +1,70 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%
+%% @doc EMQ X Bridge Sysk Frame version 1
+%%--------------------------------------------------------------------
+
+-module(emqx_bridge_syskeeper_frame_v1).
+
+%% API
+-export([
+    version/0,
+    encode/3,
+    parse/3
+]).
+
+-behaviour(emqx_bridge_syskeeper_frame).
+
+-include("emqx_bridge_syskeeper.hrl").
+
+-define(B2I(X), emqx_bridge_syskeeper_frame:bool2int((X))).
+-define(I2B(X), emqx_bridge_syskeeper_frame:int2bool((X))).
+
+-import(emqx_bridge_syskeeper_frame, [
+    serialize_variable_byte_integer/1, parse_variable_byte_integer/1, marshaller/1
+]).
+
+%%-------------------------------------------------------------------
+%%% API
+%%-------------------------------------------------------------------
+version() ->
+    1.
+
+encode(?TYPE_HANDSHAKE = Type, _, _) ->
+    Version = version(),
+    <<Type:4, 0:4, Version:8>>;
+encode(?TYPE_FORWARD = Type, Messages, #{ack := Ack}) ->
+    encode_forward(Messages, Type, Ack);
+encode(?TYPE_HEARTBEAT = Type, _, _) ->
+    <<Type:4, 0:4>>.
+
+-dialyzer({nowarn_function, parse/3}).
+parse(handshake, <<_:4, _:4, Version:8>>, _) ->
+    {ok, #{type => handshake, version => Version}};
+parse(forward, Bin, _) ->
+    parse_forward(Bin);
+parse(heartbeat, <<_:4, _:4>>, _) ->
+    {ok, #{type => heartbeat}}.
+
+%%-------------------------------------------------------------------
+%%% Internal functions
+%%-------------------------------------------------------------------
+encode_forward(Messages, Type, Ack) ->
+    AckVal = ?B2I(Ack),
+    Data = marshaller(Messages),
+    Len = erlang:byte_size(Data),
+    LenVal = serialize_variable_byte_integer(Len),
+    <<Type:4, AckVal:4, LenVal/binary, Data/binary>>.
+
+parse_forward(<<_:4, AckVal:4, Bin/binary>>) ->
+    case parse_variable_byte_integer(Bin) of
+        {ok, Len, Rest} ->
+            <<MsgBin:Len/binary>> = Rest,
+            {ok, #{
+                type => forward,
+                ack => ?I2B(AckVal),
+                messages => marshaller(MsgBin)
+            }};
+        Error ->
+            Error
+    end.
diff --git a/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_proxy.erl b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_proxy.erl new file mode 100644 index 000000000..f930b0042 --- /dev/null +++ b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_proxy.erl @@ -0,0 +1,113 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_syskeeper_proxy). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + connector_examples/1, + values/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(CONNECTOR_TYPE, syskeeper_proxy). + +-define(SYSKEEPER_HOST_OPTIONS, #{ + default_port => 9092 +}). + +%% ------------------------------------------------------------------------------------------------- +%% api +connector_examples(Method) -> + [ + #{ + <<"syskeeper_proxy">> => #{ + summary => <<"Syskeeper Proxy Connector">>, + value => values(Method) + } + } + ]. + +values(get) -> + maps:merge( + #{ + status => <<"connected">>, + node_status => [ + #{ + node => <<"emqx@localhost">>, + status => <<"connected">> + } + ], + actions => [<<"my_action">>] + }, + values(post) + ); +values(post) -> + maps:merge( + #{ + name => <<"syskeeper_proxy">>, + type => <<"syskeeper_proxy">> + }, + values(put) + ); +values(put) -> + #{ + enable => true, + listen => <<"127.0.0.1:9092">>, + acceptors => 16, + handshake_timeout => <<"16s">> + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "connector_syskeeper_proxy". + +roots() -> []. 
+ +fields(config) -> + emqx_connector_schema:common_fields() ++ fields("connection_fields"); +fields("connection_fields") -> + [ + {listen, listen()}, + {acceptors, + mk( + non_neg_integer(), + #{desc => ?DESC("acceptors"), default => 16} + )}, + {handshake_timeout, + mk( + emqx_schema:timeout_duration_ms(), + #{desc => ?DESC(handshake_timeout), default => <<"10s">>} + )} + ]; +fields(Field) when + Field == "get"; + Field == "post"; + Field == "put" +-> + emqx_connector_schema:api_fields( + Field ++ "_connector", ?CONNECTOR_TYPE, fields("connection_fields") + ). + +desc(config) -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for Syskeeper Proxy using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +listen() -> + Meta = #{desc => ?DESC("listen")}, + emqx_schema:servers_sc(Meta, ?SYSKEEPER_HOST_OPTIONS). diff --git a/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_proxy_server.erl b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_proxy_server.erl new file mode 100644 index 000000000..057d7579c --- /dev/null +++ b/apps/emqx_bridge_syskeeper/src/emqx_bridge_syskeeper_proxy_server.erl @@ -0,0 +1,279 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_syskeeper_proxy_server). + +-behaviour(gen_statem). + +-include_lib("emqx/include/logger.hrl"). + +-elvis([{elvis_style, invalid_dynamic_call, disable}]). + +%% `emqx_resource' API +-export([ + query_mode/1, + on_start/2, + on_stop/2, + on_get_status/2 +]). + +%% API +-export([start_link/3]). + +%% gen_statem callbacks +-export([callback_mode/0, init/1, terminate/3, code_change/4]). +-export([handle_event/4]). + +-type state() :: wait_ready | handshake | running. 
+-type data() :: #{ + transport := atom(), + socket := inet:socket(), + frame_state := + undefined + | emqx_bridge_sysk_frame:state(), + buffer := binary(), + conf := map() +}. + +-define(DEFAULT_PORT, 9092). + +%% ------------------------------------------------------------------------------------------------- +%% emqx_resource + +query_mode(_) -> + no_queries. + +on_start( + InstanceId, + #{ + listen := Server, + acceptors := Acceptors + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_syskeeper_proxy_server", + connector => InstanceId, + config => Config + }), + + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, #{ + default_port => ?DEFAULT_PORT + }), + ListenOn = {Host, Port}, + + Options = [ + {acceptors, Acceptors}, + {tcp_options, [{mode, binary}, {reuseaddr, true}, {nodelay, true}]} + ], + MFArgs = {?MODULE, start_link, [maps:with([handshake_timeout], Config)]}, + ok = emqx_resource:allocate_resource(InstanceId, listen_on, ListenOn), + + case esockd:open(?MODULE, ListenOn, Options, MFArgs) of + {ok, _} -> + {ok, #{listen_on => ListenOn}}; + Error -> + Error + end. + +on_stop(InstanceId, _State) -> + ?SLOG(info, #{ + msg => "stopping_syskeeper_proxy_server", + connector => InstanceId + }), + case emqx_resource:get_allocated_resources(InstanceId) of + #{listen_on := ListenOn} -> + esockd:close(?MODULE, ListenOn); + _ -> + ok + end. + +on_get_status(_InstanceId, #{listen_on := ListenOn}) -> + try + _ = esockd:listener({?MODULE, ListenOn}), + connected + catch + _:_ -> + disconnected + end. + +%% ------------------------------------------------------------------------------------------------- +-spec start_link(atom(), inet:socket(), map()) -> + {ok, Pid :: pid()} + | ignore + | {error, Error :: term()}. +start_link(Transport, Socket, Conf) -> + gen_statem:start_link(?MODULE, [Transport, Socket, Conf], []). 
+ +%% ------------------------------------------------------------------------------------------------- +%% gen_statem callbacks + +-spec callback_mode() -> gen_statem:callback_mode_result(). +callback_mode() -> handle_event_function. + +%% ------------------------------------------------------------------------------------------------- +-spec init(Args :: term()) -> + gen_statem:init_result(term()). +init([Transport, Socket, Conf]) -> + {ok, wait_ready, + #{ + transport => Transport, + socket => Socket, + conf => Conf, + buffer => <<>>, + frame_state => undefined + }, + {next_event, internal, wait_ready}}. + +handle_event(internal, wait_ready, wait_ready, Data) -> + wait_ready(Data); +handle_event(state_timeout, handshake_timeout, handshake, Data) -> + ?SLOG(info, #{ + msg => "syskeeper_proxy_server_handshake_timeout", + data => Data + }), + {stop, normal}; +handle_event(internal, try_parse, running, Data) -> + try_parse(running, Data); +handle_event(info, {tcp, _Socket, Bin}, State, Data) -> + try_parse(State, combine_buffer(Bin, Data)); +handle_event(info, {tcp_closed, _}, _State, _Data) -> + {stop, normal}; +handle_event(info, {tcp_error, Error, Reason}, _State, _Data) -> + ?SLOG(warning, #{ + msg => "syskeeper_proxy_server_tcp_error", + error => Error, + reason => Reason + }), + {stop, normal}; +handle_event(Event, Content, State, Data) -> + ?SLOG(warning, #{ + msg => "syskeeper_proxy_server_unexpected_event", + event => Event, + content => Content, + state => State, + data => Data + }), + keep_state_and_data. + +-spec terminate(Reason :: term(), State :: state(), Data :: data()) -> + any(). +terminate(_Reason, _State, _Data) -> + ok. + +code_change(_OldVsn, State, Data, _Extra) -> + {ok, State, Data}. + +%% ------------------------------------------------------------------------------------------------- +%%% Internal functions +send(#{transport := Transport, socket := Socket}, Bin) -> + Transport:send(Socket, Bin). + +ack(Data) -> + ack(Data, true). 
+
+ack(Data, false) ->
+    send(Data, <<0>>);
+ack(Data, true) ->
+    send(Data, <<16#FF>>).
+
+wait_ready(
+    #{
+        transport := Transport,
+        socket := RawSocket,
+        conf := #{handshake_timeout := Timeout}
+    } =
+        Data
+) ->
+    case Transport:wait(RawSocket) of
+        {ok, Socket} ->
+            Transport:setopts(Socket, [{active, true}]),
+            {next_state, handshake,
+                Data#{
+                    socket => Socket,
+                    frame_state => undefined
+                },
+                {state_timeout, Timeout, handshake_timeout}};
+        {error, Reason} ->
+            ok = Transport:fast_close(RawSocket),
+            ?SLOG(error, #{
+                msg => "syskeeper_proxy_server_listen_error",
+                transport => Transport,
+                reason => Reason
+            }),
+            {stop, Reason}
+    end.
+
+combine_buffer(Bin, #{buffer := Buffer} = Data) ->
+    Data#{buffer := <<Buffer/binary, Bin/binary>>}.
+
+try_parse(State, #{buffer := Bin} = Data) ->
+    case emqx_bridge_syskeeper_frame:parse_variable_byte_integer(Bin) of
+        {ok, Len, Rest} ->
+            case Rest of
+                <<Payload:Len/binary, Rest2/binary>> ->
+                    Data2 = Data#{buffer := Rest2},
+                    Result = parse(Payload, Data2),
+                    handle_parse_result(Result, State, Data2);
+                _ ->
+                    {keep_state, Data}
+            end;
+        {error, incomplete} ->
+            {keep_state, Data};
+        {error, Reason} ->
+            ?SLOG(error, #{
+                msg => "syskeeper_proxy_server_try_parse_error",
+                state => State,
+                data => Data,
+                reason => Reason
+            }),
+            {stop, parse_error}
+    end.
+
+%% maybe handshake
+parse(Bin, #{frame_state := undefined}) ->
+    emqx_bridge_syskeeper_frame:parse_handshake(Bin);
+parse(Bin, #{frame_state := State}) ->
+    emqx_bridge_syskeeper_frame:parse(Bin, State).
+
+do_forward(Ack, Messages, Data) ->
+    lists:foreach(
+        fun(Message) ->
+            Msg = emqx_message:from_map(Message#{headers => #{}, extra => #{}}),
+            _ = emqx_broker:safe_publish(Msg)
+        end,
+        Messages
+    ),
+    case Ack of
+        true ->
+            ack(Data);
+        _ ->
+            ok
+    end.
+ +handle_parse_result({ok, Msg}, State, Data) -> + handle_packet(Msg, State, Data); +handle_parse_result({error, Reason} = Error, State, Data) -> + handle_parse_error(Error, State, #{buffer := _Bin} = Data), + ?SLOG(error, #{ + msg => "syskeeper_proxy_server_parse_result_error", + state => State, + data => Data, + reason => Reason + }), + {stop, parse_error}. + +handle_parse_error(_, handshake, Data) -> + ack(Data, false); +handle_parse_error(_, _, _) -> + ok. + +handle_packet({FrameState, _Shake}, handshake, Data) -> + ack(Data), + {next_state, running, Data#{frame_state := FrameState}, {next_event, internal, try_parse}}; +handle_packet(#{type := forward, ack := Ack, messages := Messages}, running, Data) -> + do_forward(Ack, Messages, Data), + try_parse(running, Data); +handle_packet(#{type := heartbeat}, running, Data) -> + ack(Data), + try_parse(running, Data). diff --git a/apps/emqx_bridge_syskeeper/test/emqx_bridge_syskeeper_SUITE.erl b/apps/emqx_bridge_syskeeper/test/emqx_bridge_syskeeper_SUITE.erl new file mode 100644 index 000000000..66b267eac --- /dev/null +++ b/apps/emqx_bridge_syskeeper/test/emqx_bridge_syskeeper_SUITE.erl @@ -0,0 +1,390 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_syskeeper_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(HOST, "127.0.0.1"). +-define(PORT, 9092). +-define(ACK_TIMEOUT, 2000). +-define(HANDSHAKE_TIMEOUT, 10000). +-define(SYSKEEPER_NAME, <<"syskeeper">>). +-define(SYSKEEPER_PROXY_NAME, <<"syskeeper_proxy">>). +-define(BATCH_SIZE, 3). +-define(TOPIC, <<"syskeeper/message">>). 
+ +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, lifecycle}, + {group, need_ack}, + {group, no_ack} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + Lifecycle = [ + t_setup_proxy_via_config, + t_setup_proxy_via_http_api, + t_setup_forwarder_via_config, + t_setup_forwarder_via_http_api, + t_get_status + ], + Write = TCs -- Lifecycle, + BatchingGroups = [{group, with_batch}, {group, without_batch}], + [ + {need_ack, BatchingGroups}, + {no_ack, BatchingGroups}, + {with_batch, Write}, + {without_batch, Write}, + {lifecycle, Lifecycle} + ]. + +init_per_group(need_ack, Config) -> + [{ack_mode, need_ack} | Config]; +init_per_group(no_ack, Config) -> + [{ack_mode, no_ack} | Config]; +init_per_group(with_batch, Config0) -> + [{enable_batch, true} | Config0]; +init_per_group(without_batch, Config0) -> + [{enable_batch, false} | Config0]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + Apps = emqx_cth_suite:start( + [ + emqx_conf, + emqx_connector, + emqx_bridge, + emqx_bridge_syskeeper + ], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), + emqx_mgmt_api_test_util:init_suite(), + [{apps, Apps} | Config]. + +end_per_suite(Config) -> + Apps = ?config(apps, Config), + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_cth_suite:stop(Apps), + ok. + +init_per_testcase(_Testcase, Config) -> + snabbkaffe:start_trace(), + Config. + +end_per_testcase(_Testcase, _Config) -> + ok = snabbkaffe:stop(), + delete_bridge(syskeeper_forwarder, ?SYSKEEPER_NAME), + delete_connectors(syskeeper_forwarder, ?SYSKEEPER_NAME), + delete_connectors(syskeeper_proxy, ?SYSKEEPER_PROXY_NAME), + ok. 
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ +syskeeper_config(Config) -> + BatchSize = + case proplists:get_value(enable_batch, Config, false) of + true -> ?BATCH_SIZE; + false -> 1 + end, + ConfigString = + io_lib:format( + "actions.~s.~s {\n" + " enable = true\n" + " connector = ~ts\n" + " parameters = {\n" + " target_topic = \"${topic}\"\n" + " target_qos = -1\n" + " template = \"${payload}\"\n" + " },\n" + " resource_opts = {\n" + " request_ttl = 500ms\n" + " batch_size = ~b\n" + " }\n" + "}", + [ + syskeeper_forwarder, + ?SYSKEEPER_NAME, + ?SYSKEEPER_NAME, + BatchSize + ] + ), + {?SYSKEEPER_NAME, parse_bridge_and_check(ConfigString, syskeeper_forwarder, ?SYSKEEPER_NAME)}. + +syskeeper_connector_config(Config) -> + AckMode = proplists:get_value(ack_mode, Config, no_ack), + ConfigString = + io_lib:format( + "connectors.~s.~s {\n" + " enable = true\n" + " server = \"~ts\"\n" + " ack_mode = ~p\n" + " ack_timeout = ~p\n" + " pool_size = 1\n" + "}", + [ + syskeeper_forwarder, + ?SYSKEEPER_NAME, + server(), + AckMode, + ?ACK_TIMEOUT + ] + ), + {?SYSKEEPER_NAME, + parse_connectors_and_check(ConfigString, syskeeper_forwarder, ?SYSKEEPER_NAME)}. + +syskeeper_proxy_config(_Config) -> + ConfigString = + io_lib:format( + "connectors.~s.~s {\n" + " enable = true\n" + " listen = \"~ts\"\n" + " acceptors = 1\n" + " handshake_timeout = ~p\n" + "}", + [ + syskeeper_proxy, + ?SYSKEEPER_PROXY_NAME, + server(), + ?HANDSHAKE_TIMEOUT + ] + ), + {?SYSKEEPER_PROXY_NAME, + parse_connectors_and_check(ConfigString, syskeeper_proxy, ?SYSKEEPER_PROXY_NAME)}. 
+ +parse_and_check(ConfigString, SchemaMod, RootKey, Type0, Name) -> + Type = to_bin(Type0), + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(SchemaMod, RawConf, #{required => false, atom_key => false}), + #{RootKey := #{Type := #{Name := Config}}} = RawConf, + Config. + +parse_bridge_and_check(ConfigString, BridgeType, Name) -> + parse_and_check(ConfigString, emqx_bridge_schema, <<"actions">>, BridgeType, Name). + +parse_connectors_and_check(ConfigString, ConnectorType, Name) -> + Config = parse_and_check( + ConfigString, emqx_connector_schema, <<"connectors">>, ConnectorType, Name + ), + emqx_utils_maps:safe_atom_key_map(Config). + +create_bridge(Type, Name, Conf) -> + emqx_bridge_v2:create(Type, Name, Conf). + +delete_bridge(Type, Name) -> + emqx_bridge_v2:remove(Type, Name). + +create_both_bridge(Config) -> + {ProxyName, ProxyConf} = syskeeper_proxy_config(Config), + {ConnectorName, ConnectorConf} = syskeeper_connector_config(Config), + {Name, Conf} = syskeeper_config(Config), + ?assertMatch( + {ok, _}, + create_connectors(syskeeper_proxy, ProxyName, ProxyConf) + ), + timer:sleep(1000), + ?assertMatch( + {ok, _}, + create_connectors(syskeeper_forwarder, ConnectorName, ConnectorConf) + ), + timer:sleep(1000), + ?assertMatch({ok, _}, create_bridge(syskeeper_forwarder, Name, Conf)). + +create_bridge_http(Params) -> + call_create_http("actions", Params). + +create_connectors_http(Params) -> + call_create_http("connectors", Params). + +call_create_http(Root, Params) -> + Path = emqx_mgmt_api_test_util:api_path([Root]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +create_connectors(Type, Name, Conf) -> + emqx_connector:create(Type, Name, Conf). + +delete_connectors(Type, Name) -> + emqx_connector:remove(Type, Name). 
+ +send_message(_Config, Payload) -> + Name = ?SYSKEEPER_NAME, + BridgeType = syskeeper_forwarder, + emqx_bridge_v2:send_message(BridgeType, Name, Payload, #{}). + +to_bin(List) when is_list(List) -> + unicode:characters_to_binary(List, utf8); +to_bin(Atom) when is_atom(Atom) -> + erlang:atom_to_binary(Atom); +to_bin(Bin) when is_binary(Bin) -> + Bin. + +to_str(Atom) when is_atom(Atom) -> + erlang:atom_to_list(Atom). + +server() -> + erlang:iolist_to_binary(io_lib:format("~ts:~B", [?HOST, ?PORT])). + +make_message() -> + Message = emqx_message:make(?MODULE, ?TOPIC, ?SYSKEEPER_NAME), + Id = emqx_guid:to_hexstr(emqx_guid:gen()), + From = emqx_message:from(Message), + Msg = emqx_message:to_map(Message), + Msg#{id => Id, clientid => From}. + +receive_msg() -> + receive + {deliver, ?TOPIC, Msg} -> + {ok, Msg} + after 500 -> + {error, no_message} + end. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ +t_setup_proxy_via_config(Config) -> + {Name, Conf} = syskeeper_proxy_config(Config), + ?assertMatch( + {ok, _}, + create_connectors(syskeeper_proxy, Name, Conf) + ), + ?assertMatch( + X when is_pid(X), + esockd:listener({emqx_bridge_syskeeper_proxy_server, {?HOST, ?PORT}}) + ), + delete_connectors(syskeeper_proxy, Name), + ?assertError( + not_found, + esockd:listener({emqx_bridge_syskeeper_proxy_server, {?HOST, ?PORT}}) + ). + +t_setup_proxy_via_http_api(Config) -> + {Name, ProxyConf0} = syskeeper_proxy_config(Config), + ProxyConf = ProxyConf0#{ + <<"name">> => Name, + <<"type">> => syskeeper_proxy + }, + ?assertMatch( + {ok, _}, + create_connectors_http(ProxyConf) + ), + + ?assertMatch( + X when is_pid(X), + esockd:listener({emqx_bridge_syskeeper_proxy_server, {?HOST, ?PORT}}) + ), + + delete_connectors(syskeeper_proxy, Name), + + ?assertError( + not_found, + esockd:listener({emqx_bridge_syskeeper_proxy_server, {?HOST, ?PORT}}) + ). 
+ +t_setup_forwarder_via_config(Config) -> + {ConnectorName, ConnectorConf} = syskeeper_connector_config(Config), + {Name, Conf} = syskeeper_config(Config), + ?assertMatch( + {ok, _}, + create_connectors(syskeeper_forwarder, ConnectorName, ConnectorConf) + ), + ?assertMatch({ok, _}, create_bridge(syskeeper_forwarder, Name, Conf)). + +t_setup_forwarder_via_http_api(Config) -> + {ConnectorName, ConnectorConf0} = syskeeper_connector_config(Config), + {Name, Conf0} = syskeeper_config(Config), + + ConnectorConf = ConnectorConf0#{ + <<"name">> => ConnectorName, + <<"type">> => syskeeper_forwarder + }, + + Conf = Conf0#{ + <<"name">> => Name, + <<"type">> => syskeeper_forwarder + }, + + ?assertMatch( + {ok, _}, + create_connectors_http(ConnectorConf) + ), + + ?assertMatch( + {ok, _}, + create_bridge_http(Conf) + ). + +t_get_status(Config) -> + create_both_bridge(Config), + ?assertMatch( + #{status := connected}, emqx_bridge_v2:health_check(syskeeper_forwarder, ?SYSKEEPER_NAME) + ), + delete_connectors(syskeeper_proxy, ?SYSKEEPER_PROXY_NAME), + ?retry( + _Sleep = 500, + _Attempts = 10, + ?assertMatch( + #{status := connecting}, + emqx_bridge_v2:health_check(syskeeper_forwarder, ?SYSKEEPER_NAME) + ) + ). + +t_write_failure(Config) -> + create_both_bridge(Config), + delete_connectors(syskeeper_proxy, ?SYSKEEPER_PROXY_NAME), + SentData = make_message(), + Result = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch({{error, {resource_error, _}}, _}, Result). + +t_invalid_data(Config) -> + create_both_bridge(Config), + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, #{}), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch({error, {unrecoverable_error, {invalid_data, _}}}, Result). 
+ +t_forward(Config) -> + emqx_broker:subscribe(?TOPIC), + create_both_bridge(Config), + SentData = make_message(), + {_, {ok, #{result := _Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?retry( + 500, + 10, + ?assertMatch({ok, _}, receive_msg()) + ), + emqx_broker:unsubscribe(?TOPIC), + ok. diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src index e363f2f9c..5375a6ba9 100644 --- a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src +++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_tdengine, [ {description, "EMQX Enterprise TDEngine Bridge"}, - {vsn, "0.1.5"}, + {vsn, "0.1.6"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl index dcef8506c..522007cbc 100644 --- a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl +++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine_connector.erl @@ -6,7 +6,6 @@ -behaviour(emqx_resource). --include_lib("emqx_resource/include/emqx_resource.hrl"). -include_lib("typerefl/include/types.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). @@ -48,8 +47,8 @@ adjust_fields(Fields) -> fun ({username, OrigUsernameFn}) -> {username, add_default_fn(OrigUsernameFn, <<"root">>)}; - ({password, OrigPasswordFn}) -> - {password, make_required_fn(OrigPasswordFn)}; + ({password, _}) -> + {password, emqx_connector_schema_lib:password_field(#{required => true})}; (Field) -> Field end, @@ -62,12 +61,6 @@ add_default_fn(OrigFn, Default) -> (Field) -> OrigFn(Field) end. -make_required_fn(OrigFn) -> - fun - (required) -> true; - (Field) -> OrigFn(Field) - end. 
- server() -> Meta = #{desc => ?DESC("server")}, emqx_schema:servers_sc(Meta, ?TD_HOST_OPTIONS). @@ -223,7 +216,10 @@ aggregate_query(BatchTks, BatchReqs, Acc) -> ). connect(Opts) -> - tdengine:start_link(Opts). + %% TODO: teach `tdengine` to accept 0-arity closures as passwords. + {value, {password, Secret}, OptsRest} = lists:keytake(password, 1, Opts), + NOpts = [{password, emqx_secret:unwrap(Secret)} | OptsRest], + tdengine:start_link(NOpts). query_opts(#{database := Database} = _Opts) -> [{db_name, Database}]. diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src index adb024591..53302a21f 100644 --- a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src @@ -1,8 +1,9 @@ {application, emqx_bridge_timescale, [ {description, "EMQX Enterprise TimescaleDB Bridge"}, - {vsn, "0.1.2"}, + {vsn, "0.1.3"}, {registered, []}, {applications, [kernel, stdlib, emqx_resource]}, + {env, [{emqx_action_info_module, emqx_bridge_timescale_action_info}]}, {env, []}, {modules, []}, {links, []} diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl index c4dedf07c..5d6c5498d 100644 --- a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.erl @@ -3,6 +3,8 @@ %%-------------------------------------------------------------------- -module(emqx_bridge_timescale). +-include_lib("hocon/include/hoconsc.hrl"). + -export([ conn_bridge_examples/1 ]). @@ -14,6 +16,14 @@ desc/1 ]). +%% Examples +-export([ + bridge_v2_examples/1, + connector_examples/1 +]). + +-define(CONNECTOR_TYPE, timescale). 
+ %% ------------------------------------------------------------------------------------------------- %% api @@ -22,7 +32,7 @@ conn_bridge_examples(Method) -> #{ <<"timescale">> => #{ summary => <<"Timescale Bridge">>, - value => emqx_bridge_pgsql:values(Method, timescale) + value => emqx_bridge_pgsql:values_conn_bridge_examples(Method, timescale) } } ]. @@ -35,8 +45,55 @@ roots() -> []. fields("post") -> emqx_bridge_pgsql:fields("post", timescale); +fields("config_connector") -> + emqx_postgresql_connector_schema:fields("config_connector"); +fields(action) -> + {timescale, + hoconsc:mk( + hoconsc:map(name, hoconsc:ref(emqx_bridge_pgsql, pgsql_action)), + #{ + desc => <<"Timescale Action Config">>, + required => false + } + )}; +fields("put_bridge_v2") -> + emqx_bridge_pgsql:fields(pgsql_action); +fields("get_bridge_v2") -> + emqx_bridge_pgsql:fields(pgsql_action); +fields("post_bridge_v2") -> + emqx_bridge_pgsql:fields(pgsql_action); +fields(Field) when + Field == "get_connector"; + Field == "put_connector"; + Field == "post_connector" +-> + emqx_postgresql_connector_schema:fields({Field, ?CONNECTOR_TYPE}); fields(Method) -> emqx_bridge_pgsql:fields(Method). +desc("config_connector") -> + ?DESC(emqx_postgresql_connector_schema, "config_connector"); desc(_) -> undefined. + +%% Examples + +connector_examples(Method) -> + [ + #{ + <<"timescale">> => #{ + summary => <<"Timescale Connector">>, + value => emqx_postgresql_connector_schema:values({Method, <<"timescale">>}) + } + } + ]. + +bridge_v2_examples(Method) -> + [ + #{ + <<"timescale">> => #{ + summary => <<"Timescale Action">>, + value => emqx_bridge_pgsql:values({Method, timescale}) + } + } + ]. 
diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale_action_info.erl b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale_action_info.erl new file mode 100644 index 000000000..fff74b578 --- /dev/null +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale_action_info.erl @@ -0,0 +1,22 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_timescale_action_info). + +-behaviour(emqx_action_info). + +-export([ + bridge_v1_type_name/0, + action_type_name/0, + connector_type_name/0, + schema_module/0 +]). + +bridge_v1_type_name() -> timescale. + +action_type_name() -> timescale. + +connector_type_name() -> timescale. + +schema_module() -> emqx_bridge_timescale. diff --git a/apps/emqx_conf/include/emqx_conf.hrl b/apps/emqx_conf/include/emqx_conf.hrl index 83737e746..eeaa7c09e 100644 --- a/apps/emqx_conf/include/emqx_conf.hrl +++ b/apps/emqx_conf/include/emqx_conf.hrl @@ -59,8 +59,7 @@ emqx_authn_http_schema, emqx_authn_jwt_schema, emqx_authn_scram_mnesia_schema, - emqx_authn_ldap_schema, - emqx_authn_ldap_bind_schema + emqx_authn_ldap_schema ]). -define(EE_AUTHN_PROVIDER_SCHEMA_MODS, [ diff --git a/apps/emqx_conf/src/emqx_cluster_rpc.erl b/apps/emqx_conf/src/emqx_cluster_rpc.erl index 5bc330afa..756a5ec30 100644 --- a/apps/emqx_conf/src/emqx_cluster_rpc.erl +++ b/apps/emqx_conf/src/emqx_cluster_rpc.erl @@ -66,6 +66,7 @@ -boot_mnesia({mnesia, [boot]}). -include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include("emqx_conf.hrl"). -ifdef(TEST). @@ -384,6 +385,7 @@ catch_up(State) -> catch_up(State, false). 
catch_up(#{node := Node, retry_interval := RetryMs, is_leaving := false} = State, SkipResult) -> case transaction(fun ?MODULE:read_next_mfa/1, [Node]) of {atomic, caught_up} -> + ?tp(cluster_rpc_caught_up, #{}), ?TIMEOUT; {atomic, {still_lagging, NextId, MFA}} -> {Succeed, _} = apply_mfa(NextId, MFA, ?APPLY_KIND_REPLICATE), diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 3856a882c..7f495a3cd 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.31"}, + {vsn, "0.1.32"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 0925141de..0d2ee72e4 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -306,7 +306,7 @@ gen_flat_doc(RootNames, #{full_name := FullName, fields := Fields} = S) -> ShortName = short_name(FullName), case is_missing_namespace(ShortName, to_bin(FullName), RootNames) of true -> - io:format(standard_error, "WARN: no_namespace_for: ~s~n", [FullName]); + error({no_namespace, FullName, S}); false -> ok end, diff --git a/apps/emqx_conf/src/emqx_conf_cli.erl b/apps/emqx_conf/src/emqx_conf_cli.erl index ddabdae95..7e55ada4f 100644 --- a/apps/emqx_conf/src/emqx_conf_cli.erl +++ b/apps/emqx_conf/src/emqx_conf_cli.erl @@ -37,10 +37,15 @@ -define(AUDIT_MOD, audit). -define(UPDATE_READONLY_KEYS_PROHIBITED, "update_readonly_keys_prohibited"). +-dialyzer({no_match, [load/0]}). + load() -> emqx_ctl:register_command(?CLUSTER_CALL, {?MODULE, admins}, [hidden]), emqx_ctl:register_command(?CONF, {?MODULE, conf}, []), - emqx_ctl:register_command(?AUDIT_MOD, {?MODULE, audit}, [hidden]), + case emqx_release:edition() of + ee -> emqx_ctl:register_command(?AUDIT_MOD, {?MODULE, audit}, [hidden]); + ce -> ok + end, ok. 
unload() -> @@ -108,15 +113,14 @@ admins(_) -> emqx_ctl:usage(usage_sync()). audit(Level, From, Log) -> - Log1 = redact(Log#{time => logger:timestamp()}), - ?AUDIT(Level, From, Log1). + ?AUDIT(Level, redact(Log#{from => From})). -redact(Logs = #{cmd := admins, args := ["add", Username, _Password | Rest]}) -> - Logs#{args => ["add", Username, "******" | Rest]}; -redact(Logs = #{cmd := admins, args := ["passwd", Username, _Password]}) -> - Logs#{args => ["passwd", Username, "******"]}; -redact(Logs = #{cmd := license, args := ["update", _License]}) -> - Logs#{args => ["update", "******"]}; +redact(Logs = #{cmd := admins, args := [<<"add">>, Username, _Password | Rest]}) -> + Logs#{args => [<<"add">>, Username, <<"******">> | Rest]}; +redact(Logs = #{cmd := admins, args := [<<"passwd">>, Username, _Password]}) -> + Logs#{args => [<<"passwd">>, Username, <<"******">>]}; +redact(Logs = #{cmd := license, args := [<<"update">>, _License]}) -> + Logs#{args => [<<"update">>, "******"]}; redact(Logs) -> Logs. @@ -190,7 +194,7 @@ keys() -> emqx_config:get_root_names() -- hidden_roots(). drop_hidden_roots(Conf) -> - lists:foldl(fun(K, Acc) -> maps:remove(K, Acc) end, Conf, hidden_roots()). + maps:without(hidden_roots(), Conf). hidden_roots() -> [ @@ -198,6 +202,7 @@ hidden_roots() -> <<"stats">>, <<"broker">>, <<"persistent_session_store">>, + <<"session_persistence">>, <<"plugins">>, <<"zones">> ]. diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index a872a6a56..0db3c4a45 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -77,7 +77,8 @@ %% Callback to upgrade config after loaded from config file but before validation. upgrade_raw_conf(RawConf) -> - emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2(RawConf). + RawConf1 = emqx_connector_schema:transform_bridges_v1_to_connectors_and_bridges_v2(RawConf), + emqx_otel_schema:upgrade_legacy_metrics(RawConf1). 
namespace() -> emqx. @@ -1189,37 +1190,44 @@ tr_prometheus_collectors(Conf) -> emqx_prometheus, emqx_prometheus_mria %% builtin vm collectors - | tr_vm_dist_collector(Conf) ++ - tr_mnesia_collector(Conf) ++ - tr_vm_statistics_collector(Conf) ++ - tr_vm_system_info_collector(Conf) ++ - tr_vm_memory_collector(Conf) ++ - tr_vm_msacc_collector(Conf) + | prometheus_collectors(Conf) ]. -tr_vm_dist_collector(Conf) -> - Enabled = conf_get("prometheus.vm_dist_collector", Conf, disabled), - collector_enabled(Enabled, prometheus_vm_dist_collector). +prometheus_collectors(Conf) -> + case conf_get("prometheus.enable_basic_auth", Conf, undefined) of + %% legacy + undefined -> + tr_collector("prometheus.vm_dist_collector", prometheus_vm_dist_collector, Conf) ++ + tr_collector("prometheus.mnesia_collector", prometheus_mnesia_collector, Conf) ++ + tr_collector( + "prometheus.vm_statistics_collector", prometheus_vm_statistics_collector, Conf + ) ++ + tr_collector( + "prometheus.vm_system_info_collector", prometheus_vm_system_info_collector, Conf + ) ++ + tr_collector("prometheus.vm_memory_collector", prometheus_vm_memory_collector, Conf) ++ + tr_collector("prometheus.vm_msacc_collector", prometheus_vm_msacc_collector, Conf); + %% new + _ -> + tr_collector("prometheus.collectors.vm_dist", prometheus_vm_dist_collector, Conf) ++ + tr_collector("prometheus.collectors.mnesia", prometheus_mnesia_collector, Conf) ++ + tr_collector( + "prometheus.collectors.vm_statistics", prometheus_vm_statistics_collector, Conf + ) ++ + tr_collector( + "prometheus.collectors.vm_system_info", + prometheus_vm_system_info_collector, + Conf + ) ++ + tr_collector( + "prometheus.collectors.vm_memory", prometheus_vm_memory_collector, Conf + ) ++ + tr_collector("prometheus.collectors.vm_msacc", prometheus_vm_msacc_collector, Conf) + end. -tr_mnesia_collector(Conf) -> - Enabled = conf_get("prometheus.mnesia_collector", Conf, disabled), - collector_enabled(Enabled, prometheus_mnesia_collector). 
- -tr_vm_statistics_collector(Conf) -> - Enabled = conf_get("prometheus.vm_statistics_collector", Conf, disabled), - collector_enabled(Enabled, prometheus_vm_statistics_collector). - -tr_vm_system_info_collector(Conf) -> - Enabled = conf_get("prometheus.vm_system_info_collector", Conf, disabled), - collector_enabled(Enabled, prometheus_vm_system_info_collector). - -tr_vm_memory_collector(Conf) -> - Enabled = conf_get("prometheus.vm_memory_collector", Conf, disabled), - collector_enabled(Enabled, prometheus_vm_memory_collector). - -tr_vm_msacc_collector(Conf) -> - Enabled = conf_get("prometheus.vm_msacc_collector", Conf, disabled), - collector_enabled(Enabled, prometheus_vm_msacc_collector). +tr_collector(Key, Collect, Conf) -> + Enabled = conf_get(Key, Conf, disabled), + collector_enabled(Enabled, Collect). collector_enabled(enabled, Collector) -> [Collector]; collector_enabled(disabled, _) -> []. diff --git a/apps/emqx_conf/src/emqx_conf_schema_types.erl b/apps/emqx_conf/src/emqx_conf_schema_types.erl index e948142fc..dc3af77b2 100644 --- a/apps/emqx_conf/src/emqx_conf_schema_types.erl +++ b/apps/emqx_conf/src/emqx_conf_schema_types.erl @@ -265,6 +265,12 @@ readable("port_number()") -> Result = try_range("1..65535"), true = is_map(Result), Result; +readable("secret()") -> + #{ + swagger => #{type => string, example => <<"R4ND0M/S∃CЯ∃T"/utf8>>}, + dashboard => #{type => string}, + docgen => #{type => "String", example => <<"R4ND0M/S∃CЯ∃T"/utf8>>} + }; readable(TypeStr0) -> case string:split(TypeStr0, ":") of [ModuleStr, TypeStr] -> diff --git a/apps/emqx_conf/test/emqx_conf_logger_SUITE.erl b/apps/emqx_conf/test/emqx_conf_logger_SUITE.erl index 096136651..2cb699036 100644 --- a/apps/emqx_conf/test/emqx_conf_logger_SUITE.erl +++ b/apps/emqx_conf/test/emqx_conf_logger_SUITE.erl @@ -24,7 +24,7 @@ %% erlfmt-ignore -define(BASE_CONF, - """ + " log { console { enable = true @@ -36,7 +36,7 @@ path = \"log/emqx.log\" } } - """). + "). 
all() -> emqx_common_test_helpers:all(?MODULE). diff --git a/apps/emqx_conf/test/emqx_conf_schema_tests.erl b/apps/emqx_conf/test/emqx_conf_schema_tests.erl index 4fca88a00..22f8c5575 100644 --- a/apps/emqx_conf/test/emqx_conf_schema_tests.erl +++ b/apps/emqx_conf/test/emqx_conf_schema_tests.erl @@ -20,7 +20,7 @@ %% erlfmt-ignore -define(BASE_CONF, - """ + " node { name = \"emqx1@127.0.0.1\" cookie = \"emqxsecretcookie\" @@ -34,7 +34,7 @@ static.seeds = ~p core_nodes = ~p } - """). + "). array_nodes_test() -> ensure_acl_conf(), @@ -70,7 +70,7 @@ array_nodes_test() -> %% erlfmt-ignore -define(OUTDATED_LOG_CONF, - """ + " log.console_handler { burst_limit { enable = true @@ -124,7 +124,7 @@ log.file_handlers { time_offset = \"+01:00\" } } - """ + " ). -define(FORMATTER(TimeOffset), {emqx_logger_textfmt, #{ @@ -196,7 +196,7 @@ validate_log(Conf) -> %% erlfmt-ignore -define(FILE_LOG_BASE_CONF, - """ + " log.file.default { enable = true file = \"log/xx-emqx.log\" @@ -206,7 +206,7 @@ validate_log(Conf) -> rotation_size = ~s time_offset = \"+01:00\" } - """ + " ). file_log_infinity_rotation_size_test_() -> @@ -249,7 +249,7 @@ file_log_infinity_rotation_size_test_() -> %% erlfmt-ignore -define(KERNEL_LOG_CONF, - """ + " log.console { enable = true formatter = text @@ -269,7 +269,7 @@ file_log_infinity_rotation_size_test_() -> enable = true file = \"log/my-emqx.log\" } - """ + " ). 
log_test() -> @@ -279,7 +279,7 @@ log_test() -> log_rotation_count_limit_test() -> ensure_acl_conf(), Format = - """ + " log.file { enable = true path = \"log/emqx.log\" @@ -288,7 +288,7 @@ log_rotation_count_limit_test() -> rotation = {count = ~w} rotation_size = \"1024MB\" } - """, + ", BaseConf = to_bin(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"]), lists:foreach(fun({Conf, Count}) -> Conf0 = <>, @@ -320,7 +320,7 @@ log_rotation_count_limit_test() -> %% erlfmt-ignore -define(BASE_AUTHN_ARRAY, - """ + " authentication = [ {backend = \"http\" body {password = \"${password}\", username = \"${username}\"} @@ -335,7 +335,7 @@ log_rotation_count_limit_test() -> url = \"~ts\" } ] - """ + " ). -define(ERROR(Error), @@ -396,13 +396,13 @@ authn_validations_test() -> %% erlfmt-ignore -define(LISTENERS, - """ + " listeners.ssl.default.bind = 9999 listeners.wss.default.bind = 9998 listeners.wss.default.ssl_options.cacertfile = \"mytest/certs/cacert.pem\" listeners.wss.new.bind = 9997 listeners.wss.new.websocket.mqtt_path = \"/my-mqtt\" - """ + " ). listeners_test() -> diff --git a/apps/emqx_connector/src/emqx_connector.app.src b/apps/emqx_connector/src/emqx_connector.app.src index cc78829e7..7150d1e7a 100644 --- a/apps/emqx_connector/src/emqx_connector.app.src +++ b/apps/emqx_connector/src/emqx_connector.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_connector, [ {description, "EMQX Data Integration Connectors"}, - {vsn, "0.1.34"}, + {vsn, "0.1.35"}, {registered, []}, {mod, {emqx_connector_app, []}}, {applications, [ @@ -9,6 +9,9 @@ stdlib, ecpool, emqx_resource, + eredis, + %% eredis_cluster has supervisor should be started before emqx_connector + %% otherwise the first start redis_cluster will fail. 
eredis_cluster, ehttpc, jose, diff --git a/apps/emqx_connector/src/emqx_connector_api.erl b/apps/emqx_connector/src/emqx_connector_api.erl index a5b7692d7..f25fe9b7e 100644 --- a/apps/emqx_connector/src/emqx_connector_api.erl +++ b/apps/emqx_connector/src/emqx_connector_api.erl @@ -627,15 +627,20 @@ format_resource( ). format_resource_data(ResData) -> - maps:fold(fun format_resource_data/3, #{}, maps:with([status, error], ResData)). + maps:fold(fun format_resource_data/3, #{}, maps:with([status, error, added_channels], ResData)). format_resource_data(error, undefined, Result) -> Result; format_resource_data(error, Error, Result) -> Result#{status_reason => emqx_utils:readable_error_msg(Error)}; +format_resource_data(added_channels, Channels, Result) -> + Result#{actions => lists:map(fun format_action/1, maps:keys(Channels))}; format_resource_data(K, V, Result) -> Result#{K => V}. +format_action(ActionId) -> + element(2, emqx_bridge_v2:parse_id(ActionId)). + is_ok(ok) -> ok; is_ok(OkResult = {ok, _}) -> diff --git a/apps/emqx_connector/src/emqx_connector_schema_lib.erl b/apps/emqx_connector/src/emqx_connector_schema_lib.erl index 07e7fe375..76a06cb5a 100644 --- a/apps/emqx_connector/src/emqx_connector_schema_lib.erl +++ b/apps/emqx_connector/src/emqx_connector_schema_lib.erl @@ -23,14 +23,14 @@ pool_size/1, relational_db_fields/0, ssl_fields/0, - prepare_statement_fields/0 + prepare_statement_fields/0, + password_field/0, + password_field/1 ]). -export([ database/1, username/1, - password/1, - password_required/1, auto_reconnect/1 ]). @@ -66,10 +66,19 @@ relational_db_fields() -> %% See emqx_resource.hrl {pool_size, fun pool_size/1}, {username, fun username/1}, - {password, fun password/1}, + {password, password_field()}, {auto_reconnect, fun auto_reconnect/1} ]. +-spec password_field() -> hocon_schema:field_schema(). +password_field() -> + password_field(#{}). + +-spec password_field(#{atom() => _}) -> hocon_schema:field_schema(). 
+password_field(Overrides) -> + Base = #{desc => ?DESC("password")}, + emqx_schema_secret:mk(maps:merge(Base, Overrides)). + prepare_statement_fields() -> [{prepare_statement, fun prepare_statement/1}]. @@ -95,22 +104,6 @@ username(desc) -> ?DESC("username"); username(required) -> false; username(_) -> undefined. -password(type) -> binary(); -password(desc) -> ?DESC("password"); -password(required) -> false; -password(format) -> <<"password">>; -password(sensitive) -> true; -password(converter) -> fun emqx_schema:password_converter/2; -password(_) -> undefined. - -password_required(type) -> binary(); -password_required(desc) -> ?DESC("password"); -password_required(required) -> true; -password_required(format) -> <<"password">>; -password_required(sensitive) -> true; -password_required(converter) -> fun emqx_schema:password_converter/2; -password_required(_) -> undefined. - auto_reconnect(type) -> boolean(); auto_reconnect(desc) -> ?DESC("auto_reconnect"); auto_reconnect(default) -> true; diff --git a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl index ef101ad28..1ca6e4a5d 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_ee_schema.erl @@ -21,11 +21,29 @@ resource_type(Type) when is_binary(Type) -> resource_type(binary_to_atom(Type, utf8)); +resource_type(azure_event_hub_producer) -> + %% We use AEH's Kafka interface. + emqx_bridge_kafka_impl_producer; +resource_type(confluent_producer) -> + emqx_bridge_kafka_impl_producer; +resource_type(gcp_pubsub_producer) -> + emqx_bridge_gcp_pubsub_impl_producer; resource_type(kafka_producer) -> emqx_bridge_kafka_impl_producer; -%% We use AEH's Kafka interface. 
-resource_type(azure_event_hub_producer) -> - emqx_bridge_kafka_impl_producer; +resource_type(matrix) -> + emqx_postgresql; +resource_type(mongodb) -> + emqx_bridge_mongodb_connector; +resource_type(pgsql) -> + emqx_postgresql; +resource_type(syskeeper_forwarder) -> + emqx_bridge_syskeeper_connector; +resource_type(syskeeper_proxy) -> + emqx_bridge_syskeeper_proxy_server; +resource_type(timescale) -> + emqx_postgresql; +resource_type(redis) -> + emqx_bridge_redis_connector; resource_type(Type) -> error({unknown_connector_type, Type}). @@ -34,6 +52,8 @@ connector_impl_module(ConnectorType) when is_binary(ConnectorType) -> connector_impl_module(binary_to_atom(ConnectorType, utf8)); connector_impl_module(azure_event_hub_producer) -> emqx_bridge_azure_event_hub; +connector_impl_module(confluent_producer) -> + emqx_bridge_confluent_producer; connector_impl_module(_ConnectorType) -> undefined. @@ -42,6 +62,30 @@ fields(connectors) -> connector_structs() -> [ + {azure_event_hub_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_azure_event_hub, "config_connector")), + #{ + desc => <<"Azure Event Hub Connector Config">>, + required => false + } + )}, + {confluent_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_confluent_producer, "config_connector")), + #{ + desc => <<"Confluent Connector Config">>, + required => false + } + )}, + {gcp_pubsub_producer, + mk( + hoconsc:map(name, ref(emqx_bridge_gcp_pubsub_producer_schema, "config_connector")), + #{ + desc => <<"GCP PubSub Producer Connector Config">>, + required => false + } + )}, {kafka_producer, mk( hoconsc:map(name, ref(emqx_bridge_kafka, "config_connector")), @@ -50,11 +94,59 @@ connector_structs() -> required => false } )}, - {azure_event_hub_producer, + {mongodb, mk( - hoconsc:map(name, ref(emqx_bridge_azure_event_hub, "config_connector")), + hoconsc:map(name, ref(emqx_bridge_mongodb, "config_connector")), #{ - desc => <<"Azure Event Hub Connector Config">>, + desc => <<"MongoDB Connector Config">>, + required => 
false + } + )}, + {syskeeper_forwarder, + mk( + hoconsc:map(name, ref(emqx_bridge_syskeeper_connector, config)), + #{ + desc => <<"Syskeeper Connector Config">>, + required => false + } + )}, + {syskeeper_proxy, + mk( + hoconsc:map(name, ref(emqx_bridge_syskeeper_proxy, config)), + #{ + desc => <<"Syskeeper Proxy Connector Config">>, + required => false + } + )}, + {pgsql, + mk( + hoconsc:map(name, ref(emqx_bridge_pgsql, "config_connector")), + #{ + desc => <<"PostgreSQL Connector Config">>, + required => false + } + )}, + {timescale, + mk( + hoconsc:map(name, ref(emqx_bridge_timescale, "config_connector")), + #{ + desc => <<"Timescale Connector Config">>, + required => false + } + )}, + {matrix, + mk( + hoconsc:map(name, ref(emqx_bridge_matrix, "config_connector")), + #{ + desc => <<"Matrix Connector Config">>, + required => false + } + )}, + {redis, + mk( + hoconsc:map(name, ref(emqx_bridge_redis_schema, "config_connector")), + #{ + desc => <<"Redis Connector Config">>, required => false } )} @@ -62,16 +154,42 @@ connector_structs() -> schema_modules() -> [ + emqx_bridge_azure_event_hub, + emqx_bridge_confluent_producer, + emqx_bridge_gcp_pubsub_producer_schema, emqx_bridge_kafka, - emqx_bridge_azure_event_hub + emqx_bridge_matrix, + emqx_bridge_mongodb, + emqx_bridge_syskeeper_connector, + emqx_bridge_syskeeper_proxy, + emqx_bridge_timescale, + emqx_postgresql_connector_schema, + emqx_bridge_redis_schema ]. api_schemas(Method) -> [ %% We need to map the `type' field of a request (binary) to a %% connector schema module. 
+ api_ref( + emqx_bridge_azure_event_hub, <<"azure_event_hub_producer">>, Method ++ "_connector" + ), + api_ref( + emqx_bridge_confluent_producer, <<"confluent_producer">>, Method ++ "_connector" + ), + api_ref( + emqx_bridge_gcp_pubsub_producer_schema, + <<"gcp_pubsub_producer">>, + Method ++ "_connector" + ), api_ref(emqx_bridge_kafka, <<"kafka_producer">>, Method ++ "_connector"), - api_ref(emqx_bridge_azure_event_hub, <<"azure_event_hub_producer">>, Method ++ "_connector") + api_ref(emqx_bridge_matrix, <<"matrix">>, Method ++ "_connector"), + api_ref(emqx_bridge_mongodb, <<"mongodb">>, Method ++ "_connector"), + api_ref(emqx_bridge_syskeeper_connector, <<"syskeeper_forwarder">>, Method), + api_ref(emqx_bridge_syskeeper_proxy, <<"syskeeper_proxy">>, Method), + api_ref(emqx_bridge_timescale, <<"timescale">>, Method ++ "_connector"), + api_ref(emqx_postgresql_connector_schema, <<"pgsql">>, Method ++ "_connector"), + api_ref(emqx_bridge_redis_schema, <<"redis">>, Method ++ "_connector") ]. api_ref(Module, Type, Method) -> diff --git a/apps/emqx_connector/src/schema/emqx_connector_schema.erl b/apps/emqx_connector/src/schema/emqx_connector_schema.erl index 890f84871..d4f82d474 100644 --- a/apps/emqx_connector/src/schema/emqx_connector_schema.erl +++ b/apps/emqx_connector/src/schema/emqx_connector_schema.erl @@ -24,7 +24,8 @@ -export([ transform_bridges_v1_to_connectors_and_bridges_v2/1, - transform_bridge_v1_config_to_action_config/4 + transform_bridge_v1_config_to_action_config/4, + top_level_common_connector_keys/0 ]). -export([roots/0, fields/1, desc/1, namespace/0, tags/0]). @@ -32,6 +33,12 @@ -export([get_response/0, put_request/0, post_request/0]). -export([connector_type_to_bridge_types/1]). +-export([ + api_fields/3, + common_fields/0, + status_and_actions_fields/0, + type_and_name_fields/1 +]). -export([resource_opts_fields/0, resource_opts_fields/1]). @@ -96,9 +103,32 @@ schema_modules() -> [emqx_bridge_http_schema]. -endif. 
-connector_type_to_bridge_types(http) -> [http, webhook]; -connector_type_to_bridge_types(kafka_producer) -> [kafka, kafka_producer]; -connector_type_to_bridge_types(azure_event_hub_producer) -> [azure_event_hub_producer]. +%% @doc Return old bridge(v1) and/or connector(v2) type +%% from the latest connector type name. +connector_type_to_bridge_types(http) -> + [webhook, http]; +connector_type_to_bridge_types(azure_event_hub_producer) -> + [azure_event_hub_producer]; +connector_type_to_bridge_types(confluent_producer) -> + [confluent_producer]; +connector_type_to_bridge_types(gcp_pubsub_producer) -> + [gcp_pubsub, gcp_pubsub_producer]; +connector_type_to_bridge_types(kafka_producer) -> + [kafka, kafka_producer]; +connector_type_to_bridge_types(matrix) -> + [matrix]; +connector_type_to_bridge_types(mongodb) -> + [mongodb, mongodb_rs, mongodb_sharded, mongodb_single]; +connector_type_to_bridge_types(pgsql) -> + [pgsql]; +connector_type_to_bridge_types(syskeeper_forwarder) -> + [syskeeper_forwarder]; +connector_type_to_bridge_types(syskeeper_proxy) -> + []; +connector_type_to_bridge_types(timescale) -> + [timescale]; +connector_type_to_bridge_types(redis) -> + [redis, redis_single, redis_sentinel, redis_cluster]. actions_config_name() -> <<"actions">>. @@ -143,7 +173,7 @@ split_bridge_to_connector_and_action( BridgeType, BridgeV1Conf ); false -> - %% We do an automatic transfomation to get the connector config + %% We do an automatic transformation to get the connector config %% if the callback is not defined. %% Get connector fields from bridge config lists:foldl( @@ -191,17 +221,20 @@ transform_bridge_v1_config_to_action_config( BridgeV1Conf, ConnectorName, ConnectorFields ). -transform_bridge_v1_config_to_action_config( - BridgeV1Conf, ConnectorName, ConnectorFields -) -> - TopKeys = [ +top_level_common_connector_keys() -> + [ <<"enable">>, <<"connector">>, <<"local_topic">>, <<"resource_opts">>, <<"description">>, <<"parameters">> - ], + ]. 
+ +transform_bridge_v1_config_to_action_config( + BridgeV1Conf, ConnectorName, ConnectorFields +) -> + TopKeys = top_level_common_connector_keys(), TopKeysMap = maps:from_keys(TopKeys, true), %% Remove connector fields ActionMap0 = lists:foldl( @@ -290,8 +323,9 @@ transform_old_style_bridges_to_connector_and_actions_of_type( RawConfigSoFar1 ), %% Add action + ActionType = emqx_action_info:bridge_v1_type_to_action_type(to_bin(BridgeType)), RawConfigSoFar3 = emqx_utils_maps:deep_put( - [actions_config_name(), to_bin(maybe_rename(BridgeType)), BridgeName], + [actions_config_name(), to_bin(ActionType), BridgeName], RawConfigSoFar2, ActionMap ), @@ -310,12 +344,6 @@ transform_bridges_v1_to_connectors_and_bridges_v2(RawConfig) -> ), NewRawConf. -%% v1 uses 'kafka' as bridge type v2 uses 'kafka_producer' -maybe_rename(kafka) -> - kafka_producer; -maybe_rename(Name) -> - Name. - %%====================================================================================== %% HOCON Schema Callbacks %%====================================================================================== @@ -388,13 +416,87 @@ fields(connectors) -> required => false } )} - ] ++ enterprise_fields_connectors(). + ] ++ enterprise_fields_connectors(); +fields("node_status") -> + [ + node_name(), + {"status", mk(status(), #{})}, + {"status_reason", + mk(binary(), #{ + required => false, + desc => ?DESC("desc_status_reason"), + example => <<"Connection refused">> + })} + ]. desc(connectors) -> ?DESC("desc_connectors"); +desc("node_status") -> + ?DESC("desc_node_status"); desc(_) -> undefined. +api_fields("get_connector", Type, Fields) -> + lists:append( + [ + type_and_name_fields(Type), + common_fields(), + status_and_actions_fields(), + Fields + ] + ); +api_fields("post_connector", Type, Fields) -> + lists:append( + [ + type_and_name_fields(Type), + common_fields(), + Fields + ] + ); +api_fields("put_connector", _Type, Fields) -> + lists:append( + [ + common_fields(), + Fields + ] + ). 
+ +common_fields() -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {description, emqx_schema:description_schema()} + ]. + +type_and_name_fields(ConnectorType) -> + [ + {type, mk(ConnectorType, #{required => true, desc => ?DESC("desc_type")})}, + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})} + ]. + +status_and_actions_fields() -> + [ + {"status", mk(status(), #{desc => ?DESC("desc_status")})}, + {"status_reason", + mk(binary(), #{ + required => false, + desc => ?DESC("desc_status_reason"), + example => <<"Connection refused">> + })}, + {"node_status", + mk( + hoconsc:array(ref(?MODULE, "node_status")), + #{desc => ?DESC("desc_node_status")} + )}, + {"actions", + mk( + hoconsc:array(binary()), + #{ + desc => ?DESC("connector_actions"), + example => [<<"my_action">>] + } + )} + ]. + resource_opts_fields() -> resource_opts_fields(_Overrides = []). @@ -452,12 +554,18 @@ is_bad_schema(#{type := ?MAP(_, ?R_REF(Module, TypeName))}) -> false; _ -> {true, #{ - schema_modle => Module, + schema_module => Module, type_name => TypeName, missing_fields => MissingFileds }} end. +status() -> + hoconsc:enum([connected, disconnected, connecting, inconsistent]). + +node_name() -> + {"node", mk(binary(), #{desc => ?DESC("desc_node_name"), example => "emqx@127.0.0.1"})}. + common_field_names() -> [ enable, description diff --git a/apps/emqx_connector/test/emqx_connector_api_SUITE.erl b/apps/emqx_connector/test/emqx_connector_api_SUITE.erl index bd8aa9ddf..0b4189396 100644 --- a/apps/emqx_connector/test/emqx_connector_api_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_api_SUITE.erl @@ -175,7 +175,8 @@ groups() -> AllTCs = emqx_common_test_helpers:all(?MODULE), SingleOnlyTests = [ t_connectors_probe, - t_fail_delete_with_action + t_fail_delete_with_action, + t_actions_field ], ClusterLaterJoinOnlyTCs = [ % t_cluster_later_join_metrics @@ -256,15 +257,6 @@ end_per_testcase(TestCase, Config) -> ok. 
-define(CONNECTOR_IMPL, dummy_connector_impl). -init_mocks(t_fail_delete_with_action) -> - init_mocks(common), - meck:expect(?CONNECTOR_IMPL, on_add_channel, 4, {ok, connector_state}), - meck:expect(?CONNECTOR_IMPL, on_remove_channel, 3, {ok, connector_state}), - meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected), - ok = meck:expect(?CONNECTOR_IMPL, on_get_channels, fun(ResId) -> - emqx_bridge_v2:get_channels_for_connector(ResId) - end), - ok; init_mocks(_TestCase) -> meck:new(emqx_connector_ee_schema, [passthrough, no_link]), meck:expect(emqx_connector_ee_schema, resource_type, 1, ?CONNECTOR_IMPL), @@ -289,17 +281,25 @@ init_mocks(_TestCase) -> (_, _) -> connected end ), + meck:expect(?CONNECTOR_IMPL, on_add_channel, 4, {ok, connector_state}), + meck:expect(?CONNECTOR_IMPL, on_remove_channel, 3, {ok, connector_state}), + meck:expect(?CONNECTOR_IMPL, on_get_channel_status, 3, connected), + meck:expect( + ?CONNECTOR_IMPL, + on_get_channels, + fun(ResId) -> + emqx_bridge_v2:get_channels_for_connector(ResId) + end + ), [?CONNECTOR_IMPL, emqx_connector_ee_schema]. -clear_resources(t_fail_delete_with_action) -> +clear_resources(_) -> lists:foreach( fun(#{type := Type, name := Name}) -> ok = emqx_bridge_v2:remove(Type, Name) end, emqx_bridge_v2:list() ), - clear_resources(common); -clear_resources(_) -> lists:foreach( fun(#{type := Type, name := Name}) -> ok = emqx_connector:remove(Type, Name) @@ -738,6 +738,62 @@ t_create_with_bad_name(Config) -> ?assertMatch(#{<<"kind">> := <<"validation_error">>}, Msg), ok. 
+t_actions_field(Config) -> + Name = ?CONNECTOR_NAME, + ?assertMatch( + {ok, 201, #{ + <<"type">> := ?CONNECTOR_TYPE, + <<"name">> := Name, + <<"enable">> := true, + <<"status">> := <<"connected">>, + <<"node_status">> := [_ | _], + <<"actions">> := [] + }}, + request_json( + post, + uri(["connectors"]), + ?KAFKA_CONNECTOR(Name), + Config + ) + ), + ConnectorID = emqx_connector_resource:connector_id(?CONNECTOR_TYPE, Name), + BridgeName = ?BRIDGE_NAME, + ?assertMatch( + {ok, 201, #{ + <<"type">> := ?BRIDGE_TYPE, + <<"name">> := BridgeName, + <<"enable">> := true, + <<"status">> := <<"connected">>, + <<"node_status">> := [_ | _], + <<"connector">> := Name, + <<"kafka">> := #{}, + <<"local_topic">> := _, + <<"resource_opts">> := _ + }}, + request_json( + post, + uri(["actions"]), + ?KAFKA_BRIDGE(?BRIDGE_NAME), + Config + ) + ), + ?assertMatch( + {ok, 200, #{ + <<"type">> := ?CONNECTOR_TYPE, + <<"name">> := Name, + <<"enable">> := true, + <<"status">> := <<"connected">>, + <<"node_status">> := [_ | _], + <<"actions">> := [BridgeName] + }}, + request_json( + get, + uri(["connectors", ConnectorID]), + Config + ) + ), + ok. + t_fail_delete_with_action(Config) -> Name = ?CONNECTOR_NAME, ?assertMatch( diff --git a/apps/emqx_dashboard/include/emqx_dashboard.hrl b/apps/emqx_dashboard/include/emqx_dashboard.hrl index 9013436e7..c41dbb71c 100644 --- a/apps/emqx_dashboard/include/emqx_dashboard.hrl +++ b/apps/emqx_dashboard/include/emqx_dashboard.hrl @@ -13,16 +13,9 @@ %% See the License for the specific language governing permissions and %% limitations under the License. %%-------------------------------------------------------------------- --define(ADMIN, emqx_admin). +-include("emqx_dashboard_rbac.hrl"). -%% TODO: -%% The predefined roles of the preliminary RBAC implementation, -%% these may be removed when developing the full RBAC feature. -%% In full RBAC feature, the role may be customised created and deleted, -%% a predefined configuration would replace these macros. 
--define(ROLE_VIEWER, <<"viewer">>). --define(ROLE_SUPERUSER, <<"administrator">>). --define(ROLE_DEFAULT, ?ROLE_SUPERUSER). +-define(ADMIN, emqx_admin). -define(BACKEND_LOCAL, local). -define(SSO_USERNAME(Backend, Name), {Backend, Name}). diff --git a/apps/emqx_dashboard/include/emqx_dashboard_rbac.hrl b/apps/emqx_dashboard/include/emqx_dashboard_rbac.hrl new file mode 100644 index 000000000..386ae8bea --- /dev/null +++ b/apps/emqx_dashboard/include/emqx_dashboard_rbac.hrl @@ -0,0 +1,33 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-ifndef(EMQX_DASHBOARD_RBAC). +-define(EMQX_DASHBOARD_RBAC, true). + +%% TODO: +%% The predefined roles of the preliminary RBAC implementation, +%% these may be removed when developing the full RBAC feature. +%% In full RBAC feature, the role may be customised created and deleted, +%% a predefined configuration would replace these macros. +-define(ROLE_VIEWER, <<"viewer">>). +-define(ROLE_SUPERUSER, <<"administrator">>). +-define(ROLE_DEFAULT, ?ROLE_SUPERUSER). + +-define(ROLE_API_VIEWER, <<"viewer">>). +-define(ROLE_API_SUPERUSER, <<"administrator">>). +-define(ROLE_API_PUBLISHER, <<"publisher">>). +-define(ROLE_API_DEFAULT, ?ROLE_API_SUPERUSER). + +-endif. 
diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index 97691c6cd..9474d868f 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,7 +2,7 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! - {vsn, "5.0.30"}, + {vsn, "5.0.31"}, {modules, []}, {registered, [emqx_dashboard_sup]}, {applications, [ diff --git a/apps/emqx_dashboard/src/emqx_dashboard.erl b/apps/emqx_dashboard/src/emqx_dashboard.erl index 4f9e34238..96f81ca84 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard.erl @@ -72,7 +72,7 @@ start_listeners(Listeners) -> base_path => emqx_dashboard_swagger:base_path(), modules => minirest_api:find_api_modules(apps()), authorization => Authorization, - log => fun emqx_dashboard_audit:log/1, + log => audit_log_fun(), security => [#{'basicAuth' => []}, #{'bearerAuth' => []}], swagger_global_spec => GlobalSpec, dispatch => dispatch(), @@ -210,9 +210,19 @@ filter_false(K, V, S) -> [{K, V} | S]. listener_name(Protocol) -> list_to_atom(atom_to_list(Protocol) ++ ":dashboard"). +-dialyzer({no_match, [audit_log_fun/0]}). + +audit_log_fun() -> + case emqx_release:edition() of + ee -> fun emqx_dashboard_audit:log/2; + ce -> undefined + end. + -if(?EMQX_RELEASE_EDITION =/= ee). + %% dialyzer complains about the `unauthorized_role' clause... --dialyzer({no_match, [authorize/1]}). +-dialyzer({no_match, [authorize/1, api_key_authorize/3]}). + -endif. 
authorize(Req) -> @@ -222,7 +232,7 @@ authorize(Req) -> {bearer, Token} -> case emqx_dashboard_admin:verify_token(Req, Token) of {ok, Username} -> - {ok, #{auth_type => jwt_token, username => Username}}; + {ok, #{auth_type => jwt_token, source => Username}}; {error, token_timeout} -> {401, 'TOKEN_TIME_OUT', <<"Token expired, get new token by POST /login">>}; {error, not_found} -> @@ -251,14 +261,16 @@ listeners() -> api_key_authorize(Req, Key, Secret) -> Path = cowboy_req:path(Req), - case emqx_mgmt_auth:authorize(Path, Key, Secret) of + case emqx_mgmt_auth:authorize(Path, Req, Key, Secret) of ok -> - {ok, #{auth_type => api_key, api_key => Key}}; + {ok, #{auth_type => api_key, source => Key}}; {error, <<"not_allowed">>} -> return_unauthorized( ?BAD_API_KEY_OR_SECRET, <<"Not allowed, Check api_key/api_secret">> ); + {error, unauthorized_role} -> + {403, 'UNAUTHORIZED_ROLE', ?API_KEY_NOT_ALLOW_MSG}; {error, _} -> return_unauthorized( ?BAD_API_KEY_OR_SECRET, diff --git a/apps/emqx_dashboard/src/emqx_dashboard_admin.erl b/apps/emqx_dashboard/src/emqx_dashboard_admin.erl index e9aac164b..c264a1b0f 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_admin.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_admin.erl @@ -207,8 +207,15 @@ add_user_(Username, Password, Role, Desc) -> description = Desc }, mnesia:write(Admin), + ?SLOG(info, #{msg => "dashboard_sso_user_added", username => Username, role => Role}), flatten_username(#{username => Username, role => Role, description => Desc}); [_] -> + ?SLOG(info, #{ + msg => "dashboard_sso_user_add_failed", + reason => "username_already_exists", + username => Username, + role => Role + }), mnesia:abort(<<"username_already_exist">>) end. @@ -416,7 +423,7 @@ ensure_role(Role) when is_binary(Role) -> -if(?EMQX_RELEASE_EDITION == ee). legal_role(Role) -> - emqx_dashboard_rbac:valid_role(Role). + emqx_dashboard_rbac:valid_dashboard_role(Role). role(Data) -> emqx_dashboard_rbac:role(Data). 
@@ -447,8 +454,10 @@ lookup_user(Backend, Username) when is_atom(Backend) -> -dialyzer({no_match, [add_user/4, update_user/3]}). +legal_role(?ROLE_DEFAULT) -> + ok; legal_role(_) -> - ok. + {error, <<"Role does not exist">>}. role(_) -> ?ROLE_DEFAULT. diff --git a/apps/emqx_dashboard/src/emqx_dashboard_audit.erl b/apps/emqx_dashboard/src/emqx_dashboard_audit.erl index cb5c0f42b..4b51b2cb0 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_audit.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_audit.erl @@ -17,30 +17,102 @@ -module(emqx_dashboard_audit). -include_lib("emqx/include/logger.hrl"). +-include_lib("emqx/include/http_api.hrl"). %% API --export([log/1]). +-export([log/2]). -log(Meta0) -> - #{req_start := ReqStart, req_end := ReqEnd, code := Code, method := Method} = Meta0, - Duration = erlang:convert_time_unit(ReqEnd - ReqStart, native, millisecond), - Level = level(Method, Code, Duration), - Username = maps:get(username, Meta0, <<"">>), - From = from(maps:get(auth_type, Meta0, "")), - Meta1 = maps:without([req_start, req_end], Meta0), - Meta2 = Meta1#{time => logger:timestamp(), duration_ms => Duration}, - Meta = emqx_utils:redact(Meta2), - ?AUDIT( - Level, - From, - Meta#{username => binary_to_list(Username), node => node()} - ), - ok. +%% filter high frequency events +-define(HIGH_FREQUENCY_REQUESTS, [ + <<"/publish">>, + <<"/clients/:clientid/subscribe">>, + <<"/clients/:clientid/unsubscribe">>, + <<"/publish/bulk">>, + <<"/clients/:clientid/unsubscribe/bulk">>, + <<"/clients/:clientid/subscribe/bulk">>, + <<"/clients/kickout/bulk">> +]). -from(jwt_token) -> "dashboard"; -from(_) -> "rest_api". +log(#{code := Code, method := Method} = Meta, Req) -> + %% Keep level/2 and log_meta/1 inside of this ?AUDIT macro + ?AUDIT(level(Method, Code), log_meta(Meta, Req)). 
-level(get, _Code, _) -> debug; -level(_, Code, _) when Code >= 200 andalso Code < 300 -> info; -level(_, Code, _) when Code >= 300 andalso Code < 400 -> warning; -level(_, Code, _) when Code >= 400 andalso Code < 500 -> error; -level(_, _, _) -> critical. +log_meta(Meta, Req) -> + #{operation_id := OperationId, method := Method} = Meta, + case + Method =:= get orelse + (lists:member(OperationId, ?HIGH_FREQUENCY_REQUESTS) andalso + ignore_high_frequency_request()) + of + true -> + undefined; + false -> + Meta1 = #{ + time => logger:timestamp(), + from => from(Meta), + source => source(Meta), + duration_ms => duration_ms(Meta), + source_ip => source_ip(Req), + operation_type => operation_type(Meta), + %% method for http filter api. + http_method => Method, + http_request => http_request(Meta), + http_status_code => maps:get(code, Meta), + operation_result => operation_result(Meta), + node => node() + }, + Meta2 = maps:without([req_start, req_end, method, headers, body, bindings, code], Meta), + emqx_utils:redact(maps:merge(Meta2, Meta1)) + end. + +duration_ms(#{req_start := ReqStart, req_end := ReqEnd}) -> + erlang:convert_time_unit(ReqEnd - ReqStart, native, millisecond). + +from(#{auth_type := jwt_token}) -> + dashboard; +from(#{auth_type := api_key}) -> + rest_api; +from(#{operation_id := <<"/login">>}) -> + dashboard; +from(#{code := Code} = Meta) when Code =:= 401 orelse Code =:= 403 -> + case maps:find(failure, Meta) of + {ok, #{code := 'BAD_API_KEY_OR_SECRET'}} -> rest_api; + {ok, #{code := 'UNAUTHORIZED_ROLE', message := ?API_KEY_NOT_ALLOW_MSG}} -> rest_api; + %% 'TOKEN_TIME_OUT' 'BAD_TOKEN' is dashboard code. + _ -> dashboard + end. + +source(#{source := Source}) -> Source; +source(#{operation_id := <<"/login">>, body := #{<<"username">> := Username}}) -> Username; +source(_Meta) -> <<"">>. 
+ +source_ip(Req) -> + case cowboy_req:header(<<"x-forwarded-for">>, Req, undefined) of + undefined -> + {RemoteIP, _} = cowboy_req:peer(Req), + iolist_to_binary(inet:ntoa(RemoteIP)); + Addresses -> + hd(binary:split(Addresses, <<",">>)) + end. + +operation_type(Meta) -> + case maps:find(operation_id, Meta) of + {ok, OperationId} -> + lists:nth(2, binary:split(OperationId, <<"/">>, [global])); + _ -> + <<"unknown">> + end. + +http_request(Meta) -> + maps:with([method, headers, bindings, body], Meta). + +operation_result(#{failure := _}) -> failure; +operation_result(_) -> success. + +level(get, _Code) -> debug; +level(_, Code) when Code >= 200 andalso Code < 300 -> info; +level(_, Code) when Code >= 300 andalso Code < 400 -> warning; +level(_, Code) when Code >= 400 andalso Code < 500 -> error; +level(_, _) -> critical. + +ignore_high_frequency_request() -> + emqx_conf:get([log, audit, ignore_high_frequency_request], true). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index c1379d4d6..022c6fcb0 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -185,7 +185,8 @@ fields(meta) -> schema_with_example(Type, Example) -> hoconsc:mk(Type, #{examples => #{<<"example">> => Example}}). --spec schema_with_examples(hocon_schema:type(), map()) -> hocon_schema:field_schema_map(). +-spec schema_with_examples(hocon_schema:type(), map() | list(tuple())) -> + hocon_schema:field_schema_map(). schema_with_examples(Type, Examples) -> hoconsc:mk(Type, #{examples => #{<<"examples">> => Examples}}). @@ -843,7 +844,7 @@ parse_object_loop(PropList0, Module, Options) -> ), parse_object_loop(PropList, Module, Options, _Props = [], _Required = [], _Refs = []). 
-parse_object_loop([], _Modlue, _Options, Props, Required, Refs) -> +parse_object_loop([], _Module, _Options, Props, Required, Refs) -> {lists:reverse(Props), lists:usort(Required), Refs}; parse_object_loop([{Name, Hocon} | Rest], Module, Options, Props, Required, Refs) -> NameBin = to_bin(Name), diff --git a/apps/emqx_dashboard_rbac/src/emqx_dashboard_rbac.app.src b/apps/emqx_dashboard_rbac/src/emqx_dashboard_rbac.app.src index ec8e6cd3f..acc5e6cbd 100644 --- a/apps/emqx_dashboard_rbac/src/emqx_dashboard_rbac.app.src +++ b/apps/emqx_dashboard_rbac/src/emqx_dashboard_rbac.app.src @@ -1,6 +1,6 @@ {application, emqx_dashboard_rbac, [ {description, "EMQX Dashboard RBAC"}, - {vsn, "0.1.1"}, + {vsn, "0.1.2"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_dashboard_rbac/src/emqx_dashboard_rbac.erl b/apps/emqx_dashboard_rbac/src/emqx_dashboard_rbac.erl index 57132b65b..7b8ffef02 100644 --- a/apps/emqx_dashboard_rbac/src/emqx_dashboard_rbac.erl +++ b/apps/emqx_dashboard_rbac/src/emqx_dashboard_rbac.erl @@ -6,7 +6,12 @@ -include_lib("emqx_dashboard/include/emqx_dashboard.hrl"). --export([check_rbac/3, role/1, valid_role/1]). +-export([ + check_rbac/3, + role/1, + valid_dashboard_role/1, + valid_api_role/1 +]). -dialyzer({nowarn_function, role/1}). %%===================================================================== @@ -31,20 +36,35 @@ role(#?ADMIN{role = Role}) -> role([]) -> ?ROLE_SUPERUSER; role(#{role := Role}) -> + Role; +role(Role) when is_binary(Role) -> Role. -valid_role(Role) -> - case lists:member(Role, role_list()) of +valid_dashboard_role(Role) -> + valid_role(dashboard, Role). + +valid_api_role(Role) -> + valid_role(api, Role). + +%% =================================================================== + +valid_role(Type, Role) -> + case lists:member(Role, role_list(Type)) of true -> ok; _ -> {error, <<"Role does not exist">>} end. 
+ %% =================================================================== check_rbac(?ROLE_SUPERUSER, _, _, _) -> true; check_rbac(?ROLE_VIEWER, <<"GET">>, _, _) -> true; +check_rbac(?ROLE_API_PUBLISHER, <<"POST">>, <<"/publish">>, _) -> + true; +check_rbac(?ROLE_API_PUBLISHER, <<"POST">>, <<"/publish/bulk">>, _) -> + true; %% everyone should allow to logout check_rbac(?ROLE_VIEWER, <<"POST">>, <<"/logout">>, _) -> true; @@ -58,5 +78,7 @@ check_rbac(?ROLE_VIEWER, <<"POST">>, <<"/users/", SubPath/binary>>, Username) -> check_rbac(_, _, _, _) -> false. -role_list() -> - [?ROLE_VIEWER, ?ROLE_SUPERUSER]. +role_list(dashboard) -> + [?ROLE_VIEWER, ?ROLE_SUPERUSER]; +role_list(api) -> + [?ROLE_API_VIEWER, ?ROLE_API_PUBLISHER, ?ROLE_API_SUPERUSER]. diff --git a/apps/emqx_dashboard_sso/rebar.config b/apps/emqx_dashboard_sso/rebar.config index 2691afbc1..874aca800 100644 --- a/apps/emqx_dashboard_sso/rebar.config +++ b/apps/emqx_dashboard_sso/rebar.config @@ -4,5 +4,5 @@ {deps, [ {emqx_ldap, {path, "../../apps/emqx_ldap"}}, {emqx_dashboard, {path, "../../apps/emqx_dashboard"}}, - {esaml, {git, "https://github.com/emqx/esaml", {tag, "v1.1.2"}}} + {esaml, {git, "https://github.com/emqx/esaml", {tag, "v1.1.3"}}} ]}. 
diff --git a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso.app.src b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso.app.src index 71788947b..19f3bf552 100644 --- a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso.app.src +++ b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso.app.src @@ -1,6 +1,6 @@ {application, emqx_dashboard_sso, [ {description, "EMQX Dashboard Single Sign-On"}, - {vsn, "0.1.2"}, + {vsn, "0.1.3"}, {registered, [emqx_dashboard_sso_sup]}, {applications, [ kernel, diff --git a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_api.erl b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_api.erl index c023ace51..830b50676 100644 --- a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_api.erl +++ b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_api.erl @@ -204,7 +204,7 @@ backend(get, #{bindings := #{backend := Type}}) -> undefined -> {404, #{code => ?BACKEND_NOT_FOUND, message => <<"Backend not found">>}}; Backend -> - {200, to_json(Backend)} + {200, to_redacted_json(Backend)} end; backend(put, #{bindings := #{backend := Backend}, body := Config}) -> ?SLOG(info, #{ @@ -264,9 +264,9 @@ valid_config(_, _, _) -> {error, invalid_config}. handle_backend_update_result({ok, #{backend := saml} = State}, _Config) -> - {200, to_json(maps:without([idp_meta, sp], State))}; + {200, to_redacted_json(maps:without([idp_meta, sp], State))}; handle_backend_update_result({ok, _State}, Config) -> - {200, to_json(Config)}; + {200, to_redacted_json(Config)}; handle_backend_update_result(ok, _) -> 204; handle_backend_update_result({error, not_exists}, _) -> @@ -278,9 +278,9 @@ handle_backend_update_result({error, Reason}, _) when is_binary(Reason) -> handle_backend_update_result({error, Reason}, _) -> {400, #{code => ?BAD_REQUEST, message => emqx_dashboard_sso:format(["Reason: ", Reason])}}. 
-to_json(Data) -> +to_redacted_json(Data) -> emqx_utils_maps:jsonable_map( - Data, + emqx_utils:redact(Data), fun(K, V) -> {K, emqx_utils_maps:binary_string(V)} end diff --git a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_ldap.erl b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_ldap.erl index 583f1d683..faa87b80e 100644 --- a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_ldap.erl +++ b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_ldap.erl @@ -96,7 +96,7 @@ parse_config(Config0) -> %% In this feature, the `bind_password` is fixed, so it should conceal from the swagger, %% but the connector still needs it, hence we should add it back here ensure_bind_password(Config) -> - Config#{bind_password => <<"${password}">>}. + Config#{method => #{type => bind, bind_password => <<"${password}">>}}. adjust_ldap_fields(Fields) -> lists:map(fun adjust_ldap_field/1, Fields). diff --git a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_saml_api.erl b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_saml_api.erl index ccc40e2c6..949938884 100644 --- a/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_saml_api.erl +++ b/apps/emqx_dashboard_sso/src/emqx_dashboard_sso_saml_api.erl @@ -95,6 +95,10 @@ sp_saml_callback(post, Req) -> State = #{enable := true} -> case (provider(saml)):callback(Req, State) of {redirect, Redirect} -> + ?SLOG(info, #{ + msg => "dashboard_saml_sso_login_successful", + redirect => "SAML login successful. Redirecting with LoginMeta." + }), Redirect; {error, Reason} -> ?SLOG(info, #{ diff --git a/apps/emqx_dashboard_sso/test/emqx_dashboard_sso_ldap_SUITE.erl b/apps/emqx_dashboard_sso/test/emqx_dashboard_sso_ldap_SUITE.erl index 8966ffca9..9e831b4d2 100644 --- a/apps/emqx_dashboard_sso/test/emqx_dashboard_sso_ldap_SUITE.erl +++ b/apps/emqx_dashboard_sso/test/emqx_dashboard_sso_ldap_SUITE.erl @@ -10,9 +10,11 @@ -include_lib("emqx_dashboard/include/emqx_dashboard.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("eunit/include/eunit.hrl"). 
+-include_lib("common_test/include/ct.hrl"). -define(LDAP_HOST, "ldap"). -define(LDAP_DEFAULT_PORT, 389). +-define(LDAP_PASSWORD, <<"public">>). -define(LDAP_USER, <<"viewer1">>). -define(LDAP_USER_PASSWORD, <<"viewer1">>). -define(LDAP_BASE_DN, <<"ou=dashboard,dc=emqx,dc=io">>). @@ -128,9 +130,19 @@ t_update({init, Config}) -> Config; t_update({'end', _Config}) -> ok; -t_update(_) -> +t_update(Config) -> Path = uri(["sso", "ldap"]), - {ok, 200, Result} = request(put, Path, ldap_config(#{<<"enable">> => <<"true">>})), + %% NOTE: this time verify that supplying password through file-based secret works. + PasswordFilename = filename:join([?config(priv_dir, Config), "passfile"]), + ok = file:write_file(PasswordFilename, ?LDAP_PASSWORD), + {ok, 200, Result} = request( + put, + Path, + ldap_config(#{ + <<"enable">> => <<"true">>, + <<"password">> => iolist_to_binary(["file://", PasswordFilename]) + }) + ), check_running([<<"ldap">>]), ?assertMatch(#{backend := <<"ldap">>, enable := true}, decode_json(Result)), ?assertMatch([#{backend := <<"ldap">>, enable := true}], get_sso()), @@ -287,7 +299,7 @@ ldap_config(Override) -> <<"base_dn">> => ?LDAP_BASE_DN, <<"filter">> => ?LDAP_FILTER_WITH_UID, <<"username">> => <<"cn=root,dc=emqx,dc=io">>, - <<"password">> => <<"public">>, + <<"password">> => ?LDAP_PASSWORD, <<"pool_size">> => 8 }, Override diff --git a/apps/emqx_durable_storage/src/emqx_ds.erl b/apps/emqx_durable_storage/src/emqx_ds.erl index 725d62673..649341eb5 100644 --- a/apps/emqx_durable_storage/src/emqx_ds.erl +++ b/apps/emqx_durable_storage/src/emqx_ds.erl @@ -35,7 +35,6 @@ -export_type([ create_db_opts/0, - builtin_db_opts/0, db/0, time/0, topic_filter/0, @@ -47,7 +46,10 @@ next_result/1, next_result/0, store_batch_result/0, make_iterator_result/1, make_iterator_result/0, - get_iterator_result/1 + get_iterator_result/1, + + ds_specific_stream/0, + ds_specific_iterator/0 ]). 
%%================================================================================ @@ -64,9 +66,13 @@ -type stream_rank() :: {term(), integer()}. --opaque stream() :: emqx_ds_replication_layer:stream(). +-opaque iterator() :: ds_specific_iterator(). --opaque iterator() :: emqx_ds_replication_layer:iterator(). +-opaque stream() :: ds_specific_stream(). + +-type ds_specific_iterator() :: term(). + +-type ds_specific_stream() :: term(). -type store_batch_result() :: ok | {error, _}. @@ -87,14 +93,14 @@ -type message_store_opts() :: #{}. --type builtin_db_opts() :: +-type generic_db_opts() :: #{ - backend := builtin, - storage := emqx_ds_storage_layer:prototype() + backend := atom(), + _ => _ }. -type create_db_opts() :: - builtin_db_opts(). + emqx_ds_replication_layer:builtin_db_opts() | generic_db_opts(). -type message_id() :: emqx_ds_replication_layer:message_id(). @@ -114,9 +120,10 @@ -callback store_batch(db(), [emqx_types:message()], message_store_opts()) -> store_batch_result(). --callback get_streams(db(), topic_filter(), time()) -> [{stream_rank(), stream()}]. +-callback get_streams(db(), topic_filter(), time()) -> [{stream_rank(), ds_specific_stream()}]. --callback make_iterator(db(), _Stream, topic_filter(), time()) -> make_iterator_result(_Iterator). +-callback make_iterator(db(), ds_specific_stream(), topic_filter(), time()) -> + make_iterator_result(ds_specific_iterator()). -callback next(db(), Iterator, pos_integer()) -> next_result(Iterator). @@ -127,10 +134,11 @@ %% @doc Different DBs are completely independent from each other. They %% could represent something like different tenants. -spec open_db(db(), create_db_opts()) -> ok. 
-open_db(DB, Opts = #{backend := Backend}) when Backend =:= builtin -> +open_db(DB, Opts = #{backend := Backend}) when Backend =:= builtin orelse Backend =:= fdb -> Module = case Backend of - builtin -> emqx_ds_replication_layer + builtin -> emqx_ds_replication_layer; + fdb -> emqx_fdb_ds end, persistent_term:put(?persistent_term(DB), Module), ?module(DB):open_db(DB, Opts). diff --git a/apps/emqx_durable_storage/src/emqx_ds_lts.erl b/apps/emqx_durable_storage/src/emqx_ds_lts.erl index d06854fd0..d148e8cbc 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_lts.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_lts.erl @@ -119,7 +119,7 @@ trie_restore(Options, Dump) -> Trie. %% @doc Lookup the topic key. Create a new one, if not found. --spec topic_key(trie(), threshold_fun(), [binary()]) -> msg_storage_key(). +-spec topic_key(trie(), threshold_fun(), [binary() | '']) -> msg_storage_key(). topic_key(Trie, ThresholdFun, Tokens) -> do_topic_key(Trie, ThresholdFun, 0, ?PREFIX, Tokens, []). @@ -363,12 +363,12 @@ emanating(#trie{trie = Tab}, State, ?EOT) -> [#trans{next = Next}] -> [{?EOT, Next}]; [] -> [] end; -emanating(#trie{trie = Tab}, State, Bin) when is_binary(Bin) -> +emanating(#trie{trie = Tab}, State, Token) when is_binary(Token); Token =:= '' -> [ {Edge, Next} || #trans{key = {_, Edge}, next = Next} <- ets:lookup(Tab, {State, ?PLUS}) ++ - ets:lookup(Tab, {State, Bin}) + ets:lookup(Tab, {State, Token}) ]. 
%%================================================================================ @@ -533,6 +533,7 @@ topic_match_test() -> {S11, []} = test_key(T, ThresholdFun, [1, 1]), {S12, []} = test_key(T, ThresholdFun, [1, 2]), {S111, []} = test_key(T, ThresholdFun, [1, 1, 1]), + {S11e, []} = test_key(T, ThresholdFun, [1, 1, '']), %% Match concrete topics: assert_match_topics(T, [1], [{S1, []}]), assert_match_topics(T, [1, 1], [{S11, []}]), @@ -540,14 +541,16 @@ topic_match_test() -> %% Match topics with +: assert_match_topics(T, [1, '+'], [{S11, []}, {S12, []}]), assert_match_topics(T, [1, '+', 1], [{S111, []}]), + assert_match_topics(T, [1, '+', ''], [{S11e, []}]), %% Match topics with #: assert_match_topics(T, [1, '#'], [{S1, []}, {S11, []}, {S12, []}, - {S111, []}]), + {S111, []}, {S11e, []}]), assert_match_topics(T, [1, 1, '#'], [{S11, []}, - {S111, []}]), + {S111, []}, + {S11e, []}]), %% Now add learned wildcards: {S21, []} = test_key(T, ThresholdFun, [2, 1]), {S22, []} = test_key(T, ThresholdFun, [2, 2]), @@ -587,7 +590,10 @@ assert_match_topics(Trie, Filter0, Expected) -> %% erlfmt-ignore test_key(Trie, Threshold, Topic0) -> - Topic = [integer_to_binary(I) || I <- Topic0], + Topic = lists:map(fun('') -> ''; + (I) -> integer_to_binary(I) + end, + Topic0), Ret = topic_key(Trie, Threshold, Topic), %% Test idempotency: Ret1 = topic_key(Trie, Threshold, Topic), diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl index a06af104d..7a26b696d 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer.erl @@ -32,15 +32,16 @@ %% internal exports: -export([ - do_open_shard_v1/3, - do_drop_shard_v1/2, + do_drop_db_v1/1, do_store_batch_v1/4, do_get_streams_v1/4, do_make_iterator_v1/5, do_next_v1/4 ]). --export_type([shard_id/0, stream/0, iterator/0, message_id/0]). 
+-export_type([shard_id/0, builtin_db_opts/0, stream/0, iterator/0, message_id/0, batch/0]). + +-include_lib("emqx_utils/include/emqx_message.hrl"). %%================================================================================ %% Type declarations @@ -52,13 +53,22 @@ %% tags: -define(STREAM, 1). -define(IT, 2). +-define(BATCH, 3). %% keys: -define(tag, 1). -define(shard, 2). -define(enc, 3). --type shard_id() :: atom(). +-type shard_id() :: binary(). + +-type builtin_db_opts() :: + #{ + backend := builtin, + storage := emqx_ds_storage_layer:prototype(), + n_shards => pos_integer(), + replication_factor => pos_integer() + }. %% This enapsulates the stream entity from the replication level. %% @@ -82,42 +92,46 @@ -type message_id() :: emqx_ds_storage_layer:message_id(). +-define(batch_messages, 2). + +-type batch() :: #{ + ?tag := ?BATCH, + ?batch_messages := [emqx_types:message()] +}. + %%================================================================================ %% API functions %%================================================================================ -spec list_shards(emqx_ds:db()) -> [shard_id()]. -list_shards(_DB) -> - %% TODO: milestone 5 - list_nodes(). +list_shards(DB) -> + emqx_ds_replication_layer_meta:shards(DB). --spec open_db(emqx_ds:db(), emqx_ds:create_db_opts()) -> ok | {error, _}. -open_db(DB, Opts) -> - %% TODO: improve error reporting, don't just crash +-spec open_db(emqx_ds:db(), builtin_db_opts()) -> ok | {error, _}. +open_db(DB, CreateOpts) -> + Opts = emqx_ds_replication_layer_meta:open_db(DB, CreateOpts), + MyShards = emqx_ds_replication_layer_meta:my_shards(DB), lists:foreach( fun(Shard) -> - Node = node_of_shard(DB, Shard), - ok = emqx_ds_proto_v1:open_shard(Node, DB, Shard, Opts) + emqx_ds_storage_layer:open_shard({DB, Shard}, Opts), + maybe_set_myself_as_leader(DB, Shard) end, - list_shards(DB) + MyShards ). -spec drop_db(emqx_ds:db()) -> ok | {error, _}. 
drop_db(DB) -> - lists:foreach( - fun(Shard) -> - Node = node_of_shard(DB, Shard), - ok = emqx_ds_proto_v1:drop_shard(Node, DB, Shard) - end, - list_shards(DB) - ). + Nodes = list_nodes(), + _ = emqx_ds_proto_v1:drop_db(Nodes, DB), + _ = emqx_ds_replication_layer_meta:drop_db(DB), + ok. --spec store_batch(emqx_ds:db(), [emqx_types:message()], emqx_ds:message_store_opts()) -> +-spec store_batch(emqx_ds:db(), [emqx_types:message(), ...], emqx_ds:message_store_opts()) -> emqx_ds:store_batch_result(). -store_batch(DB, Batch, Opts) -> - %% TODO: Currently we store messages locally. - Shard = node(), +store_batch(DB, Messages, Opts) -> + Shard = shard_of_messages(DB, Messages), Node = node_of_shard(DB, Shard), + Batch = #{?tag => ?BATCH, ?batch_messages => Messages}, emqx_ds_proto_v1:store_batch(Node, DB, Shard, Batch, Opts). -spec get_streams(emqx_ds:db(), emqx_ds:topic_filter(), emqx_ds:time()) -> @@ -184,26 +198,25 @@ next(DB, Iter0, BatchSize) -> %% Internal exports (RPC targets) %%================================================================================ --spec do_open_shard_v1( - emqx_ds:db(), emqx_ds_replication_layer:shard_id(), emqx_ds:create_db_opts() -) -> - ok | {error, _}. -do_open_shard_v1(DB, Shard, Opts) -> - emqx_ds_storage_layer:open_shard({DB, Shard}, Opts). - --spec do_drop_shard_v1(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> ok | {error, _}. -do_drop_shard_v1(DB, Shard) -> - emqx_ds_storage_layer:drop_shard({DB, Shard}). +-spec do_drop_db_v1(emqx_ds:db()) -> ok | {error, _}. +do_drop_db_v1(DB) -> + MyShards = emqx_ds_replication_layer_meta:my_shards(DB), + lists:foreach( + fun(Shard) -> + emqx_ds_storage_layer:drop_shard({DB, Shard}) + end, + MyShards + ). -spec do_store_batch_v1( emqx_ds:db(), emqx_ds_replication_layer:shard_id(), - [emqx_types:message()], + batch(), emqx_ds:message_store_opts() ) -> emqx_ds:store_batch_result(). 
-do_store_batch_v1(DB, Shard, Batch, Options) -> - emqx_ds_storage_layer:store_batch({DB, Shard}, Batch, Options). +do_store_batch_v1(DB, Shard, #{?tag := ?BATCH, ?batch_messages := Messages}, Options) -> + emqx_ds_storage_layer:store_batch({DB, Shard}, Messages, Options). -spec do_get_streams_v1( emqx_ds:db(), emqx_ds_replicationi_layer:shard_id(), emqx_ds:topic_filter(), emqx_ds:time() @@ -237,9 +250,34 @@ do_next_v1(DB, Shard, Iter, BatchSize) -> %% Internal functions %%================================================================================ +%% TODO: there's no real leader election right now +-spec maybe_set_myself_as_leader(emqx_ds:db(), shard_id()) -> ok. +maybe_set_myself_as_leader(DB, Shard) -> + Site = emqx_ds_replication_layer_meta:this_site(), + case emqx_ds_replication_layer_meta:in_sync_replicas(DB, Shard) of + [Site | _] -> + %% Currently the first in-sync replica always becomes the + %% leader + ok = emqx_ds_replication_layer_meta:set_leader(DB, Shard, node()); + _Sites -> + ok + end. + -spec node_of_shard(emqx_ds:db(), shard_id()) -> node(). -node_of_shard(_DB, Node) -> - Node. +node_of_shard(DB, Shard) -> + case emqx_ds_replication_layer_meta:shard_leader(DB, Shard) of + {ok, Leader} -> + Leader; + {error, no_leader_for_shard} -> + %% TODO: use optvar + timer:sleep(500), + node_of_shard(DB, Shard) + end. + +%% Here we assume that all messages in the batch come from the same client +shard_of_messages(DB, [#message{from = From} | _]) -> + N = emqx_ds_replication_layer_meta:n_shards(DB), + integer_to_binary(erlang:phash2(From, N)). list_nodes() -> mria:running_nodes(). 
diff --git a/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl new file mode 100644 index 000000000..5c451206d --- /dev/null +++ b/apps/emqx_durable_storage/src/emqx_ds_replication_layer_meta.erl @@ -0,0 +1,371 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% @doc Metadata storage for the builtin sharded database. +%% +%% Currently metadata is stored in mria; that's not ideal, but +%% eventually we'll replace it, so it's important not to leak +%% implementation details from this module. +-module(emqx_ds_replication_layer_meta). + +-behaviour(gen_server). + +%% API: +-export([ + shards/1, + my_shards/1, + replica_set/2, + in_sync_replicas/2, + sites/0, + open_db/2, + drop_db/1, + shard_leader/2, + this_site/0, + set_leader/3, + print_status/0 +]). + +%% gen_server +-export([start_link/0, init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). + +%% internal exports: +-export([ + open_db_trans/2, + drop_db_trans/1, + claim_site/2, + in_sync_replicas_trans/2, + set_leader_trans/3, + n_shards/1 +]). + +-export_type([site/0]). + +-include_lib("stdlib/include/qlc.hrl"). 
+ +%%================================================================================ +%% Type declarations +%%================================================================================ + +-define(SERVER, ?MODULE). + +-define(SHARD, emqx_ds_builtin_metadata_shard). +%% DS database metadata: +-define(META_TAB, emqx_ds_builtin_metadata_tab). +%% Mapping from Site to the actual Erlang node: +-define(NODE_TAB, emqx_ds_builtin_node_tab). +%% Shard metadata: +-define(SHARD_TAB, emqx_ds_builtin_shard_tab). + +-record(?META_TAB, { + db :: emqx_ds:db(), + db_props :: emqx_ds_replication_layer:builtin_db_opts() +}). + +-record(?NODE_TAB, { + site :: site(), + node :: node(), + misc = #{} :: map() +}). + +-record(?SHARD_TAB, { + shard :: {emqx_ds:db(), emqx_ds_replication_layer:shard_id()}, + %% Sites that should contain the data when the cluster is in the + %% stable state (no nodes are being added or removed from it): + replica_set :: [site()], + %% Sites that contain the actual data: + in_sync_replicas :: [site()], + leader :: node() | undefined, + misc = #{} :: map() +}). + +%% Persistent ID of the node (independent from the IP/FQDN): +-type site() :: binary(). + +%% Peristent term key: +-define(emqx_ds_builtin_site, emqx_ds_builtin_site). + +%%================================================================================ +%% API funcions +%%================================================================================ + +-spec print_status() -> ok. 
+print_status() -> + io:format("THIS SITE:~n~s~n", [base64:encode(this_site())]), + io:format("~nSITES:~n", []), + Nodes = [node() | nodes()], + lists:foreach( + fun(#?NODE_TAB{site = Site, node = Node}) -> + Status = + case lists:member(Node, Nodes) of + true -> up; + false -> down + end, + io:format("~s ~p ~p~n", [base64:encode(Site), Node, Status]) + end, + eval_qlc(mnesia:table(?NODE_TAB)) + ), + io:format("~nSHARDS~n", []), + lists:foreach( + fun(#?SHARD_TAB{shard = {DB, Shard}, leader = Leader}) -> + Status = + case lists:member(Leader, Nodes) of + true -> up; + false -> down + end, + io:format("~p/~s ~p ~p~n", [DB, Shard, Leader, Status]) + end, + eval_qlc(mnesia:table(?SHARD_TAB)) + ). + +-spec this_site() -> site(). +this_site() -> + persistent_term:get(?emqx_ds_builtin_site). + +-spec n_shards(emqx_ds:db()) -> pos_integer(). +n_shards(DB) -> + [#?META_TAB{db_props = #{n_shards := NShards}}] = mnesia:dirty_read(?META_TAB, DB), + NShards. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). + +-spec shards(emqx_ds:db()) -> [emqx_ds_replication_layer:shard_id()]. +shards(DB) -> + eval_qlc( + qlc:q([Shard || #?SHARD_TAB{shard = {D, Shard}} <- mnesia:table(?SHARD_TAB), D =:= DB]) + ). + +-spec my_shards(emqx_ds:db()) -> [emqx_ds_replication_layer:shard_id()]. +my_shards(DB) -> + Site = this_site(), + eval_qlc( + qlc:q([ + Shard + || #?SHARD_TAB{shard = {D, Shard}, replica_set = ReplicaSet, in_sync_replicas = InSync} <- mnesia:table( + ?SHARD_TAB + ), + D =:= DB, + lists:member(Site, ReplicaSet) orelse lists:member(Site, InSync) + ]) + ). + +-spec replica_set(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> + {ok, [site()]} | {error, _}. +replica_set(DB, Shard) -> + case mnesia:dirty_read(?SHARD_TAB, {DB, Shard}) of + [#?SHARD_TAB{replica_set = ReplicaSet}] -> + {ok, ReplicaSet}; + [] -> + {error, no_shard} + end. 
+ +-spec in_sync_replicas(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> + [site()]. +in_sync_replicas(DB, ShardId) -> + {atomic, Result} = mria:transaction(?SHARD, fun ?MODULE:in_sync_replicas_trans/2, [DB, ShardId]), + case Result of + {ok, InSync} -> + InSync; + {error, _} -> + [] + end. + +-spec sites() -> [site()]. +sites() -> + eval_qlc(qlc:q([Site || #?NODE_TAB{site = Site} <- mnesia:table(?NODE_TAB)])). + +-spec shard_leader(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> + {ok, node()} | {error, no_leader_for_shard}. +shard_leader(DB, Shard) -> + case mnesia:dirty_read(?SHARD_TAB, {DB, Shard}) of + [#?SHARD_TAB{leader = Leader}] when Leader =/= undefined -> + {ok, Leader}; + _ -> + {error, no_leader_for_shard} + end. + +-spec set_leader(emqx_ds:db(), emqx_ds_replication_layer:shard_id(), node()) -> + ok. +set_leader(DB, Shard, Node) -> + {atomic, _} = mria:transaction(?SHARD, fun ?MODULE:set_leader_trans/3, [DB, Shard, Node]), + ok. + +-spec open_db(emqx_ds:db(), emqx_ds_replication_layer:builtin_db_opts()) -> + emqx_ds_replication_layer:builtin_db_opts(). +open_db(DB, DefaultOpts) -> + {atomic, Opts} = mria:transaction(?SHARD, fun ?MODULE:open_db_trans/2, [DB, DefaultOpts]), + Opts. + +-spec drop_db(emqx_ds:db()) -> ok. +drop_db(DB) -> + _ = mria:transaction(?SHARD, fun ?MODULE:drop_db_trans/1, [DB]), + ok. + +%%================================================================================ +%% behavior callbacks +%%================================================================================ + +-record(s, {}). + +init([]) -> + process_flag(trap_exit, true), + logger:set_process_metadata(#{domain => [ds, meta]}), + ensure_tables(), + ensure_site(), + S = #s{}, + {ok, S}. + +handle_call(_Call, _From, S) -> + {reply, {error, unknown_call}, S}. + +handle_cast(_Cast, S) -> + {noreply, S}. + +handle_info(_Info, S) -> + {noreply, S}. + +terminate(_Reason, #s{}) -> + persistent_term:erase(?emqx_ds_builtin_site), + ok. 
+ +%%================================================================================ +%% Internal exports +%%================================================================================ + +-spec open_db_trans(emqx_ds:db(), emqx_ds_replication_layer:builtin_db_opts()) -> + emqx_ds_replication_layer:builtin_db_opts(). +open_db_trans(DB, CreateOpts) -> + case mnesia:wread({?META_TAB, DB}) of + [] -> + NShards = maps:get(n_shards, CreateOpts), + ReplicationFactor = maps:get(replication_factor, CreateOpts), + mnesia:write(#?META_TAB{db = DB, db_props = CreateOpts}), + create_shards(DB, NShards, ReplicationFactor), + CreateOpts; + [#?META_TAB{db_props = Opts}] -> + Opts + end. + +-spec drop_db_trans(emqx_ds:db()) -> ok. +drop_db_trans(DB) -> + mnesia:delete({?META_TAB, DB}), + [mnesia:delete({?SHARD_TAB, Shard}) || Shard <- shards(DB)], + ok. + +-spec claim_site(site(), node()) -> ok. +claim_site(Site, Node) -> + mnesia:write(#?NODE_TAB{site = Site, node = Node}). + +-spec in_sync_replicas_trans(emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> + {ok, [site()]} | {error, no_shard}. +in_sync_replicas_trans(DB, Shard) -> + case mnesia:read(?SHARD_TAB, {DB, Shard}) of + [#?SHARD_TAB{in_sync_replicas = InSync}] -> + {ok, InSync}; + [] -> + {error, no_shard} + end. + +-spec set_leader_trans(emqx_ds:ds(), emqx_ds_replication_layer:shard_id(), node()) -> + ok. +set_leader_trans(DB, Shard, Node) -> + [Record0] = mnesia:wread({?SHARD_TAB, {DB, Shard}}), + Record = Record0#?SHARD_TAB{leader = Node}, + mnesia:write(Record). 
+ +%%================================================================================ +%% Internal functions +%%================================================================================ + +ensure_tables() -> + %% TODO: seems like it may introduce flakiness + Majority = false, + ok = mria:create_table(?META_TAB, [ + {rlog_shard, ?SHARD}, + {majority, Majority}, + {type, ordered_set}, + {storage, rocksdb_copies}, + {record_name, ?META_TAB}, + {attributes, record_info(fields, ?META_TAB)} + ]), + ok = mria:create_table(?NODE_TAB, [ + {rlog_shard, ?SHARD}, + {majority, Majority}, + {type, ordered_set}, + {storage, rocksdb_copies}, + {record_name, ?NODE_TAB}, + {attributes, record_info(fields, ?NODE_TAB)} + ]), + ok = mria:create_table(?SHARD_TAB, [ + {rlog_shard, ?SHARD}, + {majority, Majority}, + {type, ordered_set}, + {storage, disc_copies}, + {record_name, ?SHARD_TAB}, + {attributes, record_info(fields, ?SHARD_TAB)} + ]), + ok = mria:wait_for_tables([?META_TAB, ?NODE_TAB, ?SHARD_TAB]). + +ensure_site() -> + Filename = filename:join(emqx:data_dir(), "emqx_ds_builtin_site.eterm"), + case file:consult(Filename) of + {ok, [Site]} -> + ok; + _ -> + Site = crypto:strong_rand_bytes(8), + logger:notice("Creating a new site with ID=~s", [base64:encode(Site)]), + ok = filelib:ensure_dir(Filename), + {ok, FD} = file:open(Filename, [write]), + io:format(FD, "~p.", [Site]), + file:close(FD) + end, + {atomic, ok} = mria:transaction(?SHARD, fun ?MODULE:claim_site/2, [Site, node()]), + persistent_term:put(?emqx_ds_builtin_site, Site), + ok. + +-spec create_shards(emqx_ds:db(), pos_integer(), pos_integer()) -> ok. 
+create_shards(DB, NShards, ReplicationFactor) -> + Shards = [integer_to_binary(I) || I <- lists:seq(0, NShards - 1)], + AllSites = sites(), + lists:foreach( + fun(Shard) -> + Hashes0 = [{hash(Shard, Site), Site} || Site <- AllSites], + Hashes = lists:sort(Hashes0), + {_, Sites} = lists:unzip(Hashes), + [First | _] = ReplicaSet = lists:sublist(Sites, 1, ReplicationFactor), + Record = #?SHARD_TAB{ + shard = {DB, Shard}, + replica_set = ReplicaSet, + in_sync_replicas = [First] + }, + mnesia:write(Record) + end, + Shards + ). + +-spec hash(emqx_ds_replication_layer:shard_id(), site()) -> any(). +hash(Shard, Site) -> + erlang:phash2({Shard, Site}). + +eval_qlc(Q) -> + case mnesia:is_transaction() of + true -> + qlc:eval(Q); + false -> + {atomic, Result} = mria:ro_transaction(?SHARD, fun() -> qlc:eval(Q) end), + Result + end. diff --git a/apps/emqx_durable_storage/src/emqx_ds_storage_bitfield_lts.erl b/apps/emqx_durable_storage/src/emqx_ds_storage_bitfield_lts.erl index 2d4949919..6a69a20f3 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_storage_bitfield_lts.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_storage_bitfield_lts.erl @@ -99,8 +99,27 @@ %% Limit on the number of wildcard levels in the learned topic trie: -define(WILDCARD_LIMIT, 10). +%% Persistent (durable) term representing `#message{}' record. Must +%% not change. +-type value_v1() :: + { + _Id :: binary(), + _Qos :: 0..2, + _From :: atom() | binary(), + _Flags :: emqx_types:flags(), + _Headsers :: emqx_types:headers(), + _Topic :: emqx_types:topic(), + _Payload :: emqx_types:payload(), + _Timestamp :: integer(), + _Extra :: term() + }. + -include("emqx_ds_bitmask.hrl"). +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). +-endif. + %%================================================================================ %% API funcions %%================================================================================ @@ -389,11 +408,39 @@ hash_topic_level(TopicLevel) -> <> = erlang:md5(TopicLevel), Int. 
+-spec message_to_value_v1(emqx_types:message()) -> value_v1(). +message_to_value_v1(#message{ + id = Id, + qos = Qos, + from = From, + flags = Flags, + headers = Headers, + topic = Topic, + payload = Payload, + timestamp = Timestamp, + extra = Extra +}) -> + {Id, Qos, From, Flags, Headers, Topic, Payload, Timestamp, Extra}. + +-spec value_v1_to_message(value_v1()) -> emqx_types:message(). +value_v1_to_message({Id, Qos, From, Flags, Headers, Topic, Payload, Timestamp, Extra}) -> + #message{ + id = Id, + qos = Qos, + from = From, + flags = Flags, + headers = Headers, + topic = Topic, + payload = Payload, + timestamp = Timestamp, + extra = Extra + }. + serialize(Msg) -> - term_to_binary(Msg). + term_to_binary(message_to_value_v1(Msg)). deserialize(Blob) -> - binary_to_term(Blob). + value_v1_to_message(binary_to_term(Blob)). -define(BYTE_SIZE, 8). @@ -452,3 +499,21 @@ data_cf(GenId) -> -spec trie_cf(emqx_ds_storage_layer:gen_id()) -> [char()]. trie_cf(GenId) -> "emqx_ds_storage_bitfield_lts_trie" ++ integer_to_list(GenId). + +-ifdef(TEST). + +serialize_deserialize_test() -> + Msg = #message{ + id = <<"message_id_val">>, + qos = 2, + from = <<"from_val">>, + flags = #{sys => true, dup => true}, + headers = #{foo => bar}, + topic = <<"topic/value">>, + payload = [<<"foo">>, <<"bar">>], + timestamp = 42424242, + extra = "extra_val" + }, + ?assertEqual(Msg, deserialize(serialize(Msg))). + +-endif. diff --git a/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl b/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl index 0fe719dbc..54530f428 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_storage_layer.erl @@ -384,7 +384,7 @@ rocksdb_open(Shard, Options) -> -spec db_dir(shard_id()) -> file:filename(). db_dir({DB, ShardId}) -> - filename:join([emqx:data_dir(), atom_to_list(DB), atom_to_list(ShardId)]). + filename:join([emqx:data_dir(), atom_to_list(DB), binary_to_list(ShardId)]). 
%%-------------------------------------------------------------------------------- %% Schema access diff --git a/apps/emqx_durable_storage/src/emqx_ds_sup.erl b/apps/emqx_durable_storage/src/emqx_ds_sup.erl index d371a2346..081557a46 100644 --- a/apps/emqx_durable_storage/src/emqx_ds_sup.erl +++ b/apps/emqx_durable_storage/src/emqx_ds_sup.erl @@ -29,8 +29,15 @@ start_link() -> %% behaviour callbacks %%================================================================================ +-dialyzer({nowarn_function, init/1}). init([]) -> - Children = [storage_layer_sup()], + %% TODO: technically, we don't need rocksDB for the alternative + %% backends. But right now we have any: + Children = + case mria:rocksdb_backend_available() of + true -> [meta(), storage_layer_sup()]; + false -> [] + end, SupFlags = #{ strategy => one_for_all, intensity => 0, @@ -42,6 +49,15 @@ init([]) -> %% Internal functions %%================================================================================ +meta() -> + #{ + id => emqx_ds_replication_layer_meta, + start => {emqx_ds_replication_layer_meta, start_link, []}, + restart => permanent, + type => worker, + shutdown => 5000 + }. + storage_layer_sup() -> #{ id => local_store_shard_sup, diff --git a/apps/emqx_durable_storage/src/emqx_durable_storage.app.src b/apps/emqx_durable_storage/src/emqx_durable_storage.app.src index 2bce4ff8e..8d868bc75 100644 --- a/apps/emqx_durable_storage/src/emqx_durable_storage.app.src +++ b/apps/emqx_durable_storage/src/emqx_durable_storage.app.src @@ -2,7 +2,7 @@ {application, emqx_durable_storage, [ {description, "Message persistence and subscription replays for EMQX"}, % strict semver, bump manually! 
- {vsn, "0.1.7"}, + {vsn, "0.1.8"}, {modules, []}, {registered, []}, {applications, [kernel, stdlib, rocksdb, gproc, mria, emqx_utils]}, diff --git a/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl b/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl index 10d1ed7a5..0d7972466 100644 --- a/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl +++ b/apps/emqx_durable_storage/src/proto/emqx_ds_proto_v1.erl @@ -19,7 +19,7 @@ -include_lib("emqx_utils/include/bpapi.hrl"). %% API: --export([open_shard/4, drop_shard/3, store_batch/5, get_streams/5, make_iterator/6, next/5]). +-export([drop_db/2, store_batch/5, get_streams/5, make_iterator/6, next/5]). %% behavior callbacks: -export([introduced_in/0]). @@ -28,20 +28,10 @@ %% API funcions %%================================================================================ --spec open_shard( - node(), - emqx_ds:db(), - emqx_ds_replication_layer:shard_id(), - emqx_ds:create_db_opts() -) -> - ok. -open_shard(Node, DB, Shard, Opts) -> - erpc:call(Node, emqx_ds_replication_layer, do_open_shard_v1, [DB, Shard, Opts]). - --spec drop_shard(node(), emqx_ds:db(), emqx_ds_replication_layer:shard_id()) -> - ok. -drop_shard(Node, DB, Shard) -> - erpc:call(Node, emqx_ds_replication_layer, do_drop_shard_v1, [DB, Shard]). +-spec drop_db([node()], emqx_ds:db()) -> + [{ok, ok} | erpc:caught_call_exception()]. +drop_db(Node, DB) -> + erpc:multicall(Node, emqx_ds_replication_layer, do_drop_db_v1, [DB]). -spec get_streams( node(), @@ -85,7 +75,7 @@ next(Node, DB, Shard, Iter, BatchSize) -> node(), emqx_ds:db(), emqx_ds_replication_layer:shard_id(), - [emqx_types:message()], + emqx_ds_replication_layer:batch(), emqx_ds:message_store_opts() ) -> emqx_ds:store_batch_result(). 
diff --git a/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl b/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl
index 9b74e3227..8a46804b0 100644
--- a/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl
+++ b/apps/emqx_durable_storage/test/emqx_ds_SUITE.erl
@@ -23,10 +23,14 @@
 -include_lib("stdlib/include/assert.hrl").
 -include_lib("snabbkaffe/include/snabbkaffe.hrl").
 
+-define(N_SHARDS, 1).
+
 opts() ->
     #{
         backend => builtin,
-        storage => {emqx_ds_storage_reference, #{}}
+        storage => {emqx_ds_storage_reference, #{}},
+        n_shards => ?N_SHARDS,
+        replication_factor => 3
     }.
 
 %% A simple smoke test that verifies that opening/closing the DB
@@ -34,7 +38,32 @@ opts() ->
 t_00_smoke_open_drop(_Config) ->
     DB = 'DB',
     ?assertMatch(ok, emqx_ds:open_db(DB, opts())),
+    %% Check metadata:
+    %% We have only one site:
+    [Site] = emqx_ds_replication_layer_meta:sites(),
+    %% Check all shards:
+    Shards = emqx_ds_replication_layer_meta:shards(DB),
+    %% Since there is only one site all shards should be allocated
+    %% to this site:
+    MyShards = emqx_ds_replication_layer_meta:my_shards(DB),
+    ?assertEqual(?N_SHARDS, length(Shards)),
+    lists:foreach(
+        fun(Shard) ->
+            ?assertEqual(
+                {ok, [Site]}, emqx_ds_replication_layer_meta:replica_set(DB, Shard)
+            ),
+            ?assertEqual(
+                [Site], emqx_ds_replication_layer_meta:in_sync_replicas(DB, Shard)
+            ),
+            %% Check that the leader is elected:
+            ?assertEqual({ok, node()}, emqx_ds_replication_layer_meta:shard_leader(DB, Shard))
+        end,
+        Shards
+    ),
+    ?assertEqual(lists:sort(Shards), lists:sort(MyShards)),
+    %% Reopen the DB and make sure the operation is idempotent:
     ?assertMatch(ok, emqx_ds:open_db(DB, opts())),
+    %% Close the DB:
     ?assertMatch(ok, emqx_ds:drop_db(DB)).
 
 %% A simple smoke test that verifies that storing the messages doesn't
@@ -138,9 +167,11 @@ end_per_suite(Config) ->
     ok.
 
 init_per_testcase(_TC, Config) ->
-    %% snabbkaffe:fix_ct_logging(),
     application:ensure_all_started(emqx_durable_storage),
     Config.
end_per_testcase(_TC, _Config) -> - ok = application:stop(emqx_durable_storage). + ok = application:stop(emqx_durable_storage), + mria:stop(), + _ = mnesia:delete_schema([node()]), + ok. diff --git a/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl b/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl index 6dc24a269..7b733406d 100644 --- a/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl +++ b/apps/emqx_durable_storage/test/emqx_ds_storage_bitfield_lts_SUITE.erl @@ -15,7 +15,9 @@ -define(DEFAULT_CONFIG, #{ backend => builtin, - storage => {emqx_ds_storage_bitfield_lts, #{}} + storage => {emqx_ds_storage_bitfield_lts, #{}}, + n_shards => 16, + replication_factor => 3 }). -define(COMPACT_CONFIG, #{ @@ -23,7 +25,9 @@ storage => {emqx_ds_storage_bitfield_lts, #{ bits_per_wildcard_level => 8 - }} + }}, + n_shards => 16, + replication_factor => 3 }). %% Smoke test for opening and reopening the database @@ -387,7 +391,7 @@ end_per_testcase(TC, _Config) -> ok = emqx_ds_storage_layer_sup:stop_shard(shard(TC)). shard(TC) -> - {?MODULE, TC}. + {?MODULE, atom_to_binary(TC)}. keyspace(TC) -> TC. 
diff --git a/apps/emqx_enterprise/src/emqx_enterprise.app.src b/apps/emqx_enterprise/src/emqx_enterprise.app.src index 06bc500f4..d7bcb1fd5 100644 --- a/apps/emqx_enterprise/src/emqx_enterprise.app.src +++ b/apps/emqx_enterprise/src/emqx_enterprise.app.src @@ -1,6 +1,6 @@ {application, emqx_enterprise, [ {description, "EMQX Enterprise Edition"}, - {vsn, "0.1.5"}, + {vsn, "0.1.6"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_enterprise/src/emqx_enterprise_schema.erl b/apps/emqx_enterprise/src/emqx_enterprise_schema.erl index 658666fc7..66af3206b 100644 --- a/apps/emqx_enterprise/src/emqx_enterprise_schema.erl +++ b/apps/emqx_enterprise/src/emqx_enterprise_schema.erl @@ -83,6 +83,24 @@ fields("log_audit_handler") -> desc => ?DESC(emqx_conf_schema, "log_file_handler_max_size"), importance => ?IMPORTANCE_MEDIUM } + )}, + {"max_filter_size", + hoconsc:mk( + range(10, 30000), + #{ + default => 5000, + desc => ?DESC(emqx_conf_schema, "audit_log_max_filter_limit"), + importance => ?IMPORTANCE_MEDIUM + } + )}, + {"ignore_high_frequency_request", + hoconsc:mk( + boolean(), + #{ + default => true, + desc => ?DESC(emqx_conf_schema, "audit_log_ignore_high_frequency_request"), + importance => ?IMPORTANCE_MEDIUM + } )} ] ++ CommonConfs1; fields(Name) -> diff --git a/apps/emqx_enterprise/test/emqx_enterprise_schema_SUITE.erl b/apps/emqx_enterprise/test/emqx_enterprise_schema_SUITE.erl index e2aece927..bf1f358ea 100644 --- a/apps/emqx_enterprise/test/emqx_enterprise_schema_SUITE.erl +++ b/apps/emqx_enterprise/test/emqx_enterprise_schema_SUITE.erl @@ -95,6 +95,8 @@ t_audit_log_conf(_Config) -> <<"enable">> => false, <<"level">> => <<"info">>, <<"path">> => <<"log/audit.log">>, + <<"ignore_high_frequency_request">> => true, + <<"max_filter_size">> => 5000, <<"rotation_count">> => 10, <<"rotation_size">> => <<"50MB">>, <<"time_offset">> => <<"system">> diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src 
b/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src index 4f4cf5722..cc415d495 100644 --- a/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent.app.src @@ -1,6 +1,6 @@ {application, emqx_eviction_agent, [ {description, "EMQX Eviction Agent"}, - {vsn, "5.1.4"}, + {vsn, "5.1.5"}, {registered, [ emqx_eviction_agent_sup, emqx_eviction_agent, diff --git a/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl b/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl index 42cffcb3d..9f1352b7c 100644 --- a/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl +++ b/apps/emqx_eviction_agent/src/emqx_eviction_agent.erl @@ -15,8 +15,11 @@ -export([ start_link/0, enable/2, + enable/3, + default_options/0, disable/1, status/0, + enable_status/0, connection_count/0, all_channels_count/0, session_count/0, @@ -51,7 +54,7 @@ unhook/0 ]). --export_type([server_reference/0]). +-export_type([server_reference/0, kind/0, options/0]). -define(CONN_MODULES, [ emqx_connection, emqx_ws_connection, emqx_quic_connection, emqx_eviction_agent_channel @@ -67,15 +70,31 @@ connections := non_neg_integer(), sessions := non_neg_integer() }. --type kind() :: atom(). + +%% kind() is any() because it was not exported previously +%% and bpapi checker remembered it as any() +-type kind() :: any(). +-type options() :: #{ + allow_connections => boolean() +}. -spec start_link() -> startlink_ret(). start_link() -> gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). +-spec default_options() -> options(). +default_options() -> + #{ + allow_connections => false + }. + -spec enable(kind(), server_reference()) -> ok_or_error(eviction_agent_busy). enable(Kind, ServerReference) -> - gen_server:call(?MODULE, {enable, Kind, ServerReference}). + gen_server:call(?MODULE, {enable, Kind, ServerReference, default_options()}). + +-spec enable(kind(), server_reference(), options()) -> ok_or_error(eviction_agent_busy). 
+enable(Kind, ServerReference, #{} = Options) -> + gen_server:call(?MODULE, {enable, Kind, ServerReference, Options}). -spec disable(kind()) -> ok. disable(Kind) -> @@ -84,16 +103,20 @@ disable(Kind) -> -spec status() -> status(). status() -> case enable_status() of - {enabled, _Kind, _ServerReference} -> + {enabled, _Kind, _ServerReference, _Options} -> {enabled, stats()}; disabled -> disabled end. +-spec enable_status() -> disabled | {enabled, kind(), server_reference(), options()}. +enable_status() -> + persistent_term:get(?MODULE, disabled). + -spec evict_connections(pos_integer()) -> ok_or_error(disabled). evict_connections(N) -> case enable_status() of - {enabled, _Kind, ServerReference} -> + {enabled, _Kind, ServerReference, _Options} -> ok = do_evict_connections(N, ServerReference); disabled -> {error, disabled} @@ -112,15 +135,16 @@ evict_sessions(N, Nodes, ConnState) when is_list(Nodes) andalso length(Nodes) > 0 -> case enable_status() of - {enabled, _Kind, _ServerReference} -> + {enabled, _Kind, _ServerReference, _Options} -> ok = do_evict_sessions(N, Nodes, ConnState); disabled -> {error, disabled} end. +-spec purge_sessions(non_neg_integer()) -> ok_or_error(disabled). purge_sessions(N) -> case enable_status() of - {enabled, _Kind, _ServerReference} -> + {enabled, _Kind, _ServerReference, _Options} -> ok = do_purge_sessions(N); disabled -> {error, disabled} @@ -135,14 +159,14 @@ init([]) -> {ok, #{}}. 
%% enable -handle_call({enable, Kind, ServerReference}, _From, St) -> +handle_call({enable, Kind, ServerReference, Options}, _From, St) -> Reply = case enable_status() of disabled -> - ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference}); - {enabled, Kind, _ServerReference} -> - ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference}); - {enabled, _OtherKind, _ServerReference} -> + ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference, Options}); + {enabled, Kind, _ServerReference, _Options} -> + ok = persistent_term:put(?MODULE, {enabled, Kind, ServerReference, Options}); + {enabled, _OtherKind, _ServerReference, _Options} -> {error, eviction_agent_busy} end, {reply, Reply, St}; @@ -152,10 +176,10 @@ handle_call({disable, Kind}, _From, St) -> case enable_status() of disabled -> {error, disabled}; - {enabled, Kind, _ServerReference} -> + {enabled, Kind, _ServerReference, _Options} -> _ = persistent_term:erase(?MODULE), ok; - {enabled, _OtherKind, _ServerReference} -> + {enabled, _OtherKind, _ServerReference, _Options} -> {error, eviction_agent_busy} end, {reply, Reply, St}; @@ -180,8 +204,10 @@ code_change(_Vsn, State, _Extra) -> on_connect(_ConnInfo, _Props) -> case enable_status() of - {enabled, _Kind, _ServerReference} -> + {enabled, _Kind, _ServerReference, #{allow_connections := false}} -> {stop, {error, ?RC_USE_ANOTHER_SERVER}}; + {enabled, _Kind, _ServerReference, _Options} -> + ignore; disabled -> ignore end. @@ -192,7 +218,7 @@ on_connack( Props ) -> case enable_status() of - {enabled, _Kind, ServerReference} -> + {enabled, _Kind, ServerReference, _Options} -> {ok, Props#{'Server-Reference' => ServerReference}}; disabled -> {ok, Props} @@ -214,10 +240,10 @@ unhook() -> ok = emqx_hooks:del('client.connect', {?MODULE, on_connect}), ok = emqx_hooks:del('client.connack', {?MODULE, on_connack}). -enable_status() -> - persistent_term:get(?MODULE, disabled). 
+%%-------------------------------------------------------------------- +%% Internal funcs +%%-------------------------------------------------------------------- -% connection management stats() -> #{ connections => connection_count(), diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl index bc6f626d2..bf2865a78 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_SUITE.erl @@ -15,7 +15,11 @@ -import( emqx_eviction_agent_test_helpers, - [emqtt_connect/0, emqtt_connect/1, emqtt_connect/2, emqtt_connect_for_publish/1] + [ + emqtt_connect/0, emqtt_connect/1, emqtt_connect/2, + emqtt_connect_for_publish/1, + case_specific_node_name/1 + ] ). -define(assertPrinted(Printed, Code), @@ -29,11 +33,19 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - emqx_common_test_helpers:start_apps([emqx_eviction_agent]), - Config. + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_eviction_agent + ], + #{ + work_dir => emqx_cth_suite:work_dir(Config) + } + ), + [{apps, Apps} | Config]. -end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_eviction_agent]). +end_per_suite(Config) -> + ok = emqx_cth_suite:stop(?config(apps, Config)). init_per_testcase(Case, Config) -> _ = emqx_eviction_agent:disable(test_eviction), @@ -41,10 +53,17 @@ init_per_testcase(Case, Config) -> start_slave(Case, Config). start_slave(t_explicit_session_takeover, Config) -> + NodeNames = + [ + t_explicit_session_takeover_donor, + t_explicit_session_takeover_recipient + ], ClusterNodes = emqx_eviction_agent_test_helpers:start_cluster( - [{evacuate_test1, 2883}, {evacuate_test2, 3883}], - [emqx_eviction_agent] + Config, + NodeNames, + [emqx_conf, emqx, emqx_eviction_agent] ), + ok = snabbkaffe:start_trace(), [{evacuate_nodes, ClusterNodes} | Config]; start_slave(_Case, Config) -> Config. 
@@ -56,8 +75,7 @@ end_per_testcase(TestCase, Config) ->
 
 stop_slave(t_explicit_session_takeover, Config) ->
     emqx_eviction_agent_test_helpers:stop_cluster(
-        ?config(evacuate_nodes, Config),
-        [emqx_eviction_agent]
+        ?config(evacuate_nodes, Config)
     );
 stop_slave(_Case, _Config) ->
     ok.
@@ -77,13 +95,16 @@ t_enable_disable(_Config) ->
     {ok, C0} = emqtt_connect(),
     ok = emqtt:disconnect(C0),
 
+    %% Enable
     ok = emqx_eviction_agent:enable(test_eviction, undefined),
 
+    %% Can't enable with different kind
     ?assertMatch(
         {error, eviction_agent_busy},
         emqx_eviction_agent:enable(bar, undefined)
     ),
 
+    %% Enable with the same kind but different server ref
     ?assertMatch(
         ok,
         emqx_eviction_agent:enable(test_eviction, <<"srv">>)
@@ -99,6 +120,39 @@
         emqtt_connect()
     ),
 
+    %% Enable with the same kind and server ref and explicit options
+    ?assertMatch(
+        ok,
+        emqx_eviction_agent:enable(test_eviction, <<"srv">>, #{allow_connections => false})
+    ),
+
+    ?assertMatch(
+        {enabled, #{}},
+        emqx_eviction_agent:status()
+    ),
+
+    ?assertMatch(
+        {error, {use_another_server, #{}}},
+        emqtt_connect()
+    ),
+
+    %% Enable with the same kind and server ref and permissive options
+    ?assertMatch(
+        ok,
+        emqx_eviction_agent:enable(test_eviction, <<"srv">>, #{allow_connections => true})
+    ),
+
+    ?assertMatch(
+        {enabled, #{}},
+        emqx_eviction_agent:status()
+    ),
+
+    ?assertMatch(
+        {ok, _},
+        emqtt_connect()
+    ),
+
+    %% Can't disable using a different kind
     ?assertMatch(
         {error, eviction_agent_busy},
         emqx_eviction_agent:disable(bar)
diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl
index 3fe15e53a..341f543a7 100644
--- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl
+++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_api_SUITE.erl
@@ -22,12 +22,23 @@
 all() ->
     emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) -> - emqx_mgmt_api_test_util:init_suite([emqx_eviction_agent]), - Config. + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_eviction_agent, + emqx_management, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} + ], + #{ + work_dir => emqx_cth_suite:work_dir(Config) + } + ), + _ = emqx_common_test_http:create_default_app(), + [{apps, Apps} | Config]. end_per_suite(Config) -> - emqx_mgmt_api_test_util:end_suite([emqx_eviction_agent]), - Config. + emqx_common_test_http:delete_default_app(), + emqx_cth_suite:stop(?config(apps, Config)). %%-------------------------------------------------------------------- %% Tests diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl index b4d7ceb08..d87429339 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_channel_SUITE.erl @@ -22,12 +22,20 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - emqx_common_test_helpers:start_apps([emqx_conf, emqx_eviction_agent]), - {ok, _} = emqx:update_config([rpc, port_discovery], manual), - Config. + Apps = emqx_cth_suite:start( + [ + emqx_conf, + emqx, + emqx_eviction_agent + ], + #{ + work_dir => emqx_cth_suite:work_dir(Config) + } + ), + [{apps, Apps} | Config]. -end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_eviction_agent, emqx_conf]). +end_per_suite(Config) -> + ok = emqx_cth_suite:stop(?config(apps, Config)). 
%%-------------------------------------------------------------------- %% Tests diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl index 4cfb2fff5..70abd076f 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_cli_SUITE.erl @@ -14,13 +14,21 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - emqx_common_test_helpers:start_apps([emqx_eviction_agent]), - Config. + Apps = emqx_cth_suite:start( + [ + emqx, + emqx_eviction_agent + ], + #{ + work_dir => emqx_cth_suite:work_dir(Config) + } + ), + [{apps, Apps} | Config]. end_per_suite(Config) -> _ = emqx_eviction_agent:disable(foo), - emqx_common_test_helpers:stop_apps([emqx_eviction_agent]), - Config. + + emqx_cth_suite:stop(?config(apps, Config)). %%-------------------------------------------------------------------- %% Tests diff --git a/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl b/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl index b3b3e8767..052f37952 100644 --- a/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl +++ b/apps/emqx_eviction_agent/test/emqx_eviction_agent_test_helpers.erl @@ -15,13 +15,15 @@ emqtt_try_connect/1, - start_cluster/2, start_cluster/3, - stop_cluster/2, + stop_cluster/1, case_specific_node_name/2, case_specific_node_name/3, - concat_atoms/1 + concat_atoms/1, + + get_mqtt_port/2, + nodes_with_mqtt_tcp_ports/1 ]). emqtt_connect() -> @@ -83,52 +85,24 @@ emqtt_try_connect(Opts) -> Error end. -start_cluster(NamesWithPorts, Apps) -> - start_cluster(NamesWithPorts, Apps, []). 
- -start_cluster(NamesWithPorts, Apps, Env) -> - Specs = lists:map( - fun({ShortName, Port}) -> - {core, ShortName, #{listener_ports => [{tcp, Port}]}} - end, - NamesWithPorts +start_cluster(Config, NodeNames = [Node1 | _], Apps) -> + Spec = #{ + role => core, + join_to => emqx_cth_cluster:node_name(Node1), + listeners => true, + apps => Apps + }, + Cluster = [{NodeName, Spec} || NodeName <- NodeNames], + ClusterNodes = emqx_cth_cluster:start( + Cluster, + %% Use Node1 to scope the work dirs for all the nodes + #{work_dir => emqx_cth_suite:work_dir(Node1, Config)} ), - Opts0 = [ - {env, Env}, - {apps, Apps}, - {conf, - [{[listeners, Proto, default, enable], false} || Proto <- [ssl, ws, wss]] ++ - [{[rpc, mode], async}]} - ], - Cluster = emqx_common_test_helpers:emqx_cluster( - Specs, - Opts0 - ), - NodesWithPorts = [ - { - emqx_common_test_helpers:start_slave(Name, Opts), - proplists:get_value(Name, NamesWithPorts) - } - || {Name, Opts} <- Cluster - ], - NodesWithPorts. + nodes_with_mqtt_tcp_ports(ClusterNodes). -stop_cluster(NodesWithPorts, Apps) -> - lists:foreach( - fun({Node, _Port}) -> - lists:foreach( - fun(App) -> - rpc:call(Node, application, stop, [App]) - end, - Apps - ), - %% This sleep is just to make logs cleaner - ct:sleep(100), - _ = rpc:call(Node, emqx_common_test_helpers, stop_apps, []), - emqx_common_test_helpers:stop_slave(Node) - end, - NodesWithPorts - ). +stop_cluster(NamesWithPorts) -> + {Nodes, _Ports} = lists:unzip(NamesWithPorts), + ok = emqx_cth_cluster:stop(Nodes). case_specific_node_name(Module, Case) -> concat_atoms([Module, '__', Case]). @@ -145,3 +119,15 @@ concat_atoms(Atoms) -> ) ) ). + +get_mqtt_port(Node, Type) -> + {_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]), + Port. + +nodes_with_mqtt_tcp_ports(Nodes) -> + lists:map( + fun(Node) -> + {Node, get_mqtt_port(Node, tcp)} + end, + Nodes + ). 
diff --git a/apps/emqx_exhook/priv/protos/exhook.proto b/apps/emqx_exhook/priv/protos/exhook.proto index 928e9b20b..e5d7b3606 100644 --- a/apps/emqx_exhook/priv/protos/exhook.proto +++ b/apps/emqx_exhook/priv/protos/exhook.proto @@ -460,8 +460,11 @@ message SubOpts { // The QoS level uint32 qos = 1; + // deprecated + reserved 2; + reserved "share"; // The group name for shared subscription - string share = 2; + // string share = 2; // The Retain Handling option (MQTT v5.0) // diff --git a/apps/emqx_exhook/src/emqx_exhook.app.src b/apps/emqx_exhook/src/emqx_exhook.app.src index 8a57249e9..79c34e36b 100644 --- a/apps/emqx_exhook/src/emqx_exhook.app.src +++ b/apps/emqx_exhook/src/emqx_exhook.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_exhook, [ {description, "EMQX Extension for Hook"}, - {vsn, "5.0.14"}, + {vsn, "5.0.15"}, {modules, []}, {registered, []}, {mod, {emqx_exhook_app, []}}, diff --git a/apps/emqx_exhook/src/emqx_exhook_handler.erl b/apps/emqx_exhook/src/emqx_exhook_handler.erl index b4358969d..2bcb91b12 100644 --- a/apps/emqx_exhook/src/emqx_exhook_handler.erl +++ b/apps/emqx_exhook/src/emqx_exhook_handler.erl @@ -143,7 +143,7 @@ on_client_authorize(ClientInfo, Action, Topic, Result) -> Req = #{ clientinfo => clientinfo(ClientInfo), type => Type, - topic => Topic, + topic => emqx_topic:maybe_format_share(Topic), result => Bool }, case @@ -191,15 +191,15 @@ on_session_created(ClientInfo, _SessInfo) -> on_session_subscribed(ClientInfo, Topic, SubOpts) -> Req = #{ clientinfo => clientinfo(ClientInfo), - topic => Topic, - subopts => maps:with([qos, share, rh, rap, nl], SubOpts) + topic => emqx_topic:maybe_format_share(Topic), + subopts => maps:with([qos, rh, rap, nl], SubOpts) }, cast('session.subscribed', Req). on_session_unsubscribed(ClientInfo, Topic, _SubOpts) -> Req = #{ clientinfo => clientinfo(ClientInfo), - topic => Topic + topic => emqx_topic:maybe_format_share(Topic) }, cast('session.unsubscribed', Req). 
@@ -413,7 +413,13 @@ enrich_header(Headers, Message) -> end. topicfilters(Tfs) when is_list(Tfs) -> - [#{name => Topic, qos => Qos} || {Topic, #{qos := Qos}} <- Tfs]. + GetQos = fun(SubOpts) -> + maps:get(qos, SubOpts, 0) + end, + [ + #{name => emqx_topic:maybe_format_share(Topic), qos => GetQos(SubOpts)} + || {Topic, SubOpts} <- Tfs + ]. ntoa({0, 0, 0, 0, 0, 16#ffff, AB, CD}) -> list_to_binary(inet_parse:ntoa({AB bsr 8, AB rem 256, CD bsr 8, CD rem 256})); diff --git a/apps/emqx_exhook/test/props/prop_exhook_hooks.erl b/apps/emqx_exhook/test/props/prop_exhook_hooks.erl index 2c9b5bb06..cf48fff80 100644 --- a/apps/emqx_exhook/test/props/prop_exhook_hooks.erl +++ b/apps/emqx_exhook/test/props/prop_exhook_hooks.erl @@ -546,8 +546,7 @@ subopts(SubOpts) -> qos => maps:get(qos, SubOpts, 0), rh => maps:get(rh, SubOpts, 0), rap => maps:get(rap, SubOpts, 0), - nl => maps:get(nl, SubOpts, 0), - share => maps:get(share, SubOpts, <<>>) + nl => maps:get(nl, SubOpts, 0) }. authresult_to_bool(AuthResult) -> diff --git a/apps/emqx_gateway/src/emqx_gateway.app.src b/apps/emqx_gateway/src/emqx_gateway.app.src index df681b00f..81a2e65ed 100644 --- a/apps/emqx_gateway/src/emqx_gateway.app.src +++ b/apps/emqx_gateway/src/emqx_gateway.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_gateway, [ {description, "The Gateway management application"}, - {vsn, "0.1.27"}, + {vsn, "0.1.28"}, {registered, []}, {mod, {emqx_gateway_app, []}}, {applications, [kernel, stdlib, emqx, emqx_auth, emqx_ctl]}, diff --git a/apps/emqx_gateway/src/emqx_gateway_api.erl b/apps/emqx_gateway/src/emqx_gateway_api.erl index 61f29059f..ae2533f97 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api.erl @@ -93,10 +93,9 @@ gateways(get, Request) -> gateway(get, #{bindings := #{name := Name}}) -> try - GwName = gw_name(Name), - case emqx_gateway:lookup(GwName) of + case emqx_gateway:lookup(Name) of undefined -> - {200, #{name => GwName, status => 
unloaded}}; + {200, #{name => Name, status => unloaded}}; Gateway -> GwConf = emqx_gateway_conf:gateway(Name), GwInfo0 = emqx_gateway_utils:unix_ts_to_rfc3339( @@ -125,15 +124,14 @@ gateway(put, #{ }) -> GwConf = maps:without([<<"name">>], GwConf0), try - GwName = gw_name(Name), LoadOrUpdateF = - case emqx_gateway:lookup(GwName) of + case emqx_gateway:lookup(Name) of undefined -> fun emqx_gateway_conf:load_gateway/2; _ -> fun emqx_gateway_conf:update_gateway/2 end, - case LoadOrUpdateF(GwName, GwConf) of + case LoadOrUpdateF(Name, GwConf) of {ok, _} -> {204}; {error, Reason} -> @@ -148,12 +146,11 @@ gateway(put, #{ gateway_enable(put, #{bindings := #{name := Name, enable := Enable}}) -> try - GwName = gw_name(Name), - case emqx_gateway:lookup(GwName) of + case emqx_gateway:lookup(Name) of undefined -> return_http_error(404, <<"NOT FOUND">>); _Gateway -> - {ok, _} = emqx_gateway_conf:update_gateway(GwName, #{<<"enable">> => Enable}), + {ok, _} = emqx_gateway_conf:update_gateway(Name, #{<<"enable">> => Enable}), {204} end catch @@ -161,14 +158,6 @@ gateway_enable(put, #{bindings := #{name := Name, enable := Enable}}) -> return_http_error(404, <<"NOT FOUND">>) end. --spec gw_name(binary()) -> stomp | coap | lwm2m | mqttsn | exproto | no_return(). -gw_name(<<"stomp">>) -> stomp; -gw_name(<<"coap">>) -> coap; -gw_name(<<"lwm2m">>) -> lwm2m; -gw_name(<<"mqttsn">>) -> mqttsn; -gw_name(<<"exproto">>) -> exproto; -gw_name(_Else) -> throw(not_found). 
- %%-------------------------------------------------------------------- %% Swagger defines %%-------------------------------------------------------------------- @@ -249,7 +238,7 @@ params_gateway_name_in_path() -> [ {name, mk( - binary(), + hoconsc:enum(emqx_gateway_schema:gateway_names()), #{ in => path, desc => ?DESC(gateway_name_in_qs), @@ -390,7 +379,10 @@ fields(Gw) when Gw == mqttsn; Gw == coap; Gw == lwm2m; - Gw == exproto + Gw == exproto; + Gw == gbt32960; + Gw == ocpp; + Gw == jt808 -> [{name, mk(Gw, #{desc => ?DESC(gateway_name)})}] ++ convert_listener_struct(emqx_gateway_schema:gateway_schema(Gw)); @@ -399,7 +391,10 @@ fields(Gw) when Gw == update_mqttsn; Gw == update_coap; Gw == update_lwm2m; - Gw == update_exproto + Gw == update_exproto; + Gw == update_gbt32960; + Gw == update_ocpp; + Gw == update_jt808 -> "update_" ++ GwStr = atom_to_list(Gw), Gw1 = list_to_existing_atom(GwStr), @@ -408,14 +403,18 @@ fields(Listener) when Listener == tcp_listener; Listener == ssl_listener; Listener == udp_listener; - Listener == dtls_listener + Listener == dtls_listener; + Listener == ws_listener; + Listener == wss_listener -> Type = case Listener of tcp_listener -> tcp; ssl_listener -> ssl; udp_listener -> udp; - dtls_listener -> dtls + dtls_listener -> dtls; + ws_listener -> ws; + wss_listener -> wss end, [ {id, @@ -447,31 +446,30 @@ fields(gateway_stats) -> [{key, mk(binary(), #{})}]. 
schema_load_or_update_gateways_conf() -> + Names = emqx_gateway_schema:gateway_names(), emqx_dashboard_swagger:schema_with_examples( - hoconsc:union([ - ref(?MODULE, stomp), - ref(?MODULE, mqttsn), - ref(?MODULE, coap), - ref(?MODULE, lwm2m), - ref(?MODULE, exproto), - ref(?MODULE, update_stomp), - ref(?MODULE, update_mqttsn), - ref(?MODULE, update_coap), - ref(?MODULE, update_lwm2m), - ref(?MODULE, update_exproto) - ]), + hoconsc:union( + [ + ref(?MODULE, Name) + || Name <- + Names ++ + [ + erlang:list_to_existing_atom("update_" ++ erlang:atom_to_list(Name)) + || Name <- Names + ] + ] + ), examples_update_gateway_confs() ). schema_gateways_conf() -> emqx_dashboard_swagger:schema_with_examples( - hoconsc:union([ - ref(?MODULE, stomp), - ref(?MODULE, mqttsn), - ref(?MODULE, coap), - ref(?MODULE, lwm2m), - ref(?MODULE, exproto) - ]), + hoconsc:union( + [ + ref(?MODULE, Name) + || Name <- emqx_gateway_schema:gateway_names() + ] + ), examples_gateway_confs() ). @@ -502,14 +500,18 @@ listeners_schema(?R_REF(_Mod, tcp_udp_listeners)) -> ref(udp_listener), ref(dtls_listener) ]) - ). + ); +listeners_schema(?R_REF(_Mod, ws_listeners)) -> + hoconsc:array(hoconsc:union([ref(ws_listener), ref(wss_listener)])). listener_schema() -> hoconsc:union([ ref(?MODULE, tcp_listener), ref(?MODULE, ssl_listener), ref(?MODULE, udp_listener), - ref(?MODULE, dtls_listener) + ref(?MODULE, dtls_listener), + ref(?MODULE, ws_listener), + ref(?MODULE, wss_listener) ]). 
 %%--------------------------------------------------------------------
@@ -756,6 +758,59 @@ examples_gateway_confs() ->
                     }
                 ]
             }
+        },
+        gbt32960_gateway =>
+            #{
+                summary => <<"A simple GBT32960 gateway config">>,
+                value =>
+                    #{
+                        enable => true,
+                        name => <<"gbt32960">>,
+                        enable_stats => true,
+                        mountpoint => <<"gbt32960/${clientid}">>,
+                        retry_interval => <<"8s">>,
+                        max_retry_times => 3,
+                        message_queue_len => 10,
+                        listeners =>
+                            [
+                                #{
+                                    type => <<"tcp">>,
+                                    name => <<"default">>,
+                                    bind => <<"7325">>,
+                                    max_connections => 1024000,
+                                    max_conn_rate => 1000
+                                }
+                            ]
+                    }
+            },
+        ocpp_gateway =>
+            #{
+                summary => <<"A simple OCPP gateway config">>,
+                value =>
+                    #{
+                        enable => true,
+                        name => <<"ocpp">>,
+                        enable_stats => true,
+                        mountpoint => <<"ocpp/">>,
+                        default_heartbeat_interval => <<"60s">>,
+                        upstream =>
+                            #{
+                                topic => <<"cp/${cid}">>,
+                                reply_topic => <<"cp/${cid}/reply">>,
+                                error_topic => <<"cp/${cid}/error">>
+                            },
+                        dnstream => #{topic => <<"cp/${cid}">>},
+                        message_format_checking => disable,
+                        listeners =>
+                            [
+                                #{
+                                    type => <<"ws">>,
+                                    name => <<"default">>,
+                                    bind => <<"33033">>,
+                                    max_connections => 1024000
+                                }
+                            ]
+                    }
+            }
     }
 }.
@@ -854,5 +909,37 @@ examples_update_gateway_confs() ->
                 handler =>
                     #{address => <<"http://127.0.0.1:9001">>}
             }
+        },
+        gbt32960_gateway =>
+            #{
+                summary => <<"A simple GBT32960 gateway config">>,
+                value =>
+                    #{
+                        enable => true,
+                        enable_stats => true,
+                        mountpoint => <<"gbt32960/${clientid}">>,
+                        retry_interval => <<"8s">>,
+                        max_retry_times => 3,
+                        message_queue_len => 10
+                    }
+            },
+        ocpp_gateway =>
+            #{
+                summary => <<"A simple OCPP gateway config">>,
+                value =>
+                    #{
+                        enable => true,
+                        enable_stats => true,
+                        mountpoint => <<"ocpp/">>,
+                        default_heartbeat_interval => <<"60s">>,
+                        upstream =>
+                            #{
+                                topic => <<"cp/${cid}">>,
+                                reply_topic => <<"cp/${cid}/reply">>,
+                                error_topic => <<"cp/${cid}/error">>
+                            },
+                        dnstream => #{topic => <<"cp/${cid}">>},
+                        message_format_checking => disable
+                    }
+            }
     }
 }.
diff --git a/apps/emqx_gateway/src/emqx_gateway_api_authn.erl b/apps/emqx_gateway/src/emqx_gateway_api_authn.erl index 539d65112..55672318a 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_authn.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_authn.erl @@ -327,7 +327,7 @@ params_gateway_name_in_path() -> [ {name, mk( - binary(), + hoconsc:enum(emqx_gateway_schema:gateway_names()), #{ in => path, desc => ?DESC(emqx_gateway_api, gateway_name_in_qs), diff --git a/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl b/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl index 30aeaf8fe..321b145ac 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl @@ -52,7 +52,7 @@ %%-------------------------------------------------------------------- api_spec() -> - emqx_dashboard_swagger:spec(?MODULE, #{check_schema => false}). + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true, translate_body => true}). paths() -> [ @@ -157,7 +157,7 @@ params_gateway_name_in_path() -> [ {name, mk( - binary(), + hoconsc:enum(emqx_gateway_schema:gateway_names()), #{ in => path, desc => ?DESC(emqx_gateway_api, gateway_name_in_qs), diff --git a/apps/emqx_gateway/src/emqx_gateway_api_clients.erl b/apps/emqx_gateway/src/emqx_gateway_api_clients.erl index b698446b9..aedb4b0fa 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_clients.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_clients.erl @@ -33,7 +33,7 @@ ] ). 
-%% minirest/dashbaord_swagger behaviour callbacks +%% minirest/dashboard_swagger behaviour callbacks -export([ api_spec/0, paths/0, @@ -700,7 +700,7 @@ params_gateway_name_in_path() -> [ {name, mk( - binary(), + hoconsc:enum(emqx_gateway_schema:gateway_names()), #{ in => path, desc => ?DESC(emqx_gateway_api, gateway_name) diff --git a/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl b/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl index 046e23300..284576983 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl @@ -609,7 +609,7 @@ params_gateway_name_in_path() -> [ {name, mk( - binary(), + hoconsc:enum(emqx_gateway_schema:gateway_names()), #{ in => path, desc => ?DESC(emqx_gateway_api, gateway_name_in_qs), diff --git a/apps/emqx_gateway/src/emqx_gateway_cm.erl b/apps/emqx_gateway/src/emqx_gateway_cm.erl index 7df3b2552..2c8d708df 100644 --- a/apps/emqx_gateway/src/emqx_gateway_cm.erl +++ b/apps/emqx_gateway/src/emqx_gateway_cm.erl @@ -23,7 +23,7 @@ -behaviour(gen_server). --include("include/emqx_gateway.hrl"). +-include("emqx_gateway.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). diff --git a/apps/emqx_gateway/src/emqx_gateway_cm_registry.erl b/apps/emqx_gateway/src/emqx_gateway_cm_registry.erl index f5bede084..f7a72af5f 100644 --- a/apps/emqx_gateway/src/emqx_gateway_cm_registry.erl +++ b/apps/emqx_gateway/src/emqx_gateway_cm_registry.erl @@ -17,7 +17,7 @@ %% @doc The gateway connection registry -module(emqx_gateway_cm_registry). --include("include/emqx_gateway.hrl"). +-include("emqx_gateway.hrl"). -behaviour(gen_server). diff --git a/apps/emqx_gateway/src/emqx_gateway_ctx.erl b/apps/emqx_gateway/src/emqx_gateway_ctx.erl index 11ad55d3e..6df1a8aae 100644 --- a/apps/emqx_gateway/src/emqx_gateway_ctx.erl +++ b/apps/emqx_gateway/src/emqx_gateway_ctx.erl @@ -17,7 +17,7 @@ %% @doc The gateway instance context -module(emqx_gateway_ctx). 
--include("include/emqx_gateway.hrl"). +-include("emqx_gateway.hrl"). %% @doc The running context for a Connection/Channel process. %% diff --git a/apps/emqx_gateway/src/emqx_gateway_gw_sup.erl b/apps/emqx_gateway/src/emqx_gateway_gw_sup.erl index 0f7ff4ffc..345d94432 100644 --- a/apps/emqx_gateway/src/emqx_gateway_gw_sup.erl +++ b/apps/emqx_gateway/src/emqx_gateway_gw_sup.erl @@ -23,7 +23,7 @@ -behaviour(supervisor). --include("include/emqx_gateway.hrl"). +-include("emqx_gateway.hrl"). -export([start_link/1]). diff --git a/apps/emqx_gateway/src/emqx_gateway_http.erl b/apps/emqx_gateway/src/emqx_gateway_http.erl index e8f0e034f..677176acc 100644 --- a/apps/emqx_gateway/src/emqx_gateway_http.erl +++ b/apps/emqx_gateway/src/emqx_gateway_http.erl @@ -17,7 +17,7 @@ %% @doc Gateway Interface Module for HTTP-APIs -module(emqx_gateway_http). --include("include/emqx_gateway.hrl"). +-include("emqx_gateway.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("emqx_auth/include/emqx_authn_chains.hrl"). @@ -160,10 +160,10 @@ cluster_gateway_status(GwName) -> max_connections_count(Config) -> Listeners = emqx_gateway_utils:normalize_config(Config), lists:foldl( - fun({_, _, _, SocketOpts, _}, Acc) -> + fun({_, _, _, Conf0}, Acc) -> emqx_gateway_utils:plus_max_connections( Acc, - proplists:get_value(max_connections, SocketOpts, 0) + maps:get(max_connections, Conf0, 0) ) end, 0, @@ -184,7 +184,7 @@ current_connections_count(GwName) -> get_listeners_status(GwName, Config) -> Listeners = emqx_gateway_utils:normalize_config(Config), lists:map( - fun({Type, LisName, ListenOn, _, _}) -> + fun({Type, LisName, ListenOn, _}) -> Name0 = listener_id(GwName, Type, LisName), Name = {Name0, ListenOn}, LisO = #{id => Name0, type => Type, name => LisName}, @@ -513,29 +513,23 @@ codestr(501) -> 'NOT_IMPLEMENTED'. fmtstr(Fmt, Args) -> lists:flatten(io_lib:format(Fmt, Args)). --spec with_authn(binary(), function()) -> any(). +-spec with_authn(atom(), function()) -> any(). 
with_authn(GwName0, Fun) -> with_gateway(GwName0, fun(GwName, _GwConf) -> Authn = emqx_gateway_http:authn(GwName), Fun(GwName, Authn) end). --spec with_listener_authn(binary(), binary(), function()) -> any(). +-spec with_listener_authn(atom(), binary(), function()) -> any(). with_listener_authn(GwName0, Id, Fun) -> with_gateway(GwName0, fun(GwName, _GwConf) -> Authn = emqx_gateway_http:authn(GwName, Id), Fun(GwName, Authn) end). --spec with_gateway(binary(), function()) -> any(). -with_gateway(GwName0, Fun) -> +-spec with_gateway(atom(), function()) -> any(). +with_gateway(GwName, Fun) -> try - GwName = - try - binary_to_existing_atom(GwName0) - catch - _:_ -> error(badname) - end, case emqx_gateway:lookup(GwName) of undefined -> return_http_error(404, "Gateway not loaded"); diff --git a/apps/emqx_gateway/src/emqx_gateway_insta_sup.erl b/apps/emqx_gateway/src/emqx_gateway_insta_sup.erl index 8dce8582d..2898298a7 100644 --- a/apps/emqx_gateway/src/emqx_gateway_insta_sup.erl +++ b/apps/emqx_gateway/src/emqx_gateway_insta_sup.erl @@ -19,7 +19,7 @@ -behaviour(gen_server). --include("include/emqx_gateway.hrl"). +-include("emqx_gateway.hrl"). -include_lib("emqx/include/logger.hrl"). %% APIs diff --git a/apps/emqx_gateway/src/emqx_gateway_registry.erl b/apps/emqx_gateway/src/emqx_gateway_registry.erl index 50aad9445..20a3e1c42 100644 --- a/apps/emqx_gateway/src/emqx_gateway_registry.erl +++ b/apps/emqx_gateway/src/emqx_gateway_registry.erl @@ -17,7 +17,7 @@ %% @doc The Registry Centre of Gateway -module(emqx_gateway_registry). --include("include/emqx_gateway.hrl"). +-include("emqx_gateway.hrl"). -behaviour(gen_server). diff --git a/apps/emqx_gateway/src/emqx_gateway_schema.erl b/apps/emqx_gateway/src/emqx_gateway_schema.erl index ed149d1f5..c0abb48ce 100644 --- a/apps/emqx_gateway/src/emqx_gateway_schema.erl +++ b/apps/emqx_gateway/src/emqx_gateway_schema.erl @@ -45,12 +45,15 @@ ip_port/0 ]). -elvis([{elvis_style, dont_repeat_yourself, disable}]). 
+-elvis([{elvis_style, invalid_dynamic_call, disable}]). -export([namespace/0, roots/0, fields/1, desc/1, tags/0]). -export([proxy_protocol_opts/0]). --export([mountpoint/0, mountpoint/1, gateway_common_options/0, gateway_schema/1]). +-export([mountpoint/0, mountpoint/1, gateway_common_options/0, gateway_schema/1, gateway_names/0]). + +-export([ws_listener/0, wss_listener/0, ws_opts/2]). namespace() -> gateway. @@ -123,6 +126,16 @@ fields(ssl_listener) -> } )} ]; +fields(ws_listener) -> + emqx_gateway_schema:ws_listener() ++ + [{websocket, sc(ref(websocket), #{})}]; +fields(wss_listener) -> + emqx_gateway_schema:wss_listener() ++ + [{websocket, sc(ref(websocket), #{})}]; +fields(websocket) -> + DefaultPath = <<>>, + SubProtocols = <<>>, + emqx_gateway_schema:ws_opts(DefaultPath, SubProtocols); fields(udp_listener) -> [ %% some special configs for udp listener @@ -186,6 +199,8 @@ desc(udp_opts) -> "Settings for UDP sockets."; desc(dtls_opts) -> "Settings for DTLS protocol."; +desc(websocket) -> + "Websocket options"; desc(_) -> undefined. @@ -246,6 +261,134 @@ mountpoint(Default) -> } ). +ws_listener() -> + [ + {acceptors, sc(integer(), #{default => 16, desc => ?DESC(tcp_listener_acceptors)})} + ] ++ + tcp_opts() ++ + proxy_protocol_opts() ++ + common_listener_opts(). + +wss_listener() -> + ws_listener() ++ + [ + {ssl_options, + sc( + hoconsc:ref(emqx_schema, "listener_wss_opts"), + #{ + desc => ?DESC(ssl_listener_options), + validator => fun emqx_schema:validate_server_ssl_opts/1 + } + )} + ]. 
+ +ws_opts(DefaultPath, DefaultSubProtocols) when + is_binary(DefaultPath), is_binary(DefaultSubProtocols) +-> + [ + {"path", + sc( + string(), + #{ + default => DefaultPath, + desc => ?DESC(fields_ws_opts_path) + } + )}, + {"piggyback", + sc( + hoconsc:enum([single, multiple]), + #{ + default => single, + desc => ?DESC(fields_ws_opts_piggyback) + } + )}, + {"compress", + sc( + boolean(), + #{ + default => false, + desc => ?DESC(fields_ws_opts_compress) + } + )}, + {"idle_timeout", + sc( + duration(), + #{ + default => <<"7200s">>, + desc => ?DESC(fields_ws_opts_idle_timeout) + } + )}, + {"max_frame_size", + sc( + hoconsc:union([infinity, integer()]), + #{ + default => infinity, + desc => ?DESC(fields_ws_opts_max_frame_size) + } + )}, + {"fail_if_no_subprotocol", + sc( + boolean(), + #{ + default => true, + desc => ?DESC(fields_ws_opts_fail_if_no_subprotocol) + } + )}, + {"supported_subprotocols", + sc( + emqx_schema:comma_separated_list(), + #{ + default => DefaultSubProtocols, + desc => ?DESC(fields_ws_opts_supported_subprotocols) + } + )}, + {"check_origin_enable", + sc( + boolean(), + #{ + default => false, + desc => ?DESC(fields_ws_opts_check_origin_enable) + } + )}, + {"allow_origin_absence", + sc( + boolean(), + #{ + default => true, + desc => ?DESC(fields_ws_opts_allow_origin_absence) + } + )}, + {"check_origins", + sc( + emqx_schema:comma_separated_binary(), + #{ + default => <<"http://localhost:18083, http://127.0.0.1:18083">>, + desc => ?DESC(fields_ws_opts_check_origins) + } + )}, + {"proxy_address_header", + sc( + string(), + #{ + default => <<"x-forwarded-for">>, + desc => ?DESC(fields_ws_opts_proxy_address_header) + } + )}, + {"proxy_port_header", + sc( + string(), + #{ + default => <<"x-forwarded-port">>, + desc => ?DESC(fields_ws_opts_proxy_port_header) + } + )}, + {"deflate_opts", + sc( + ref(emqx_schema, "deflate_opts"), + #{} + )} + ]. 
+
 common_listener_opts() ->
     [
         {enable,
@@ -324,7 +467,7 @@ proxy_protocol_opts() ->
             sc(
                 duration(),
                 #{
-                    default => <<"15s">>,
+                    default => <<"3s">>,
                     desc => ?DESC(tcp_listener_proxy_protocol_timeout)
                 }
             )}
@@ -333,13 +476,21 @@ proxy_protocol_opts() ->
 
 %%--------------------------------------------------------------------
 %% dynamic schemas
 
-%% FIXME: don't hardcode the gateway names
-gateway_schema(stomp) -> emqx_stomp_schema:fields(stomp);
-gateway_schema(mqttsn) -> emqx_mqttsn_schema:fields(mqttsn);
-gateway_schema(coap) -> emqx_coap_schema:fields(coap);
-gateway_schema(lwm2m) -> emqx_lwm2m_schema:fields(lwm2m);
-gateway_schema(exproto) -> emqx_exproto_schema:fields(exproto).
+gateway_schema(Name) ->
+    case emqx_gateway_utils:find_gateway_definition(Name) of
+        {ok, #{config_schema_module := SchemaMod}} ->
+            SchemaMod:fields(Name);
+        {error, _} = Error ->
+            throw(Error)
+    end.
+gateway_names() ->
+    Definitions = emqx_gateway_utils:find_gateway_definitions(),
+    [
+        Name
+     || #{name := Name} = Definition <- Definitions,
+        emqx_gateway_utils:check_gateway_edition(Definition)
+    ].
 
 %%--------------------------------------------------------------------
 %% helpers
diff --git a/apps/emqx_gateway/src/emqx_gateway_sup.erl b/apps/emqx_gateway/src/emqx_gateway_sup.erl
index 4e928bbf9..ffb7d9220 100644
--- a/apps/emqx_gateway/src/emqx_gateway_sup.erl
+++ b/apps/emqx_gateway/src/emqx_gateway_sup.erl
@@ -18,7 +18,7 @@
 -behaviour(supervisor).
 
--include("include/emqx_gateway.hrl").
+-include("emqx_gateway.hrl").
 
 -export([start_link/0]).
 
diff --git a/apps/emqx_gateway/src/emqx_gateway_utils.erl b/apps/emqx_gateway/src/emqx_gateway_utils.erl
index 72751297b..ed3f10594 100644
--- a/apps/emqx_gateway/src/emqx_gateway_utils.erl
+++ b/apps/emqx_gateway/src/emqx_gateway_utils.erl
@@ -45,8 +45,10 @@
     global_chain/1,
     listener_chain/3,
     find_gateway_definitions/0,
+    find_gateway_definition/1,
     plus_max_connections/2,
-    random_clientid/1
+    random_clientid/1,
+    check_gateway_edition/1
 ]).
 
-export([stringfy/1]). @@ -80,6 +82,11 @@ max_mailbox_size => 32000 }). +-define(IS_ESOCKD_LISTENER(T), + T == tcp orelse T == ssl orelse T == udp orelse T == dtls +). +-define(IS_COWBOY_LISTENER(T), T == ws orelse T == wss). + -elvis([{elvis_style, god_modules, disable}]). -spec childspec(supervisor:worker(), Mod :: atom()) -> @@ -133,7 +140,7 @@ find_sup_child(Sup, ChildId) -> {ok, [pid()]} | {error, term()} when - ModCfg :: #{frame_mod := atom(), chann_mod := atom()}. + ModCfg :: #{frame_mod := atom(), chann_mod := atom(), connection_mod => atom()}. start_listeners(Listeners, GwName, Ctx, ModCfg) -> start_listeners(Listeners, GwName, Ctx, ModCfg, []). @@ -165,13 +172,12 @@ start_listeners([L | Ls], GwName, Ctx, ModCfg, Acc) -> start_listener( GwName, Ctx, - {Type, LisName, ListenOn, SocketOpts, Cfg}, + {Type, LisName, ListenOn, Cfg}, ModCfg ) -> ListenOnStr = emqx_listeners:format_bind(ListenOn), ListenerId = emqx_gateway_utils:listener_id(GwName, Type, LisName), - NCfg = maps:merge(Cfg, ModCfg), case start_listener( GwName, @@ -179,8 +185,8 @@ start_listener( Type, LisName, ListenOn, - SocketOpts, - NCfg + Cfg, + ModCfg ) of {ok, Pid} -> @@ -197,15 +203,69 @@ start_listener( emqx_gateway_utils:supervisor_ret({error, Reason}) end. -start_listener(GwName, Ctx, Type, LisName, ListenOn, SocketOpts, Cfg) -> +start_listener(GwName, Ctx, Type, LisName, ListenOn, Confs, ModCfg) when + ?IS_ESOCKD_LISTENER(Type) +-> Name = emqx_gateway_utils:listener_id(GwName, Type, LisName), - NCfg = Cfg#{ - ctx => Ctx, - listener => {GwName, Type, LisName} - }, - NSocketOpts = merge_default(Type, SocketOpts), - MFA = {emqx_gateway_conn, start_link, [NCfg]}, - do_start_listener(Type, Name, ListenOn, NSocketOpts, MFA). 
+ SocketOpts = merge_default(Type, esockd_opts(Type, Confs)), + HighLevelCfgs0 = filter_out_low_level_opts(Type, Confs), + HighLevelCfgs = maps:merge( + HighLevelCfgs0, + ModCfg#{ + ctx => Ctx, + listener => {GwName, Type, LisName} + } + ), + ConnMod = maps:get(connection_mod, ModCfg, emqx_gateway_conn), + MFA = {ConnMod, start_link, [HighLevelCfgs]}, + do_start_listener(Type, Name, ListenOn, SocketOpts, MFA); +start_listener(GwName, Ctx, Type, LisName, ListenOn, Confs, ModCfg) when + ?IS_COWBOY_LISTENER(Type) +-> + Name = emqx_gateway_utils:listener_id(GwName, Type, LisName), + RanchOpts = ranch_opts(Type, ListenOn, Confs), + HighLevelCfgs0 = filter_out_low_level_opts(Type, Confs), + HighLevelCfgs = maps:merge( + HighLevelCfgs0, + ModCfg#{ + ctx => Ctx, + listener => {GwName, Type, LisName} + } + ), + WsOpts = ws_opts(Confs, HighLevelCfgs), + case Type of + ws -> cowboy:start_clear(Name, RanchOpts, WsOpts); + wss -> cowboy:start_tls(Name, RanchOpts, WsOpts) + end. + +filter_out_low_level_opts(Type, RawCfg = #{gw_conf := Conf0}) when ?IS_ESOCKD_LISTENER(Type) -> + EsockdKeys = [ + gw_conf, + bind, + acceptors, + max_connections, + max_conn_rate, + tcp_options, + ssl_options, + udp_options, + dtls_options + ], + Conf1 = maps:without(EsockdKeys, RawCfg), + maps:merge(Conf0, Conf1); +filter_out_low_level_opts(Type, RawCfg = #{gw_conf := Conf0}) when ?IS_COWBOY_LISTENER(Type) -> + CowboyKeys = [ + gw_conf, + bind, + acceptors, + max_connections, + max_conn_rate, + tcp_options, + ssl_options, + udp_options, + dtls_options + ], + Conf1 = maps:without(CowboyKeys, RawCfg), + maps:merge(Conf0, Conf1). merge_default(Udp, Options) -> {Key, Default} = @@ -244,8 +304,8 @@ stop_listeners(GwName, Listeners) -> lists:foreach(fun(L) -> stop_listener(GwName, L) end, Listeners). -spec stop_listener(GwName :: atom(), Listener :: tuple()) -> ok. 
-stop_listener(GwName, {Type, LisName, ListenOn, SocketOpts, Cfg}) -> - StopRet = stop_listener(GwName, Type, LisName, ListenOn, SocketOpts, Cfg), +stop_listener(GwName, {Type, LisName, ListenOn, Cfg}) -> + StopRet = stop_listener(GwName, Type, LisName, ListenOn, Cfg), ListenOnStr = emqx_listeners:format_bind(ListenOn), case StopRet of ok -> @@ -261,7 +321,7 @@ stop_listener(GwName, {Type, LisName, ListenOn, SocketOpts, Cfg}) -> end, StopRet. -stop_listener(GwName, Type, LisName, ListenOn, _SocketOpts, _Cfg) -> +stop_listener(GwName, Type, LisName, ListenOn, _Cfg) -> Name = emqx_gateway_utils:listener_id(GwName, Type, LisName), esockd:close(Name, ListenOn). @@ -378,8 +438,7 @@ stringfy(T) -> Type :: udp | tcp | ssl | dtls, Name :: atom(), ListenOn :: esockd:listen_on(), - SocketOpts :: esockd:option(), - Cfg :: map() + RawCfg :: map() }). normalize_config(RawConf) -> LisMap = maps:get(listeners, RawConf, #{}), @@ -391,14 +450,7 @@ normalize_config(RawConf) -> maps:fold( fun(Name, Confs, AccIn2) -> ListenOn = maps:get(bind, Confs), - SocketOpts = esockd_opts(Type, Confs), - RemainCfgs = maps:without( - [bind, tcp, ssl, udp, dtls] ++ - proplists:get_keys(SocketOpts), - Confs - ), - Cfg = maps:merge(Cfg0, RemainCfgs), - [{Type, Name, ListenOn, SocketOpts, Cfg} | AccIn2] + [{Type, Name, ListenOn, Confs#{gw_conf => Cfg0}} | AccIn2] end, [], Liss @@ -410,7 +462,7 @@ normalize_config(RawConf) -> ) ). 
-esockd_opts(Type, Opts0) -> +esockd_opts(Type, Opts0) when ?IS_ESOCKD_LISTENER(Type) -> Opts1 = maps:with( [ acceptors, @@ -425,37 +477,70 @@ esockd_opts(Type, Opts0) -> maps:to_list( case Type of tcp -> - Opts2#{tcp_options => sock_opts(tcp, Opts0)}; + Opts2#{tcp_options => sock_opts(tcp_options, Opts0)}; ssl -> Opts2#{ - tcp_options => sock_opts(tcp, Opts0), - ssl_options => ssl_opts(ssl, Opts0) + tcp_options => sock_opts(tcp_options, Opts0), + ssl_options => ssl_opts(ssl_options, Opts0) }; udp -> - Opts2#{udp_options => sock_opts(udp, Opts0)}; + Opts2#{udp_options => sock_opts(udp_options, Opts0)}; dtls -> Opts2#{ - udp_options => sock_opts(udp, Opts0), - dtls_options => ssl_opts(dtls, Opts0) + udp_options => sock_opts(udp_options, Opts0), + dtls_options => ssl_opts(dtls_options, Opts0) } end ). +sock_opts(Name, Opts) -> + maps:to_list( + maps:without( + [active_n, keepalive], + maps:get(Name, Opts, #{}) + ) + ). + ssl_opts(Name, Opts) -> Type = case Name of - ssl -> tls; - dtls -> dtls + ssl_options -> tls; + dtls_options -> dtls end, emqx_tls_lib:to_server_opts(Type, maps:get(Name, Opts, #{})). -sock_opts(Name, Opts) -> - maps:to_list( - maps:without( - [active_n], - maps:get(Name, Opts, #{}) - ) - ). +ranch_opts(Type, ListenOn, Opts) -> + NumAcceptors = maps:get(acceptors, Opts, 4), + MaxConnections = maps:get(max_connections, Opts, 1024), + SocketOpts1 = + case Type of + wss -> + sock_opts(tcp_options, Opts) ++ + proplists:delete(handshake_timeout, ssl_opts(ssl_options, Opts)); + ws -> + sock_opts(tcp_options, Opts) + end, + SocketOpts = ip_port(ListenOn) ++ proplists:delete(reuseaddr, SocketOpts1), + #{ + num_acceptors => NumAcceptors, + max_connections => MaxConnections, + handshake_timeout => maps:get(handshake_timeout, Opts, 15000), + socket_opts => SocketOpts + }. 
+
+ws_opts(Opts, Conf) ->
+    ConnMod = maps:get(connection_mod, Conf, emqx_gateway_conn),
+    WsPaths = [
+        {emqx_utils_maps:deep_get([websocket, path], Opts, "") ++ "/[...]", ConnMod, Conf}
+    ],
+    Dispatch = cowboy_router:compile([{'_', WsPaths}]),
+    ProxyProto = maps:get(proxy_protocol, Opts, false),
+    #{env => #{dispatch => Dispatch}, proxy_header => ProxyProto}.
+
+ip_port(Port) when is_integer(Port) ->
+    [{port, Port}];
+ip_port({Addr, Port}) ->
+    [{ip, Addr}, {port, Port}].
 
 %%--------------------------------------------------------------------
 %% Envs
@@ -538,6 +623,32 @@ find_gateway_definitions() ->
         )
     ).
 
+-spec find_gateway_definition(atom()) -> {ok, map()} | {error, term()}.
+find_gateway_definition(Name) ->
+    ensure_gateway_loaded(),
+    find_gateway_definition(Name, ignore_lib_apps(application:loaded_applications())).
+
+-dialyzer({no_match, [find_gateway_definition/2]}).
+find_gateway_definition(Name, [App | T]) ->
+    Attrs = find_attrs(App, gateway),
+    SearchFun = fun({_App, _Mod, #{name := GwName}}) ->
+        GwName =:= Name
+    end,
+    case lists:search(SearchFun, Attrs) of
+        {value, {_App, _Mod, Definition}} ->
+            case check_gateway_edition(Definition) of
+                true ->
+                    {ok, Definition};
+                _ ->
+                    {error, invalid_edition}
+            end;
+        false ->
+            find_gateway_definition(Name, T)
+    end;
+find_gateway_definition(_Name, []) ->
+    {error, not_found}.
+
+-dialyzer({no_match, [gateways/1]}).
 gateways([]) ->
     [];
 gateways([
@@ -550,7 +661,20 @@ gateways([
             }}
     | More
 ]) when is_atom(Name), is_atom(CbMod), is_atom(SchemaMod) ->
-    [Defination | gateways(More)].
+    case check_gateway_edition(Defination) of
+        true ->
+            [Defination | gateways(More)];
+        _ ->
+            gateways(More)
+    end.
+
+-if(?EMQX_RELEASE_EDITION == ee).
+check_gateway_edition(_Definition) ->
+    true.
+-else.
+check_gateway_edition(Definition) ->
+    ce == maps:get(edition, Definition, ce).
+-endif.
 
find_attrs(App, Def) -> [ @@ -624,7 +748,9 @@ ensure_gateway_loaded() -> emqx_gateway_stomp, emqx_gateway_coap, emqx_gateway_lwm2m, - emqx_gateway_mqttsn + emqx_gateway_mqttsn, + emqx_gateway_gbt32960, + emqx_gateway_ocpp ] ). diff --git a/apps/emqx_gateway/test/emqx_gateway_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_SUITE.erl index 9e0beb8cd..2574db644 100644 --- a/apps/emqx_gateway/test/emqx_gateway_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_SUITE.erl @@ -74,13 +74,7 @@ end_per_testcase(_TestCase, _Config) -> %%-------------------------------------------------------------------- t_registered_gateway(_) -> - [ - {coap, #{cbkmod := emqx_gateway_coap}}, - {exproto, #{cbkmod := emqx_gateway_exproto}}, - {lwm2m, #{cbkmod := emqx_gateway_lwm2m}}, - {mqttsn, #{cbkmod := emqx_gateway_mqttsn}}, - {stomp, #{cbkmod := emqx_gateway_stomp}} - ] = emqx_gateway:registered_gateway(). + [{coap, #{cbkmod := emqx_gateway_coap}} | _] = emqx_gateway:registered_gateway(). t_load_unload_list_lookup(_) -> {ok, _} = emqx_gateway:load(?GWNAME, #{idle_timeout => 1000}), diff --git a/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl index 9cda5bc23..0b562e851 100644 --- a/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl @@ -96,10 +96,8 @@ t_gateways(_) -> ok. 
t_gateway(_) -> - {404, GwNotFoundReq1} = request(get, "/gateways/not_a_known_atom"), - assert_not_found(GwNotFoundReq1), - {404, GwNotFoundReq2} = request(get, "/gateways/undefined"), - assert_not_found(GwNotFoundReq2), + ?assertMatch({400, #{code := <<"BAD_REQUEST">>}}, request(get, "/gateways/not_a_known_atom")), + ?assertMatch({400, #{code := <<"BAD_REQUEST">>}}, request(get, "/gateways/undefined")), {204, _} = request(put, "/gateways/stomp", #{}), {200, StompGw} = request(get, "/gateways/stomp"), assert_fields_exist( @@ -110,7 +108,7 @@ t_gateway(_) -> {200, #{enable := true}} = request(get, "/gateways/stomp"), {204, _} = request(put, "/gateways/stomp", #{enable => false}), {200, #{enable := false}} = request(get, "/gateways/stomp"), - {404, _} = request(put, "/gateways/undefined", #{}), + ?assertMatch({400, #{code := <<"BAD_REQUEST">>}}, request(put, "/gateways/undefined", #{})), {400, _} = request(put, "/gateways/stomp", #{bad_key => "foo"}), ok. @@ -129,8 +127,14 @@ t_gateway_enable(_) -> {200, #{enable := NotEnable}} = request(get, "/gateways/stomp"), {204, _} = request(put, "/gateways/stomp/enable/" ++ atom_to_list(Enable), undefined), {200, #{enable := Enable}} = request(get, "/gateways/stomp"), - {404, _} = request(put, "/gateways/undefined/enable/true", undefined), - {404, _} = request(put, "/gateways/not_a_known_atom/enable/true", undefined), + ?assertMatch( + {400, #{code := <<"BAD_REQUEST">>}}, + request(put, "/gateways/undefined/enable/true", undefined) + ), + ?assertMatch( + {400, #{code := <<"BAD_REQUEST">>}}, + request(put, "/gateways/not_a_known_atom/enable/true", undefined) + ), {404, _} = request(put, "/gateways/coap/enable/true", undefined), ok. 
diff --git a/apps/emqx_gateway/test/emqx_gateway_auth_ct.erl b/apps/emqx_gateway/test/emqx_gateway_auth_ct.erl index 92bf95a69..215302105 100644 --- a/apps/emqx_gateway/test/emqx_gateway_auth_ct.erl +++ b/apps/emqx_gateway/test/emqx_gateway_auth_ct.erl @@ -45,7 +45,7 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("emqx/include/emqx_placeholder.hrl"). --define(CALL(Msg), gen_server:call(?MODULE, {?FUNCTION_NAME, Msg})). +-define(CALL(Msg), gen_server:call(?MODULE, {?FUNCTION_NAME, Msg}, 15000)). -define(AUTHN_HTTP_PORT, 37333). -define(AUTHN_HTTP_PATH, "/auth"). diff --git a/apps/emqx_gateway/test/emqx_gateway_cli_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_cli_SUITE.erl index b2280bb20..f5be9ce14 100644 --- a/apps/emqx_gateway/test/emqx_gateway_cli_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_cli_SUITE.erl @@ -118,14 +118,8 @@ t_gateway_registry_usage(_) -> t_gateway_registry_list(_) -> emqx_gateway_cli:'gateway-registry'(["list"]), - ?assertEqual( - "Registered Name: coap, Callback Module: emqx_gateway_coap\n" - "Registered Name: exproto, Callback Module: emqx_gateway_exproto\n" - "Registered Name: lwm2m, Callback Module: emqx_gateway_lwm2m\n" - "Registered Name: mqttsn, Callback Module: emqx_gateway_mqttsn\n" - "Registered Name: stomp, Callback Module: emqx_gateway_stomp\n", - acc_print() - ). + %% TODO: assert it. + _ = acc_print(). t_gateway_usage(_) -> ?assertEqual( @@ -142,14 +136,8 @@ t_gateway_usage(_) -> t_gateway_list(_) -> emqx_gateway_cli:gateway(["list"]), - ?assertEqual( - "Gateway(name=coap, status=unloaded)\n" - "Gateway(name=exproto, status=unloaded)\n" - "Gateway(name=lwm2m, status=unloaded)\n" - "Gateway(name=mqttsn, status=unloaded)\n" - "Gateway(name=stomp, status=unloaded)\n", - acc_print() - ), + %% TODO: assert it. 
+ _ = acc_print(), emqx_gateway_cli:gateway(["load", "mqttsn", ?CONF_MQTTSN]), ?assertEqual("ok\n", acc_print()), diff --git a/apps/emqx_gateway_coap/README.md b/apps/emqx_gateway_coap/README.md index 405366e89..653fd7433 100644 --- a/apps/emqx_gateway_coap/README.md +++ b/apps/emqx_gateway_coap/README.md @@ -5,7 +5,7 @@ with [Publish-Subscribe Broker for the CoAP](https://datatracker.ietf.org/doc/ht ## Quick Start -In EMQX 5.0, CoAP gateways can be configured and enabled through the Dashboard. +In EMQX 5.0, CoAP gateway can be configured and enabled through the Dashboard. It can also be enabled via the HTTP API or emqx.conf, e.g. In emqx.conf: diff --git a/apps/emqx_gateway_coap/rebar.config b/apps/emqx_gateway_coap/rebar.config index 3b070a72a..493ebe04f 100644 --- a/apps/emqx_gateway_coap/rebar.config +++ b/apps/emqx_gateway_coap/rebar.config @@ -1,3 +1,4 @@ +%% -*- mode: erlang -*- {erl_opts, [debug_info]}. {deps, [ {emqx, {path, "../emqx"}}, diff --git a/apps/emqx_gateway_coap/src/emqx_gateway_coap.app.src b/apps/emqx_gateway_coap/src/emqx_gateway_coap.app.src index 30c176139..10dd6efef 100644 --- a/apps/emqx_gateway_coap/src/emqx_gateway_coap.app.src +++ b/apps/emqx_gateway_coap/src/emqx_gateway_coap.app.src @@ -1,3 +1,4 @@ +%% -*- mode: erlang -*- {application, emqx_gateway_coap, [ {description, "CoAP Gateway"}, {vsn, "0.1.5"}, diff --git a/apps/emqx_gateway_exproto/rebar.config b/apps/emqx_gateway_exproto/rebar.config index 473fa9b67..aafbe4e13 100644 --- a/apps/emqx_gateway_exproto/rebar.config +++ b/apps/emqx_gateway_exproto/rebar.config @@ -1,3 +1,4 @@ +%% -*- mode: erlang -*- {erl_opts, [debug_info]}. 
{deps, [ {emqx, {path, "../emqx"}}, diff --git a/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src b/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src index ffd8fd3d1..2bdd0956e 100644 --- a/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src +++ b/apps/emqx_gateway_exproto/src/emqx_gateway_exproto.app.src @@ -1,3 +1,4 @@ +%% -*- mode: erlang -*- {application, emqx_gateway_exproto, [ {description, "ExProto Gateway"}, {vsn, "0.1.5"}, diff --git a/apps/emqx_gateway_exproto/test/emqx_exproto_SUITE.erl b/apps/emqx_gateway_exproto/test/emqx_exproto_SUITE.erl index 1c4c7ba08..76e11ef00 100644 --- a/apps/emqx_gateway_exproto/test/emqx_exproto_SUITE.erl +++ b/apps/emqx_gateway_exproto/test/emqx_exproto_SUITE.erl @@ -636,18 +636,18 @@ close({dtls, Sock}) -> %% Server-Opts socketopts(tcp) -> - #{tcp => tcp_opts()}; + #{tcp_options => tcp_opts()}; socketopts(ssl) -> #{ - tcp => tcp_opts(), - ssl => ssl_opts() + tcp_options => tcp_opts(), + ssl_options => ssl_opts() }; socketopts(udp) -> - #{udp => udp_opts()}; + #{udp_options => udp_opts()}; socketopts(dtls) -> #{ - udp => udp_opts(), - dtls => dtls_opts() + udp_options => udp_opts(), + dtls_options => dtls_opts() }. tcp_opts() -> diff --git a/apps/emqx_gateway_gbt32960/BSL.txt b/apps/emqx_gateway_gbt32960/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_gateway_gbt32960/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. 
+Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. 
If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. 
Not to modify this License in any other way. diff --git a/apps/emqx_gateway_gbt32960/README.md b/apps/emqx_gateway_gbt32960/README.md new file mode 100644 index 000000000..779e7004c --- /dev/null +++ b/apps/emqx_gateway_gbt32960/README.md @@ -0,0 +1,24 @@ +# emqx_gbt32960 + +The GBT32960 Gateway is based on the GBT32960 specification. + +## Quick Start + +In EMQX 5.0, GBT32960 gateway can be configured and enabled through the Dashboard. + +It can also be enabled via the HTTP API or emqx.conf, e.g. In emqx.conf: + +```properties +gateway.gbt32960 { + + mountpoint = "gbt32960/${clientid}" + + listeners.tcp.default { + bind = 7325 + } +} +``` + +> Note: +> Configuring the gateway via emqx.conf requires changes on a per-node basis, +> but configuring it via Dashboard or the HTTP API will take effect across the cluster. diff --git a/apps/emqx_gateway_gbt32960/doc/Data_Exchange_Guide_CN.md b/apps/emqx_gateway_gbt32960/doc/Data_Exchange_Guide_CN.md new file mode 100644 index 000000000..8fdd77449 --- /dev/null +++ b/apps/emqx_gateway_gbt32960/doc/Data_Exchange_Guide_CN.md @@ -0,0 +1,743 @@ +# EMQX GBT/32960 网关 + +该文档定义了 **GBT/32960** 网关和 **EMQX** 之间数据交换的格式 + +约定: +- Payload 采用 Json 格式进行组装 +- Json Key 采用大驼峰格式命名 +- 使用车辆的 `vin` 值作为 `clientid` +- 默认挂载点为: gbt32960/${clientid} + +# Upstream +数据流向: Terminal -> GBT/32960 -> EMQX + +## 车辆登入 +Topic: gbt32960/${clientid}/upstream/vlogin + +```json +{ + "Cmd": 1, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "ICCID": "12345678901234567890", + "Id": "C", + "Length": 1, + "Num": 1, + "Seq": 1, + "Time": { + "Day": 29, + "Hour": 12, + "Minute": 19, + "Month": 12, + "Second": 20, + "Year": 12 + } + } +} +``` + +其中 + +| 字段 | 类型 | 描述 | +| --------- | ------- | ------------------------------------------------------------ | +| `Cmd` | Integer | 命令单元; `1` 表示车辆登入 | +| `Encrypt` | Integer | 数据单元加密方式,`1` 表示不加密,`2` 数据经过 RSA 加密,`3` 数据经过 ASE128 算法加密;`254` 表示异常;`255` 表示无效;其他预留 | +| `Vin` | String | 唯一识别码,即车辆 VIN 码 | +| `Data` | Object | 
数据单元, JSON 对象格式。 | + +车辆登入的数据单元格式为 + +| 字段 | 类型 | 描述 | +| -------- | ------- | ------------------------------------------------------------ | +| `Time` | Object | 数据采集时间,按年,月,日,时,分,秒,格式见示例。 | +| `Seq` | Integer | 登入流水号 | +| `ICCID` | String | 长度为20的字符串,SIM 卡的 ICCID 号 | +| `Num` | Integer | 可充电储能子系统数,有效值 0 ~ 250 | +| `Length` | Integer | 可充电储能系统编码长度,有效值 0 ~ 50 | +| `Id` | String | 可充电储能系统编码,长度为 "子系统数" 与 "编码长度" 值的乘积 | + +## 车辆登出 + +Topic: gbt32960/${clientid}/upstream/vlogout + +车辆登出的 `Cmd` 值为 4,其余字段含义与登入相同: + +```json +{ + "Cmd": 4, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Seq": 1, + "Time": { + "Day": 1, + "Hour": 2, + "Minute": 59, + "Month": 1, + "Second": 0, + "Year": 16 + } + } +} +``` + +## 实时信息上报 + +Topic: gbt32960/${clientid}/upstream/info + +> 不同信息类型上报,格式上只有 Infos 里面的对象属性不同,通过 `Type` 进行区分 +> Infos 为数组,代表车载终端每次报文可以上报多个信息 + +### 整车数据 + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "AcceleratorPedal": 90, + "BrakePedal": 0, + "Charging": 1, + "Current": 15000, + "DC": 1, + "Gear": 5, + "Mileage": 999999, + "Mode": 1, + "Resistance": 6000, + "SOC": 50, + "Speed": 2000, + "Status": 1, + "Type": "Vehicle", + "Voltage": 5000 + } + ], + "Time": { + "Day": 1, + "Hour": 2, + "Minute": 59, + "Month": 1, + "Second": 0, + "Year": 16 + } + } +} +``` + + + +其中,整车信息字段含义如下: + +| 字段 | 类型 | 描述 | +| ------------ | ------- | ------------------------------------------------------------ | +| `Type` | String | 数据类型,`Vehicle` 表示该结构为整车信息 | +| `Status` | Integer | 车辆状态,`1` 表示启动状态;`2` 表示熄火;`3` 表示其状态;`254` 表示异常;`255` 表示无效 | +| `Charging` | Integer | 充电状态,`1` 表示停车充电;`2` 行驶充电;`3` 未充电状态;`4` 充电完成;`254` 表示异常;`255` 表示无效 | +| `Mode` | Integer | 运行模式,`1` 表示纯电;`2` 混动;`3` 燃油;`254` 表示异常;`255` 表示无效 | +| `Speed` | Integer | 车速,有效值 ( 0~ 2200,表示 0 km/h ~ 220 km/h),单位 0.1 km/h | +| `Mileage` | Integer | 累计里程,有效值 0 ~9,999,999(表示 0 km ~ 999,999.9 km),单位 0.1 km | +| `Voltage` | Integer | 总电压,有效值范围 0 ~10000(表示 0 V ~ 1000 V)单位 0.1 V 
| +| `Current` | Integer | 总电流,有效值 0 ~ 20000 (偏移量 1000,表示 -1000 A ~ +1000 A,单位 0.1 A | +| `SOC` | Integer | SOC,有效值 0 ~ 100(表示 0% ~ 100%) | +| `DC` | Integer | DC,`1` 工作;`2` 断开;`254` 表示异常;`255` 表示无效 | +| `Gear` | Integer | 档位,参考原协议的 表 A.1,此值为其转换为整数的值 | +| `Resistance` | Integer | 绝缘电阻,有效范围 0 ~ 60000(表示 0 k欧姆 ~ 60000 k欧姆) | + +### 驱动电机数据 + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "Motors": [ + { + "CtrlTemp": 125, + "DCBusCurrent": 31203, + "InputVoltage": 30012, + "MotorTemp": 125, + "No": 1, + "Rotating": 30000, + "Status": 1, + "Torque": 25000 + }, + { + "CtrlTemp": 125, + "DCBusCurrent": 30200, + "InputVoltage": 32000, + "MotorTemp": 145, + "No": 2, + "Rotating": 30200, + "Status": 1, + "Torque": 25300 + } + ], + "Number": 2, + "Type": "DriveMotor" + } + ], + "Time": { + "Day": 1, + "Hour": 2, + "Minute": 59, + "Month": 1, + "Second": 0, + "Year": 16 + } + } +} +``` + +其中,驱动电机数据各个字段的含义是 + +| 字段 | 类型 | 描述 | +| -------- | ------- | ------------------------------ | +| `Type` | String | 数据类型,此处为 `DriveMotor` | +| `Number` | Integer | 驱动电机个数,有效值 1~253 | +| `Motors` | Array | 驱动电机数据列表 | + +驱动电机数据字段为: + +| 字段 | 类型 | 描述 | +| -------------- | -------- | ------------------------------------------------------------ | +| `No` | Integer | 驱动电机序号,有效值 1~253 | +| `Status` | Integer | 驱动电机状态,`1` 表示耗电;`2`发电;`3` 关闭状态;`4` 准备状态;`254` 表示异常;`255` 表示无效 | +| `CtrlTemp` | Integer | 驱动电机控制器温度,有效值 0~250(数值偏移 40°C,表示 -40°C ~ +210°C)单位 °C | +| `Rotating` | Interger | 驱动电机转速,有效值 0~65531(数值偏移 20000表示 -20000 r/min ~ 45531 r/min)单位 1 r/min | +| `Torque` | Integer | 驱动电机转矩,有效值 0~65531(数据偏移量 20000,表示 - 2000 N·m ~ 4553.1 N·m)单位 0.1 N·m | +| `MotorTemp` | Integer | 驱动电机温度,有效值 0~250(数据偏移量 40 °C,表示 -40°C ~ +210°C)单位 1°C | +| `InputVoltage` | Integer | 电机控制器输入电压,有效值 0~60000(表示 0V ~ 6000V)单位 0.1 V | +| `DCBusCurrent` | Interger | 电机控制器直流母线电流,有效值 0~20000(数值偏移 1000A,表示 -1000A ~ +1000 A)单位 0.1 A | + +### 燃料电池数据 + +```json +{ + "Cmd": 2, + 
"Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "CellCurrent": 12000, + "CellVoltage": 10000, + "DCStatus": 1, + "FuelConsumption": 45000, + "H_ConcSensorCode": 11, + "H_MaxConc": 35000, + "H_MaxPress": 500, + "H_MaxTemp": 12500, + "H_PressSensorCode": 12, + "H_TempProbeCode": 10, + "ProbeNum": 2, + "ProbeTemps": [120, 121], + "Type": "FuelCell" + } + ], + "Time": { + "Day": 1, + "Hour": 2, + "Minute": 59, + "Month": 1, + "Second": 0, + "Year": 16 + } + } +} +``` + +其中,燃料电池数据各个字段的含义是 + +| 字段 | 类型 | 描述 | +| ------------------- | ------- | ------------------------------------------------------------ | +| `Type` | String | 数据类型,此处为 `FuleCell` | +| `CellVoltage` | Integer | 燃料电池电压,有效值范围 0~20000(表示 0V ~ 2000V)单位 0.1 V | +| `CellCurrent` | Integer | 燃料电池电流,有效值范围 0~20000(表示 0A ~ +2000A)单位 0.1 A | +| `FuelConsumption` | Integer | 燃料消耗率,有效值范围 0~60000(表示 0kg/100km ~ 600 kg/100km) 单位 0.01 kg/100km | +| `ProbeNum` | Integer | 燃料电池探针总数,有效值范围 0~65531 | +| `ProbeTemps` | Array | 燃料电池每探针温度值 | +| `H_MaxTemp` | Integer | 氢系统最高温度,有效值 0~2400(偏移量40°C,表示 -40°C ~ 200°C)单位 0.1 °C | +| `H_TempProbeCode` | Integer | 氢系统最高温度探针代号,有效值 1~252 | +| `H_MaxConc` | Integer | 氢气最高浓度,有效值 0~60000(表示 0mg/kg ~ 50000 mg/kg)单位 1mg/kg | +| `H_ConcSensorCode` | Integer | 氢气最高浓度传感器代号,有效值 1~252 | +| `H_MaxPress` | Integer | 氢气最高压力,有效值 0~1000(表示 0 MPa ~ 100 MPa)最小单位 0.1 MPa | +| `H_PressSensorCode` | Integer | 氢气最高压力传感器代号,有效值 1~252 | +| `DCStatus` | Integer | 高压 DC/DC状态,`1` 表示工作;`2`断开 | + +### 发动机数据 + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "CrankshaftSpeed": 2000, + "FuelConsumption": 200, + "Status": 1, + "Type": "Engine" + } + ], + "Time": { + "Day": 1, + "Hour": 22, + "Minute": 59, + "Month": 10, + "Second": 0, + "Year": 16 + } + } +} +``` + +其中,发动机数据各个字段的含义是 + +| 字段 | 类型 | 描述 | +| ----------------- | ------- | ------------------------------------------------------------ | +| `Type` | String | 数据类型,此处为 `Engine` | +| 
`Status` | Integer | 发动机状态,`1` 表示启动;`2` 关闭 | +| `CrankshaftSpeed` | Integer | 曲轴转速,有效值 0~60000(表示 0r/min ~ 60000r/min)单位 1r/min | +| `FuelConsumption` | Integer | 燃料消耗率,有效范围 0~60000(表示 0L/100km ~ 600L/100km)单位 0.01 L/100km | + + + +### 车辆位置数据 + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "Latitude": 100, + "Longitude": 10, + "Status": 0, + "Type": "Location" + } + ], + "Time": { + "Day": 1, + "Hour": 22, + "Minute": 59, + "Month": 10, + "Second": 0, + "Year": 16 + } + } +} +``` + +其中,车辆位置数据各个字段的含义是 + +| 字段 | 类型 | 描述 | +| ----------- | ------- | ----------------------------------------------------- | +| `Type` | String | 数据类型,此处为 `Location` | +| `Status` | Integer | 定位状态,见原协议表15,此处为所有比特位的整型值 | +| `Longitude` | Integer | 经度,以度为单位的纬度值乘以 10^6,精确到百万分之一度 | +| `Latitude` | Integer | 纬度,以度为单位的纬度值乘以 10^6,精确到百万分之一度 | + + + +### 极值数据 + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "MaxBatteryVoltage": 7500, + "MaxTemp": 120, + "MaxTempProbeNo": 12, + "MaxTempSubsysNo": 14, + "MaxVoltageBatteryCode": 10, + "MaxVoltageBatterySubsysNo": 12, + "MinBatteryVoltage": 2000, + "MinTemp": 40, + "MinTempProbeNo": 13, + "MinTempSubsysNo": 15, + "MinVoltageBatteryCode": 11, + "MinVoltageBatterySubsysNo": 13, + "Type": "Extreme" + } + ], + "Time": { + "Day": 30, + "Hour": 12, + "Minute": 22, + "Month": 5, + "Second": 59, + "Year": 17 + } + } +} +``` + +其中,极值数据各个字段的含义是 + +| 字段 | 类型 | 描述 | +| --------------------------- | ------- | ------------------------------------------------------------ | +| `Type` | String | 数据类型,此处为 `Extreme` | +| `MaxVoltageBatterySubsysNo` | Integer | 最高电压电池子系统号,有效值 1~250 | +| `MaxVoltageBatteryCode` | Integer | 最高电压电池单体代号,有效值 1~250 | +| `MaxBatteryVoltage` | Integer | 电池单体电压最高值,有效值 0~15000(表示 0V~15V)单位 0.001V | +| `MinVoltageBatterySubsysNo` | Integer | 最低电压电池子系统号,有效值 1~250 | +| `MinVoltageBatteryCode` | Integer | 最低电压电池单体代号,有效值 1~250 | +| 
`MinBatteryVoltage` | Integer | 电池单体电压最低值,有效值 0~15000(表示 0V~15V)单位 0.001V | +| `MaxTempSubsysNo` | Integer | 最高温度子系统号,有效值 1~250 | +| `MaxTempProbeNo` | Integer | 最高温度探针序号,有效值 1~250 | +| `MaxTemp` | Integer | 最高温度值,有效值范围 0~250(偏移量40,表示 -40°C ~ +210°C) | +| `MinTempSubsysNo` | Integer | 最低温度子系统号,有效值 1~250 | +| `MinTempProbeNo` | Integer | 最低温度探针序号,有效值 1~250 | +| `MinTemp` | Integer | 最低温度值,有效值范围 0~250(偏移量40,表示 -40°C ~ +210°C) | + + + +### 报警数据 + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "FaultChargeableDeviceNum": 1, + "FaultChargeableDeviceList": ["00C8"], + "FaultDriveMotorNum": 0, + "FaultDriveMotorList": [], + "FaultEngineNum": 1, + "FaultEngineList": ["006F"], + "FaultOthersNum": 0, + "FaultOthersList": [], + "GeneralAlarmFlag": 3, + "MaxAlarmLevel": 1, + "Type": "Alarm" + } + ], + "Time": { + "Day": 20, + "Hour": 22, + "Minute": 23, + "Month": 12, + "Second": 59, + "Year": 17 + } + } +} +``` + +其中,报警数据各个字段的含义是 + +| 字段 | 类型 | 描述 | +| --------------------------- | ------- | ------------------------------------------------------------ | +| `Type` | String | 数据类型,此处为 `Alarm` | +| `MaxAlarmLevel` | Integer | 最高报警等级,有效值范围 0~3,`0` 表示无故障,`1` 表示 `1` 级故障 | +| `GeneralAlarmFlag` | Integer | 通用报警标志位,见原协议表 18 | +| `FaultChargeableDeviceNum` | Integer | 可充电储能装置故障总数,有效值 0~252 | +| `FaultChargeableDeviceList` | Array | 可充电储能装置故障代码列表 | +| `FaultDriveMotorNum` | Integer | 驱动电机故障总数,有效置范围 0 ~252 | +| `FaultDriveMotorList` | Array | 驱动电机故障代码列表 | +| `FaultEngineNum` | Integer | 发动机故障总数,有效值范围 0~252 | +| `FaultEngineList` | Array | 发动机故障代码列表 | +| `FaultOthersNum` | Integer | 其他故障总数 | +| `FaultOthersList` | Array | 其他故障代码列表 | + + + +### 可充电储能装置电压数据 + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "Number": 2, + "SubSystems": [ + { + "CellsTotal": 2, + "CellsVoltage": [5000], + "ChargeableCurrent": 10000, + "ChargeableSubsysNo": 1, + "ChargeableVoltage": 5000, + 
"FrameCellsCount": 1, + "FrameCellsIndex": 0 + }, + { + "CellsTotal": 2, + "CellsVoltage": [5001], + "ChargeableCurrent": 10001, + "ChargeableSubsysNo": 2, + "ChargeableVoltage": 5001, + "FrameCellsCount": 1, + "FrameCellsIndex": 1 + } + ], + "Type": "ChargeableVoltage" + } + ], + "Time": { + "Day": 1, + "Hour": 22, + "Minute": 59, + "Month": 10, + "Second": 0, + "Year": 16 + } + } +} +``` + + + +其中,字段定义如下 + +| 字段 | 类型 | 描述 | +| ----------- | ------- | ------------------------------------ | +| `Type` | String | 数据类型,此处为 `ChargeableVoltage` | +| `Number` | Integer | 可充电储能子系统个数,有效范围 1~250 | +| `SubSystem` | Object | 可充电储能子系统电压信息列表 | + +可充电储能子系统电压信息数据格式: + +| 字段 | 类型 | 描述 | +| -------------------- | ------- | ------------------------------------------------------------ | +| `ChargeableSubsysNo` | Integer | 可充电储能子系统号,有效值范围,1~250 | +| `ChargeableVoltage` | Integer | 可充电储能装置电压,有效值范围,0~10000(表示 0V ~ 1000V)单位 0.1 V | +| `ChargeableCurrent` | Integer | 可充电储能装置电流,有效值范围,0~20000(数值偏移量 1000A,表示 -1000A ~ +1000A)单位 0.1 A | +| `CellsTotal` | Integer | 单体电池总数,有效值范围 1~65531 | +| `FrameCellsIndex` | Integer | 本帧起始电池序号,当本帧单体个数超过 200 时,应该拆分多个帧进行传输,有效值范围 1~65531 | +| `FrameCellsCount` | Integer | 本帧单体电池总数,有效值范围 1~200 | +| `CellsVoltage` | Array | 单体电池电压,有效值范围 0~60000(表示 0V ~ 60.000V)单位 0.001V | + + + +### 可充电储能装置温度数据 + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "Number": 2, + "SubSystems": [ + { + "ChargeableSubsysNo": 1, + "ProbeNum": 10, + "ProbesTemp": [0, 0, 0, 0, 0, 0, 0, 0, 19, 136] + }, + { + "ChargeableSubsysNo": 2, + "ProbeNum": 1, + "ProbesTemp": [100] + } + ], + "Type": "ChargeableTemp" + } + ], + "Time": { + "Day": 1, + "Hour": 22, + "Minute": 59, + "Month": 10, + "Second": 0, + "Year": 16 + } + } +} +``` +其中,数据格式为: + +| 字段 | 类型 | 描述 | +| ------------ | ------- | --------------------------------- | +| `Type` | String | 数据类型,此处为 `ChargeableTemp` | +| `Number` | Integer | 可充电储能子系统温度信息列表长度 | +| `SubSystems` | Object | 
可充电储能子系统温度信息列表 | + +可充电储能子系统温度信息格式为 + +| 字段 | 类型 | 描述 | +| -------------------- | -------- | ------------------------------------ | +| `ChargeableSubsysNo` | Ineteger | 可充电储能子系统号,有效值 1~250 | +| `ProbeNum` | Integer | 可充电储能温度探针个数 | +| `ProbesTemp` | Array | 可充电储能子系统各温度探针温度值列表 | + + + +## 数据补发 + +Topic: gbt32960/${clientid}/upstream/reinfo + +**数据格式: 略** (与实时数据上报相同) + +# Downstream + +> 请求数据流向: EMQX -> GBT/32960 -> Terminal + +> 应答数据流向: Terminal -> GBT/32960 -> EMQX + +下行主题: gbt32960/${clientid}/dnstream +上行应答主题: gbt32960/${clientid}/upstream/response + +## 参数查询 + + + +**Req:** + +```json +{ + "Action": "Query", + "Total": 2, + "Ids": ["0x01", "0x02"] +} +``` + +| 字段 | 类型 | 描述 | +| -------- | ------- | -------------------------------------------------- | +| `Action` | String | 下发命令类型,此处为 `Query` | +| `Total` | Integer | 查询参数总数 | +| `Ids` | Array | 需查询参数的 ID 列表,具体 ID 含义见原协议 表 B.10 | + +**Response:** + +```json +{ + "Cmd": 128, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Total": 2, + "Params": [ + {"0x01": 6000}, + {"0x02": 10} + ], + "Time": { + "Day": 2, + "Hour": 11, + "Minute": 12, + "Month": 2, + "Second": 12, + "Year": 17 + } + } +} +``` + + + +## 参数设置 + +**Req:** +```json +{ + "Action": "Setting", + "Total": 2, + "Params": [{"0x01": 5000}, + {"0x02": 200}] +} +``` + +| 字段 | 类型 | 描述 | +| -------- | ------- | ------------------------------ | +| `Action` | String | 下发命令类型,此处为 `Setting` | +| `Total` | Integer | 设置参数总数 | +| `Params` | Array | 需设置参数的 ID 和 值 | + +**Response:** + +```json +// fixme? 终端是按照这种方式返回? 
+{ + "Cmd": 129, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Total": 2, + "Params": [ + {"0x01": 5000}, + {"0x02": 200} + ], + "Time": { + "Day": 2, + "Hour": 11, + "Minute": 12, + "Month": 2, + "Second": 12, + "Year": 17 + } + } +} +``` + +## 终端控制 +**命令的不同, 参数不同; 无参数时为空** + +远程升级: +**Req:** + +```json +{ + "Action": "Control", + "Command": "0x01", + "Param": { + "DialingName": "hz203", + "Username": "user001", + "Password": "password01", + "Ip": "192.168.199.1", + "Port": 8080, + "ManufacturerId": "BMWA", + "HardwareVer": "1.0.0", + "SoftwareVer": "1.0.0", + "UpgradeUrl": "ftp://emqtt.io/ftp/server", + "Timeout": 10 + } +} +``` + +| 字段 | 类型 | 描述 | +| --------- | ------- | ------------------------------ | +| `Action` | String | 下发命令类型,此处为 `Control` | +| `Command` | Integer | 下发指令 ID,见原协议表 B.15 | +| `Param` | Object | 命令参数 | + +列表 + +车载终端关机: + +```json +{ + "Action": "Control", + "Command": "0x02" +} +``` + +... + +车载终端报警: +```json +{ + "Action": "Control", + "Command": "0x06", + "Param": {"Level": 0, "Message": "alarm message"} +} +``` diff --git a/apps/emqx_gateway_gbt32960/doc/Data_Exchange_Guide_EN.md b/apps/emqx_gateway_gbt32960/doc/Data_Exchange_Guide_EN.md new file mode 100644 index 000000000..98f7db30b --- /dev/null +++ b/apps/emqx_gateway_gbt32960/doc/Data_Exchange_Guide_EN.md @@ -0,0 +1,744 @@ +# EMQX GBT/32960 Gateway + +This document defines the format of the data exchange internal the **GBT/32960** gateway and the **EMQX**. + +Conventions: +- Payloads are assembled in Json format +- Json Keys are named in big hump format +- Use the `vin` value of the vehicle as the `clientid`. 
+- The default mountpoint is: `gbt32960/${clientid}` + +# Upstream +Data flow: Terminal -> GBT/32960 Gateway -> EMQX + +## Vehicle Login +Topic: gbt32960/${clientid}/upstream/vlogin + +```json +{ + "Cmd": 1, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "ICCID": "12345678901234567890", + "Id": "C", + "Length": 1, + "Num": 1, + "Seq": 1, + "Time": { + "Day": 29, + "Hour": 12, + "Minute": 19, + "Month": 12, + "Second": 20, + "Year": 12 + } + } +} +``` + +definition: + +| Field | Type | Description | +| --------- | ------- | ------------------------------------------------------------ | +| `Cmd` | Integer | Command, `1` :: vehicle login | +| `Encrypt` | Integer | Data encryption method, `1` :: no encryption; `2` :: RSA; `3` :: ASE128; `254` :: abnormal; `255` :: invalid; other reserved | +| `Vin` | String | Unique identifier,namely, the vehicle VIN code | +| `Data` | Object | The JSON format data | + +The data unit format for vehicle login is: + +| Field | Type | Description | +| -------- | ------- | ------------------------------------------------------------ | +| `Time` | Object | Data collection time, in year, month, day, hour, minute, second. See the example for the format | +| `Seq` | Integer | Login sequence number | +| `ICCID` | String | String of length 20, the ICCID number of the SIM card | +| `Num` | Integer | Number of rechargeable energy storage subsystems, valid values 0 ~ 250 | +| `Length` | Integer | Rechargeable energy storage system encoding length, valid value 0 ~ 50 | +| `Id` | String | Rechargeable energy storage system code, the length is the product of the `Num` and the `Length` value | + +## Vehicle logout + +Topic: gbt32960/${clientid}/upstream/vlogout + +The `Cmd` value of vehicle logout is 4, and the meaning of the other fields is the same as that of the login. 
+ +```json +{ + "Cmd": 4, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Seq": 1, + "Time": { + "Day": 1, + "Hour": 2, + "Minute": 59, + "Month": 1, + "Second": 0, + "Year": 16 + } + } +} +``` + +## Real-time information reporting + +Topic: gbt32960/${clientid}/upstream/info + +> When reporting messages of different information types, only the object attribute is different, which are distinguished by the `Type` field. +> Infos is an array, which means that the vehicle terminal can report multiple information in each message + +### Vehicle data + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "AcceleratorPedal": 90, + "BrakePedal": 0, + "Charging": 1, + "Current": 15000, + "DC": 1, + "Gear": 5, + "Mileage": 999999, + "Mode": 1, + "Resistance": 6000, + "SOC": 50, + "Speed": 2000, + "Status": 1, + "Type": "Vehicle", + "Voltage": 5000 + } + ], + "Time": { + "Day": 1, + "Hour": 2, + "Minute": 59, + "Month": 1, + "Second": 0, + "Year": 16 + } + } +} +``` + + + +The meaning of the vehicle information field is as follows: + +| Field | Type | Description | +| ------------ | ------- | ------------------------------------------------------------ | +| `Type` | String | Data type, `Vehicle` :: this is the vehicle information | +| `Status` | Integer | Vehicle status, `1` :: started; `2` :: stalled; `3` :: others; `254` :: abnormal; `255` :: invalid | +| `Charging` | Integer | Charging status, `1` :: parking and charging; `2` :: driving and charging; `3` :: not charging; `4` :: charging is completed; `254` :: abnormal; `255` :: invalid | +| `Mode` | Integer | Operating mode, `1` :: pure electric; `2` :: hybrid; `3` :: fuel; `254` :: abnormal; `255` :: invalid | +| `Speed` | Integer | Vehicle speed, valid value (0~ 2200, indicating 0 km/h ~ 220 km/h), unit 0.1 km/h | +| `Mileage` | Integer | Accumulated mileage, valid value 0 ~9,999,999 (representing 0 km ~ 999,999.9 km), unit 0.1 km | +| `Voltage` | Integer | 
Total voltage, valid value 0 ~10000 (representing 0 V ~ 1000 V) unit 0.1 V | +| `Current` | Integer | Total current, valid value 0 ~ 20000 (offset 1000, :: -1000 A ~ +1000 A) unit 0.1 A | +| `SOC` | Integer | SOC, valid values 0 ~ 100 (representing 0% ~ 100%) | +| `DC` | Integer | DC, `1` works; `2` disconnects; `254` :: abnormal; `255` :: invalid | +| `Gear` | Integer | Gear, refer to Table A.1 of the original protocol, this value is converted into an integer | +| `Resistance` | Integer | Insulation resistance, valid range 0 ~ 60000 (representing 0 k ohm ~ 60000 k ohm) | + + + +### Drive motor data + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "Motors": [ + { + "CtrlTemp": 125, + "DCBusCurrent": 31203, + "InputVoltage": 30012, + "MotorTemp": 125, + "No": 1, + "Rotating": 30000, + "Status": 1, + "Torque": 25000 + }, + { + "CtrlTemp": 125, + "DCBusCurrent": 30200, + "InputVoltage": 32000, + "MotorTemp": 145, + "No": 2, + "Rotating": 30200, + "Status": 1, + "Torque": 25300 + } + ], + "Number": 2, + "Type": "DriveMotor" + } + ], + "Time": { + "Day": 1, + "Hour": 2, + "Minute": 59, + "Month": 1, + "Second": 0, + "Year": 16 + } + } +} +``` + +The meaning of each field of the drive motor data is: + +| Field | Type | Description | +| -------- | ------- | ------------------------------- | +| `Type` | String | Data type, here is `DriveMotor` | +| `Number` | Integer | Number of drive motors, valid value 1~253 | +| `Motors` | Array | Drive motor data list | + +The drive motor data fields are: + +| Field | Type | Description | +| -------------- | -------- | --------------------------------------------------------------------------------------------------------------------| +| `No` | Integer | Drive motor serial number, valid value 1 ~ 253 | +| `Status` | Integer | Drive motor status, `1` :: consuming; `2` producing; `3` closed; `4` ready; `254` :: abnormal; `255` :: invalid | +| `CtrlTemp` | Integer | Drive motor 
controller temperature, valid value 0 ~ 250 (value offset 40°C, indicating -40°C ~ +210°C) unit °C | +| `Rotating` | Interger | Drive motor speed, valid value 0 ~ 65531 (numeric offset 20000 :: -20000 r/min ~ 45531 r/min) unit 1 r/min | +| `Torque` | Integer | Drive motor torque, valid value 0 ~ 65531 (data offset 20000, represents - 2000 N·m ~ 4553.1 N·m) unit 0.1 N·m | +| `MotorTemp` | Integer | Drive motor temperature, valid value 0 ~ 250 (data offset 40 °C, represents -40°C ~ +210°C) unit 1°C | +| `InputVoltage` | Integer | Motor controller input voltage, valid value 0 ~ 60000 (representing 0V ~ 6000V) unit 0.1 V | +| `DCBusCurrent` | Interger | Motor controller DC bus current, valid value 0 ~ 20000 (value offset 1000A, indicating -1000A ~ +1000 A) unit 0.1 A | + + +### Fuel cell data + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "CellCurrent": 12000, + "CellVoltage": 10000, + "DCStatus": 1, + "FuelConsumption": 45000, + "H_ConcSensorCode": 11, + "H_MaxConc": 35000, + "H_MaxPress": 500, + "H_MaxTemp": 12500, + "H_PressSensorCode": 12, + "H_TempProbeCode": 10, + "ProbeNum": 2, + "ProbeTemps": [120, 121], + "Type": "FuelCell" + } + ], + "Time": { + "Day": 1, + "Hour": 2, + "Minute": 59, + "Month": 1, + "Second": 0, + "Year": 16 + } + } +} +``` + +The meaning of each field of fuel cell data is + +| Field | Type | Description | +| ------------------- | ------- | ---------------------------------------------------------------------------------------------------------------------| +| `Type` | String | Data type, here is `FuleCell` | +| `CellVoltage` | Integer | Fuel cell voltage, valid value range 0~20000 (representing 0V ~ 2000V) unit 0.1 V | +| `CellCurrent` | Integer | Fuel cell current, valid value range 0~20000 (representing 0A ~ +2000A) unit 0.1 A | +| `FuelConsumption` | Integer | Fuel consumption rate, valid value range 0~60000 (representing 0kg/100km ~ 600 kg/100km) unit 0.01 kg/100km | +| `ProbeNum` | 
Integer | Total number of fuel cell probes, valid value range 0~65531 | +| `ProbeTemps` | Array | Fuel cell temperature value per probe | +| `H_MaxTemp` | Integer | Maximum temperature of the hydrogen system, effective value 0~2400 (offset 40°C, indicating -40°C ~ 200°C) unit 0.1 °C | +| `H_TempProbeCode` | Integer | Hydrogen system maximum temperature probe code, valid value 1~252 | +| `H_MaxConc` | Integer | Maximum hydrogen concentration, valid value 0~60000 (representing 0mg/kg ~ 50000 mg/kg) unit 1mg/kg | +| `H_ConcSensorCode` | Integer | Hydrogen maximum concentration sensor code, valid value 1~252 | +| `H_MaxPress` | Integer | Maximum pressure of hydrogen, valid value 0~1000 (representing 0 MPa ~ 100 MPa) minimum unit 0.1 MPa | +| `H_PressSensorCode` | Integer | Hydrogen maximum pressure sensor code, valid value 1~252 | +| `DCStatus` | Integer | High voltage DC/DC status, `1` :: working; `2` :: disconnected | + +### Engine data + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "CrankshaftSpeed": 2000, + "FuelConsumption": 200, + "Status": 1, + "Type": "Engine" + } + ], + "Time": { + "Day": 1, + "Hour": 22, + "Minute": 59, + "Month": 10, + "Second": 0, + "Year": 16 + } + } +} +``` + +The meaning of each field of the engine data is + +| Field | Type | Description | +| ------------------ | ------- | ------------------------------------------------------------------------------------------------| +| `Type` | String | Data type, here is `Engine` | +| `Status` | Integer | Engine status, `1` :: started; `2` :: shutdown | +| `CrankshaftSpeed` | Integer | Crankshaft speed, valid value 0~60000 (representing 0r/min ~ 60000r/min) unit 1r/min | +| `FuelConsumption` | Integer | Fuel consumption rate, valid range 0~60000 (representing 0L/100km ~ 600L/100km) unit 0.01 L/100km | + + + +### Vehicle location data + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + 
"Latitude": 100, + "Longitude": 10, + "Status": 0, + "Type": "Location" + } + ], + "Time": { + "Day": 1, + "Hour": 22, + "Minute": 59, + "Month": 10, + "Second": 0, + "Year": 16 + } + } +} +``` + +The meaning of each field of the vehicle location data is: + +| Field | Type | Description | +| ----------- | ------- | ------------------------------------------------------------------------------------------------| +| `Type` | String | Data type, here is `Location` | +| `Status` | Integer | Positioning status, see table 15 of original protocol, here is the integer value of all bits | +| `Longitude` | Integer | Longitude, latitude value in degrees multiplied by 10^6 to the nearest millionth of a degree | +| `Latitude` | Integer | Latitude, the latitude value in degrees multiplied by 10^6 to the nearest millionth of a degree | + + + +### Maximum value data + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "MaxBatteryVoltage": 7500, + "MaxTemp": 120, + "MaxTempProbeNo": 12, + "MaxTempSubsysNo": 14, + "MaxVoltageBatteryCode": 10, + "MaxVoltageBatterySubsysNo": 12, + "MinBatteryVoltage": 2000, + "MinTemp": 40, + "MinTempProbeNo": 13, + "MinTempSubsysNo": 15, + "MinVoltageBatteryCode": 11, + "MinVoltageBatterySubsysNo": 13, + "Type": "Extreme" + } + ], + "Time": { + "Day": 30, + "Hour": 12, + "Minute": 22, + "Month": 5, + "Second": 59, + "Year": 17 + } + } +} +``` + +Among them, the meaning of each field of extreme value data is + +| Field | Type | Description | +| --------------------------- | ------- | -------------------------------------------------------------------------------------------------| +| `Type` | String | Data type, here is `Extreme` | +| `MaxVoltageBatterySubsysNo` | Integer | Maximum voltage battery subsystem number, valid value 1~250 | +| `MaxVoltageBatteryCode` | Integer | Maximum voltage battery cell code, valid value 1~250 | +| `MaxBatteryVoltage` | Integer | Maximum value of the battery cell 
voltage, valid value 0~15000 (representing 0V ~ 15V) unit 0.001V | +| `MinVoltageBatterySubsysNo` | Integer | Minimum voltage battery subsystem number, valid value 1~250 | +| `MinVoltageBatteryCode` | Integer | Minimum voltage battery cell code, valid value 1~250 | +| `MinBatteryVoltage` | Integer | Minimum value of battery cell voltage, valid value 0~15000 (representing 0V ~ 15V) unit 0.001V | +| `MaxTempSubsysNo` | Integer | Maximum temperature subsystem number, valid value 1~250 | +| `MaxTempProbeNo` | Integer | Maximum temperature probe serial number, valid value 1~250 | +| `MaxTemp` | Integer | Maximum temperature value, valid value range 0~250 (offset 40, representing -40°C ~ +210°C) | +| `MinTempSubsysNo` | Integer | Minimum temperature subsystem number, valid value 1~250 | +| `MinTempProbeNo` | Integer | Minimum temperature probe serial number, valid value 1~250 | +| `MinTemp` | Integer | Minimum temperature value, valid value range 0~250 (offset 40, representing -40°C ~ +210°C) | + + +### Alarm data + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "FaultChargeableDeviceNum": 1, + "FaultChargeableDeviceList": ["00C8"], + "FaultDriveMotorNum": 0, + "FaultDriveMotorList": [], + "FaultEngineNum": 1, + "FaultEngineList": ["006F"], + "FaultOthersNum": 0, + "FaultOthersList": [], + "GeneralAlarmFlag": 3, + "MaxAlarmLevel": 1, + "Type": "Alarm" + } + ], + "Time": { + "Day": 20, + "Hour": 22, + "Minute": 23, + "Month": 12, + "Second": 59, + "Year": 17 + } + } +} +``` + +The meaning of each field of the alarm data is: + +| Field | Type | Description | +| --------------------------- | ------- | -------------------------------------------------------------------------------------------| +| `Type` | String | Data type, here is `Alarm` | +| `MaxAlarmLevel` | Integer | The maximum alarm level, valid value range is 0~3, `0` :: no fault, `1` :: `1` level fault | +| `GeneralAlarmFlag` | Integer | General alarm flag, see 
original protocol table 18 | +| `FaultChargeableDeviceNum` | Integer | Total number of rechargeable energy storage device faults, valid value 0~252 | +| `FaultChargeableDeviceList` | Array | Rechargeable energy storage device fault code list | +| `FaultDriveMotorNum` | Integer | Total number of drive motor faults, valid setting range 0 ~252 | +| `FaultDriveMotorList` | Array | Drive motor fault code list | +| `FaultEngineNum` | Integer | Total number of engine faults, valid value range 0~252 | +| `FaultEngineList` | Array | Engine fault code list | +| `FaultOthersNum` | Integer | Total number of other faults | +| `FaultOthersList` | Array | Other fault code list | + + + +### Rechargeable energy storage device voltage data + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "Number": 2, + "SubSystems": [ + { + "CellsTotal": 2, + "CellsVoltage": [5000], + "ChargeableCurrent": 10000, + "ChargeableSubsysNo": 1, + "ChargeableVoltage": 5000, + "FrameCellsCount": 1, + "FrameCellsIndex": 0 + }, + { + "CellsTotal": 2, + "CellsVoltage": [5001], + "ChargeableCurrent": 10001, + "ChargeableSubsysNo": 2, + "ChargeableVoltage": 5001, + "FrameCellsCount": 1, + "FrameCellsIndex": 1 + } + ], + "Type": "ChargeableVoltage" + } + ], + "Time": { + "Day": 1, + "Hour": 22, + "Minute": 59, + "Month": 10, + "Second": 0, + "Year": 16 + } + } +} +``` + + + +The fields are defined as follows: + +| Field | Type | Description | +| ----------- | ------- | --------------------------------------------------------------------| +| `Type` | String | Data type, here is `ChargeableVoltage` | +| `Number` | Integer | Number of rechargeable energy storage subsystems, valid range 1~250 | +| `SubSystem` | Object | Rechargeable energy storage subsystem voltage information list | + +Rechargeable energy storage subsystem voltage information data format: + +| Field | Type | Description | +| -------------------- | ------- | 
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `ChargeableSubsysNo` | Integer | Rechargeable energy storage subsystem number, valid value range, 1~250 | +| `ChargeableVoltage` | Integer | Rechargeable energy storage device voltage, valid value range, 0~10000 (representing 0V ~ 1000V) unit 0.1 V | +| `ChargeableCurrent` | Integer | Rechargeable energy storage device current, valid value range, 0~20000 (value offset 1000A, indicating -1000A ~ +1000A) unit 0.1 A | +| `CellsTotal` | Integer | Total number of cells, valid value range 1~65531 | +| `FrameCellsIndex` | Integer | The serial number of the cell at the beginning of this frame, when the number of single cells in this frame exceeds 200, it should be split into multiple frames for transmission, valid value range 1~65531 | +| `FrameCellsCount` | Integer | The total number of cells in this frame, valid value range 1~200 | +| `CellsVoltage` | Array | Cells voltage, valid value range 0~60000 (representing 0V ~ 60.000V) unit 0.001V | + + + +### Rechargeable energy storage device temperature data + +```json +{ + "Cmd": 2, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Infos": [ + { + "Number": 2, + "SubSystems": [ + { + "ChargeableSubsysNo": 1, + "ProbeNum": 10, + "ProbesTemp": [0, 0, 0, 0, 0, 0, 0, 0, 19, 136] + }, + { + "ChargeableSubsysNo": 2, + "ProbeNum": 1, + "ProbesTemp": [100] + } + ], + "Type": "ChargeableTemp" + } + ], + "Time": { + "Day": 1, + "Hour": 22, + "Minute": 59, + "Month": 10, + "Second": 0, + "Year": 16 + } + } +} +``` +The data format is: + +| Field | Type | Description | +| ------------ | ------- | --------------------------------------------------------------------------| +| `Type` | String | Data type, here is `ChargeableTemp` | +| `Number` | Integer | Rechargeable energy storage subsystem temperature information list length | +| `SubSystems` | Object | 
Rechargeable energy storage subsystem temperature information list |
+
+The rechargeable energy storage subsystem temperature information format is:
+
+| Field | Type | Description |
+| -------------------- | -------- | --------------------------------------------------------------------------------------------------|
+| `ChargeableSubsysNo` | Integer | Rechargeable energy storage subsystem number, valid value 1~250 |
+| `ProbeNum` | Integer | Number of rechargeable energy storage temperature probes |
+| `ProbesTemp` | Array | List of temperature values of each temperature probe of the rechargeable energy storage subsystem |
+
+
+
+## Data reissue
+
+Topic: gbt32960/${clientid}/upstream/reinfo
+
+**Data format: omitted** (same as real-time data reporting)
+
+# Downstream
+
+> Request data flow direction: EMQX -> GBT/32960 Gateway -> Terminal
+
+> Response data flow: Terminal -> GBT/32960 Gateway -> EMQX
+
+Downstream topic: gbt32960/${clientid}/dnstream
+Upstream response topic: gbt32960/${clientid}/upstream/response
+
+## Parameters query
+
+
+
+**Req:**
+
+```json
+{
+ "Action": "Query",
+ "Total": 2,
+ "Ids": ["0x01", "0x02"]
+}
+```
+
+| Field | Type | Description |
+| -------- | ------- | ----------------------------------------------------------------------------------------------------|
+| `Action` | String | The type of downstream command, here is `Query` |
+| `Total` | Integer | Total number of query parameters |
+| `Ids` | Array | List of IDs that need to be queried.
For specific ID meanings, see the original protocol Table B.10 | + +**Response:** + +```json +{ + "Cmd": 128, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Total": 2, + "Params": [ + {"0x01": 6000}, + {"0x02": 10} + ], + "Time": { + "Day": 2, + "Hour": 11, + "Minute": 12, + "Month": 2, + "Second": 12, + "Year": 17 + } + } +} +``` + + + +## Parameters setting + +**Req:** +```json +{ + "Action": "Setting", + "Total": 2, + "Params": [{"0x01": 5000}, + {"0x02": 200}] +} +``` + +| Field | Type | Description | +| -------- | ------- | ----------------------------------------------| +| `Action` | String | Type of downstream command, here is `Setting` | +| `Total` | Integer | Set the total number of parameters | +| `Params` | Array | ID and value of parameters to be set | + +**Response:** + +```json +{ + "Cmd": 129, + "Encrypt": 1, + "Vin": "1G1BL52P7TR115520", + "Data": { + "Total": 2, + "Params": [ + {"0x01": 5000}, + {"0x02": 200} + ], + "Time": { + "Day": 2, + "Hour": 11, + "Minute": 12, + "Month": 2, + "Second": 12, + "Year": 17 + } + } +} +``` + +## Terminal control +**Different commands have different parameters; if there are no parameters, it will be empty** + +Remote Upgrade: +**Req:** + +```json +{ + "Action": "Control", + "Command": "0x01", + "Param": { + "DialingName": "hz203", + "Username": "user001", + "Password": "password01", + "Ip": "192.168.199.1", + "Port": 8080, + "ManufacturerId": "BMWA", + "HardwareVer": "1.0.0", + "SoftwareVer": "1.0.0", + "UpgradeUrl": "ftp://emqtt.io/ftp/server", + "Timeout": 10 + } +} +``` + +| Field | Type | Description | +| --------- | ------- | ----------------------------------------------------| +| `Action` | String | Type of command issued, here is `Control` | +| `Command` | Integer | Issued command ID, see original protocol table B.15 | +| `Param` | Object | Command parameters | + +The example list: + +Shut down the vehicle terminal: + +```json +{ + "Action": "Control", + "Command": "0x02" +} +``` + +... 
+ +Vehicle terminal alarm: +```json +{ + "Action": "Control", + "Command": "0x06", + "Param": {"Level": 0, "Message": "alarm message"} +} +``` diff --git a/apps/emqx_gateway_gbt32960/include/emqx_gbt32960.hrl b/apps/emqx_gateway_gbt32960/include/emqx_gbt32960.hrl new file mode 100644 index 000000000..2649f3f98 --- /dev/null +++ b/apps/emqx_gateway_gbt32960/include/emqx_gbt32960.hrl @@ -0,0 +1,80 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-record(frame, {cmd, ack, vin, encrypt, length, data, check, rawdata}). + +-type frame() :: #frame{}. + +-define(CMD(CmdType), #frame{ + cmd = CmdType, + ack = ?ACK_IS_CMD +}). + +-define(CMD(CmdType, Data), #frame{ + cmd = CmdType, + data = Data, + ack = ?ACK_IS_CMD +}). + +-define(IS_ACK_CODE(C), + (C == ?ACK_SUCCESS orelse + C == ?ACK_ERROR orelse + C == ?ACK_VIN_REPEAT) +). + +%%-------------------------------------------------------------------- +%% CMD Feilds +%%-------------------------------------------------------------------- +-define(CMD_VIHECLE_LOGIN, 16#01). +-define(CMD_INFO_REPORT, 16#02). +-define(CMD_INFO_RE_REPORT, 16#03). +-define(CMD_VIHECLE_LOGOUT, 16#04). +-define(CMD_PLATFORM_LOGIN, 16#05). +-define(CMD_PLATFORM_LOGOUT, 16#06). +-define(CMD_HEARTBEAT, 16#07). +-define(CMD_SCHOOL_TIME, 16#08). +% 0x09~0x7F: Reserved by upstream system +% 0x80~0x82: Reserved by terminal data +-define(CMD_PARAM_QUERY, 16#80). +-define(CMD_PARAM_SETTING, 16#81). +-define(CMD_TERMINAL_CTRL, 16#82). + +% 0x83~0xBF: Reserved by downstream system +% 0xC0~0xFE: Customized data for Platform Exchange Protocol + +%%-------------------------------------------------------------------- +%% ACK Feilds +%%-------------------------------------------------------------------- +-define(ACK_SUCCESS, 16#01). +-define(ACK_ERROR, 16#02). +-define(ACK_VIN_REPEAT, 16#03). 
+-define(ACK_IS_CMD, 16#FE). + +%%-------------------------------------------------------------------- +%% Encrypt Feilds +%%-------------------------------------------------------------------- +-define(ENCRYPT_NONE, 16#01). +-define(ENCRYPT_RSA, 16#02). +-define(ENCRYPT_AES128, 16#03). +-define(ENCRYPT_ABNORMAL, 16#FE). +-define(ENCRYPT_INVAILD, 16#FF). + +%%-------------------------------------------------------------------- +%% Info Type Flags +%%-------------------------------------------------------------------- +-define(INFO_TYPE_VEHICLE, 16#01). +-define(INFO_TYPE_DRIVE_MOTOR, 16#02). +-define(INFO_TYPE_FUEL_CELL, 16#03). +-define(INFO_TYPE_ENGINE, 16#04). +-define(INFO_TYPE_LOCATION, 16#05). +-define(INFO_TYPE_EXTREME, 16#06). +-define(INFO_TYPE_ALARM, 16#07). +-define(INFO_TYPE_CHARGEABLE_VOLTAGE, 16#08). +-define(INFO_TYPE_CHARGEABLE_TEMP, 16#09). +% 0x0A~0x2F: Customized data for Platform Exchange Protocol +% 0x30~0x7F: Reserved +% 0x80~0xFE: Customized by user + +-define(DEFAULT_MOUNTPOINT, <<"gbt32960/${clientid}/">>). +-define(DEFAULT_DOWNLINK_TOPIC, <<"dnstream">>). diff --git a/apps/emqx_gateway_gbt32960/rebar.config b/apps/emqx_gateway_gbt32960/rebar.config new file mode 100644 index 000000000..456746d25 --- /dev/null +++ b/apps/emqx_gateway_gbt32960/rebar.config @@ -0,0 +1,7 @@ +%% -*- mode: erlang -*- +{erl_opts, [debug_info]}. +{deps, [ + {emqx, {path, "../../apps/emqx"}}, + {emqx_utils, {path, "../emqx_utils"}}, + {emqx_gateway, {path, "../../apps/emqx_gateway"}} +]}. 
diff --git a/apps/emqx_gateway_gbt32960/src/emqx_gateway_gbt32960.app.src b/apps/emqx_gateway_gbt32960/src/emqx_gateway_gbt32960.app.src new file mode 100644 index 000000000..0ed2dca39 --- /dev/null +++ b/apps/emqx_gateway_gbt32960/src/emqx_gateway_gbt32960.app.src @@ -0,0 +1,11 @@ +%% -*- mode: erlang -*- +{application, emqx_gateway_gbt32960, [ + {description, "GBT32960 Gateway"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib, emqx, emqx_gateway]}, + {env, []}, + {modules, []}, + {licenses, ["BSL"]}, + {links, []} +]}. diff --git a/apps/emqx_gateway_gbt32960/src/emqx_gateway_gbt32960.erl b/apps/emqx_gateway_gbt32960/src/emqx_gateway_gbt32960.erl new file mode 100644 index 000000000..e4bcd4969 --- /dev/null +++ b/apps/emqx_gateway_gbt32960/src/emqx_gateway_gbt32960.erl @@ -0,0 +1,98 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +%% @doc The GBT32960 Gateway implement +-module(emqx_gateway_gbt32960). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_gateway/include/emqx_gateway.hrl"). + +%% define a gateway named gbt32960 +-gateway(#{ + name => gbt32960, + callback_module => ?MODULE, + config_schema_module => emqx_gbt32960_schema, + edition => ee +}). + +%% callback_module must implement the emqx_gateway_impl behaviour +-behaviour(emqx_gateway_impl). + +%% callback for emqx_gateway_impl +-export([ + on_gateway_load/2, + on_gateway_update/3, + on_gateway_unload/2 +]). + +-import( + emqx_gateway_utils, + [ + normalize_config/1, + start_listeners/4, + stop_listeners/2 + ] +). 
+ +%%-------------------------------------------------------------------- +%% emqx_gateway_impl callbacks +%%-------------------------------------------------------------------- + +on_gateway_load( + _Gateway = #{ + name := GwName, + config := Config + }, + Ctx +) -> + Listeners = normalize_config(Config), + ModCfg = #{ + frame_mod => emqx_gbt32960_frame, + chann_mod => emqx_gbt32960_channel + }, + case + start_listeners( + Listeners, GwName, Ctx, ModCfg + ) + of + {ok, ListenerPids} -> + %% FIXME: How to throw an exception to interrupt the restart logic ? + %% FIXME: Assign ctx to GwState + {ok, ListenerPids, _GwState = #{ctx => Ctx}}; + {error, {Reason, Listener}} -> + throw( + {badconf, #{ + key => listeners, + value => Listener, + reason => Reason + }} + ) + end. + +on_gateway_update(Config, Gateway, GwState = #{ctx := Ctx}) -> + GwName = maps:get(name, Gateway), + try + %% XXX: 1. How hot-upgrade the changes ??? + %% XXX: 2. Check the New confs first before destroy old state??? + on_gateway_unload(Gateway, GwState), + on_gateway_load(Gateway#{config => Config}, Ctx) + catch + Class:Reason:Stk -> + logger:error( + "Failed to update ~ts; " + "reason: {~0p, ~0p} stacktrace: ~0p", + [GwName, Class, Reason, Stk] + ), + {error, Reason} + end. + +on_gateway_unload( + _Gateway = #{ + name := GwName, + config := Config + }, + _GwState +) -> + Listeners = normalize_config(Config), + stop_listeners(GwName, Listeners). diff --git a/apps/emqx_gateway_gbt32960/src/emqx_gbt32960_channel.erl b/apps/emqx_gateway_gbt32960/src/emqx_gbt32960_channel.erl new file mode 100644 index 000000000..5cb65f104 --- /dev/null +++ b/apps/emqx_gateway_gbt32960/src/emqx_gbt32960_channel.erl @@ -0,0 +1,864 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_gbt32960_channel). +-behaviour(emqx_gateway_channel). 
+ +-include("emqx_gbt32960.hrl"). +-include_lib("emqx/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("emqx/include/emqx.hrl"). +-include_lib("emqx/include/emqx_mqtt.hrl"). + +-export([ + info/1, + info/2, + stats/1 +]). + +-export([ + init/2, + handle_in/2, + handle_deliver/2, + handle_timeout/3, + terminate/2, + set_conn_state/2 +]). + +-export([ + handle_call/3, + handle_cast/2, + handle_info/2 +]). + +-record(channel, { + %% Context + ctx :: emqx_gateway_ctx:context(), + %% ConnInfo + conninfo :: emqx_types:conninfo(), + %% ClientInfo + clientinfo :: emqx_types:clientinfo(), + %% Session + session :: undefined | map(), + %% Keepalive + keepalive :: maybe(emqx_keepalive:keepalive()), + %% Conn State + conn_state :: conn_state(), + %% Timers + timers :: #{atom() => undefined | disabled | reference()}, + %% Inflight + inflight :: emqx_inflight:inflight(), + %% Message Queue + mqueue :: queue:queue(), + retx_interval, + retx_max_times, + max_mqueue_len +}). + +-type conn_state() :: idle | connecting | connected | disconnected. + +-type channel() :: #channel{}. + +-type reply() :: + {outgoing, emqx_types:packet()} + | {outgoing, [emqx_types:packet()]} + | {event, conn_state() | updated} + | {close, Reason :: atom()}. + +-type replies() :: reply() | [reply()]. + +-define(TIMER_TABLE, #{ + alive_timer => keepalive, + retry_timer => retry_delivery +}). + +-define(INFO_KEYS, [conninfo, conn_state, clientinfo, session, will_msg]). + +-dialyzer({nowarn_function, init/2}). + +%%-------------------------------------------------------------------- +%% Info, Attrs and Caps +%%-------------------------------------------------------------------- + +%% @doc Get infos of the channel. +-spec info(channel()) -> emqx_types:infos(). +info(Channel) -> + maps:from_list(info(?INFO_KEYS, Channel)). + +-spec info(list(atom()) | atom(), channel()) -> term(). 
+info(Keys, Channel) when is_list(Keys) -> + [{Key, info(Key, Channel)} || Key <- Keys]; +info(ctx, #channel{ctx = Ctx}) -> + Ctx; +info(conninfo, #channel{conninfo = ConnInfo}) -> + ConnInfo; +info(zone, #channel{clientinfo = #{zone := Zone}}) -> + Zone; +info(clientid, #channel{clientinfo = #{clientid := ClientId}}) -> + ClientId; +info(clientinfo, #channel{clientinfo = ClientInfo}) -> + ClientInfo; +info(session, _) -> + #{}; +info(conn_state, #channel{conn_state = ConnState}) -> + ConnState; +info(keepalive, #channel{keepalive = undefined}) -> + undefined; +info(keepalive, #channel{keepalive = Keepalive}) -> + emqx_keepalive:info(Keepalive); +info(will_msg, _) -> + undefined. + +-spec stats(channel()) -> emqx_types:stats(). +stats(#channel{inflight = Inflight, mqueue = Queue}) -> + %% XXX: A fake stats for managed by emqx_management + [ + {subscriptions_cnt, 1}, + {subscriptions_max, 0}, + {inflight_cnt, emqx_inflight:size(Inflight)}, + {inflight_max, emqx_inflight:max_size(Inflight)}, + {mqueue_len, queue:len(Queue)}, + {mqueue_max, 0}, + {mqueue_dropped, 0}, + {next_pkt_id, 0}, + {awaiting_rel_cnt, 0}, + {awaiting_rel_max, 0} + ]. + +set_conn_state(ConnState, Channel) -> + Channel#channel{conn_state = ConnState}. 
+ +%%-------------------------------------------------------------------- +%% Init the Channel +%%-------------------------------------------------------------------- + +init( + ConnInfo = #{ + peername := {PeerHost, _Port}, + sockname := {_Host, SockPort} + }, + Options +) -> + % TODO: init rsa_key from user input + Peercert = maps:get(peercert, ConnInfo, undefined), + Mountpoint = maps:get(mountpoint, Options, ?DEFAULT_MOUNTPOINT), + ListenerId = + case maps:get(listener, Options, undefined) of + undefined -> undefined; + {GwName, Type, LisName} -> emqx_gateway_utils:listener_id(GwName, Type, LisName) + end, + EnableAuthn = maps:get(enable_authn, Options, true), + + ClientInfo = setting_peercert_infos( + Peercert, + #{ + zone => default, + listener => ListenerId, + protocol => gbt32960, + peerhost => PeerHost, + sockport => SockPort, + clientid => undefined, + username => undefined, + is_bridge => false, + is_superuser => false, + enable_authn => EnableAuthn, + mountpoint => Mountpoint + } + ), + + Ctx = maps:get(ctx, Options), + + #{ + retry_interval := RetxInterv, + max_retry_times := RetxMaxTime, + message_queue_len := MessageQueueLen + } = Options, + + #channel{ + ctx = Ctx, + conninfo = ConnInfo, + clientinfo = ClientInfo, + inflight = emqx_inflight:new(1), + mqueue = queue:new(), + timers = #{}, + conn_state = idle, + retx_interval = RetxInterv, + retx_max_times = RetxMaxTime, + max_mqueue_len = MessageQueueLen + }. + +setting_peercert_infos(NoSSL, ClientInfo) when + NoSSL =:= nossl; + NoSSL =:= undefined +-> + ClientInfo; +setting_peercert_infos(Peercert, ClientInfo) -> + {DN, CN} = {esockd_peercert:subject(Peercert), esockd_peercert:common_name(Peercert)}, + ClientInfo#{dn => DN, cn => CN}. 
+
+%%--------------------------------------------------------------------
+%% Handle incoming packet
+%%--------------------------------------------------------------------
+-spec handle_in(frame() | {frame_error, any()}, channel()) ->
+    {ok, channel()}
+    | {ok, replies(), channel()}
+    | {shutdown, Reason :: term(), channel()}
+    | {shutdown, Reason :: term(), replies(), channel()}.
+
+%% @doc Dispatch an inbound GBT/32960 frame on its command type.
+%% The vehicle-login frame drives the authentication pipeline; every other
+%% frame received before the channel reaches `connected' is a protocol error.
+handle_in(
+    Frame = ?CMD(?CMD_VIHECLE_LOGIN),
+    Channel
+) ->
+    case
+        emqx_utils:pipeline(
+            [
+                fun enrich_clientinfo/2,
+                fun enrich_conninfo/2,
+                fun set_log_meta/2,
+                %% TODO: How to implement the banned in the gateway instance?
+                %, fun check_banned/2
+                fun auth_connect/2
+            ],
+            Frame,
+            Channel#channel{conn_state = connecting}
+        )
+    of
+        {ok, _NPacket, NChannel} ->
+            process_connect(Frame, ensure_connected(NChannel));
+        {error, ReasonCode, NChannel} ->
+            log(warning, #{msg => "login_failed", reason => ReasonCode}, NChannel),
+            shutdown(ReasonCode, NChannel)
+    end;
+handle_in(_Frame, Channel = #channel{conn_state = ConnState}) when
+    ConnState =/= connected
+->
+    shutdown(protocol_error, Channel);
+handle_in(Frame = ?CMD(?CMD_INFO_REPORT), Channel) ->
+    %% Real-time report: forward upstream as-is; publish result ignored.
+    _ = upstreaming(Frame, Channel),
+    {ok, Channel};
+handle_in(Frame = ?CMD(?CMD_INFO_RE_REPORT), Channel) ->
+    _ = upstreaming(Frame, Channel),
+    {ok, Channel};
+handle_in(Frame = ?CMD(?CMD_VIHECLE_LOGOUT), Channel) ->
+    %% XXX: unsubscribe gbt32960/dnstream/${vin}?
+    _ = upstreaming(Frame, Channel),
+    {ok, Channel};
+handle_in(Frame = ?CMD(?CMD_PLATFORM_LOGIN), Channel) ->
+    %% Assert the expected payload shape; crash (and drop the connection)
+    %% on a malformed platform-login frame.
+    #{
+        <<"Username">> := _Username,
+        <<"Password">> := _Password
+    } = Frame#frame.data,
+    %% TODO:
+    _ = upstreaming(Frame, Channel),
+    {ok, Channel};
+handle_in(Frame = ?CMD(?CMD_PLATFORM_LOGOUT), Channel) ->
+    %% TODO:
+    _ = upstreaming(Frame, Channel),
+    {ok, Channel};
+handle_in(Frame = ?CMD(?CMD_HEARTBEAT), Channel) ->
+    handle_out({?ACK_SUCCESS, Frame}, Channel);
+handle_in(Frame = ?CMD(?CMD_SCHOOL_TIME), Channel) ->
+    %% TODO: How verify this request
+    handle_out({?ACK_SUCCESS, Frame}, Channel);
+handle_in(Frame = #frame{cmd = Cmd}, Channel = #channel{inflight = Inflight}) ->
+    %% A terminal ACK: drop the acknowledged command from the inflight
+    %% window, then try to deliver the next queued downlink frame.
+    {Outgoings, NChannel} = dispatch_frame(Channel#channel{inflight = ack_frame(Cmd, Inflight)}),
+    _ = upstreaming(Frame, NChannel),
+    {ok, [{outgoing, Outgoings}], NChannel};
+handle_in(Frame, Channel) ->
+    log(warning, #{msg => "unexpected_frame", frame => Frame}, Channel),
+    {ok, Channel}.
+
+%%--------------------------------------------------------------------
+%% Handle out
+%%--------------------------------------------------------------------
+
+%% @doc Wrap an ACK frame as an outgoing reply for the connection layer.
+handle_out({AckCode, Frame}, Channel) when
+    ?IS_ACK_CODE(AckCode)
+->
+    {ok, [{outgoing, ack(AckCode, Frame)}], Channel}.
+
+%% Same as handle_out/2 but prepends the ACK to already-pending replies.
+handle_out({AckCode, Frame}, Outgoings, Channel) when ?IS_ACK_CODE(AckCode) ->
+    {ok, [{outgoing, ack(AckCode, Frame)} | Outgoings], Channel}.
+
+%%--------------------------------------------------------------------
+%% Handle Delivers from broker to client
+%%--------------------------------------------------------------------
+-spec handle_deliver(list(emqx_types:deliver()), channel()) ->
+    {ok, channel()}
+    | {ok, replies(), channel()}.
+ +handle_deliver( + Messages0, + Channel = #channel{ + clientinfo = #{clientid := ClientId, mountpoint := Mountpoint}, + mqueue = Queue, + max_mqueue_len = MaxQueueLen + } +) -> + Messages = lists:map( + fun({deliver, _, M}) -> + emqx_mountpoint:unmount(Mountpoint, M) + end, + Messages0 + ), + case MaxQueueLen - queue:len(Queue) of + N when N =< 0 -> + discard_downlink_messages(Messages, Channel), + {ok, Channel}; + N -> + {NMessages, Dropped} = split_by_pos(Messages, N), + log(debug, #{msg => "enqueue_messages", messages => NMessages}, Channel), + metrics_inc('messages.delivered', Channel, erlang:length(NMessages)), + discard_downlink_messages(Dropped, Channel), + Frames = msgs2frame(NMessages, ClientId, Channel), + NQueue = lists:foldl(fun(F, Q) -> queue:in(F, Q) end, Queue, Frames), + {Outgoings, NChannel} = dispatch_frame(Channel#channel{mqueue = NQueue}), + {ok, [{outgoing, Outgoings}], NChannel} + end. + +split_by_pos(L, Pos) -> + split_by_pos(L, Pos, []). + +split_by_pos([], _, A1) -> + {lists:reverse(A1), []}; +split_by_pos(L, 0, A1) -> + {lists:reverse(A1), L}; +split_by_pos([E | L], N, A1) -> + split_by_pos(L, N - 1, [E | A1]). + +msgs2frame(Messages, Vin, Channel) -> + lists:filtermap( + fun(#message{payload = Payload}) -> + case emqx_utils_json:safe_decode(Payload, [return_maps]) of + {ok, Maps} -> + case msg2frame(Maps, Vin) of + {error, Reason} -> + log( + debug, + #{ + msg => "convert_message_to_frame_error", + reason => Reason, + data => Maps + }, + Channel + ), + false; + Frame -> + {true, Frame} + end; + {error, Reason} -> + log(error, #{msg => "json_decode_error", reason => Reason}, Channel), + false + end + end, + Messages + ). 
+
+%%--------------------------------------------------------------------
+%% Handle call
+%%--------------------------------------------------------------------
+
+-spec handle_call(Req :: term(), From :: term(), channel()) ->
+    {reply, Reply :: term(), channel()}
+    | {reply, Reply :: term(), replies(), channel()}
+    | {shutdown, Reason :: term(), Reply :: term(), channel()}
+    | {shutdown, Reason :: term(), Reply :: term(), frame(), channel()}.
+
+%% @doc Synchronous requests issued to the channel by the gateway layer.
+%% `kick' runs the disconnect hooks (via ensure_disconnected/2) before
+%% shutting down; `discard' shuts down without them; anything else is
+%% logged and answered with `ignored'.
+handle_call(kick, _From, Channel) ->
+    Channel1 = ensure_disconnected(kicked, Channel),
+    disconnect_and_shutdown(kicked, ok, Channel1);
+handle_call(discard, _From, Channel) ->
+    disconnect_and_shutdown(discarded, ok, Channel);
+handle_call(Req, _From, Channel) ->
+    log(error, #{msg => "unexpected_call", call => Req}, Channel),
+    reply(ignored, Channel).
+
+%%--------------------------------------------------------------------
+%% Handle cast
+%%--------------------------------------------------------------------
+
+-spec handle_cast(Req :: term(), channel()) ->
+    ok | {ok, channel()} | {shutdown, Reason :: term(), channel()}.
+%% All casts are ignored; the channel state is returned unchanged.
+handle_cast(_Req, Channel) ->
+    {ok, Channel}.
+
+%%--------------------------------------------------------------------
+%% Handle info
+%%--------------------------------------------------------------------
+
+-spec handle_info(Info :: term(), channel()) ->
+    ok | {ok, channel()} | {shutdown, Reason :: term(), channel()}.
+
+%% Raw messages from the connection process. `sock_closed' handling
+%% depends on the current connection state.
+handle_info({sock_closed, Reason}, Channel = #channel{conn_state = idle}) ->
+    shutdown(Reason, Channel);
+handle_info({sock_closed, Reason}, Channel = #channel{conn_state = connecting}) ->
+    shutdown(Reason, Channel);
+handle_info(
+    {sock_closed, Reason},
+    Channel =
+        #channel{
+            conn_state = connected
+        }
+) ->
+    %% A connected channel must run the disconnect hooks before shutdown.
+    NChannel = ensure_disconnected(Reason, Channel),
+    shutdown(Reason, NChannel);
+handle_info({sock_closed, Reason}, Channel = #channel{conn_state = disconnected}) ->
+    log(error, #{msg => "unexpected_sock_closed", reason => Reason}, Channel),
+    {ok, Channel};
+handle_info(Info, Channel) ->
+    %% NOTE: message previously contained a stray brace ("unexpected_info}").
+    log(error, #{msg => "unexpected_info", info => Info}, Channel),
+    {ok, Channel}.
+
+%%--------------------------------------------------------------------
+%% Handle timeout
+%%--------------------------------------------------------------------
+
+-spec handle_timeout(reference(), Msg :: term(), channel()) ->
+    {ok, channel()}
+    | {ok, replies(), channel()}
+    | {shutdown, Reason :: term(), channel()}.
+ +handle_timeout( + _TRef, + {keepalive, _StatVal}, + Channel = #channel{keepalive = undefined} +) -> + {ok, Channel}; +handle_timeout( + _TRef, + {keepalive, _StatVal}, + Channel = #channel{conn_state = disconnected} +) -> + {ok, Channel}; +handle_timeout( + _TRef, + {keepalive, StatVal}, + Channel = #channel{keepalive = Keepalive} +) -> + case emqx_keepalive:check(StatVal, Keepalive) of + {ok, NKeepalive} -> + NChannel = Channel#channel{keepalive = NKeepalive}, + {ok, reset_timer(alive_timer, NChannel)}; + {error, timeout} -> + shutdown(keepalive_timeout, Channel) + end; +handle_timeout( + _TRef, + retry_delivery, + Channel = #channel{inflight = Inflight, retx_interval = RetxInterv} +) -> + case emqx_inflight:is_empty(Inflight) of + true -> + {ok, clean_timer(retry_timer, Channel)}; + false -> + Frames = emqx_inflight:to_list(Inflight), + {Outgoings, NInflight} = retry_delivery( + Frames, erlang:system_time(millisecond), RetxInterv, Inflight, [] + ), + {Outgoings2, NChannel} = dispatch_frame(Channel#channel{inflight = NInflight}), + {ok, [{outgoing, Outgoings ++ Outgoings2}], reset_timer(retry_timer, NChannel)} + end; +handle_timeout(_TRef, Msg, Channel) -> + log(error, #{msg => "unexpected_timeout", content => Msg}, Channel), + {ok, Channel}. + +%%-------------------------------------------------------------------- +%% Ensure timers +%%-------------------------------------------------------------------- + +ensure_timer(Name, Channel = #channel{timers = Timers}) -> + TRef = maps:get(Name, Timers, undefined), + Time = interval(Name, Channel), + case TRef == undefined andalso Time > 0 of + true -> ensure_timer(Name, Time, Channel); + %% Timer disabled or exists + false -> Channel + end. + +ensure_timer(Name, Time, Channel = #channel{timers = Timers}) -> + log(debug, #{msg => "start_timer", name => Name, time => Time}, Channel), + Msg = maps:get(Name, ?TIMER_TABLE), + TRef = emqx_utils:start_timer(Time, Msg), + Channel#channel{timers = Timers#{Name => TRef}}. 
+ +reset_timer(Name, Channel) -> + ensure_timer(Name, clean_timer(Name, Channel)). + +clean_timer(Name, Channel = #channel{timers = Timers}) -> + Channel#channel{timers = maps:remove(Name, Timers)}. + +interval(alive_timer, #channel{keepalive = KeepAlive}) -> + emqx_keepalive:info(interval, KeepAlive); +interval(retry_timer, #channel{retx_interval = RetxIntv}) -> + RetxIntv. + +%%-------------------------------------------------------------------- +%% Terminate +%%-------------------------------------------------------------------- + +terminate(Reason, #channel{ + ctx = Ctx, + session = Session, + clientinfo = ClientInfo +}) -> + run_hooks(Ctx, 'session.terminated', [ClientInfo, Reason, Session]). + +%%-------------------------------------------------------------------- +%% Ensure connected + +enrich_clientinfo( + Packet, + Channel = #channel{ + clientinfo = ClientInfo + } +) -> + {ok, NPacket, NClientInfo} = emqx_utils:pipeline( + [ + fun maybe_assign_clientid/2, + %% FIXME: CALL After authentication successfully + fun fix_mountpoint/2 + ], + Packet, + ClientInfo + ), + {ok, NPacket, Channel#channel{clientinfo = NClientInfo}}. + +enrich_conninfo( + _Packet, + Channel = #channel{ + conninfo = ConnInfo, + clientinfo = ClientInfo + } +) -> + #{clientid := ClientId, username := Username} = ClientInfo, + NConnInfo = ConnInfo#{ + proto_name => <<"GBT32960">>, + proto_ver => <<"">>, + clean_start => true, + keepalive => 0, + expiry_interval => 0, + conn_props => #{}, + receive_maximum => 0, + clientid => ClientId, + username => Username + }, + {ok, Channel#channel{conninfo = NConnInfo}}. + +set_log_meta(_Packet, #channel{clientinfo = #{clientid := ClientId}}) -> + emqx_logger:set_metadata_clientid(ClientId), + ok. 
+ +auth_connect( + _Packet, + Channel = #channel{ + ctx = Ctx, + clientinfo = ClientInfo + } +) -> + #{ + clientid := ClientId, + username := Username + } = ClientInfo, + case emqx_gateway_ctx:authenticate(Ctx, ClientInfo) of + {ok, NClientInfo} -> + {ok, Channel#channel{clientinfo = NClientInfo}}; + {error, Reason} -> + ?SLOG(warning, #{ + msg => "client_login_failed", + clientid => ClientId, + username => Username, + reason => Reason + }), + {error, Reason} + end. + +ensure_connected( + Channel = #channel{ + ctx = Ctx, + conninfo = ConnInfo, + clientinfo = ClientInfo + } +) -> + NConnInfo = ConnInfo#{connected_at => erlang:system_time(millisecond)}, + ok = run_hooks(Ctx, 'client.connected', [ClientInfo, NConnInfo]), + Channel#channel{ + conninfo = NConnInfo, + conn_state = connected + }. + +process_connect( + Frame, + Channel = #channel{ + ctx = Ctx, + conninfo = ConnInfo, + clientinfo = ClientInfo + } +) -> + SessFun = fun(_, _) -> #{} end, + case + emqx_gateway_ctx:open_session( + Ctx, + true, + ClientInfo, + ConnInfo, + SessFun + ) + of + {ok, #{session := Session}} -> + NChannel = Channel#channel{session = Session}, + subscribe_downlink(?DEFAULT_DOWNLINK_TOPIC, Channel), + _ = upstreaming(Frame, NChannel), + %% XXX: connection_accepted is not defined by stomp protocol + _ = run_hooks(Ctx, 'client.connack', [ConnInfo, connection_accepted, #{}]), + handle_out({?ACK_SUCCESS, Frame}, [{event, connected}], NChannel); + {error, Reason} -> + log( + error, + #{ + msg => "failed_to_open_session", + reason => Reason + }, + Channel + ), + shutdown(Reason, Channel) + end. + +maybe_assign_clientid(#frame{vin = Vin}, ClientInfo) -> + {ok, ClientInfo#{clientid => Vin, username => Vin}}. + +fix_mountpoint(_Packet, #{mountpoint := undefined}) -> + ok; +fix_mountpoint(_Packet, ClientInfo = #{mountpoint := Mountpoint}) -> + %% TODO: Enrich the variable replacement???? 
+ %% i.e: ${ClientInfo.auth_result.productKey} + Mountpoint1 = emqx_mountpoint:replvar(Mountpoint, ClientInfo), + {ok, ClientInfo#{mountpoint := Mountpoint1}}. + +%%-------------------------------------------------------------------- +%% Ensure disconnected + +ensure_disconnected( + Reason, + Channel = #channel{ + ctx = Ctx, + conninfo = ConnInfo, + clientinfo = ClientInfo + } +) -> + NConnInfo = ConnInfo#{disconnected_at => erlang:system_time(millisecond)}, + ok = run_hooks( + Ctx, + 'client.disconnected', + [ClientInfo, Reason, NConnInfo] + ), + Channel#channel{conninfo = NConnInfo, conn_state = disconnected}. + +%%-------------------------------------------------------------------- +%% Helper functions +%%-------------------------------------------------------------------- + +run_hooks(Ctx, Name, Args) -> + emqx_gateway_ctx:metrics_inc(Ctx, Name), + emqx_hooks:run(Name, Args). + +reply(Reply, Channel) -> + {reply, Reply, Channel}. + +shutdown(Reason, Channel) -> + {shutdown, Reason, Channel}. + +shutdown(Reason, Reply, Channel) -> + {shutdown, Reason, Reply, Channel}. + +disconnect_and_shutdown(Reason, Reply, Channel) -> + shutdown(Reason, Reply, Channel). + +retry_delivery([], _Now, _Interval, Inflight, Acc) -> + {lists:reverse(Acc), Inflight}; +retry_delivery([{Key, {_Frame, 0, _}} | Frames], Now, Interval, Inflight, Acc) -> + %% todo log(error, "has arrived max re-send times, drop ~p", [Frame]), + NInflight = emqx_inflight:delete(Key, Inflight), + retry_delivery(Frames, Now, Interval, NInflight, Acc); +retry_delivery([{Key, {Frame, RetxCount, Ts}} | Frames], Now, Interval, Inflight, Acc) -> + Diff = Now - Ts, + case Diff >= Interval of + true -> + NInflight = emqx_inflight:update(Key, {Frame, RetxCount - 1, Now}, Inflight), + retry_delivery(Frames, Now, Interval, NInflight, [Frame | Acc]); + _ -> + retry_delivery(Frames, Now, Interval, Inflight, Acc) + end. 
+ +upstreaming( + Frame, Channel = #channel{clientinfo = #{mountpoint := Mountpoint, clientid := ClientId}} +) -> + {Topic, Payload} = transform(Frame, Mountpoint), + log(debug, #{msg => "upstreaming_to_topic", topic => Topic, payload => Payload}, Channel), + emqx:publish(emqx_message:make(ClientId, ?QOS_1, Topic, Payload)). + +transform(Frame = ?CMD(Cmd), Mountpoint) -> + Suffix = + case Cmd of + ?CMD_VIHECLE_LOGIN -> <<"upstream/vlogin">>; + ?CMD_INFO_REPORT -> <<"upstream/info">>; + ?CMD_INFO_RE_REPORT -> <<"upstream/reinfo">>; + ?CMD_VIHECLE_LOGOUT -> <<"upstream/vlogout">>; + ?CMD_PLATFORM_LOGIN -> <<"upstream/plogin">>; + ?CMD_PLATFORM_LOGOUT -> <<"upstream/plogout">>; + %CMD_HEARTBEAT, CMD_SCHOOL_TIME ... + _ -> <<"upstream/transparent">> + end, + Topic = emqx_mountpoint:mount(Mountpoint, Suffix), + Payload = to_json(Frame), + {Topic, Payload}; +transform(Frame = #frame{ack = Ack}, Mountpoint) when + ?IS_ACK_CODE(Ack) +-> + Topic = emqx_mountpoint:mount(Mountpoint, <<"upstream/response">>), + Payload = to_json(Frame), + {Topic, Payload}. + +to_json(#frame{cmd = Cmd, vin = Vin, encrypt = Encrypt, data = Data}) -> + emqx_utils_json:encode(#{'Cmd' => Cmd, 'Vin' => Vin, 'Encrypt' => Encrypt, 'Data' => Data}). + +ack(Code, Frame = #frame{data = Data, ack = ?ACK_IS_CMD}) -> + % PROTO: Update time & ack feilds only + Frame#frame{ack = Code, data = Data#{<<"Time">> => gentime()}}. + +ack_frame(Key, Inflight) -> + case emqx_inflight:contain(Key, Inflight) of + true -> emqx_inflight:delete(Key, Inflight); + false -> Inflight + end. 
+
+%% @doc Pop the next queued downlink frame (if the inflight window has
+%% room), record it in the inflight table keyed by command id with the
+%% maximum retransmission budget, and (re)arm the retry timer.
+dispatch_frame(
+    Channel = #channel{
+        mqueue = Queue,
+        inflight = Inflight,
+        retx_max_times = RetxMax
+    }
+) ->
+    case emqx_inflight:is_full(Inflight) orelse queue:is_empty(Queue) of
+        true ->
+            {[], Channel};
+        false ->
+            {{value, Frame}, NewQueue} = queue:out(Queue),
+
+            log(debug, #{msg => "delivery", frame => Frame}, Channel),
+
+            NewInflight = emqx_inflight:insert(
+                Frame#frame.cmd, {Frame, RetxMax, erlang:system_time(millisecond)}, Inflight
+            ),
+            NChannel = Channel#channel{mqueue = NewQueue, inflight = NewInflight},
+            {[Frame], ensure_timer(retry_timer, NChannel)}
+    end.
+
+%% @doc Current local time as a GBT/32960 `Time' map, with a two-digit
+%% year (e.g. 2023 -> 23) as the protocol's time fields require.
+gentime() ->
+    {Year, Mon, Day} = date(),
+    {Hour, Min, Sec} = time(),
+    Year1 = list_to_integer(string:substr(integer_to_list(Year), 3, 2)),
+    #{
+        <<"Year">> => Year1,
+        <<"Month">> => Mon,
+        <<"Day">> => Day,
+        <<"Hour">> => Hour,
+        <<"Minute">> => Min,
+        <<"Second">> => Sec
+    }.
+
+%%--------------------------------------------------------------------
+%% Message to frame
+%%--------------------------------------------------------------------
+
+%% @doc Convert a decoded downlink JSON map into a #frame{} command.
+%% Returns {error, unsupported} for any unrecognized `Action'.
+msg2frame(#{<<"Action">> := <<"Query">>, <<"Total">> := Total, <<"Ids">> := Ids}, Vin) ->
+    % Ids = [<<"0x01">>, <<"0x02">>] --> [1, 2]
+    Data = #{
+        <<"Time">> => gentime(),
+        <<"Total">> => Total,
+        <<"Ids">> => lists:map(fun hexstring_to_byte/1, Ids)
+    },
+    #frame{
+        cmd = ?CMD_PARAM_QUERY, ack = ?ACK_IS_CMD, vin = Vin, encrypt = ?ENCRYPT_NONE, data = Data
+    };
+msg2frame(#{<<"Action">> := <<"Setting">>, <<"Total">> := Total, <<"Params">> := Params}, Vin) ->
+    % Params = [#{<<"0x01">> := 5000}, #{<<"0x02">> := 400}]
+    % Params1 = [#{1 := 5000}, #{2 := 400}]
+    Params1 = lists:foldr(
+        fun(M, Acc) ->
+            [{K, V}] = maps:to_list(M),
+            [#{hexstring_to_byte(K) => V} | Acc]
+        end,
+        [],
+        Params
+    ),
+    Data = #{<<"Time">> => gentime(), <<"Total">> => Total, <<"Params">> => Params1},
+    #frame{
+        cmd = ?CMD_PARAM_SETTING, ack = ?ACK_IS_CMD, vin = Vin, encrypt = ?ENCRYPT_NONE, data = Data
+    };
+msg2frame(Data = #{<<"Action">> := <<"Control">>, <<"Command">> := Command}, Vin) ->
+    Param = maps:get(<<"Param">>, Data, <<>>),
+    Data1 = #{
+        <<"Time">> => gentime(),
+        <<"Command">> => hexstring_to_byte(Command),
+        <<"Param">> => Param
+    },
+    #frame{
+        cmd = ?CMD_TERMINAL_CTRL,
+        ack = ?ACK_IS_CMD,
+        vin = Vin,
+        encrypt = ?ENCRYPT_NONE,
+        data = Data1
+    };
+msg2frame(_Data, _Vin) ->
+    %% NOTE: reason atom was previously misspelled as 'unsupproted'.
+    {error, unsupported}.
+
+%% Parse "0xNN" (hex) or "NN" (decimal) strings/binaries into a byte.
+%% NOTE(review): only the upper bound is checked by tune_byte/1;
+%% negative inputs would pass through — confirm caller input is validated.
+hexstring_to_byte(S) when is_binary(S) ->
+    hexstring_to_byte(binary_to_list(S));
+hexstring_to_byte("0x" ++ S) ->
+    tune_byte(list_to_integer(S, 16));
+hexstring_to_byte(S) ->
+    tune_byte(list_to_integer(S)).
+
+tune_byte(I) when I =< 16#FF -> I;
+tune_byte(_) -> exit(invalid_byte).
+
+%% Log and count downlink messages dropped because the queue is full.
+discard_downlink_messages([], _Channel) ->
+    ok;
+discard_downlink_messages(Messages, Channel) ->
+    log(
+        error,
+        #{
+            msg => "discard_new_downlink_messages",
+            reason =>
+                "Discard new downlink messages due to that too"
+                " many messages are waiting their ACKs.",
+            messages => Messages
+        },
+        Channel
+    ),
+    metrics_inc('delivery.dropped', Channel, erlang:length(Messages)).
+
+%% Structured logging enriched with the channel's client identity.
+log(Level, Meta, #channel{clientinfo = #{clientid := ClientId, username := Username}} = _Channel) ->
+    ?SLOG(Level, Meta#{clientid => ClientId, username => Username}).
+
+metrics_inc(Name, #channel{ctx = Ctx}, Oct) ->
+    emqx_gateway_ctx:metrics_inc(Ctx, Name, Oct).
+
+%% Subscribe the channel to its mounted downlink topic and fire the
+%% 'session.subscribed' hook.
+subscribe_downlink(
+    Topic,
+    #channel{
+        ctx = Ctx,
+        clientinfo =
+            ClientInfo =
+                #{
+                    clientid := ClientId,
+                    mountpoint := Mountpoint
+                }
+    }
+) ->
+    {ParsedTopic, SubOpts0} = emqx_topic:parse(Topic),
+    SubOpts = maps:merge(emqx_gateway_utils:default_subopts(), SubOpts0),
+    MountedTopic = emqx_mountpoint:mount(Mountpoint, ParsedTopic),
+    _ = emqx_broker:subscribe(MountedTopic, ClientId, SubOpts),
+    run_hooks(Ctx, 'session.subscribed', [ClientInfo, MountedTopic, SubOpts]).
diff --git a/apps/emqx_gateway_gbt32960/src/emqx_gbt32960_frame.erl b/apps/emqx_gateway_gbt32960/src/emqx_gbt32960_frame.erl new file mode 100644 index 000000000..f4b679711 --- /dev/null +++ b/apps/emqx_gateway_gbt32960/src/emqx_gbt32960_frame.erl @@ -0,0 +1,808 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_gbt32960_frame). + +-behaviour(emqx_gateway_frame). + +-include("emqx_gbt32960.hrl"). +-include_lib("emqx/include/logger.hrl"). + +%% emqx_gateway_frame callbacks +-export([ + initial_parse_state/1, + serialize_opts/0, + serialize_pkt/2, + parse/2, + format/1, + type/1, + is_message/1 +]). + +-define(FLAG, 1 / binary). +-define(BYTE, 8 / big - integer). +-define(WORD, 16 / big - integer). +-define(DWORD, 32 / big - integer). +%% CMD: 1, ACK: 1, VIN: 17, Enc: 1, Len: 2 +-define(HEADER_SIZE, 22). + +-define(IS_RESPONSE(Ack), + Ack == ?ACK_SUCCESS orelse + Ack == ?ACK_ERROR orelse + Ack == ?ACK_VIN_REPEAT +). + +-type phase() :: search_heading | parse. + +-type parser_state() :: #{ + data := binary(), + phase := phase() +}. + +-ifdef(TEST). +-export([serialize/1]). +-endif. + +%%-------------------------------------------------------------------- +%% Init a Parser +%%-------------------------------------------------------------------- + +-spec initial_parse_state(map()) -> parser_state(). +initial_parse_state(_) -> + #{data => <<>>, phase => search_heading}. + +-spec serialize_opts() -> emqx_gateway_frame:serialize_options(). +serialize_opts() -> + #{}. 
+ +%%-------------------------------------------------------------------- +%% Parse Message +%%-------------------------------------------------------------------- +parse(Bin, State) -> + case enter_parse(Bin, State) of + {ok, Message, Rest} -> + {ok, Message, Rest, State#{data => <<>>, phase => search_heading}}; + {error, Error} -> + {error, Error}; + {more_data_follow, Partial} -> + {more, State#{data => Partial, phase => parse}} + end. + +enter_parse(Bin, #{phase := search_heading}) -> + case search_heading(Bin) of + {ok, Rest} -> + parse_msg(Rest); + Error -> + Error + end; +enter_parse(Bin, #{data := Data}) -> + parse_msg(<>). + +search_heading(<<16#23, 16#23, Rest/binary>>) -> + {ok, Rest}; +search_heading(<<_, Rest/binary>>) -> + search_heading(Rest); +search_heading(<<>>) -> + {error, invalid_frame}. + +parse_msg(Binary) -> + case byte_size(Binary) >= ?HEADER_SIZE of + true -> + {Frame, Rest2} = parse_header(Binary), + case byte_size(Rest2) >= Frame#frame.length + 1 of + true -> parse_body(Rest2, Frame); + false -> {more_data_follow, Binary} + end; + false -> + {more_data_follow, Binary} + end. + +parse_header(<> = Binary) -> + Check = cal_check(Binary, ?HEADER_SIZE, undefined), + { + #frame{cmd = Cmd, ack = Ack, vin = VIN, encrypt = Encrypt, length = Length, check = Check}, + Rest2 + }. + +parse_body(Binary, Frame = #frame{length = Length, check = OldCheck, encrypt = Encrypt}) -> + <> = Binary, + Check = cal_check(Binary, Length, OldCheck), + case CheckByte == Check of + true -> + RawData = decipher(Data, Encrypt), + {ok, Frame#frame{data = parse_data(Frame, RawData), rawdata = RawData}, Rest}; + false -> + {error, frame_check_error} + end. + +% Algo: ?ENCRYPT_NONE, ENCRYPT_RSA, ENCRYPT_AES128 +decipher(Data, _Algo) -> + % TODO: decypher data + Data. + +% Algo: ?ENCRYPT_NONE, ENCRYPT_RSA, ENCRYPT_AES128 +encipher(Data, _Algo) -> + % TODO: encipher data + Data. 
+ +parse_data( + #frame{cmd = ?CMD_VIHECLE_LOGIN}, + <> +) -> + #{ + <<"Time">> => #{ + <<"Year">> => Year, + <<"Month">> => Month, + <<"Day">> => Day, + <<"Hour">> => Hour, + <<"Minute">> => Minute, + <<"Second">> => Second + }, + <<"Seq">> => Seq, + <<"ICCID">> => ICCID, + <<"Num">> => Num, + <<"Length">> => Length, + <<"Id">> => Id + }; +parse_data( + #frame{cmd = ?CMD_INFO_REPORT}, + <> +) -> + #{ + <<"Time">> => #{ + <<"Year">> => Year, + <<"Month">> => Month, + <<"Day">> => Day, + <<"Hour">> => Hour, + <<"Minute">> => Minute, + <<"Second">> => Second + }, + <<"Infos">> => parse_info(Infos, []) + }; +parse_data( + #frame{cmd = ?CMD_INFO_RE_REPORT}, + <> +) -> + #{ + <<"Time">> => #{ + <<"Year">> => Year, + <<"Month">> => Month, + <<"Day">> => Day, + <<"Hour">> => Hour, + <<"Minute">> => Minute, + <<"Second">> => Second + }, + <<"Infos">> => parse_info(Infos, []) + }; +parse_data( + #frame{cmd = ?CMD_VIHECLE_LOGOUT}, + <> +) -> + #{ + <<"Time">> => #{ + <<"Year">> => Year, + <<"Month">> => Month, + <<"Day">> => Day, + <<"Hour">> => Hour, + <<"Minute">> => Minute, + <<"Second">> => Second + }, + <<"Seq">> => Seq + }; +parse_data( + #frame{cmd = ?CMD_PLATFORM_LOGIN}, + <> +) -> + #{ + <<"Time">> => #{ + <<"Year">> => Year, + <<"Month">> => Month, + <<"Day">> => Day, + <<"Hour">> => Hour, + <<"Minute">> => Minute, + <<"Second">> => Second + }, + <<"Seq">> => Seq, + <<"Username">> => Username, + <<"Password">> => Password, + <<"Encrypt">> => Encrypt + }; +parse_data( + #frame{cmd = ?CMD_PLATFORM_LOGOUT}, + <> +) -> + #{ + <<"Time">> => #{ + <<"Year">> => Year, + <<"Month">> => Month, + <<"Day">> => Day, + <<"Hour">> => Hour, + <<"Minute">> => Minute, + <<"Second">> => Second + }, + <<"Seq">> => Seq + }; +parse_data(#frame{cmd = ?CMD_HEARTBEAT}, <<>>) -> + #{}; +parse_data(#frame{cmd = ?CMD_SCHOOL_TIME}, <<>>) -> + #{}; +parse_data( + #frame{cmd = ?CMD_PARAM_QUERY}, + <> +) -> + %% XXX: need check ACK filed? 
+ #{ + <<"Time">> => #{ + <<"Year">> => Year, + <<"Month">> => Month, + <<"Day">> => Day, + <<"Hour">> => Hour, + <<"Minute">> => Minute, + <<"Second">> => Second + }, + <<"Total">> => Total, + <<"Params">> => parse_params(Rest) + }; +parse_data( + #frame{cmd = ?CMD_PARAM_SETTING}, + <> +) -> + ?SLOG(debug, #{msg => "rest", data => Rest}), + #{ + <<"Time">> => #{ + <<"Year">> => Year, + <<"Month">> => Month, + <<"Day">> => Day, + <<"Hour">> => Hour, + <<"Minute">> => Minute, + <<"Second">> => Second + }, + <<"Total">> => Total, + <<"Params">> => parse_params(Rest) + }; +parse_data( + #frame{cmd = ?CMD_TERMINAL_CTRL}, + <> +) -> + #{ + <<"Time">> => #{ + <<"Year">> => Year, + <<"Month">> => Month, + <<"Day">> => Day, + <<"Hour">> => Hour, + <<"Minute">> => Minute, + <<"Second">> => Second + }, + <<"Command">> => Command, + <<"Param">> => parse_ctrl_param(Command, Rest) + }; +parse_data(Frame, Data) -> + ?SLOG(error, #{msg => "invalid_frame", frame => Frame, data => Data}), + error(invalid_frame). 
+ +%%-------------------------------------------------------------------- +%% Parse Report Data Info +%%-------------------------------------------------------------------- + +parse_info(<<>>, Acc) -> + lists:reverse(Acc); +parse_info(<>, Acc) -> + <> = Body, + parse_info(Rest, [ + #{ + <<"Type">> => <<"Vehicle">>, + <<"Status">> => Status, + <<"Charging">> => Charging, + <<"Mode">> => Mode, + <<"Speed">> => Speed, + <<"Mileage">> => Mileage, + <<"Voltage">> => Voltage, + <<"Current">> => Current, + <<"SOC">> => SOC, + <<"DC">> => DC, + <<"Gear">> => Gear, + <<"Resistance">> => Resistance, + <<"AcceleratorPedal">> => AcceleratorPedal, + <<"BrakePedal">> => BrakePedal + } + | Acc + ]); +parse_info(<>, Acc) -> + % 12 is packet len of per drive motor + Len = Number * 12, + <> = Rest, + parse_info(Rest1, [ + #{ + <<"Type">> => <<"DriveMotor">>, + <<"Number">> => Number, + <<"Motors">> => parse_drive_motor(Bodys, []) + } + | Acc + ]); +parse_info(<>, Acc) -> + <> = + Rest, + + <> = Rest1, + + <> = Rest2, + parse_info(Rest3, [ + #{ + <<"Type">> => <<"FuelCell">>, + <<"CellVoltage">> => CellVoltage, + <<"CellCurrent">> => CellCurrent, + <<"FuelConsumption">> => FuelConsumption, + <<"ProbeNum">> => ProbeNum, + <<"ProbeTemps">> => binary_to_list(ProbeTemps), + <<"H_MaxTemp">> => HMaxTemp, + <<"H_TempProbeCode">> => HTempProbeCode, + <<"H_MaxConc">> => HMaxConc, + <<"H_ConcSensorCode">> => HConcSensorCode, + <<"H_MaxPress">> => HMaxPress, + <<"H_PressSensorCode">> => HPressSensorCode, + <<"DCStatus">> => DCStatus + } + | Acc + ]); +parse_info( + <>, + Acc +) -> + parse_info(Rest, [ + #{ + <<"Type">> => <<"Engine">>, + <<"Status">> => Status, + <<"CrankshaftSpeed">> => CrankshaftSpeed, + <<"FuelConsumption">> => FuelConsumption + } + | Acc + ]); +parse_info( + <>, Acc +) -> + parse_info(Rest, [ + #{ + <<"Type">> => <<"Location">>, + <<"Status">> => Status, + <<"Longitude">> => Longitude, + <<"Latitude">> => Latitude + } + | Acc + ]); +parse_info(<>, Acc) -> + <> = Body, + + 
parse_info(Rest, [ + #{ + <<"Type">> => <<"Extreme">>, + <<"MaxVoltageBatterySubsysNo">> => MaxVoltageBatterySubsysNo, + <<"MaxVoltageBatteryCode">> => MaxVoltageBatteryCode, + <<"MaxBatteryVoltage">> => MaxBatteryVoltage, + <<"MinVoltageBatterySubsysNo">> => MinVoltageBatterySubsysNo, + <<"MinVoltageBatteryCode">> => MinVoltageBatteryCode, + <<"MinBatteryVoltage">> => MinBatteryVoltage, + <<"MaxTempSubsysNo">> => MaxTempSubsysNo, + <<"MaxTempProbeNo">> => MaxTempProbeNo, + <<"MaxTemp">> => MaxTemp, + <<"MinTempSubsysNo">> => MinTempSubsysNo, + <<"MinTempProbeNo">> => MinTempProbeNo, + <<"MinTemp">> => MinTemp + } + | Acc + ]); +parse_info(<>, Acc) -> + <> = + Rest, + N1 = FaultChargeableDeviceNum * 4, + <> = Rest1, + N2 = FaultDriveMotorNum * 4, + <> = Rest2, + N3 = FaultEngineNum * 4, + <> = Rest3, + N4 = FaultOthersNum * 4, + <> = Rest4, + parse_info(Rest5, [ + #{ + <<"Type">> => <<"Alarm">>, + <<"MaxAlarmLevel">> => MaxAlarmLevel, + <<"GeneralAlarmFlag">> => GeneralAlarmFlag, + <<"FaultChargeableDeviceNum">> => FaultChargeableDeviceNum, + <<"FaultChargeableDeviceList">> => tune_fault_codelist(FaultChargeableDeviceList), + <<"FaultDriveMotorNum">> => FaultDriveMotorNum, + <<"FaultDriveMotorList">> => tune_fault_codelist(FaultDriveMotorList), + <<"FaultEngineNum">> => FaultEngineNum, + <<"FaultEngineList">> => tune_fault_codelist(FaultEngineList), + <<"FaultOthersNum">> => FaultOthersNum, + <<"FaultOthersList">> => tune_fault_codelist(FaultOthersList) + } + | Acc + ]); +parse_info(<>, Acc) -> + {Rest1, SubSystems} = parse_chargeable_voltage(Rest, Number, []), + parse_info(Rest1, [ + #{ + <<"Type">> => <<"ChargeableVoltage">>, + <<"Number">> => Number, + <<"SubSystems">> => SubSystems + } + | Acc + ]); +parse_info(<>, Acc) -> + {Rest1, SubSystems} = parse_chargeable_temp(Rest, Number, []), + parse_info(Rest1, [ + #{ + <<"Type">> => <<"ChargeableTemp">>, + <<"Number">> => Number, + <<"SubSystems">> => SubSystems + } + | Acc + ]); +parse_info(Rest, Acc) -> + 
?SLOG(error, #{msg => "invalid_info_feild", rest => Rest, acc => Acc}), + error(invalid_info_feild). + +parse_drive_motor(<<>>, Acc) -> + lists:reverse(Acc); +parse_drive_motor( + <>, + Acc +) -> + parse_drive_motor(Rest, [ + #{ + <<"No">> => No, + <<"Status">> => Status, + <<"CtrlTemp">> => CtrlTemp, + <<"Rotating">> => Rotating, + <<"Torque">> => Torque, + <<"MotorTemp">> => MotorTemp, + <<"InputVoltage">> => InputVoltage, + <<"DCBusCurrent">> => DCBusCurrent + } + | Acc + ]). + +parse_chargeable_voltage(Rest, 0, Acc) -> + {Rest, lists:reverse(Acc)}; +parse_chargeable_voltage( + <>, + Num, + Acc +) -> + Len = FrameCellsCount * 2, + <> = Rest, + parse_chargeable_voltage(Rest1, Num - 1, [ + #{ + <<"ChargeableSubsysNo">> => ChargeableSubsysNo, + <<"ChargeableVoltage">> => ChargeableVoltage, + <<"ChargeableCurrent">> => ChargeableCurrent, + <<"CellsTotal">> => CellsTotal, + <<"FrameCellsIndex">> => FrameCellsIndex, + <<"FrameCellsCount">> => FrameCellsCount, + <<"CellsVoltage">> => tune_voltage(CellsVoltage) + } + | Acc + ]). + +parse_chargeable_temp(Rest, 0, Acc) -> + {Rest, lists:reverse(Acc)}; +parse_chargeable_temp(<>, Num, Acc) -> + <> = Rest, + parse_chargeable_temp(Rest1, Num - 1, [ + #{ + <<"ChargeableSubsysNo">> => ChargeableSubsysNo, + <<"ProbeNum">> => ProbeNum, + <<"ProbesTemp">> => binary_to_list(ProbesTemp) + } + | Acc + ]). +tune_fault_codelist(<<>>) -> + []; +tune_fault_codelist(Data) -> + lists:flatten([list_to_binary(io_lib:format("~4.16.0B", [X])) || <> <= Data]). + +tune_voltage(Bin) -> tune_voltage_(Bin, []). +tune_voltage_(<<>>, Acc) -> lists:reverse(Acc); +tune_voltage_(<>, Acc) -> tune_voltage_(Rest, [V | Acc]). + +parse_params(Bin) -> parse_params_(Bin, []). 
+parse_params_(<<>>, Acc) -> + lists:reverse(Acc); +parse_params_(<<16#01, Val:?WORD, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x01">> => Val} | Acc]); +parse_params_(<<16#02, Val:?WORD, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x02">> => Val} | Acc]); +parse_params_(<<16#03, Val:?WORD, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x03">> => Val} | Acc]); +parse_params_(<<16#04, Val:?BYTE, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x04">> => Val} | Acc]); +parse_params_(<<16#05, Rest/binary>>, Acc) -> + case [V || #{<<"0x04">> := V} <- Acc] of + [Len] -> + <> = Rest, + parse_params_(Rest1, [#{<<"0x05">> => Val} | Acc]); + _ -> + ?SLOG(error, #{ + msg => "invalid_data", reason => "cmd_0x04 must appear ahead of cmd_0x05" + }), + lists:reverse(Acc) + end; +parse_params_(<<16#06, Val:?WORD, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x06">> => Val} | Acc]); +parse_params_(<<16#07, Val:5/binary, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x07">> => Val} | Acc]); +parse_params_(<<16#08, Val:5/binary, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x08">> => Val} | Acc]); +parse_params_(<<16#09, Val:?BYTE, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x09">> => Val} | Acc]); +parse_params_(<<16#0A, Val:?WORD, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x0A">> => Val} | Acc]); +parse_params_(<<16#0B, Val:?WORD, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x0B">> => Val} | Acc]); +parse_params_(<<16#0C, Val:?BYTE, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x0C">> => Val} | Acc]); +parse_params_(<<16#0D, Val:?BYTE, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x0D">> => Val} | Acc]); +parse_params_(<<16#0E, Rest/binary>>, Acc) -> + case [V || #{<<"0x0D">> := V} <- Acc] of + [Len] -> + <> = Rest, + parse_params_(Rest1, [#{<<"0x0E">> => Val} | Acc]); + _ -> + ?SLOG(error, #{ + msg => "invalid_data", reason => "cmd_0x0D must appear ahead of cmd_0x0E" + }), + lists:reverse(Acc) + end; 
+parse_params_(<<16#0F, Val:?WORD, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x0F">> => Val} | Acc]); +parse_params_(<<16#10, Val:?BYTE, Rest/binary>>, Acc) -> + parse_params_(Rest, [#{<<"0x10">> => Val} | Acc]); +parse_params_(Cmd, Acc) -> + ?SLOG(error, #{msg => "unexcepted_param_identifier", cmd => Cmd}), + lists:reverse(Acc). + +parse_ctrl_param(16#01, Param) -> + parse_upgrade_feild(Param); +parse_ctrl_param(16#02, _) -> + <<>>; +parse_ctrl_param(16#03, _) -> + <<>>; +parse_ctrl_param(16#04, _) -> + <<>>; +parse_ctrl_param(16#05, _) -> + <<>>; +parse_ctrl_param(16#06, <>) -> + #{<<"Level">> => Level, <<"Message">> => Msg}; +parse_ctrl_param(16#07, _) -> + <<>>; +parse_ctrl_param(Cmd, Param) -> + ?SLOG(error, #{msg => "unexcepted_param", param => Param, cmd => Cmd}), + <<>>. + +parse_upgrade_feild(Param) -> + [ + DialingName, + Username, + Password, + <<0, 0, I1, I2, I3, I4>>, + <>, + ManufacturerId, + HardwareVer, + SoftwareVer, + UpgradeUrl, + <> + ] = re:split(Param, ";", [{return, binary}]), + + #{ + <<"DialingName">> => DialingName, + <<"Username">> => Username, + <<"Password">> => Password, + <<"Ip">> => list_to_binary(inet:ntoa({I1, I2, I3, I4})), + <<"Port">> => Port, + <<"ManufacturerId">> => ManufacturerId, + <<"HardwareVer">> => HardwareVer, + <<"SoftwareVer">> => SoftwareVer, + <<"UpgradeUrl">> => UpgradeUrl, + <<"Timeout">> => Timeout + }. + +%%-------------------------------------------------------------------- +%% serialize_pkt +%%-------------------------------------------------------------------- +serialize_pkt(Frame, _Opts) -> + serialize(Frame). + +serialize(#frame{cmd = Cmd, ack = Ack, vin = Vin, encrypt = Encrypt, data = Data, rawdata = RawData}) -> + Encrypted = encipher(serialize_data(Cmd, Ack, RawData, Data), Encrypt), + Len = byte_size(Encrypted), + Stream = <>, + Crc = cal_check(Stream, byte_size(Stream), undefined), + <<"##", Stream/binary, Crc:?BYTE>>. 
+ +serialize_data(?CMD_PARAM_QUERY, ?ACK_IS_CMD, _, #{ + <<"Time">> := Time, + <<"Total">> := Total, + <<"Ids">> := Ids +}) when length(Ids) == Total -> + T = tune_time(Time), + Ids1 = tune_ids(Ids), + <>; +serialize_data(?CMD_PARAM_SETTING, ?ACK_IS_CMD, _, #{ + <<"Time">> := Time, + <<"Total">> := Total, + <<"Params">> := Params +}) when length(Params) == Total -> + T = tune_time(Time), + Params1 = tune_params(Params), + <>; +serialize_data(?CMD_TERMINAL_CTRL, ?ACK_IS_CMD, _, #{ + <<"Time">> := Time, + <<"Command">> := Cmd, + <<"Param">> := Param +}) -> + T = tune_time(Time), + Param1 = tune_ctrl_param(Cmd, Param), + <>; +serialize_data(_Cmd, Ack, RawData, #{<<"Time">> := Time}) when ?IS_RESPONSE(Ack) -> + Rest = + case byte_size(RawData) > 6 of + false -> <<>>; + true -> binary:part(RawData, 6, byte_size(RawData) - 6) + end, + T = tune_time(Time), + <>. + +tune_time(#{ + <<"Year">> := Year, + <<"Month">> := Month, + <<"Day">> := Day, + <<"Hour">> := Hour, + <<"Minute">> := Min, + <<"Second">> := Sec +}) -> + <>. + +tune_ids(Ids) -> + lists:foldr( + fun + (Id, Acc) when is_integer(Id) -> + <>; + (Id, Acc) when is_binary(Id) -> + <> + end, + <<>>, + Ids + ). + +tune_params(Params) -> + tune_params_(lists:reverse(Params), <<>>). 
+ +tune_params_([], Bin) -> + Bin; +tune_params_([#{16#01 := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#01:?BYTE, Val:?WORD, Bin/binary>>); +tune_params_([#{16#02 := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#02:?BYTE, Val:?WORD, Bin/binary>>); +tune_params_([#{16#03 := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#03:?BYTE, Val:?WORD, Bin/binary>>); +tune_params_([#{16#04 := Val} | Rest], Bin) -> + {Val05, Rest1} = take_param(16#05, Rest), + tune_params_(Rest1, <<16#04:?BYTE, Val:?BYTE, 16#05, Val05:Val/binary, Bin/binary>>); +tune_params_([#{16#05 := Val} | Rest], Bin) -> + tune_params_(Rest ++ [#{16#05 => Val}], Bin); +tune_params_([#{16#06 := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#06:?BYTE, Val:?WORD, Bin/binary>>); +tune_params_([#{16#07 := Val} | Rest], Bin) when byte_size(Val) == 5 -> + tune_params_(Rest, <<16#07:?BYTE, Val/binary, Bin/binary>>); +tune_params_([#{16#08 := Val} | Rest], Bin) when byte_size(Val) == 5 -> + tune_params_(Rest, <<16#08:?BYTE, Val/binary, Bin/binary>>); +tune_params_([#{16#09 := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#09:?BYTE, Val:?BYTE, Bin/binary>>); +tune_params_([#{16#0A := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#0A:?BYTE, Val:?WORD, Bin/binary>>); +tune_params_([#{16#0B := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#0B:?BYTE, Val:?WORD, Bin/binary>>); +tune_params_([#{16#0C := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#0C:?BYTE, Val:?BYTE, Bin/binary>>); +tune_params_([#{16#0D := Val} | Rest], Bin) -> + {Val0E, Rest1} = take_param(16#0E, Rest), + tune_params_(Rest1, <<16#0D:?BYTE, Val:?BYTE, 16#0E, Val0E:Val/binary, Bin/binary>>); +tune_params_([#{16#0E := Val} | Rest], Bin) -> + tune_params_(Rest ++ [#{16#0E => Val}], Bin); +tune_params_([#{16#0F := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#0F:?BYTE, Val:?WORD, Bin/binary>>); +tune_params_([#{16#10 := Val} | Rest], Bin) -> + tune_params_(Rest, <<16#10:?BYTE, Val:?BYTE, Bin/binary>>). 
+ +tune_ctrl_param(16#00, _) -> + <<>>; +tune_ctrl_param(16#01, Param) -> + tune_upgrade_feild(Param); +tune_ctrl_param(16#02, _) -> + <<>>; +tune_ctrl_param(16#03, _) -> + <<>>; +tune_ctrl_param(16#04, _) -> + <<>>; +tune_ctrl_param(16#05, _) -> + <<>>; +tune_ctrl_param(16#06, #{<<"Level">> := Level, <<"Message">> := Msg}) -> + <>; +tune_ctrl_param(16#07, _) -> + <<>>; +tune_ctrl_param(Cmd, Param) -> + ?SLOG(error, #{msg => "unexcepted_cmd", cmd => Cmd, param => Param}), + <<>>. + +tune_upgrade_feild(Param) -> + TuneBin = fun + (Bin, Len) when is_binary(Bin), byte_size(Bin) =:= Len -> Bin; + (undefined, _) -> undefined; + (Bin, _) -> error({invalid_param_length, Bin}) + end, + TuneWrd = fun + (Val) when is_integer(Val), Val < 65535 -> <>; + (undefined) -> undefined; + (_) -> error(invalid_param_word_value) + end, + TuneAdr = fun + (Ip) when is_binary(Ip) -> + {ok, {I1, I2, I3, I4}} = inet:parse_address(binary_to_list(Ip)), + <<0, 0, I1, I2, I3, I4>>; + (undefined) -> + undefined; + (_) -> + error(invalid_ip_address) + end, + L = [ + maps:get(<<"DialingName">>, Param, undefined), + maps:get(<<"Username">>, Param, undefined), + maps:get(<<"Password">>, Param, undefined), + TuneAdr(maps:get(<<"Ip">>, Param, undefined)), + TuneWrd(maps:get(<<"Port">>, Param, undefined)), + TuneBin(maps:get(<<"ManufacturerId">>, Param, undefined), 4), + TuneBin(maps:get(<<"HardwareVer">>, Param, undefined), 5), + TuneBin(maps:get(<<"SoftwareVer">>, Param, undefined), 5), + maps:get(<<"UpgradeUrl">>, Param, undefined), + TuneWrd(maps:get(<<"Timeout">>, Param, undefined)) + ], + list_to_binary([I || I <- lists:join(";", L), I /= undefined]). + +take_param(K, Params) -> + V = search_param(K, Params), + {V, Params -- [#{K => V}]}. + +search_param(16#05, [#{16#05 := V} | _]) -> V; +search_param(16#0E, [#{16#0E := V} | _]) -> V; +search_param(K, [_ | Rest]) -> search_param(K, Rest). 
+ +cal_check(_, 0, Check) -> Check; +cal_check(<>, Size, undefined) -> cal_check(Rest, Size - 1, C); +cal_check(<>, Size, Check) -> cal_check(Rest, Size - 1, Check bxor C). + +format(Msg) -> + io_lib:format("~p", [Msg]). + +type(_) -> + %% TODO: + gbt32960. + +is_message(#frame{}) -> + %% TODO: + true; +is_message(_) -> + false. diff --git a/apps/emqx_gateway_gbt32960/src/emqx_gbt32960_schema.erl b/apps/emqx_gateway_gbt32960/src/emqx_gbt32960_schema.erl new file mode 100644 index 000000000..4580cc087 --- /dev/null +++ b/apps/emqx_gateway_gbt32960/src/emqx_gbt32960_schema.erl @@ -0,0 +1,62 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_gbt32960_schema). + +-behaviour(hocon_schema). + +-include("emqx_gbt32960.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("typerefl/include/types.hrl"). + +%% config schema provides +-export([namespace/0, roots/0, fields/1, desc/1]). + +namespace() -> "gateway_gbt32960". + +roots() -> []. + +fields(gbt32960) -> + [ + {mountpoint, emqx_gateway_schema:mountpoint(?DEFAULT_MOUNTPOINT)}, + {retry_interval, + sc( + emqx_schema:duration_ms(), + #{ + default => <<"8s">>, + desc => ?DESC(retry_interval) + } + )}, + {max_retry_times, + sc( + non_neg_integer(), + #{ + default => 3, + desc => ?DESC(max_retry_times) + } + )}, + {message_queue_len, + sc( + non_neg_integer(), + #{ + default => 10, + desc => ?DESC(message_queue_len) + } + )}, + {listeners, sc(ref(emqx_gateway_schema, tcp_listeners), #{desc => ?DESC(tcp_listeners)})} + ] ++ emqx_gateway_schema:gateway_common_options(). + +desc(gbt32960) -> + "The GBT-32960 gateway"; +desc(_) -> + undefined. + +%%-------------------------------------------------------------------- +%% internal functions + +sc(Type, Meta) -> + hoconsc:mk(Type, Meta). + +ref(Mod, Field) -> + hoconsc:ref(Mod, Field). 
diff --git a/apps/emqx_gateway_gbt32960/test/emqx_gbt32960_SUITE.erl b/apps/emqx_gateway_gbt32960/test/emqx_gbt32960_SUITE.erl new file mode 100644 index 000000000..56184fc5f --- /dev/null +++ b/apps/emqx_gateway_gbt32960/test/emqx_gbt32960_SUITE.erl @@ -0,0 +1,1445 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_gbt32960_SUITE). + +-compile(export_all). +-compile(nowarn_export_all). + +-include("emqx_gbt32960.hrl"). +-include_lib("emqx/include/emqx.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-define(BYTE, 8 / big - integer). +-define(WORD, 16 / big - integer). +-define(DWORD, 32 / big - integer). + +-define(PORT, 7325). +-define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)). + +-define(CONF_DEFAULT, << + "\n" + "gateway.gbt32960 {\n" + " retry_interval = \"1s\"\n" + " listeners.tcp.default {\n" + " bind = 7325\n" + " }\n" + "}\n" +>>). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + application:load(emqx_gateway_gbt32960), + Apps = emqx_cth_suite:start( + [ + {emqx_conf, ?CONF_DEFAULT}, + emqx_gateway, + emqx_auth, + emqx_management, + {emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"} + ], + #{work_dir => emqx_cth_suite:work_dir(Config)} + ), + emqx_common_test_http:create_default_app(), + [{suite_apps, Apps} | Config]. + +end_per_suite(Config) -> + emqx_common_test_http:delete_default_app(), + emqx_cth_suite:stop(?config(suite_apps, Config)), + ok. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% helper functions %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +encode(Cmd, Vin, Data) -> + encode(Cmd, ?ACK_IS_CMD, Vin, ?ENCRYPT_NONE, Data). + +encode(Cmd, Ack, Vin, Data) -> + encode(Cmd, Ack, Vin, ?ENCRYPT_NONE, Data). 
+ +encode(Cmd, Ack, Vin, Encrypt, Data) -> + Size = byte_size(Data), + S1 = <>, + Crc = make_crc(S1, undefined), + Stream = <<"##", S1/binary, Crc:8>>, + ?LOGT("encode a packet=~p", [binary_to_hex_string(Stream)]), + Stream. + +make_crc(<<>>, Xor) -> Xor; +make_crc(<>, undefined) -> make_crc(Rest, C); +make_crc(<>, Xor) -> make_crc(Rest, C bxor Xor). + +make_time() -> + {Year, Mon, Day} = date(), + {Hour, Min, Sec} = time(), + Year1 = list_to_integer(string:substr(integer_to_list(Year), 3, 2)), + <>. + +binary_to_hex_string(Data) -> + lists:flatten([io_lib:format("~2.16.0B ", [X]) || <> <= Data]). + +to_json(#frame{cmd = Cmd, vin = Vin, encrypt = Encrypt, data = Data}) -> + emqx_utils_json:encode(#{'Cmd' => Cmd, 'Vin' => Vin, 'Encrypt' => Encrypt, 'Data' => Data}). + +get_published_msg() -> + receive + {deliver, _Topic, #message{topic = Topic, payload = Payload}} -> + {Topic, Payload} + after 5000 -> + error(timeout) + end. + +get_subscriptions() -> + lists:map(fun({_, Topic}) -> Topic end, ets:tab2list(emqx_subscription)). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%% test cases %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +login_first() -> + emqx:subscribe("gbt32960/+/upstream/#"), + + % + % send VEHICLE LOGIN + % + Time = <<12, 12, 29, 12, 19, 20>>, + Data = <