Compare commits

...

735 Commits

Author SHA1 Message Date
Ivan Dyachkov bcd63344b8
Merge pull request #13583 from id/20240807-sync-release-branches
sync release branches
2024-08-07 11:38:14 +02:00
Ivan Dyachkov cc3b26a3ac Merge remote-tracking branch 'upstream/release-58' into 20240807-sync-release-branches 2024-08-07 09:48:38 +02:00
Ivan Dyachkov dd686c24a0 Merge remote-tracking branch 'upstream/release-57' into 20240807-sync-release-branches 2024-08-07 09:44:38 +02:00
Ivan Dyachkov 592c4e0045
Merge pull request #12774 from emqx/dependabot/github_actions/dot-github/actions/package-macos/actions-package-macos-83d1e47aa6
chore(deps): bump the actions-package-macos group in /.github/actions/package-macos with 1 update
2024-08-07 09:43:53 +02:00
Ivan Dyachkov 073e3ea0a8
Merge pull request #13569 from emqx/dependabot/github_actions/actions-ef71aea555
chore(deps): bump the actions group across 1 directory with 8 updates
2024-08-07 09:35:52 +02:00
Xinyu Liu 81978ceaeb
Merge pull request #13571 from terry-xiaoyu/fast_fail_on_invalid_ssl_opts
chore: update esockd to 5.12.0
2024-08-07 11:21:32 +08:00
Ilia Averianov 6bfddd9952
Merge pull request #13565 from savonarola/0801-shared-subs-compact-structures
Reduce size of shared sub protocol structures
2024-08-06 19:56:08 +03:00
Thales Macedo Garitezi cf608a73a5
Merge pull request #13578 from thalesmg/20240806-r58-port-raft-precond
feat(dsraft): support atomic batches + preconditions (release-58)
2024-08-06 13:40:46 -03:00
Thales Macedo Garitezi a8200fb83d
Merge pull request #13579 from thalesmg/20240806-r58-test-flaky-consumer-rebalance
test: attempt to reduce test flakiness
2024-08-06 13:33:46 -03:00
Ilya Averyanov 9ad65c6ac1 feat(queue): reduce logging levels 2024-08-06 18:45:15 +03:00
Thales Macedo Garitezi 9ca3985bbd test: attempt to reduce test flakiness 2024-08-06 12:44:51 -03:00
Ilya Averyanov e17becb84d feat(queue): compact protocol structures, organize formatting 2024-08-06 18:05:02 +03:00
Andrew Mayorov 5dd8fefded test(ds): avoid side effects in check phase 2024-08-06 11:43:12 -03:00
Andrew Mayorov 7b85faf12a chore(dsraft): fix a few spelling errors
Co-Authored-By: Thales Macedo Garitezi <thalesmg@gmail.com>
2024-08-06 11:43:12 -03:00
Andrew Mayorov b0594271b2 chore(dsraft): fix a typespec 2024-08-06 11:43:12 -03:00
Andrew Mayorov d8aa39a310 fix(dsraft): use local application environment 2024-08-06 11:43:12 -03:00
Andrew Mayorov fc0434afc8 chore(dslocal): refine a few typespecs 2024-08-06 11:43:12 -03:00
Andrew Mayorov 5502af18b7 feat(ds): support deletions + precondition-related API in bitfield-lts 2024-08-06 11:43:12 -03:00
Andrew Mayorov 9f96e0957e test(ds): verify deletions work predictably 2024-08-06 11:43:12 -03:00
Andrew Mayorov 109ffe7a70 fix(dsbackend): unify timestamp resolution in operations / preconditions 2024-08-06 11:43:12 -03:00
Andrew Mayorov 1559aac486 test(dsbackend): add shared tests for atomic batches + preconditions 2024-08-06 11:43:12 -03:00
Andrew Mayorov 68990f1538 feat(ds): support operations + preconditions in skipstream-lts 2024-08-06 11:43:12 -03:00
Andrew Mayorov 5356d678cc feat(dsraft): support atomic batches + preconditions 2024-08-06 11:43:12 -03:00
Andrew Mayorov 11951f8f6c feat(ds): adopt buffer interface to `emqx_ds:operation()` 2024-08-06 11:43:12 -03:00
Andrew Mayorov 0aa4cdbaf3 feat(ds): add generic preconditions implementation 2024-08-06 11:43:12 -03:00
Ivan Dyachkov 281f8ddc83
Merge pull request #13575 from Kinplemelon/kinple/upgrade-dashboard-58
chore(dashboard): bump dashboard version to v1.10.0-beta.1 & e1.8.0-beta.1
2024-08-06 16:39:06 +02:00
Kinplemelon b80513e941 ci: update emqx docs link in dashboard 2024-08-06 15:21:19 +02:00
Ivan Dyachkov 822ed71282 chore: release 5.7.2 2024-08-06 13:25:56 +02:00
Kinple b8fd5de2a5
Merge pull request #13577 from Kinplemelon/kinple/upgrade-dashboard
chore(dashboard): bump dashboard version to v1.9.2 & e1.7.2
2024-08-06 19:02:50 +08:00
Kinplemelon 3ee84d60ae chore(dashboard): bump dashboard version to v1.9.2 & e1.7.2 2024-08-06 18:11:35 +08:00
Andrew Mayorov 3b52b658cd
Merge pull request #13559 from keynslug/feat/EMQX-12309/raft-precond
feat(dsraft): support atomic batches + preconditions
2024-08-06 09:17:16 +02:00
Kinplemelon cba3dcbeda chore(dashboard): bump dashboard version to v1.10.0-beta.1 & e1.8.0-beta.1 2024-08-06 13:44:16 +08:00
Kinple caf1897979
Merge pull request #13574 from Kinplemelon/kinple/upgrade-dashboard
chore(dashboard): bump dashboard version to e1.7.2-beta.7
2024-08-06 10:51:03 +08:00
Kinplemelon dbbd5e1458 ci: update emqx docs link in dashboard 2024-08-06 09:33:20 +08:00
Kinplemelon 0ab31df9d2 chore(dashboard): bump dashboard version to v1.9.2-beta.1 & e1.7.2-beta.7 2024-08-06 09:32:17 +08:00
Thales Macedo Garitezi 613fc644f5
Merge pull request #13425 from kjellwinblad/kjell/review_connector_error_logs_mqtt_etc/EMQX-12555/EMQX-12657
fix: make MQTT connector error log messages easier to understand
2024-08-05 17:34:13 -03:00
Andrew Mayorov b1a53568d6
test(ds): avoid side effects in check phase 2024-08-05 16:34:17 +02:00
Ivan Dyachkov d6651a1889
Merge pull request #13572 from id/20240805-prep-5.8.0-alpha.1
chore: prepare 5.8.0-alpha.1
2024-08-05 16:05:42 +02:00
Ivan Dyachkov 4cf7151139 chore: prepare 5.8.0-alpha.1 2024-08-05 11:09:07 +02:00
Ivan Dyachkov 4865999606 Merge remote-tracking branch 'upstream/master' into release-58 2024-08-05 10:59:59 +02:00
Andrew Mayorov 382feab7d1
chore(dsraft): fix a few spelling errors
Co-Authored-By: Thales Macedo Garitezi <thalesmg@gmail.com>
2024-08-05 10:55:49 +02:00
Andrew Mayorov 6aad774075
chore(dsraft): fix a typespec 2024-08-05 10:55:49 +02:00
Andrew Mayorov 649cbf1c79
fix(dsraft): use local application environment 2024-08-05 10:55:49 +02:00
Andrew Mayorov 4cde5e98a3
chore(dslocal): refine a few typespecs 2024-08-05 10:55:48 +02:00
Andrew Mayorov d631b5b296
feat(ds): support deletions + precondition-related API in bitfield-lts 2024-08-05 10:55:48 +02:00
Andrew Mayorov 26ec69d5f4
test(ds): verify deletions work predictably 2024-08-05 10:55:48 +02:00
Andrew Mayorov 58b9ab0210
fix(dsbackend): unify timestamp resolution in operations / preconditions 2024-08-05 10:55:22 +02:00
lafirest 4644072fd8
Merge pull request #13570 from lafirest/fix/api_key_bootstrap
fix(api_key): do not crash on boot when the bootstrap file does not exist
2024-08-05 16:33:43 +08:00
Shawn bd87e3ce2b chore: update esockd to 5.12.0 2024-08-05 16:18:04 +08:00
firest c9c4d1a196 fix(api_key): do not crash on boot when the bootstrap file does not exist 2024-08-05 15:56:05 +08:00
dependabot[bot] 11546b72f4
chore(deps): bump the actions group across 1 directory with 8 updates
Bumps the actions group with 8 updates in the / directory:

| Package | From | To |
| --- | --- | --- |
| [actions/checkout](https://github.com/actions/checkout) | `4.1.2` | `4.1.7` |
| [actions/upload-artifact](https://github.com/actions/upload-artifact) | `4.3.3` | `4.3.5` |
| [actions/download-artifact](https://github.com/actions/download-artifact) | `4.1.7` | `4.1.8` |
| [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) | `3.0.0` | `3.2.0` |
| [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) | `3.3.0` | `3.6.1` |
| [docker/login-action](https://github.com/docker/login-action) | `3.2.0` | `3.3.0` |
| [erlef/setup-beam](https://github.com/erlef/setup-beam) | `1.18.0` | `1.18.1` |
| [ossf/scorecard-action](https://github.com/ossf/scorecard-action) | `2.3.3` | `2.4.0` |



Updates `actions/checkout` from 4.1.2 to 4.1.7
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4.1.2...692973e3d937129bcbf40652eb9f2f61becf3332)

Updates `actions/upload-artifact` from 4.3.3 to 4.3.5
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](65462800fd...89ef406dd8)

Updates `actions/download-artifact` from 4.1.7 to 4.1.8
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](65a9edc588...fa0a91b85d)

Updates `docker/setup-qemu-action` from 3.0.0 to 3.2.0
- [Release notes](https://github.com/docker/setup-qemu-action/releases)
- [Commits](68827325e0...49b3bc8e6b)

Updates `docker/setup-buildx-action` from 3.3.0 to 3.6.1
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](d70bba72b1...988b5a0280)

Updates `docker/login-action` from 3.2.0 to 3.3.0
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](0d4c9c5ea7...9780b0c442)

Updates `erlef/setup-beam` from 1.18.0 to 1.18.1
- [Release notes](https://github.com/erlef/setup-beam/releases)
- [Commits](a6e26b2231...b9c58b0450)

Updates `ossf/scorecard-action` from 2.3.3 to 2.4.0
- [Release notes](https://github.com/ossf/scorecard-action/releases)
- [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md)
- [Commits](dc50aa9510...62b2cac7ed)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: docker/setup-qemu-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: docker/setup-buildx-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: docker/login-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: erlef/setup-beam
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: ossf/scorecard-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-08-05 03:25:47 +00:00
dependabot[bot] bcb70a9fb9
chore(deps): bump the actions-package-macos group
Bumps the actions-package-macos group in /.github/actions/package-macos with 1 update: [actions/cache](https://github.com/actions/cache).


Updates `actions/cache` from 4.0.1 to 4.0.2
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](ab5e6d0c87...0c45773b62)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-package-macos
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-08-05 03:17:26 +00:00
JimMoen 09ec31908b
Merge pull request #13357 from JimMoen/fix-utf8-frame-error-connack
Stop returning `CONNACK` or `DISCONNECT` to clients that sent malformed CONNECT packets.

- Only send `CONNACK` with reason code `frame_too_large` for MQTT v5.0 connections, and only if the protocol version field in the CONNECT packet can be detected.
- Otherwise, **DO NOT** send any CONNACK or DISCONNECT packet.
2024-08-02 15:24:30 +08:00
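A minimal Erlang sketch of the decision described in that PR (illustrative only, with made-up module and return values; not the actual emqx_frame/emqx_channel code): a `frame_too_large` CONNACK is sent only when the protocol version could be detected as MQTT v5, otherwise the connection is closed without any reply.

```
%% Illustrative sketch only; not the actual EMQX implementation.
-module(frame_error_sketch).
-export([handle_frame_too_large/1]).

%% ProtoVer is the protocol version parsed out of the CONNECT packet, or
%% 'undefined' when the packet was too malformed to detect it.
handle_frame_too_large(ProtoVer) ->
    case ProtoVer of
        5 ->
            %% MQTT v5: reply with CONNACK reason code 16#95 ("Packet too
            %% large"), then close the connection.
            {reply_connack, 16#95, close};
        _ ->
            %% MQTT v3.x or undetectable version: close without sending
            %% any CONNACK or DISCONNECT.
            close_silently
    end.
```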
lafirest b94ec4014f
Merge pull request #13563 from lafirest/fix/payload_encode
fix(log): respect payload encoding settings when formatting packets
2024-08-02 14:38:12 +08:00
firest 74c346f9d1 fix(log): respect payload encoding settings when formatting packets 2024-08-02 12:41:30 +08:00
zhongwencool 8a33ef8576
Merge pull request #13562 from zhongwencool/fix-deactivate-alarm
fix: deactivate alarm before creating resource
2024-08-02 12:08:27 +08:00
zhongwencool 6c2033ecbf fix: deactivate alarm before creating resource 2024-08-02 11:03:59 +08:00
zmstone 51530588ef ci: fix a typo in commented out docker-compose yaml file 2024-08-01 22:41:42 +02:00
Thales Macedo Garitezi bba9d085d6 test: refactor test structure 2024-08-01 16:03:04 -03:00
Thales Macedo Garitezi 3162fe7a27 feat: prettify some error explanations 2024-08-01 15:31:00 -03:00
Thales Macedo Garitezi 52b2d73b28 test: move new test to newer module and use current apis 2024-08-01 15:13:25 -03:00
Thales Macedo Garitezi 44e7f2e9b2 refactor: use macros for status to avoid typos 2024-08-01 14:49:43 -03:00
Thales Macedo Garitezi baf2b96cbc test: refactor test structure 2024-08-01 14:27:25 -03:00
Kjell Winblad ba2d4f3df3 docs: add change log entry 2024-08-01 14:21:27 -03:00
Kjell Winblad 11aaa7b07d fix: make MQTT connector error log messages easier to understand
Fixes:
https://emqx.atlassian.net/browse/EMQX-12555
https://emqx.atlassian.net/browse/EMQX-12657
2024-08-01 14:21:26 -03:00
Thales Macedo Garitezi 4250d01363
Merge pull request #13546 from thalesmg/20240730-r58-pulsar-query-mode
feat: expose `resource_opts.query_mode` for pulsar action
2024-08-01 14:19:16 -03:00
Thales Macedo Garitezi 86853ac6ef
Merge pull request #13545 from thalesmg/20240730-m-connector-jwt-app
refactor: move JWT worker and helpers to separate app
2024-08-01 13:06:27 -03:00
Andrew Mayorov 810a4d3cf9
test(dsbackend): add shared tests for atomic batches + preconditions 2024-08-01 14:26:45 +02:00
Andrew Mayorov 7b243ef7ad
feat(ds): support operations + preconditions in skipstream-lts 2024-08-01 14:26:45 +02:00
Andrew Mayorov fcf76d28ba
feat(dsraft): support atomic batches + preconditions 2024-08-01 14:26:45 +02:00
Andrew Mayorov 3b5d98c1d9
feat(ds): adopt buffer interface to `emqx_ds:operation()` 2024-08-01 14:26:45 +02:00
Andrew Mayorov 451b03ff99
feat(ds): add generic preconditions implementation 2024-08-01 14:26:45 +02:00
JimMoen f792418a68
Merge pull request #13552 from JimMoen/fix-plugin-app-takes-too-long
fix: add a startup timeout limit for the plugin application
2024-08-01 16:46:09 +08:00
JimMoen 4915cc0da6
chore: add changelog entry for 13357 2024-08-01 15:23:58 +08:00
JimMoen 15b3f4deb0
fix: rm unused func and exports 2024-08-01 15:00:24 +08:00
JimMoen 7a251c9ead
test: handle frame error for CONNECT packets 2024-08-01 10:26:31 +08:00
JimMoen 37a89d0094
fix: enrich parse_state and connection serialize opts 2024-08-01 10:26:31 +08:00
JimMoen c313aa89f0
fix: try throw proto_ver and proto_name when parsing CONNECT packet 2024-08-01 10:26:31 +08:00
JimMoen 6db1c0a446
refactor: separate function to handle `frame_error` 2024-08-01 10:26:31 +08:00
JimMoen d4508a4f1d
chore: sync master `elvis.config` 2024-08-01 10:26:31 +08:00
Thales Macedo Garitezi a6a9538e73 refactor: move JWT worker and helpers to separate app
Some bridge applications might need to use JWTs before the `emqx_connector` application is started, so
we must move JWT table initialization to a separate dependency application.
2024-07-31 14:52:12 -03:00
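A hedged sketch of why a separate dependency application helps here: an OTP application that lists the JWT app in its `.app.src` dependencies is guaranteed to start after it, so the JWT table already exists when the bridge boots. Everything in this fragment is illustrative; the JWT app name and the bridge app name are assumptions.

```
%% Hypothetical .app.src fragment; `emqx_connector_jwt` is an assumed name
%% for the extracted JWT application, and `my_bridge_app` is made up.
{application, my_bridge_app, [
    {vsn, "0.1.0"},
    {registered, []},
    {applications, [
        kernel,
        stdlib,
        emqx_connector_jwt   %% started (and its JWT table created) first
    ]},
    {mod, {my_bridge_app, []}}
]}.
```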
Thales Macedo Garitezi 9f97bff7d0 feat: expose `resource_opts.query_mode` for pulsar action
Fixes https://emqx.atlassian.net/browse/EMQX-12782
2024-07-31 11:13:11 -03:00
Ivan Dyachkov 577f1a7d8a
Merge pull request #13553 from id/20240731-ci-fix-docker-build
ci: fix docker images build
2024-07-31 16:04:47 +02:00
Ivan Dyachkov e42021d314
Merge pull request #13554 from id/20240731-sync-release-57
sync release-57
2024-07-31 15:48:37 +02:00
Thales Macedo Garitezi 08c58cc319
Merge pull request #13543 from thalesmg/20240730-r57-sr-delete-protobuf-cache
fix(schema registry): clear protobuf code cache when deleting/updating serdes
2024-07-31 10:16:48 -03:00
Thales Macedo Garitezi 150fee87f1
Merge pull request #13541 from thalesmg/20240730-r57-unset-crl-check-listener
fix(crl): force remove CRL fields from SSL opts after listener update
2024-07-31 10:16:35 -03:00
ieQu1 6058b50c91
Merge pull request #13555 from ieQu1/ds-rest-404
fix(mgmt): Return 404 for /ds/ API endpoints when DS is disabled
2024-07-31 14:57:17 +02:00
Thales Macedo Garitezi 85cff5e7eb fix: merge conflicts 2024-07-31 09:14:29 -03:00
ieQu1 569f48f5a1
fix(mgmt): Return 404 for /ds/ API endpoints when DS is disabled 2024-07-31 13:44:38 +02:00
ieQu1 2cf86e76ee
Merge pull request #13551 from ieQu1/EMQX-12587
fix(sessds): Expose durable sessions in the config API
2024-07-31 12:00:26 +02:00
Ivan Dyachkov 74cef7937d Merge remote-tracking branch 'upstream/release-57' into 20240731-sync-release-57 2024-07-31 11:31:29 +02:00
JimMoen c658cfe269
fix: make static_check happy 2024-07-31 17:17:13 +08:00
JimMoen a246551914
fix: add a startup timeout limit for the plugin application 2024-07-31 17:17:11 +08:00
JimMoen b1c8bc2421
Merge pull request #13548 from JimMoen/feat-plugin-on-config-changed-callback
feat: call plugin's app module `on_config_changed/2` callback
2024-07-31 16:40:48 +08:00
ieQu1 200b5ab294
Merge pull request #13550 from ieQu1/no-ra-dependency-on-oss
chore(emqx): Remove ra from the list of EMQX dependencies
2024-07-31 10:30:44 +02:00
Ivan Dyachkov 8d8ff6cf5d ci: fix docker images build
/etc/docker/daemon.json requires root for read access
2024-07-31 10:27:04 +02:00
ieQu1 a23b8266b1
fix(sessds): Expose durable sessions in the config API 2024-07-31 10:18:38 +02:00
ieQu1 d69342a2fc
chore(emqx): Remove ra from the list of EMQX dependencies 2024-07-31 09:56:28 +02:00
JimMoen e6bfc14cc9
fix: try-catch optional `on_config_changed/2` plugin app callback 2024-07-31 10:09:02 +08:00
JimMoen 3d1f0c756c
feat: call plugin's app module `on_config_changed/2` callback
assumes the plugin's application module is named `[PluginName]_app`
2024-07-31 10:09:02 +08:00
Thales Macedo Garitezi 83041a8b83
Merge pull request #13544 from thalesmg/20240730-m-test-flaky-client-v2
test(clients v2 api): attempt to reduce flakiness
2024-07-30 16:07:37 -03:00
Thales Macedo Garitezi 1c4402b12c test(clients v2 api): attempt to reduce flakiness
https://github.com/emqx/emqx/actions/runs/10161391242/job/28101183920#step:6:331
2024-07-30 14:07:08 -03:00
Thales Macedo Garitezi ebb69f4ebf fix(crl): force remove crl fields from SSL opts after listener update
Fixes https://emqx.atlassian.net/browse/EMQX-12785
2024-07-30 14:00:24 -03:00
Thales Macedo Garitezi fd961f9da7 fix(schema registry): clear protobuf code cache when deleting/updating serde
Fixes https://emqx.atlassian.net/browse/EMQX-12789
2024-07-30 13:52:34 -03:00
Ilia Averianov 359bc38aa4
Merge pull request #13407 from savonarola/0701-shared-sub
Implement shared subscriptions
2024-07-30 16:12:13 +03:00
Ilya Averyanov 08f70e4a25 feat(queue): move ds shared sub dependent test to emqx_ds_shared_sub app 2024-07-30 14:19:39 +03:00
Ilya Averyanov e408804efb feat(queue): fix dialyzer issues 2024-07-30 13:01:48 +03:00
Ilya Averyanov e294d35703 feat(queue): add schema descriptions 2024-07-30 13:01:48 +03:00
Ilya Averyanov 303ff95e10 feat(queue): add stub for CRUD API 2024-07-30 13:01:48 +03:00
Ilya Averyanov 23f0e88b45 feat(queue): add integration with external broker 2024-07-30 13:01:46 +03:00
Ilya Averyanov f0dd1bc4f4 feat(queue): add shared sub support to the management API 2024-07-30 13:01:20 +03:00
Ilya Averyanov 9b30320ddb feat(queue): simplify progress report on disconnect 2024-07-30 13:01:20 +03:00
Ilya Averyanov cae27293a5 feat(queue): move route registration to sessions 2024-07-30 13:01:19 +03:00
Ilya Averyanov 81f4103d60 feat(queue): avoid cyclic dependencies 2024-07-30 13:01:19 +03:00
Ilya Averyanov bab526be24 feat(queue): self-revoke all shared streams on session open 2024-07-30 13:01:19 +03:00
Ilya Averyanov 9307a82004 feat(queue): rearrange leader's code 2024-07-30 13:01:19 +03:00
Ilya Averyanov b8e8f7c8e0 feat(queue): add pre_renew_streams callback 2024-07-30 13:01:18 +03:00
Ilya Averyanov a97a0d6400 feat(queue): fix dialyzer issues 2024-07-30 13:01:18 +03:00
Ilya Averyanov 8705956cdc feat(queue): update docs 2024-07-30 13:01:18 +03:00
Ilya Averyanov f213569460 feat(queue): clarify naming; identify shared subs by full topic filter 2024-07-30 13:01:18 +03:00
Ilya Averyanov 7e23f8d19f feat(queue): fix include 2024-07-30 13:01:17 +03:00
Ilya Averyanov a676ede6b8 feat(queue): improve logging 2024-07-30 13:01:17 +03:00
Ilya Averyanov 9e5e7a23c5 feat(queue): remove unnecessary acked flag 2024-07-30 13:01:17 +03:00
Ilya Averyanov 143086b0ef feat(queue): replace invalid rewind algorithm with skipping iterator 2024-07-30 13:01:16 +03:00
Ilya Averyanov c569625dd1 feat(queue): handle partially unacked ranges 2024-07-30 13:01:16 +03:00
Ilya Averyanov 7daab1ab23 feat(queue): move replay progress to a separate data structure 2024-07-30 13:01:16 +03:00
Ilya Averyanov 077ee38530 feat(queue): add config 2024-07-30 13:01:15 +03:00
Ilya Averyanov b74189570d feat(queue): do not use ee app from emqx app 2024-07-30 13:01:15 +03:00
Ilya Averyanov 649cf88042 feat(queue): kick agents that do not return to the replaying state for long 2024-07-30 13:01:15 +03:00
Ilya Averyanov 1496f7f778 feat(queue): add leader_rank_progress test 2024-07-30 13:01:15 +03:00
Ilya Averyanov 91dd1183ad feat(queue): fix dialyzer issues 2024-07-30 13:01:14 +03:00
Ilya Averyanov 65ab81ff74 feat(queue): fix quick resubscription 2024-07-30 13:01:14 +03:00
Ilya Averyanov 53d4cd3174 feat(queue): rename leader's stream_progresses to stream_states 2024-07-30 13:01:14 +03:00
Ilya Averyanov 7d004b37da feat(queue): implement stream finalization 2024-07-30 13:01:13 +03:00
Ilya Averyanov e5547005eb feat(queue): implement resubscribe test 2024-07-30 13:01:13 +03:00
Ilya Averyanov fada2a3fea feat(queue): reorganize and document shared subs module 2024-07-30 13:01:13 +03:00
Ilya Averyanov b4a010d63b feat(queue): implement unsubscribe 2024-07-30 13:01:13 +03:00
Ilya Averyanov 9bde981c44 feat(queue): fix static check issues 2024-07-30 13:01:12 +03:00
Ilya Averyanov 7658e081c5 feat(queue): move design docs to the EIP 2024-07-30 13:01:12 +03:00
Ilya Averyanov 8dce530d15 feat(queue): fix progress reporting and more tests
We test reassignment during intensive replay
2024-07-30 13:01:12 +03:00
Ilya Averyanov a20d262327 feat(queue): send progress before fetching new messages 2024-07-30 13:01:11 +03:00
Ilya Averyanov d32f282feb feat(queue): add graceful disconnect 2024-07-30 13:01:11 +03:00
Ilya Averyanov 1d728a05b2 feat(queue): send metadata with agent when connecting to leader
It will be used to attach agent taints to improve stream assignment.
2024-07-30 13:01:11 +03:00
Ilya Averyanov 49bff5c08a feat(queue): wrap remote calls in a proto 2024-07-30 13:01:10 +03:00
Ilya Averyanov 61eda0ff31 feat(queue): identify agents by SessionId in tests 2024-07-30 13:01:10 +03:00
Ilya Averyanov 8f0d807c00 feat(queue): add new test scenarios 2024-07-30 13:01:10 +03:00
Ilya Averyanov bceb5d43ed feat(queue): fix stream rebalancing issues, update tests 2024-07-30 13:01:10 +03:00
Ilya Averyanov 03fea34962 feat(queue): document protocol between agent and leader
Document leader's states
2024-07-30 13:01:09 +03:00
Ilya Averyanov 082514f557 feat(queue): implement full protocol between agent and leader 2024-07-30 13:01:09 +03:00
Ilya Averyanov c831f0772f feat(queue): handle renew_lease_timeout 2024-07-30 13:01:09 +03:00
Ivan Dyachkov ca455ad992
Merge pull request #13532 from emqx/sync-release-57-20240729-021938
Sync release-57
2024-07-30 10:49:09 +02:00
JianBo He c347c2c285
Merge pull request #13540 from zmstone/0729-rule-funciton-getenv-should-be-limited-to-vars-with-prefix-EMQXVAR_
refactor: force getenv to access only OS env with prefix EMQXVAR_
2024-07-30 08:23:24 +08:00
zmstone a49cd78aae refactor: force getenv to access only OS env with prefix EMQXVAR_ 2024-07-29 23:54:00 +02:00
zmstone 4065158be7
Merge pull request #13534 from JimMoen/feat-add-superuser-skip-authz
feat: add authz skipped trace for superuser
2024-07-29 22:30:13 +02:00
ieQu1 18721d05bc
Merge pull request #13526 from ieQu1/replicant-ee
fix(mria): Reserve replicant role for EE only
2024-07-29 22:10:06 +02:00
zmstone 7f7d0741d2
Merge pull request #13528 from zmstone/0726-unrecoverable-error-limit
0726 unrecoverable error limit
2024-07-29 21:32:14 +02:00
zmstone 2e39c4ad5e
Merge pull request #13495 from thalesmg/20240719-m-hide-enable-fields-mkII
chore: hide enable flags from schema and config examples
2024-07-29 21:31:20 +02:00
zmstone 5b50d5433a
Merge pull request #13537 from thalesmg/20240729-r57-auto-decode-payload-kprodu
feat: attempt to automatically decode `payload` similar to key and message templates
2024-07-29 21:03:41 +02:00
zmstone eab440e0c1 docs: add changelog for PR 13528 2024-07-29 20:41:57 +02:00
zmstone e08425e67d refactor(log-throttler): remove unnecessary code
there is no need to reset counters before erasing
2024-07-29 20:41:27 +02:00
zhongwencool f6f1d32da0 feat: throttle with resource_id 2024-07-29 20:41:27 +02:00
zhongwencool 2924ec582a feat: add unrecoverable_resource_error throttle 2024-07-29 20:41:27 +02:00
zhongwencool 8dc1d1424a chore: add resource tag for log 2024-07-29 20:41:27 +02:00
Thales Macedo Garitezi 693d5dd394 feat: attempt to automatically decode `payload` similar to key and message templates 2024-07-29 13:01:06 -03:00
ieQu1 f85db0a0e9
fix: Apply suggestions from code review
Co-authored-by: Thales Macedo Garitezi <thalesmg@gmail.com>
2024-07-29 17:22:41 +02:00
lafirest 60aefd1065
Merge pull request #13520 from lafirest/feat/scram-rest-acl
feat(scram): supports ACL rules in `scram_restapi` backend
2024-07-29 22:46:50 +08:00
JianBo He c637422302
Merge pull request #13518 from thalesmg/20240724-r57-dynamic-kprodu-action-mkIII
feat(kafka producer): allow dynamic topics (mkIII)
2024-07-29 22:43:20 +08:00
Thales Macedo Garitezi e80d43d14d test(fix): use ebin path without plugins
Without the filtering that already exists in cth:ebin_path, the rebar3 plugins path may
take priority over normal dependencies. Since we just updated hocon, and there seems to
be an older hocon among the rebar3 plugins, the test started to break because the older
hocon was getting loaded in the peer.
2024-07-29 09:50:25 -03:00
Thales Macedo Garitezi b3074144cc chore: temporarily revert `NO_DOC` changes to fields with default value = false
These will be dealt with in follow up PRs, by allowing the parent struct to be set to a
special `disabled` value in such cases.
2024-07-29 09:50:25 -03:00
Thales Macedo Garitezi 6786c9b517
refactor: improve descriptions and identifiers
Co-authored-by: zmstone <zmstone@gmail.com>
2024-07-29 09:45:52 -03:00
Thales Macedo Garitezi 8913de10c0
Merge pull request #13527 from thalesmg/20240726-r57-multiple-froms-sql-test
fix(rule engine tester): fix message publish with bridge source in from clause
2024-07-29 09:37:17 -03:00
JimMoen 5ddd7d7a6a
test: superuser skipped all authz check 2024-07-29 17:27:36 +08:00
JimMoen d7cac74bed
feat: add authz skipped trace 2024-07-29 17:27:36 +08:00
JianBo He 0b0a28ae44
chore: update changes/ee/feat-13504.en.md
Co-authored-by: zmstone <zmstone@gmail.com>
2024-07-29 10:45:24 +08:00
id c1e2801f41 Merge remote-tracking branch 'origin/release-57' into sync-release-57-20240729-021938 2024-07-29 02:19:38 +00:00
ieQu1 8036baf22c
test(paho): Run RLOG paho test with replicants only on EE 2024-07-26 21:53:32 +02:00
ieQu1 268f887700
test(mgmt): Disable certain tests on OSS 2024-07-26 20:24:53 +02:00
Thales Macedo Garitezi 1d56ac6e5e refactor: change topic schema type 2024-07-26 14:26:21 -03:00
Thales Macedo Garitezi 4e0742c66f feat: make kafka producer freely dynamic 2024-07-26 14:25:20 -03:00
ieQu1 8c1302f455
test(conf_app): Remove redundant config 2024-07-26 17:25:09 +02:00
ieQu1 b8a2a8ea18
test(router): Skip certain tests on OSS 2024-07-26 17:25:09 +02:00
ieQu1 b7c424a13d
test(persmsg): Remove redundant config 2024-07-26 17:17:06 +02:00
ieQu1 1b6494ab9a
test(mgmt): Remove redundant config 2024-07-26 17:17:06 +02:00
ieQu1 41bf5cd6ca
test(otel): Remove redundant config 2024-07-26 17:17:06 +02:00
ieQu1 548bcceab7
test(auth): Remove redundant config 2024-07-26 17:17:06 +02:00
ieQu1 1beda1cd11
test(mria): Remove role from the example config 2024-07-26 17:17:06 +02:00
ieQu1 9da744c423
fix(mria): Reserve replicant role for EE only 2024-07-26 17:17:06 +02:00
Thales Macedo Garitezi 3fae704903 fix(rule engine tester): fix message publish with bridge source in from clause
Fixes https://emqx.atlassian.net/browse/EMQX-12762
2024-07-26 09:27:16 -03:00
lafirest 2d6b2bff8e
Merge pull request #13524 from lafirest/feat/exclusive-cli
feat(exclusive): added CLI interface for exclusive topics
2024-07-26 19:54:03 +08:00
firest dc342a35ac chore: update changes 2024-07-26 17:18:52 +08:00
firest 397c104a85 feat(exclusive): added CLI interface for exclusive topics 2024-07-26 16:56:47 +08:00
firest 7bf70aaab6 feat(scram): supports ACL rules in `scram_restapi` backend 2024-07-26 14:30:28 +08:00
Thales Macedo Garitezi df1f4fad70 feat(kafka producer): allow dynamic topics from pre-configured topics
Fixes https://emqx.atlassian.net/browse/EMQX-12656
2024-07-25 15:33:12 -03:00
Thales Macedo Garitezi 39b8cb1789
Merge pull request #13487 from thalesmg/20240715-m-refactor-cluster-link-api
feat(cluster link): refactor http api, add status and metrics
2024-07-25 14:51:36 -03:00
Thales Macedo Garitezi 33eccb35da chore: update wolff -> 3.0.2 2024-07-25 14:30:50 -03:00
Thales Macedo Garitezi 03821c7b49 fix(cluster link metrics): route count metric is cluster-wide 2024-07-25 13:12:08 -03:00
Thales Macedo Garitezi 6da71200f3 refactor: improve bookkeeping api 2024-07-25 13:12:08 -03:00
Thales Macedo Garitezi 6dbf015c93 refactor: demote hidden config to hardcoded value 2024-07-25 13:12:08 -03:00
Thales Macedo Garitezi 30259284d1 chore: namespace metrics by type 2024-07-25 13:12:08 -03:00
Thales Macedo Garitezi 87e4e2340d refactor: better metric and error fold 2024-07-25 13:12:08 -03:00
firest 141d8144e4 fix(scram): change the name from `scram_http` to `scram_restapi` 2024-07-25 17:01:49 +08:00
Thales Macedo Garitezi b283a8c1ff
Merge pull request #13505 from thalesmg/20240722-m-rule-conn-deps-part-2
feat(rule engine api): add filters options for action and source ids
2024-07-24 16:52:47 -03:00
Thales Macedo Garitezi dda73651c5 fix(cluster link metrics): use periodic full table scan and gauge to count routes 2024-07-24 16:46:04 -03:00
Ivan Dyachkov c31e28153f
Merge pull request #13513 from id/20240724-sync-release-57
sync release-57
2024-07-24 20:12:06 +02:00
Thales Macedo Garitezi 7829838dc5 feat(cluster link api): add forwarding resource metrics to response 2024-07-24 14:53:57 -03:00
Thales Macedo Garitezi 80e035f115 feat(rule engine api): add filters options for action and source ids
Fixes https://emqx.atlassian.net/browse/EMQX-12654 (requirement 2)
2024-07-24 13:32:50 -03:00
Thales Macedo Garitezi 34f5a886ce refactor(cluster link api): return erpc errors in status and metrics responses 2024-07-24 12:07:34 -03:00
Thales Macedo Garitezi 79db2e6d7f test: fix flaky test 2024-07-24 11:17:00 -03:00
Thales Macedo Garitezi 3e4eeddb78 fix: add missing `resource_type` callback implementations 2024-07-24 10:53:33 -03:00
Thales Macedo Garitezi d2da311416 fix(resource): create undocumented callback
Created by https://github.com/emqx/emqx/pull/13449 but not added as a callback.
2024-07-24 10:53:33 -03:00
Thales Macedo Garitezi 76e51fa532 fix: correctly use maybe match clause 2024-07-24 10:17:45 -03:00
Thales Macedo Garitezi 82bb876de0
docs: improve descriptions
Co-authored-by: Andrew Mayorov <encube.ul@gmail.com>
2024-07-24 10:15:01 -03:00
Thales Macedo Garitezi 2d507146ab refactor: change style of case clause 2024-07-24 10:13:48 -03:00
Thales Macedo Garitezi 216a6abed9 refactor: rename CRUD functions 2024-07-24 10:11:03 -03:00
Thales Macedo Garitezi ca2d4ad2a0 refactor: move metrics logic to separate module 2024-07-24 10:04:27 -03:00
Thales Macedo Garitezi 311419f621
Merge pull request #13489 from thalesmg/20240718-m-init-debug
feat(bin/emqx): add `-init_debug` system arg when `DEBUG=2`
2024-07-24 09:16:12 -03:00
Thales Macedo Garitezi 9a950571d8
Merge pull request #13492 from thalesmg/20240718-m-rules-conn-deps
feat: return dependent entities in connectors/actions/sources API
2024-07-24 09:16:00 -03:00
ieQu1 d1edf8aad2
Merge pull request #13514 from ieQu1/skip-streams-improvement
fix(ds): Improve logic of skipstream LTS layout
2024-07-24 13:28:44 +02:00
ieQu1 b010efb647
fix(ds): Improve logic of skipstream LTS layout
Iterators:
Previously it used timestamp of the next message as a reference. This
won't work well for the upcoming beamformer/beamsplitter feature. This
commit changes the logic so iterators store timestamp of the last seen
message.

Cooked batches:
Cooked batches no longer store index entries. Creation of indexes has
been delegated to the commit callback.
2024-07-24 10:32:06 +02:00
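A small Erlang sketch of the iterator change described above (illustrative names only, not the storage layout code): the iterator remembers the timestamp of the last message it returned, so replay can resume strictly after it without needing to know the next message in advance.

```
%% Illustrative sketch; messages are {Timestamp, Payload} pairs sorted by
%% timestamp, and the iterator stores the timestamp of the last seen message.
-module(skipstream_iter_sketch).
-export([next/2]).

-record(it, {last_seen_ts = 0 :: integer()}).

next(#it{last_seen_ts = LastTS} = It, Messages) ->
    case [M || {TS, _} = M <- Messages, TS > LastTS] of
        [] ->
            {end_of_stream, It};
        [{TS, Payload} | _] ->
            {ok, {TS, Payload}, It#it{last_seen_ts = TS}}
    end.
```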
Ivan Dyachkov 606d829256 Merge remote-tracking branch 'upstream/release-57' into 20240724-sync-release-57 2024-07-24 10:28:00 +02:00
Thales Macedo Garitezi 9c0f1df8a3
Merge pull request #13506 from thalesmg/20240722-m-peername-sys-events
feat: add `peername` to rule events that already have `peerhost`
2024-07-23 09:38:57 -03:00
Xinyu Liu 7bb7b10a31
Merge pull request #13114 from emqx/emqx-relup-gen
feat: generate relup tarball, add relup APIs
2024-07-23 15:00:52 +08:00
Shawn 439abe430b refactor: remove relup revert callback functions 2024-07-23 11:45:55 +08:00
Shawn eb71477f43 chore: move relup_info to rel/relup 2024-07-23 09:32:54 +08:00
Thales Macedo Garitezi 99e6613713 test(rule events): add test cases for `schema.validation_failed` and `message.transformation_failed` events 2024-07-22 16:31:48 -03:00
Thales Macedo Garitezi d9832252d8 refactor: add namespace to avoid clashes with operations or other resources 2024-07-22 16:04:19 -03:00
Thales Macedo Garitezi 6a5849488c feat(cluster link): add metrics
Fixes https://emqx.atlassian.net/browse/EMQX-12627
2024-07-22 16:04:19 -03:00
Thales Macedo Garitezi 07cb147d38 fix(cluster link schema): username is not required 2024-07-22 16:04:19 -03:00
Thales Macedo Garitezi ba3cbe02e3 feat(cluster link api): add status to responses
Fixes https://emqx.atlassian.net/browse/EMQX-12627
2024-07-22 16:04:19 -03:00
Thales Macedo Garitezi 0b1f0db73c chore(cluster link): refactor HTTP API for CRUD operations
Fixes https://emqx.atlassian.net/browse/EMQX-12627
2024-07-22 16:04:19 -03:00
Thales Macedo Garitezi 7ca5205f3f feat: add `peername` to rule events that already have `peerhost`
Fixes https://emqx.atlassian.net/browse/EMQX-12342
2024-07-22 16:01:30 -03:00
Ivan Dyachkov d1c218303d
Merge pull request #13498 from emqx/sync-release-57-20240722-022026
Sync release-57
2024-07-22 19:30:10 +02:00
Thales Macedo Garitezi d7112921a6 docs: remove `enable` from config examples
Fixes https://emqx.atlassian.net/browse/EMQX-12730
2024-07-22 13:26:53 -03:00
Thales Macedo Garitezi 69f5b6fa6c chore: hide `enable` fields from docgen
Fixes https://emqx.atlassian.net/browse/EMQX-12730
2024-07-22 13:26:53 -03:00
Thales Macedo Garitezi 220fbe8a0a test: fix flaky test 2024-07-22 09:44:51 -03:00
Shawn 862336a2cb feat: hide relup plugins from APIs and CLIs 2024-07-22 16:07:50 +08:00
id ed2fab51e9 Merge remote-tracking branch 'origin/release-57' into sync-release-57-20240722-022026 2024-07-22 02:20:27 +00:00
Thales Macedo Garitezi 65544f34ec chore: bump hocon -> 0.43.2 2024-07-19 17:25:18 -03:00
Thales Macedo Garitezi 8d535bbd24
Merge pull request #13464 from thalesmg/20240712-m-res-manager-shutdown-logs
chore: attempt to reduce race condition supervisor noproc shutdown error logs
2024-07-19 14:57:56 -03:00
Thales Macedo Garitezi d7e72808a8 docs: add changelog 2024-07-19 14:43:55 -03:00
Thales Macedo Garitezi 4d174b8678 feat(sources & actions api): add dependent rules to response
Fixes https://emqx.atlassian.net/browse/EMQX-12654
2024-07-19 14:43:55 -03:00
Thales Macedo Garitezi b5231c29e3 feat(bin/emqx): add `-init_debug` system arg when `DEBUG=2` 2024-07-19 12:32:37 -03:00
Thales Macedo Garitezi eb2d3a3b7e chore: attempt to reduce race condition supervisor shutdown errors
Fixes https://emqx.atlassian.net/browse/EMQX-12442

e.g.:
```
2024-05-23T08:52:39.811845+00:00 [error] Supervisor: {local,emqx_resource_manager_sup}. Context: shutdown_error. Reason: noproc. Offender: id=<<99, 101, 110, 115, 111, 114, 101, 100>>,pid=<0.7752.1030>.
```

It could be just a race condition, as it seems to be the case for resource manager: i) a call is made to the process to stop it; ii) the call times out; iii) the after clause ends up calling supervisor:terminate_child; iv) while the supervisor is finding the child to terminate, the process actually finishes terminating, and the supervisor receives a noproc reason back.
2024-07-19 10:57:00 -03:00
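A hedged Erlang sketch of the race sequence described in that commit message (module and names are made up; this is not the emqx_resource_manager code): the graceful stop call times out, the fallback goes through the supervisor, and by then the child may already be gone, so that outcome is treated as success.

```
%% Illustrative sketch of the race described above.
-module(stop_race_sketch).
-export([stop_resource/3]).

stop_resource(Sup, ChildId, Pid) ->
    try
        gen_server:call(Pid, stop, 5000)
    catch
        exit:{timeout, _} ->
            %% The child may finish terminating on its own while the
            %% supervisor is looking it up, so a `noproc`/`not_found`
            %% outcome is treated as success rather than an error.
            case supervisor:terminate_child(Sup, ChildId) of
                ok -> ok;
                {error, not_found} -> ok;
                {error, _} = Error -> Error
            end
    end.
```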
Thales Macedo Garitezi ae828e8cfb feat(connectors api): add dependent actions and sources to response
Fixes https://emqx.atlassian.net/browse/EMQX-12654
2024-07-19 10:33:48 -03:00
Thales Macedo Garitezi 464e202742
Merge pull request #13488 from thalesmg/20240718-m-mix-fix-machine-dep
chore(new mix build): fix app dependency for release
2024-07-19 09:21:02 -03:00
Andrew Mayorov b7200656a5
Merge pull request #13486 from keynslug/fix/ci/ds-raft-flaky-next
test(dsraft): attempt to stabilize flaky testcases
2024-07-19 12:15:36 +02:00
Shawn fc3405fe4c fix: bp_api for relup 2024-07-19 17:35:36 +08:00
Shawn f11dfce292 ci: suppress dialyzer checks for quicer and odbc types 2024-07-19 17:11:49 +08:00
Shawn c61828460a chore: emqx_utils_api:with_node/2 support simple http-code 2024-07-19 15:02:14 +08:00
Shawn 4d25f28bb2 fix: dialyzer checks 2024-07-19 14:19:50 +08:00
Shawn 5c2a7dfdfa fix: rename relup dir to relup_info to avoid tgz failure 2024-07-19 12:17:12 +08:00
Shawn 3ad7dc262b fix: some sanity-checks 2024-07-19 11:38:44 +08:00
Shawn 3c8ef35b18 fix: show relup status even if no packages installed 2024-07-19 11:20:37 +08:00
Thales Macedo Garitezi 01883e9759 chore(new mix build): fix app dependency for release 2024-07-18 11:33:02 -03:00
Shawn 79b65a28c1 chore: use emqx-relup 0.1.0 2024-07-18 18:54:10 +08:00
Andrew Mayorov 3a893626b8
Merge pull request #13474 from keynslug/ft/EMQX-12309/ds-cas-api
feat(ds): allow isolated batches with preconditions
2024-07-18 12:52:13 +02:00
Shawn 2008130071 feat: add HTTP APIs for relup 2024-07-18 18:48:38 +08:00
Shawn c6b02bc13f feat: support starting emqx from relup dir
We put all of the unpacked files into the `relup` dir, and warn the user if booting from it
2024-07-18 18:47:27 +08:00
Ivan Dyachkov 4a04ffdca1
Merge pull request #13483 from id/20240717-sync-release-57
sync release-57
2024-07-18 10:41:49 +02:00
Ivan Dyachkov c2d49ff34f chore(rmq_tests): fix rabbitmq tests
Co-authored-by: Ilya Averyanov <av@rubybox.dev>
2024-07-18 08:34:00 +02:00
zhongwencool ac52bf39ce
Merge pull request #13443 from zhongwencool/cluster-link-cli-load
fix: update cluster.links via cli
2024-07-18 09:13:59 +08:00
Andrew Mayorov 0e545ffcec
feat(ds): add dedicated `#message_matcher{}` for preconditions 2024-07-17 21:27:17 +02:00
Andrew Mayorov 2e89656a90
test(dsraft): start `t_replication_transfers_snapshots` from stable state 2024-07-17 20:01:55 +02:00
Andrew Mayorov 466fa41ec3
fix(dsraft): rely on last resort timeout with unresponsive replicas
This simplifies the shard transition scheduling logic and makes it less
prone to races.
2024-07-17 19:24:38 +02:00
Thales Macedo Garitezi 4edbcc55e7
Merge pull request #13463 from thalesmg/20240712-m-gprodu-backoff-retry
feat(gcp pubsub producer): retry on 502 and 503 http status code responses
2024-07-17 12:49:27 -03:00
Andrew Mayorov 0c05b3f019
fix(ds): make conditionals less confusing 2024-07-17 16:23:41 +02:00
Andrew Mayorov 78fe9304be
Merge pull request #13462 from keynslug/fix/ci/flaky-ds-raft
fix(dsraft): preserve pending replica set transitions
2024-07-17 16:11:59 +02:00
Andrew Mayorov 14022aded1
feat(ds): allow isolated batches with preconditions
Namely, single message deletions and preconditions that can be used to
build complex "compare-and-swap"-style operations. Also allow user to
declare that atomic batches support is needed for a DB.
2024-07-17 15:57:17 +02:00
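A hypothetical Erlang sketch of what such an isolated batch could look like (every key, tuple, and function name below is illustrative, not the actual `emqx_ds` API): a precondition checks that an expected message exists, and the delete-plus-store operations are applied atomically only if it does, giving a compare-and-swap-style update.

```
%% Hypothetical batch shape; all names are illustrative.
-module(ds_cas_sketch).
-export([cas_batch/3]).

%% Build a "compare-and-swap"-style batch: only if a message matching
%% ExpectedOld exists on Topic is it deleted and replaced by New, and the
%% whole batch is applied atomically or not at all.
cas_batch(Topic, ExpectedOld, New) ->
    #{
        preconditions => [{if_exists, #{topic => Topic, payload => ExpectedOld}}],
        operations    => [{delete, #{topic => Topic}}, {store, New}]
    }.
```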
Andrew Mayorov 02e1007a16
feat(dslocal): implement `force_monotonic_timestamps => false` 2024-07-17 15:49:50 +02:00
zhongwencool 3381eecd6f chore: apply code review 2024-07-17 21:27:07 +08:00
zhongwencool 3c832db13d test: test cluster.links reloaded 2024-07-17 21:06:48 +08:00
zhongwencool 937fb153c2 fix: fill_default/3 should populate default values for all parameters except the 'ds' 2024-07-17 21:06:48 +08:00
zhongwencool 7b6b9580c8 test: add test for updating cluster.links 2024-07-17 21:06:48 +08:00
zhongwencool 2783192f77 fix: update cluster.links via cli 2024-07-17 21:06:48 +08:00
zhongwencool 083537daa3 fix: retry not_found if conf file not exist 2024-07-17 21:06:48 +08:00
Andrew Mayorov ae3812da85
feat(ds): allow to turn monotonic timestamps off for DB
That tells the implementation how to assign timestamps to messages. The current
implicit default is now `force_monotonic_timestamps => true`.
2024-07-17 14:40:23 +02:00
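An illustrative Erlang sketch of using the option (the option name and the `builtin_local` backend come from this commit log; the call shape is an assumption, not a verified `emqx_ds` signature):

```
%% Illustrative only; verify the real emqx_ds API before relying on this.
-module(ds_open_sketch).
-export([open/0]).

open() ->
    Opts = #{
        backend => builtin_local,              %% assumed create-option key
        force_monotonic_timestamps => false    %% implicit default is true
    },
    emqx_ds:open_db(my_db, Opts).
```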
Andrew Mayorov 6b130c6422
fix(dsraft): preserve pending replica set transitions
Otherwise, information about pending replica set transitions taking a
long time to complete could be lost on subsequent target set changes and
node crashes.
2024-07-17 12:17:07 +02:00
Ivan Dyachkov 292b331064 Merge remote-tracking branch 'upstream/release-57' into 20240717-sync-release-57 2024-07-17 11:29:25 +02:00
Thales Macedo Garitezi 1ad02a11e2
Merge pull request #13455 from thalesmg/20240711-m-mix-umbrella-part-III-no-ci
sync new mix build work to master
2024-07-16 14:41:10 -03:00
Andrew Mayorov d04915d6a6
test(dsraft): increase `ra_server` logging level for debugging 2024-07-16 15:54:49 +02:00
Andrew Mayorov 78bb102311
test(dsraft): attempt to start select testcases from stable state 2024-07-16 15:54:49 +02:00
ieQu1 706cab3c86
Merge pull request #13467 from ieQu1/dev/optimize-connection-process_msg
fix(connection): Make process_msg function tail-recursive
2024-07-15 17:00:50 +02:00
Thales Macedo Garitezi 4a08bfc93f feat(mix ct): improve failure logging 2024-07-15 09:26:04 -03:00
Thales Macedo Garitezi 0555a8ec61 fix(mix): bizarre compilation order bug with `emqx` profile
For some bizarre reason, if the `:apps` key is defined in the `project()` callback in the
root umbrella `mix.exs`, it messes up the compilation order that mix follows when
compiling the project from scratch.

Specifically, in the `emqx` profile, even though `:emqx_utils` is an explicit dependency
of `:emqx_ds_builtin_local`, mix insisted on compiling the latter before the former, and
failing, obviously.  Removing the explicit `:apps` from the project definition solved
this.

🫠
2024-07-15 09:26:04 -03:00
Thales Macedo Garitezi 02a0ccfdd1 ci: preparations for new mix build 2024-07-15 09:26:04 -03:00
Thales Macedo Garitezi 9a003ee3cf feat(mix eunit): add support for filtering test cases 2024-07-15 09:26:04 -03:00
Thales Macedo Garitezi bbd51bdf18 feat(mix ct): add support for specifying group paths 2024-07-15 09:26:04 -03:00
Thales Macedo Garitezi 39c82fbe89 feat(mix): always run merge-config before release 2024-07-15 09:26:04 -03:00
Thales Macedo Garitezi 70786d6aca test: fix suite apps 2024-07-15 09:26:04 -03:00
Thales Macedo Garitezi 066fd0481b feat(mix): compile asn1 files 2024-07-15 09:26:04 -03:00
ieQu1 46c2c75b7b
fix(connection): Make process_msg function tail-recursive 2024-07-14 06:00:00 +02:00
Thales Macedo Garitezi 0e57b39cf2 feat(gcp pubsub producer): retry on 502 and 503 http status code responses
Fixes https://emqx.atlassian.net/browse/EMQX-12625
2024-07-12 15:29:59 -03:00
Andrew Mayorov 2401a2fb80
test(dsraft): run `t_join_leave_errors` case in tracing context 2024-07-12 18:28:24 +02:00
Andrew Mayorov af81800aec
chore(dsraft): log a bit more informative messages in shard allocator 2024-07-12 18:24:58 +02:00
Andrew Mayorov 8e8b382ec0
chore(dsraft): provide more details when replica is unready 2024-07-12 18:23:23 +02:00
Andrew Mayorov 70a760850f
chore(dsraft): correct comment spelling errors 2024-07-12 15:27:29 +02:00
Andrew Mayorov 205ad507ea
test(dsraft): attempt to ensure testcases start from stable state
Where "stable state" is currently defined as "everyone knows and agrees
on the current leader".
2024-07-12 15:26:00 +02:00
Ivan Dyachkov ffa69df6f8
Merge pull request #13461 from id/20240712-ci-add-sync-release-branch-workflow
ci: add sync-release-branch workflow
2024-07-12 13:13:00 +02:00
Ivan Dyachkov e07d96e4d8 ci: add sync-release-branch workflow 2024-07-12 12:52:16 +02:00
Ilia Averianov 82e723bd18
Merge pull request #13459 from savonarola/0712-reduce-flackyness
chore(mgmt): reduce test flakiness
2024-07-12 13:14:14 +03:00
Ilya Averyanov 9ca8aeb155 chore(mgmt): reduce test flakiness 2024-07-12 12:10:49 +03:00
Thales Macedo Garitezi 21313c766d ci: add dialyzer mix task 2024-07-11 14:19:23 -03:00
Thales Macedo Garitezi f3c6d10f76 fix(mix): fix compile paths and deps 2024-07-11 14:19:23 -03:00
zmstone 7664b06e98
Merge pull request #13434 from zmstone/0704-refine-rpc-config
0704 refine rpc config
2024-07-11 10:25:45 +02:00
ieQu1 02ce7e1b07
Merge pull request #13446 from ieQu1/dev/ds-build-platform
chore(ds): Support platform profile
2024-07-10 13:26:01 +02:00
ieQu1 4825079964
chore(ds): Support platform profile 2024-07-10 12:03:23 +02:00
zmstone 917df38a07 docs: add changelog for PR 13434 2024-07-09 22:26:30 +02:00
zmstone 7a23ae7b4d refactor: expose only server_port for rpc
previously, there were 4 port configs:
- tcp_server_port
- ssl_server_port
- tcp_client_port
- ssl_client_port
2024-07-09 22:26:29 +02:00
zmstone ee13773496 refactor: rename rpc.tcp_client_num to rpc.client_num
tcp_client_num is kept as an alias
2024-07-09 22:26:29 +02:00
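A hedged HOCON sketch of the resulting RPC config surface (values are illustrative, not defaults taken from the docs): a single `server_port` replaces the four per-transport port settings listed above, and the pool size uses the new `client_num` name while `tcp_client_num` remains accepted as an alias.

```
rpc {
  # single listen-port setting replacing tcp_server_port / ssl_server_port
  # and the client-side counterparts (illustrative value)
  server_port = 5369
  # renamed from tcp_client_num; the old name is kept as an alias
  client_num = 10
}
```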
Thales Macedo Garitezi 48e604bda8 fix(mix grpc): include default erlc options 2024-07-09 15:56:30 -03:00
Thales Macedo Garitezi 818070ad44 test(mix): add integration test path 2024-07-09 15:56:30 -03:00
Thales Macedo Garitezi 5279ad76be fix(grpc compiler): unload apps to avoid side effects 2024-07-09 15:56:30 -03:00
Thales Macedo Garitezi b91515b131 fix(schema registry mix): gpb is a runtime dep 2024-07-09 15:56:30 -03:00
Thales Macedo Garitezi 6d94809950
Merge pull request #13415 from thalesmg/20240703-m-couchbase-action
feat: implement couchbase connector and action
2024-07-09 15:53:11 -03:00
ieQu1 92dc059908
Merge pull request #13370 from ieQu1/dev/skip-streams
New durable storage layout with explicit index for LTS wildcards
2024-07-09 20:27:21 +02:00
ieQu1 3721be65ee
fix(ds): Improve comments 2024-07-09 13:15:15 +02:00
ieQu1 d7732a6aac
test(ds): Attempt to stabilize a flaky test 2024-07-09 13:15:15 +02:00
ieQu1 e70c1cfea3
test(ds): Improve stability of replication test suite 2024-07-09 13:15:15 +02:00
ieQu1 dc4ae82798
test(ds): Add message ID 2024-07-09 13:15:14 +02:00
ieQu1 d1b574a67e
perf(dslocal): Run heavy-duty operations in a temporary process 2024-07-09 13:15:14 +02:00
ieQu1 661f79544b
fix(ds): Optimize hot loop of skipstream storage layout 2024-07-09 13:15:14 +02:00
ieQu1 23dafbb03b
feat(ds): Add a benchmarking tool for storage efficiency analysis 2024-07-09 13:15:14 +02:00
ieQu1 afeb2ab8aa
feat(ds): Add metrics for skipstream layout 2024-07-09 13:15:14 +02:00
ieQu1 b68ebb9a73
test(dsrepl): Generalize tests to use different storage layouts 2024-07-09 13:15:14 +02:00
ieQu1 8c5e4a2376
test(ds): Generalize storage layout test suite for different layouts 2024-07-09 13:15:14 +02:00
ieQu1 086e7256f5
feat(ds): Add configuration schema for skipstream LTS layout 2024-07-09 13:15:14 +02:00
ieQu1 a4642d4d06
feat(ds): Add a new storage layout engine: "skipstream"
This layout is based on LTS as well, but it uses separate index
streams for constrained replay of streams with learned wildcards
2024-07-09 13:15:14 +02:00
ieQu1 de48077ac4
test(ds): Add new helper functions
- Improve message comparison
- Add set operations
2024-07-09 13:15:14 +02:00
ieQu1 210556e545
feat(ds): Generalize value serialization
- Add a new utility module for message serialization
- Add experimental serializer based on ASN.1
2024-07-09 13:15:14 +02:00
ieQu1 843973ef32
fix(ds): bitfield_lts: static_key_size -> static_key_bits 2024-07-09 13:15:14 +02:00
ieQu1 f84fb34692
feat(ds_lts): New APIs: info, reverse lookups and topic compression 2024-07-09 13:15:14 +02:00
ieQu1 eb80402ccb
fix(ds): Improve typespecs and descriptions in storage_layer 2024-07-09 13:15:14 +02:00
ieQu1 71dad0242e
docs(ds): Move Raft-related parts to emqx_ds_builtin_raft README 2024-07-09 13:15:14 +02:00
ieQu1 afe1c5617d
refactor(ds): Rename macros for bitfield_lts metrics 2024-07-09 13:15:14 +02:00
ieQu1 0f2c19b656
refactor(ds): Move end_of_stream detection logic for delete_next 2024-07-09 13:15:14 +02:00
ieQu1 b565976794
fix(ds): Fix hashing of empty wildcard topic levels in bitfield_lts 2024-07-09 13:15:13 +02:00
zmstone 91fd01ed21
Merge pull request #13411 from Altair-Bueno/master
new(helm): websocket ingress (fixes #13309)
2024-07-09 11:39:07 +02:00
Thales Macedo Garitezi 0d1eaba82e
Merge pull request #13437 from thalesmg/20240708-m-fix-includes
fix(ds builtin local): use `-include_lib` instead of relative path
2024-07-08 18:40:11 -03:00
Thales Macedo Garitezi f00bb383d4 fix(ds builtin local): use `-include_lib` instead of relative path 2024-07-08 16:57:55 -03:00
zhongwencool fd18e5feb3
Merge pull request #13202 from zhongwencool/cluster-fix-cli
feat: add cluster fix command
2024-07-08 19:08:34 +08:00
zhongwencool 820789a09f fix: redact status when mark_fix_log begin 2024-07-08 17:32:45 +08:00
zhongwencool 457ea93570 test: add cluster_sync cli test 2024-07-08 17:04:41 +08:00
zhongwencool f490a0cba2 feat: don't reset tnx_id when cluster_fix 2024-07-08 17:04:41 +08:00
zhongwencool 298211d101 chore: apply suggestions from code review
Co-authored-by: zmstone <zmstone@gmail.com>
2024-07-08 17:04:41 +08:00
zhongwencool bdf3fc63a6 chore: add config leader to suggestion 2024-07-08 17:04:41 +08:00
zhongwencool 22fc3c49cc chore: combine some common code into one function 2024-07-08 17:04:41 +08:00
zhongwencool 5b105fcdbb chore: move emqx_conf_proto_v3 to emqx_conf_proto_v4 2024-07-08 17:04:41 +08:00
zhongwencool 3ed4340145 test: fix cluster_rpc test failed 2024-07-08 17:04:41 +08:00
zhongwencool 2069910ad1 feat: add cluster fix command 2024-07-08 17:04:41 +08:00
Shawn 5fca0a16f9 feat: rename emqx_relup to emqx_post_upgrade 2024-07-08 10:33:09 +08:00
Shawn 92594d042b feat: add some relup examples 2024-07-08 10:33:09 +08:00
Shawn e9163f2752 feat: generate relup tarball
To generate a tarball, tag the release and then:

```
make emqx-enterprise-relup
```
2024-07-08 10:33:09 +08:00
Kjell Winblad c8258cebe8
Merge pull request #13414 from kjellwinblad/kjell/review_connector_error_logs_rabbitmq_etc/EMQX-12462
fix: make RabbitMQ error log messages easier to understand
2024-07-05 15:01:15 +02:00
Kjell Winblad aeacb3d58a docs: add change log entry 2024-07-05 11:05:21 +02:00
Altair-Bueno f2f8c2ae92 fix(helm): Default to /mqtt on ingress as Rory-Z suggestion 2024-07-05 09:09:30 +02:00
Thales Macedo Garitezi c4dd167cb9 feat: implement couchbase connector and action
Fixes https://emqx.atlassian.net/browse/EMQX-12545
2024-07-04 17:51:59 -03:00
Thales Macedo Garitezi 16113001fe
Merge pull request #13413 from thalesmg/20240703-m-mix-umbrella-part-II
chore: add missing mix files to new apps
2024-07-04 14:27:59 -03:00
Kjell Winblad b994e0f1c0 fix: make RabbitMQ error log messages easier to understand
Fixes:
https://emqx.atlassian.net/browse/EMQX-12462
2024-07-04 16:43:53 +02:00
Thales Macedo Garitezi 420493deb4 chore: add missing mix files to new apps 2024-07-04 10:27:59 -03:00
Altair-Bueno 20be0df62d feat(helm): websocket ingress
Closes: #13309
2024-07-04 10:52:42 +02:00
Thales Macedo Garitezi e9265b88e5
Merge pull request #13406 from thalesmg/20240703-m-sync-r57
sync release-57 to master
2024-07-03 17:21:02 -03:00
Thales Macedo Garitezi 32ace85e1c Merge branch 'release-57' into 20240703-m-sync-r57 2024-07-03 16:03:15 -03:00
Andrew Mayorov 187f5e5936
Merge pull request #13391 from keynslug/perf/EMQX-12611/avoid-stream-shuffle
perf(sessds): rotate through streams with iterators when fetching
2024-07-03 17:46:29 +02:00
Andrew Mayorov 45dbfb77e3
Merge pull request #13402 from keynslug/ci/flaky/s3-aggreg
test(bridge-s3): correct assertions to get rid of flakiness
2024-07-03 17:44:42 +02:00
Andrew Mayorov d7d5eb2c52
test(bridge-s3): correct assertions to get rid of flakiness 2024-07-03 16:40:34 +02:00
Andrew Mayorov 950f4d9483
fix(sessds): defend restartable stream iterator from infinite loop 2024-07-03 15:21:16 +02:00
Andrew Mayorov 947af1faaf
chore(sessds): rename `iterate` -> `iter_next` for consistency 2024-07-03 15:21:16 +02:00
zhongwencool 17261c6499
Merge pull request #13379 from emqx/dependabot/github_actions/actions-b098ddba97
build(deps): bump the actions group across 1 directory with 3 updates
2024-07-03 17:28:05 +08:00
zhongwencool 7f17981a12
Merge pull request #13392 from thalesmg/20240702-m-sync-r57-mix-umbrella
sync release-57 to master
2024-07-03 16:24:24 +08:00
zhongwencool cfa29eaa6f Merge remote-tracking branch 'upstream/release-57' into 20240702-m-sync-r57-mix-umbrella 2024-07-03 15:30:11 +08:00
zhongwencool cfa7c3bf04 Merge remote-tracking branch 'upstream/release-57' into 20240702-m-sync-r57-mix-umbrella 2024-07-03 15:15:58 +08:00
Andrew Mayorov 1d5669d008
fix(sessds): tolerate removal of nonexistent gbt entries 2024-07-02 17:15:41 +02:00
Thales Macedo Garitezi 5532f40d83 Merge branch 'release-57' into 20240702-m-sync-r57-mix-umbrella 2024-07-02 11:52:36 -03:00
Andrew Mayorov a57917b66b
perf(sessds): rotate through streams with iterators when fetching
This avoids expensive shuffling of the whole list of fetchable streams,
which can be quite long.
2024-07-02 15:42:33 +02:00
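An Erlang sketch of the rotation idea (illustrative, not the session code): rather than reshuffling the whole list of fetchable streams on every fetch, take the head and move it to the back, so each stream still gets its turn.

```
%% Illustrative sketch only.
-module(stream_rotate_sketch).
-export([fetch_round_robin/1]).

fetch_round_robin([]) ->
    {no_streams, []};
fetch_round_robin([Stream | Rest]) ->
    Messages = fetch_from(Stream),
    %% Rotate: the stream just served goes to the back of the list.
    {Messages, Rest ++ [Stream]}.

fetch_from(_Stream) ->
    %% Placeholder for the real per-stream fetch.
    [].
```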
Andrew Mayorov 9a4f3f88e3
feat(sessds): allow stream iteration starting from a specific key 2024-07-02 15:40:40 +02:00
Andrew Mayorov dc73b957b3
feat(sessds): use trees to hold streams in session state 2024-07-02 15:39:02 +02:00
dependabot[bot] 7c0e85d239
build(deps): bump the actions group across 1 directory with 3 updates
Bumps the actions group with 3 updates in the / directory: [actions/checkout](https://github.com/actions/checkout), [actions/download-artifact](https://github.com/actions/download-artifact) and [erlef/setup-beam](https://github.com/erlef/setup-beam).


Updates `actions/checkout` from 4.1.2 to 4.1.7
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4.1.2...692973e3d937129bcbf40652eb9f2f61becf3332)

Updates `actions/download-artifact` from 4.1.4 to 4.1.7
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v4.1.4...65a9edc5881444af0b9093a5e628f2fe47ea3b2e)

Updates `erlef/setup-beam` from 1.17.6 to 1.18.0
- [Release notes](https://github.com/erlef/setup-beam/releases)
- [Commits](0a541161e4...a6e26b2231)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: erlef/setup-beam
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-07-01 14:05:58 +00:00
Ivan Dyachkov 532f04da9d
Merge pull request #13373 from id/0701-sync-release-57
sync release-57
2024-07-01 16:02:29 +02:00
Ivan Dyachkov 505f568c32 chore: bump apps versions 2024-07-01 13:52:27 +02:00
Ivan Dyachkov e28750b522 Merge remote-tracking branch 'upstream/release-57' into 0701-sync-release-57 2024-07-01 13:52:14 +02:00
lafirest 6c665037de
Merge pull request #13372 from lafirest/fix/gateway_license
fix: limit gateway connections with license
2024-07-01 18:46:05 +08:00
firest b38b4ee5a2 chore: update changes 2024-07-01 16:37:11 +08:00
firest b5d507bad8 fix: limit gateway connections with license 2024-06-30 20:06:04 +08:00
Thales Macedo Garitezi 9ede62c9b1
Merge pull request #13356 from thalesmg/20240626-m-test-sparkplug-empty-roundtrip
test(schema registry): add test asserting the behavior of empty message roundtrip
2024-06-28 12:08:38 -03:00
zhongwencool 2c48d7e0f0
Merge pull request #13347 from zhongwencool/improve-check-oom-log
chore: log shutdown reason for check_oom trace log
2024-06-28 08:06:11 +08:00
Thales Macedo Garitezi 79f15b1daa test(schema registry): add test asserting the behavior of empty message roundtrip
Relates to https://emqx.atlassian.net/browse/EMQX-10866
2024-06-27 17:49:57 -03:00
Andrew Mayorov 3ff9440a01
Merge pull request #13218 from keynslug/feat/EMQX-12468/wal-less
feat(dsrepl): enable WAL-less batch writes
2024-06-27 14:52:22 +02:00
Andrew Mayorov 58b931160f
Merge pull request #13351 from keynslug/ci/pin-debian-x64
build: pin base docker image to stable-20240612-slim
2024-06-27 14:10:28 +02:00
Ivan Dyachkov 8a42d664b8
build: pin base docker image to stable-20240612-slim
the latest version of 12-slim as of today is configured
to fetch i386 packages
2024-06-27 13:21:06 +02:00
zhongwencool 6a78951715 chore: log shutdown reason for check_oom trace log 2024-06-27 14:52:22 +08:00
Andrew Mayorov e1de18ef10
test(dsrepl): await stable state before running testcase 2024-06-26 20:44:35 +02:00
Andrew Mayorov 30efa1f57e
test(dsrepl): relax crash-recover testcase to tolerate message loss
Message loss is an expected occurrence in this kind of stress test.
2024-06-26 20:44:34 +02:00
Andrew Mayorov 3d296abde9
fix(dsrepl): classify ra error conditions more carefully
Most importantly: avoid automatic retries of `shutdown` and `nodedown`
errors, as these could easily lead to duplication of Raft log entries.
2024-06-26 20:44:34 +02:00
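A purely hypothetical sketch of the classification idea in the commit above (names are illustrative, not the actual `dsrepl` code): timeouts can be retried, but after `shutdown` or `nodedown` the outcome of the submitted command is unknown, so retrying it blindly could duplicate Raft log entries.

```erlang
%% Illustrative sketch, not the real emqx_ds replication code.
-module(ra_error_class_sketch).
-export([classify/1]).

classify({timeout, _ServerId}) -> recoverable;     %% request never reached a leader
classify({error, noproc})      -> recoverable;     %% server not started yet
classify({error, nodedown})    -> unrecoverable;   %% command may already be committed
classify({error, shutdown})    -> unrecoverable;   %% command may already be committed
classify(Other)                -> {unrecoverable, Other}.
```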
Andrew Mayorov 733751fadd
refactor(dsstore): keep passing `Options` to both prepare + commit 2024-06-26 20:44:34 +02:00
Andrew Mayorov 5b5f33c421
chore(dsstore): resurrect `prepare_batch` entry tracepoint 2024-06-26 20:44:34 +02:00
Andrew Mayorov 8ff48ac5ea
feat(dsrepl): rely on accumulated log size to decide when to flush 2024-06-26 20:44:34 +02:00
Andrew Mayorov b6a249baa9
feat(cth-peer): add brutal `kill/1` facility 2024-06-26 20:44:34 +02:00
Andrew Mayorov 8db70b5bbc
test(dsrepl): add crash-restart-recover testcase
That verifies nothing is lost in the event of abrupt node failures.
2024-06-26 20:44:33 +02:00
Andrew Mayorov ae89b61af0
feat(cth-cluster): make `restart/1` more generic 2024-06-26 20:44:33 +02:00
Andrew Mayorov 5fd5fc76e5
fix(dsstore): ensure backward compatibility 2024-06-26 20:44:33 +02:00
Andrew Mayorov 8538a5a5b6
test(dsrepl): anticipate transitionless membership changes
E.g. when a membership change is applied twice in a row.
2024-06-26 20:44:33 +02:00
Andrew Mayorov 19072414cb
chore: bump `erlang-rocksdb` to 1.8.0-emqx-6 2024-06-26 20:44:33 +02:00
Andrew Mayorov cd0663074e
test(dsrepl): add `add_generation` events into the mix
They usually cause the storage layer to perform flushes, and thus enable
testing the `handle_flush/1` codepath in different circumstances.
2024-06-26 20:44:33 +02:00
Andrew Mayorov 2180cc7c26
fix(dsstore): avoid storing `cf_refs()` in the RocksDB itself
This is both pointless and confusing.
2024-06-26 20:44:33 +02:00
Andrew Mayorov 80ea2e62f7
fix(stream): ensure that `chain/1` preserves the order 2024-06-26 20:44:32 +02:00
Andrew Mayorov 7895e9cc45
feat(dsstore): make WAL-less mode optional
And make the upper layer choose when to use it.
2024-06-26 20:44:32 +02:00
Andrew Mayorov 0c0757b8c2
feat(dsrepl): enable WAL-less batch writes 2024-06-26 20:44:32 +02:00
Andrew Mayorov 2705226eb5
feat(dsrepl): release log entries occasionally
Also make the tracepoints in the `apply/3` callback implementation more
uniform.
2024-06-26 20:44:32 +02:00
Andrew Mayorov b6894c18fa
chore(dsrepl): improve tracepoints usability a bit 2024-06-26 20:44:32 +02:00
Andrew Mayorov 10e9fed22b
Merge pull request #13326 from keynslug/fix/ct/api-clients-flaky
test(api-clients): refactor suite to isolate flakiness
2024-06-25 18:26:42 +02:00
ieQu1 9f30da334f
Merge pull request #13248 from ieQu1/dev/EMQX-12491-local-backend
replace builtin DS backend with builtin_local and builtin_raft
2024-06-24 23:07:57 +02:00
ieQu1 d349f84f04
fix(ds): Apply remarks 2024-06-24 20:20:40 +02:00
Andrew Mayorov d12b985507
test(api-clients): refactor suite to isolate flakiness 2024-06-24 16:54:20 +02:00
Thales Macedo Garitezi debf1e6cd5
Merge pull request #13316 from thalesmg/fix-mt-read-user-prop-m-20240621
fix(message transformation): correctly read from user properties in context
2024-06-24 09:31:48 -03:00
ieQu1 c0472a06f1
test(ds): Set initial conditions in repl. suite explicitly 2024-06-22 02:55:05 +02:00
Thales Macedo Garitezi 6682004dc8
Merge pull request #13319 from thalesmg/20240621-sync-r57-m
sync release-57 to master
2024-06-21 17:44:08 -03:00
Thales Macedo Garitezi ec83d999bf Merge remote-tracking branch 'origin/release-57' into 20240621-sync-r57-m 2024-06-21 14:11:08 -03:00
ieQu1 9a58d71378
fix(ds): Move DS backend supervision trees to their own apps 2024-06-21 18:22:04 +02:00
ieQu1 3d69ec496a
fix(schema): Transform config of `singleton' discovery_strategy 2024-06-21 17:16:25 +02:00
ieQu1 d7d878fd43
test(ds): Fix emqx standalone test profile selection 2024-06-21 17:16:25 +02:00
ieQu1 8aa27488b6
test: Disable certain DS-related suites in CE 2024-06-21 17:16:24 +02:00
ieQu1 1d3b1868fb
test(ds): Use close_db API 2024-06-21 17:16:24 +02:00
ieQu1 b2f7815a7f
test(ds): Don't start raft explicitly 2024-06-21 17:16:24 +02:00
ieQu1 3851fc189f
fix(ds): Avoid reverse dependencies from storage to repl. layer 2024-06-21 17:16:24 +02:00
ieQu1 bc915216a0
feat(ds): Support metrics for builtin_local backend 2024-06-21 17:16:24 +02:00
ieQu1 be6c5e172f
fix(ds): Disable DS management APIs for builtin_local backend 2024-06-21 17:16:24 +02:00
ieQu1 99c9b56cf3
feat(ds_buffer): Add `ls' function to list all local buffers 2024-06-21 17:16:24 +02:00
ieQu1 5a8818edf3
feat(ds): Add schema for builtin_local backend 2024-06-21 17:16:24 +02:00
ieQu1 4484f30021
feat(ds_schema): Add schema for builtin_local backend 2024-06-21 17:16:24 +02:00
ieQu1 8990b1312b
refactor(ds_schema): Rename backend builtin -> builtin_raft 2024-06-21 17:16:24 +02:00
ieQu1 ea48b1265d
refactor(ds_schema): Extract common builtin fields 2024-06-21 17:16:24 +02:00
ieQu1 abe41de19b
refactor(ds_schema): builtin_local_buffer -> builtin_buffer 2024-06-21 17:16:24 +02:00
ieQu1 ecb172b07e
refactor(ds): Rename egress metrics to 'buffer' 2024-06-21 17:16:24 +02:00
ieQu1 09c3ae795d
refactor(ds_raft): Replace egress server with common emqx_ds_buffer 2024-06-21 17:16:24 +02:00
ieQu1 a0fbd37e58
refactor(emqx): Use emqx_ds_backends application 2024-06-21 17:16:24 +02:00
ieQu1 ef09cfcd71
feat(ds): Add `emqx_ds_backends` application 2024-06-21 17:16:24 +02:00
ieQu1 279619fc80
feat(ds): Add `builtin_local' backend 2024-06-21 17:16:24 +02:00
ieQu1 a8ea0ae4e5
refactor(ds): Extract DS replication layer to a separate application 2024-06-21 17:16:24 +02:00
ieQu1 63f1856a2c
feat(ds): Dynamic backend registration 2024-06-21 17:16:24 +02:00
ieQu1 83dc8f4d77
fix(ds): Fix return values of emqx_ds_storage_layer functions 2024-06-21 17:16:23 +02:00
ieQu1 a18d1987a2
test(ds): Add a helper function for diffing messages 2024-06-21 17:16:23 +02:00
Ilia Averianov f1b8c356a6
Merge pull request #13299 from savonarola/0617-shared-sub-leader-poc
feat(queue): implement PoC version of session ↔️ shared group leader interaction
2024-06-21 16:45:17 +03:00
Thales Macedo Garitezi 28c7d94bd2 fix(message transformation): correctly read from user properties in context
Fixes https://emqx.atlassian.net/browse/EMQX-12582
2024-06-21 09:39:22 -03:00
Ilya Averyanov db28a042d5 feat(queue): handle renew_lease_timeout 2024-06-21 15:18:37 +03:00
Ilya Averyanov 2096755ad6 feat(queue): reorganize group sm callbacks and methods 2024-06-21 13:01:38 +03:00
Ilya Averyanov b9c5911883 feat(queue): implement `find_leader_timeout` event and so the `connecting` group_sm state 2024-06-21 12:22:17 +03:00
SergeTupchiy fb266fbf8c
Merge pull request #13300 from SergeTupchiy/EMQX-12195-cluster-link-conf-backup
cluster link conf backup
2024-06-21 11:42:57 +03:00
Ilya Averyanov 979fb58e50 feat(queue): use tp to trace new lease events 2024-06-20 21:12:15 +03:00
Ilya Averyanov 1205e34650 feat(queue): remove useless header file 2024-06-20 20:48:56 +03:00
Ilya Averyanov be175d205c feat(queue): add JIRA task links for TODOs 2024-06-20 15:38:16 +03:00
Ilya Averyanov a9c976b6c1 feat(queue): fix notes about the usage of agent<->leader protocol-related data structures 2024-06-20 15:24:25 +03:00
Ilya Averyanov d0cff63ed6 feat(queue): remove unused hrl-guard 2024-06-20 15:18:47 +03:00
Ilya Averyanov a92460d38f feat(queue): improve hints for macro clauses 2024-06-20 15:17:08 +03:00
zmstone f969a4ef5e
Merge pull request #13296 from zmstone/0619-sync-release-57-to-master
0619 sync release 57 to master
2024-06-20 10:33:52 +02:00
Ilya Averyanov 6da10036dc feat(queue): fix dialyzer issues 2024-06-19 22:25:53 +03:00
Ilya Averyanov a4bbab4aa2 feat(queue): document general entity layout 2024-06-19 22:24:20 +03:00
Serge Tupchii 71f5eaf11e fix(data_backup): import `cluster.links` conf 2024-06-19 20:47:06 +03:00
Serge Tupchii ef5cf4fac3 chore(clusterlink): fix comment 2024-06-19 20:46:00 +03:00
Ilya Averyanov 2b0146663a feat(queue): add smoke test for ds shared subs 2024-06-19 19:32:23 +03:00
Ilya Averyanov e3c4816035 feat(queue): move group subscription state machine to its own module 2024-06-19 19:22:10 +03:00
zmstone 021b6b3902 Merge remote-tracking branch 'origin/release-57' into 0619-sync-release-57-to-master 2024-06-19 15:06:41 +02:00
zmstone 213e4785e7 Merge remote-tracking branch 'origin/release-57' into 0619-sync-release-57-to-master 2024-06-19 14:54:48 +02:00
Ilya Averyanov bca743054b feat(queue): implement backbones of queue agent, leader and leader registry 2024-06-18 21:03:51 +03:00
Ivan Dyachkov 43cca6c9f8
Merge pull request #13286 from id/0718-sync-docker-docs-with-official-image
docs(docker): sync README.md with the official image docs
2024-06-18 08:48:36 +02:00
Ivan Dyachkov 4e83ca34ce docs(docker): sync README.md with the official image docs 2024-06-18 08:16:38 +02:00
ieQu1 a5110da37c
Merge pull request #13283 from thalesmg/sync-r57-m-20240617
sync release-57 to master
2024-06-18 00:10:50 +02:00
Thales Macedo Garitezi 20cffb54d4 Merge branch 'release-57' into sync-r57-m-20240617 2024-06-17 17:10:12 -03:00
Ilia Averianov ad993437aa
Merge pull request #13278 from savonarola/0617-release-57-sync
Sync release-57
2024-06-17 21:25:00 +03:00
Ilya Averyanov f8e6aab86f Merge branch 'release-57' into 0617-release-57-sync
* release-57:
  chore(auth,http): cache REs for parsing URIs
  fix(auth,http): improve URI handling
  chore: revert ULOG/ELOG
  test: generate dispatch.eterm in dashboard test
  docs: refine change log
  feat: make the dashboard restart quicker
  chore: fix typo
  fix(http authz): handle unknown content types in responses
  chore: change types of mysql and mongodb fields to `template()`
  fix(client mgmt api): allow projecting `client_attrs` from client fields
  fix(emqx_rule_funcs): expose regex_extract function to rule engine
2024-06-17 18:53:39 +03:00
SergeTupchiy 830266b4d5
Merge pull request #13126 from SergeTupchiy/EMQX-11967-cross-cluster-route-replication-syncer-actor
cluster linking
2024-06-17 18:52:46 +03:00
Thales Macedo Garitezi f5eb3e7471
Merge pull request #13250 from thalesmg/ds-singleton-m-20240613
feat: enforce singleton discovery strategy when using DS `builtin_local` backend
2024-06-17 09:34:05 -03:00
Serge Tupchii a905a6048c chore(clusterlink): rename link `upstream` field to `name` 2024-06-17 12:24:51 +03:00
Serge Tupchii a95a08efd3 test(clusterlink): add more test cases 2024-06-17 12:24:51 +03:00
Serge Tupchii 44c37571cc fix(clusterlink): ignore not_registered error 2024-06-17 12:24:51 +03:00
Serge Tupchii 00f912928f fix: fix emqx_external_broker:forward/1 spec 2024-06-17 12:24:51 +03:00
Andrew Mayorov 179870c573 chore: remove author-specific gitignore stuff
Which was accidentally committed.
2024-06-17 12:24:51 +03:00
Andrew Mayorov ed16ff07df refactor(broker): simplify external broker behaviour 2024-06-17 12:24:51 +03:00
Serge Tupchii d282c61120 feat(clusterlink): update only necessary resources when a link config is changed 2024-06-17 12:24:50 +03:00
Serge Tupchii 5304ca1563 fix(clusterlink): add emqx_cluster_link app to mix.exs/rebar.config.erl 2024-06-17 12:24:47 +03:00
Serge Tupchii ff16521d4f fix(clusterlink): add schema descriptions, fix dialyzer warnings, add resource_opts 2024-06-17 12:23:52 +03:00
Serge Tupchii 94e81ba812 feat(clusterlink): implement actor config handler 2024-06-17 12:23:52 +03:00
Serge Tupchii c871b37453 fix(clusterlink): add link topics schema validator 2024-06-17 12:23:52 +03:00
Andrew Mayorov 780a0bf807 fix(cluster-link): clear exit signal of failed-to-connect client 2024-06-17 12:23:52 +03:00
Andrew Mayorov d0df4de2a3 test(cluster-link): add e2e replication actor GC testcase 2024-06-17 12:23:52 +03:00
Andrew Mayorov c4840b30d2 fix(cluster-link): deduplicate routes down to dest cluster 2024-06-17 12:23:52 +03:00
Andrew Mayorov ede35df24a fix(cluster-link): cancel heartbeats on client down 2024-06-17 12:23:52 +03:00
Andrew Mayorov e0604e3af6 fix(cluster-link): anticipate clients may occasionally retry 2024-06-17 12:23:52 +03:00
Andrew Mayorov 0219b8bd4d feat(cluster-link): add simple replication actor GC process 2024-06-17 12:23:52 +03:00
Andrew Mayorov 7fccb5dbc9 test(topic): add more `intersection/2` testcases 2024-06-17 12:23:52 +03:00
Andrew Mayorov 7b8f466adf feat(topic): avoid `lists:reverse` when intersecting 2024-06-17 12:23:52 +03:00
Andrew Mayorov 24be189728 fix(topic): respect special topic rules when intersecting 2024-06-17 12:23:51 +03:00
Andrew Mayorov de1ac131f7 test(cluster-link): fix test suite setup 2024-06-17 12:23:51 +03:00
Serge Tupchii 58eaf07627 fix(clusterlink): validate config to disallow duplicated cluster links 2024-06-17 12:23:51 +03:00
Andrew Mayorov 54d51d0982 test(cluster-link): draft basic integration test suite 2024-06-17 12:23:51 +03:00
Andrew Mayorov e9c24090d4 fix(cluster-link): avoid starting ps syncer if persistence disabled 2024-06-17 12:23:51 +03:00
Andrew Mayorov 036c7e8492 test(cluster-link): adapt extrouter testsuite to new APIs 2024-06-17 12:23:51 +03:00
Andrew Mayorov 45eda4f3b9 fix(cluster-link): adapt heartbeat / reincarnation handling to new API 2024-06-17 12:23:51 +03:00
Andrew Mayorov 43d114546c feat(cluster-link): preserve replication actor state in pdict 2024-06-17 12:23:51 +03:00
Andrew Mayorov 5771a41a32 fix(cluster-link): ensure replication actor bootstraps do heartbeats 2024-06-17 12:23:51 +03:00
Andrew Mayorov d4b449c6e1 feat(cluster-link): implement replication actor heartbeats 2024-06-17 12:23:51 +03:00
Serge Tupchii faa4420e1f fix(clusterlink): improve actor error handling
Add status and error reason to the actor state, report alarms.
2024-06-17 12:23:51 +03:00
Serge Tupchii 21711c6e0d fix(clusterlink): communicate bootstrap requirements via actor handshake
The `session_present` flag is not reliable for deciding whether bootstrap is needed when durable sessions are enabled.
In that case, the client session may survive a cluster restart while all the external routes are lost, as they are not persistent.
2024-06-17 12:23:51 +03:00
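A purely hypothetical sketch of the handshake decision described above (state shape and names are assumptions, not the actual cluster-link code): the destination cluster decides during the init handshake whether the actor must re-bootstrap its routes, instead of trusting the MQTT `session_present` flag.

```erlang
%% Illustrative sketch only.
-module(actor_handshake_sketch).
-export([need_bootstrap/2]).

%% RemoteState is what the destination cluster remembers about this actor,
%% or `undefined` if it has no record of it.
need_bootstrap(undefined, _Incarnation) ->
    true;                                   %% nothing known: full bootstrap
need_bootstrap(#{incarnation := Known}, Incarnation) ->
    Known =/= Incarnation.                  %% stale incarnation: routes may be lost
```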
Serge Tupchii d5e82cdfac refactor(clusterlink): avoid unnecessary `maybe_` external_broker CB names 2024-06-17 12:23:51 +03:00
Serge Tupchii d578ac3f9e fix(clusterlink): match correct timer ref in router actor 2024-06-17 12:23:51 +03:00
Serge Tupchii b1aeb35370 feat(clusterlink): implement actor init handshake 2024-06-17 12:23:51 +03:00
Serge Tupchii ac19cf89df chore(clusterlink): remove code related to the rejected coordinator-based implementation 2024-06-17 12:23:51 +03:00
Serge Tupchii e26e7acaa1 refactor(clusterlink): use `emqx_bridge_mqtt_lib:clientid_base/1` to construct routesync client id 2024-06-17 12:23:51 +03:00
Serge Tupchii e7305c62ee feat(clusterlink): replicate shared subscription and persistent session routes 2024-06-17 12:23:51 +03:00
Serge Tupchii f036b641eb feat(clusterlink): integrate node local routes replication and message forwarding 2024-06-17 12:23:51 +03:00
Serge Tupchii 7df91d852c feat(clusterlink): integrate node local syncer/actor implementation
- support multiple cluster links in extrouter
 - apply extrouter ops on 'message.publish' hook
 - fix several minor bugs
2024-06-17 12:23:51 +03:00
Andrew Mayorov cbd01ae818 feat(clusterlink): add node-local route sync actor implementation 2024-06-17 12:23:51 +03:00
Andrew Mayorov 7b95273218 feat(routesync): make syncer a bit more generic and reusable 2024-06-17 12:23:51 +03:00
Andrew Mayorov 5bd9ee5c7f feat(utils): add `emqx_utils_ets:keyfoldl/3` function
Designed to be used with `bag` / `duplicate_bag` tables.
2024-06-17 12:23:51 +03:00
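A minimal sketch of a key-wise fold over a `bag` / `duplicate_bag` ETS table, assuming the semantics suggested by the commit above (the real `emqx_utils_ets:keyfoldl/3` may differ): the fun is called once per distinct key with all objects stored under it.

```erlang
-module(ets_keyfoldl_sketch).
-export([keyfoldl/3]).

keyfoldl(Fun, Acc0, Tab) ->
    keyfoldl(Fun, Acc0, Tab, ets:first(Tab)).

keyfoldl(_Fun, Acc, _Tab, '$end_of_table') ->
    Acc;
keyfoldl(Fun, Acc, Tab, Key) ->
    Objects = ets:lookup(Tab, Key),          %% all objects under this key
    keyfoldl(Fun, Fun(Key, Objects, Acc), Tab, ets:next(Tab, Key)).
```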
Andrew Mayorov 4097585f5d fix(clusterlink): ensure extrouter works on replicants
This is sort of a quick fix to make things safe, but it will likely be
subject to the same drawbacks as the regular router in high-latency
deployments: reduced throughput.
2024-06-17 12:23:51 +03:00
Andrew Mayorov a53524c826 fix(cth-cluster): fix occasional case clauses during cluster bootup 2024-06-17 12:23:51 +03:00
Andrew Mayorov 2dd99c5a08 feat(clusterlink): add facility to reconstruct remote routing table 2024-06-17 12:23:51 +03:00
Serge Tupchii f08342c704 feat: cluster link prototype WIP 2024-06-17 12:23:48 +03:00
lafirest 98a54994c0
Merge pull request #13196 from lafirest/fix/rules_len
fix(authz_mnesia): add a soft limit in the API for the length of ACL rules
2024-06-17 15:47:59 +08:00
firest 1716852057 fix(authz_mnesia): improve field names and changes 2024-06-17 10:09:25 +08:00
zmstone 2b7f3a597f
Merge pull request #13245 from qzhuyan/merge/william/sync-rel57-to-master
sync release57 to master
2024-06-15 09:13:55 +02:00
zhongwencool d433fc7c30
Merge pull request #13236 from zhongwencool/bug-schema-validation
fix: some minor bug fixes
2024-06-15 06:40:28 +08:00
zmstone 626aae6edf chore: fix bad conflict resolution 2024-06-14 16:57:53 +02:00
Thales Macedo Garitezi 62ecdb8ad0
Merge pull request #13246 from thalesmg/doc-changelog-nit-m-20240613
docs: minor changelog improvement
2024-06-14 09:19:28 -03:00
Thales Macedo Garitezi 378a16b4fb feat: enforce singleton discovery strategy when using DS `builtin_local` backend
Fixes https://emqx.atlassian.net/browse/EMQX-12493
2024-06-14 09:18:35 -03:00
William Yang b86d631744 test: fix tc t_handle_outing_non_utf8_topic 2024-06-14 08:17:19 +02:00
William Yang d39b8caff9 Merge branch 'release-57' into merge/william/sync-rel57-to-master 2024-06-14 08:17:09 +02:00
lafirest afd4b46f72
Merge pull request #13225 from lafirest/fix/mysql_redact
fix(auth): redact sensitive data for the authn/authz APIs
2024-06-14 09:19:21 +08:00
Thales Macedo Garitezi 01635722e9
docs: improve descriptions
Co-authored-by: ieQu1 <99872536+ieQu1@users.noreply.github.com>
2024-06-13 18:34:44 -03:00
Thales Macedo Garitezi 52e6c88941 feat: enforce singleton discovery strategy when using DS `builtin_local` backend
Fixes https://emqx.atlassian.net/browse/EMQX-12493
2024-06-13 17:04:23 -03:00
Thales Macedo Garitezi cba5c7bb45 refactor: rm dead code 2024-06-13 16:57:34 -03:00
Thales Macedo Garitezi 4d876f2af2 chore: bump ekka -> 0.19.4
https://github.com/emqx/ekka/pull/236
2024-06-13 16:35:58 -03:00
Thales Macedo Garitezi 3adf64e637
Merge pull request #13240 from thalesmg/ds-cluster-purge-m-20240612
feat: support purging durable sessions during cluster purge
2024-06-13 09:30:18 -03:00
Thales Macedo Garitezi 5b73670252 docs: minor changelog improvement
Addresses https://github.com/emqx/emqx/pull/13232#discussion_r1637889964
2024-06-13 09:14:37 -03:00
Thales Macedo Garitezi b6613e7b27
Merge pull request #13232 from thalesmg/mt-changelog-m-20240611
docs: add changelog for message transformation
2024-06-13 09:11:07 -03:00
William Yang 1664ea4ad4 Revert: TLS partial-chain and keyUsage #12955 #12977
This reverts commit 28b17a2562.
This reverts commit 01467246fc.
This reverts commit c3f8ba5762.
This reverts commit 1a4a4bb3a5.
This reverts commit fb30207ef3.
This reverts commit 337c230e79.
This reverts commit 3a674f44f1.
This reverts commit 70ffd77f99.
This reverts commit 03b0935564.
This reverts commit 650cf4b27e.
This reverts commit 43ad665dcf.
This reverts commit a29a43e5fc.
This reverts commit 4e9c1ec0c9.
This reverts commit 8eb463c58d.
This reverts commit 90430fa66d.
This reverts commit eb1ab9adfe.
This reverts commit 8bc3a86f63.
This reverts commit fa4357ce89.
This reverts commit 0b95a08d32.
2024-06-13 13:46:45 +02:00
zhongwencool 1dd4b6de5e
Merge pull request #13241 from zhongwencool/add-with-mfa-option-for-log
feat: add hidden option to record MFA and line in the log
2024-06-13 19:26:10 +08:00
zhongwencool 0eee2edd28 test: fix api_config SUITE failed 2024-06-13 14:55:24 +08:00
zhongwencool 2b1fa1711b fix: observer command crash when can't find object code 2024-06-13 14:55:23 +08:00
zhongwencool 623845d07d fix: don't allow set active_n to negative int 2024-06-13 14:55:23 +08:00
zhongwencool 825bfe4717 fix: load bad configs return unknown msg 2024-06-13 14:55:23 +08:00
zhongwencool 5d3f464ec3 fix: ws/wss's max_frame_size should > 0 2024-06-13 14:55:23 +08:00
zhongwencool 800a79adde test: test failed with_mfa 2024-06-13 09:50:52 +08:00
zhongwencool c72b455a83 feat: add hidden option to record MFA and line in the log, which is very useful for debugging. 2024-06-12 22:51:45 +08:00
Thales Macedo Garitezi d0e6f22a79 feat: support purging durable sessions during cluster purge
Fixes https://emqx.atlassian.net/browse/EMQX-12405
2024-06-12 11:19:53 -03:00
Thales Macedo Garitezi 0c805e049e docs: add changelog for message transformation 2024-06-12 09:52:25 -03:00
Thales Macedo Garitezi fe303231cf
Merge pull request #13230 from thalesmg/sync-r57-m-20240611
sync release-57 to master
2024-06-12 08:55:43 -03:00
Thales Macedo Garitezi db572d35a7 Merge remote-tracking branch 'origin/release-57' into sync-r57-m-20240611 2024-06-11 15:34:54 -03:00
Thales Macedo Garitezi b2d716909f
Merge pull request #13200 from thalesmg/update-erlazure-no-gen-server-m-20240606
chore: refactor azure blob connector to use new `erlazure` without `gen_server`
2024-06-11 13:30:14 -03:00
Thales Macedo Garitezi 686bcc8a48
Merge pull request #13207 from thalesmg/republish-metrics-m-20240607
fix(rule actions): check `republish` action result for metrics
2024-06-11 13:29:58 -03:00
firest 46a51bd92b chore: update changes 2024-06-11 21:34:37 +08:00
firest e64f60b73f fix(auth): redact sensitive data for the authn/authz APIs 2024-06-11 16:05:11 +08:00
Thales Macedo Garitezi e586178479 chore: refactor azure blob connector to use new `erlazure` without `gen_server`
https://github.com/dkataskin/erlazure/pull/43 removes unnecessary usage of `gen_server`
from the driver.
2024-06-10 13:42:28 -03:00
zmstone bb9fb10ec5
Merge pull request #13213 from zmstone/0609-syn-release-57-to-master
0609 sync release 57 to master
2024-06-10 10:28:59 +02:00
zmstone 45dd7816d7 Merge remote-tracking branch 'origin/release-57' 2024-06-09 10:11:15 +02:00
zmstone 15fbb966a0
Merge pull request #13198 from zmstone/0606-merge-release-57-to-master
0606 merge `release-57` to `master`
2024-06-07 16:23:26 +02:00
Thales Macedo Garitezi 39615e1cb6 fix(rule actions): check `republish` action result for metrics
Fixes https://emqx.atlassian.net/browse/EMQX-12328
2024-06-07 11:17:43 -03:00
zmstone 9574b33832
Merge branch 'master' into 0606-merge-release-57-to-master 2024-06-07 15:21:34 +02:00
Ivan Dyachkov 590569776d
Merge pull request #13205 from emqx/dependabot/github_actions/actions-f50227a1a2
build(deps): bump the actions group across 1 directory with 11 updates
2024-06-07 14:39:44 +02:00
Thales Macedo Garitezi 2828e8a5c5
Merge pull request #13201 from thalesmg/fix-fmt-m-20240606
ci: fix `make fmt` to take into account checkouts and elixir deps
2024-06-07 09:20:57 -03:00
Kjell Winblad a3f3f96781 test(emqx_bridge_v2_pgsql_SUITE): fix broken test case
We have to delete the connector and action created by the previous step
of the test case so we don't get a name conflict.
2024-06-07 12:03:34 +02:00
dependabot[bot] f07aaac256
build(deps): bump the actions group across 1 directory with 11 updates
Bumps the actions group with 11 updates in the / directory:

| Package | From | To |
| --- | --- | --- |
| [actions/checkout](https://github.com/actions/checkout) | `4.1.2` | `4.1.6` |
| [actions/upload-artifact](https://github.com/actions/upload-artifact) | `4.3.1` | `4.3.3` |
| [actions/download-artifact](https://github.com/actions/download-artifact) | `4.1.4` | `4.1.7` |
| [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) | `3.2.0` | `3.3.0` |
| [docker/login-action](https://github.com/docker/login-action) | `3.1.0` | `3.2.0` |
| [slackapi/slack-github-action](https://github.com/slackapi/slack-github-action) | `1.25.0` | `1.26.0` |
| [hashicorp/setup-terraform](https://github.com/hashicorp/setup-terraform) | `3.0.0` | `3.1.1` |
| [actions/cache](https://github.com/actions/cache) | `4.0.1` | `4.0.2` |
| [erlef/setup-beam](https://github.com/erlef/setup-beam) | `1.17.5` | `1.17.6` |
| [coverallsapp/github-action](https://github.com/coverallsapp/github-action) | `2.2.3` | `2.3.0` |
| [ossf/scorecard-action](https://github.com/ossf/scorecard-action) | `2.3.1` | `2.3.3` |



Updates `actions/checkout` from 4.1.2 to 4.1.6
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](9bb56186c3...a5ac7e51b4)

Updates `actions/upload-artifact` from 4.3.1 to 4.3.3
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](5d5d22a312...65462800fd)

Updates `actions/download-artifact` from 4.1.4 to 4.1.7
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](c850b930e6...65a9edc588)

Updates `docker/setup-buildx-action` from 3.2.0 to 3.3.0
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](2b51285047...d70bba72b1)

Updates `docker/login-action` from 3.1.0 to 3.2.0
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](e92390c5fb...0d4c9c5ea7)

Updates `slackapi/slack-github-action` from 1.25.0 to 1.26.0
- [Release notes](https://github.com/slackapi/slack-github-action/releases)
- [Commits](6c661ce588...70cd7be8e4)

Updates `hashicorp/setup-terraform` from 3.0.0 to 3.1.1
- [Release notes](https://github.com/hashicorp/setup-terraform/releases)
- [Changelog](https://github.com/hashicorp/setup-terraform/blob/main/CHANGELOG.md)
- [Commits](a1502cd9e7...651471c36a)

Updates `actions/cache` from 4.0.1 to 4.0.2
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](ab5e6d0c87...0c45773b62)

Updates `erlef/setup-beam` from 1.17.5 to 1.17.6
- [Release notes](https://github.com/erlef/setup-beam/releases)
- [Commits](2f0cc07b4b...0a541161e4)

Updates `coverallsapp/github-action` from 2.2.3 to 2.3.0
- [Release notes](https://github.com/coverallsapp/github-action/releases)
- [Commits](3dfc556739...643bc377ff)

Updates `ossf/scorecard-action` from 2.3.1 to 2.3.3
- [Release notes](https://github.com/ossf/scorecard-action/releases)
- [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md)
- [Commits](0864cf1902...dc50aa9510)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: docker/setup-buildx-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: docker/login-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: slackapi/slack-github-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: hashicorp/setup-terraform
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: actions/cache
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: erlef/setup-beam
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: coverallsapp/github-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: ossf/scorecard-action
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-06-07 07:50:25 +00:00
Ivan Dyachkov eea399e3ed
Merge pull request #13204 from id/0607-ci-keep-release-branch-green
ci: trigger reruns of failed checks on release-57 branch as well as master
2024-06-07 09:46:40 +02:00
Ivan Dyachkov 381ed6a451 ci: trigger reruns of failed checks on release-57 branch as well as master 2024-06-07 08:57:18 +02:00
Thales Macedo Garitezi c0c5545c21 ci: fix `make fmt` to take into account checkouts and elixir deps 2024-06-06 16:53:30 -03:00
zmstone ebf17c8143 Merge remote-tracking branch 'origin/release-57' into 0606-merge-release-57-to-master 2024-06-06 17:43:36 +02:00
firest 3ae26c8a54 chore: update changes 2024-06-06 18:13:24 +08:00
firest 5532c7b0a6 fix(authz_mnesia): add a soft limit in the API for the length of ACL rules 2024-06-06 16:22:53 +08:00
zmstone 07a3fbaf1a docs: add changelog for PR 13190 2024-06-05 23:28:41 +02:00
zmstone 1af06e4533
Merge pull request #13190 from zmstone/0605-stop-release-el7-and-ubuntu18
ci: stop releasing on centos7 (el7) and ubuntu18
2024-06-05 23:25:37 +02:00
Ilia Averianov dc2e6d1695
Merge pull request #13163 from savonarola/0523-ds-shared-subs-dispatch
Inject shared subscription handling into durable session
2024-06-05 23:59:50 +03:00
SergeTupchiy 64862b296c
Merge pull request #13179 from SergeTupchiy/dashboard-swagger-apply-body-validator
feat(emqx_dashboard_swagger): apply field validator in request body check
2024-06-05 21:25:07 +03:00
Serge Tupchii 159da912c2 feat(emqx_dashboard_swagger): apply field validator in request body check 2024-06-05 19:35:04 +03:00
Ilya Averyanov b075b7120c feat(sessds): implement dispatching between CE/EE shared sub agents 2024-06-05 19:17:58 +03:00
zmstone cf0eefee54 ci: stop releasing on centos7 (el7) and ubuntu18 2024-06-05 14:02:06 +02:00
JimMoen f0721bdb97
Merge pull request #13176 from JimMoen/erlfmt-file-name-by-find
build: run erlfmt with files name by `$(FIND)`
2024-06-05 17:41:19 +08:00
JimMoen 0eac19ef12
style: disable elvis `param_pattern_matching` check 2024-06-05 10:48:34 +08:00
JimMoen 51e55dcad8
style: track reformat commit in git-blame-ignore-revs 2024-06-05 10:48:33 +08:00
JimMoen 336ed1b922
style: reformat files in `erlang-mode` 2024-06-05 10:29:44 +08:00
JimMoen 023bb5e766
build: run erlfmt with files name by `$(FIND)` 2024-06-05 10:29:43 +08:00
zmstone 3d992e4501
Merge pull request #13171 from zmstone/0603-improve-compatibility-check
chore: improve emqx boot script's compatibility check
2024-06-04 16:58:18 +02:00
Ilya Averyanov 9cdfbb0845 feat(sessds): make ds session aware of shared subscriptions 2024-06-04 17:15:30 +03:00
zmstone 8276ae54a1 chore: improve emqx boot script's compatibility check
Prior to this change, the installation and OS compatibility check was
done by starting an 'erl' process with default flags, which might be
unnecessarily expensive on large VM instances.
In this change, we pass small enough +S, +P and +Q numbers to lower the
cost.
2024-06-04 12:11:43 +02:00
Ilia Averianov dc95218c68
Merge pull request #13144 from savonarola/0528-increase-bridge-buffer-drop-log-level
chore(bridges): increase log level for bridge buffer overflow reports
2024-06-04 12:37:50 +03:00
zhongwencool a8a67a2ac9
Merge pull request #13151 from zhongwencool/authz-trace-log
chore: make authz's logs easier to understand
2024-06-04 11:12:09 +08:00
Thales Macedo Garitezi c5547543e0
Merge pull request #13069 from thalesmg/azure-blob-storage-action-m-20240508
feat: implement azure blob storage action
2024-06-03 08:56:37 -03:00
Ilia Averianov 193978af90
Merge pull request #13168 from savonarola/0530-fix-mgmt-test
chore(tests): improve test stability
2024-06-03 14:44:12 +03:00
zhongwencool fda365a87b chore: make authz's logs easier to understand 2024-06-03 18:01:57 +08:00
zmstone ec7ec7261e
Merge pull request #13155 from zmstone/0529-hide-enable-flag-which-has-default-value-true
refactor: hide 'enable' config from example when it's 'true' by default
2024-05-31 21:52:40 +02:00
zmstone 12fd1f6a48
Merge pull request #13080 from zmstone/0521-default-retry_interval-to-infinity
change `mqtt.retry_interval` default value to `infinity`
2024-05-31 21:36:38 +02:00
Ilya Averyanov 57741a41e9 chore(tests): improve test stability 2024-05-31 22:27:31 +03:00
zmstone fb07f40bea test: fix default value of retry_interval in test case 2024-05-31 17:53:12 +02:00
zmstone 20c92407c7 feat: change message retry_interval default value to infinity
Previous Setting: The default value for `retry_interval` was 30 seconds.

New Default: The default `retry_interval` has been changed to 'infinity'.
With this update, EMQX will not automatically retry message deliveries
by default.

Compliance and Compatibility: In-session message delivery retries are
not typically compliant with the MQTT specification. We recognize that
some users depend on this feature, so the option to configure retries
remains available for backward compatibility.
2024-05-31 17:53:12 +02:00
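An illustrative sketch of what the new `infinity` default means in practice (not the actual session code, and the map shape is an assumption): with `retry_interval = infinity` the retry timer is simply never armed, so no in-session redelivery happens unless a finite interval is configured.

```erlang
-module(retry_timer_sketch).
-export([ensure_retry_timer/1]).

ensure_retry_timer(#{retry_interval := infinity} = Session) ->
    Session;                                   %% retries disabled by default
ensure_retry_timer(#{retry_interval := Ms} = Session) when is_integer(Ms), Ms > 0 ->
    TRef = erlang:send_after(Ms, self(), retry_delivery),
    Session#{retry_timer => TRef}.
```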
Thales Macedo Garitezi efa4432eef docs: fix docs 2024-05-31 11:10:24 -03:00
Thales Macedo Garitezi 347971e24a fix: check `max_block_size` for direct transfers 2024-05-31 11:07:44 -03:00
Thales Macedo Garitezi 4859cebd9f refactor: move `min_block_size` one level up 2024-05-31 11:07:44 -03:00
Thales Macedo Garitezi 9a1d3ea04f chore: bump app vsns 2024-05-31 11:07:44 -03:00
Thales Macedo Garitezi 616f14ae53 fix: avoid uploading blocks too frequently and splitting large buffers
Fixes https://github.com/emqx/emqx/pull/13069#discussion_r1613706747
2024-05-31 11:07:44 -03:00
Thales Macedo Garitezi af99829a21 refactor: move common templating logic to `emqx_connector_aggreg_buffer_ctx` 2024-05-31 11:07:44 -03:00
Thales Macedo Garitezi c916c83c7c feat: implement azure blob storage action
Fixes https://emqx.atlassian.net/browse/EMQX-12280
2024-05-31 11:07:44 -03:00
Thales Macedo Garitezi 60d24c6ad5 refactor: simplify resource_opts definition 2024-05-31 11:07:44 -03:00
Thales Macedo Garitezi 729441d1ce refactor(connector aggregator): move shared schemas to app 2024-05-31 11:07:44 -03:00
Ilya Averyanov db713cb656 chore(bridges): increase log level for bridge buffer overflow reports
Co-authored-by: Andrew Mayorov <encube.ul@gmail.com>
2024-05-31 16:35:18 +03:00
zmstone b457c2a49c refactor: hide 'enable' config from doc when it's 'true' by default 2024-05-31 14:10:03 +02:00
Ivan Dyachkov b232784df2
Merge pull request #13165 from id/0531-sync-release-57
sync release 57
2024-05-31 09:15:50 +02:00
Ivan Dyachkov 29ad07ce29 Merge remote-tracking branch 'upstream/release-57' into 0531-sync-release-57 2024-05-31 07:32:26 +02:00
zmstone a4ec9d7cb0
Merge pull request #13133 from zmstone/0527-port-back-diverged-modules
chore: port diverged modules back to oss
2024-05-29 09:03:30 +02:00
zmstone 062ab31ecf Merge remote-tracking branch 'origin/release-57' into 0527-port-back-diverged-modules 2024-05-28 14:35:25 +02:00
zmstone 8f2ca71d8b test: fix test case to have deterministic base config 2024-05-28 10:57:20 +02:00
zmstone e4abd8fca6 Merge remote-tracking branch 'origin/release-57' into 0527-port-back-diverged-modules 2024-05-28 09:50:59 +02:00
zmstone 1148091f0b chore: restore test code 2024-05-27 23:05:21 +02:00
zmstone 96ef37d0ea chore: port diverged modules back to oss 2024-05-27 22:40:08 +02:00
zmstone 238c207b09 chore: bump app versions 2024-05-27 20:26:52 +02:00
zmstone 41cbfcfaa1 Merge remote-tracking branch 'origin/release-57' into 0527-sync-5.7.0-to-master 2024-05-27 20:10:44 +02:00
zhongwencool e5da4aa128
Merge pull request #13078 from zhongwencool/http-415
feat: return 415 when UNSUPPORTED_MEDIA_TYPE
2024-05-24 15:59:05 +08:00
zhongwencool 3aa1f86d9e chore: is_content_type_json to validate_content_type_json 2024-05-24 10:13:24 +08:00
zhongwencool c013366b27 chore: upgrade minirest to 1.4.1 to ignore 415 code check 2024-05-24 10:13:23 +08:00
zhongwencool 5c759941d5 feat: return 415 when UNSUPPORTED_MEDIA_TYPE 2024-05-24 10:13:23 +08:00
zmstone 63721bf1db
Merge pull request #13109 from ieQu1/dev/sync-release-57-240523
Sync release-57 240523
2024-05-23 21:54:12 +02:00
ieQu1 c952e46f08
Merge remote-tracking branch 'origin/release-57' 2024-05-23 21:04:03 +02:00
William Yang bb3f858db2
Merge pull request #13086 from qzhuyan/dev/william/quic-lb
feat: quic-lb support
2024-05-23 15:31:14 +02:00
Zaiming (Stone) Shi 147d7f5d11
Merge pull request #13098 from zmstone/0523-more-ct-chunks-for-emqx-app
ci: run apps/emqx tests in 8 groups
2024-05-23 14:04:03 +02:00
William Yang 94e59c8fa7 feat(quic-lb): fix test 2024-05-23 10:32:23 +02:00
William Yang d8eecfe64f feat(quic-lb): add configuration 2024-05-23 10:32:20 +02:00
William Yang 34a69625b5 feat: quic-lb support 2024-05-23 09:17:09 +02:00
zmstone 1d5f3d7a36 chore: handle ctl command return 'true' 2024-05-23 08:58:31 +02:00
zmstone b0832ecc74 test: fix a flaky one 2024-05-23 08:55:28 +02:00
zmstone 63c34a4c40 ci: run apps/emqx tests in 8 groups 2024-05-22 23:45:54 +02:00
Thales Macedo Garitezi 625fcd5821
Merge pull request #13096 from thalesmg/test-fix-broken-m-20240522
test: fix broken test
2024-05-22 17:56:36 -03:00
Thales Macedo Garitezi 20a767eee8 test: fix broken test 2024-05-22 16:19:47 -03:00
Zaiming (Stone) Shi aa86772f96
Merge pull request #13094 from zmstone/0522-sync-release-57-to-master
0522 sync release 57 to master
2024-05-22 21:13:31 +02:00
zmstone 1bc9600c58 Merge remote-tracking branch 'origin/release-57' 2024-05-22 18:28:23 +02:00
JimMoen 4ad0743f61
Merge pull request #13081 from JimMoen/fix-typo
chore: fix typos
2024-05-22 02:05:20 +08:00
SergeTupchiy 528d6b7ef1
Merge pull request #13082 from SergeTupchiy/routes-fixes
Minor routes fixes
2024-05-21 19:11:47 +03:00
ieQu1 72c0ada578
Merge pull request #13061 from ieQu1/dev/merge-release57-240516
Sync release-57 to master
2024-05-21 17:47:26 +02:00
Serge Tupchii 38e57e321f fix(ds_router): return correct type from lookup_routes/1, fix fun specs 2024-05-21 16:31:31 +03:00
Serge Tupchii 0db6b8dc54 fix(emqx_router): return correct type from lookup_routes/1 (v2 schema) 2024-05-21 16:28:59 +03:00
ieQu1 acb19a06cf Merge remote-tracking branch 'origin/release-57' 2024-05-21 15:04:46 +02:00
JimMoen bb3c66638c
chore: fix typos 2024-05-21 17:45:20 +08:00
JianBo He 4f341bef8b
Merge pull request #12920 from cisiqo/patch-6
fix: STOMP heartbeat
2024-05-21 16:57:58 +08:00
zhongwencool 02097b577c
Merge pull request #13064 from zhongwencool/influxdb-client-failed-msg
chore: improved InfluxDB connection error messaging
2024-05-19 16:20:13 +08:00
zhongwencool b4eb910cb1 chore: improved InfluxDB connection error messaging 2024-05-17 10:38:11 +08:00
Ilia Averianov 22d5f17de8
Merge pull request #13025 from savonarola/0513-fix-cursor-usage
Get rid of qlc in retainer
2024-05-16 17:06:53 +03:00
Ilya Averyanov 1a664c941b chore(retainer): scan table in batches, improve stream usage
Co-authored-by: Thales Macedo Garitezi <thalesmg@gmail.com>
Co-authored-by: Zaiming (Stone) Shi <zmstone@gmail.com>
2024-05-16 16:06:11 +03:00
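A sketch of the batched-scan idea from the commit above (table handling and names are assumptions, not the actual retainer schema): a qlc cursor is replaced by `ets:select/3` plus `ets:select/1` continuations, handing the caller fixed-size batches.

```erlang
-module(retainer_scan_sketch).
-export([foreach_batch/3]).

foreach_batch(Tab, BatchSize, Fun) ->
    MatchSpec = [{'_', [], ['$_']}],            %% select whole objects
    do_scan(ets:select(Tab, MatchSpec, BatchSize), Fun).

do_scan('$end_of_table', _Fun) ->
    ok;
do_scan({Batch, Cont}, Fun) ->
    ok = Fun(Batch),                            %% process one batch at a time
    do_scan(ets:select(Cont), Fun).
```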
Ilia Averianov 322989c83f
Merge pull request #13040 from savonarola/0513-fix-http-authn-error-handling
fix(auth_http): fix query encoding
2024-05-16 15:12:57 +03:00
Ilya Averyanov e1ce6377f3 chore(streams): add stream methods, optimize streams 2024-05-16 12:04:08 +03:00
Ilia Averianov 1f7e358e1e
Merge pull request #13006 from savonarola/0509-fix-retainer-banned
fix(banned): verify delayed/taken over/retained messages against clientid_re ban rules
2024-05-16 11:37:15 +03:00
Ilya Averyanov f2d26b9972 chore(auth_http): add changelog
Co-authored-by: Thales Macedo Garitezi <thalesmg@gmail.com>
2024-05-16 11:36:52 +03:00
zmstone 2acde5a4e4 fix(authn/http): log meaningful error message if http header is missing 2024-05-16 11:36:52 +03:00
zmstone 4fb484d4cf chore(emqx_schema): change atom array to enum array for alarm.actions 2024-05-16 11:36:52 +03:00
Ilya Averyanov daf2e5a444 chore(auth_http): unify http request generation
Co-authored-by: Thales Macedo Garitezi <thalesmg@gmail.com>
2024-05-16 11:36:52 +03:00
Kjell Winblad cf5b464777
Merge pull request #12937 from kjellwinblad/kjell/add_use_legacy_protocol_option_to_mongodb_auth/EMQX-12245
test(mongodb authn, authz): add test cases for use_legacy_protocol
2024-05-16 10:10:17 +02:00
Ilya Averyanov 552b62236c chore(retainer): move filter/foreach to emqx_utils_stream 2024-05-15 19:09:06 +03:00
Ilya Averyanov 0b39aaadbd chore(retainer): get rid of qlc usage 2024-05-15 19:09:06 +03:00
Thales Macedo Garitezi bde17158e9
Merge pull request #13051 from thalesmg/test-add-more-context-debug-m-20240514
test: attempt to stabilize a couple flaky tests
2024-05-15 10:29:27 -03:00
Ilya Averyanov 78a87ab5a6 fix(banned): verify delayed/taken over/retained messages against clientid_re rules 2024-05-15 14:05:38 +03:00
Thales Macedo Garitezi bcbeadd4af test: stabilize flaky test 2024-05-14 17:53:29 -03:00
Thales Macedo Garitezi f062517ccb test: stabilize flaky test 2024-05-14 17:53:29 -03:00
Thales Macedo Garitezi b0aa7f25aa test: add more context to help debug flaky test
Ex: https://github.com/emqx/emqx/actions/runs/9084293110/job/24967912514?pr=13050#step:6:18330
2024-05-14 17:53:29 -03:00
Thales Macedo Garitezi e67b078775
Merge pull request #13050 from thalesmg/ci-spellcheck-opts-m-20240514
ci: allow customizing number of jobs when running locally
2024-05-14 17:25:54 -03:00
Thales Macedo Garitezi 202e145db7
Merge pull request #13049 from thalesmg/sync-r57-m-20240514
sync `release-57` to `master`
2024-05-14 16:01:57 -03:00
Thales Macedo Garitezi db125d54dd ci: allow customizing number of jobs when running locally
Example:

```sh
ͳ scripts/spellcheck/spellcheck.sh -j 20 _build/docgen/emqx-enterprise/schema-en.json
Adding /dicts/emqx.txt
Starting LanguageTool server...
+ java -Xms256m -Xmx512m -cp languagetool-server.jar org.languagetool.server.HTTPServer --port 8010 --public --allow-origin '*' --config config.properties
Checking /schema.json with 20 processes...
Spellcheck OK
```
2024-05-14 15:20:16 -03:00
Thales Macedo Garitezi 2a0ac34656 Merge branch 'release-57' into sync-r57-m-20240514 2024-05-14 15:03:04 -03:00
lafirest dc391d2403
Merge pull request #13009 from lafirest/fix/rate_log
feat(limiter): lift the log level of rate limiter to `warning`
2024-05-14 21:11:26 +08:00
ieQu1 f663373c57
Merge pull request #13037 from ieQu1/dev/merge-release57-240513
Sync release-57 to the master
2024-05-14 10:50:30 +02:00
Zaiming (Stone) Shi 34bf291539
Merge pull request #13024 from zmstone/0513-deny-plush-hash-by-default-ACL
fix: deny subscribing to +/# by default ACL
2024-05-14 09:43:42 +02:00
firest 7a7bd84cba fix(limiter): improved log content 2024-05-14 15:34:13 +08:00
Ilya Averyanov bca3782d73 fix(auth_http): fix query encoding
* ignore the authenticator if the JSON format is configured for requests but non-UTF-8 data is about to be sent
* use the application/json format by default
* fix encoding of the query part of the requests
2024-05-14 10:32:53 +03:00
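A hedged sketch of the "fix encoding of query part" point, using stock OTP `uri_string` (the actual `auth_http` code may build the query differently): the rendered parameters are percent-encoded in one step instead of being concatenated by hand.

```erlang
-module(auth_http_query_sketch).
-export([render_query/1]).

%% Params :: [{Key :: binary(), Value :: binary()}]
render_query(Params) when is_list(Params) ->
    %% uri_string:compose_query/1 escapes reserved characters in keys and values.
    iolist_to_binary(uri_string:compose_query(Params)).
```

For example, a value containing `/` or `=` would come out percent-encoded rather than corrupting the query string.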
ieQu1 25c6ac3d16
Merge remote-tracking branch 'origin/release-57' 2024-05-13 22:05:29 +02:00
zmstone 290ebe2fc5 fix: deny subscribing to +/# by default ACL
Prior to this change, the EMQX default ACL had a deny rule to reject
subscribing to `#`.
For completeness, the default ACL should also deny `+/#`, because the
two filters are essentially equivalent.
2024-05-13 09:26:42 +02:00
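A rough sketch of the affected `acl.conf` rule (the exact default file contents are an assumption): `{eq, ...}` matches the literal filter, so both `#` and the equivalent `+/#` are rejected for ordinary subscribers while everything else stays allowed.

```erlang
%% Hypothetical excerpt of the default acl.conf after this change.
{deny, all, subscribe, ["$SYS/#", {eq, "#"}, {eq, "+/#"}]}.
{allow, all}.
```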
Thales Macedo Garitezi c02701dfa1
Merge pull request #13015 from thalesmg/sync-r57-m-20240510
sync `release-57` to `master`
2024-05-10 12:18:02 -03:00
Thales Macedo Garitezi 6be4e6f631 Merge branch 'release-57' into sync-r57-m-20240510 2024-05-10 10:23:28 -03:00
firest bd7ccc3546 chore: update change 2024-05-10 18:48:51 +08:00
firest 1fefb421bc feat(limiter): lift the log level of rate limiter to `warning` 2024-05-10 18:47:55 +08:00
Ivan Dyachkov 900536712d
Merge pull request #12997 from thalesmg/sync-r57-m-20240508
sync `release-57` to `master`
2024-05-10 10:41:30 +02:00
lafirest 23a1b18459
Merge pull request #12983 from emqx/feat/authn_hook
feat(events): add new hook && event `client.check_authn_complete`
2024-05-09 22:36:52 +08:00
Thales Macedo Garitezi 401f0fa84b Merge branch 'release-57' into sync-r57-m-20240508 2024-05-09 09:13:30 -03:00
firest 2bc014db69 fix(events): call `client.check_authn_complete` even if authentication fails 2024-05-09 18:15:32 +08:00
firest d2ca4e9f11 chore: update change 2024-05-09 11:04:56 +08:00
zhongwencool 107d00adee
Merge pull request #12944 from zhongwencool/mqtt-utf8-strict-mode
fix: non-utf8 clientid connect event case crash if strict_mode=false
2024-05-09 09:16:10 +08:00
William Yang 6e5d04e9fa
Merge pull request #12977 from qzhuyan/port/william/5/partial-chain
chore: update doc for `partial_chain` and `verify_peer_ext_key_usage`
2024-05-07 16:42:10 +02:00
zhongwencool 78095ef9bc chore: changelog 2024-05-07 17:04:58 +08:00
zhongwencool a0bf06caba chore: add test case for non-utf8 topic 2024-05-07 17:02:45 +08:00
firest f641d0b2b7 feat(events): add new hook && event `client.check_authn_complete` 2024-05-07 16:24:31 +08:00
William Yang 28b17a2562 chore: fix nit for spellcheck 2024-05-06 21:02:33 +02:00
William Yang 01467246fc docs: Apply suggestions from code review
Co-authored-by: Zaiming (Stone) Shi <zmstone@gmail.com>
2024-05-06 17:08:55 +02:00
William Yang c3f8ba5762 chore: update doc for `partial_chain` and `verify_peer_ext_key_usage` 2024-05-06 16:03:52 +02:00
Ilia Averianov e19222fc0d
Merge pull request #12971 from savonarola/0502-fix-url-parse
fix(auth,http): improve URI handling
2024-05-06 13:41:10 +03:00
Ilya Averyanov 31026d51f7 chore(auth,http): cache REs for parsing URIs 2024-05-06 11:46:18 +03:00
JianBo He cabe2ae100 chore: fix dialyzer warning 2024-05-06 14:58:27 +08:00
JianBo He d2b6e41cd1 chore(stomp): parse \n as heartbeat frame 2024-05-06 14:39:41 +08:00
Ilya Averyanov 3b655f56cb fix(auth,http): improve URI handling 2024-05-04 09:47:13 +03:00
Zaiming (Stone) Shi e7f0c83406
Merge pull request #12970 from zmstone/0503-refactor-dashboard-listener
refactor: simplify https listener config for dashboard
2024-05-04 08:38:29 +02:00
Zaiming (Stone) Shi c5d8659f35
Merge pull request #12972 from zmstone/0503-unhide-default-dashboard-username-password
chore: unhide dashboard default_password config
2024-05-04 08:36:32 +02:00
zmstone 4d3156b5ed chore: unhide dashboard default_username and default_password config 2024-05-03 18:35:49 +02:00
Thales Macedo Garitezi 3818b75188
Merge pull request #12959 from thalesmg/kprodu-connector-hc-m-20240429
feat(kafka producer): add health check topic option
2024-05-03 12:48:48 -03:00
zmstone 3c5c76fcfc refactor: simplify https listener config for dashboard 2024-05-03 16:45:40 +02:00
Thales Macedo Garitezi 6f3da6b131 feat(kafka producer): add health check topic option
Fixes https://emqx.atlassian.net/browse/EMQX-12241

This allows more accurate health checking for Kafka Producers. Without a concrete topic to test,
it's not possible to actually probe the connection to partition leaders, so the connector might
never be reported as `disconnected`.
2024-05-02 17:07:44 -03:00
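An illustrative sketch of the health-check idea only: EMQX's Kafka producer uses its own client stack, so this is just the general shape, expressed here with brod's public `get_metadata/2`. Fetching metadata for the configured health-check topic forces a round trip that a bare connection check would not.

```erlang
-module(kafka_hc_sketch).
-export([health_check/2]).

%% Endpoints :: [{Host, Port}], Topic :: binary()
health_check(Endpoints, Topic) ->
    case brod:get_metadata(Endpoints, [Topic]) of
        {ok, _Metadata} -> connected;
        {error, Reason} -> {disconnected, Reason}
    end.
```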
Thales Macedo Garitezi 28cdce7464
Merge pull request #12964 from thalesmg/ci-cron-pkgs-20240502
ci(cron packages): use unique upload names for different branches
2024-05-02 16:19:11 -03:00
Thales Macedo Garitezi 5b39b9c11f ci(cron packages): use unique upload names for different branches 2024-05-02 15:31:57 -03:00
Thales Macedo Garitezi c71f73924b
Merge pull request #12961 from thalesmg/kconsu-custom-group-id-m-20240430
feat(kafka consumer): allow custom group id
2024-05-02 14:37:42 -03:00
Ivan Dyachkov 98dee03773
Merge pull request #12963 from id/0502-sync-release-57
sync release-57
2024-05-02 18:33:28 +02:00
Ivan Dyachkov cc577e636d Merge remote-tracking branch 'upstream/release-57' into 0502-sync-release-57 2024-05-02 17:06:18 +02:00
Thales Macedo Garitezi 905d04f1c3 docs: improve descriptions 2024-05-02 11:19:14 -03:00
Thales Macedo Garitezi eb113fa578 fix: add non-empty validator 2024-05-02 11:19:00 -03:00
William Yang 8f780ae8bc
Merge pull request #12955 from qzhuyan/port/william/5/partial-chain
port: TLS partial chain
2024-05-02 15:34:25 +02:00
Thales Macedo Garitezi 3942b371d7 feat(kafka consumer): allow custom group id
Fixes https://emqx.atlassian.net/browse/EMQX-12273
Fixes EMQX-12273

When consuming messages from Kafka in Alibaba Cloud, the consumer group needs to be created in
advance, and only then can consumers use that group to consume messages. Automatic group
creation is generally not allowed in production environments.
2024-05-02 08:59:40 -03:00
William Yang 1a4a4bb3a5 chore: fix nit 2024-05-02 10:13:57 +02:00
Thales Macedo Garitezi 14ef0b1e51 feat(kafka consumer): allow custom group id
Fixes https://emqx.atlassian.net/browse/EMQX-12273
Fixes EMQX-12273

When consuming messages from Kafka in Alibaba Cloud, the consumer group needs to be created in
advance, and only then can consumers use that group to consume messages. Automatic group
creation is generally not allowed in production environments.
2024-05-01 13:53:29 -03:00
Thales Macedo Garitezi 437e7968b1
Merge pull request #12960 from thalesmg/sync-r57-m-20240430
sync `release-57` to `master`
2024-05-01 13:52:16 -03:00
Thales Macedo Garitezi 42cb17360e Merge branch 'release-57' into sync-r57-m-20240430 2024-04-30 14:42:22 -03:00
William Yang fb30207ef3 chore: fix test 2024-04-30 16:41:46 +02:00
William Yang 337c230e79 feat(partial_chain): gateway support 2024-04-30 16:41:26 +02:00
William Yang 3a674f44f1 chore: lock mimerl 2024-04-30 10:27:02 +02:00
Kjell Winblad 7b638a5829 style: fix spelling mistake (thanks @savonarola) 2024-04-30 09:28:12 +02:00
William Yang 70ffd77f99 chore(TLS-chain-test): update for OTP 26 2024-04-30 09:05:51 +02:00
William Yang 03b0935564 chore: add changelog 2024-04-30 06:25:03 +02:00
William Yang 650cf4b27e test(partial_chain): update tcs for OTP-25 2024-04-30 06:25:03 +02:00
William Yang 43ad665dcf fix(test): tls_verify_partial_chain 2024-04-30 06:25:03 +02:00
William Yang a29a43e5fc fix(listener): remove partial_chain in wss opts 2024-04-30 06:25:03 +02:00
William Yang 4e9c1ec0c9 chore: happy elvis 2024-04-30 06:25:03 +02:00
William Yang 8eb463c58d feat(tls): update schema for TLS keyusage 2024-04-30 06:25:03 +02:00
William Yang 90430fa66d fix(tls): undefined keyusage 2024-04-30 06:25:03 +02:00
William Yang eb1ab9adfe test(tls): verify peer keyusage 2024-04-30 06:25:03 +02:00
William Yang 8bc3a86f63 feat(config): partial_chain 2024-04-30 06:25:03 +02:00
William Yang fa4357ce89 test: port listener tls partial_chain 2024-04-30 06:25:03 +02:00
William Yang 0b95a08d32 feat(tls): port partial_chain, part 1 2024-04-30 06:25:03 +02:00
zhongwencool ef9a63ae17 fix: non-utf8 clientid connect event case crash if strict_mode=false 2024-04-29 14:43:29 +08:00
Ilia Averianov 002dc8541b
Merge pull request #12936 from savonarola/0426-improve-authz-rule-typespecs
chore(authz): improve and clarify types
2024-04-26 19:43:41 +03:00
Kjell Winblad db48a977bb test(mongodb authn, authz): add test cases for use_legacy_protocol
This commit adds test cases that check that the authn and authz modules
for MongoDB support the use_legacy_protocol configuration option.

Fixes:
https://emqx.atlassian.net/browse/EMQX-12245
2024-04-26 12:41:50 +02:00
Ilya Averyanov aaf57ecfbc chore(authz): improve and clarify types 2024-04-26 12:09:18 +03:00
Ilia Averianov a0e0a27f87
Merge pull request #12418 from savonarola/0129-fix-auth-claim-schema
feat(jwt_auth): improve verify_claims handling and docs
2024-04-25 19:05:13 +03:00
chengshq 75dab4dff0 fix: STOMP heartbeat 2024-04-25 23:33:15 +08:00
Ilya Averyanov 407b0cd0ca feat(jwt_auth): improve verify_claims handling and docs 2024-04-25 17:49:29 +03:00
Ilia Averianov c42583550d
Merge pull request #12514 from savonarola/0214-fix-ft-responses
fix(ft): report ft assemble status from a dedicated process
2024-04-25 15:10:26 +03:00
Ilya Averyanov 05f544495e fix(ft): report ft assemble status from a dedicated process
Previously, the status was monitored by the channel itself.
If the channel disconnected before the assembly was completed,
the status message was lost (not sent to the response topic).
2024-04-24 18:16:28 +03:00
687 changed files with 34046 additions and 6463 deletions

View File

@ -0,0 +1,24 @@
version: '3.9'
services:
  azurite:
    container_name: azurite
    image: mcr.microsoft.com/azure-storage/azurite:3.30.0
    restart: always
    expose:
      - "10000"
    # ports:
    #   - "10000:10000"
    networks:
      - emqx_bridge
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:10000"]
      interval: 30s
      timeout: 5s
      retries: 4
    command:
      - azurite-blob
      - "--blobHost"
      - 0.0.0.0
      - "-d"
      - debug.log

View File

@ -0,0 +1,30 @@
version: '3.9'
services:
  couchbase:
    container_name: couchbase
    hostname: couchbase
    image: ghcr.io/emqx/couchbase:1.0.0
    restart: always
    expose:
      - 8091-8093
    # ports:
    #   - "8091-8093:8091-8093"
    networks:
      - emqx_bridge
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8093/admin/ping"]
      interval: 30s
      timeout: 5s
      retries: 4
    environment:
      - CLUSTER=localhost
      - USER=admin
      - PASS=public
      - PORT=8091
      - RAMSIZEMB=2048
      - RAMSIZEINDEXMB=512
      - RAMSIZEFTSMB=512
      - BUCKETS=mqtt
      - BUCKETSIZES=100
      - AUTOREBALANCE=true

View File

@ -10,7 +10,7 @@ services:
nofile: 1024
image: openldap
#ports:
# - 389:389
# - "389:389"
volumes:
- ./certs/ca.crt:/etc/certs/ca.crt
restart: always

View File

@ -215,5 +215,17 @@
"listen": "0.0.0.0:9200",
"upstream": "elasticsearch:9200",
"enabled": true
},
{
"name": "azurite_plain",
"listen": "0.0.0.0:10000",
"upstream": "azurite:10000",
"enabled": true
},
{
"name": "couchbase",
"listen": "0.0.0.0:8093",
"upstream": "couchbase:8093",
"enabled": true
}
]

View File

@ -1,12 +1,12 @@
%% -*- mode: erlang -*-
{application, http_server,
[{description, "An HTTP server application"},
{application, http_server, [
{description, "An HTTP server application"},
{vsn, "0.2.0"},
{registered, []},
% {mod, {http_server_app, []}},
{modules, []},
{applications,
[kernel,
{applications, [
kernel,
stdlib,
minirest
]},

View File

@ -51,7 +51,7 @@ runs:
echo "SELF_HOSTED=false" >> $GITHUB_OUTPUT
;;
esac
- uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1
- uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
id: cache
if: steps.prepare.outputs.SELF_HOSTED != 'true'
with:

.github/workflows/.zipignore (vendored, new file)
View File

@ -0,0 +1 @@
*/.github/*

View File

@ -27,7 +27,7 @@ jobs:
ELIXIR_VSN: ${{ steps.env.outputs.ELIXIR_VSN }}
BUILDER: ${{ steps.env.outputs.BUILDER }}
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
- name: Set up environment
@ -52,7 +52,7 @@ jobs:
contents: read
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -136,7 +136,7 @@ jobs:
contents: read
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
- name: Work around https://github.com/actions/checkout/issues/766
@ -152,7 +152,7 @@ jobs:
echo "PROFILE=${PROFILE}" | tee -a .env
echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env
zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip .
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: ${{ matrix.profile }}
path: ${{ matrix.profile }}.zip

View File

@ -35,7 +35,7 @@ jobs:
BUILD_FROM: ${{ steps.env.outputs.BUILD_FROM }}
RUN_FROM: ${{ steps.env.outputs.BUILD_FROM }}
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
- name: Set up environment
@ -65,7 +65,7 @@ jobs:
contents: read
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -147,7 +147,7 @@ jobs:
contents: read
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -163,7 +163,7 @@ jobs:
echo "PROFILE=${PROFILE}" | tee -a .env
echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env
zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip .
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: ${{ matrix.profile }}
path: ${{ matrix.profile }}.zip

View File

@ -75,7 +75,7 @@ jobs:
- arm64
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
- run: git config --global --add safe.directory "$PWD"
@ -83,7 +83,7 @@ jobs:
id: build
run: |
make ${{ matrix.profile }}-tgz
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: "${{ matrix.profile }}-${{ matrix.arch }}.tar.gz"
path: "_packages/emqx*/emqx-*.tar.gz"
@ -107,10 +107,10 @@ jobs:
- ["${{ inputs.profile }}-elixir", "${{ inputs.profile == 'emqx' && 'docker.io,public.ecr.aws' || 'docker.io' }}"]
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
pattern: "${{ matrix.profile[0] }}-*.tar.gz"
path: _packages
@ -122,24 +122,25 @@ jobs:
run: |
ls -lR _packages/$PROFILE
mv _packages/$PROFILE/*.tar.gz ./
- name: Enable containerd image store on Docker Engine
run: |
echo "$(jq '. += {"features": {"containerd-snapshotter": true}}' /etc/docker/daemon.json)" > daemon.json
echo "$(sudo cat /etc/docker/daemon.json | jq '. += {"features": {"containerd-snapshotter": true}}')" > daemon.json
sudo mv daemon.json /etc/docker/daemon.json
sudo systemctl restart docker
- uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
- uses: docker/setup-buildx-action@2b51285047da1547ffb1b2203d8be4c0af6b1f20 # v3.2.0
- uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
- uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
- name: Login to hub.docker.com
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
if: inputs.publish && contains(matrix.profile[1], 'docker.io')
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- name: Login to AWS ECR
uses: docker/login-action@e92390c5fb421da1463c202d546fed0ec5c39f20 # v3.1.0
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
if: inputs.publish && contains(matrix.profile[1], 'public.ecr.aws')
with:
registry: public.ecr.aws

View File

@ -26,7 +26,7 @@ jobs:
- emqx-enterprise-elixir
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
@ -51,7 +51,7 @@ jobs:
if: always()
run: |
docker save $_EMQX_DOCKER_IMAGE_TAG | gzip > $EMQX_NAME-docker-$PKG_VSN.tar.gz
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: "${{ env.EMQX_NAME }}-docker"
path: "${{ env.EMQX_NAME }}-docker-${{ env.PKG_VSN }}.tar.gz"

View File

@ -82,7 +82,7 @@ jobs:
- ${{ inputs.otp_vsn }}
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -95,7 +95,7 @@ jobs:
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: success()
with:
name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.otp }}
@ -113,13 +113,11 @@ jobs:
- ubuntu24.04
- ubuntu22.04
- ubuntu20.04
- ubuntu18.04
- debian12
- debian11
- debian10
- el9
- el8
- el7
- amzn2
- amzn2023
arch:
@ -147,7 +145,7 @@ jobs:
shell: bash
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref }}
fetch-depth: 0
@ -182,7 +180,7 @@ jobs:
--builder $BUILDER \
--elixir $IS_ELIXIR \
--pkgtype pkg
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.with_elixir == 'yes' && '-elixir' || '' }}-${{ matrix.builder }}-${{ matrix.otp }}-${{ matrix.elixir }}
path: _packages/${{ matrix.profile }}/
@ -200,7 +198,7 @@ jobs:
profile:
- ${{ inputs.profile }}
steps:
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
pattern: "${{ matrix.profile }}-*"
path: packages/${{ matrix.profile }}

View File

@ -23,6 +23,7 @@ jobs:
profile:
- ['emqx', 'master']
- ['emqx', 'release-57']
- ['emqx', 'release-58']
os:
- ubuntu22.04
- amzn2023
@ -37,7 +38,7 @@ jobs:
shell: bash
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ matrix.profile[1] }}
fetch-depth: 0
@ -53,14 +54,14 @@ jobs:
- name: build pkg
run: |
./scripts/buildx.sh --profile "$PROFILE" --pkgtype pkg --builder "$BUILDER"
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: success()
with:
name: ${{ matrix.profile[0] }}-${{ matrix.os }}-${{ github.ref_name }}
name: ${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.os }}
path: _packages/${{ matrix.profile[0] }}/
retention-days: 7
- name: Send notification to Slack
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
@ -83,7 +84,7 @@ jobs:
- macos-14-arm64
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ matrix.branch }}
fetch-depth: 0
@ -101,14 +102,14 @@ jobs:
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: success()
with:
name: ${{ matrix.profile }}-${{ matrix.os }}
path: _packages/${{ matrix.profile }}/
retention-days: 7
- name: Send notification to Slack
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}

View File

@ -32,7 +32,7 @@ jobs:
- ["emqx-enterprise", "erlang", "x64"]
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
- name: build tgz
@ -41,13 +41,13 @@ jobs:
- name: build pkg
run: |
./scripts/buildx.sh --profile $PROFILE --pkgtype pkg --elixir $ELIXIR --arch $ARCH
- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: "${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
path: _packages/${{ matrix.profile[0] }}/*
retention-days: 7
compression-level: 0
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: "${{ matrix.profile[0] }}-schema-dump-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
path: |
@ -69,7 +69,7 @@ jobs:
EMQX_NAME: ${{ matrix.profile }}
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
@ -84,7 +84,7 @@ jobs:
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: ${{ matrix.os }}
path: _packages/**/*

View File

@ -22,7 +22,7 @@ jobs:
profile:
- emqx-enterprise
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- run: git config --global --add safe.directory "$GITHUB_WORKSPACE"
- run: make ensure-rebar3
- run: ./scripts/check-deps-integrity.escript
@ -37,7 +37,7 @@ jobs:
- run: ./scripts/check-elixir-deps-discrepancies.exs
- run: ./scripts/check-elixir-applications.exs
- name: Upload produced lock files
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: failure()
with:
name: ${{ matrix.profile }}_produced_lock_files

View File

@ -24,13 +24,14 @@ jobs:
branch:
- master
- release-57
- release-58
language:
- cpp
- python
steps:
- name: Checkout repository
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ matrix.branch }}

View File

@ -7,9 +7,6 @@ on:
# run hourly
- cron: "0 * * * *"
workflow_dispatch:
inputs:
ref:
required: false
permissions:
contents: read
@ -17,14 +14,21 @@ permissions:
jobs:
rerun-failed-jobs:
if: github.repository_owner == 'emqx'
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
permissions:
checks: read
actions: write
strategy:
fail-fast: false
matrix:
ref:
- master
- release-57
- release-58
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.ref || 'master' }}
ref: ${{ matrix.ref }}
- name: run script
shell: bash

View File

@ -32,7 +32,7 @@ jobs:
PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
ref: ${{ github.event.inputs.ref }}
@ -52,7 +52,7 @@ jobs:
id: package_file
run: |
echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: emqx-ubuntu20.04
path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}
@ -72,17 +72,17 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.3
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
- name: Setup Terraform
uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0
uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 # v3.1.1
with:
terraform_wrapper: false
- name: run scenario
@ -105,7 +105,7 @@ jobs:
terraform destroy -auto-approve
aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id .
- name: Send notification to Slack
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0
with:
payload-file-path: "./tf-emqx-performance-test/slack-payload.json"
- name: terraform destroy
@ -113,13 +113,13 @@ jobs:
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: success()
with:
name: metrics
path: |
"./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: failure()
with:
name: terraform
@ -143,17 +143,17 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.3
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
- name: Setup Terraform
uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0
uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 # v3.1.1
with:
terraform_wrapper: false
- name: run scenario
@ -176,7 +176,7 @@ jobs:
terraform destroy -auto-approve
aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id .
- name: Send notification to Slack
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0
with:
payload-file-path: "./tf-emqx-performance-test/slack-payload.json"
- name: terraform destroy
@ -184,13 +184,13 @@ jobs:
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: success()
with:
name: metrics
path: |
"./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: failure()
with:
name: terraform
@ -215,17 +215,17 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.3
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
- name: Setup Terraform
uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0
uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 # v3.1.1
with:
terraform_wrapper: false
- name: run scenario
@ -249,7 +249,7 @@ jobs:
terraform destroy -auto-approve
aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id .
- name: Send notification to Slack
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0
with:
payload-file-path: "./tf-emqx-performance-test/slack-payload.json"
- name: terraform destroy
@ -257,13 +257,13 @@ jobs:
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: success()
with:
name: metrics
path: |
"./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: failure()
with:
name: terraform
@ -289,17 +289,17 @@ jobs:
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }}
aws-region: eu-west-1
- name: Checkout tf-emqx-performance-test
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
repository: emqx/tf-emqx-performance-test
path: tf-emqx-performance-test
ref: v0.2.3
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-ubuntu20.04
path: tf-emqx-performance-test/
- name: Setup Terraform
uses: hashicorp/setup-terraform@a1502cd9e758c50496cc9ac5308c4843bcd56d36 # v3.0.0
uses: hashicorp/setup-terraform@651471c36a6092792c552e8b1bef71e592b462d8 # v3.1.1
with:
terraform_wrapper: false
- name: run scenario
@ -322,7 +322,7 @@ jobs:
terraform destroy -auto-approve
aws s3 sync --exclude '*' --include '*.tar.gz' s3://$TF_VAR_s3_bucket_name/$TF_VAR_bench_id .
- name: Send notification to Slack
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 # v1.25.0
uses: slackapi/slack-github-action@70cd7be8e40a46e8b0eced40b0de447bdb42f68e # v1.26.0
with:
payload-file-path: "./tf-emqx-performance-test/slack-payload.json"
- name: terraform destroy
@ -330,13 +330,13 @@ jobs:
working-directory: ./tf-emqx-performance-test
run: |
terraform destroy -auto-approve
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: success()
with:
name: metrics
path: |
"./tf-emqx-performance-test/*.tar.gz"
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: failure()
with:
name: terraform

View File

@ -36,7 +36,7 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.tag }}
- name: Detect profile
@ -106,16 +106,12 @@ jobs:
push "debian/bullseye" "packages/$PROFILE-$VERSION-debian11-arm64.deb"
push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-amd64.deb"
push "debian/bookworm" "packages/$PROFILE-$VERSION-debian12-arm64.deb"
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-amd64.deb"
push "ubuntu/bionic" "packages/$PROFILE-$VERSION-ubuntu18.04-arm64.deb"
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-amd64.deb"
push "ubuntu/focal" "packages/$PROFILE-$VERSION-ubuntu20.04-arm64.deb"
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-amd64.deb"
push "ubuntu/jammy" "packages/$PROFILE-$VERSION-ubuntu22.04-arm64.deb"
push "ubuntu/noble" "packages/$PROFILE-$VERSION-ubuntu24.04-amd64.deb"
push "ubuntu/noble" "packages/$PROFILE-$VERSION-ubuntu24.04-arm64.deb"
push "el/7" "packages/$PROFILE-$VERSION-el7-amd64.rpm"
push "el/7" "packages/$PROFILE-$VERSION-el7-arm64.rpm"
push "el/8" "packages/$PROFILE-$VERSION-el8-amd64.rpm"
push "el/8" "packages/$PROFILE-$VERSION-el8-arm64.rpm"
push "el/9" "packages/$PROFILE-$VERSION-el9-amd64.rpm"
@ -135,7 +131,7 @@ jobs:
checks: write
actions: write
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: trigger re-run of app versions check on open PRs
shell: bash
env:

View File

@ -25,7 +25,7 @@ jobs:
- emqx
- emqx-enterprise
steps:
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: ${{ matrix.profile }}
- name: extract artifact
@ -39,10 +39,10 @@ jobs:
- name: print erlang log
if: failure()
run: |
cat _build/${{ matrix.profile }}/rel/emqx/logs/erlang.log.*
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
cat _build/${{ matrix.profile }}/rel/emqx/log/erlang.log.*
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: failure()
with:
name: conftest-logs-${{ matrix.profile }}
path: _build/${{ matrix.profile }}/rel/emqx/logs
path: _build/${{ matrix.profile }}/rel/emqx/log
retention-days: 7

View File

@ -28,14 +28,14 @@ jobs:
EMQX_IMAGE_OLD_VERSION_TAG: ${{ matrix.profile[1] }}
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: ${{ env.EMQX_NAME }}-docker
path: /tmp
@ -69,7 +69,6 @@ jobs:
shell: bash
env:
EMQX_NAME: ${{ matrix.profile }}
_EMQX_TEST_DB_BACKEND: ${{ matrix.cluster_db_backend }}
strategy:
fail-fast: false
@ -78,18 +77,20 @@ jobs:
- emqx
- emqx-enterprise
- emqx-elixir
cluster_db_backend:
- mnesia
- rlog
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
if [ "$EMQX_NAME" = "emqx-enterprise" ]; then
_EMQX_TEST_DB_BACKEND='rlog'
else
_EMQX_TEST_DB_BACKEND='mnesia'
fi
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: ${{ env.EMQX_NAME }}-docker
path: /tmp

View File

@ -37,7 +37,7 @@ jobs:
matrix: ${{ steps.matrix.outputs.matrix }}
skip: ${{ steps.matrix.outputs.skip }}
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
- name: prepare test matrix
@ -72,7 +72,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
- name: run
@ -95,7 +95,7 @@ jobs:
echo "Suites: $SUITES"
./rebar3 as standalone_test ct --name 'test@127.0.0.1' -v --readable=true --suite="$SUITES"
fi
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: failure()
with:
name: logs-emqx-app-tests-${{ matrix.type }}

View File

@ -34,7 +34,7 @@ jobs:
- ssl1.3
- ssl1.2
steps:
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
path: source
- name: Set up environment
@ -44,7 +44,7 @@ jobs:
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
echo "EMQX_TAG=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: "${{ env.EMQX_NAME }}-docker"
path: /tmp
@ -164,7 +164,7 @@ jobs:
fi
sleep 1;
done
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
repository: emqx/paho.mqtt.testing
ref: develop-5.0

View File

@ -12,7 +12,7 @@ jobs:
steps:
- name: Cache Jmeter
id: cache-jmeter
uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1
uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: /tmp/apache-jmeter.tgz
key: apache-jmeter-5.4.3.tgz
@ -31,7 +31,7 @@ jobs:
else
wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz $ARCHIVE_URL
fi
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: apache-jmeter.tgz
path: /tmp/apache-jmeter.tgz
@ -51,14 +51,14 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
@ -95,7 +95,7 @@ jobs:
echo "check logs failed"
exit 1
fi
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: always()
with:
name: jmeter_logs-advanced_feat-${{ matrix.scripts_type }}
@ -120,14 +120,14 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
@ -175,7 +175,7 @@ jobs:
if: failure()
run: |
docker compose -f .ci/docker-compose-file/docker-compose-emqx-cluster.yaml logs --no-color > ./jmeter_logs/emqx.log
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: always()
with:
name: jmeter_logs-pgsql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.pgsql_tag }}
@ -197,14 +197,14 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
@ -248,7 +248,7 @@ jobs:
echo "check logs failed"
exit 1
fi
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: always()
with:
name: jmeter_logs-mysql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.mysql_tag }}
@ -266,14 +266,14 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
@ -313,7 +313,7 @@ jobs:
echo "check logs failed"
exit 1
fi
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: always()
with:
name: jmeter_logs-JWT_authn-${{ matrix.scripts_type }}
@ -332,14 +332,14 @@ jobs:
needs: jmeter_artifact
steps:
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4.1.6
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Set up environment
id: env
run: |
source env.sh
PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-docker
path: /tmp
@ -370,7 +370,7 @@ jobs:
echo "check logs failed"
exit 1
fi
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: always()
with:
name: jmeter_logs-built_in_database_authn_authz-${{ matrix.scripts_type }}

View File

@ -25,7 +25,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: emqx-enterprise
- name: extract artifact
@ -45,7 +45,7 @@ jobs:
run: |
export PROFILE='emqx-enterprise'
make emqx-enterprise-tgz
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
name: Upload built emqx and test scenario
with:
name: relup_tests_emqx_built
@ -72,10 +72,10 @@ jobs:
run:
shell: bash
steps:
- uses: erlef/setup-beam@2f0cc07b4b9bea248ae098aba9e1a8a1de5ec24c # v1.17.5
- uses: erlef/setup-beam@b9c58b0450cd832ccdb3c17cc156a47065d2114f # v1.18.1
with:
otp-version: 26.2.5
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
repository: hawk/lux
ref: lux-2.8.1
@ -88,7 +88,7 @@ jobs:
./configure
make
echo "$(pwd)/bin" >> $GITHUB_PATH
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
name: Download built emqx and test scenario
with:
name: relup_tests_emqx_built
@ -111,7 +111,7 @@ jobs:
docker logs node2.emqx.io | tee lux_logs/emqx2.log
exit 1
fi
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
name: Save debug data
if: failure()
with:

View File

@ -46,7 +46,7 @@ jobs:
contents: read
steps:
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: ${{ matrix.profile }}
@ -90,7 +90,7 @@ jobs:
contents: read
steps:
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: ${{ matrix.profile }}
- name: extract artifact
@ -133,7 +133,7 @@ jobs:
if: failure()
run: tar -czf logs.tar.gz _build/test/logs
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: failure()
with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }}
@ -164,7 +164,7 @@ jobs:
CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-sg${{ matrix.suitegroup }}
steps:
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: ${{ matrix.profile }}
- name: extract artifact
@ -193,7 +193,7 @@ jobs:
if: failure()
run: tar -czf logs.tar.gz _build/test/logs
- uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
if: failure()
with:
name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }}
@ -216,7 +216,7 @@ jobs:
steps:
- name: Coveralls finished
if: github.repository == 'emqx/emqx'
uses: coverallsapp/github-action@3dfc5567390f6fa9267c0ee9c251e4c8c3f18949 # v2.2.3
uses: coverallsapp/github-action@643bc377ffa44ace6394b2b5d0d3950076de9f63 # v2.3.0
with:
parallel-finished: true
git-branch: ${{ github.ref }}

View File

@ -25,12 +25,12 @@ jobs:
steps:
- name: "Checkout code"
uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@0864cf19026789058feabb7e87baa5f140aac736 # v2.3.1
uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
with:
results_file: results.sarif
results_format: sarif
@ -40,7 +40,7 @@ jobs:
publish_results: true
- name: "Upload artifact"
uses: actions/upload-artifact@5d5d22a31266ced268874388b861e4b58bb5c2f3 # v4.3.1
uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
with:
name: SARIF file
path: results.sarif

View File

@ -19,7 +19,7 @@ jobs:
- emqx-enterprise
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
steps:
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
pattern: "${{ matrix.profile }}-schema-dump-*-x64"
merge-multiple: true

View File

@ -30,14 +30,14 @@ jobs:
include: ${{ fromJson(inputs.ct-matrix) }}
container: "${{ inputs.builder }}"
steps:
- uses: actions/download-artifact@c850b930e6ba138125429b7e5c93fc707a7f8427 # v4.1.4
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
with:
name: ${{ matrix.profile }}
- name: extract artifact
run: |
unzip -o -q ${{ matrix.profile }}.zip
git config --global --add safe.directory "$GITHUB_WORKSPACE"
- uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1
- uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
with:
path: "emqx_dialyzer_${{ matrix.profile }}_plt"
key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ hashFiles('rebar.*', 'apps/*/rebar.*') }}

View File

@ -0,0 +1,88 @@
name: Sync release branch
concurrency:
group: sync-release-branch-${{ github.event_name }}-${{ github.ref }}
cancel-in-progress: true
on:
schedule:
- cron: '0 2 * * *'
workflow_dispatch:
permissions:
contents: read
jobs:
create-pr:
runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
strategy:
fail-fast: false
matrix:
branch:
- release-57
env:
SYNC_BRANCH: ${{ matrix.branch }}
defaults:
run:
shell: bash
permissions:
contents: write
pull-requests: write
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
- name: create new branch
run: |
set -euxo pipefail
NEW_BRANCH_NAME=sync-${SYNC_BRANCH}-$(date +"%Y%m%d-%H%M%S")
echo "NEW_BRANCH_NAME=${NEW_BRANCH_NAME}" >> $GITHUB_ENV
git config --global user.name "${GITHUB_ACTOR}"
git config --global user.email "${GITHUB_ACTOR}@users.noreply.github.com"
git checkout -b ${NEW_BRANCH_NAME}
git merge origin/${SYNC_BRANCH} 2>&1 | tee merge.log
git push origin ${NEW_BRANCH_NAME}:${NEW_BRANCH_NAME}
- name: create pull request
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euxo pipefail
for pr in $(gh pr list --state open --base master --label sync-release-branch --search "Sync ${SYNC_BRANCH} in:title" --repo ${{ github.repository }} --json number --jq '.[] | .number'); do
gh pr close $pr --repo ${{ github.repository }} --delete-branch || true
done
gh pr create --title "Sync ${SYNC_BRANCH}" --body "Sync ${SYNC_BRANCH}" --base master --head ${NEW_BRANCH_NAME} --label sync-release-branch --repo ${{ github.repository }}
- name: Send notification to Slack
if: failure()
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
run: |
awk '{printf "%s\\n", $0}' merge.log > merge.log.1
cat <<EOF > payload.json
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "Automatic sync of ${SYNC_BRANCH} branch failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
}
},
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": "\`\`\`$(cat merge.log.1)\`\`\`"
}
}
]
}
EOF
curl -X POST -H 'Content-type: application/json' --data @payload.json "$SLACK_WEBHOOK_URL"

View File

@ -23,7 +23,7 @@ jobs:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws-region: ${{ secrets.AWS_DEFAULT_REGION }}
- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ github.event.inputs.tag }}
- name: Detect profile

View File

@ -10,8 +10,8 @@ include env.sh
# Dashboard version
# from https://github.com/emqx/emqx-dashboard5
export EMQX_DASHBOARD_VERSION ?= v1.9.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.7.1
export EMQX_DASHBOARD_VERSION ?= v1.10.0-beta.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.8.0-beta.1
export EMQX_RELUP ?= true
export EMQX_REL_FORM ?= tgz
@ -28,6 +28,8 @@ CT_COVER_EXPORT_PREFIX ?= $(PROFILE)
export REBAR_GIT_CLONE_OPTIONS += --depth=1
ELIXIR_COMMON_DEPS := ensure-hex ensure-mix-rebar3 ensure-mix-rebar
.PHONY: default
default: $(REBAR) $(PROFILE)
@ -58,8 +60,12 @@ ensure-mix-rebar3: $(REBAR)
ensure-mix-rebar: $(REBAR)
@mix local.rebar --if-missing --force
.PHONY: elixir-common-deps
elixir-common-deps: $(ELIXIR_COMMON_DEPS)
.PHONY: mix-deps-get
mix-deps-get: $(ELIXIR_COMMON_DEPS)
mix-deps-get: elixir-common-deps
@mix deps.get
.PHONY: eunit
@ -238,7 +244,7 @@ $(foreach zt,$(ALL_ZIPS),$(eval $(call download-relup-packages,$(zt))))
## relup target is to create relup instructions
.PHONY: $(REL_PROFILES:%=%-relup)
define gen-relup-target
$1-relup: $1-relup-downloads $(COMMON_DEPS)
$1-relup: $(COMMON_DEPS)
@$(BUILD) $1 relup
endef
ALL_TGZS = $(REL_PROFILES)
@ -247,7 +253,7 @@ $(foreach zt,$(ALL_TGZS),$(eval $(call gen-relup-target,$(zt))))
## tgz target is to create a release package .tar.gz with relup
.PHONY: $(REL_PROFILES:%=%-tgz)
define gen-tgz-target
$1-tgz: $1-relup
$1-tgz: $(COMMON_DEPS)
@$(BUILD) $1 tgz
endef
ALL_TGZS = $(REL_PROFILES)
@ -310,10 +316,20 @@ $(foreach tt,$(ALL_ELIXIR_TGZS),$(eval $(call gen-elixir-tgz-target,$(tt))))
.PHONY: fmt
fmt: $(REBAR)
@$(SCRIPTS)/erlfmt -w 'apps/*/{src,include,priv,test,integration_test}/**/*.{erl,hrl,app.src,eterm}'
@$(SCRIPTS)/erlfmt -w 'apps/*/rebar.config' 'apps/emqx/rebar.config.script' '.ci/fvt_tests/http_server/rebar.config'
@$(SCRIPTS)/erlfmt -w 'rebar.config' 'rebar.config.erl'
@$(SCRIPTS)/erlfmt -w 'scripts/*.escript' 'bin/*.escript' 'bin/nodetool'
@find . \( -name '*.app.src' -o \
-name '*.erl' -o \
-name '*.hrl' -o \
-name 'rebar.config' -o \
-name '*.eterm' -o \
-name '*.escript' \) \
-not -path '*/_build/*' \
-not -path '*/deps/*' \
-not -path '*/_checkouts/*' \
-type f \
| xargs $(SCRIPTS)/erlfmt -w
@$(SCRIPTS)/erlfmt -w 'apps/emqx/rebar.config.script'
@$(SCRIPTS)/erlfmt -w 'elvis.config'
@$(SCRIPTS)/erlfmt -w 'bin/nodetool'
@mix format
.PHONY: clean-test-cluster-config

View File

@ -45,6 +45,10 @@
).
-define(assertReceive(PATTERN, TIMEOUT),
?assertReceive(PATTERN, TIMEOUT, #{})
).
-define(assertReceive(PATTERN, TIMEOUT, EXTRA),
(fun() ->
receive
X__V = PATTERN -> X__V
@ -54,7 +58,8 @@
{module, ?MODULE},
{line, ?LINE},
{expression, (??PATTERN)},
{mailbox, ?drainMailbox()}
{mailbox, ?drainMailbox()},
{extra_info, EXTRA}
]}
)
end
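The new three-argument form lets a test attach extra context that is reported alongside the mailbox dump when the assertion fails. A small usage sketch; the message pattern and map contents are illustrative, not taken from this diff:

    %% Illustrative only: wait up to 1s for an acknowledgement and, on failure,
    %% include the client id in the assertion report via the EXTRA map.
    ?assertReceive({puback, _PacketId}, 1000, #{clientid => <<"c1">>}).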

View File

@ -65,9 +65,20 @@
%% Route
%%--------------------------------------------------------------------
-record(share_dest, {
session_id :: emqx_session:session_id(),
group :: emqx_types:group()
}).
-record(route, {
topic :: binary(),
dest :: node() | {binary(), node()} | emqx_session:session_id()
dest ::
node()
| {binary(), node()}
| emqx_session:session_id()
%% One session can also have multiple subscriptions to the same topic through different groups
| #share_dest{}
| emqx_external_broker:dest()
}).
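With this change a route destination can point at a specific shared-subscription group of a session, not just the session itself. A hedged construction sketch — only the record shapes come from the diff above; the topic, group, and session id values are made up:

    %% Illustrative route entry targeting one group of a shared subscription.
    Dest = #share_dest{session_id = <<"client-1">>, group = <<"g1">>},
    Route = #route{topic = <<"t/shared/#">>, dest = Dest}.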
%%--------------------------------------------------------------------

View File

@ -683,6 +683,7 @@ end).
-define(FRAME_PARSE_ERROR, frame_parse_error).
-define(FRAME_SERIALIZE_ERROR, frame_serialize_error).
-define(THROW_FRAME_ERROR(Reason), erlang:throw({?FRAME_PARSE_ERROR, Reason})).
-define(THROW_SERIALIZE_ERROR(Reason), erlang:throw({?FRAME_SERIALIZE_ERROR, Reason})).
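Both macros throw a tagged tuple so callers can tell parse failures apart from serialize failures. A hedged usage sketch; the reason map fields are assumptions, not taken from this diff:

    %% Illustrative only: signal an oversized frame from the parser. Callers
    %% catch {frame_parse_error, Reason} and feed Reason into the channel's
    %% frame-error handling (see the emqx_channel diff further below).
    ?THROW_FRAME_ERROR(#{cause => frame_too_large, limit => 1024}).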

View File

@ -32,7 +32,7 @@
%% `apps/emqx/src/bpapi/README.md'
%% Opensource edition
-define(EMQX_RELEASE_CE, "5.7.1").
-define(EMQX_RELEASE_CE, "5.8.0-alpha.1").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.7.1").
-define(EMQX_RELEASE_EE, "5.8.0-alpha.1").

View File

@ -20,4 +20,11 @@
-define(IS_SESSION_IMPL_MEM(S), (is_tuple(S) andalso element(1, S) =:= session)).
-define(IS_SESSION_IMPL_DS(S), (is_map_key(id, S))).
%% (Erlang) messages that a connection process should forward to the
%% session handler.
-record(session_message, {
message :: term()
}).
-define(session_message(MSG), #session_message{message = MSG}).
-endif.
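The wrapper marks Erlang messages that a connection process should hand over to the session implementation instead of interpreting itself. A hedged sketch of the sending side; the event term and function name are illustrative. The receiving side is the new emqx_channel handle_info clause shown later in this comparison:

    %% Illustrative only: a helper in the connection owner forwards an event
    %% to the session via the new wrapper; emqx_channel unwraps it and calls
    %% emqx_session:handle_info/2.
    notify_session(ConnPid, Event) ->
        ConnPid ! ?session_message(Event),
        ok.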

View File

@ -86,5 +86,6 @@
{'SOURCE_ERROR', <<"Source error">>},
{'UPDATE_FAILED', <<"Update failed">>},
{'REST_FAILED', <<"Reset source or config failed">>},
{'CLIENT_NOT_RESPONSE', <<"Client not responding">>}
{'CLIENT_NOT_RESPONSE', <<"Client not responding">>},
{'UNSUPPORTED_MEDIA_TYPE', <<"Unsupported media type">>}
]).

View File

@ -30,7 +30,10 @@
logger:log(
Level,
(Data),
Meta
maps:merge(Meta, #{
mfa => {?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY},
line => ?LINE
})
);
false ->
ok
@ -38,16 +41,20 @@
).
%% NOTE: do not forget to use atom for msg and add every used msg to
%% the default value of `log.thorttling.msgs` list.
%% the default value of `log.throttling.msgs` list.
-define(SLOG_THROTTLE(Level, Data),
?SLOG_THROTTLE(Level, Data, #{})
).
-define(SLOG_THROTTLE(Level, Data, Meta),
?SLOG_THROTTLE(Level, undefined, Data, Meta)
).
-define(SLOG_THROTTLE(Level, UniqueKey, Data, Meta),
case logger:allow(Level, ?MODULE) of
true ->
(fun(#{msg := __Msg} = __Data) ->
case emqx_log_throttler:allow(__Msg) of
case emqx_log_throttler:allow(__Msg, UniqueKey) of
true ->
logger:log(Level, __Data, Meta);
false ->
@ -84,7 +91,7 @@
?_DO_TRACE(Tag, Msg, Meta),
?SLOG(
Level,
(emqx_trace_formatter:format_meta_map(Meta))#{msg => Msg, tag => Tag},
(Meta)#{msg => Msg, tag => Tag},
#{is_trace => false}
)
end).
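The macro now threads an optional UniqueKey through to emqx_log_throttler:allow/2, so callers can throttle per key (for example per resource id); the existing two- and three-argument forms default the key to undefined. A hedged usage sketch — the message atom, tag, and function name are illustrative:

    %% Illustrative only: throttle this warning per resource id instead of
    %% globally per message atom. Per the NOTE above, the msg atom would also
    %% need to be listed in the default value of `log.throttling.msgs`.
    log_buffer_overflow(ResourceId) ->
        ?SLOG_THROTTLE(
            warning,
            ResourceId,
            #{msg => data_bridge_buffer_overflow, resource_id => ResourceId},
            #{tag => "RESOURCE"}
        ).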

View File

@ -25,11 +25,16 @@ all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
case emqx_ds_test_helpers:skip_if_norepl() of
false ->
TCApps = emqx_cth_suite:start(
app_specs(),
#{work_dir => emqx_cth_suite:work_dir(Config)}
),
[{tc_apps, TCApps} | Config].
[{tc_apps, TCApps} | Config];
Yes ->
Yes
end.
end_per_suite(Config) ->
TCApps = ?config(tc_apps, Config),
@ -158,7 +163,7 @@ mk_clientid(Prefix, ID) ->
restart_node(Node, NodeSpec) ->
?tp(will_restart_node, #{}),
emqx_cth_cluster:restart(Node, NodeSpec),
emqx_cth_cluster:restart(NodeSpec),
wait_nodeup(Node),
?tp(restarted_node, #{}),
ok.
@ -253,7 +258,7 @@ t_session_subscription_idempotency(Config) ->
ok
end,
fun(Trace) ->
fun(_Trace) ->
Session = session_open(Node1, ClientId),
?assertMatch(
#{SubTopicFilter := #{}},
@ -326,7 +331,7 @@ t_session_unsubscription_idempotency(Config) ->
ok
end,
fun(Trace) ->
fun(_Trace) ->
Session = session_open(Node1, ClientId),
?assertEqual(
#{},

View File

@ -8,7 +8,7 @@ defmodule EMQX.MixProject do
app: :emqx,
version: "0.1.0",
build_path: "../../_build",
erlc_paths: UMP.erlc_paths(),
erlc_paths: erlc_paths(),
erlc_options: [
{:i, "src"}
| UMP.erlc_options()
@ -36,8 +36,9 @@ defmodule EMQX.MixProject do
def deps() do
## FIXME!!! go though emqx.app.src and add missing stuff...
[
{:emqx_mix_utils, in_umbrella: true, runtime: false},
{:emqx_utils, in_umbrella: true},
# {:emqx_ds_backends, in_umbrella: true},
{:emqx_ds_backends, in_umbrella: true},
UMP.common_dep(:gproc),
UMP.common_dep(:gen_rpc),
@ -53,6 +54,15 @@ defmodule EMQX.MixProject do
] ++ UMP.quicer_dep()
end
defp erlc_paths() do
paths = UMP.erlc_paths()
if UMP.test_env?() do
["integration_test" | paths]
else
paths
end
end
defp extra_dirs() do
dirs = ["src", "etc"]
if UMP.test_env?() do

View File

@ -10,12 +10,14 @@
{emqx_bridge,5}.
{emqx_bridge,6}.
{emqx_broker,1}.
{emqx_cluster_link,1}.
{emqx_cm,1}.
{emqx_cm,2}.
{emqx_cm,3}.
{emqx_conf,1}.
{emqx_conf,2}.
{emqx_conf,3}.
{emqx_conf,4}.
{emqx_connector,1}.
{emqx_dashboard,1}.
{emqx_delayed,1}.
@ -25,6 +27,7 @@
{emqx_ds,2}.
{emqx_ds,3}.
{emqx_ds,4}.
{emqx_ds_shared_sub,1}.
{emqx_eviction_agent,1}.
{emqx_eviction_agent,2}.
{emqx_eviction_agent,3}.
@ -47,6 +50,7 @@
{emqx_mgmt_api_plugins,1}.
{emqx_mgmt_api_plugins,2}.
{emqx_mgmt_api_plugins,3}.
{emqx_mgmt_api_relup,1}.
{emqx_mgmt_cluster,1}.
{emqx_mgmt_cluster,2}.
{emqx_mgmt_cluster,3}.

View File

@ -24,18 +24,18 @@
{deps, [
{emqx_utils, {path, "../emqx_utils"}},
{emqx_durable_storage, {path, "../emqx_durable_storage"}},
{emqx_ds_backends, {path, "../emqx_ds_backends"}},
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
{gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.3"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.12.0"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.5"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.43.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.43.2"}}},
{emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.10"}}},
{ra, "2.7.3"}
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.10"}}}
]}.
{plugins, [{rebar3_proper, "0.12.1"}, rebar3_path_deps]}.

View File

@ -24,7 +24,8 @@ IsQuicSupp = fun() ->
end,
Bcrypt = {bcrypt, {git, "https://github.com/emqx/erlang-bcrypt.git", {tag, "0.6.0"}}},
Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.313"}}}.
Quicer =
{quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.500"}}}.
Dialyzer = fun(Config) ->
{dialyzer, OldDialyzerConfig} = lists:keyfind(dialyzer, 1, Config),

View File

@ -237,27 +237,29 @@ log_formatter(HandlerName, Conf) ->
_ ->
conf_get("formatter", Conf)
end,
TsFormat = timstamp_format(Conf),
TsFormat = timestamp_format(Conf),
WithMfa = conf_get("with_mfa", Conf),
PayloadEncode = conf_get("payload_encode", Conf, text),
do_formatter(
Format, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, PayloadEncode
Format, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa, PayloadEncode
).
%% auto | epoch | rfc3339
timstamp_format(Conf) ->
timestamp_format(Conf) ->
conf_get("timestamp_format", Conf).
%% helpers
do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, PayloadEncode) ->
do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa, PayloadEncode) ->
{emqx_logger_jsonfmt, #{
chars_limit => CharsLimit,
single_line => SingleLine,
time_offset => TimeOffSet,
depth => Depth,
timestamp_format => TsFormat,
with_mfa => WithMfa,
payload_encode => PayloadEncode
}};
do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, PayloadEncode) ->
do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, WithMfa, PayloadEncode) ->
{emqx_logger_textfmt, #{
template => ["[", level, "] ", msg, "\n"],
chars_limit => CharsLimit,
@ -265,6 +267,7 @@ do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth, TsFormat, PayloadE
time_offset => TimeOffSet,
depth => Depth,
timestamp_format => TsFormat,
with_mfa => WithMfa,
payload_encode => PayloadEncode
}}.

View File

@ -2,7 +2,7 @@
{application, emqx, [
{id, "emqx"},
{description, "EMQX Core"},
{vsn, "5.3.3"},
{vsn, "5.3.4"},
{modules, []},
{registered, []},
{applications, [
@ -18,7 +18,7 @@
sasl,
lc,
hocon,
emqx_durable_storage,
emqx_ds_backends,
bcrypt,
pbkdf2,
emqx_http_lib,

View File

@ -33,6 +33,7 @@
-export([
check/1,
check_clientid/1,
create/1,
look_up/1,
delete/1,
@ -117,6 +118,10 @@ check(ClientInfo) ->
do_check({peerhost, maps:get(peerhost, ClientInfo, undefined)}) orelse
do_check_rules(ClientInfo).
-spec check_clientid(emqx_types:clientid()) -> boolean().
check_clientid(ClientId) ->
do_check({clientid, ClientId}) orelse do_check_rules(#{clientid => ClientId}).
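The new check_clientid/1 makes it possible to test a ban purely by client id, without building a full client-info map. A hedged usage sketch; the module-qualified call and the client id are illustrative:

    %% Illustrative only: reject an operation early if the client id is banned.
    case emqx_banned:check_clientid(<<"c1">>) of
        true -> {error, banned};
        false -> ok
    end.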
-spec format(emqx_types:banned()) -> map().
format(#banned{
who = Who0,

View File

@ -244,11 +244,24 @@ publish(Msg) when is_record(Msg, message) ->
topic => Topic
}),
[];
Msg1 = #message{topic = Topic} ->
PersistRes = persist_publish(Msg1),
route(aggre(emqx_router:match_routes(Topic)), delivery(Msg1), PersistRes)
Msg1 = #message{} ->
do_publish(Msg1);
Msgs when is_list(Msgs) ->
do_publish_many(Msgs)
end.
do_publish_many([]) ->
[];
do_publish_many([Msg | T]) ->
do_publish(Msg) ++ do_publish_many(T).
do_publish(#message{topic = Topic} = Msg) ->
PersistRes = persist_publish(Msg),
Routes = aggre(emqx_router:match_routes(Topic)),
Delivery = delivery(Msg),
RouteRes = route(Routes, Delivery, PersistRes),
do_forward_external(Delivery, RouteRes).
persist_publish(Msg) ->
case emqx_persistent_message:persist(Msg) of
ok ->
@ -332,6 +345,9 @@ aggre([], false, Acc) ->
aggre([], true, Acc) ->
lists:usort(Acc).
do_forward_external(Delivery, RouteRes) ->
emqx_external_broker:forward(Delivery) ++ RouteRes.
%% @doc Forward message to another node.
-spec forward(
node(), emqx_types:topic() | emqx_types:share(), emqx_types:delivery(), RpcMode :: sync | async
@ -643,6 +659,7 @@ maybe_delete_route(Topic) ->
sync_route(Action, Topic, ReplyTo) ->
EnabledOn = emqx_config:get([broker, routing, batch_sync, enable_on]),
Res =
case EnabledOn of
all ->
push_sync_route(Action, Topic, ReplyTo);
@ -655,7 +672,14 @@ sync_route(Action, Topic, ReplyTo) ->
false ->
regular_sync_route(Action, Topic)
end
end.
end,
_ = external_sync_route(Action, Topic),
Res.
external_sync_route(add, Topic) ->
emqx_external_broker:add_route(Topic);
external_sync_route(delete, Topic) ->
emqx_external_broker:delete_route(Topic).
push_sync_route(Action, Topic, Opts) ->
emqx_router_syncer:push(Action, Topic, node(), Opts).

View File

@ -47,7 +47,7 @@ init([]) ->
router_syncer_pool,
hash,
PoolSize,
{emqx_router_syncer, start_link, []}
{emqx_router_syncer, start_link_pooled, []}
]),
%% Shared subscription

View File

@ -19,6 +19,7 @@
-include("emqx.hrl").
-include("emqx_channel.hrl").
-include("emqx_session.hrl").
-include("emqx_mqtt.hrl").
-include("emqx_access_control.hrl").
-include("logger.hrl").
@ -145,7 +146,9 @@
-type replies() :: emqx_types:packet() | reply() | [reply()].
-define(IS_MQTT_V5, #channel{conninfo = #{proto_ver := ?MQTT_PROTO_V5}}).
-define(IS_CONNECTED_OR_REAUTHENTICATING(ConnState),
((ConnState == connected) orelse (ConnState == reauthenticating))
).
-define(IS_COMMON_SESSION_TIMER(N),
((N == retry_delivery) orelse (N == expire_awaiting_rel))
).
@ -234,7 +237,7 @@ caps(#channel{clientinfo = #{zone := Zone}}) ->
-spec init(emqx_types:conninfo(), opts()) -> channel().
init(
ConnInfo = #{
peername := {PeerHost, PeerPort},
peername := {PeerHost, PeerPort} = PeerName,
sockname := {_Host, SockPort}
},
#{
@ -258,6 +261,9 @@ init(
listener => ListenerId,
protocol => Protocol,
peerhost => PeerHost,
%% We copy peername to clientinfo because some event contexts only have access
%% to client info (e.g.: authn/authz).
peername => PeerName,
peerport => PeerPort,
sockport => SockPort,
clientid => undefined,
@ -333,7 +339,7 @@ take_conn_info_fields(Fields, ClientInfo, ConnInfo) ->
| {shutdown, Reason :: term(), channel()}
| {shutdown, Reason :: term(), replies(), channel()}.
handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = ConnState}) when
ConnState =:= connected orelse ConnState =:= reauthenticating
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel);
handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = connecting}) ->
@ -563,29 +569,8 @@ handle_in(
process_disconnect(ReasonCode, Properties, NChannel);
handle_in(?AUTH_PACKET(), Channel) ->
handle_out(disconnect, ?RC_IMPLEMENTATION_SPECIFIC_ERROR, Channel);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = idle}) ->
shutdown(shutdown_count(frame_error, Reason), Channel);
handle_in(
{frame_error, #{cause := frame_too_large} = R}, Channel = #channel{conn_state = connecting}
) ->
shutdown(
shutdown_count(frame_error, R), ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), Channel
);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = connecting}) ->
shutdown(shutdown_count(frame_error, Reason), ?CONNACK_PACKET(?RC_MALFORMED_PACKET), Channel);
handle_in(
{frame_error, #{cause := frame_too_large}}, Channel = #channel{conn_state = ConnState}
) when
ConnState =:= connected orelse ConnState =:= reauthenticating
->
handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = ConnState}) when
ConnState =:= connected orelse ConnState =:= reauthenticating
->
handle_out(disconnect, {?RC_MALFORMED_PACKET, Reason}, Channel);
handle_in({frame_error, Reason}, Channel = #channel{conn_state = disconnected}) ->
?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
{ok, Channel};
handle_in({frame_error, Reason}, Channel) ->
handle_frame_error(Reason, Channel);
handle_in(Packet, Channel) ->
?SLOG(error, #{msg => "disconnecting_due_to_unexpected_message", packet => Packet}),
handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel).
@ -1017,6 +1002,68 @@ not_nacked({deliver, _Topic, Msg}) ->
true
end.
%%--------------------------------------------------------------------
%% Handle Frame Error
%%--------------------------------------------------------------------
handle_frame_error(
Reason = #{cause := frame_too_large},
Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
ShutdownCount = shutdown_count(frame_error, Reason),
case proto_ver(Reason, ConnInfo) of
?MQTT_PROTO_V5 ->
handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
_ ->
shutdown(ShutdownCount, Channel)
end;
%% Only send CONNACK with reason code `frame_too_large` for MQTT-v5.0 when connecting,
%% otherwise DONOT send any CONNACK or DISCONNECT packet.
handle_frame_error(
Reason,
Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
) when
is_map(Reason) andalso
(ConnState == idle orelse ConnState == connecting)
->
ShutdownCount = shutdown_count(frame_error, Reason),
ProtoVer = proto_ver(Reason, ConnInfo),
NChannel = Channel#channel{conninfo = ConnInfo#{proto_ver => ProtoVer}},
case ProtoVer of
?MQTT_PROTO_V5 ->
shutdown(ShutdownCount, ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), NChannel);
_ ->
shutdown(ShutdownCount, NChannel)
end;
handle_frame_error(
Reason,
Channel = #channel{conn_state = connecting}
) ->
shutdown(
shutdown_count(frame_error, Reason),
?CONNACK_PACKET(?RC_MALFORMED_PACKET),
Channel
);
handle_frame_error(
Reason,
Channel = #channel{conn_state = ConnState}
) when
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
handle_out(
disconnect,
{?RC_MALFORMED_PACKET, Reason},
Channel
);
handle_frame_error(
Reason,
Channel = #channel{conn_state = disconnected}
) ->
?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
{ok, Channel}.
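%% Illustrative summary of the clauses above, keyed on connection state, error
%% cause, and the protocol version recovered via proto_ver/2:
%%   connected/reauthenticating + frame_too_large + v5 -> DISCONNECT (packet too large)
%%   connected/reauthenticating + frame_too_large + =< v4 -> close without a packet
%%   idle/connecting + structured (map) reason + v5 -> CONNACK (packet too large), then close
%%   idle/connecting + structured (map) reason + =< v4 -> close without a packet
%%   connecting + other reasons -> CONNACK (malformed packet), then close
%%   connected/reauthenticating + other reasons -> DISCONNECT (malformed packet)
%%   disconnected -> log the error and keep the channel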
%%--------------------------------------------------------------------
%% Handle outgoing packet
%%--------------------------------------------------------------------
@ -1285,7 +1332,7 @@ handle_info(
session = Session
}
) when
ConnState =:= connected orelse ConnState =:= reauthenticating
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
{Intent, Session1} = session_disconnect(ClientInfo, ConnInfo, Session),
Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(sock_closed, Channel)),
@ -1315,6 +1362,9 @@ handle_info({'DOWN', Ref, process, Pid, Reason}, Channel) ->
[] -> {ok, Channel};
Msgs -> {ok, Msgs, Channel}
end;
handle_info(?session_message(Message), #channel{session = Session} = Channel) ->
NSession = emqx_session:handle_info(Message, Session),
{ok, Channel#channel{session = NSession}};
handle_info(Info, Channel) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}),
{ok, Channel}.
@ -1749,7 +1799,7 @@ maybe_add_cert(Map, #channel{conninfo = ConnInfo}) ->
maybe_add_cert(Map, ConnInfo);
maybe_add_cert(Map, #{peercert := PeerCert}) when is_binary(PeerCert) ->
%% NOTE: it's raw binary at this point,
%% encoding to PEM (base64) is done lazy in emqx_authn_utils:render_var
%% encoding to PEM (base64) is done lazy in emqx_auth_utils:render_var
Map#{cert_pem => PeerCert};
maybe_add_cert(Map, _) ->
Map.
@ -2629,8 +2679,7 @@ save_alias(outbound, AliasId, Topic, TopicAliases = #{outbound := Aliases}) ->
NAliases = maps:put(Topic, AliasId, Aliases),
TopicAliases#{outbound => NAliases}.
-compile({inline, [reply/2, shutdown/2, shutdown/3, sp/1, flag/1]}).
-compile({inline, [reply/2, shutdown/2, shutdown/3]}).
reply(Reply, Channel) ->
{reply, Reply, Channel}.
@ -2666,13 +2715,13 @@ disconnect_and_shutdown(
?IS_MQTT_V5 =
#channel{conn_state = ConnState}
) when
ConnState =:= connected orelse ConnState =:= reauthenticating
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
NChannel = ensure_disconnected(Reason, Channel),
shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), NChannel);
%% mqtt v3/v4 connected sessions
disconnect_and_shutdown(Reason, Reply, Channel = #channel{conn_state = ConnState}) when
ConnState =:= connected orelse ConnState =:= reauthenticating
?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
->
NChannel = ensure_disconnected(Reason, Channel),
shutdown(Reason, Reply, NChannel);
@ -2715,6 +2764,13 @@ is_durable_session(#channel{session = Session}) ->
false
end.
proto_ver(#{proto_ver := ProtoVer}, _ConnInfo) ->
ProtoVer;
proto_ver(_Reason, #{proto_ver := ProtoVer}) ->
ProtoVer;
proto_ver(_, _) ->
?MQTT_PROTO_V4.
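%% A minimal sketch (illustration only; the example function name is an
%% assumption) of the fallback order implemented by proto_ver/2 above: the
%% version captured in the frame-error reason wins, then the connection info,
%% and finally MQTT v3.1.1 (?MQTT_PROTO_V4).
proto_ver_fallback_example() ->
    5 = proto_ver(#{proto_ver => 5}, #{proto_ver => 4}),
    4 = proto_ver(#{cause => frame_too_large}, #{proto_ver => 4}),
    4 = proto_ver(#{cause => frame_too_large}, #{}),
    ok.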
%%--------------------------------------------------------------------
%% For CT tests
%%--------------------------------------------------------------------


@ -499,15 +499,14 @@ fill_defaults(RawConf, Opts) ->
).
-spec fill_defaults(module(), raw_config(), hocon_tconf:opts()) -> map().
fill_defaults(_SchemaMod, RawConf = #{<<"durable_storage">> := _}, _) ->
fill_defaults(SchemaMod, RawConf = #{<<"durable_storage">> := Ds}, Opts) ->
%% FIXME: kludge to prevent `emqx_config' module from filling in
%% the default values for backends and layouts. These records are
%% inside unions, and adding default values there will add
%% incompatible fields.
%%
%% Note: this function is called for each individual conf root, so
%% this clause only affects this particular subtree.
RawConf;
RawConf1 = maps:remove(<<"durable_storage">>, RawConf),
Conf = fill_defaults(SchemaMod, RawConf1, Opts),
Conf#{<<"durable_storage">> => Ds};
fill_defaults(SchemaMod, RawConf, Opts0) ->
Opts = maps:merge(#{required => false, make_serializable => true}, Opts0),
hocon_tconf:check_plain(


@ -173,7 +173,9 @@
system_code_change/4
]}
).
-dialyzer({no_missing_calls, [handle_msg/2]}).
-ifndef(BUILD_WITHOUT_QUIC).
-spec start_link
(esockd:transport(), esockd:socket(), emqx_channel:opts()) ->
{ok, pid()};
@ -183,6 +185,9 @@
emqx_quic_connection:cb_state()
) ->
{ok, pid()}.
-else.
-spec start_link(esockd:transport(), esockd:socket(), emqx_channel:opts()) -> {ok, pid()}.
-endif.
start_link(Transport, Socket, Options) ->
Args = [self(), Transport, Socket, Options],
@ -323,7 +328,7 @@ init_state(
max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size])
},
ParseState = emqx_frame:initial_parse_state(FrameOpts),
Serialize = emqx_frame:serialize_opts(),
Serialize = emqx_frame:initial_serialize_opts(FrameOpts),
%% Init Channel
Channel = emqx_channel:init(ConnInfo, Opts),
GcState =
@ -468,8 +473,7 @@ cancel_stats_timer(State) ->
process_msg([], State) ->
{ok, State};
process_msg([Msg | More], State) ->
try
case handle_msg(Msg, State) of
try handle_msg(Msg, State) of
ok ->
process_msg(More, State);
{ok, NState} ->
@ -480,7 +484,6 @@ process_msg([Msg | More], State) ->
{stop, Reason, NState};
{stop, Reason} ->
{stop, Reason, State}
end
catch
exit:normal ->
{stop, normal, State};
@ -566,12 +569,10 @@ handle_msg({Closed, _Sock}, State) when
handle_msg({Passive, _Sock}, State) when
Passive == tcp_passive; Passive == ssl_passive; Passive =:= quic_passive
->
%% In Stats
Pubs = emqx_pd:reset_counter(incoming_pubs),
Bytes = emqx_pd:reset_counter(incoming_bytes),
InStats = #{cnt => Pubs, oct => Bytes},
%% Run GC and Check OOM
NState1 = check_oom(run_gc(InStats, State)),
NState1 = check_oom(Pubs, Bytes, run_gc(Pubs, Bytes, State)),
handle_info(activate_socket, NState1);
handle_msg(
Deliver = {deliver, _Topic, _Msg},
@ -782,7 +783,8 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
input_bytes => Data,
parsed_packets => Packets
}),
{[{frame_error, Reason} | Packets], State};
NState = enrich_state(Reason, State),
{[{frame_error, Reason} | Packets], NState};
error:Reason:Stacktrace ->
?LOG(error, #{
at_state => emqx_frame:describe_state(ParseState),
@ -899,8 +901,7 @@ sent(#state{listener = {Type, Listener}} = State) ->
true ->
Pubs = emqx_pd:reset_counter(outgoing_pubs),
Bytes = emqx_pd:reset_counter(outgoing_bytes),
OutStats = #{cnt => Pubs, oct => Bytes},
{ok, check_oom(run_gc(OutStats, State))};
{ok, check_oom(Pubs, Bytes, run_gc(Pubs, Bytes, State))};
false ->
{ok, State}
end.
@ -993,17 +994,23 @@ check_limiter(
Data,
WhenOk,
Msgs,
#state{limiter_timer = undefined, limiter = Limiter} = State
#state{channel = Channel, limiter_timer = undefined, limiter = Limiter} = State
) ->
case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
{pause, Time, Limiter2} ->
?SLOG(debug, #{
msg => "pause_time_due_to_rate_limit",
needs => Needs,
time_in_ms => Time
}),
?SLOG_THROTTLE(
warning,
#{
msg => socket_receive_paused_by_rate_limit,
paused_ms => Time
},
#{
tag => "RATE",
clientid => emqx_channel:info(clientid, Channel)
}
),
Retry = #retry{
types = [Type || {_, Type} <- Needs],
@ -1037,7 +1044,7 @@ check_limiter(
%% try to perform a retry
-spec retry_limiter(state()) -> _.
retry_limiter(#state{limiter = Limiter} = State) ->
retry_limiter(#state{channel = Channel, limiter = Limiter} = State) ->
#retry{types = Types, data = Data, next = Next} =
emqx_limiter_container:get_retry_context(Limiter),
case emqx_limiter_container:retry_list(Types, Limiter) of
@ -1051,11 +1058,17 @@ retry_limiter(#state{limiter = Limiter} = State) ->
}
);
{pause, Time, Limiter2} ->
?SLOG(debug, #{
msg => "pause_time_due_to_rate_limit",
types => Types,
time_in_ms => Time
}),
?SLOG_THROTTLE(
warning,
#{
msg => socket_receive_paused_by_rate_limit,
paused_ms => Time
},
#{
tag => "RATE",
clientid => emqx_channel:info(clientid, Channel)
}
),
TRef = start_timer(Time, limit_timeout),
@ -1068,25 +1081,36 @@ retry_limiter(#state{limiter = Limiter} = State) ->
%%--------------------------------------------------------------------
%% Run GC and Check OOM
run_gc(Stats, State = #state{gc_state = GcSt, zone = Zone}) ->
run_gc(Pubs, Bytes, State = #state{gc_state = GcSt, zone = Zone}) ->
case
?ENABLED(GcSt) andalso not emqx_olp:backoff_gc(Zone) andalso
emqx_gc:run(Stats, GcSt)
emqx_gc:run(Pubs, Bytes, GcSt)
of
false -> State;
{_IsGC, GcSt1} -> State#state{gc_state = GcSt1}
end.
check_oom(State = #state{channel = Channel}) ->
check_oom(Pubs, Bytes, State = #state{channel = Channel}) ->
ShutdownPolicy = emqx_config:get_zone_conf(
emqx_channel:info(zone, Channel), [force_shutdown]
),
?tp(debug, check_oom, #{policy => ShutdownPolicy}),
case emqx_utils:check_oom(ShutdownPolicy) of
{shutdown, Reason} ->
%% triggers terminate/2 callback immediately
?tp(warning, check_oom_shutdown, #{
policy => ShutdownPolicy,
incoming_pubs => Pubs,
incoming_bytes => Bytes,
shutdown => Reason
}),
erlang:exit({shutdown, Reason});
_ ->
Result ->
?tp(debug, check_oom_ok, #{
policy => ShutdownPolicy,
incoming_pubs => Pubs,
incoming_bytes => Bytes,
result => Result
}),
ok
end,
State.
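%% A minimal sketch (illustration only; the example function name is an
%% assumption) of the counter-passing convention used by run_gc/3 and
%% check_oom/3 above: the per-socket counters are read once and handed to
%% emqx_gc positionally, instead of first being wrapped in a
%% #{cnt => _, oct => _} map as before.
gc_counters_example(GcSt0) ->
    %% e.g. GcSt0 = emqx_gc:init(#{count => 1000, bytes => 1024 * 1024})
    Pubs = emqx_pd:reset_counter(incoming_pubs),
    Bytes = emqx_pd:reset_counter(incoming_bytes),
    {_DidGc, GcSt1} = emqx_gc:run(Pubs, Bytes, GcSt0),
    GcSt1.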
@ -1204,6 +1228,12 @@ inc_counter(Key, Inc) ->
_ = emqx_pd:inc_counter(Key, Inc),
ok.
enrich_state(#{parse_state := NParseState}, State) ->
Serialize = emqx_frame:serialize_opts(NParseState),
State#state{parse_state = NParseState, serialize = Serialize};
enrich_state(_, State) ->
State.
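%% Illustrative note: a CONNECT that fails to parse reports the protocol
%% version it advertised through `parse_state' (see the emqx_frame changes
%% below), so refreshing the serializer here lets the resulting
%% CONNACK/DISCONNECT be encoded for the version the client actually sent
%% rather than for the defaults.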
set_tcp_keepalive({quic, _Listener}) ->
ok;
set_tcp_keepalive({Type, Id}) ->


@ -18,7 +18,7 @@
-module(emqx_ds_schema).
%% API:
-export([schema/0, translate_builtin/1]).
-export([schema/0, translate_builtin_raft/1, translate_builtin_local/1]).
%% Behavior callbacks:
-export([fields/1, desc/1, namespace/0]).
@ -32,42 +32,51 @@
%% Type declarations
%%================================================================================
-ifndef(EMQX_RELEASE_EDITION).
-define(EMQX_RELEASE_EDITION, ce).
-endif.
-if(?EMQX_RELEASE_EDITION == ee).
-define(DEFAULT_BACKEND, builtin_raft).
-define(BUILTIN_BACKENDS, [ref(builtin_raft), ref(builtin_local)]).
-else.
-define(DEFAULT_BACKEND, builtin_local).
-define(BUILTIN_BACKENDS, [ref(builtin_local)]).
-endif.
%%================================================================================
%% API
%%================================================================================
translate_builtin(
translate_builtin_raft(
Backend = #{
backend := builtin,
backend := builtin_raft,
n_shards := NShards,
n_sites := NSites,
replication_factor := ReplFactor,
layout := Layout
}
) ->
Storage =
case Layout of
#{
type := wildcard_optimized,
bits_per_topic_level := BitsPerTopicLevel,
epoch_bits := EpochBits,
topic_index_bytes := TIBytes
} ->
{emqx_ds_storage_bitfield_lts, #{
bits_per_topic_level => BitsPerTopicLevel,
topic_index_bytes => TIBytes,
epoch_bits => EpochBits
}};
#{type := reference} ->
{emqx_ds_storage_reference, #{}}
end,
#{
backend => builtin,
backend => builtin_raft,
n_shards => NShards,
n_sites => NSites,
replication_factor => ReplFactor,
replication_options => maps:get(replication_options, Backend, #{}),
storage => Storage
storage => translate_layout(Layout)
}.
translate_builtin_local(
#{
backend := builtin_local,
n_shards := NShards,
layout := Layout
}
) ->
#{
backend => builtin_local,
n_shards => NShards,
storage => translate_layout(Layout)
}.
%%================================================================================
@ -83,24 +92,24 @@ schema() ->
ds_schema(#{
default =>
#{
<<"backend">> => builtin
<<"backend">> => ?DEFAULT_BACKEND
},
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(messages)
})}
].
fields(builtin) ->
%% Schema for the builtin backend:
fields(builtin_local) ->
%% Schema for the builtin_raft backend:
[
{backend,
sc(
builtin,
builtin_local,
#{
'readOnly' => true,
default => builtin,
default => builtin_local,
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(builtin_backend)
desc => ?DESC(backend_type)
}
)},
{'_config_handler',
@ -108,27 +117,32 @@ fields(builtin) ->
{module(), atom()},
#{
'readOnly' => true,
default => {?MODULE, translate_builtin},
default => {?MODULE, translate_builtin_local},
importance => ?IMPORTANCE_HIDDEN
}
)},
{data_dir,
)}
| common_builtin_fields()
];
fields(builtin_raft) ->
%% Schema for the builtin_raft backend:
[
{backend,
sc(
string(),
builtin_raft,
#{
mapping => "emqx_durable_storage.db_data_dir",
required => false,
'readOnly' => true,
default => builtin_raft,
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(builtin_data_dir)
desc => ?DESC(backend_type)
}
)},
{n_shards,
{'_config_handler',
sc(
pos_integer(),
{module(), atom()},
#{
default => 12,
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(builtin_n_shards)
'readOnly' => true,
default => {?MODULE, translate_builtin_raft},
importance => ?IMPORTANCE_HIDDEN
}
)},
%% TODO: Deprecate once cluster management and rebalancing is implemented.
@ -157,29 +171,10 @@ fields(builtin) ->
default => #{},
importance => ?IMPORTANCE_HIDDEN
}
)},
{local_write_buffer,
sc(
ref(builtin_local_write_buffer),
#{
importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(builtin_local_write_buffer)
}
)},
{layout,
sc(
hoconsc:union(builtin_layouts()),
#{
desc => ?DESC(builtin_layout),
importance => ?IMPORTANCE_MEDIUM,
default =>
#{
<<"type">> => wildcard_optimized
}
}
)}
| common_builtin_fields()
];
fields(builtin_local_write_buffer) ->
fields(builtin_write_buffer) ->
[
{max_items,
sc(
@ -188,7 +183,7 @@ fields(builtin_local_write_buffer) ->
default => 1000,
mapping => "emqx_durable_storage.egress_batch_size",
importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(builtin_local_write_buffer_max_items)
desc => ?DESC(builtin_write_buffer_max_items)
}
)},
{flush_interval,
@ -198,7 +193,7 @@ fields(builtin_local_write_buffer) ->
default => 100,
mapping => "emqx_durable_storage.egress_flush_interval",
importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(builtin_local_write_buffer_flush_interval)
desc => ?DESC(builtin_write_buffer_flush_interval)
}
)}
];
@ -239,6 +234,42 @@ fields(layout_builtin_wildcard_optimized) ->
}
)}
];
fields(layout_builtin_wildcard_optimized_v2) ->
[
{type,
sc(
wildcard_optimized_v2,
#{
'readOnly' => true,
default => wildcard_optimized_v2,
desc => ?DESC(layout_builtin_wildcard_optimized_type)
}
)},
{bytes_per_topic_level,
sc(
range(1, 16),
#{
default => 8,
importance => ?IMPORTANCE_HIDDEN
}
)},
{topic_index_bytes,
sc(
pos_integer(),
#{
default => 8,
importance => ?IMPORTANCE_HIDDEN
}
)},
{serialization_schema,
sc(
emqx_ds_msg_serializer:schema(),
#{
default => v1,
importance => ?IMPORTANCE_HIDDEN
}
)}
];
fields(layout_builtin_reference) ->
[
{type,
@ -247,17 +278,65 @@ fields(layout_builtin_reference) ->
#{
'readOnly' => true,
importance => ?IMPORTANCE_LOW,
default => reference,
desc => ?DESC(layout_builtin_reference_type)
}
)}
].
desc(builtin) ->
?DESC(builtin);
desc(builtin_local_write_buffer) ->
?DESC(builtin_local_write_buffer);
common_builtin_fields() ->
[
{data_dir,
sc(
string(),
#{
mapping => "emqx_durable_storage.db_data_dir",
required => false,
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(builtin_data_dir)
}
)},
{n_shards,
sc(
pos_integer(),
#{
default => 16,
importance => ?IMPORTANCE_MEDIUM,
desc => ?DESC(builtin_n_shards)
}
)},
{local_write_buffer,
sc(
ref(builtin_write_buffer),
#{
importance => ?IMPORTANCE_HIDDEN,
desc => ?DESC(builtin_write_buffer)
}
)},
{layout,
sc(
hoconsc:union(builtin_layouts()),
#{
desc => ?DESC(builtin_layout),
importance => ?IMPORTANCE_MEDIUM,
default =>
#{
<<"type">> => wildcard_optimized_v2
}
}
)}
].
desc(builtin_raft) ->
?DESC(builtin_raft);
desc(builtin_local) ->
?DESC(builtin_local);
desc(builtin_write_buffer) ->
?DESC(builtin_write_buffer);
desc(layout_builtin_wildcard_optimized) ->
?DESC(layout_builtin_wildcard_optimized);
desc(layout_builtin_wildcard_optimized_v2) ->
?DESC(layout_builtin_wildcard_optimized);
desc(layout_builtin_reference) ->
?DESC(layout_builtin_reference);
desc(_) ->
@ -267,12 +346,40 @@ desc(_) ->
%% Internal functions
%%================================================================================
translate_layout(
#{
type := wildcard_optimized_v2,
bytes_per_topic_level := BytesPerTopicLevel,
topic_index_bytes := TopicIndexBytes,
serialization_schema := SSchema
}
) ->
{emqx_ds_storage_skipstream_lts, #{
wildcard_hash_bytes => BytesPerTopicLevel,
topic_index_bytes => TopicIndexBytes,
serialization_schema => SSchema
}};
translate_layout(
#{
type := wildcard_optimized,
bits_per_topic_level := BitsPerTopicLevel,
epoch_bits := EpochBits,
topic_index_bytes := TIBytes
}
) ->
{emqx_ds_storage_bitfield_lts, #{
bits_per_topic_level => BitsPerTopicLevel,
topic_index_bytes => TIBytes,
epoch_bits => EpochBits
}};
translate_layout(#{type := reference}) ->
{emqx_ds_storage_reference, #{}}.
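%% A minimal sketch (illustration only; the example function name and the
%% literal values, taken from the schema defaults above, are assumptions) of
%% what the local backend's config handler produces for the new
%% wildcard_optimized_v2 layout:
translate_builtin_local_example() ->
    Layout = #{
        type => wildcard_optimized_v2,
        bytes_per_topic_level => 8,
        topic_index_bytes => 8,
        serialization_schema => v1
    },
    #{
        backend := builtin_local,
        n_shards := 16,
        storage := {emqx_ds_storage_skipstream_lts, #{wildcard_hash_bytes := 8}}
    } = translate_builtin_local(#{backend => builtin_local, n_shards => 16, layout => Layout}),
    ok.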
ds_schema(Options) ->
sc(
hoconsc:union([
ref(builtin)
| emqx_schema_hooks:injection_point('durable_storage.backends', [])
]),
hoconsc:union(
?BUILTIN_BACKENDS ++ emqx_schema_hooks:injection_point('durable_storage.backends', [])
),
Options
).
@ -281,7 +388,11 @@ builtin_layouts() ->
%% suitable for production use. However, it's very simple and
%% produces a very predictable replay order, which can be useful
%% for testing and debugging:
[ref(layout_builtin_wildcard_optimized), ref(layout_builtin_reference)].
[
ref(layout_builtin_wildcard_optimized_v2),
ref(layout_builtin_wildcard_optimized),
ref(layout_builtin_reference)
].
sc(Type, Meta) -> hoconsc:mk(Type, Meta).


@ -0,0 +1,148 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_external_broker).
-callback forward(emqx_types:delivery()) ->
emqx_types:publish_result().
-callback add_route(emqx_types:topic()) -> ok.
-callback delete_route(emqx_types:topic()) -> ok.
-callback add_shared_route(emqx_types:topic(), emqx_types:group()) -> ok.
-callback delete_shared_route(emqx_types:topic(), emqx_types:group()) -> ok.
-callback add_persistent_route(emqx_types:topic(), emqx_persistent_session_ds:id()) -> ok.
-callback delete_persistent_route(emqx_types:topic(), emqx_persistent_session_ds:id()) -> ok.
-type dest() :: term().
-export([
%% Registration
provider/0,
register_provider/1,
unregister_provider/1,
%% Forwarding
forward/1,
%% Routing updates
add_route/1,
delete_route/1,
add_shared_route/2,
delete_shared_route/2,
add_persistent_route/2,
delete_persistent_route/2,
add_persistent_shared_route/3,
delete_persistent_shared_route/3
]).
-export_type([dest/0]).
-include("logger.hrl").
-define(PROVIDER, {?MODULE, external_broker}).
-define(safe_with_provider(IfRegistered, IfNotRegistered),
case persistent_term:get(?PROVIDER, undefined) of
undefined ->
IfNotRegistered;
Provider ->
try
Provider:IfRegistered
catch
Err:Reason:St ->
?SLOG_THROTTLE(error, #{
msg => external_broker_crashed,
provider => Provider,
callback => ?FUNCTION_NAME,
stacktrace => St,
error => Err,
reason => Reason
}),
{error, Reason}
end
end
).
%% TODO: provider API copied from emqx_external_traces,
%% but it can be moved to a common module.
%%--------------------------------------------------------------------
%% Provider API
%%--------------------------------------------------------------------
-spec register_provider(module()) -> ok | {error, term()}.
register_provider(Module) when is_atom(Module) ->
case is_valid_provider(Module) of
true ->
persistent_term:put(?PROVIDER, Module);
false ->
{error, invalid_provider}
end.
-spec unregister_provider(module()) -> ok | {error, term()}.
unregister_provider(Module) ->
case persistent_term:get(?PROVIDER, undefined) of
Module ->
persistent_term:erase(?PROVIDER),
ok;
_ ->
{error, not_registered}
end.
-spec provider() -> module() | undefined.
provider() ->
persistent_term:get(?PROVIDER, undefined).
%%--------------------------------------------------------------------
%% Broker API
%%--------------------------------------------------------------------
forward(Delivery) ->
?safe_with_provider(?FUNCTION_NAME(Delivery), []).
add_route(Topic) ->
?safe_with_provider(?FUNCTION_NAME(Topic), ok).
delete_route(Topic) ->
?safe_with_provider(?FUNCTION_NAME(Topic), ok).
add_shared_route(Topic, Group) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group), ok).
delete_shared_route(Topic, Group) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group), ok).
add_persistent_route(Topic, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, ID), ok).
delete_persistent_route(Topic, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, ID), ok).
add_persistent_shared_route(Topic, Group, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group, ID), ok).
delete_persistent_shared_route(Topic, Group, ID) ->
?safe_with_provider(?FUNCTION_NAME(Topic, Group, ID), ok).
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
is_valid_provider(Module) ->
lists:all(
fun({F, A}) -> erlang:function_exported(Module, F, A) end,
?MODULE:behaviour_info(callbacks)
).
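%% A minimal sketch (hypothetical module, not shipped code) of a provider that
%% satisfies the behaviour above; only the callback names and arities come from
%% the -callback declarations, every body here is a placeholder.
-module(my_noop_external_broker).
-behaviour(emqx_external_broker).

-export([
    forward/1,
    add_route/1,
    delete_route/1,
    add_shared_route/2,
    delete_shared_route/2,
    add_persistent_route/2,
    delete_persistent_route/2
]).

forward(_Delivery) -> [].
add_route(_Topic) -> ok.
delete_route(_Topic) -> ok.
add_shared_route(_Topic, _Group) -> ok.
delete_shared_route(_Topic, _Group) -> ok.
add_persistent_route(_Topic, _SessionId) -> ok.
delete_persistent_route(_Topic, _SessionId) -> ok.

%% Registration, e.g. from the provider application's start/2 callback:
%%   ok = emqx_external_broker:register_provider(my_noop_external_broker).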


@ -29,11 +29,12 @@
parse/2,
serialize_fun/0,
serialize_fun/1,
serialize_opts/0,
initial_serialize_opts/1,
serialize_opts/1,
serialize_pkt/2,
serialize/1,
serialize/2
serialize/2,
serialize/3
]).
-export([describe_state/1]).
@ -84,7 +85,7 @@
-define(MULTIPLIER_MAX, 16#200000).
-dialyzer({no_match, [serialize_utf8_string/2]}).
-dialyzer({no_match, [serialize_utf8_string/3]}).
%% @doc Describe state for logging.
describe_state(?NONE(_Opts)) ->
@ -266,28 +267,50 @@ packet(Header, Variable) ->
packet(Header, Variable, Payload) ->
#mqtt_packet{header = Header, variable = Variable, payload = Payload}.
parse_connect(FrameBin, StrictMode) ->
{ProtoName, Rest} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name),
case ProtoName of
<<"MQTT">> ->
ok;
<<"MQIsdp">> ->
ok;
_ ->
%% from spec: the server MAY send disconnect with reason code 0x84
%% we chose to close socket because the client is likely not talking MQTT anyway
?PARSE_ERR(#{
cause => invalid_proto_name,
expected => <<"'MQTT' or 'MQIsdp'">>,
received => ProtoName
})
end,
parse_connect2(ProtoName, Rest, StrictMode).
parse_connect(FrameBin, Options = #{strict_mode := StrictMode}) ->
{ProtoName, Rest0} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name),
%% There is no need to parse and check proto_ver if proto_name is invalid, so validate proto_name first.
%% The consistency check between the `proto_name` and `proto_ver` fields is done later in `emqx_packet:check_proto_ver/2`
_ = validate_proto_name(ProtoName),
{IsBridge, ProtoVer, Rest2} = parse_connect_proto_ver(Rest0),
NOptions = Options#{version => ProtoVer},
try
do_parse_connect(ProtoName, IsBridge, ProtoVer, Rest2, StrictMode)
catch
throw:{?FRAME_PARSE_ERROR, ReasonM} when is_map(ReasonM) ->
?PARSE_ERR(
ReasonM#{
proto_ver => ProtoVer,
proto_name => ProtoName,
parse_state => ?NONE(NOptions)
}
);
throw:{?FRAME_PARSE_ERROR, Reason} ->
?PARSE_ERR(
#{
cause => Reason,
proto_ver => ProtoVer,
proto_name => ProtoName,
parse_state => ?NONE(NOptions)
}
)
end.
parse_connect2(
do_parse_connect(
ProtoName,
<<BridgeTag:4, ProtoVer:4, UsernameFlagB:1, PasswordFlagB:1, WillRetainB:1, WillQoS:2,
WillFlagB:1, CleanStart:1, Reserved:1, KeepAlive:16/big, Rest2/binary>>,
IsBridge,
ProtoVer,
<<
UsernameFlagB:1,
PasswordFlagB:1,
WillRetainB:1,
WillQoS:2,
WillFlagB:1,
CleanStart:1,
Reserved:1,
KeepAlive:16/big,
Rest/binary
>>,
StrictMode
) ->
_ = validate_connect_reserved(Reserved),
@ -302,14 +325,14 @@ parse_connect2(
UsernameFlag = bool(UsernameFlagB),
PasswordFlag = bool(PasswordFlagB)
),
{Properties, Rest3} = parse_properties(Rest2, ProtoVer, StrictMode),
{Properties, Rest3} = parse_properties(Rest, ProtoVer, StrictMode),
{ClientId, Rest4} = parse_utf8_string_with_cause(Rest3, StrictMode, invalid_clientid),
ConnPacket = #mqtt_packet_connect{
proto_name = ProtoName,
proto_ver = ProtoVer,
%% For bridge mode, non-standard implementation
%% Invented by mosquitto, named 'try_private': https://mosquitto.org/man/mosquitto-conf-5.html
is_bridge = (BridgeTag =:= 8),
is_bridge = IsBridge,
clean_start = bool(CleanStart),
will_flag = WillFlag,
will_qos = WillQoS,
@ -342,16 +365,16 @@ parse_connect2(
unexpected_trailing_bytes => size(Rest7)
})
end;
parse_connect2(_ProtoName, Bin, _StrictMode) ->
%% sent less than 32 bytes
do_parse_connect(_ProtoName, _IsBridge, _ProtoVer, Bin, _StrictMode) ->
%% sent less than 24 bytes
?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
parse_packet(
#mqtt_packet_header{type = ?CONNECT},
FrameBin,
#{strict_mode := StrictMode}
Options
) ->
parse_connect(FrameBin, StrictMode);
parse_connect(FrameBin, Options);
parse_packet(
#mqtt_packet_header{type = ?CONNACK},
<<AckFlags:8, ReasonCode:8, Rest/binary>>,
@ -515,6 +538,12 @@ parse_packet_id(<<PacketId:16/big, Rest/binary>>) ->
parse_packet_id(_) ->
?PARSE_ERR(invalid_packet_id).
parse_connect_proto_ver(<<BridgeTag:4, ProtoVer:4, Rest/binary>>) ->
{_IsBridge = (BridgeTag =:= 8), ProtoVer, Rest};
parse_connect_proto_ver(Bin) ->
%% sent less than 1 byte or empty
?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
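%% A small sketch (illustration only; the example function name is an
%% assumption) of the byte layout handled above: the high nibble of the first
%% variable-header byte after the protocol name is the bridge tag (8 means
%% mosquitto-style bridge mode), the low nibble is the protocol level.
parse_connect_proto_ver_example() ->
    {false, 5, <<>>} = parse_connect_proto_ver(<<0:4, 5:4>>),
    {true, 4, <<0, 60>>} = parse_connect_proto_ver(<<8:4, 4:4, 0, 60>>),
    ok.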
parse_properties(Bin, Ver, _StrictMode) when Ver =/= ?MQTT_PROTO_V5 ->
{#{}, Bin};
%% TODO: version mess?
@ -725,43 +754,53 @@ serialize_fun() -> serialize_fun(?DEFAULT_OPTIONS).
serialize_fun(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) ->
MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE),
serialize_fun(#{version => ProtoVer, max_size => MaxSize});
serialize_fun(#{version := Ver, max_size := MaxSize}) ->
serialize_fun(#{version => ProtoVer, max_size => MaxSize, strict_mode => false});
serialize_fun(#{version := Ver, max_size := MaxSize, strict_mode := StrictMode}) ->
fun(Packet) ->
IoData = serialize(Packet, Ver),
IoData = serialize(Packet, Ver, StrictMode),
case is_too_large(IoData, MaxSize) of
true -> <<>>;
false -> IoData
end
end.
serialize_opts() ->
?DEFAULT_OPTIONS.
initial_serialize_opts(Opts) ->
maps:merge(?DEFAULT_OPTIONS, Opts).
serialize_opts(?NONE(Options)) ->
maps:merge(?DEFAULT_OPTIONS, Options);
serialize_opts(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) ->
MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE),
#{version => ProtoVer, max_size => MaxSize}.
#{version => ProtoVer, max_size => MaxSize, strict_mode => false}.
serialize_pkt(Packet, #{version := Ver, max_size := MaxSize}) ->
IoData = serialize(Packet, Ver),
serialize_pkt(Packet, #{version := Ver, max_size := MaxSize, strict_mode := StrictMode}) ->
IoData = serialize(Packet, Ver, StrictMode),
case is_too_large(IoData, MaxSize) of
true -> <<>>;
false -> IoData
end.
-spec serialize(emqx_types:packet()) -> iodata().
serialize(Packet) -> serialize(Packet, ?MQTT_PROTO_V4).
serialize(Packet) -> serialize(Packet, ?MQTT_PROTO_V4, false).
-spec serialize(emqx_types:packet(), emqx_types:proto_ver()) -> iodata().
serialize(Packet, Ver) -> serialize(Packet, Ver, false).
-spec serialize(emqx_types:packet(), emqx_types:proto_ver(), boolean()) -> iodata().
serialize(
#mqtt_packet{
header = Header,
variable = Variable,
payload = Payload
},
Ver
Ver,
StrictMode
) ->
serialize(Header, serialize_variable(Variable, Ver), serialize_payload(Payload)).
serialize(
Header,
serialize_variable(Variable, Ver, StrictMode),
serialize_payload(Payload),
StrictMode
).
serialize(
#mqtt_packet_header{
@ -771,7 +810,8 @@ serialize(
retain = Retain
},
VariableBin,
PayloadBin
PayloadBin,
_StrictMode
) when
?CONNECT =< Type andalso Type =< ?AUTH
->
@ -803,7 +843,8 @@ serialize_variable(
username = Username,
password = Password
},
_Ver
_Ver,
StrictMode
) ->
[
serialize_binary_data(ProtoName),
@ -821,20 +862,20 @@ serialize_variable(
0:1,
KeepAlive:16/big-unsigned-integer
>>,
serialize_properties(Properties, ProtoVer),
serialize_utf8_string(ClientId),
serialize_properties(Properties, ProtoVer, StrictMode),
serialize_utf8_string(ClientId, StrictMode),
case WillFlag of
true ->
[
serialize_properties(WillProps, ProtoVer),
serialize_utf8_string(WillTopic),
serialize_properties(WillProps, ProtoVer, StrictMode),
serialize_utf8_string(WillTopic, StrictMode),
serialize_binary_data(WillPayload)
];
false ->
<<>>
end,
serialize_utf8_string(Username, true),
serialize_utf8_string(Password, true)
serialize_utf8_string(Username, true, StrictMode),
serialize_utf8_string(Password, true, StrictMode)
];
serialize_variable(
#mqtt_packet_connack{
@ -842,26 +883,28 @@ serialize_variable(
reason_code = ReasonCode,
properties = Properties
},
Ver
Ver,
StrictMode
) ->
[AckFlags, ReasonCode, serialize_properties(Properties, Ver)];
[AckFlags, ReasonCode, serialize_properties(Properties, Ver, StrictMode)];
serialize_variable(
#mqtt_packet_publish{
topic_name = TopicName,
packet_id = PacketId,
properties = Properties
},
Ver
Ver,
StrictMode
) ->
[
serialize_utf8_string(TopicName),
serialize_utf8_string(TopicName, StrictMode),
case PacketId of
undefined -> <<>>;
_ -> <<PacketId:16/big-unsigned-integer>>
end,
serialize_properties(Properties, Ver)
serialize_properties(Properties, Ver, StrictMode)
];
serialize_variable(#mqtt_packet_puback{packet_id = PacketId}, Ver) when
serialize_variable(#mqtt_packet_puback{packet_id = PacketId}, Ver, _StrictMode) when
Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4
->
<<PacketId:16/big-unsigned-integer>>;
@ -871,12 +914,13 @@ serialize_variable(
reason_code = ReasonCode,
properties = Properties
},
Ver = ?MQTT_PROTO_V5
Ver = ?MQTT_PROTO_V5,
StrictMode
) ->
[
<<PacketId:16/big-unsigned-integer>>,
ReasonCode,
serialize_properties(Properties, Ver)
serialize_properties(Properties, Ver, StrictMode)
];
serialize_variable(
#mqtt_packet_subscribe{
@ -884,12 +928,13 @@ serialize_variable(
properties = Properties,
topic_filters = TopicFilters
},
Ver
Ver,
StrictMode
) ->
[
<<PacketId:16/big-unsigned-integer>>,
serialize_properties(Properties, Ver),
serialize_topic_filters(subscribe, TopicFilters, Ver)
serialize_properties(Properties, Ver, StrictMode),
serialize_topic_filters(subscribe, TopicFilters, Ver, StrictMode)
];
serialize_variable(
#mqtt_packet_suback{
@ -897,11 +942,12 @@ serialize_variable(
properties = Properties,
reason_codes = ReasonCodes
},
Ver
Ver,
StrictMode
) ->
[
<<PacketId:16/big-unsigned-integer>>,
serialize_properties(Properties, Ver),
serialize_properties(Properties, Ver, StrictMode),
serialize_reason_codes(ReasonCodes)
];
serialize_variable(
@ -910,12 +956,13 @@ serialize_variable(
properties = Properties,
topic_filters = TopicFilters
},
Ver
Ver,
StrictMode
) ->
[
<<PacketId:16/big-unsigned-integer>>,
serialize_properties(Properties, Ver),
serialize_topic_filters(unsubscribe, TopicFilters, Ver)
serialize_properties(Properties, Ver, StrictMode),
serialize_topic_filters(unsubscribe, TopicFilters, Ver, StrictMode)
];
serialize_variable(
#mqtt_packet_unsuback{
@ -923,14 +970,15 @@ serialize_variable(
properties = Properties,
reason_codes = ReasonCodes
},
Ver
Ver,
StrictMode
) ->
[
<<PacketId:16/big-unsigned-integer>>,
serialize_properties(Properties, Ver),
serialize_properties(Properties, Ver, StrictMode),
serialize_reason_codes(ReasonCodes)
];
serialize_variable(#mqtt_packet_disconnect{}, Ver) when
serialize_variable(#mqtt_packet_disconnect{}, Ver, _StrictMode) when
Ver == ?MQTT_PROTO_V3; Ver == ?MQTT_PROTO_V4
->
<<>>;
@ -939,110 +987,115 @@ serialize_variable(
reason_code = ReasonCode,
properties = Properties
},
Ver = ?MQTT_PROTO_V5
Ver = ?MQTT_PROTO_V5,
StrictMode
) ->
[ReasonCode, serialize_properties(Properties, Ver)];
serialize_variable(#mqtt_packet_disconnect{}, _Ver) ->
[ReasonCode, serialize_properties(Properties, Ver, StrictMode)];
serialize_variable(#mqtt_packet_disconnect{}, _Ver, _StrictMode) ->
<<>>;
serialize_variable(
#mqtt_packet_auth{
reason_code = ReasonCode,
properties = Properties
},
Ver = ?MQTT_PROTO_V5
Ver = ?MQTT_PROTO_V5,
StrictMode
) ->
[ReasonCode, serialize_properties(Properties, Ver)];
serialize_variable(PacketId, ?MQTT_PROTO_V3) when is_integer(PacketId) ->
[ReasonCode, serialize_properties(Properties, Ver, StrictMode)];
serialize_variable(PacketId, ?MQTT_PROTO_V3, _StrictMode) when is_integer(PacketId) ->
<<PacketId:16/big-unsigned-integer>>;
serialize_variable(PacketId, ?MQTT_PROTO_V4) when is_integer(PacketId) ->
serialize_variable(PacketId, ?MQTT_PROTO_V4, _StrictMode) when is_integer(PacketId) ->
<<PacketId:16/big-unsigned-integer>>;
serialize_variable(undefined, _Ver) ->
serialize_variable(undefined, _Ver, _StrictMode) ->
<<>>.
serialize_payload(undefined) -> <<>>;
serialize_payload(Bin) -> Bin.
serialize_properties(_Props, Ver) when Ver =/= ?MQTT_PROTO_V5 ->
serialize_properties(_Props, Ver, _StrictMode) when Ver =/= ?MQTT_PROTO_V5 ->
<<>>;
serialize_properties(Props, ?MQTT_PROTO_V5) ->
serialize_properties(Props).
serialize_properties(Props, ?MQTT_PROTO_V5, StrictMode) ->
serialize_properties(Props, StrictMode).
serialize_properties(undefined) ->
serialize_properties(undefined, _StrictMode) ->
<<0>>;
serialize_properties(Props) when map_size(Props) == 0 ->
serialize_properties(Props, _StrictMode) when map_size(Props) == 0 ->
<<0>>;
serialize_properties(Props) when is_map(Props) ->
Bin = <<<<(serialize_property(Prop, Val))/binary>> || {Prop, Val} <- maps:to_list(Props)>>,
serialize_properties(Props, StrictMode) when is_map(Props) ->
Bin = <<
<<(serialize_property(Prop, Val, StrictMode))/binary>>
|| {Prop, Val} <- maps:to_list(Props)
>>,
[serialize_variable_byte_integer(byte_size(Bin)), Bin].
serialize_property(_, Disabled) when Disabled =:= disabled; Disabled =:= undefined ->
serialize_property(_, Disabled, _StrictMode) when Disabled =:= disabled; Disabled =:= undefined ->
<<>>;
serialize_property(internal_extra, _) ->
serialize_property(internal_extra, _, _StrictMode) ->
<<>>;
serialize_property('Payload-Format-Indicator', Val) ->
serialize_property('Payload-Format-Indicator', Val, _StrictMode) ->
<<16#01, Val>>;
serialize_property('Message-Expiry-Interval', Val) ->
serialize_property('Message-Expiry-Interval', Val, _StrictMode) ->
<<16#02, Val:32/big>>;
serialize_property('Content-Type', Val) ->
<<16#03, (serialize_utf8_string(Val))/binary>>;
serialize_property('Response-Topic', Val) ->
<<16#08, (serialize_utf8_string(Val))/binary>>;
serialize_property('Correlation-Data', Val) ->
serialize_property('Content-Type', Val, StrictMode) ->
<<16#03, (serialize_utf8_string(Val, StrictMode))/binary>>;
serialize_property('Response-Topic', Val, StrictMode) ->
<<16#08, (serialize_utf8_string(Val, StrictMode))/binary>>;
serialize_property('Correlation-Data', Val, _StrictMode) ->
<<16#09, (byte_size(Val)):16, Val/binary>>;
serialize_property('Subscription-Identifier', Val) ->
serialize_property('Subscription-Identifier', Val, _StrictMode) ->
<<16#0B, (serialize_variable_byte_integer(Val))/binary>>;
serialize_property('Session-Expiry-Interval', Val) ->
serialize_property('Session-Expiry-Interval', Val, _StrictMode) ->
<<16#11, Val:32/big>>;
serialize_property('Assigned-Client-Identifier', Val) ->
<<16#12, (serialize_utf8_string(Val))/binary>>;
serialize_property('Server-Keep-Alive', Val) ->
serialize_property('Assigned-Client-Identifier', Val, StrictMode) ->
<<16#12, (serialize_utf8_string(Val, StrictMode))/binary>>;
serialize_property('Server-Keep-Alive', Val, _StrictMode) ->
<<16#13, Val:16/big>>;
serialize_property('Authentication-Method', Val) ->
<<16#15, (serialize_utf8_string(Val))/binary>>;
serialize_property('Authentication-Data', Val) ->
serialize_property('Authentication-Method', Val, StrictMode) ->
<<16#15, (serialize_utf8_string(Val, StrictMode))/binary>>;
serialize_property('Authentication-Data', Val, _StrictMode) ->
<<16#16, (iolist_size(Val)):16, Val/binary>>;
serialize_property('Request-Problem-Information', Val) ->
serialize_property('Request-Problem-Information', Val, _StrictMode) ->
<<16#17, Val>>;
serialize_property('Will-Delay-Interval', Val) ->
serialize_property('Will-Delay-Interval', Val, _StrictMode) ->
<<16#18, Val:32/big>>;
serialize_property('Request-Response-Information', Val) ->
serialize_property('Request-Response-Information', Val, _StrictMode) ->
<<16#19, Val>>;
serialize_property('Response-Information', Val) ->
<<16#1A, (serialize_utf8_string(Val))/binary>>;
serialize_property('Server-Reference', Val) ->
<<16#1C, (serialize_utf8_string(Val))/binary>>;
serialize_property('Reason-String', Val) ->
<<16#1F, (serialize_utf8_string(Val))/binary>>;
serialize_property('Receive-Maximum', Val) ->
serialize_property('Response-Information', Val, StrictMode) ->
<<16#1A, (serialize_utf8_string(Val, StrictMode))/binary>>;
serialize_property('Server-Reference', Val, StrictMode) ->
<<16#1C, (serialize_utf8_string(Val, StrictMode))/binary>>;
serialize_property('Reason-String', Val, StrictMode) ->
<<16#1F, (serialize_utf8_string(Val, StrictMode))/binary>>;
serialize_property('Receive-Maximum', Val, _StrictMode) ->
<<16#21, Val:16/big>>;
serialize_property('Topic-Alias-Maximum', Val) ->
serialize_property('Topic-Alias-Maximum', Val, _StrictMode) ->
<<16#22, Val:16/big>>;
serialize_property('Topic-Alias', Val) ->
serialize_property('Topic-Alias', Val, _StrictMode) ->
<<16#23, Val:16/big>>;
serialize_property('Maximum-QoS', Val) ->
serialize_property('Maximum-QoS', Val, _StrictMode) ->
<<16#24, Val>>;
serialize_property('Retain-Available', Val) ->
serialize_property('Retain-Available', Val, _StrictMode) ->
<<16#25, Val>>;
serialize_property('User-Property', {Key, Val}) ->
<<16#26, (serialize_utf8_pair({Key, Val}))/binary>>;
serialize_property('User-Property', Props) when is_list(Props) ->
serialize_property('User-Property', {Key, Val}, StrictMode) ->
<<16#26, (serialize_utf8_pair(Key, Val, StrictMode))/binary>>;
serialize_property('User-Property', Props, StrictMode) when is_list(Props) ->
<<
<<(serialize_property('User-Property', {Key, Val}))/binary>>
<<(serialize_property('User-Property', {Key, Val}, StrictMode))/binary>>
|| {Key, Val} <- Props
>>;
serialize_property('Maximum-Packet-Size', Val) ->
serialize_property('Maximum-Packet-Size', Val, _StrictMode) ->
<<16#27, Val:32/big>>;
serialize_property('Wildcard-Subscription-Available', Val) ->
serialize_property('Wildcard-Subscription-Available', Val, _StrictMode) ->
<<16#28, Val>>;
serialize_property('Subscription-Identifier-Available', Val) ->
serialize_property('Subscription-Identifier-Available', Val, _StrictMode) ->
<<16#29, Val>>;
serialize_property('Shared-Subscription-Available', Val) ->
serialize_property('Shared-Subscription-Available', Val, _StrictMode) ->
<<16#2A, Val>>.
serialize_topic_filters(subscribe, TopicFilters, ?MQTT_PROTO_V5) ->
serialize_topic_filters(subscribe, TopicFilters, ?MQTT_PROTO_V5, StrictMode) ->
<<
<<
(serialize_utf8_string(Topic))/binary,
(serialize_utf8_string(Topic, StrictMode))/binary,
?RESERVED:2,
Rh:2,
(flag(Rap)):1,
@ -1051,37 +1104,42 @@ serialize_topic_filters(subscribe, TopicFilters, ?MQTT_PROTO_V5) ->
>>
|| {Topic, #{rh := Rh, rap := Rap, nl := Nl, qos := QoS}} <- TopicFilters
>>;
serialize_topic_filters(subscribe, TopicFilters, _Ver) ->
serialize_topic_filters(subscribe, TopicFilters, _Ver, StrictMode) ->
<<
<<(serialize_utf8_string(Topic))/binary, ?RESERVED:6, QoS:2>>
<<(serialize_utf8_string(Topic, StrictMode))/binary, ?RESERVED:6, QoS:2>>
|| {Topic, #{qos := QoS}} <- TopicFilters
>>;
serialize_topic_filters(unsubscribe, TopicFilters, _Ver) ->
<<<<(serialize_utf8_string(Topic))/binary>> || Topic <- TopicFilters>>.
serialize_topic_filters(unsubscribe, TopicFilters, _Ver, StrictMode) ->
<<<<(serialize_utf8_string(Topic, StrictMode))/binary>> || Topic <- TopicFilters>>.
serialize_reason_codes(undefined) ->
<<>>;
serialize_reason_codes(ReasonCodes) when is_list(ReasonCodes) ->
<<<<Code>> || Code <- ReasonCodes>>.
serialize_utf8_pair({Name, Value}) ->
<<(serialize_utf8_string(Name))/binary, (serialize_utf8_string(Value))/binary>>.
serialize_utf8_pair(Name, Value, StrictMode) ->
<<
(serialize_utf8_string(Name, StrictMode))/binary,
(serialize_utf8_string(Value, StrictMode))/binary
>>.
serialize_binary_data(Bin) ->
[<<(byte_size(Bin)):16/big-unsigned-integer>>, Bin].
serialize_utf8_string(undefined, false) ->
serialize_utf8_string(undefined, false, _StrictMode) ->
?SERIALIZE_ERR(utf8_string_undefined);
serialize_utf8_string(undefined, true) ->
serialize_utf8_string(undefined, true, _StrictMode) ->
<<>>;
serialize_utf8_string(String, _AllowNull) ->
serialize_utf8_string(String).
serialize_utf8_string(String, _AllowNull, StrictMode) ->
serialize_utf8_string(String, StrictMode).
serialize_utf8_string(String) ->
serialize_utf8_string(String, true) ->
StringBin = unicode:characters_to_binary(String),
Len = byte_size(StringBin),
serialize_utf8_string(StringBin, false);
serialize_utf8_string(String, false) ->
Len = byte_size(String),
true = (Len =< 16#ffff),
<<Len:16/big, StringBin/binary>>.
<<Len:16/big, String/binary>>.
serialize_remaining_len(I) ->
serialize_variable_byte_integer(I).
@ -1129,18 +1187,34 @@ validate_subqos([3 | _]) -> ?PARSE_ERR(bad_subqos);
validate_subqos([_ | T]) -> validate_subqos(T);
validate_subqos([]) -> ok.
%% from spec: the server MAY send disconnect with reason code 0x84
%% we chose to close socket because the client is likely not talking MQTT anyway
validate_proto_name(<<"MQTT">>) ->
ok;
validate_proto_name(<<"MQIsdp">>) ->
ok;
validate_proto_name(ProtoName) ->
?PARSE_ERR(#{
cause => invalid_proto_name,
expected => <<"'MQTT' or 'MQIsdp'">>,
received => ProtoName
}).
%% MQTT-v3.1.1-[MQTT-3.1.2-3], MQTT-v5.0-[MQTT-3.1.2-3]
-compile({inline, [validate_connect_reserved/1]}).
validate_connect_reserved(0) -> ok;
validate_connect_reserved(1) -> ?PARSE_ERR(reserved_connect_flag).
-compile({inline, [validate_connect_will/3]}).
%% MQTT-v3.1.1-[MQTT-3.1.2-13], MQTT-v5.0-[MQTT-3.1.2-11]
validate_connect_will(false, _, WillQos) when WillQos > 0 -> ?PARSE_ERR(invalid_will_qos);
validate_connect_will(false, _, WillQoS) when WillQoS > 0 -> ?PARSE_ERR(invalid_will_qos);
%% MQTT-v3.1.1-[MQTT-3.1.2-14], MQTT-v5.0-[MQTT-3.1.2-12]
validate_connect_will(true, _, WillQoS) when WillQoS > 2 -> ?PARSE_ERR(invalid_will_qos);
%% MQTT-v3.1.1-[MQTT-3.1.2-15], MQTT-v5.0-[MQTT-3.1.2-13]
validate_connect_will(false, WillRetain, _) when WillRetain -> ?PARSE_ERR(invalid_will_retain);
validate_connect_will(_, _, _) -> ok.
-compile({inline, [validate_connect_password_flag/4]}).
%% MQTT-v3.1
%% Username flag and password flag are not strongly related
%% https://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html#connect
@ -1155,6 +1229,7 @@ validate_connect_password_flag(true, ?MQTT_PROTO_V5, _, _) ->
validate_connect_password_flag(_, _, _, _) ->
ok.
-compile({inline, [bool/1]}).
bool(0) -> false;
bool(1) -> true.
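%% A minimal sketch (illustration only; the example function name is an
%% assumption) of the widened serializer API: the new third argument is the
%% strict_mode flag threaded through the clauses above. In strict mode UTF-8
%% strings are re-encoded via unicode:characters_to_binary/1 before the length
%% prefix is computed; otherwise the binary is used as-is. For a packet without
%% UTF-8 strings both modes produce the same bytes:
serialize_example() ->
    Packet = ?DISCONNECT_PACKET(?RC_SUCCESS),
    Lenient = iolist_to_binary(emqx_frame:serialize(Packet, ?MQTT_PROTO_V5, false)),
    Strict = iolist_to_binary(emqx_frame:serialize(Packet, ?MQTT_PROTO_V5, true)),
    Lenient = Strict,
    ok.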


@ -30,7 +30,6 @@
-export([
init/1,
run/2,
run/3,
info/1,
reset/1
@ -62,12 +61,7 @@ init(#{count := Count, bytes := Bytes}) ->
Oct = [{oct, {Bytes, Bytes}} || ?ENABLED(Bytes)],
?GCS(maps:from_list(Cnt ++ Oct)).
%% @doc Try to run GC based on reduntions of count or bytes.
-spec run(#{cnt := pos_integer(), oct := pos_integer()}, gc_state()) ->
{boolean(), gc_state()}.
run(#{cnt := Cnt, oct := Oct}, GcSt) ->
run(Cnt, Oct, GcSt).
%% @doc Try to run GC based on reductions of count or bytes.
-spec run(pos_integer(), pos_integer(), gc_state()) ->
{boolean(), gc_state()}.
run(Cnt, Oct, ?GCS(St)) ->


@ -64,6 +64,17 @@
-export_type([listener_id/0]).
-dialyzer(
{no_unknown, [
is_running/3,
current_conns/3,
do_stop_listener/3,
do_start_listener/4,
do_update_listener/4,
quic_listener_conf_rollback/3
]}
).
-type listener_id() :: atom() | binary().
-type listener_type() :: tcp | ssl | ws | wss | quic | dtls.
@ -421,7 +432,7 @@ do_start_listener(Type, Name, Id, #{bind := ListenOn} = Opts) when ?ESOCKD_LISTE
esockd:open(
Id,
ListenOn,
merge_default(esockd_opts(Id, Type, Name, Opts))
merge_default(esockd_opts(Id, Type, Name, Opts, _OldOpts = undefined))
);
%% Start MQTT/WS listener
do_start_listener(Type, Name, Id, Opts) when ?COWBOY_LISTENER(Type) ->
@ -465,7 +476,7 @@ do_update_listener(Type, Name, OldConf, NewConf = #{bind := ListenOn}) when
Id = listener_id(Type, Name),
case maps:get(bind, OldConf) of
ListenOn ->
esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf));
esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf, OldConf));
_Different ->
%% TODO
%% Again, we're not strictly required to drop live connections in this case.
@ -577,7 +588,7 @@ perform_listener_change(update, {{Type, Name, ConfOld}, {_, _, ConfNew}}) ->
perform_listener_change(stop, {Type, Name, Conf}) ->
stop_listener(Type, Name, Conf).
esockd_opts(ListenerId, Type, Name, Opts0) ->
esockd_opts(ListenerId, Type, Name, Opts0, OldOpts) ->
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
Limiter = limiter(Opts0),
Opts2 =
@ -609,7 +620,7 @@ esockd_opts(ListenerId, Type, Name, Opts0) ->
tcp ->
Opts3#{tcp_options => tcp_opts(Opts0)};
ssl ->
OptsWithCRL = inject_crl_config(Opts0),
OptsWithCRL = inject_crl_config(Opts0, OldOpts),
OptsWithSNI = inject_sni_fun(ListenerId, OptsWithCRL),
OptsWithRootFun = inject_root_fun(OptsWithSNI),
OptsWithVerifyFun = inject_verify_fun(OptsWithRootFun),
@ -985,7 +996,7 @@ inject_sni_fun(_ListenerId, Conf) ->
Conf.
inject_crl_config(
Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}
Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}, _OldOpts
) ->
HTTPTimeout = emqx_config:get([crl_cache, http_timeout], timer:seconds(15)),
Conf#{
@ -995,7 +1006,16 @@ inject_crl_config(
crl_cache => {emqx_ssl_crl_cache, {internal, [{http, HTTPTimeout}]}}
}
};
inject_crl_config(Conf) ->
inject_crl_config(#{ssl_options := SSLOpts0} = Conf0, #{} = OldOpts) ->
%% Note: we must set the CRL options to `undefined' to unset them. Otherwise,
%% when CRL checking was previously enabled, `esockd' will retain the stale
%% options when `esockd:merge_opts/2' is called.
WasEnabled = emqx_utils_maps:deep_get([ssl_options, enable_crl_check], OldOpts, false),
Undefine = fun(Acc, K) -> emqx_utils_maps:put_if(Acc, K, undefined, WasEnabled) end,
SSLOpts1 = Undefine(SSLOpts0, crl_check),
SSLOpts = Undefine(SSLOpts1, crl_cache),
Conf0#{ssl_options := SSLOpts};
inject_crl_config(Conf, undefined = _OldOpts) ->
Conf.
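%% A minimal sketch (illustration only; function and variable names are
%% assumptions) of the unset-on-disable behaviour above: when CRL checking was
%% enabled before and is now switched off, both keys are forced to `undefined'
%% so that esockd:merge_opts/2 cannot carry the stale values over.
crl_undefine_example() ->
    OldOpts = #{ssl_options => #{enable_crl_check => true}},
    SSLOpts0 = #{verify => verify_peer},
    WasEnabled = emqx_utils_maps:deep_get([ssl_options, enable_crl_check], OldOpts, false),
    Undefine = fun(Acc, K) -> emqx_utils_maps:put_if(Acc, K, undefined, WasEnabled) end,
    #{verify := verify_peer, crl_check := undefined, crl_cache := undefined} =
        Undefine(Undefine(SSLOpts0, crl_check), crl_cache),
    ok.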
maybe_unregister_ocsp_stapling_refresh(
@ -1018,7 +1038,6 @@ ensure_max_conns(<<"infinity">>) -> <<"infinity">>;
ensure_max_conns(MaxConn) when is_binary(MaxConn) -> binary_to_integer(MaxConn);
ensure_max_conns(MaxConn) -> MaxConn.
-spec quic_listen_on(X :: any()) -> quicer:listen_on().
quic_listen_on(Bind) ->
case Bind of
{Addr, Port} when tuple_size(Addr) == 4 ->


@ -25,7 +25,7 @@
-export([start_link/0]).
%% throttler API
-export([allow/1]).
-export([allow/2]).
%% gen_server callbacks
-export([
@ -40,23 +40,29 @@
-define(SEQ_ID(Msg), {?MODULE, Msg}).
-define(NEW_SEQ, atomics:new(1, [{signed, false}])).
-define(GET_SEQ(Msg), persistent_term:get(?SEQ_ID(Msg), undefined)).
-define(ERASE_SEQ(Msg), persistent_term:erase(?SEQ_ID(Msg))).
-define(RESET_SEQ(SeqRef), atomics:put(SeqRef, 1, 0)).
-define(INC_SEQ(SeqRef), atomics:add(SeqRef, 1, 1)).
-define(GET_DROPPED(SeqRef), atomics:get(SeqRef, 1) - 1).
-define(IS_ALLOWED(SeqRef), atomics:add_get(SeqRef, 1, 1) =:= 1).
-define(NEW_THROTTLE(Msg, SeqRef), persistent_term:put(?SEQ_ID(Msg), SeqRef)).
-define(MSGS_LIST, emqx:get_config([log, throttling, msgs], [])).
-define(TIME_WINDOW_MS, timer:seconds(emqx:get_config([log, throttling, time_window], 60))).
-spec allow(atom()) -> boolean().
allow(Msg) when is_atom(Msg) ->
%% @doc Check whether a throttled log message is allowed to pass down to the logger this time.
%% `Msg' must be an atom. The second argument `UniqueKey' should be `undefined'
%% for predefined message IDs; for relatively static resources created from
%% configuration (such as data integration resource IDs), `UniqueKey' should be
%% a `binary()'.
-spec allow(atom(), undefined | binary()) -> boolean().
allow(Msg, UniqueKey) when
is_atom(Msg) andalso (is_binary(UniqueKey) orelse UniqueKey =:= undefined)
->
case emqx_logger:get_primary_log_level() of
debug ->
true;
_ ->
do_allow(Msg)
do_allow(Msg, UniqueKey)
end.
-spec start_link() -> startlink_ret().
@ -68,7 +74,8 @@ start_link() ->
%%--------------------------------------------------------------------
init([]) ->
ok = lists:foreach(fun(Msg) -> ?NEW_THROTTLE(Msg, ?NEW_SEQ) end, ?MSGS_LIST),
process_flag(trap_exit, true),
ok = lists:foreach(fun new_throttler/1, ?MSGS_LIST),
CurrentPeriodMs = ?TIME_WINDOW_MS,
TimerRef = schedule_refresh(CurrentPeriodMs),
{ok, #{timer_ref => TimerRef, current_period_ms => CurrentPeriodMs}}.
@ -86,16 +93,22 @@ handle_info(refresh, #{current_period_ms := PeriodMs} = State) ->
DroppedStats = lists:foldl(
fun(Msg, Acc) ->
case ?GET_SEQ(Msg) of
%% Should not happen, unless the static ids list is updated at run-time.
undefined ->
?NEW_THROTTLE(Msg, ?NEW_SEQ),
%% Should not happen, unless the static ids list is updated at run-time.
new_throttler(Msg),
?tp(log_throttler_new_msg, #{throttled_msg => Msg}),
Acc;
SeqMap when is_map(SeqMap) ->
maps:fold(
fun(Key, Ref, Acc0) ->
ID = iolist_to_binary([atom_to_binary(Msg), $:, Key]),
drop_stats(Ref, ID, Acc0)
end,
Acc,
SeqMap
);
SeqRef ->
Dropped = ?GET_DROPPED(SeqRef),
ok = ?RESET_SEQ(SeqRef),
?tp(log_throttler_dropped, #{dropped_count => Dropped, throttled_msg => Msg}),
maybe_add_dropped(Msg, Dropped, Acc)
drop_stats(SeqRef, Msg, Acc)
end
end,
#{},
@ -112,7 +125,16 @@ handle_info(Info, State) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}),
{noreply, State}.
drop_stats(SeqRef, Msg, Acc) ->
Dropped = ?GET_DROPPED(SeqRef),
ok = ?RESET_SEQ(SeqRef),
?tp(log_throttler_dropped, #{dropped_count => Dropped, throttled_msg => Msg}),
maybe_add_dropped(Msg, Dropped, Acc).
terminate(_Reason, _State) ->
%% atomics do not have a delete/remove/release/deallocate API;
%% once the reference is garbage-collected the resource is released
lists:foreach(fun(Msg) -> ?ERASE_SEQ(Msg) end, ?MSGS_LIST),
ok.
code_change(_OldVsn, State, _Extra) ->
@ -122,17 +144,27 @@ code_change(_OldVsn, State, _Extra) ->
%% internal functions
%%--------------------------------------------------------------------
do_allow(Msg) ->
do_allow(Msg, UniqueKey) ->
case persistent_term:get(?SEQ_ID(Msg), undefined) of
undefined ->
%% This is either a race condition (emqx_log_throttler is not started yet)
%% or a developer mistake (a msg used in the ?SLOG_THROTTLE/2,3 macro is
%% not added to the default value of `log.throttling.msgs`).
?SLOG(info, #{
msg => "missing_log_throttle_sequence",
?SLOG(debug, #{
msg => "log_throttle_disabled",
throttled_msg => Msg
}),
true;
%% e.g.: unrecoverable_resource_error messages are throttled per resource_id
SeqMap when is_map(SeqMap) ->
case maps:find(UniqueKey, SeqMap) of
{ok, SeqRef} ->
?IS_ALLOWED(SeqRef);
error ->
SeqRef = ?NEW_SEQ,
new_throttler(Msg, SeqMap#{UniqueKey => SeqRef}),
true
end;
SeqRef ->
?IS_ALLOWED(SeqRef)
end.
@ -154,3 +186,11 @@ maybe_log_dropped(_DroppedStats, _PeriodMs) ->
schedule_refresh(PeriodMs) ->
?tp(log_throttler_sched_refresh, #{new_period_ms => PeriodMs}),
erlang:send_after(PeriodMs, ?MODULE, refresh).
new_throttler(unrecoverable_resource_error = Msg) ->
new_throttler(Msg, #{});
new_throttler(Msg) ->
new_throttler(Msg, ?NEW_SEQ).
new_throttler(Msg, AtomicOrEmptyMap) ->
persistent_term:put(?SEQ_ID(Msg), AtomicOrEmptyMap).
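%% A minimal sketch (hypothetical caller; function and field names are
%% assumptions) of the new per-key throttling: `unrecoverable_resource_error'
%% is one of the map-based throttlers created by new_throttler/1 above, keyed
%% by a binary resource id supplied by the caller.
maybe_log_unrecoverable_error(ResId, Reason) when is_binary(ResId) ->
    case emqx_log_throttler:allow(unrecoverable_resource_error, ResId) of
        true ->
            logger:error(#{msg => unrecoverable_resource_error, resource_id => ResId, reason => Reason});
        false ->
            ok
    end.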


@ -105,7 +105,7 @@ format(Msg, Meta, Config) ->
maybe_format_msg(undefined, _Meta, _Config) ->
#{};
maybe_format_msg({report, Report0} = Msg, #{report_cb := Cb} = Meta, Config) ->
Report = emqx_logger_textfmt:try_encode_payload(Report0, Config),
Report = emqx_logger_textfmt:try_encode_meta(Report0, Config),
case is_map(Report) andalso Cb =:= ?DEFAULT_FORMATTER of
true ->
%% reporting a map without a customised format function
@ -294,6 +294,7 @@ json_obj_root(Data0, Config) ->
_ ->
json(Msg1, Config)
end,
MFA = emqx_utils:format_mfal(Data0, Config),
Data =
maps:fold(
fun(K, V, D) ->
@ -302,12 +303,12 @@ json_obj_root(Data0, Config) ->
end,
[],
maps:without(
[time, gl, file, report_cb, msg, '$kind', level, is_trace], Data0
[time, gl, file, report_cb, msg, '$kind', level, mfa, is_trace], Data0
)
),
lists:filter(
fun({_, V}) -> V =/= undefined end,
[{time, format_ts(Time, Config)}, {level, Level}, {msg, Msg}]
[{time, format_ts(Time, Config)}, {level, Level}, {msg, Msg}, {mfa, MFA}]
) ++ Data.
format_ts(Ts, #{timestamp_format := rfc3339, time_offset := Offset}) when is_integer(Ts) ->


@ -20,12 +20,12 @@
-export([format/2]).
-export([check_config/1]).
-export([try_format_unicode/1, try_encode_payload/2]).
-export([try_format_unicode/1, try_encode_meta/2]).
%% Used in the other log formatters
-export([evaluate_lazy_values_if_dbg_level/1, evaluate_lazy_values/1]).
check_config(X) ->
logger_formatter:check_config(maps:without([timestamp_format, payload_encode], X)).
logger_formatter:check_config(maps:without([timestamp_format, with_mfa, payload_encode], X)).
%% Principle here is to delegate the formatting to logger_formatter:format/2
%% as much as possible, and only enrich the report with clientid, peername, topic, username
@ -41,18 +41,24 @@ format(#{msg := {report, ReportMap0}, meta := _Meta} = Event0, Config) when is_m
false ->
maps:from_list(ReportList)
end,
fmt(Event#{msg := {report, Report}}, Config);
fmt(Event#{msg := {report, Report}}, maps:remove(with_mfa, Config));
format(#{msg := {string, String}} = Event, Config) ->
%% copied from logger_formatter:format/2
%% unsure how this case is triggered
format(Event#{msg => {"~ts ", [String]}}, Config);
format(Event#{msg => {"~ts ", [String]}}, maps:remove(with_mfa, Config));
format(#{msg := _Msg, meta := _Meta} = Event0, Config) ->
#{msg := Msg0, meta := Meta} = Event1 = evaluate_lazy_values_if_dbg_level(Event0),
%% For format strings like logger:log(Level, "~p", [Var])
%% and logger:log(Level, "message", #{key => value})
Msg1 = enrich_client_info(Msg0, Meta),
Msg2 = enrich_topic(Msg1, Meta),
fmt(Event1#{msg := Msg2}, Config).
Msg2 = enrich_mfa(Msg1, Meta, Config),
Msg3 = enrich_topic(Msg2, Meta),
fmt(Event1#{msg := Msg3}, maps:remove(with_mfa, Config)).
enrich_mfa({Fmt, Args}, Data, #{with_mfa := true} = Config) when is_list(Fmt) ->
{Fmt ++ " mfa: ~ts", Args ++ [emqx_utils:format_mfal(Data, Config)]};
enrich_mfa(Msg, _, _) ->
Msg.
%% Most log entries with lazy values are trace events with level debug. So to
%% be more efficient we only search for lazy values to evaluate in the entries
@ -105,7 +111,7 @@ is_list_report_acceptable(_) ->
enrich_report(ReportRaw0, Meta, Config) ->
%% clientid and peername always in emqx_conn's process metadata.
%% topic and username can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2
ReportRaw = try_encode_payload(ReportRaw0, Config),
ReportRaw = try_encode_meta(ReportRaw0, Config),
Topic =
case maps:get(topic, Meta, undefined) of
undefined -> maps:get(topic, ReportRaw, undefined);
@ -119,6 +125,7 @@ enrich_report(ReportRaw0, Meta, Config) ->
ClientId = maps:get(clientid, Meta, undefined),
Peer = maps:get(peername, Meta, undefined),
Msg = maps:get(msg, ReportRaw, undefined),
MFA = emqx_utils:format_mfal(Meta, Config),
%% TODO: move all tags to Meta so we can filter traces
%% based on tags (currently not supported)
Tag = maps:get(tag, ReportRaw, maps:get(tag, Meta, undefined)),
@ -133,6 +140,7 @@ enrich_report(ReportRaw0, Meta, Config) ->
{topic, try_format_unicode(Topic)},
{username, try_format_unicode(Username)},
{peername, Peer},
{mfa, try_format_unicode(MFA)},
{msg, Msg},
{clientid, try_format_unicode(ClientId)},
{tag, Tag}
@ -172,9 +180,22 @@ enrich_topic({Fmt, Args}, #{topic := Topic}) when is_list(Fmt) ->
enrich_topic(Msg, _) ->
Msg.
try_encode_payload(#{payload := Payload} = Report, #{payload_encode := Encode}) ->
try_encode_meta(Report, Config) ->
lists:foldl(
fun(Meta, Acc) ->
try_encode_meta(Meta, Acc, Config)
end,
Report,
[payload, packet]
).
try_encode_meta(payload, #{payload := Payload} = Report, #{payload_encode := Encode}) ->
Report#{payload := encode_payload(Payload, Encode)};
try_encode_payload(Report, _Config) ->
try_encode_meta(packet, #{packet := Packet} = Report, #{payload_encode := Encode}) when
is_tuple(Packet)
->
Report#{packet := emqx_packet:format(Packet, Encode)};
try_encode_meta(_, Report, _Config) ->
Report.
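%% A minimal sketch of the expected behaviour (hypothetical values): with
%% Config = #{payload_encode => hidden}, a report #{payload => <<"secret">>}
%% becomes #{payload => "******"}; reports without a `payload' or `packet'
%% key pass through unchanged.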
encode_payload(Payload, text) ->
@ -182,4 +203,5 @@ encode_payload(Payload, text) ->
encode_payload(_Payload, hidden) ->
"******";
encode_payload(Payload, hex) ->
binary:encode_hex(Payload).
Bin = emqx_utils_conv:bin(Payload),
binary:encode_hex(Bin).

View File

@ -51,7 +51,6 @@
]).
-export([
format/1,
format/2
]).
@ -481,10 +480,6 @@ will_msg(#mqtt_packet_connect{
headers = #{username => Username, properties => Props}
}.
%% @doc Format packet
-spec format(emqx_types:packet()) -> iolist().
format(Packet) -> format(Packet, emqx_trace_handler:payload_encode()).
%% @doc Format packet
-spec format(emqx_types:packet(), hex | text | hidden) -> iolist().
format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) ->

View File

@ -25,6 +25,7 @@
-include("emqx_mqtt.hrl").
-include("emqx_session.hrl").
-include("emqx_persistent_session_ds/session_internals.hrl").
-ifdef(TEST).
@ -63,6 +64,7 @@
deliver/3,
replay/3,
handle_timeout/3,
handle_info/2,
disconnect/2,
terminate/2
]).
@ -106,6 +108,7 @@
seqno/0,
timestamp/0,
topic_filter/0,
share_topic_filter/0,
subscription_id/0,
subscription/0,
session/0,
@ -117,7 +120,8 @@
%% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be
%% an atom, in theory (?).
-type id() :: binary().
-type topic_filter() :: emqx_types:topic() | #share{}.
-type share_topic_filter() :: #share{}.
-type topic_filter() :: emqx_types:topic() | share_topic_filter().
%% Subscription and subscription states:
%%
@ -155,6 +159,8 @@
subopts := map()
}.
-type shared_sub_state() :: term().
-define(TIMER_PULL, timer_pull).
-define(TIMER_GET_STREAMS, timer_get_streams).
-define(TIMER_BUMP_LAST_ALIVE_AT, timer_bump_last_alive_at).
@ -172,8 +178,13 @@
props := map(),
%% Persistent state:
s := emqx_persistent_session_ds_state:t(),
%% Shared subscription state:
shared_sub_s := shared_sub_state(),
%% Buffer:
inflight := emqx_persistent_session_ds_inflight:t(),
%% Last fetched stream:
%% Used as a continuation point for fair stream scheduling.
last_fetched_stream => emqx_persistent_session_ds_state:stream_key(),
%% In-progress replay:
%% List of stream replay states to be added to the inflight buffer.
replay => [{_StreamKey, stream_state()}, ...],
@ -277,8 +288,11 @@ info(created_at, #{s := S}) ->
emqx_persistent_session_ds_state:get_created_at(S);
info(is_persistent, #{}) ->
true;
info(subscriptions, #{s := S}) ->
emqx_persistent_session_ds_subs:to_map(S);
info(subscriptions, #{s := S, shared_sub_s := SharedSubS}) ->
maps:merge(
emqx_persistent_session_ds_subs:to_map(S),
emqx_persistent_session_ds_shared_subs:to_map(S, SharedSubS)
);
info(subscriptions_cnt, #{s := S}) ->
emqx_persistent_session_ds_state:n_subscriptions(S);
info(subscriptions_max, #{props := Conf}) ->
@ -356,15 +370,23 @@ print_session(ClientId) ->
%% Client -> Broker: SUBSCRIBE / UNSUBSCRIBE
%%--------------------------------------------------------------------
%% Suppress warnings about clauses handling unimplemented results
%% of `emqx_persistent_session_ds_shared_subs:on_subscribe/3`
-dialyzer({nowarn_function, subscribe/3}).
-spec subscribe(topic_filter(), emqx_types:subopts(), session()) ->
{ok, session()} | {error, emqx_types:reason_code()}.
subscribe(
#share{},
_SubOpts,
_Session
#share{} = TopicFilter,
SubOpts,
Session
) ->
%% TODO: Shared subscriptions are not supported yet:
{error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED};
case emqx_persistent_session_ds_shared_subs:on_subscribe(TopicFilter, SubOpts, Session) of
{ok, S0, SharedSubS} ->
S = emqx_persistent_session_ds_state:commit(S0),
{ok, Session#{s => S, shared_sub_s => SharedSubS}};
Error = {error, _} ->
Error
end;
subscribe(
TopicFilter,
SubOpts,
@ -378,8 +400,27 @@ subscribe(
Error
end.
%% Suppress warnings about clauses handling unimplemented results
%% of `emqx_persistent_session_ds_shared_subs:on_unsubscribe/4`
-dialyzer({nowarn_function, unsubscribe/2}).
-spec unsubscribe(topic_filter(), session()) ->
{ok, session(), emqx_types:subopts()} | {error, emqx_types:reason_code()}.
unsubscribe(
#share{} = TopicFilter,
Session = #{id := SessionId, s := S0, shared_sub_s := SharedSubS0}
) ->
case
emqx_persistent_session_ds_shared_subs:on_unsubscribe(
SessionId, TopicFilter, S0, SharedSubS0
)
of
{ok, S1, SharedSubS1, #{id := SubId, subopts := SubOpts}} ->
S2 = emqx_persistent_session_ds_stream_scheduler:on_unsubscribe(SubId, S1),
S = emqx_persistent_session_ds_state:commit(S2),
{ok, Session#{s => S, shared_sub_s => SharedSubS1}, SubOpts};
Error = {error, _} ->
Error
end;
unsubscribe(
TopicFilter,
Session = #{id := SessionId, s := S0}
@ -540,6 +581,8 @@ pubcomp(_ClientInfo, PacketId, Session0) ->
end.
%%--------------------------------------------------------------------
%% Delivers
%%--------------------------------------------------------------------
-spec deliver(clientinfo(), [emqx_types:deliver()], session()) ->
{ok, replies(), session()}.
@ -551,6 +594,10 @@ deliver(ClientInfo, Delivers, Session0) ->
),
{ok, [], pull_now(Session)}.
%%--------------------------------------------------------------------
%% Timeouts
%%--------------------------------------------------------------------
-spec handle_timeout(clientinfo(), _Timeout, session()) ->
{ok, replies(), session()} | {ok, replies(), timeout(), session()}.
handle_timeout(ClientInfo, ?TIMER_PULL, Session0) ->
@ -573,14 +620,19 @@ handle_timeout(ClientInfo, ?TIMER_PULL, Session0) ->
handle_timeout(ClientInfo, ?TIMER_RETRY_REPLAY, Session0) ->
Session = replay_streams(Session0, ClientInfo),
{ok, [], Session};
handle_timeout(ClientInfo, ?TIMER_GET_STREAMS, Session0 = #{s := S0}) ->
S1 = emqx_persistent_session_ds_subs:gc(S0),
S = emqx_persistent_session_ds_stream_scheduler:renew_streams(S1),
handle_timeout(ClientInfo, ?TIMER_GET_STREAMS, Session0 = #{s := S0, shared_sub_s := SharedSubS0}) ->
%% The `gc` and `renew_streams` functions may drop unsubscribed streams.
%% The shared subscription handler must have a chance to see unsubscribed streams
%% in the fully replayed state.
{S1, SharedSubS1} = emqx_persistent_session_ds_shared_subs:pre_renew_streams(S0, SharedSubS0),
S2 = emqx_persistent_session_ds_subs:gc(S1),
S3 = emqx_persistent_session_ds_stream_scheduler:renew_streams(S2),
{S, SharedSubS} = emqx_persistent_session_ds_shared_subs:renew_streams(S3, SharedSubS1),
Interval = get_config(ClientInfo, [renew_streams_interval]),
Session = emqx_session:ensure_timer(
?TIMER_GET_STREAMS,
Interval,
Session0#{s => S}
Session0#{s => S, shared_sub_s => SharedSubS}
),
{ok, [], Session};
handle_timeout(_ClientInfo, ?TIMER_BUMP_LAST_ALIVE_AT, Session0 = #{s := S0}) ->
@ -601,6 +653,22 @@ handle_timeout(_ClientInfo, Timeout, Session) ->
?SLOG(warning, #{msg => "unknown_ds_timeout", timeout => Timeout}),
{ok, [], Session}.
%%--------------------------------------------------------------------
%% Generic messages
%%--------------------------------------------------------------------
-spec handle_info(term(), session()) -> session().
handle_info(?shared_sub_message(Msg), Session = #{s := S0, shared_sub_s := SharedSubS0}) ->
{S, SharedSubS} = emqx_persistent_session_ds_shared_subs:on_info(S0, SharedSubS0, Msg),
Session#{s => S, shared_sub_s => SharedSubS}.
%%--------------------------------------------------------------------
%% Shared subscription outgoing messages
%%--------------------------------------------------------------------
shared_sub_opts(SessionId) ->
#{session_id => SessionId}.
bump_last_alive(S0) ->
%% Note: we take a pessimistic approach here and assume that the client will be alive
%% until the next bump timeout. With this, we avoid garbage collecting this session
@ -693,7 +761,7 @@ skip_batch(StreamKey, SRS0, Session = #{s := S0}, ClientInfo, Reason) ->
%%--------------------------------------------------------------------
-spec disconnect(session(), emqx_types:conninfo()) -> {shutdown, session()}.
disconnect(Session = #{id := Id, s := S0}, ConnInfo) ->
disconnect(Session = #{id := Id, s := S0, shared_sub_s := SharedSubS0}, ConnInfo) ->
S1 = maybe_set_offline_info(S0, Id),
S2 = emqx_persistent_session_ds_state:set_last_alive_at(now_ms(), S1),
S3 =
@ -703,8 +771,9 @@ disconnect(Session = #{id := Id, s := S0}, ConnInfo) ->
_ ->
S2
end,
S = emqx_persistent_session_ds_state:commit(S3),
{shutdown, Session#{s => S}}.
{S4, SharedSubS} = emqx_persistent_session_ds_shared_subs:on_disconnect(S3, SharedSubS0),
S = emqx_persistent_session_ds_state:commit(S4),
{shutdown, Session#{s => S, shared_sub_s => SharedSubS}}.
-spec terminate(Reason :: term(), session()) -> ok.
terminate(_Reason, Session = #{id := Id, s := S}) ->
@ -752,10 +821,12 @@ list_client_subscriptions(ClientId) ->
{error, not_found}
end.
-spec get_client_subscription(emqx_types:clientid(), emqx_types:topic()) ->
-spec get_client_subscription(emqx_types:clientid(), topic_filter() | share_topic_filter()) ->
subscription() | undefined.
get_client_subscription(ClientId, Topic) ->
emqx_persistent_session_ds_subs:cold_get_subscription(ClientId, Topic).
get_client_subscription(ClientId, #share{} = ShareTopicFilter) ->
emqx_persistent_session_ds_shared_subs:cold_get_subscription(ClientId, ShareTopicFilter);
get_client_subscription(ClientId, TopicFilter) ->
emqx_persistent_session_ds_subs:cold_get_subscription(ClientId, TopicFilter).
%%--------------------------------------------------------------------
%% Session tables operations
@ -814,13 +885,17 @@ session_open(
S4 = emqx_persistent_session_ds_state:set_will_message(MaybeWillMsg, S3),
S5 = set_clientinfo(ClientInfo, S4),
S6 = emqx_persistent_session_ds_state:set_protocol({ProtoName, ProtoVer}, S5),
S = emqx_persistent_session_ds_state:commit(S6),
{ok, S7, SharedSubS} = emqx_persistent_session_ds_shared_subs:open(
S6, shared_sub_opts(SessionId)
),
S = emqx_persistent_session_ds_state:commit(S7),
Inflight = emqx_persistent_session_ds_inflight:new(
receive_maximum(NewConnInfo)
),
#{
id => SessionId,
s => S,
shared_sub_s => SharedSubS,
inflight => Inflight,
props => #{}
}
@ -869,6 +944,7 @@ session_ensure_new(
id => Id,
props => Conf,
s => S,
shared_sub_s => emqx_persistent_session_ds_shared_subs:new(shared_sub_opts(Id)),
inflight => emqx_persistent_session_ds_inflight:new(receive_maximum(ConnInfo))
}.
@ -879,8 +955,8 @@ session_drop(SessionId, Reason) ->
case emqx_persistent_session_ds_state:open(SessionId) of
{ok, S0} ->
?tp(debug, drop_persistent_session, #{client_id => SessionId, reason => Reason}),
emqx_persistent_session_ds_subs:on_session_drop(SessionId, S0),
emqx_persistent_session_ds_state:delete(SessionId);
ok = emqx_persistent_session_ds_subs:on_session_drop(SessionId, S0),
ok = emqx_persistent_session_ds_state:delete(SessionId);
undefined ->
ok
end.
@ -917,24 +993,33 @@ do_ensure_all_iterators_closed(_DSSessionID) ->
%% Normal replay:
%%--------------------------------------------------------------------
fetch_new_messages(Session = #{s := S}, ClientInfo) ->
Streams = emqx_persistent_session_ds_stream_scheduler:find_new_streams(S),
fetch_new_messages(Streams, Session, ClientInfo).
fetch_new_messages([], Session, _ClientInfo) ->
Session;
fetch_new_messages([I | Streams], Session0 = #{inflight := Inflight}, ClientInfo) ->
fetch_new_messages(Session0 = #{s := S0, shared_sub_s := SharedSubS0}, ClientInfo) ->
{S1, SharedSubS1} = emqx_persistent_session_ds_shared_subs:on_streams_replay(S0, SharedSubS0),
Session1 = Session0#{s => S1, shared_sub_s => SharedSubS1},
LFS = maps:get(last_fetched_stream, Session1, beginning),
ItStream = emqx_persistent_session_ds_stream_scheduler:iter_next_streams(LFS, S1),
BatchSize = get_config(ClientInfo, [batch_size]),
Session2 = fetch_new_messages(ItStream, BatchSize, Session1, ClientInfo),
Session2#{shared_sub_s => SharedSubS1}.
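%% Fill the inflight buffer by walking the fetchable streams, starting right
%% after the last visited stream; the scheduler's iterator wraps around to the
%% beginning once, so each stream gets a fair chance within a single round.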
fetch_new_messages(ItStream0, BatchSize, Session0, ClientInfo) ->
#{inflight := Inflight} = Session0,
case emqx_persistent_session_ds_inflight:n_buffered(all, Inflight) >= BatchSize of
true ->
%% Buffer is full:
Session0;
false ->
Session = new_batch(I, BatchSize, Session0, ClientInfo),
fetch_new_messages(Streams, Session, ClientInfo)
case emqx_persistent_session_ds_stream_scheduler:next_stream(ItStream0) of
{StreamKey, Srs, ItStream} ->
Session1 = new_batch(StreamKey, Srs, BatchSize, Session0, ClientInfo),
Session = Session1#{last_fetched_stream => StreamKey},
fetch_new_messages(ItStream, BatchSize, Session, ClientInfo);
none ->
Session0
end
end.
new_batch({StreamKey, Srs0}, BatchSize, Session0 = #{s := S0}, ClientInfo) ->
new_batch(StreamKey, Srs0, BatchSize, Session0 = #{s := S0}, ClientInfo) ->
SN1 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_1), S0),
SN2 = emqx_persistent_session_ds_state:get_seqno(?next(?QOS_2), S0),
Srs1 = Srs0#srs{

View File

@ -67,7 +67,7 @@
-type t() :: #inflight{}.
%%================================================================================
%% API funcions
%% API functions
%%================================================================================
-spec new(non_neg_integer()) -> t().

View File

@ -17,7 +17,7 @@
-module(emqx_persistent_session_ds_router).
-include("emqx.hrl").
-include("emqx_persistent_session_ds/emqx_ps_ds_int.hrl").
-include("emqx_ps_ds_int.hrl").
-export([init_tables/0]).
@ -46,9 +46,10 @@
-export([has_route/2]).
-endif.
-type dest() :: emqx_persistent_session_ds:id().
-type route() :: #ps_route{}.
-type dest() :: emqx_persistent_session_ds:id() | #share_dest{}.
-export_type([dest/0]).
-export_type([dest/0, route/0]).
%%--------------------------------------------------------------------
%% Table Initialization
@ -123,19 +124,19 @@ has_any_route(Topic) ->
%% @doc Take a real topic (not filter) as input, return the matching topics and topic
%% filters associated with route destination.
-spec match_routes(emqx_types:topic()) -> [emqx_types:route()].
-spec match_routes(emqx_types:topic()) -> [route()].
match_routes(Topic) when is_binary(Topic) ->
lookup_route_tab(Topic) ++
[match_to_route(M) || M <- match_filters(Topic)].
%% @doc Take a topic or filter as input, and return the existing routes with exactly
%% this topic or filter.
-spec lookup_routes(emqx_types:topic()) -> [emqx_types:route()].
-spec lookup_routes(emqx_types:topic()) -> [route()].
lookup_routes(Topic) ->
case emqx_topic:wildcard(Topic) of
true ->
Pat = #ps_routeidx{entry = emqx_topic_index:make_key(Topic, '$1')},
[Dest || [Dest] <- ets:match(?PS_FILTERS_TAB, Pat)];
[#ps_route{topic = Topic, dest = Dest} || [Dest] <- ets:match(?PS_FILTERS_TAB, Pat)];
false ->
lookup_route_tab(Topic)
end.
@ -160,7 +161,7 @@ topics() ->
print_routes(Topic) ->
lists:foreach(
fun(#ps_route{topic = To, dest = Dest}) ->
io:format("~ts -> ~ts~n", [To, Dest])
io:format("~ts -> ~tp~n", [To, Dest])
end,
match_routes(Topic)
).
@ -194,11 +195,11 @@ cleanup_routes(DSSessionId) ->
?PS_ROUTER_TAB
).
-spec foldl_routes(fun((emqx_types:route(), Acc) -> Acc), Acc) -> Acc.
-spec foldl_routes(fun((route(), Acc) -> Acc), Acc) -> Acc.
foldl_routes(FoldFun, AccIn) ->
fold_routes(foldl, FoldFun, AccIn).
-spec foldr_routes(fun((emqx_types:route(), Acc) -> Acc), Acc) -> Acc.
-spec foldr_routes(fun((route(), Acc) -> Acc), Acc) -> Acc.
foldr_routes(FoldFun, AccIn) ->
fold_routes(foldr, FoldFun, AccIn).
@ -246,6 +247,8 @@ mk_filtertab_fold_fun(FoldFun) ->
match_filters(Topic) ->
emqx_topic_index:matches(Topic, ?PS_FILTERS_TAB, []).
get_dest_session_id(#share_dest{session_id = DSSessionId}) ->
DSSessionId;
get_dest_session_id({_, DSSessionId}) ->
DSSessionId;
get_dest_session_id(DSSessionId) ->

View File

@ -0,0 +1,791 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
%% @doc This module
%% * handles creation and management of _shared_ subscriptions for the session;
%% * provides streams to the session;
%% * handles progress of stream replay.
%%
%% The logic is quite straightforward; most of the parts resemble the logic of the
%% `emqx_persistent_session_ds_subs` (subscribe/unsubscribe) and
%% `emqx_persistent_session_ds_scheduler` (providing new streams),
%% but some data is sent to or received from the `emqx_persistent_session_ds_shared_subs_agent`,
%% which communicates with remote shared subscription leaders.
%%
%% A tricky part is the concept of "scheduled actions". When we unsubscribe from a topic
%% we may have some streams that have unacked messages. So we do not have reliable
%% progress for them. Sending the current progress to the leader and disconnecting
%% will lead to the duplication of messages. So after unsubscription, we need to wait
%% some time until all streams are acked, and only then we disconnect from the leader.
%%
%% For this purpose we have the `scheduled_actions` map in the state of the module.
%% There we keep the streams that we need to wait for and collect their progress.
%% We also use `scheduled_actions` for resubscriptions. If a client quickly resubscribes
%% after unsubscription, we may still have the mentioned streams unacked. If we abandon
%% them and just reconnect to the leader, it may lease us the same streams again, but with
%% the previous progress, so messages may be duplicated.
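%%
%% A rough illustration (hypothetical values) of a pending unsubscribe kept in
%% `scheduled_actions':
%%   #{#share{group = <<"grp">>, topic = <<"t/#">>} =>
%%       #{type => ?schedule_unsubscribe,
%%         stream_keys_to_wait => [{SubId, Stream}],
%%         progresses => []}}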
-module(emqx_persistent_session_ds_shared_subs).
-include("emqx_mqtt.hrl").
-include("emqx.hrl").
-include("logger.hrl").
-include("session_internals.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-export([
new/1,
open/2,
on_subscribe/3,
on_unsubscribe/4,
on_disconnect/2,
on_streams_replay/2,
on_info/3,
pre_renew_streams/2,
renew_streams/2,
to_map/2
]).
%% Management API:
-export([
cold_get_subscription/2
]).
-export([
format_lease_events/1,
format_stream_progresses/1
]).
-define(schedule_subscribe, schedule_subscribe).
-define(schedule_unsubscribe, schedule_unsubscribe).
-type stream_key() :: {emqx_persistent_session_ds:id(), emqx_ds:stream()}.
-type scheduled_action_type() ::
{?schedule_subscribe, emqx_types:subopts()} | ?schedule_unsubscribe.
-type agent_stream_progress() :: #{
stream := emqx_ds:stream(),
progress := progress(),
use_finished := boolean()
}.
-type progress() ::
#{
iterator := emqx_ds:iterator()
}.
-type scheduled_action() :: #{
type := scheduled_action_type(),
stream_keys_to_wait := [stream_key()],
progresses := [agent_stream_progress()]
}.
-type t() :: #{
agent := emqx_persistent_session_ds_shared_subs_agent:t(),
scheduled_actions := #{
share_topic_filter() => scheduled_action()
}
}.
-type share_topic_filter() :: emqx_persistent_session_ds:share_topic_filter().
-type opts() :: #{
session_id := emqx_persistent_session_ds:id()
}.
-define(rank_x, rank_shared).
-define(rank_y, 0).
-export_type([
progress/0,
agent_stream_progress/0
]).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
%%--------------------------------------------------------------------
%% new
-spec new(opts()) -> t().
new(Opts) ->
#{
agent => emqx_persistent_session_ds_shared_subs_agent:new(
agent_opts(Opts)
),
scheduled_actions => #{}
}.
%%--------------------------------------------------------------------
%% open
-spec open(emqx_persistent_session_ds_state:t(), opts()) ->
{ok, emqx_persistent_session_ds_state:t(), t()}.
open(S0, Opts) ->
SharedSubscriptions = fold_shared_subs(
fun(#share{} = ShareTopicFilter, Sub, Acc) ->
[{ShareTopicFilter, to_agent_subscription(S0, Sub)} | Acc]
end,
[],
S0
),
Agent = emqx_persistent_session_ds_shared_subs_agent:open(
SharedSubscriptions, agent_opts(Opts)
),
SharedSubS = #{agent => Agent, scheduled_actions => #{}},
S1 = revoke_all_streams(S0),
{ok, S1, SharedSubS}.
%%--------------------------------------------------------------------
%% on_subscribe
-spec on_subscribe(
share_topic_filter(),
emqx_types:subopts(),
emqx_persistent_session_ds:session()
) -> {ok, emqx_persistent_session_ds_state:t(), t()} | {error, emqx_types:reason_code()}.
on_subscribe(#share{} = ShareTopicFilter, SubOpts, #{s := S} = Session) ->
Subscription = emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S),
on_subscribe(Subscription, ShareTopicFilter, SubOpts, Session).
%%--------------------------------------------------------------------
%% on_subscribe internal functions
on_subscribe(undefined, ShareTopicFilter, SubOpts, #{props := Props, s := S} = Session) ->
#{max_subscriptions := MaxSubscriptions} = Props,
case emqx_persistent_session_ds_state:n_subscriptions(S) < MaxSubscriptions of
true ->
create_new_subscription(ShareTopicFilter, SubOpts, Session);
false ->
{error, ?RC_QUOTA_EXCEEDED}
end;
on_subscribe(Subscription, ShareTopicFilter, SubOpts, Session) ->
update_subscription(Subscription, ShareTopicFilter, SubOpts, Session).
-dialyzer({nowarn_function, create_new_subscription/3}).
create_new_subscription(#share{topic = TopicFilter, group = Group} = ShareTopicFilter, SubOpts, #{
id := SessionId,
s := S0,
shared_sub_s := #{agent := Agent} = SharedSubS0,
props := Props
}) ->
case
emqx_persistent_session_ds_shared_subs_agent:can_subscribe(
Agent, ShareTopicFilter, SubOpts
)
of
ok ->
ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, #share_dest{
session_id = SessionId, group = Group
}),
_ = emqx_external_broker:add_persistent_shared_route(TopicFilter, Group, SessionId),
#{upgrade_qos := UpgradeQoS} = Props,
{SubId, S1} = emqx_persistent_session_ds_state:new_id(S0),
{SStateId, S2} = emqx_persistent_session_ds_state:new_id(S1),
SState = #{
parent_subscription => SubId, upgrade_qos => UpgradeQoS, subopts => SubOpts
},
S3 = emqx_persistent_session_ds_state:put_subscription_state(
SStateId, SState, S2
),
Subscription = #{
id => SubId,
current_state => SStateId,
start_time => now_ms()
},
S = emqx_persistent_session_ds_state:put_subscription(
ShareTopicFilter, Subscription, S3
),
SharedSubS = schedule_subscribe(SharedSubS0, ShareTopicFilter, SubOpts),
{ok, S, SharedSubS};
{error, _} = Error ->
Error
end.
update_subscription(
#{current_state := SStateId0, id := SubId} = Sub0, ShareTopicFilter, SubOpts, #{
s := S0, shared_sub_s := SharedSubS, props := Props
}
) ->
#{upgrade_qos := UpgradeQoS} = Props,
SState = #{parent_subscription => SubId, upgrade_qos => UpgradeQoS, subopts => SubOpts},
case emqx_persistent_session_ds_state:get_subscription_state(SStateId0, S0) of
SState ->
%% Client resubscribed with the same parameters:
{ok, S0, SharedSubS};
_ ->
%% Subscription parameters changed:
{SStateId, S1} = emqx_persistent_session_ds_state:new_id(S0),
S2 = emqx_persistent_session_ds_state:put_subscription_state(
SStateId, SState, S1
),
Sub = Sub0#{current_state => SStateId},
S = emqx_persistent_session_ds_state:put_subscription(ShareTopicFilter, Sub, S2),
{ok, S, SharedSubS}
end.
-dialyzer({nowarn_function, schedule_subscribe/3}).
schedule_subscribe(
#{agent := Agent0, scheduled_actions := ScheduledActions0} = SharedSubS0,
ShareTopicFilter,
SubOpts
) ->
case ScheduledActions0 of
#{ShareTopicFilter := ScheduledAction} ->
ScheduledActions1 = ScheduledActions0#{
ShareTopicFilter => ScheduledAction#{type => {?schedule_subscribe, SubOpts}}
},
?tp(debug, shared_subs_schedule_subscribe_override, #{
share_topic_filter => ShareTopicFilter,
new_type => {?schedule_subscribe, SubOpts},
old_action => format_schedule_action(ScheduledAction)
}),
SharedSubS0#{scheduled_actions := ScheduledActions1};
_ ->
?tp(debug, shared_subs_schedule_subscribe_new, #{
share_topic_filter => ShareTopicFilter, subopts => SubOpts
}),
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_subscribe(
Agent0, ShareTopicFilter, SubOpts
),
SharedSubS0#{agent => Agent1}
end.
%%--------------------------------------------------------------------
%% on_unsubscribe
-spec on_unsubscribe(
emqx_persistent_session_ds:id(),
share_topic_filter(),
emqx_persistent_session_ds_state:t(),
t()
) ->
{ok, emqx_persistent_session_ds_state:t(), t(), emqx_persistent_session_ds:subscription()}
| {error, emqx_types:reason_code()}.
on_unsubscribe(
SessionId, #share{topic = TopicFilter, group = Group} = ShareTopicFilter, S0, SharedSubS0
) ->
case lookup(ShareTopicFilter, S0) of
undefined ->
{error, ?RC_NO_SUBSCRIPTION_EXISTED};
#{id := SubId} = Subscription ->
?tp(persistent_session_ds_subscription_delete, #{
session_id => SessionId, share_topic_filter => ShareTopicFilter
}),
_ = emqx_external_broker:delete_persistent_shared_route(TopicFilter, Group, SessionId),
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, #share_dest{
session_id = SessionId, group = Group
}),
S = emqx_persistent_session_ds_state:del_subscription(ShareTopicFilter, S0),
SharedSubS = schedule_unsubscribe(S, SharedSubS0, SubId, ShareTopicFilter),
{ok, S, SharedSubS, Subscription}
end.
%%--------------------------------------------------------------------
%% on_unsubscribe internal functions
schedule_unsubscribe(
S, #{scheduled_actions := ScheduledActions0} = SharedSubS0, UnsubscribedSubId, ShareTopicFilter
) ->
case ScheduledActions0 of
#{ShareTopicFilter := ScheduledAction0} ->
ScheduledAction1 = ScheduledAction0#{type => ?schedule_unsubscribe},
ScheduledActions1 = ScheduledActions0#{
ShareTopicFilter => ScheduledAction1
},
?tp(debug, shared_subs_schedule_unsubscribe_override, #{
share_topic_filter => ShareTopicFilter,
new_type => ?schedule_unsubscribe,
old_action => format_schedule_action(ScheduledAction0)
}),
SharedSubS0#{scheduled_actions := ScheduledActions1};
_ ->
StreamKeys = stream_keys_by_sub_id(S, UnsubscribedSubId),
ScheduledActions1 = ScheduledActions0#{
ShareTopicFilter => #{
type => ?schedule_unsubscribe,
stream_keys_to_wait => StreamKeys,
progresses => []
}
},
?tp(debug, shared_subs_schedule_unsubscribe_new, #{
share_topic_filter => ShareTopicFilter,
stream_keys => format_stream_keys(StreamKeys)
}),
SharedSubS0#{scheduled_actions := ScheduledActions1}
end.
%%--------------------------------------------------------------------
%% pre_renew_streams
-spec pre_renew_streams(emqx_persistent_session_ds_state:t(), t()) ->
{emqx_persistent_session_ds_state:t(), t()}.
pre_renew_streams(S, SharedSubS) ->
on_streams_replay(S, SharedSubS).
%%--------------------------------------------------------------------
%% renew_streams
-spec renew_streams(emqx_persistent_session_ds_state:t(), t()) ->
{emqx_persistent_session_ds_state:t(), t()}.
renew_streams(S0, #{agent := Agent0, scheduled_actions := ScheduledActions} = SharedSubS0) ->
{StreamLeaseEvents, Agent1} = emqx_persistent_session_ds_shared_subs_agent:renew_streams(
Agent0
),
StreamLeaseEvents =/= [] andalso
?tp(debug, shared_subs_new_stream_lease_events, #{
stream_lease_events => format_lease_events(StreamLeaseEvents)
}),
S1 = lists:foldl(
fun
(#{type := lease} = Event, S) -> accept_stream(Event, S, ScheduledActions);
(#{type := revoke} = Event, S) -> revoke_stream(Event, S)
end,
S0,
StreamLeaseEvents
),
SharedSubS1 = SharedSubS0#{agent => Agent1},
{S1, SharedSubS1}.
%%--------------------------------------------------------------------
%% renew_streams internal functions
accept_stream(#{share_topic_filter := ShareTopicFilter} = Event, S, ScheduledActions) ->
%% If we have a pending action (subscribe or unsubscribe) for this topic filter,
%% we should not accept a stream and start replaying it. We won't use it anyway:
%% * if subscribe is pending, we will reset the agent and obtain a new lease;
%% * if unsubscribe is pending, we will drop the connection.
case ScheduledActions of
#{ShareTopicFilter := _Action} ->
S;
_ ->
accept_stream(Event, S)
end.
accept_stream(
#{
share_topic_filter := ShareTopicFilter,
stream := Stream,
progress := #{iterator := Iterator} = _Progress
} = _Event,
S0
) ->
case emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S0) of
undefined ->
%% We unsubscribed
S0;
#{id := SubId, current_state := SStateId} ->
Key = {SubId, Stream},
NeedCreateStream =
case emqx_persistent_session_ds_state:get_stream(Key, S0) of
undefined ->
true;
#srs{unsubscribed = true} ->
true;
_SRS ->
false
end,
case NeedCreateStream of
true ->
NewSRS =
#srs{
rank_x = ?rank_x,
rank_y = ?rank_y,
it_begin = Iterator,
it_end = Iterator,
sub_state_id = SStateId
},
S1 = emqx_persistent_session_ds_state:put_stream(Key, NewSRS, S0),
S1;
false ->
S0
end
end.
revoke_stream(
#{share_topic_filter := ShareTopicFilter, stream := Stream}, S0
) ->
case emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S0) of
undefined ->
%% This should not happen.
%% Agent should have received unsubscribe callback
%% and should not have revoked this stream
S0;
#{id := SubId} ->
Key = {SubId, Stream},
case emqx_persistent_session_ds_state:get_stream(Key, S0) of
undefined ->
S0;
SRS0 ->
SRS1 = SRS0#srs{unsubscribed = true},
S1 = emqx_persistent_session_ds_state:put_stream(Key, SRS1, S0),
S1
end
end.
%%--------------------------------------------------------------------
%% on_streams_replay
-spec on_streams_replay(
emqx_persistent_session_ds_state:t(),
t()
) -> {emqx_persistent_session_ds_state:t(), t()}.
on_streams_replay(S0, SharedSubS0) ->
{S1, #{agent := Agent0, scheduled_actions := ScheduledActions0} = SharedSubS1} =
renew_streams(S0, SharedSubS0),
Progresses = all_stream_progresses(S1, Agent0),
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_stream_progress(
Agent0, Progresses
),
{Agent2, ScheduledActions1} = run_scheduled_actions(S1, Agent1, ScheduledActions0),
SharedSubS2 = SharedSubS1#{
agent => Agent2,
scheduled_actions => ScheduledActions1
},
{S1, SharedSubS2}.
%%--------------------------------------------------------------------
%% on_streams_replay internal functions
all_stream_progresses(S, Agent) ->
all_stream_progresses(S, Agent, _NeedUnacked = false).
all_stream_progresses(S, _Agent, NeedUnacked) ->
CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
fold_shared_stream_states(
fun(ShareTopicFilter, Stream, SRS, ProgressesAcc0) ->
case
is_stream_started(CommQos1, CommQos2, SRS) and
(NeedUnacked or is_stream_fully_acked(CommQos1, CommQos2, SRS))
of
true ->
StreamProgress = stream_progress(CommQos1, CommQos2, Stream, SRS),
maps:update_with(
ShareTopicFilter,
fun(Progresses) -> [StreamProgress | Progresses] end,
[StreamProgress],
ProgressesAcc0
);
false ->
ProgressesAcc0
end
end,
#{},
S
).
run_scheduled_actions(S, Agent, ScheduledActions) ->
maps:fold(
fun(ShareTopicFilter, Action0, {AgentAcc0, ScheduledActionsAcc}) ->
case run_scheduled_action(S, AgentAcc0, ShareTopicFilter, Action0) of
{ok, AgentAcc1} ->
{AgentAcc1, maps:remove(ShareTopicFilter, ScheduledActionsAcc)};
{continue, Action1} ->
{AgentAcc0, ScheduledActionsAcc#{ShareTopicFilter => Action1}}
end
end,
{Agent, ScheduledActions},
ScheduledActions
).
run_scheduled_action(
S,
Agent0,
ShareTopicFilter,
#{type := Type, stream_keys_to_wait := StreamKeysToWait0, progresses := Progresses0} = Action
) ->
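%% Streams that became fully acked since the last check are considered
%% finished: collect their progress and stop waiting for them.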
StreamKeysToWait1 = filter_unfinished_streams(S, StreamKeysToWait0),
Progresses1 = stream_progresses(S, StreamKeysToWait0 -- StreamKeysToWait1) ++ Progresses0,
case StreamKeysToWait1 of
[] ->
?tp(debug, shared_subs_schedule_action_complete, #{
share_topic_filter => ShareTopicFilter,
progresses => format_stream_progresses(Progresses1),
type => Type
}),
%% Regular progress won't see unsubscribed streams, so we need to
%% send the progress explicitly.
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_stream_progress(
Agent0, #{ShareTopicFilter => Progresses1}
),
case Type of
{?schedule_subscribe, SubOpts} ->
{ok,
emqx_persistent_session_ds_shared_subs_agent:on_subscribe(
Agent1, ShareTopicFilter, SubOpts
)};
?schedule_unsubscribe ->
{ok,
emqx_persistent_session_ds_shared_subs_agent:on_unsubscribe(
Agent1, ShareTopicFilter, Progresses1
)}
end;
_ ->
Action1 = Action#{stream_keys_to_wait => StreamKeysToWait1, progresses => Progresses1},
?tp(debug, shared_subs_schedule_action_continue, #{
share_topic_filter => ShareTopicFilter,
new_action => format_schedule_action(Action1)
}),
{continue, Action1}
end.
filter_unfinished_streams(S, StreamKeysToWait) ->
CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
lists:filter(
fun(Key) ->
case emqx_persistent_session_ds_state:get_stream(Key, S) of
undefined ->
%% This should not happen: we should see every stream
%% in a completed state before it is deleted
true;
SRS ->
not is_stream_fully_acked(CommQos1, CommQos2, SRS)
end
end,
StreamKeysToWait
).
stream_progresses(S, StreamKeys) ->
CommQos1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
CommQos2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
lists:map(
fun({_SubId, Stream} = Key) ->
SRS = emqx_persistent_session_ds_state:get_stream(Key, S),
stream_progress(CommQos1, CommQos2, Stream, SRS)
end,
StreamKeys
).
%%--------------------------------------------------------------------
%% on_disconnect
on_disconnect(S0, #{agent := Agent0} = SharedSubS0) ->
S1 = revoke_all_streams(S0),
Progresses = all_stream_progresses(S1, Agent0, _NeedUnacked = true),
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_disconnect(Agent0, Progresses),
SharedSubS1 = SharedSubS0#{agent => Agent1, scheduled_actions => #{}},
{S1, SharedSubS1}.
%%--------------------------------------------------------------------
%% on_disconnect helpers
revoke_all_streams(S0) ->
fold_shared_stream_states(
fun(ShareTopicFilter, Stream, _SRS, S) ->
revoke_stream(#{share_topic_filter => ShareTopicFilter, stream => Stream}, S)
end,
S0,
S0
).
%%--------------------------------------------------------------------
%% on_info
-spec on_info(emqx_persistent_session_ds_state:t(), t(), term()) ->
{emqx_persistent_session_ds_state:t(), t()}.
on_info(S, #{agent := Agent0} = SharedSubS0, Info) ->
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_info(Agent0, Info),
SharedSubS1 = SharedSubS0#{agent => Agent1},
{S, SharedSubS1}.
%%--------------------------------------------------------------------
%% to_map
-spec to_map(emqx_persistent_session_ds_state:t(), t()) -> map().
to_map(S, _SharedSubS) ->
fold_shared_subs(
fun(ShareTopicFilter, _, Acc) -> Acc#{ShareTopicFilter => lookup(ShareTopicFilter, S)} end,
#{},
S
).
%%--------------------------------------------------------------------
%% cold_get_subscription
-spec cold_get_subscription(emqx_persistent_session_ds:id(), share_topic_filter()) ->
emqx_persistent_session_ds:subscription() | undefined.
cold_get_subscription(SessionId, ShareTopicFilter) ->
case emqx_persistent_session_ds_state:cold_get_subscription(SessionId, ShareTopicFilter) of
[Sub = #{current_state := SStateId}] ->
case
emqx_persistent_session_ds_state:cold_get_subscription_state(SessionId, SStateId)
of
[#{subopts := Subopts}] ->
Sub#{subopts => Subopts};
_ ->
undefined
end;
_ ->
undefined
end.
%%--------------------------------------------------------------------
%% Generic helpers
%%--------------------------------------------------------------------
lookup(ShareTopicFilter, S) ->
case emqx_persistent_session_ds_state:get_subscription(ShareTopicFilter, S) of
Sub = #{current_state := SStateId} ->
case emqx_persistent_session_ds_state:get_subscription_state(SStateId, S) of
#{subopts := SubOpts} ->
Sub#{subopts => SubOpts};
undefined ->
undefined
end;
undefined ->
undefined
end.
stream_keys_by_sub_id(S, MatchSubId) ->
emqx_persistent_session_ds_state:fold_streams(
fun({SubId, _Stream} = StreamKey, _SRS, StreamKeys) ->
case SubId of
MatchSubId ->
[StreamKey | StreamKeys];
_ ->
StreamKeys
end
end,
[],
S
).
stream_progress(
CommQos1,
CommQos2,
Stream,
#srs{
it_end = EndIt,
it_begin = BeginIt
} = SRS
) ->
Iterator =
case is_stream_fully_acked(CommQos1, CommQos2, SRS) of
true -> EndIt;
false -> BeginIt
end,
#{
stream => Stream,
progress => #{
iterator => Iterator
},
use_finished => is_use_finished(SRS)
}.
fold_shared_subs(Fun, Acc, S) ->
emqx_persistent_session_ds_state:fold_subscriptions(
fun
(#share{} = ShareTopicFilter, Sub, Acc0) -> Fun(ShareTopicFilter, Sub, Acc0);
(_, _Sub, Acc0) -> Acc0
end,
Acc,
S
).
fold_shared_stream_states(Fun, Acc, S) ->
%% TODO
%% Optimize or cache
ShareTopicFilters = fold_shared_subs(
fun
(#share{} = ShareTopicFilter, #{id := Id} = _Sub, Acc0) ->
Acc0#{Id => ShareTopicFilter};
(_, _, Acc0) ->
Acc0
end,
#{},
S
),
emqx_persistent_session_ds_state:fold_streams(
fun({SubId, Stream}, SRS, Acc0) ->
case ShareTopicFilters of
#{SubId := ShareTopicFilter} ->
Fun(ShareTopicFilter, Stream, SRS, Acc0);
_ ->
Acc0
end
end,
Acc,
S
).
to_agent_subscription(_S, Subscription) ->
maps:with([start_time], Subscription).
agent_opts(#{session_id := SessionId}) ->
#{session_id => SessionId}.
-dialyzer({nowarn_function, now_ms/0}).
now_ms() ->
erlang:system_time(millisecond).
is_use_finished(#srs{unsubscribed = Unsubscribed}) ->
Unsubscribed.
is_stream_started(CommQos1, CommQos2, #srs{first_seqno_qos1 = Q1, last_seqno_qos1 = Q2}) ->
(CommQos1 >= Q1) or (CommQos2 >= Q2).
is_stream_fully_acked(_, _, #srs{
first_seqno_qos1 = Q1, last_seqno_qos1 = Q1, first_seqno_qos2 = Q2, last_seqno_qos2 = Q2
}) ->
%% Streams where the last chunk doesn't contain any QoS1 and 2
%% messages are considered fully acked:
true;
is_stream_fully_acked(Comm1, Comm2, #srs{last_seqno_qos1 = S1, last_seqno_qos2 = S2}) ->
(Comm1 >= S1) andalso (Comm2 >= S2).
%%--------------------------------------------------------------------
%% Formatters
%%--------------------------------------------------------------------
format_schedule_action(#{
type := Type, progresses := Progresses, stream_keys_to_wait := StreamKeysToWait
}) ->
#{
type => Type,
progresses => format_stream_progresses(Progresses),
stream_keys_to_wait => format_stream_keys(StreamKeysToWait)
}.
format_stream_progresses(Streams) ->
lists:map(
fun format_stream_progress/1,
Streams
).
format_stream_progress(#{stream := Stream, progress := Progress} = Value) ->
Value#{stream => format_opaque(Stream), progress => format_progress(Progress)}.
format_progress(#{iterator := Iterator} = Progress) ->
Progress#{iterator => format_opaque(Iterator)}.
format_stream_key(beginning) -> beginning;
format_stream_key({SubId, Stream}) -> {SubId, format_opaque(Stream)}.
format_stream_keys(StreamKeys) ->
lists:map(
fun format_stream_key/1,
StreamKeys
).
format_lease_events(Events) ->
lists:map(
fun format_lease_event/1,
Events
).
format_lease_event(#{stream := Stream, progress := Progress} = Event) ->
Event#{stream => format_opaque(Stream), progress => format_progress(Progress)};
format_lease_event(#{stream := Stream} = Event) ->
Event#{stream => format_opaque(Stream)}.
format_opaque(Opaque) ->
erlang:phash2(Opaque).

View File

@ -0,0 +1,138 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_persistent_session_ds_shared_subs_agent).
-include("shared_subs_agent.hrl").
-include("emqx_session.hrl").
-include("session_internals.hrl").
-type session_id() :: emqx_persistent_session_ds:id().
-type subscription() :: #{
start_time := emqx_ds:time()
}.
-type t() :: term().
-type share_topic_filter() :: emqx_persistent_session_ds:share_topic_filter().
-type opts() :: #{
session_id := session_id()
}.
%% TODO
%% These records go over the network, so we'd better shrink them:
%% * use integer keys
%% * somehow avoid passing stream and topic_filter; they are both part of the iterator
-type stream_lease() :: #{
type => lease,
%% Used as "external" subscription_id
share_topic_filter := share_topic_filter(),
stream := emqx_ds:stream(),
iterator := emqx_ds:iterator()
}.
-type stream_revoke() :: #{
type => revoke,
share_topic_filter := share_topic_filter(),
stream := emqx_ds:stream()
}.
-type stream_lease_event() :: stream_lease() | stream_revoke().
-type stream_progress() :: #{
share_topic_filter := share_topic_filter(),
stream := emqx_ds:stream(),
iterator := emqx_ds:iterator(),
use_finished := boolean()
}.
-export_type([
t/0,
subscription/0,
session_id/0,
stream_lease_event/0,
opts/0
]).
-export([
new/1,
open/2,
can_subscribe/3,
on_subscribe/3,
on_unsubscribe/3,
on_stream_progress/2,
on_info/2,
on_disconnect/2,
renew_streams/1
]).
-export([
send/2,
send_after/3
]).
%%--------------------------------------------------------------------
%% Behaviour
%%--------------------------------------------------------------------
-callback new(opts()) -> t().
-callback open([{share_topic_filter(), subscription()}], opts()) -> t().
-callback can_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> ok | {error, term()}.
-callback on_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> t().
-callback on_unsubscribe(t(), share_topic_filter(), [stream_progress()]) -> t().
-callback on_disconnect(t(), [stream_progress()]) -> t().
-callback renew_streams(t()) -> {[stream_lease_event()], t()}.
-callback on_stream_progress(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
-callback on_info(t(), term()) -> t().
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
-spec new(opts()) -> t().
new(Opts) ->
?shared_subs_agent:new(Opts).
-spec open([{share_topic_filter(), subscription()}], opts()) -> t().
open(Topics, Opts) ->
?shared_subs_agent:open(Topics, Opts).
-spec can_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> ok | {error, term()}.
can_subscribe(Agent, ShareTopicFilter, SubOpts) ->
?shared_subs_agent:can_subscribe(Agent, ShareTopicFilter, SubOpts).
-spec on_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> t().
on_subscribe(Agent, ShareTopicFilter, SubOpts) ->
?shared_subs_agent:on_subscribe(Agent, ShareTopicFilter, SubOpts).
-spec on_unsubscribe(t(), share_topic_filter(), [stream_progress()]) -> t().
on_unsubscribe(Agent, ShareTopicFilter, StreamProgresses) ->
?shared_subs_agent:on_unsubscribe(Agent, ShareTopicFilter, StreamProgresses).
-spec on_disconnect(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
on_disconnect(Agent, StreamProgresses) ->
?shared_subs_agent:on_disconnect(Agent, StreamProgresses).
-spec renew_streams(t()) -> {[stream_lease_event()], t()}.
renew_streams(Agent) ->
?shared_subs_agent:renew_streams(Agent).
-spec on_stream_progress(t(), #{share_topic_filter() => [stream_progress()]}) -> t().
on_stream_progress(Agent, StreamProgress) ->
?shared_subs_agent:on_stream_progress(Agent, StreamProgress).
-spec on_info(t(), term()) -> t().
on_info(Agent, Info) ->
?shared_subs_agent:on_info(Agent, Info).
-spec send(pid(), term()) -> term().
send(Dest, Msg) ->
erlang:send(Dest, ?session_message(?shared_sub_message(Msg))).
-spec send_after(non_neg_integer(), pid(), term()) -> reference().
send_after(Time, Dest, Msg) ->
erlang:send_after(Time, Dest, ?session_message(?shared_sub_message(Msg))).
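%% All functions above dispatch through the ?shared_subs_agent macro (see
%% shared_subs_agent.hrl), so the concrete agent module is selected at compile
%% time; the null agent simply rejects attempts to use shared subscriptions.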

View File

@ -0,0 +1,54 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_persistent_session_ds_shared_subs_null_agent).
-include("emqx_mqtt.hrl").
-export([
new/1,
open/2,
can_subscribe/3,
on_subscribe/3,
on_unsubscribe/3,
on_stream_progress/2,
on_info/2,
on_disconnect/2,
renew_streams/1
]).
-behaviour(emqx_persistent_session_ds_shared_subs_agent).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
new(_Opts) ->
undefined.
open(_Topics, _Opts) ->
undefined.
can_subscribe(_Agent, _TopicFilter, _SubOpts) ->
{error, ?RC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED}.
on_subscribe(Agent, _TopicFilter, _SubOpts) ->
Agent.
on_unsubscribe(Agent, _TopicFilter, _Progresses) ->
Agent.
on_disconnect(Agent, _) ->
Agent.
renew_streams(Agent) ->
{[], Agent}.
on_stream_progress(Agent, _StreamProgress) ->
Agent.
on_info(Agent, _Info) ->
Agent.

View File

@ -39,7 +39,7 @@
-export([get_peername/1, set_peername/2]).
-export([get_protocol/1, set_protocol/2]).
-export([new_id/1]).
-export([get_stream/2, put_stream/3, del_stream/2, fold_streams/3, n_streams/1]).
-export([get_stream/2, put_stream/3, del_stream/2, fold_streams/3, iter_streams/2, n_streams/1]).
-export([get_seqno/2, put_seqno/3]).
-export([get_rank/2, put_rank/3, del_rank/2, fold_ranks/3]).
-export([
@ -66,11 +66,14 @@
n_awaiting_rel/1
]).
-export([iter_next/1]).
-export([make_session_iterator/0, session_iterator_next/2]).
-export_type([
t/0,
metadata/0,
iter/2,
seqno_type/0,
stream_key/0,
rank_key/0,
@ -89,6 +92,8 @@
-type message() :: emqx_types:message().
-opaque iter(K, V) :: gb_trees:iter(K, V).
-opaque session_iterator() :: emqx_persistent_session_ds:id() | '$end_of_table'.
%% Generic key-value wrapper that is used for exporting arbitrary
@ -113,7 +118,7 @@
-type pmap(K, V) ::
#pmap{
table :: atom(),
cache :: #{K => V},
cache :: #{K => V} | gb_trees:tree(K, V),
dirty :: #{K => dirty | del}
}.
@ -192,7 +197,7 @@
-endif.
%%================================================================================
%% API funcions
%% API functions
%%================================================================================
-spec create_tables() -> ok.
@ -394,7 +399,9 @@ new_id(Rec) ->
get_subscription(TopicFilter, Rec) ->
gen_get(?subscriptions, TopicFilter, Rec).
-spec cold_get_subscription(emqx_persistent_session_ds:id(), emqx_types:topic()) ->
-spec cold_get_subscription(
emqx_persistent_session_ds:id(), emqx_types:topic() | emqx_types:share()
) ->
[emqx_persistent_session_ds_subs:subscription()].
cold_get_subscription(SessionId, Topic) ->
kv_pmap_read(?subscription_tab, SessionId, Topic).
@ -476,6 +483,14 @@ del_stream(Key, Rec) ->
fold_streams(Fun, Acc, Rec) ->
gen_fold(?streams, Fun, Acc, Rec).
-spec iter_streams(_StartAfter :: stream_key() | beginning, t()) ->
iter(stream_key(), emqx_persistent_session_ds:stream_state()).
iter_streams(After, Rec) ->
%% NOTE
%% No special handling for `beginning', as it always compares less
%% than any `stream_key()'.
gen_iter_after(?streams, After, Rec).
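%% Usage sketch (hypothetical bindings): It0 = iter_streams(beginning, Rec)
%% walks the streams in key order via iter_next/1; passing the last seen
%% stream_key() as `After' on a later call resumes right after that key.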
-spec n_streams(t()) -> non_neg_integer().
n_streams(Rec) ->
gen_size(?streams, Rec).
@ -534,6 +549,12 @@ n_awaiting_rel(Rec) ->
%%
-spec iter_next(iter(K, V)) -> {K, V, iter(K, V)} | none.
iter_next(It0) ->
gen_iter_next(It0).
%%
-spec make_session_iterator() -> session_iterator().
make_session_iterator() ->
mnesia:dirty_first(?session_tab).
@ -601,6 +622,14 @@ gen_size(Field, Rec) ->
check_sequence(Rec),
pmap_size(maps:get(Field, Rec)).
gen_iter_after(Field, After, Rec) ->
check_sequence(Rec),
pmap_iter_after(After, maps:get(Field, Rec)).
gen_iter_next(It) ->
%% NOTE: Currently, gbt iterators are the only type of iterator.
gbt_iter_next(It).
-spec update_pmaps(fun((pmap(_K, _V) | undefined, atom()) -> term()), map()) -> map().
update_pmaps(Fun, Map) ->
lists:foldl(
@ -619,7 +648,7 @@ update_pmaps(Fun, Map) ->
%% This function should be run in a transaction.
-spec pmap_open(atom(), emqx_persistent_session_ds:id()) -> pmap(_K, _V).
pmap_open(Table, SessionId) ->
Clean = maps:from_list(kv_pmap_restore(Table, SessionId)),
Clean = cache_from_list(Table, kv_pmap_restore(Table, SessionId)),
#pmap{
table = Table,
cache = Clean,
@ -627,29 +656,29 @@ pmap_open(Table, SessionId) ->
}.
-spec pmap_get(K, pmap(K, V)) -> V | undefined.
pmap_get(K, #pmap{cache = Cache}) ->
maps:get(K, Cache, undefined).
pmap_get(K, #pmap{table = Table, cache = Cache}) ->
cache_get(Table, K, Cache).
-spec pmap_put(K, V, pmap(K, V)) -> pmap(K, V).
pmap_put(K, V, Pmap = #pmap{dirty = Dirty, cache = Cache}) ->
pmap_put(K, V, Pmap = #pmap{table = Table, dirty = Dirty, cache = Cache}) ->
Pmap#pmap{
cache = maps:put(K, V, Cache),
cache = cache_put(Table, K, V, Cache),
dirty = Dirty#{K => dirty}
}.
-spec pmap_del(K, pmap(K, V)) -> pmap(K, V).
pmap_del(
Key,
Pmap = #pmap{dirty = Dirty, cache = Cache}
Pmap = #pmap{table = Table, dirty = Dirty, cache = Cache}
) ->
Pmap#pmap{
cache = maps:remove(Key, Cache),
cache = cache_remove(Table, Key, Cache),
dirty = Dirty#{Key => del}
}.
-spec pmap_fold(fun((K, V, A) -> A), A, pmap(K, V)) -> A.
pmap_fold(Fun, Acc, #pmap{cache = Cache}) ->
maps:fold(Fun, Acc, Cache).
pmap_fold(Fun, Acc, #pmap{table = Table, cache = Cache}) ->
cache_fold(Table, Fun, Acc, Cache).
-spec pmap_commit(emqx_persistent_session_ds:id(), pmap(K, V)) -> pmap(K, V).
pmap_commit(
@ -660,7 +689,7 @@ pmap_commit(
(K, del) ->
kv_pmap_delete(Tab, SessionId, K);
(K, dirty) ->
V = maps:get(K, Cache),
V = cache_get(Tab, K, Cache),
kv_pmap_persist(Tab, SessionId, K, V)
end,
Dirty
@ -670,13 +699,110 @@ pmap_commit(
}.
-spec pmap_format(pmap(_K, _V)) -> map().
pmap_format(#pmap{cache = Cache}) ->
Cache.
pmap_format(#pmap{table = Table, cache = Cache}) ->
cache_format(Table, Cache).
-spec pmap_size(pmap(_K, _V)) -> non_neg_integer().
pmap_size(#pmap{cache = Cache}) ->
pmap_size(#pmap{table = Table, cache = Cache}) ->
cache_size(Table, Cache).
pmap_iter_after(After, #pmap{table = Table, cache = Cache}) ->
%% NOTE: Only valid for gbt-backed PMAPs.
gbt = cache_data_type(Table),
gbt_iter_after(After, Cache).
%%
cache_data_type(?stream_tab) -> gbt;
cache_data_type(_Table) -> map.
cache_from_list(?stream_tab, L) ->
gbt_from_list(L);
cache_from_list(_Table, L) ->
maps:from_list(L).
cache_get(?stream_tab, K, Cache) ->
gbt_get(K, Cache, undefined);
cache_get(_Table, K, Cache) ->
maps:get(K, Cache, undefined).
cache_put(?stream_tab, K, V, Cache) ->
gbt_put(K, V, Cache);
cache_put(_Table, K, V, Cache) ->
maps:put(K, V, Cache).
cache_remove(?stream_tab, K, Cache) ->
gbt_remove(K, Cache);
cache_remove(_Table, K, Cache) ->
maps:remove(K, Cache).
cache_fold(?stream_tab, Fun, Acc, Cache) ->
gbt_fold(Fun, Acc, Cache);
cache_fold(_Table, Fun, Acc, Cache) ->
maps:fold(Fun, Acc, Cache).
cache_format(?stream_tab, Cache) ->
gbt_format(Cache);
cache_format(_Table, Cache) ->
Cache.
cache_size(?stream_tab, Cache) ->
gbt_size(Cache);
cache_size(_Table, Cache) ->
maps:size(Cache).
%% PMAP Cache implementation backed by `gb_trees'.
%% Supports iteration starting from specific key.
gbt_from_list(L) ->
lists:foldl(
fun({K, V}, Acc) -> gb_trees:insert(K, V, Acc) end,
gb_trees:empty(),
L
).
gbt_get(K, Cache, undefined) ->
case gb_trees:lookup(K, Cache) of
none -> undefined;
{_, V} -> V
end.
gbt_put(K, V, Cache) ->
gb_trees:enter(K, V, Cache).
gbt_remove(K, Cache) ->
gb_trees:delete_any(K, Cache).
gbt_format(Cache) ->
gb_trees:to_list(Cache).
gbt_fold(Fun, Acc, Cache) ->
It = gb_trees:iterator(Cache),
gbt_fold_iter(Fun, Acc, It).
gbt_fold_iter(Fun, Acc, It0) ->
case gb_trees:next(It0) of
{K, V, It} ->
gbt_fold_iter(Fun, Fun(K, V, Acc), It);
_ ->
Acc
end.
gbt_size(Cache) ->
gb_trees:size(Cache).
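%% `gb_trees:iterator_from/2' starts at the smallest key that is >= After;
%% since callers want keys strictly after the last visited one, skip the
%% first element when it is exactly `After'.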
gbt_iter_after(After, Cache) ->
It0 = gb_trees:iterator_from(After, Cache),
case gb_trees:next(It0) of
{After, _, It} ->
It;
_ ->
It0
end.
gbt_iter_next(It) ->
gb_trees:next(It).
%% Functions dealing with set tables:
kv_persist(Tab, SessionId, Val0) ->

View File

@ -16,7 +16,8 @@
-module(emqx_persistent_session_ds_stream_scheduler).
%% API:
-export([find_new_streams/1, find_replay_streams/1, is_fully_acked/2]).
-export([iter_next_streams/2, next_stream/1]).
-export([find_replay_streams/1, is_fully_acked/2]).
-export([renew_streams/1, on_unsubscribe/2]).
%% behavior callbacks:
@ -35,6 +36,29 @@
%% Type declarations
%%================================================================================
-type stream_key() :: emqx_persistent_session_ds_state:stream_key().
-type stream_state() :: emqx_persistent_session_ds:stream_state().
%% Restartable iterator with a filter and an iteration limit.
-record(iter, {
limit :: non_neg_integer(),
filter,
it,
it_cont
}).
-type iter(K, V, IterInner) :: #iter{
filter :: fun((K, V) -> boolean()),
it :: IterInner,
it_cont :: IterInner
}.
-type iter_stream() :: iter(
stream_key(),
stream_state(),
emqx_persistent_session_ds_state:iter(stream_key(), stream_state())
).
%%================================================================================
%% API functions
%%================================================================================
@ -70,9 +94,9 @@ find_replay_streams(S) ->
%%
%% This function is non-deterministic: it randomizes the order of
%% streams to ensure fair replay of different topics.
-spec find_new_streams(emqx_persistent_session_ds_state:t()) ->
[{emqx_persistent_session_ds_state:stream_key(), emqx_persistent_session_ds:stream_state()}].
find_new_streams(S) ->
-spec iter_next_streams(_LastVisited :: stream_key(), emqx_persistent_session_ds_state:t()) ->
iter_stream().
iter_next_streams(LastVisited, S) ->
%% FIXME: this function is currently very sensitive to the
%% consistency of the packet IDs on both broker and client side.
%%
@ -87,23 +111,44 @@ find_new_streams(S) ->
%% after timeout?)
Comm1 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_1), S),
Comm2 = emqx_persistent_session_ds_state:get_seqno(?committed(?QOS_2), S),
shuffle(
emqx_persistent_session_ds_state:fold_streams(
fun
(_Key, #srs{it_end = end_of_stream}, Acc) ->
Acc;
(Key, Stream, Acc) ->
case is_fully_acked(Comm1, Comm2, Stream) andalso not Stream#srs.unsubscribed of
Filter = fun(_Key, Stream) -> is_fetchable(Comm1, Comm2, Stream) end,
#iter{
%% Limit the iteration to one round over all streams:
limit = emqx_persistent_session_ds_state:n_streams(S),
%% Filter out the streams not eligible for fetching:
filter = Filter,
%% Start the iteration right after the last visited stream:
it = emqx_persistent_session_ds_state:iter_streams(LastVisited, S),
%% Restart the iteration from the beginning:
it_cont = emqx_persistent_session_ds_state:iter_streams(beginning, S)
}.
-spec next_stream(iter_stream()) -> {stream_key(), stream_state(), iter_stream()} | none.
next_stream(#iter{limit = 0}) ->
none;
next_stream(ItStream0 = #iter{limit = N, filter = Filter, it = It0, it_cont = ItCont}) ->
case emqx_persistent_session_ds_state:iter_next(It0) of
{Key, Stream, It} ->
ItStream = ItStream0#iter{it = It, limit = N - 1},
case Filter(Key, Stream) of
true ->
[{Key, Stream} | Acc];
{Key, Stream, ItStream};
false ->
Acc
end
end,
[],
S
)
).
next_stream(ItStream)
end;
none when It0 =/= ItCont ->
%% Restart the iteration from the beginning:
ItStream = ItStream0#iter{it = ItCont},
next_stream(ItStream);
none ->
%% No point in restarting the iteration, `ItCont` is empty:
none
end.
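%% Usage sketch (simplified, hypothetical names):
%%   It0 = iter_next_streams(LastVisitedKey, S),
%%   case next_stream(It0) of
%%       {Key, Srs, It1} -> ... fetch a batch for Key, then continue with It1;
%%       none -> ... no fetchable stream in this round
%%   end.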
is_fetchable(_Comm1, _Comm2, #srs{it_end = end_of_stream}) ->
false;
is_fetchable(Comm1, Comm2, #srs{unsubscribed = Unsubscribed} = Stream) ->
is_fully_acked(Comm1, Comm2, Stream) andalso not Unsubscribed.
%% @doc This function makes the session aware of the new streams.
%%
@ -127,7 +172,12 @@ renew_streams(S0) ->
S1 = remove_unsubscribed_streams(S0),
S2 = remove_fully_replayed_streams(S1),
S3 = update_stream_subscription_state_ids(S2),
emqx_persistent_session_ds_subs:fold(
%% For shared subscriptions, the streams are populated by
%% `emqx_persistent_session_ds_shared_subs`.
%% TODO
%% Move discovery of proper streams
%% out of the scheduler for complete symmetry?
fold_proper_subscriptions(
fun
(Key, #{start_time := StartTime, id := SubId, current_state := SStateId}, Acc) ->
TopicFilter = emqx_topic:words(Key),
@ -206,9 +256,6 @@ ensure_iterator(TopicFilter, StartTime, SubId, SStateId, {{RankX, RankY}, Stream
Key = {SubId, Stream},
case emqx_persistent_session_ds_state:get_stream(Key, S) of
undefined ->
?SLOG(debug, #{
msg => new_stream, key => Key, stream => Stream
}),
case emqx_ds:make_iterator(?PERSISTENT_MESSAGE_DB, Stream, TopicFilter, StartTime) of
{ok, Iterator} ->
NewStreamState = #srs{
@ -408,15 +455,12 @@ is_fully_acked(_, _, #srs{
is_fully_acked(Comm1, Comm2, #srs{last_seqno_qos1 = S1, last_seqno_qos2 = S2}) ->
(Comm1 >= S1) andalso (Comm2 >= S2).
-spec shuffle([A]) -> [A].
shuffle(L0) ->
L1 = lists:map(
fun(A) ->
%% maybe topic/stream prioritization could be introduced here?
{rand:uniform(), A}
fold_proper_subscriptions(Fun, Acc, S) ->
emqx_persistent_session_ds_state:fold_subscriptions(
fun
(#share{}, _Sub, Acc0) -> Acc0;
(TopicFilter, Sub, Acc0) -> Fun(TopicFilter, Sub, Acc0)
end,
L0
),
L2 = lists:sort(L1),
{_, L} = lists:unzip(L2),
L.
Acc,
S
).

View File

@ -30,8 +30,7 @@
on_session_drop/2,
gc/1,
lookup/2,
to_map/1,
fold/3
to_map/1
]).
%% Management API:
@ -93,6 +92,7 @@ on_subscribe(TopicFilter, SubOpts, #{id := SessionId, s := S0, props := Props})
case emqx_persistent_session_ds_state:n_subscriptions(S0) < MaxSubscriptions of
true ->
ok = emqx_persistent_session_ds_router:do_add_route(TopicFilter, SessionId),
_ = emqx_external_broker:add_persistent_route(TopicFilter, SessionId),
{SubId, S1} = emqx_persistent_session_ds_state:new_id(S0),
{SStateId, S2} = emqx_persistent_session_ds_state:new_id(S1),
SState = #{
@ -155,12 +155,13 @@ on_unsubscribe(SessionId, TopicFilter, S0) ->
#{session_id => SessionId, topic_filter => TopicFilter},
ok = emqx_persistent_session_ds_router:do_delete_route(TopicFilter, SessionId)
),
_ = emqx_external_broker:delete_persistent_route(TopicFilter, SessionId),
{ok, emqx_persistent_session_ds_state:del_subscription(TopicFilter, S0), Subscription}
end.
-spec on_session_drop(emqx_persistent_session_ds:id(), emqx_persistent_session_ds_state:t()) -> ok.
on_session_drop(SessionId, S0) ->
fold(
_ = fold_proper_subscriptions(
fun(TopicFilter, _Subscription, S) ->
case on_unsubscribe(SessionId, TopicFilter, S) of
{ok, S1, _} -> S1;
@ -169,10 +170,14 @@ on_session_drop(SessionId, S0) ->
end,
S0,
S0
).
),
ok.
%% @doc Remove subscription states that don't have a parent, and that
%% don't have any unacked messages:
%% don't have any unacked messages.
%% TODO
%% This function collects shared subs as well
%% Move to a separate module to keep symmetry?
-spec gc(emqx_persistent_session_ds_state:t()) -> emqx_persistent_session_ds_state:t().
gc(S0) ->
%% Create a set of subscription states IDs referenced either by a
@ -210,7 +215,7 @@ gc(S0) ->
S0
).
%% @doc Fold over active subscriptions:
%% @doc Lookup a subscription and merge it with its current state:
-spec lookup(emqx_persistent_session_ds:topic_filter(), emqx_persistent_session_ds_state:t()) ->
emqx_persistent_session_ds:subscription() | undefined.
lookup(TopicFilter, S) ->
@ -230,22 +235,12 @@ lookup(TopicFilter, S) ->
%% purpose:
-spec to_map(emqx_persistent_session_ds_state:t()) -> map().
to_map(S) ->
fold(
fold_proper_subscriptions(
fun(TopicFilter, _, Acc) -> Acc#{TopicFilter => lookup(TopicFilter, S)} end,
#{},
S
).
%% @doc Fold over active subscriptions:
-spec fold(
fun((emqx_types:topic(), emqx_persistent_session_ds:subscription(), Acc) -> Acc),
Acc,
emqx_persistent_session_ds_state:t()
) ->
Acc.
fold(Fun, Acc, S) ->
emqx_persistent_session_ds_state:fold_subscriptions(Fun, Acc, S).
-spec cold_get_subscription(emqx_persistent_session_ds:id(), emqx_types:topic()) ->
emqx_persistent_session_ds:subscription() | undefined.
cold_get_subscription(SessionId, Topic) ->
@ -267,5 +262,15 @@ cold_get_subscription(SessionId, Topic) ->
%% Internal functions
%%================================================================================
fold_proper_subscriptions(Fun, Acc, S) ->
emqx_persistent_session_ds_state:fold_subscriptions(
fun
(#share{}, _Sub, Acc0) -> Acc0;
(TopicFilter, Sub, Acc0) -> Fun(TopicFilter, Sub, Acc0)
end,
Acc,
S
).
now_ms() ->
erlang:system_time(millisecond).

View File

@ -21,7 +21,7 @@
-record(ps_route, {
topic :: binary(),
dest :: emqx_persistent_session_ds:id() | '_'
dest :: emqx_persistent_session_ds_router:dest() | '_'
}).
-record(ps_routeidx, {

View File

@ -71,4 +71,11 @@
sub_state_id :: emqx_persistent_session_ds_subs:subscription_state_id()
}).
%% (Erlang) messages that the session should forward to the
%% shared subscription handler.
-record(shared_sub_message, {
message :: term()
}).
-define(shared_sub_message(MSG), #shared_sub_message{message = MSG}).
-endif.

View File

@ -0,0 +1,45 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-ifndef(SHARED_SUBS_AGENT_HRL).
-define(SHARED_SUBS_AGENT_HRL, true).
-ifdef(EMQX_RELEASE_EDITION).
-if(?EMQX_RELEASE_EDITION == ee).
%% agent from BSL app
-ifdef(TEST).
-define(shared_subs_agent, emqx_ds_shared_sub_agent).
%% clause of -ifdef(TEST).
-else.
%% Until the full implementation is ready, we need to dispatch to the null agent.
%% It will report a "not implemented" error for attempts to use shared subscriptions.
-define(shared_subs_agent, emqx_persistent_session_ds_shared_subs_null_agent).
% -define(shared_subs_agent, emqx_ds_shared_sub_agent).
%% end of -ifdef(TEST).
-endif.
%% clause of -if(?EMQX_RELEASE_EDITION == ee).
-else.
-define(shared_subs_agent, emqx_persistent_session_ds_shared_subs_null_agent).
%% end of -if(?EMQX_RELEASE_EDITION == ee).
-endif.
%% clause of -ifdef(EMQX_RELEASE_EDITION).
-else.
-define(shared_subs_agent, emqx_persistent_session_ds_shared_subs_null_agent).
%% end of -ifdef(EMQX_RELEASE_EDITION).
-endif.
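%% Illustrative summary of the dispatch above (build profiles are assumptions):
%%   EE edition + TEST      -> emqx_ds_shared_sub_agent
%%   EE edition, non-TEST   -> emqx_persistent_session_ds_shared_subs_null_agent
%%   CE / no edition macro  -> emqx_persistent_session_ds_shared_subs_null_agent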
-endif.

View File

@ -0,0 +1,40 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_post_upgrade).
%% Example of a hot upgrade callback function.
%% PR#12765
% -export([
% pr12765_update_stats_timer/1,
% pr20000_ensure_sup_started/3
% ]).
%% Please ensure that every callback function is reentrant.
%% This way, users can attempt upgrade multiple times if an issue arises.
%%
% pr12765_update_stats_timer(_FromVsn) ->
% emqx_stats:update_interval(broker_stats, fun emqx_broker_helper:stats_fun/0).
%
% pr20000_ensure_sup_started(_FromVsn, "5.6.1" ++ _, ChildSpec) ->
% ChildId = maps:get(id, ChildSpec),
% case supervisor:terminate_child(emqx_sup, ChildId) of
% ok -> supervisor:delete_child(emqx_sup, ChildId);
% Error -> Error
% end,
% supervisor:start_child(emqx_sup, ChildSpec);
% pr20000_ensure_sup_started(_FromVsn, _TargetVsn, _) ->
% ok.

View File

@ -62,7 +62,7 @@
streams := [{pid(), quicer:stream_handle()}],
%% New stream opts
stream_opts := map(),
%% If conneciton is resumed from session ticket
%% If connection is resumed from session ticket
is_resumed => boolean(),
%% mqtt message serializer config
serialize => undefined,
@ -70,8 +70,8 @@
}.
-type cb_ret() :: quicer_lib:cb_ret().
%% @doc Data streams initializions are started in parallel with control streams, data streams are blocked
%% for the activation from control stream after it is accepted as a legit conneciton.
%% @doc Data streams initializations are started in parallel with control streams, data streams are blocked
%% for the activation from control stream after it is accepted as a legit connection.
%% For security, the initial number of allowed data streams from client should be limited by
%% 'peer_bidi_stream_count` & 'peer_unidi_stream_count`
-spec activate_data_streams(pid(), {
@ -80,7 +80,7 @@
activate_data_streams(ConnOwner, {PS, Serialize, Channel}) ->
gen_server:call(ConnOwner, {activate_data_streams, {PS, Serialize, Channel}}, infinity).
%% @doc conneciton owner init callback
%% @doc connection owner init callback
-spec init(map()) -> {ok, cb_state()}.
init(#{stream_opts := SOpts} = S) when is_list(SOpts) ->
init(S#{stream_opts := maps:from_list(SOpts)});

View File

@ -1,42 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2017-2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_relup).
%% NOTE: DO NOT remove this `-include`.
%% We use this to force this module to be upgraded every release.
-include("emqx_release.hrl").
-export([
post_release_upgrade/2,
post_release_downgrade/2
]).
-define(INFO(FORMAT), io:format("[emqx_relup] " ++ FORMAT ++ "~n")).
-define(INFO(FORMAT, ARGS), io:format("[emqx_relup] " ++ FORMAT ++ "~n", ARGS)).
%% What to do after upgraded from an old release vsn.
post_release_upgrade(FromRelVsn, _) ->
?INFO("emqx has been upgraded from ~s to ~s!", [FromRelVsn, emqx_release:version()]),
reload_components().
%% What to do after downgraded to an old release vsn.
post_release_downgrade(ToRelVsn, _) ->
?INFO("emqx has been downgraded from ~s to ~s!", [emqx_release:version(), ToRelVsn]),
reload_components().
reload_components() ->
ok.

View File

@ -107,7 +107,14 @@
unused = [] :: nil()
}).
-define(node_patterns(Node), [Node, {'_', Node}]).
-define(dest_patterns(NodeOrExtDest),
case is_atom(NodeOrExtDest) of
%% node
true -> [NodeOrExtDest, {'_', NodeOrExtDest}];
%% external destination
false -> [NodeOrExtDest]
end
).
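%% Illustrative evaluation of the macro above (example values are assumptions):
%%   ?dest_patterns('emqx@host')      evaluates to ['emqx@host', {'_', 'emqx@host'}]
%%   ?dest_patterns({external, Dest}) evaluates to [{external, Dest}]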
-define(UNSUPPORTED, unsupported).
@ -307,13 +314,13 @@ print_routes(Topic) ->
).
-spec cleanup_routes(node()) -> ok.
cleanup_routes(Node) ->
cleanup_routes(get_schema_vsn(), Node).
cleanup_routes(NodeOrExtDest) ->
cleanup_routes(get_schema_vsn(), NodeOrExtDest).
cleanup_routes(v2, Node) ->
cleanup_routes_v2(Node);
cleanup_routes(v1, Node) ->
cleanup_routes_v1(Node).
cleanup_routes(v2, NodeOrExtDest) ->
cleanup_routes_v2(NodeOrExtDest);
cleanup_routes(v1, NodeOrExtDest) ->
cleanup_routes_v1(NodeOrExtDest).
-spec foldl_routes(fun((emqx_types:route(), Acc) -> Acc), Acc) -> Acc.
foldl_routes(FoldFun, AccIn) ->
@ -430,19 +437,19 @@ has_route_v1(Topic, Dest) ->
has_route_tab_entry(Topic, Dest) ->
[] =/= ets:match(?ROUTE_TAB, #route{topic = Topic, dest = Dest}).
cleanup_routes_v1(Node) ->
cleanup_routes_v1(NodeOrExtDest) ->
?with_fallback(
lists:foreach(
fun(Pattern) ->
throw_unsupported(mria:match_delete(?ROUTE_TAB, make_route_rec_pat(Pattern)))
end,
?node_patterns(Node)
?dest_patterns(NodeOrExtDest)
),
cleanup_routes_v1_fallback(Node)
cleanup_routes_v1_fallback(NodeOrExtDest)
).
cleanup_routes_v1_fallback(Node) ->
Patterns = [make_route_rec_pat(P) || P <- ?node_patterns(Node)],
cleanup_routes_v1_fallback(NodeOrExtDest) ->
Patterns = [make_route_rec_pat(P) || P <- ?dest_patterns(NodeOrExtDest)],
mria:transaction(?ROUTE_SHARD, fun() ->
[
mnesia:delete_object(?ROUTE_TAB, Route, write)
@ -512,7 +519,7 @@ lookup_routes_v2(Topic) ->
case emqx_topic:wildcard(Topic) of
true ->
Pat = #routeidx{entry = emqx_topic_index:make_key(Topic, '$1')},
[Dest || [Dest] <- ets:match(?ROUTE_TAB_FILTERS, Pat)];
[#route{topic = Topic, dest = Dest} || [Dest] <- ets:match(?ROUTE_TAB_FILTERS, Pat)];
false ->
lookup_route_tab(Topic)
end.
@ -525,7 +532,7 @@ has_route_v2(Topic, Dest) ->
has_route_tab_entry(Topic, Dest)
end.
cleanup_routes_v2(Node) ->
cleanup_routes_v2(NodeOrExtDest) ->
?with_fallback(
lists:foreach(
fun(Pattern) ->
@ -537,18 +544,18 @@ cleanup_routes_v2(Node) ->
),
throw_unsupported(mria:match_delete(?ROUTE_TAB, make_route_rec_pat(Pattern)))
end,
?node_patterns(Node)
?dest_patterns(NodeOrExtDest)
),
cleanup_routes_v2_fallback(Node)
cleanup_routes_v2_fallback(NodeOrExtDest)
).
cleanup_routes_v2_fallback(Node) ->
cleanup_routes_v2_fallback(NodeOrExtDest) ->
%% NOTE
%% No point in transaction here because all the operations on filters table are dirty.
ok = ets:foldl(
fun(#routeidx{entry = K}, ok) ->
case get_dest_node(emqx_topic_index:get_id(K)) of
Node ->
NodeOrExtDest ->
mria:dirty_delete(?ROUTE_TAB_FILTERS, K);
_ ->
ok
@ -560,7 +567,7 @@ cleanup_routes_v2_fallback(Node) ->
ok = ets:foldl(
fun(#route{dest = Dest} = Route, ok) ->
case get_dest_node(Dest) of
Node ->
NodeOrExtDest ->
mria:dirty_delete_object(?ROUTE_TAB, Route);
_ ->
ok
@ -570,6 +577,8 @@ cleanup_routes_v2_fallback(Node) ->
?ROUTE_TAB
).
get_dest_node({external, _} = ExtDest) ->
ExtDest;
get_dest_node({_, Node}) ->
Node;
get_dest_node(Node) ->

View File

@ -21,11 +21,17 @@
-behaviour(gen_server).
-export([start_link/1]).
-export([start_link/2]).
-export([start_link_pooled/2]).
-export([push/4]).
-export([push/5]).
-export([wait/1]).
-export([suspend/1]).
-export([activate/1]).
-export([stats/0]).
-export([
@ -38,6 +44,15 @@
-type action() :: add | delete.
-type options() :: #{
max_batch_size => pos_integer(),
min_sync_interval => non_neg_integer(),
error_delay => non_neg_integer(),
error_retry_interval => non_neg_integer(),
initial_state => activated | suspended,
batch_handler => {module(), _Function :: atom(), _Args :: list()}
}.
-define(POOL, router_syncer_pool).
-define(MAX_BATCH_SIZE, 1000).
@ -77,13 +92,23 @@
%%
-spec start_link(atom(), pos_integer()) ->
-spec start_link(options()) ->
{ok, pid()} | {error, _Reason}.
start_link(Options) ->
gen_server:start_link(?MODULE, mk_state(Options), []).
-spec start_link(_Name, options()) ->
{ok, pid()} | {error, _Reason}.
start_link(Name, Options) ->
gen_server:start_link(Name, ?MODULE, mk_state(Options), []).
-spec start_link_pooled(atom(), pos_integer()) ->
{ok, pid()}.
start_link(Pool, Id) ->
start_link_pooled(Pool, Id) ->
gen_server:start_link(
{local, emqx_utils:proc_name(?MODULE, Id)},
?MODULE,
[Pool, Id],
{Pool, Id, mk_state(#{})},
[]
).
@ -93,9 +118,16 @@ when
Opts :: #{reply => pid()}.
push(Action, Topic, Dest, Opts) ->
Worker = gproc_pool:pick_worker(?POOL, Topic),
push(Worker, Action, Topic, Dest, Opts).
-spec push(_Ref, action(), emqx_types:topic(), emqx_router:dest(), Opts) ->
ok | _WaitRef :: reference()
when
Opts :: #{reply => pid()}.
push(Ref, Action, Topic, Dest, Opts) ->
Prio = designate_prio(Action, Opts),
Context = mk_push_context(Opts),
_ = erlang:send(Worker, ?PUSH(Prio, {Action, Topic, Dest, Context})),
_ = gproc:send(Ref, ?PUSH(Prio, {Action, Topic, Dest, Context})),
case Context of
[{MRef, _}] ->
MRef;
@ -134,6 +166,16 @@ mk_push_context(_) ->
%%
%% Suspended syncer receives and accumulates route ops but doesn't apply them
%% until it is activated.
suspend(Ref) ->
gen_server:call(Ref, suspend, infinity).
activate(Ref) ->
gen_server:call(Ref, activate, infinity).
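%% Illustrative sketch (not part of this changeset): starting a standalone,
%% initially suspended syncer with a custom batch handler, then toggling it.
%% `demo_standalone_syncer/0' and `my_batch_handler/2' are made-up names.
demo_standalone_syncer() ->
    {ok, Syncer} = emqx_router_syncer:start_link(#{
        initial_state => suspended,
        max_batch_size => 100,
        batch_handler => {?MODULE, my_batch_handler, [#{log => true}]}
    }),
    %% Route ops pushed while suspended are only stashed; activation drains
    %% the stash through the configured batch handler.
    ok = emqx_router_syncer:activate(Syncer),
    ok = emqx_router_syncer:suspend(Syncer),
    Syncer.

my_batch_handler(Batch, _Opts) ->
    %% An empty status map means every operation in the batch succeeded.
    logger:info("router syncer applied batch of ~p ops", [map_size(Batch)]),
    #{}.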
%%
-type stats() :: #{
size := non_neg_integer(),
n_add := non_neg_integer(),
@ -149,10 +191,34 @@ stats() ->
%%
init([Pool, Id]) ->
true = gproc_pool:connect_worker(Pool, {Pool, Id}),
{ok, #{stash => stash_new()}}.
mk_state(Options) ->
#{
state => maps:get(initial_state, Options, active),
stash => stash_new(),
retry_timer => undefined,
max_batch_size => maps:get(max_batch_size, Options, ?MAX_BATCH_SIZE),
min_sync_interval => maps:get(min_sync_interval, Options, ?MIN_SYNC_INTERVAL),
error_delay => maps:get(error_delay, Options, ?ERROR_DELAY),
error_retry_interval => maps:get(error_retry_interval, Options, ?ERROR_RETRY_INTERVAL),
batch_handler => maps:get(batch_handler, Options, default)
}.
%%
init({Pool, Id, State}) ->
true = gproc_pool:connect_worker(Pool, {Pool, Id}),
{ok, State};
init(State) ->
{ok, State}.
handle_call(suspend, _From, State) ->
NState = State#{state := suspended},
{reply, ok, NState};
handle_call(activate, _From, State = #{state := suspended}) ->
NState = run_batch_loop([], State#{state := active}),
{reply, ok, NState};
handle_call(activate, _From, State) ->
{reply, ok, State};
handle_call(stats, _From, State = #{stash := Stash}) ->
{reply, stash_stats(Stash), State};
handle_call(_Call, _From, State) ->
@ -162,11 +228,11 @@ handle_cast(_Msg, State) ->
{noreply, State}.
handle_info({timeout, _TRef, retry}, State) ->
NState = run_batch_loop([], maps:remove(retry_timer, State)),
NState = run_batch_loop([], State#{retry_timer := undefined}),
{noreply, NState};
handle_info(Push = ?PUSH(_, _), State) ->
handle_info(Push = ?PUSH(_, _), State = #{min_sync_interval := MSI}) ->
%% NOTE: Wait a bit to collect potentially overlapping operations.
ok = timer:sleep(?MIN_SYNC_INTERVAL),
ok = timer:sleep(MSI),
NState = run_batch_loop([Push], State),
{noreply, NState}.
@ -175,12 +241,16 @@ terminate(_Reason, _State) ->
%%
run_batch_loop(Incoming, State = #{stash := Stash0}) ->
run_batch_loop(Incoming, State = #{stash := Stash0, state := suspended}) ->
Stash1 = stash_add(Incoming, Stash0),
Stash2 = stash_drain(Stash1),
{Batch, Stash3} = mk_batch(Stash2),
State#{stash := Stash2};
run_batch_loop(Incoming, State = #{stash := Stash0, max_batch_size := MBS}) ->
Stash1 = stash_add(Incoming, Stash0),
Stash2 = stash_drain(Stash1),
{Batch, Stash3} = mk_batch(Stash2, MBS),
?tp_ignore_side_effects_in_prod(router_syncer_new_batch, batch_stats(Batch, Stash3)),
case run_batch(Batch) of
case run_batch(Batch, State) of
Status = #{} ->
ok = send_replies(Status, Batch),
NState = cancel_retry_timer(State#{stash := Stash3}),
@ -203,37 +273,37 @@ run_batch_loop(Incoming, State = #{stash := Stash0}) ->
batch => batch_stats(Batch, Stash3)
}),
NState = State#{stash := Stash2},
ok = timer:sleep(?ERROR_DELAY),
ok = error_cooldown(NState),
ensure_retry_timer(NState)
end.
error_cooldown(#{error_delay := ED}) ->
timer:sleep(ED).
ensure_retry_timer(State = #{retry_timer := undefined, error_retry_interval := ERI}) ->
TRef = emqx_utils:start_timer(ERI, retry),
State#{retry_timer := TRef};
ensure_retry_timer(State = #{retry_timer := _TRef}) ->
State;
ensure_retry_timer(State) ->
TRef = emqx_utils:start_timer(?ERROR_RETRY_INTERVAL, retry),
State#{retry_timer => TRef}.
State.
cancel_retry_timer(State = #{retry_timer := TRef}) ->
ok = emqx_utils:cancel_timer(TRef),
maps:remove(retry_timer, State);
State#{retry_timer := undefined};
cancel_retry_timer(State) ->
State.
%%
mk_batch(Stash) when map_size(Stash) =< ?MAX_BATCH_SIZE ->
mk_batch(Stash, BatchSize) when map_size(Stash) =< BatchSize ->
%% This is perfect situation, we just use stash as batch w/o extra reallocations.
{Stash, stash_new()};
mk_batch(Stash) ->
mk_batch(Stash, BatchSize) ->
%% Take a subset of stashed operations to form a batch.
%% Note that stash is an unordered map, it's not a queue. The order of operations is
%% not preserved strictly, only loosely, because of how we start from high priority
%% operations and go down to low priority ones. This might cause some operations to
%% stay in stash for unfairly long time, when there are many high priority operations.
%% However, it's unclear how likely this is to happen in practice.
mk_batch(Stash, ?MAX_BATCH_SIZE).
mk_batch(Stash, BatchSize) ->
mk_batch(?PRIO_HI, #{}, BatchSize, Stash).
mk_batch(Prio, Batch, SizeLeft, Stash) ->
@ -278,10 +348,12 @@ replyctx_send(Result, RefsPids) ->
%%
run_batch(Batch) when map_size(Batch) > 0 ->
run_batch(Empty, _State) when Empty =:= #{} ->
#{};
run_batch(Batch, #{batch_handler := default}) ->
catch emqx_router:do_batch(Batch);
run_batch(_Empty) ->
#{}.
run_batch(Batch, #{batch_handler := {Module, Function, Args}}) ->
erlang:apply(Module, Function, [Batch | Args]).
%%

View File

@ -137,7 +137,7 @@ maybe_badrpc(Delivery) ->
Delivery.
max_client_num() ->
emqx:get_config([rpc, tcp_client_num], ?DefaultClientNum).
emqx:get_config([rpc, client_num], ?DefaultClientNum).
-spec unwrap_erpc(emqx_rpc:erpc(A) | [emqx_rpc:erpc(A)]) -> A | {error, _Err} | list().
unwrap_erpc(Res) when is_list(Res) ->

View File

@ -351,6 +351,7 @@ fields("authz_cache") ->
#{
default => true,
required => true,
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(fields_cache_enable)
}
)},
@ -387,6 +388,7 @@ fields("flapping_detect") ->
boolean(),
#{
default => false,
%% importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(flapping_detect_enable)
}
)},
@ -423,6 +425,7 @@ fields("force_shutdown") ->
boolean(),
#{
default => true,
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(force_shutdown_enable)
}
)},
@ -452,6 +455,7 @@ fields("overload_protection") ->
boolean(),
#{
desc => ?DESC(overload_protection_enable),
%% importance => ?IMPORTANCE_NO_DOC,
default => false
}
)},
@ -512,7 +516,11 @@ fields("force_gc") ->
{"enable",
sc(
boolean(),
#{default => true, desc => ?DESC(force_gc_enable)}
#{
default => true,
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(force_gc_enable)
}
)},
{"count",
sc(
@ -1665,6 +1673,7 @@ fields("durable_sessions") ->
sc(
boolean(), #{
desc => ?DESC(durable_sessions_enable),
%% importance => ?IMPORTANCE_NO_DOC,
default => false
}
)},
@ -1888,6 +1897,7 @@ base_listener(Bind) ->
#{
default => true,
aliases => [enabled],
importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(fields_listener_enabled)
}
)},
@ -1970,10 +1980,6 @@ zones_field_schema() ->
}
).
desc("persistent_session_store") ->
"Settings for message persistence.";
desc("persistent_session_builtin") ->
"Settings for the built-in storage engine of persistent messages.";
desc("persistent_table_mria_opts") ->
"Tuning options for the mria table.";
desc("stats") ->
@ -2420,6 +2426,7 @@ client_ssl_opts_schema(Defaults) ->
boolean(),
#{
default => false,
%% importance => ?IMPORTANCE_NO_DOC,
desc => ?DESC(client_ssl_opts_schema_enable)
}
)},
@ -3643,9 +3650,9 @@ mqtt_general() ->
)},
{"retry_interval",
sc(
timeout_duration(),
hoconsc:union([infinity, timeout_duration()]),
#{
default => <<"30s">>,
default => infinity,
desc => ?DESC(mqtt_retry_interval)
}
)},

View File

@ -30,7 +30,7 @@
-define(LOADER, emqx_secret_loader).
%%================================================================================
%% API funcions
%% API functions
%%================================================================================
%% @doc Wrap a term in a secret closure.

View File

@ -83,6 +83,7 @@
-export([
deliver/3,
handle_info/2,
handle_timeout/3,
disconnect/3,
terminate/3
@ -188,6 +189,10 @@
-callback destroy(t() | clientinfo()) -> ok.
-callback clear_will_message(t()) -> t().
-callback publish_will_message_now(t(), message()) -> t().
-callback handle_timeout(clientinfo(), common_timer_name() | custom_timer_name(), t()) ->
{ok, replies(), t()}
| {ok, replies(), timeout(), t()}.
-callback handle_info(term(), t()) -> t().
%%--------------------------------------------------------------------
%% Create a Session
@ -484,6 +489,14 @@ enrich_subopts(_Opt, _V, Msg, _) ->
handle_timeout(ClientInfo, Timer, Session) ->
?IMPL(Session):handle_timeout(ClientInfo, Timer, Session).
%%--------------------------------------------------------------------
%% Generic Messages
%%--------------------------------------------------------------------
-spec handle_info(term(), t()) -> t().
handle_info(Info, Session) ->
?IMPL(Session):handle_info(Info, Session).
%%--------------------------------------------------------------------
-spec ensure_timer(custom_timer_name(), timeout(), map()) ->
@ -601,7 +614,7 @@ should_keep(MsgDeliver) ->
not is_banned_msg(MsgDeliver).
is_banned_msg(#message{from = ClientId}) ->
[] =/= emqx_banned:look_up({clientid, ClientId}).
emqx_banned:check_clientid(ClientId).
%%--------------------------------------------------------------------

View File

@ -87,6 +87,7 @@
deliver/3,
replay/3,
handle_timeout/3,
handle_info/2,
disconnect/2,
terminate/2
]).
@ -597,14 +598,23 @@ handle_timeout(ClientInfo, retry_delivery, Session) ->
handle_timeout(ClientInfo, expire_awaiting_rel, Session) ->
expire(ClientInfo, Session).
%%--------------------------------------------------------------------
%% Generic messages
%%--------------------------------------------------------------------
-spec handle_info(term(), session()) -> session().
handle_info(Msg, Session) ->
?SLOG(warning, #{msg => emqx_session_mem_unknown_message, message => Msg}),
Session.
%%--------------------------------------------------------------------
%% Retry Delivery
%%--------------------------------------------------------------------
-spec retry(clientinfo(), session()) ->
{ok, replies(), session()}.
retry(ClientInfo, Session = #session{inflight = Inflight}) ->
case emqx_inflight:is_empty(Inflight) of
{ok, replies(), session()} | {ok, replies(), timeout(), session()}.
retry(ClientInfo, Session = #session{inflight = Inflight, retry_interval = Interval}) ->
case emqx_inflight:is_empty(Inflight) orelse Interval =:= infinity of
true ->
{ok, [], Session};
false ->

View File

@ -421,8 +421,12 @@ init_monitors() ->
handle_call({subscribe, Group, Topic, SubPid}, _From, State = #state{pmon = PMon}) ->
mria:dirty_write(?SHARED_SUBSCRIPTION, record(Group, Topic, SubPid)),
case ets:member(?SHARED_SUBSCRIBER, {Group, Topic}) of
true -> ok;
false -> ok = emqx_router:do_add_route(Topic, {Group, node()})
true ->
ok;
false ->
ok = emqx_router:do_add_route(Topic, {Group, node()}),
_ = emqx_external_broker:add_shared_route(Topic, Group),
ok
end,
ok = maybe_insert_alive_tab(SubPid),
ok = maybe_insert_round_robin_count({Group, Topic}),
@ -545,7 +549,9 @@ is_alive_sub(Pid) ->
delete_route_if_needed({Group, Topic} = GroupTopic) ->
if_no_more_subscribers(GroupTopic, fun() ->
ok = emqx_router:do_delete_route(Topic, {Group, node()})
ok = emqx_router:do_delete_route(Topic, {Group, node()}),
_ = emqx_external_broker:delete_shared_route(Topic, Group),
ok
end).
get_default_shared_subscription_strategy() ->

View File

@ -589,6 +589,14 @@ ensure_valid_options(Options, Versions) ->
ensure_valid_options([], _, Acc) ->
lists:reverse(Acc);
ensure_valid_options([{K, undefined} | T], Versions, Acc) when
K =:= crl_check;
K =:= crl_cache
->
%% Note: we must set crl options to `undefined' to unset them. Otherwise,
%% `esockd' will retain such options when `esockd:merge_opts/2' is called and the SSL
%% options were previously enabled.
ensure_valid_options(T, Versions, [{K, undefined} | Acc]);
ensure_valid_options([{_, undefined} | T], Versions, Acc) ->
ensure_valid_options(T, Versions, Acc);
ensure_valid_options([{_, ""} | T], Versions, Acc) ->

View File

@ -33,7 +33,8 @@
feed_var/3,
systop/1,
parse/1,
parse/2
parse/2,
intersection/2
]).
-export([
@ -52,6 +53,8 @@
((C =:= '#' orelse C =:= <<"#">>) andalso REST =/= [])
).
-define(IS_WILDCARD(W), W =:= '+' orelse W =:= '#').
%%--------------------------------------------------------------------
%% APIs
%%--------------------------------------------------------------------
@ -98,6 +101,55 @@ match(_, ['#']) ->
match(_, _) ->
false.
%% @doc Finds an intersection between two topics, two filters or a topic and a filter.
%% The function is commutative: reversing parameters doesn't affect the returned value.
%% Two topics intersect only when they are equal.
%% The intersection of a topic and a filter is always either the topic itself or false (no intersection).
%% The intersection of two filters is either false or a new topic filter that would match only those topics,
%% that can be matched by both input filters.
%% For example, the intersection of "t/global/#" and "t/+/1/+" is "t/global/1/+".
-spec intersection(TopicOrFilter, TopicOrFilter) -> TopicOrFilter | false when
TopicOrFilter :: emqx_types:topic().
intersection(Topic1, Topic2) when is_binary(Topic1), is_binary(Topic2) ->
case intersect_start(words(Topic1), words(Topic2)) of
false -> false;
Intersection -> join(Intersection)
end.
intersect_start([<<"$", _/bytes>> | _], [W | _]) when ?IS_WILDCARD(W) ->
false;
intersect_start([W | _], [<<"$", _/bytes>> | _]) when ?IS_WILDCARD(W) ->
false;
intersect_start(Words1, Words2) ->
intersect(Words1, Words2).
intersect(Words1, ['#']) ->
Words1;
intersect(['#'], Words2) ->
Words2;
intersect([W1], ['+']) ->
[W1];
intersect(['+'], [W2]) ->
[W2];
intersect([W1 | T1], [W2 | T2]) when ?IS_WILDCARD(W1), ?IS_WILDCARD(W2) ->
intersect_join(wildcard_intersection(W1, W2), intersect(T1, T2));
intersect([W | T1], [W | T2]) ->
intersect_join(W, intersect(T1, T2));
intersect([W1 | T1], [W2 | T2]) when ?IS_WILDCARD(W1) ->
intersect_join(W2, intersect(T1, T2));
intersect([W1 | T1], [W2 | T2]) when ?IS_WILDCARD(W2) ->
intersect_join(W1, intersect(T1, T2));
intersect([], []) ->
[];
intersect(_, _) ->
false.
intersect_join(_, false) -> false;
intersect_join(W, Words) -> [W | Words].
wildcard_intersection(W, W) -> W;
wildcard_intersection(_, _) -> '+'.
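%% Illustrative usage only, matching the doc comment above; the function name
%% `intersection_examples/0' is made up for this example.
intersection_examples() ->
    <<"t/global/1/+">> = emqx_topic:intersection(<<"t/global/#">>, <<"t/+/1/+">>),
    %% A topic intersected with a filter is the topic itself when it matches...
    <<"t/1/2">> = emqx_topic:intersection(<<"t/1/2">>, <<"t/+/#">>),
    %% ...and false when there is no overlap:
    false = emqx_topic:intersection(<<"t/1/2">>, <<"x/+">>),
    ok.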
-spec match_share(Name, Filter) -> boolean() when
Name :: share(),
Filter :: topic() | share().

View File

@ -23,6 +23,7 @@
-export([delete/3]).
-export([match/2]).
-export([matches/3]).
-export([matches_filter/3]).
-export([make_key/2]).
@ -72,6 +73,12 @@ match(Topic, Tab) ->
matches(Topic, Tab, Opts) ->
emqx_trie_search:matches(Topic, make_nextf(Tab), Opts).
%% @doc Match given topic filter against the index and return _all_ matches.
%% If `unique` option is given, return only unique matches by record ID.
-spec matches_filter(emqx_types:topic(), ets:table(), emqx_trie_search:opts()) -> [match(_ID)].
matches_filter(TopicFilter, Tab, Opts) ->
emqx_trie_search:matches_filter(TopicFilter, make_nextf(Tab), Opts).
%% @doc Extract record ID from the match.
-spec get_id(match(ID)) -> ID.
get_id(Key) ->

View File

@ -17,7 +17,6 @@
-include("emqx_mqtt.hrl").
-export([format/2]).
-export([format_meta_map/1]).
%% logger_formatter:config/0 is not exported.
-type config() :: map().
@ -43,10 +42,6 @@ format(
format(Event, Config) ->
emqx_logger_textfmt:format(Event, Config).
format_meta_map(Meta) ->
Encode = emqx_trace_handler:payload_encode(),
format_meta_map(Meta, Encode).
format_meta_map(Meta, Encode) ->
format_meta_map(Meta, Encode, [
{packet, fun format_packet/2},

View File

@ -99,7 +99,7 @@
-module(emqx_trie_search).
-export([make_key/2, make_pat/2, filter/1]).
-export([match/2, matches/3, get_id/1, get_topic/1]).
-export([match/2, matches/3, get_id/1, get_topic/1, matches_filter/3]).
-export_type([key/1, word/0, words/0, nextf/0, opts/0]).
-define(END, '$end_of_table').
@ -183,9 +183,20 @@ match(Topic, NextF) ->
matches(Topic, NextF, Opts) ->
search(Topic, NextF, Opts).
%% @doc Match given topic filter against the index and return _all_ matches.
-spec matches_filter(emqx_types:topic(), nextf(), opts()) -> [key(_)].
matches_filter(TopicFilter, NextF, Opts) ->
search(TopicFilter, NextF, [topic_filter | Opts]).
%% @doc Entrypoint of the search for a given topic.
search(Topic, NextF, Opts) ->
Words = topic_words(Topic),
%% A private opt
IsFilter = proplists:get_bool(topic_filter, Opts),
Words =
case IsFilter of
true -> filter_words(Topic);
false -> topic_words(Topic)
end,
Base = base_init(Words),
ORetFirst = proplists:get_bool(return_first, Opts),
OUnique = proplists:get_bool(unique, Opts),
@ -200,8 +211,10 @@ search(Topic, NextF, Opts) ->
end,
Matches =
case search_new(Words, Base, NextF, Acc0) of
{Cursor, Acc} ->
{Cursor, Acc} when not IsFilter ->
match_topics(Topic, Cursor, NextF, Acc);
{_Cursor, Acc} ->
Acc;
Acc ->
Acc
end,
@ -275,6 +288,17 @@ compare(['#'], _Words, _) ->
% Closest possible next entries that we must not miss:
% * a/+/+/d/# (same topic but a different ID)
match_full;
%% Filter search %%
compare(_Filter, ['#'], _) ->
match_full;
compare([_ | TF], ['+' | TW], Pos) ->
case compare(TF, TW, Pos + 1) of
lower ->
lower;
Other ->
Other
end;
%% Filter search end %%
compare(['+' | TF], [HW | TW], Pos) ->
case compare(TF, TW, Pos + 1) of
lower ->

View File

@ -267,6 +267,7 @@
[
{node(), topic(), deliver_result()}
| {share, topic(), deliver_result()}
| {emqx_external_broker:dest(), topic(), deliver_result()}
| persisted
]
| disconnect.

View File

@ -399,7 +399,6 @@ compat_windows(Fun) when is_function(Fun, 0) ->
0.0
end;
compat_windows(Fun) ->
?SLOG(warning, "Invalid function: ~p", [Fun]),
error({badarg, Fun}).
load(Avg) ->

View File

@ -303,7 +303,7 @@ websocket_init([Req, Opts]) ->
max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size])
},
ParseState = emqx_frame:initial_parse_state(FrameOpts),
Serialize = emqx_frame:serialize_opts(),
Serialize = emqx_frame:initial_serialize_opts(FrameOpts),
Channel = emqx_channel:init(ConnInfo, Opts),
GcState = get_force_gc(Zone),
StatsTimer = get_stats_enable(Zone),
@ -436,6 +436,7 @@ websocket_handle({Frame, _}, State) ->
%% TODO: should not close the ws connection
?LOG(error, #{msg => "unexpected_frame", frame => Frame}),
shutdown(unexpected_ws_frame, State).
websocket_info({call, From, Req}, State) ->
handle_call(From, Req, State);
websocket_info({cast, rate_limit}, State) ->
@ -455,8 +456,8 @@ websocket_info({incoming, Packet}, State) ->
handle_incoming(Packet, State);
websocket_info({outgoing, Packets}, State) ->
return(enqueue(Packets, State));
websocket_info({check_gc, Stats}, State) ->
return(check_oom(run_gc(Stats, State)));
websocket_info({check_gc, Cnt, Oct}, State) ->
return(check_oom(run_gc(Cnt, Oct, State)));
websocket_info(
Deliver = {deliver, _Topic, _Msg},
State = #state{listener = {Type, Listener}}
@ -603,17 +604,23 @@ check_limiter(
Data,
WhenOk,
Msgs,
#state{limiter_timer = undefined, limiter = Limiter} = State
#state{channel = Channel, limiter_timer = undefined, limiter = Limiter} = State
) ->
case emqx_limiter_container:check_list(Needs, Limiter) of
{ok, Limiter2} ->
WhenOk(Data, Msgs, State#state{limiter = Limiter2});
{pause, Time, Limiter2} ->
?SLOG(debug, #{
msg => "pause_time_due_to_rate_limit",
needs => Needs,
time_in_ms => Time
}),
?SLOG_THROTTLE(
warning,
#{
msg => socket_receive_paused_by_rate_limit,
paused_ms => Time
},
#{
tag => "RATE",
clientid => emqx_channel:info(clientid, Channel)
}
),
Retry = #retry{
types = [Type || {_, Type} <- Needs],
@ -647,7 +654,7 @@ check_limiter(
State#state{limiter_buffer = queue:in(New, Buffer)}.
-spec retry_limiter(state()) -> state().
retry_limiter(#state{limiter = Limiter} = State) ->
retry_limiter(#state{channel = Channel, limiter = Limiter} = State) ->
#retry{types = Types, data = Data, next = Next} = emqx_limiter_container:get_retry_context(
Limiter
),
@ -662,11 +669,17 @@ retry_limiter(#state{limiter = Limiter} = State) ->
}
);
{pause, Time, Limiter2} ->
?SLOG(debug, #{
msg => "pause_time_due_to_rate_limit",
types => Types,
time_in_ms => Time
}),
?SLOG_THROTTLE(
warning,
#{
msg => socket_receive_paused_by_rate_limit,
paused_ms => Time
},
#{
tag => "RATE",
clientid => emqx_channel:info(clientid, Channel)
}
),
TRef = start_timer(Time, limit_timeout),
@ -682,8 +695,8 @@ when_msg_in(Packets, Msgs, State) ->
%% Run GC, Check OOM
%%--------------------------------------------------------------------
run_gc(Stats, State = #state{gc_state = GcSt}) ->
case ?ENABLED(GcSt) andalso emqx_gc:run(Stats, GcSt) of
run_gc(Cnt, Oct, State = #state{gc_state = GcSt}) ->
case ?ENABLED(GcSt) andalso emqx_gc:run(Cnt, Oct, GcSt) of
false -> State;
{_IsGC, GcSt1} -> State#state{gc_state = GcSt1}
end.
@ -725,7 +738,8 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
input_bytes => Data
}),
FrameError = {frame_error, Reason},
{[{incoming, FrameError} | Packets], State};
NState = enrich_state(Reason, State),
{[{incoming, FrameError} | Packets], NState};
error:Reason:Stacktrace ->
?LOG(error, #{
at_state => emqx_frame:describe_state(ParseState),
@ -796,11 +810,9 @@ handle_outgoing(
get_active_n(Type, Listener)
of
true ->
Stats = #{
cnt => emqx_pd:reset_counter(outgoing_pubs),
oct => emqx_pd:reset_counter(outgoing_bytes)
},
postpone({check_gc, Stats}, State);
Cnt = emqx_pd:reset_counter(outgoing_pubs),
Oct = emqx_pd:reset_counter(outgoing_bytes),
postpone({check_gc, Cnt, Oct}, State);
false ->
State
end,
@ -820,7 +832,7 @@ serialize_and_inc_stats_fun(#state{serialize = Serialize}) ->
?LOG(warning, #{
msg => "packet_discarded",
reason => "frame_too_large",
packet => emqx_packet:format(Packet)
packet => Packet
}),
ok = emqx_metrics:inc('delivery.dropped.too_large'),
ok = emqx_metrics:inc('delivery.dropped'),
@ -1059,6 +1071,13 @@ check_max_connection(Type, Listener) ->
{denny, Reason}
end
end.
enrich_state(#{parse_state := NParseState}, State) ->
Serialize = emqx_frame:serialize_opts(NParseState),
State#state{parse_state = NParseState, serialize = Serialize};
enrich_state(_, State) ->
State.
%%--------------------------------------------------------------------
%% For CT tests
%%--------------------------------------------------------------------

View File

@ -33,7 +33,7 @@ introduced_in() ->
"5.0.8".
%%================================================================================
%% API funcions
%% API functions
%%================================================================================
-spec send(node(), pid(), emqx_types:topic(), term()) -> true.

View File

@ -112,6 +112,10 @@ t_check(_) ->
?assertNot(emqx_banned:check(ClientInfoValidFull)),
?assertNot(emqx_banned:check(ClientInfoValidEmpty)),
?assertNot(emqx_banned:check(ClientInfoValidOnlyClientId)),
?assert(emqx_banned:check_clientid(<<"BannedClient">>)),
?assert(emqx_banned:check_clientid(<<"BannedClientRE">>)),
ok = emqx_banned:delete(emqx_banned:who(clientid, <<"BannedClient">>)),
ok = emqx_banned:delete(emqx_banned:who(username, <<"BannedUser">>)),
ok = emqx_banned:delete(emqx_banned:who(peerhost, {192, 168, 0, 1})),
@ -127,6 +131,10 @@ t_check(_) ->
?assertNot(emqx_banned:check(ClientInfoBannedUsernameRE)),
?assertNot(emqx_banned:check(ClientInfoBannedAddrNet)),
?assertNot(emqx_banned:check(ClientInfoValidFull)),
?assertNot(emqx_banned:check_clientid(<<"BannedClient">>)),
?assertNot(emqx_banned:check_clientid(<<"BannedClientRE">>)),
?assertEqual(0, emqx_banned:info(size)).
t_unused(_) ->

View File

@ -414,11 +414,18 @@ t_handle_in_auth(_) ->
emqx_channel:handle_in(?AUTH_PACKET(), Channel).
t_handle_in_frame_error(_) ->
IdleChannel = channel(#{conn_state => idle}),
{shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan} =
emqx_channel:handle_in({frame_error, #{cause => frame_too_large}}, IdleChannel),
IdleChannelV5 = channel(#{conn_state => idle}),
%% no CONNACK packet for v4
?assertMatch(
{shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan},
emqx_channel:handle_in(
{frame_error, #{cause => frame_too_large}}, v4(IdleChannelV5)
)
),
ConnectingChan = channel(#{conn_state => connecting}),
ConnackPacket = ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE),
?assertMatch(
{shutdown,
#{
shutdown_count := frame_too_large,
@ -426,12 +433,13 @@ t_handle_in_frame_error(_) ->
limit := 100,
received := 101
},
ConnackPacket,
_} =
ConnackPacket, _},
emqx_channel:handle_in(
{frame_error, #{cause => frame_too_large, received => 101, limit => 100}},
ConnectingChan
)
),
DisconnectPacket = ?DISCONNECT_PACKET(?RC_PACKET_TOO_LARGE),
ConnectedChan = channel(#{conn_state => connected}),
?assertMatch(

View File

@ -78,6 +78,7 @@
start_epmd/0,
start_peer/2,
stop_peer/1,
ebin_path/0,
listener_port/2
]).

View File

@ -445,7 +445,7 @@ zone_global_defaults() ->
peer_cert_as_username => disabled,
response_information => [],
retain_available => true,
retry_interval => 30000,
retry_interval => infinity,
message_expiry_interval => infinity,
server_keepalive => disabled,
session_expiry_interval => 7200000,

View File

@ -333,6 +333,17 @@ t_handle_incoming(_) ->
),
?assertMatch({ok, _Out, _NState}, emqx_connection:handle_incoming(frame_error, st())).
t_handle_outing_non_utf8_topic(_) ->
Topic = <<"测试"/utf16>>,
Publish = ?PUBLISH_PACKET(0, Topic, 1),
StrictOff = #{version => 5, max_size => 16#FFFF, strict_mode => false},
StOff = st(#{serialize => StrictOff}),
OffResult = emqx_connection:handle_outgoing(Publish, StOff),
?assertMatch({ok, _}, OffResult),
StrictOn = #{version => 5, max_size => 16#FFFF, strict_mode => true},
StOn = st(#{serialize => StrictOn}),
?assertError(frame_serialize_error, emqx_connection:handle_outgoing(Publish, StOn)).
t_with_channel(_) ->
State = st(),
ok = meck:expect(emqx_channel, handle_in, fun(_, _) -> ok end),
@ -515,7 +526,7 @@ t_oom_shutdown(_) ->
with_conn(
fun(Pid) ->
Pid ! {tcp_passive, foo},
{ok, _} = ?block_until(#{?snk_kind := check_oom}, 1000),
{ok, _} = ?block_until(#{?snk_kind := check_oom_shutdown}, 1000),
{ok, _} = ?block_until(#{?snk_kind := terminate}, 100),
Trace = snabbkaffe:collect_trace(),
?assertEqual(1, length(?of_kind(terminate, Trace))),

View File

@ -138,13 +138,14 @@ init_per_testcase(t_refresh_config = TestCase, Config) ->
];
init_per_testcase(TestCase, Config) when
TestCase =:= t_update_listener;
TestCase =:= t_update_listener_enable_disable;
TestCase =:= t_validations
->
ct:timetrap({seconds, 30}),
ok = snabbkaffe:start_trace(),
%% when running emqx standalone tests, we can't use those
%% features.
case does_module_exist(emqx_management) of
case does_module_exist(emqx_mgmt) of
true ->
DataDir = ?config(data_dir, Config),
CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]),
@ -165,7 +166,7 @@ init_per_testcase(TestCase, Config) when
{emqx_conf, #{config => #{listeners => #{ssl => #{default => ListenerConf}}}}},
emqx,
emqx_management,
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
emqx_mgmt_api_test_util:emqx_dashboard()
],
#{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}
),
@ -206,6 +207,7 @@ read_crl(Filename) ->
end_per_testcase(TestCase, Config) when
TestCase =:= t_update_listener;
TestCase =:= t_update_listener_enable_disable;
TestCase =:= t_validations
->
Skip = proplists:get_bool(skip_does_not_apply, Config),
@ -1057,3 +1059,104 @@ do_t_validations(_Config) ->
),
ok.
%% Checks that if CRL is ever enabled and then disabled, clients can connect, even if they
%% would otherwise not have their corresponding CRLs cached and fail with `{bad_crls,
%% no_relevant_crls}`.
t_update_listener_enable_disable(Config) ->
case proplists:get_bool(skip_does_not_apply, Config) of
true ->
ct:pal("skipping as this test does not apply in this profile"),
ok;
false ->
do_t_update_listener_enable_disable(Config)
end.
do_t_update_listener_enable_disable(Config) ->
DataDir = ?config(data_dir, Config),
Keyfile = filename:join([DataDir, "server.key.pem"]),
Certfile = filename:join([DataDir, "server.cert.pem"]),
Cacertfile = filename:join([DataDir, "ca-chain.cert.pem"]),
ClientCert = filename:join(DataDir, "client.cert.pem"),
ClientKey = filename:join(DataDir, "client.key.pem"),
ListenerId = "ssl:default",
%% Enable CRL
{ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId),
CRLConfig0 =
#{
<<"ssl_options">> =>
#{
<<"keyfile">> => Keyfile,
<<"certfile">> => Certfile,
<<"cacertfile">> => Cacertfile,
<<"enable_crl_check">> => true,
<<"fail_if_no_peer_cert">> => true
}
},
ListenerData1 = emqx_utils_maps:deep_merge(ListenerData0, CRLConfig0),
{ok, {_, _, ListenerData2}} = update_listener_via_api(ListenerId, ListenerData1),
?assertMatch(
#{
<<"ssl_options">> :=
#{
<<"enable_crl_check">> := true,
<<"verify">> := <<"verify_peer">>,
<<"fail_if_no_peer_cert">> := true
}
},
ListenerData2
),
%% Disable CRL
CRLConfig1 =
#{
<<"ssl_options">> =>
#{
<<"keyfile">> => Keyfile,
<<"certfile">> => Certfile,
<<"cacertfile">> => Cacertfile,
<<"enable_crl_check">> => false,
<<"fail_if_no_peer_cert">> => true
}
},
ListenerData3 = emqx_utils_maps:deep_merge(ListenerData2, CRLConfig1),
redbug:start(
[
"esockd_server:get_listener_prop -> return",
"esockd_server:set_listener_prop -> return",
"esockd:merge_opts -> return",
"esockd_listener_sup:set_options -> return",
"emqx_listeners:inject_crl_config -> return"
],
[{msgs, 100}]
),
{ok, {_, _, ListenerData4}} = update_listener_via_api(ListenerId, ListenerData3),
?assertMatch(
#{
<<"ssl_options">> :=
#{
<<"enable_crl_check">> := false,
<<"verify">> := <<"verify_peer">>,
<<"fail_if_no_peer_cert">> := true
}
},
ListenerData4
),
%% Now the client that would be blocked tries to connect and should now be allowed.
{ok, C} = emqtt:start_link([
{ssl, true},
{ssl_opts, [
{certfile, ClientCert},
{keyfile, ClientKey},
{verify, verify_none}
]},
{port, 8883}
]),
?assertMatch({ok, _}, emqtt:connect(C)),
emqtt:stop(C),
?assertNotReceive({http_get, _}),
ok.

View File

@ -38,7 +38,7 @@
%% in `end_per_suite/1` or `end_per_group/2`) with the result from step 2.
-module(emqx_cth_cluster).
-export([start/1, start/2, restart/1, restart/2]).
-export([start/1, start/2, restart/1]).
-export([stop/1, stop_node/1]).
-export([start_bare_nodes/1, start_bare_nodes/2]).
@ -158,18 +158,18 @@ wait_clustered([Node | Nodes] = All, Check, Deadline) ->
nodes_not_running => NodesNotRunnging
}}
);
{false, Nodes} ->
{false, _Nodes} ->
timer:sleep(100),
wait_clustered(All, Check, Deadline)
end.
restart(NodeSpec) ->
restart(maps:get(name, NodeSpec), NodeSpec).
restart(Node, Spec) ->
ct:pal("Stopping peer node ~p", [Node]),
ok = emqx_cth_peer:stop(Node),
start([Spec#{boot_type => restart}]).
restart(NodeSpecs = [_ | _]) ->
Nodes = [maps:get(name, Spec) || Spec <- NodeSpecs],
ct:pal("Stopping peer nodes: ~p", [Nodes]),
ok = stop(Nodes),
start([Spec#{boot_type => restart} || Spec <- NodeSpecs]);
restart(NodeSpec = #{}) ->
restart([NodeSpec]).
mk_nodespecs(Nodes, ClusterOpts) ->
NodeSpecs = lists:zipwith(

Some files were not shown because too many files have changed in this diff.