Compare commits

...

83 Commits

Author SHA1 Message Date
Ivan Dyachkov bcd63344b8
Merge pull request #13583 from id/20240807-sync-release-branches
sync release branches
2024-08-07 11:38:14 +02:00
Ivan Dyachkov cc3b26a3ac Merge remote-tracking branch 'upstream/release-58' into 20240807-sync-release-branches 2024-08-07 09:48:38 +02:00
Ivan Dyachkov dd686c24a0 Merge remote-tracking branch 'upstream/release-57' into 20240807-sync-release-branches 2024-08-07 09:44:38 +02:00
Ivan Dyachkov 592c4e0045
Merge pull request #12774 from emqx/dependabot/github_actions/dot-github/actions/package-macos/actions-package-macos-83d1e47aa6
chore(deps): bump the actions-package-macos group in /.github/actions/package-macos with 1 update
2024-08-07 09:43:53 +02:00
Ivan Dyachkov 073e3ea0a8
Merge pull request #13569 from emqx/dependabot/github_actions/actions-ef71aea555
chore(deps): bump the actions group across 1 directory with 8 updates
2024-08-07 09:35:52 +02:00
Xinyu Liu 81978ceaeb
Merge pull request #13571 from terry-xiaoyu/fast_fail_on_invalid_ssl_opts
chore: update esockd to 5.12.0
2024-08-07 11:21:32 +08:00
Ilia Averianov 6bfddd9952
Merge pull request #13565 from savonarola/0801-shared-subs-compact-structures
Reduce size of shared sub protocol structures
2024-08-06 19:56:08 +03:00
Thales Macedo Garitezi cf608a73a5
Merge pull request #13578 from thalesmg/20240806-r58-port-raft-precond
feat(dsraft): support atomic batches + preconditions (release-58)
2024-08-06 13:40:46 -03:00
Thales Macedo Garitezi a8200fb83d
Merge pull request #13579 from thalesmg/20240806-r58-test-flaky-consumer-rebalance
test: attempt to reduce test flakiness
2024-08-06 13:33:46 -03:00
Ilya Averyanov 9ad65c6ac1 feat(queue): reduce logging levels 2024-08-06 18:45:15 +03:00
Thales Macedo Garitezi 9ca3985bbd test: attempt to reduce test flakiness 2024-08-06 12:44:51 -03:00
Ilya Averyanov e17becb84d feat(queue): compact protocol structures, organize formatting 2024-08-06 18:05:02 +03:00
Andrew Mayorov 5dd8fefded test(ds): avoid side effects in check phase 2024-08-06 11:43:12 -03:00
Andrew Mayorov 7b85faf12a chore(dsraft): fix a few spelling errors
Co-Authored-By: Thales Macedo Garitezi <thalesmg@gmail.com>
2024-08-06 11:43:12 -03:00
Andrew Mayorov b0594271b2 chore(dsraft): fix a typespec 2024-08-06 11:43:12 -03:00
Andrew Mayorov d8aa39a310 fix(dsraft): use local application environment 2024-08-06 11:43:12 -03:00
Andrew Mayorov fc0434afc8 chore(dslocal): refine a few typespecs 2024-08-06 11:43:12 -03:00
Andrew Mayorov 5502af18b7 feat(ds): support deletions + precondition-related API in bitfield-lts 2024-08-06 11:43:12 -03:00
Andrew Mayorov 9f96e0957e test(ds): verify deletions work predictably 2024-08-06 11:43:12 -03:00
Andrew Mayorov 109ffe7a70 fix(dsbackend): unify timestamp resolution in operations / preconditions 2024-08-06 11:43:12 -03:00
Andrew Mayorov 1559aac486 test(dsbackend): add shared tests for atomic batches + preconditions 2024-08-06 11:43:12 -03:00
Andrew Mayorov 68990f1538 feat(ds): support operations + preconditions in skipstream-lts 2024-08-06 11:43:12 -03:00
Andrew Mayorov 5356d678cc feat(dsraft): support atomic batches + preconditions 2024-08-06 11:43:12 -03:00
Andrew Mayorov 11951f8f6c feat(ds): adopt buffer interface to `emqx_ds:operation()` 2024-08-06 11:43:12 -03:00
Andrew Mayorov 0aa4cdbaf3 feat(ds): add generic preconditions implementation 2024-08-06 11:43:12 -03:00
Ivan Dyachkov 281f8ddc83
Merge pull request #13575 from Kinplemelon/kinple/upgrade-dashboard-58
chore(dashboard): bump dashboard version to v1.10.0-beta.1 & e1.8.0-beta.1
2024-08-06 16:39:06 +02:00
Kinplemelon b80513e941 ci: update emqx docs link in dashboard 2024-08-06 15:21:19 +02:00
Ivan Dyachkov 822ed71282 chore: release 5.7.2 2024-08-06 13:25:56 +02:00
Kinple b8fd5de2a5
Merge pull request #13577 from Kinplemelon/kinple/upgrade-dashboard
chore(dashboard): bump dashboard version to v1.9.2 & e1.7.2
2024-08-06 19:02:50 +08:00
Kinplemelon 3ee84d60ae chore(dashboard): bump dashboard version to v1.9.2 & e1.7.2 2024-08-06 18:11:35 +08:00
Andrew Mayorov 3b52b658cd
Merge pull request #13559 from keynslug/feat/EMQX-12309/raft-precond
feat(dsraft): support atomic batches + preconditions
2024-08-06 09:17:16 +02:00
Kinplemelon cba3dcbeda chore(dashboard): bump dashboard version to v1.10.0-beta.1 & e1.8.0-beta.1 2024-08-06 13:44:16 +08:00
Kinple caf1897979
Merge pull request #13574 from Kinplemelon/kinple/upgrade-dashboard
chore(dashboard): bump dashboard version to e1.7.2-beta.7
2024-08-06 10:51:03 +08:00
Kinplemelon dbbd5e1458 ci: update emqx docs link in dashboard 2024-08-06 09:33:20 +08:00
Kinplemelon 0ab31df9d2 chore(dashboard): bump dashboard version to v1.9.2-beta.1 & e1.7.2-beta.7 2024-08-06 09:32:17 +08:00
Thales Macedo Garitezi 613fc644f5
Merge pull request #13425 from kjellwinblad/kjell/review_connector_error_logs_mqtt_etc/EMQX-12555/EMQX-12657
fix: make MQTT connector error log messages easier to understand
2024-08-05 17:34:13 -03:00
Andrew Mayorov b1a53568d6
test(ds): avoid side effects in check phase 2024-08-05 16:34:17 +02:00
Andrew Mayorov 382feab7d1
chore(dsraft): fix a few spelling errors
Co-Authored-By: Thales Macedo Garitezi <thalesmg@gmail.com>
2024-08-05 10:55:49 +02:00
Andrew Mayorov 6aad774075
chore(dsraft): fix a typespec 2024-08-05 10:55:49 +02:00
Andrew Mayorov 649cbf1c79
fix(dsraft): use local application environment 2024-08-05 10:55:49 +02:00
Andrew Mayorov 4cde5e98a3
chore(dslocal): refine a few typespecs 2024-08-05 10:55:48 +02:00
Andrew Mayorov d631b5b296
feat(ds): support deletions + precondition-related API in bitfield-lts 2024-08-05 10:55:48 +02:00
Andrew Mayorov 26ec69d5f4
test(ds): verify deletions work predictably 2024-08-05 10:55:48 +02:00
Andrew Mayorov 58b9ab0210
fix(dsbackend): unify timestamp resolution in operations / preconditions 2024-08-05 10:55:22 +02:00
lafirest 4644072fd8
Merge pull request #13570 from lafirest/fix/api_key_bootstrap
fix(api_key): do not crash boot when the bootstrap file does not exist
2024-08-05 16:33:43 +08:00
Shawn bd87e3ce2b chore: update esockd to 5.12.0 2024-08-05 16:18:04 +08:00
firest c9c4d1a196 fix(api_key): do not crash boot when the bootstrap file does not exist 2024-08-05 15:56:05 +08:00
dependabot[bot] 11546b72f4
chore(deps): bump the actions group across 1 directory with 8 updates
Bumps the actions group with 8 updates in the / directory:

| Package | From | To |
| --- | --- | --- |
| [actions/checkout](https://github.com/actions/checkout) | `4.1.2` | `4.1.7` |
| [actions/upload-artifact](https://github.com/actions/upload-artifact) | `4.3.3` | `4.3.5` |
| [actions/download-artifact](https://github.com/actions/download-artifact) | `4.1.7` | `4.1.8` |
| [docker/setup-qemu-action](https://github.com/docker/setup-qemu-action) | `3.0.0` | `3.2.0` |
| [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) | `3.3.0` | `3.6.1` |
| [docker/login-action](https://github.com/docker/login-action) | `3.2.0` | `3.3.0` |
| [erlef/setup-beam](https://github.com/erlef/setup-beam) | `1.18.0` | `1.18.1` |
| [ossf/scorecard-action](https://github.com/ossf/scorecard-action) | `2.3.3` | `2.4.0` |



Updates `actions/checkout` from 4.1.2 to 4.1.7
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4.1.2...692973e3d937129bcbf40652eb9f2f61becf3332)

Updates `actions/upload-artifact` from 4.3.3 to 4.3.5
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](65462800fd...89ef406dd8)

Updates `actions/download-artifact` from 4.1.7 to 4.1.8
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](65a9edc588...fa0a91b85d)

Updates `docker/setup-qemu-action` from 3.0.0 to 3.2.0
- [Release notes](https://github.com/docker/setup-qemu-action/releases)
- [Commits](68827325e0...49b3bc8e6b)

Updates `docker/setup-buildx-action` from 3.3.0 to 3.6.1
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](d70bba72b1...988b5a0280)

Updates `docker/login-action` from 3.2.0 to 3.3.0
- [Release notes](https://github.com/docker/login-action/releases)
- [Commits](0d4c9c5ea7...9780b0c442)

Updates `erlef/setup-beam` from 1.18.0 to 1.18.1
- [Release notes](https://github.com/erlef/setup-beam/releases)
- [Commits](a6e26b2231...b9c58b0450)

Updates `ossf/scorecard-action` from 2.3.3 to 2.4.0
- [Release notes](https://github.com/ossf/scorecard-action/releases)
- [Changelog](https://github.com/ossf/scorecard-action/blob/main/RELEASE.md)
- [Commits](dc50aa9510...62b2cac7ed)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: docker/setup-qemu-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: docker/setup-buildx-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: docker/login-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
- dependency-name: erlef/setup-beam
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions
- dependency-name: ossf/scorecard-action
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: actions
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-08-05 03:25:47 +00:00
dependabot[bot] bcb70a9fb9
chore(deps): bump the actions-package-macos group
Bumps the actions-package-macos group in /.github/actions/package-macos with 1 update: [actions/cache](https://github.com/actions/cache).


Updates `actions/cache` from 4.0.1 to 4.0.2
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](ab5e6d0c87...0c45773b62)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: actions-package-macos
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-08-05 03:17:26 +00:00
JimMoen 09ec31908b
Merge pull request #13357 from JimMoen/fix-utf8-frame-error-connack
Stop returning `CONNACK` or `DISCONNECT` to clients that sent malformed CONNECT packets.

- Only send `CONNACK` with reason code `frame_too_large` for MQTT v5.0 connections, and only when the protocol version field in the CONNECT packet can be detected.
- Otherwise, do **NOT** send any CONNACK or DISCONNECT packet.
2024-08-02 15:24:30 +08:00
lafirest b94ec4014f
Merge pull request #13563 from lafirest/fix/payload_encode
fix(log): respect payload encoding settings when formatting packets
2024-08-02 14:38:12 +08:00
firest 74c346f9d1 fix(log): respect payload encoding settings when formatting packets 2024-08-02 12:41:30 +08:00
zhongwencool 8a33ef8576
Merge pull request #13562 from zhongwencool/fix-deactivate-alarm
fix: deactivate alarm before creating resource
2024-08-02 12:08:27 +08:00
zhongwencool 6c2033ecbf fix: deactivate alarm before creating resource 2024-08-02 11:03:59 +08:00
zmstone 51530588ef ci: fix a typo in commented out docker-compose yaml file 2024-08-01 22:41:42 +02:00
Thales Macedo Garitezi bba9d085d6 test: refactor test structure 2024-08-01 16:03:04 -03:00
Thales Macedo Garitezi 3162fe7a27 feat: prettify some error explanations 2024-08-01 15:31:00 -03:00
Thales Macedo Garitezi 52b2d73b28 test: move new test to newer module and use current apis 2024-08-01 15:13:25 -03:00
Thales Macedo Garitezi 44e7f2e9b2 refactor: use macros for status to avoid typos 2024-08-01 14:49:43 -03:00
Thales Macedo Garitezi baf2b96cbc test: refactor test structure 2024-08-01 14:27:25 -03:00
Kjell Winblad ba2d4f3df3 docs: add change log entry 2024-08-01 14:21:27 -03:00
Kjell Winblad 11aaa7b07d fix: make MQTT connector error log messages easier to understand
Fixes:
https://emqx.atlassian.net/browse/EMQX-12555
https://emqx.atlassian.net/browse/EMQX-12657
2024-08-01 14:21:26 -03:00
Andrew Mayorov 810a4d3cf9
test(dsbackend): add shared tests for atomic batches + preconditions 2024-08-01 14:26:45 +02:00
Andrew Mayorov 7b243ef7ad
feat(ds): support operations + preconditions in skipstream-lts 2024-08-01 14:26:45 +02:00
Andrew Mayorov fcf76d28ba
feat(dsraft): support atomic batches + preconditions 2024-08-01 14:26:45 +02:00
Andrew Mayorov 3b5d98c1d9
feat(ds): adopt buffer interface to `emqx_ds:operation()` 2024-08-01 14:26:45 +02:00
Andrew Mayorov 451b03ff99
feat(ds): add generic preconditions implementation 2024-08-01 14:26:45 +02:00
JimMoen f792418a68
Merge pull request #13552 from JimMoen/fix-plugin-app-takes-too-long
fix: add a startup timeout limit for the plugin application
2024-08-01 16:46:09 +08:00
JimMoen 4915cc0da6
chore: add changelog entry for 13357 2024-08-01 15:23:58 +08:00
JimMoen 15b3f4deb0
fix: rm unused func and exports 2024-08-01 15:00:24 +08:00
JimMoen 7a251c9ead
test: handle frame error for CONNECT packets 2024-08-01 10:26:31 +08:00
JimMoen 37a89d0094
fix: enrich parse_state and connection serialize opts 2024-08-01 10:26:31 +08:00
JimMoen c313aa89f0
fix: try throw proto_ver and proto_name when parsing CONNECT packet 2024-08-01 10:26:31 +08:00
JimMoen 6db1c0a446
refactor: separate function to handle `frame_error` 2024-08-01 10:26:31 +08:00
JimMoen d4508a4f1d
chore: sync master `elvis.config` 2024-08-01 10:26:31 +08:00
Ivan Dyachkov 577f1a7d8a
Merge pull request #13553 from id/20240731-ci-fix-docker-build
ci: fix docker images build
2024-07-31 16:04:47 +02:00
Thales Macedo Garitezi 08c58cc319
Merge pull request #13543 from thalesmg/20240730-r57-sr-delete-protobuf-cache
fix(schema registry): clear protobuf code cache when deleting/updating serdes
2024-07-31 10:16:48 -03:00
Thales Macedo Garitezi 150fee87f1
Merge pull request #13541 from thalesmg/20240730-r57-unset-crl-check-listener
fix(crl): force remove CRL fields from SSL opts after listener update
2024-07-31 10:16:35 -03:00
JimMoen c658cfe269
fix: make static_check happy 2024-07-31 17:17:13 +08:00
JimMoen a246551914
fix: add a startup timeout limit for the plugin application 2024-07-31 17:17:11 +08:00
Ivan Dyachkov 8d8ff6cf5d ci: fix docker images build
/etc/docker/daemon.json requires root for read access
2024-07-31 10:27:04 +02:00
Thales Macedo Garitezi ebb69f4ebf fix(crl): force remove crl fields from SSL opts after listener update
Fixes https://emqx.atlassian.net/browse/EMQX-12785
2024-07-30 14:00:24 -03:00
Thales Macedo Garitezi fd961f9da7 fix(schema registry): clear protobuf code cache when deleting/updating serde
Fixes https://emqx.atlassian.net/browse/EMQX-12789
2024-07-30 13:52:34 -03:00
124 changed files with 2053 additions and 883 deletions

View File

@@ -10,7 +10,7 @@ services:
 nofile: 1024
 image: openldap
 #ports:
-# - 389:389
+# - "389:389"
 volumes:
 - ./certs/ca.crt:/etc/certs/ca.crt
 restart: always

View File

@@ -51,7 +51,7 @@ runs:
 echo "SELF_HOSTED=false" >> $GITHUB_OUTPUT
 ;;
 esac
-- uses: actions/cache@ab5e6d0c87105b4c9c2047343972218f562e4319 # v4.0.1
+- uses: actions/cache@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
 id: cache
 if: steps.prepare.outputs.SELF_HOSTED != 'true'
 with:

View File

@@ -152,7 +152,7 @@ jobs:
 echo "PROFILE=${PROFILE}" | tee -a .env
 echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env
 zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip .
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: ${{ matrix.profile }}
 path: ${{ matrix.profile }}.zip

View File

@@ -163,7 +163,7 @@ jobs:
 echo "PROFILE=${PROFILE}" | tee -a .env
 echo "PKG_VSN=$(./pkg-vsn.sh ${PROFILE})" | tee -a .env
 zip -ryq -x@.github/workflows/.zipignore $PROFILE.zip .
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: ${{ matrix.profile }}
 path: ${{ matrix.profile }}.zip

View File

@@ -83,7 +83,7 @@ jobs:
 id: build
 run: |
 make ${{ matrix.profile }}-tgz
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: "${{ matrix.profile }}-${{ matrix.arch }}.tar.gz"
 path: "_packages/emqx*/emqx-*.tar.gz"
@@ -110,7 +110,7 @@ jobs:
 - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
 with:
 ref: ${{ github.event.inputs.ref }}
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 pattern: "${{ matrix.profile[0] }}-*.tar.gz"
 path: _packages
@@ -122,24 +122,25 @@ jobs:
 run: |
 ls -lR _packages/$PROFILE
 mv _packages/$PROFILE/*.tar.gz ./
 - name: Enable containerd image store on Docker Engine
 run: |
-echo "$(jq '. += {"features": {"containerd-snapshotter": true}}' /etc/docker/daemon.json)" > daemon.json
+echo "$(sudo cat /etc/docker/daemon.json | jq '. += {"features": {"containerd-snapshotter": true}}')" > daemon.json
 sudo mv daemon.json /etc/docker/daemon.json
 sudo systemctl restart docker
-- uses: docker/setup-qemu-action@68827325e0b33c7199eb31dd4e31fbe9023e06e3 # v3.0.0
-- uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0
+- uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0
+- uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
 - name: Login to hub.docker.com
-uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
+uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
 if: inputs.publish && contains(matrix.profile[1], 'docker.io')
 with:
 username: ${{ secrets.DOCKER_HUB_USER }}
 password: ${{ secrets.DOCKER_HUB_TOKEN }}
 - name: Login to AWS ECR
-uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0
+uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
 if: inputs.publish && contains(matrix.profile[1], 'public.ecr.aws')
 with:
 registry: public.ecr.aws

View File

@@ -51,7 +51,7 @@ jobs:
 if: always()
 run: |
 docker save $_EMQX_DOCKER_IMAGE_TAG | gzip > $EMQX_NAME-docker-$PKG_VSN.tar.gz
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: "${{ env.EMQX_NAME }}-docker"
 path: "${{ env.EMQX_NAME }}-docker-${{ env.PKG_VSN }}.tar.gz"

View File

@@ -95,7 +95,7 @@ jobs:
 apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
 apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
 apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: success()
 with:
 name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.otp }}
@@ -180,7 +180,7 @@ jobs:
 --builder $BUILDER \
 --elixir $IS_ELIXIR \
 --pkgtype pkg
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: ${{ matrix.profile }}-${{ matrix.os }}-${{ matrix.arch }}${{ matrix.with_elixir == 'yes' && '-elixir' || '' }}-${{ matrix.builder }}-${{ matrix.otp }}-${{ matrix.elixir }}
 path: _packages/${{ matrix.profile }}/
@@ -198,7 +198,7 @@ jobs:
 profile:
 - ${{ inputs.profile }}
 steps:
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 pattern: "${{ matrix.profile }}-*"
 path: packages/${{ matrix.profile }}

View File

@@ -54,7 +54,7 @@ jobs:
 - name: build pkg
 run: |
 ./scripts/buildx.sh --profile "$PROFILE" --pkgtype pkg --builder "$BUILDER"
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: success()
 with:
 name: ${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.os }}
@@ -102,7 +102,7 @@ jobs:
 apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
 apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
 apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: success()
 with:
 name: ${{ matrix.profile }}-${{ matrix.os }}

View File

@@ -41,13 +41,13 @@ jobs:
 - name: build pkg
 run: |
 ./scripts/buildx.sh --profile $PROFILE --pkgtype pkg --elixir $ELIXIR --arch $ARCH
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: "${{ matrix.profile[0] }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
 path: _packages/${{ matrix.profile[0] }}/*
 retention-days: 7
 compression-level: 0
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: "${{ matrix.profile[0] }}-schema-dump-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"
 path: |
@@ -84,7 +84,7 @@ jobs:
 apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
 apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
 apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: ${{ matrix.os }}
 path: _packages/**/*

View File

@@ -37,7 +37,7 @@ jobs:
 - run: ./scripts/check-elixir-deps-discrepancies.exs
 - run: ./scripts/check-elixir-applications.exs
 - name: Upload produced lock files
-uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: failure()
 with:
 name: ${{ matrix.profile }}_produced_lock_files

View File

@@ -52,7 +52,7 @@ jobs:
 id: package_file
 run: |
 echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: emqx-ubuntu20.04
 path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }}
@@ -77,7 +77,7 @@ jobs:
 repository: emqx/tf-emqx-performance-test
 path: tf-emqx-performance-test
 ref: v0.2.3
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-ubuntu20.04
 path: tf-emqx-performance-test/
@@ -113,13 +113,13 @@ jobs:
 working-directory: ./tf-emqx-performance-test
 run: |
 terraform destroy -auto-approve
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: success()
 with:
 name: metrics
 path: |
 "./tf-emqx-performance-test/*.tar.gz"
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: failure()
 with:
 name: terraform
@@ -148,7 +148,7 @@ jobs:
 repository: emqx/tf-emqx-performance-test
 path: tf-emqx-performance-test
 ref: v0.2.3
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-ubuntu20.04
 path: tf-emqx-performance-test/
@@ -184,13 +184,13 @@ jobs:
 working-directory: ./tf-emqx-performance-test
 run: |
 terraform destroy -auto-approve
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: success()
 with:
 name: metrics
 path: |
 "./tf-emqx-performance-test/*.tar.gz"
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: failure()
 with:
 name: terraform
@@ -220,7 +220,7 @@ jobs:
 repository: emqx/tf-emqx-performance-test
 path: tf-emqx-performance-test
 ref: v0.2.3
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-ubuntu20.04
 path: tf-emqx-performance-test/
@@ -257,13 +257,13 @@ jobs:
 working-directory: ./tf-emqx-performance-test
 run: |
 terraform destroy -auto-approve
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: success()
 with:
 name: metrics
 path: |
 "./tf-emqx-performance-test/*.tar.gz"
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: failure()
 with:
 name: terraform
@@ -294,7 +294,7 @@ jobs:
 repository: emqx/tf-emqx-performance-test
 path: tf-emqx-performance-test
 ref: v0.2.3
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-ubuntu20.04
 path: tf-emqx-performance-test/
@@ -330,13 +330,13 @@ jobs:
 working-directory: ./tf-emqx-performance-test
 run: |
 terraform destroy -auto-approve
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: success()
 with:
 name: metrics
 path: |
 "./tf-emqx-performance-test/*.tar.gz"
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: failure()
 with:
 name: terraform

View File

@@ -25,7 +25,7 @@ jobs:
 - emqx
 - emqx-enterprise
 steps:
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: ${{ matrix.profile }}
 - name: extract artifact
@@ -40,7 +40,7 @@ jobs:
 if: failure()
 run: |
 cat _build/${{ matrix.profile }}/rel/emqx/log/erlang.log.*
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: failure()
 with:
 name: conftest-logs-${{ matrix.profile }}

View File

@@ -35,7 +35,7 @@ jobs:
 source env.sh
 PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
 echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: ${{ env.EMQX_NAME }}-docker
 path: /tmp
@@ -90,7 +90,7 @@ jobs:
 fi
 PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
 echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: ${{ env.EMQX_NAME }}-docker
 path: /tmp

View File

@@ -95,7 +95,7 @@ jobs:
 echo "Suites: $SUITES"
 ./rebar3 as standalone_test ct --name 'test@127.0.0.1' -v --readable=true --suite="$SUITES"
 fi
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: failure()
 with:
 name: logs-emqx-app-tests-${{ matrix.type }}

View File

@@ -44,7 +44,7 @@ jobs:
 source env.sh
 PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh "$EMQX_NAME")
 echo "EMQX_TAG=$PKG_VSN" >> "$GITHUB_ENV"
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: "${{ env.EMQX_NAME }}-docker"
 path: /tmp

View File

@@ -31,7 +31,7 @@ jobs:
 else
 wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz $ARCHIVE_URL
 fi
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: apache-jmeter.tgz
 path: /tmp/apache-jmeter.tgz
@@ -58,7 +58,7 @@ jobs:
 source env.sh
 PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
 echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-docker
 path: /tmp
@@ -95,7 +95,7 @@ jobs:
 echo "check logs failed"
 exit 1
 fi
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: always()
 with:
 name: jmeter_logs-advanced_feat-${{ matrix.scripts_type }}
@@ -127,7 +127,7 @@ jobs:
 source env.sh
 PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
 echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-docker
 path: /tmp
@@ -175,7 +175,7 @@ jobs:
 if: failure()
 run: |
 docker compose -f .ci/docker-compose-file/docker-compose-emqx-cluster.yaml logs --no-color > ./jmeter_logs/emqx.log
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: always()
 with:
 name: jmeter_logs-pgsql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.pgsql_tag }}
@@ -204,7 +204,7 @@ jobs:
 source env.sh
 PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
 echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-docker
 path: /tmp
@@ -248,7 +248,7 @@ jobs:
 echo "check logs failed"
 exit 1
 fi
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: always()
 with:
 name: jmeter_logs-mysql_authn_authz-${{ matrix.scripts_type }}_${{ matrix.mysql_tag }}
@@ -273,7 +273,7 @@ jobs:
 source env.sh
 PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
 echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-docker
 path: /tmp
@@ -313,7 +313,7 @@ jobs:
 echo "check logs failed"
 exit 1
 fi
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: always()
 with:
 name: jmeter_logs-JWT_authn-${{ matrix.scripts_type }}
@@ -339,7 +339,7 @@ jobs:
 source env.sh
 PKG_VSN=$(docker run --rm -v $(pwd):$(pwd) -w $(pwd) -u $(id -u) "$EMQX_BUILDER" ./pkg-vsn.sh emqx)
 echo "PKG_VSN=$PKG_VSN" >> "$GITHUB_ENV"
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-docker
 path: /tmp
@@ -370,7 +370,7 @@ jobs:
 echo "check logs failed"
 exit 1
 fi
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: always()
 with:
 name: jmeter_logs-built_in_database_authn_authz-${{ matrix.scripts_type }}

View File

@@ -25,7 +25,7 @@ jobs:
 run:
 shell: bash
 steps:
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: emqx-enterprise
 - name: extract artifact
@@ -45,7 +45,7 @@ jobs:
 run: |
 export PROFILE='emqx-enterprise'
 make emqx-enterprise-tgz
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 name: Upload built emqx and test scenario
 with:
 name: relup_tests_emqx_built
@@ -72,7 +72,7 @@ jobs:
 run:
 shell: bash
 steps:
-- uses: erlef/setup-beam@a6e26b22319003294c58386b6f25edbc7336819a # v1.18.0
+- uses: erlef/setup-beam@b9c58b0450cd832ccdb3c17cc156a47065d2114f # v1.18.1
 with:
 otp-version: 26.2.5
 - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
@@ -88,7 +88,7 @@ jobs:
 ./configure
 make
 echo "$(pwd)/bin" >> $GITHUB_PATH
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 name: Download built emqx and test scenario
 with:
 name: relup_tests_emqx_built
@@ -111,7 +111,7 @@ jobs:
 docker logs node2.emqx.io | tee lux_logs/emqx2.log
 exit 1
 fi
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 name: Save debug data
 if: failure()
 with:

View File

@@ -46,7 +46,7 @@ jobs:
 contents: read
 steps:
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: ${{ matrix.profile }}
@@ -90,7 +90,7 @@ jobs:
 contents: read
 steps:
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: ${{ matrix.profile }}
 - name: extract artifact
@@ -133,7 +133,7 @@ jobs:
 if: failure()
 run: tar -czf logs.tar.gz _build/test/logs
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: failure()
 with:
 name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }}
@@ -164,7 +164,7 @@ jobs:
 CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-sg${{ matrix.suitegroup }}
 steps:
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: ${{ matrix.profile }}
 - name: extract artifact
@@ -193,7 +193,7 @@ jobs:
 if: failure()
 run: tar -czf logs.tar.gz _build/test/logs
-- uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+- uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 if: failure()
 with:
 name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-sg${{ matrix.suitegroup }}

View File

@@ -30,7 +30,7 @@
 persist-credentials: false
 - name: "Run analysis"
-uses: ossf/scorecard-action@dc50aa9510b46c811795eb24b2f1ba02a914e534 # v2.3.3
+uses: ossf/scorecard-action@62b2cac7ed8198b15735ed49ab1e5cf35480ba46 # v2.4.0
 with:
 results_file: results.sarif
 results_format: sarif
@@ -40,7 +40,7 @@
 publish_results: true
 - name: "Upload artifact"
-uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3
+uses: actions/upload-artifact@89ef406dd8d7e03cfd12d9e0a4a378f454709029 # v4.3.5
 with:
 name: SARIF file
 path: results.sarif

View File

@@ -19,7 +19,7 @@
 - emqx-enterprise
 runs-on: ${{ endsWith(github.repository, '/emqx') && 'ubuntu-22.04' || fromJSON('["self-hosted","ephemeral","linux","x64"]') }}
 steps:
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 pattern: "${{ matrix.profile }}-schema-dump-*-x64"
 merge-multiple: true

View File

@@ -30,7 +30,7 @@
 include: ${{ fromJson(inputs.ct-matrix) }}
 container: "${{ inputs.builder }}"
 steps:
-- uses: actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7
+- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
 with:
 name: ${{ matrix.profile }}
 - name: extract artifact

View File

@@ -34,7 +34,7 @@
 pull-requests: write
 steps:
-- uses: actions/checkout@9bb56186c3b09b4f86b1c65136769dd318469633 # v4.1.2
+- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
 with:
 fetch-depth: 0

View File

@@ -10,8 +10,8 @@ include env.sh
 # Dashboard version
 # from https://github.com/emqx/emqx-dashboard5
-export EMQX_DASHBOARD_VERSION ?= v1.9.1
-export EMQX_EE_DASHBOARD_VERSION ?= e1.7.1
+export EMQX_DASHBOARD_VERSION ?= v1.10.0-beta.1
+export EMQX_EE_DASHBOARD_VERSION ?= e1.8.0-beta.1
 export EMQX_RELUP ?= true
 export EMQX_REL_FORM ?= tgz

View File

@@ -683,6 +683,7 @@ end).
-define(FRAME_PARSE_ERROR, frame_parse_error).
-define(FRAME_SERIALIZE_ERROR, frame_serialize_error).
-define(THROW_FRAME_ERROR(Reason), erlang:throw({?FRAME_PARSE_ERROR, Reason})).
-define(THROW_SERIALIZE_ERROR(Reason), erlang:throw({?FRAME_SERIALIZE_ERROR, Reason})).

View File

@@ -91,7 +91,7 @@
 ?_DO_TRACE(Tag, Msg, Meta),
 ?SLOG(
 Level,
-(emqx_trace_formatter:format_meta_map(Meta))#{msg => Msg, tag => Tag},
+(Meta)#{msg => Msg, tag => Tag},
 #{is_trace => false}
 )
 end).

View File

@@ -28,7 +28,7 @@
 {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
 {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
 {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
-{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.11.3"}}},
+{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.12.0"}}},
 {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.19.5"}}},
 {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "3.3.1"}}},
 {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.43.2"}}},

View File

@@ -2,7 +2,7 @@
 {application, emqx, [
 {id, "emqx"},
 {description, "EMQX Core"},
-{vsn, "5.3.3"},
+{vsn, "5.3.4"},
 {modules, []},
 {registered, []},
 {applications, [

View File

@@ -146,7 +146,9 @@
 -type replies() :: emqx_types:packet() | reply() | [reply()].
 -define(IS_MQTT_V5, #channel{conninfo = #{proto_ver := ?MQTT_PROTO_V5}}).
+-define(IS_CONNECTED_OR_REAUTHENTICATING(ConnState),
+((ConnState == connected) orelse (ConnState == reauthenticating))
+).
 -define(IS_COMMON_SESSION_TIMER(N),
 ((N == retry_delivery) orelse (N == expire_awaiting_rel))
 ).
@@ -337,7 +339,7 @@ take_conn_info_fields(Fields, ClientInfo, ConnInfo) ->
 | {shutdown, Reason :: term(), channel()}
 | {shutdown, Reason :: term(), replies(), channel()}.
 handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = ConnState}) when
-ConnState =:= connected orelse ConnState =:= reauthenticating
+?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
 ->
 handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel);
 handle_in(?CONNECT_PACKET(), Channel = #channel{conn_state = connecting}) ->
@@ -567,29 +569,8 @@ handle_in(
 process_disconnect(ReasonCode, Properties, NChannel);
 handle_in(?AUTH_PACKET(), Channel) ->
 handle_out(disconnect, ?RC_IMPLEMENTATION_SPECIFIC_ERROR, Channel);
-handle_in({frame_error, Reason}, Channel = #channel{conn_state = idle}) ->
-shutdown(shutdown_count(frame_error, Reason), Channel);
-handle_in(
-{frame_error, #{cause := frame_too_large} = R}, Channel = #channel{conn_state = connecting}
-) ->
-shutdown(
-shutdown_count(frame_error, R), ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), Channel
-);
-handle_in({frame_error, Reason}, Channel = #channel{conn_state = connecting}) ->
-shutdown(shutdown_count(frame_error, Reason), ?CONNACK_PACKET(?RC_MALFORMED_PACKET), Channel);
-handle_in(
-{frame_error, #{cause := frame_too_large}}, Channel = #channel{conn_state = ConnState}
-) when
-ConnState =:= connected orelse ConnState =:= reauthenticating
-->
-handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
-handle_in({frame_error, Reason}, Channel = #channel{conn_state = ConnState}) when
-ConnState =:= connected orelse ConnState =:= reauthenticating
-->
-handle_out(disconnect, {?RC_MALFORMED_PACKET, Reason}, Channel);
-handle_in({frame_error, Reason}, Channel = #channel{conn_state = disconnected}) ->
-?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
-{ok, Channel};
+handle_in({frame_error, Reason}, Channel) ->
+handle_frame_error(Reason, Channel);
 handle_in(Packet, Channel) ->
 ?SLOG(error, #{msg => "disconnecting_due_to_unexpected_message", packet => Packet}),
 handle_out(disconnect, ?RC_PROTOCOL_ERROR, Channel).
@@ -1021,6 +1002,68 @@ not_nacked({deliver, _Topic, Msg}) ->
 true
 end.
+
+%%--------------------------------------------------------------------
+%% Handle Frame Error
+%%--------------------------------------------------------------------
+
+handle_frame_error(
+Reason = #{cause := frame_too_large},
+Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
+) when
+?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
+->
+ShutdownCount = shutdown_count(frame_error, Reason),
+case proto_ver(Reason, ConnInfo) of
+?MQTT_PROTO_V5 ->
+handle_out(disconnect, {?RC_PACKET_TOO_LARGE, frame_too_large}, Channel);
+_ ->
+shutdown(ShutdownCount, Channel)
+end;
+%% Only send CONNACK with reason code `frame_too_large` for MQTT-v5.0 when connecting,
+%% otherwise DONOT send any CONNACK or DISCONNECT packet.
+handle_frame_error(
+Reason,
+Channel = #channel{conn_state = ConnState, conninfo = ConnInfo}
+) when
+is_map(Reason) andalso
+(ConnState == idle orelse ConnState == connecting)
+->
+ShutdownCount = shutdown_count(frame_error, Reason),
+ProtoVer = proto_ver(Reason, ConnInfo),
+NChannel = Channel#channel{conninfo = ConnInfo#{proto_ver => ProtoVer}},
+case ProtoVer of
+?MQTT_PROTO_V5 ->
+shutdown(ShutdownCount, ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE), NChannel);
+_ ->
+shutdown(ShutdownCount, NChannel)
+end;
+handle_frame_error(
+Reason,
+Channel = #channel{conn_state = connecting}
+) ->
+shutdown(
+shutdown_count(frame_error, Reason),
+?CONNACK_PACKET(?RC_MALFORMED_PACKET),
+Channel
+);
+handle_frame_error(
+Reason,
+Channel = #channel{conn_state = ConnState}
+) when
+?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
+->
+handle_out(
+disconnect,
+{?RC_MALFORMED_PACKET, Reason},
+Channel
+);
+handle_frame_error(
+Reason,
+Channel = #channel{conn_state = disconnected}
+) ->
+?SLOG(error, #{msg => "malformed_mqtt_message", reason => Reason}),
+{ok, Channel}.
+
 %%--------------------------------------------------------------------
 %% Handle outgoing packet
 %%--------------------------------------------------------------------
@@ -1289,7 +1332,7 @@ handle_info(
 session = Session
 }
 ) when
-ConnState =:= connected orelse ConnState =:= reauthenticating
+?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
 ->
 {Intent, Session1} = session_disconnect(ClientInfo, ConnInfo, Session),
 Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(sock_closed, Channel)),
@@ -2636,8 +2679,7 @@ save_alias(outbound, AliasId, Topic, TopicAliases = #{outbound := Aliases}) ->
 NAliases = maps:put(Topic, AliasId, Aliases),
 TopicAliases#{outbound => NAliases}.
--compile({inline, [reply/2, shutdown/2, shutdown/3, sp/1, flag/1]}).
+-compile({inline, [reply/2, shutdown/2, shutdown/3]}).
 reply(Reply, Channel) ->
 {reply, Reply, Channel}.
@@ -2673,13 +2715,13 @@ disconnect_and_shutdown(
 ?IS_MQTT_V5 =
 #channel{conn_state = ConnState}
 ) when
-ConnState =:= connected orelse ConnState =:= reauthenticating
+?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
 ->
 NChannel = ensure_disconnected(Reason, Channel),
 shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), NChannel);
 %% mqtt v3/v4 connected sessions
 disconnect_and_shutdown(Reason, Reply, Channel = #channel{conn_state = ConnState}) when
-ConnState =:= connected orelse ConnState =:= reauthenticating
+?IS_CONNECTED_OR_REAUTHENTICATING(ConnState)
 ->
 NChannel = ensure_disconnected(Reason, Channel),
 shutdown(Reason, Reply, NChannel);
@@ -2722,6 +2764,13 @@ is_durable_session(#channel{session = Session}) ->
 false
 end.
+
+proto_ver(#{proto_ver := ProtoVer}, _ConnInfo) ->
+ProtoVer;
+proto_ver(_Reason, #{proto_ver := ProtoVer}) ->
+ProtoVer;
+proto_ver(_, _) ->
+?MQTT_PROTO_V4.
+
 %%--------------------------------------------------------------------
 %% For CT tests
 %%--------------------------------------------------------------------

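Taken together, the new `handle_frame_error/2` clauses above implement the rule stated in PR #13357: a reply packet is sent for a malformed frame only when the peer is provably speaking MQTT v5.0, and everyone else just has the socket closed. Below is a standalone sketch of that decision table, illustrative only: the module and function names are hypothetical, the inlined reason codes `16#81`/`16#95` are the MQTT v5.0 codes for Malformed Packet and Packet Too Large, and the real code resolves the version via `proto_ver/2` (preferring the version recovered by the parser, then `conninfo`, then falling back to v3.1.1).

```erlang
-module(frame_error_policy).

-export([reply/2]).

%% reply(ConnState, ProtoVer) -> what is sent before the socket closes.
reply(ConnState, _V5 = 5) when
    ConnState =:= connected; ConnState =:= reauthenticating
->
    %% Established v5 connection: send DISCONNECT (16#81 Malformed Packet,
    %% or 16#95 Packet Too Large for the frame_too_large cause).
    {disconnect, 16#81};
reply(ConnState, _V5 = 5) when
    ConnState =:= idle; ConnState =:= connecting
->
    %% Still handshaking, but detectably v5: reply with a CONNACK.
    {connack, 16#95};
reply(disconnected, _AnyVer) ->
    %% Already disconnected: only log malformed_mqtt_message.
    log_only;
reply(_ConnState, _NonV5) ->
    %% v3.x peers or undetectable versions: close silently.
    none.
```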
View File

@@ -783,7 +783,8 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
 input_bytes => Data,
 parsed_packets => Packets
 }),
-{[{frame_error, Reason} | Packets], State};
+NState = enrich_state(Reason, State),
+{[{frame_error, Reason} | Packets], NState};
 error:Reason:Stacktrace ->
 ?LOG(error, #{
 at_state => emqx_frame:describe_state(ParseState),
@@ -1227,6 +1228,12 @@ inc_counter(Key, Inc) ->
 _ = emqx_pd:inc_counter(Key, Inc),
 ok.
+enrich_state(#{parse_state := NParseState}, State) ->
+Serialize = emqx_frame:serialize_opts(NParseState),
+State#state{parse_state = NParseState, serialize = Serialize};
+enrich_state(_, State) ->
+State.
 set_tcp_keepalive({quic, _Listener}) ->
 ok;
 set_tcp_keepalive({Type, Id}) ->

View File

@@ -267,28 +267,50 @@ packet(Header, Variable) ->
 packet(Header, Variable, Payload) ->
 #mqtt_packet{header = Header, variable = Variable, payload = Payload}.
-parse_connect(FrameBin, StrictMode) ->
-{ProtoName, Rest} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name),
-case ProtoName of
-<<"MQTT">> ->
-ok;
-<<"MQIsdp">> ->
-ok;
-_ ->
-%% from spec: the server MAY send disconnect with reason code 0x84
-%% we chose to close socket because the client is likely not talking MQTT anyway
-?PARSE_ERR(#{
-cause => invalid_proto_name,
-expected => <<"'MQTT' or 'MQIsdp'">>,
-received => ProtoName
-})
-end,
-parse_connect2(ProtoName, Rest, StrictMode).
+parse_connect(FrameBin, Options = #{strict_mode := StrictMode}) ->
+{ProtoName, Rest0} = parse_utf8_string_with_cause(FrameBin, StrictMode, invalid_proto_name),
+%% No need to parse and check proto_ver if proto_name is invalid, check it first
+%% And the matching check of `proto_name` and `proto_ver` fields will be done in `emqx_packet:check_proto_ver/2`
+_ = validate_proto_name(ProtoName),
+{IsBridge, ProtoVer, Rest2} = parse_connect_proto_ver(Rest0),
+NOptions = Options#{version => ProtoVer},
+try
+do_parse_connect(ProtoName, IsBridge, ProtoVer, Rest2, StrictMode)
+catch
+throw:{?FRAME_PARSE_ERROR, ReasonM} when is_map(ReasonM) ->
+?PARSE_ERR(
+ReasonM#{
+proto_ver => ProtoVer,
+proto_name => ProtoName,
+parse_state => ?NONE(NOptions)
+}
+);
+throw:{?FRAME_PARSE_ERROR, Reason} ->
+?PARSE_ERR(
+#{
+cause => Reason,
+proto_ver => ProtoVer,
+proto_name => ProtoName,
+parse_state => ?NONE(NOptions)
+}
+)
+end.
-parse_connect2(
+do_parse_connect(
 ProtoName,
-<<BridgeTag:4, ProtoVer:4, UsernameFlagB:1, PasswordFlagB:1, WillRetainB:1, WillQoS:2,
-WillFlagB:1, CleanStart:1, Reserved:1, KeepAlive:16/big, Rest2/binary>>,
+IsBridge,
+ProtoVer,
+<<
+UsernameFlagB:1,
+PasswordFlagB:1,
+WillRetainB:1,
+WillQoS:2,
+WillFlagB:1,
+CleanStart:1,
+Reserved:1,
+KeepAlive:16/big,
+Rest/binary
+>>,
 StrictMode
 ) ->
 _ = validate_connect_reserved(Reserved),
@@ -303,14 +325,14 @@ parse_connect2(
 UsernameFlag = bool(UsernameFlagB),
 PasswordFlag = bool(PasswordFlagB)
 ),
-{Properties, Rest3} = parse_properties(Rest2, ProtoVer, StrictMode),
+{Properties, Rest3} = parse_properties(Rest, ProtoVer, StrictMode),
 {ClientId, Rest4} = parse_utf8_string_with_cause(Rest3, StrictMode, invalid_clientid),
 ConnPacket = #mqtt_packet_connect{
 proto_name = ProtoName,
 proto_ver = ProtoVer,
 %% For bridge mode, non-standard implementation
 %% Invented by mosquitto, named 'try_private': https://mosquitto.org/man/mosquitto-conf-5.html
-is_bridge = (BridgeTag =:= 8),
+is_bridge = IsBridge,
 clean_start = bool(CleanStart),
 will_flag = WillFlag,
 will_qos = WillQoS,
@@ -343,16 +365,16 @@ parse_connect2(
 unexpected_trailing_bytes => size(Rest7)
 })
 end;
-parse_connect2(_ProtoName, Bin, _StrictMode) ->
-%% sent less than 32 bytes
+do_parse_connect(_ProtoName, _IsBridge, _ProtoVer, Bin, _StrictMode) ->
+%% sent less than 24 bytes
 ?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
 parse_packet(
 #mqtt_packet_header{type = ?CONNECT},
 FrameBin,
-#{strict_mode := StrictMode}
+Options
 ) ->
-parse_connect(FrameBin, StrictMode);
+parse_connect(FrameBin, Options);
 parse_packet(
 #mqtt_packet_header{type = ?CONNACK},
 <<AckFlags:8, ReasonCode:8, Rest/binary>>,
@@ -516,6 +538,12 @@ parse_packet_id(<<PacketId:16/big, Rest/binary>>) ->
 parse_packet_id(_) ->
 ?PARSE_ERR(invalid_packet_id).
+parse_connect_proto_ver(<<BridgeTag:4, ProtoVer:4, Rest/binary>>) ->
+{_IsBridge = (BridgeTag =:= 8), ProtoVer, Rest};
+parse_connect_proto_ver(Bin) ->
+%% sent less than 1 bytes or empty
+?PARSE_ERR(#{cause => malformed_connect, header_bytes => Bin}).
 parse_properties(Bin, Ver, _StrictMode) when Ver =/= ?MQTT_PROTO_V5 ->
 {#{}, Bin};
 %% TODO: version mess?
@@ -739,6 +767,8 @@ serialize_fun(#{version := Ver, max_size := MaxSize, strict_mode := StrictMode})
 initial_serialize_opts(Opts) ->
 maps:merge(?DEFAULT_OPTIONS, Opts).
+serialize_opts(?NONE(Options)) ->
+maps:merge(?DEFAULT_OPTIONS, Options);
 serialize_opts(#mqtt_packet_connect{proto_ver = ProtoVer, properties = ConnProps}) ->
 MaxSize = get_property('Maximum-Packet-Size', ConnProps, ?MAX_PACKET_SIZE),
 #{version => ProtoVer, max_size => MaxSize, strict_mode => false}.
@@ -1157,18 +1187,34 @@ validate_subqos([3 | _]) -> ?PARSE_ERR(bad_subqos);
 validate_subqos([_ | T]) -> validate_subqos(T);
 validate_subqos([]) -> ok.
+%% from spec: the server MAY send disconnect with reason code 0x84
+%% we chose to close socket because the client is likely not talking MQTT anyway
+validate_proto_name(<<"MQTT">>) ->
+ok;
+validate_proto_name(<<"MQIsdp">>) ->
+ok;
+validate_proto_name(ProtoName) ->
+?PARSE_ERR(#{
+cause => invalid_proto_name,
+expected => <<"'MQTT' or 'MQIsdp'">>,
+received => ProtoName
+}).
 %% MQTT-v3.1.1-[MQTT-3.1.2-3], MQTT-v5.0-[MQTT-3.1.2-3]
+-compile({inline, [validate_connect_reserved/1]}).
 validate_connect_reserved(0) -> ok;
 validate_connect_reserved(1) -> ?PARSE_ERR(reserved_connect_flag).
+-compile({inline, [validate_connect_will/3]}).
 %% MQTT-v3.1.1-[MQTT-3.1.2-13], MQTT-v5.0-[MQTT-3.1.2-11]
-validate_connect_will(false, _, WillQos) when WillQos > 0 -> ?PARSE_ERR(invalid_will_qos);
+validate_connect_will(false, _, WillQoS) when WillQoS > 0 -> ?PARSE_ERR(invalid_will_qos);
 %% MQTT-v3.1.1-[MQTT-3.1.2-14], MQTT-v5.0-[MQTT-3.1.2-12]
 validate_connect_will(true, _, WillQoS) when WillQoS > 2 -> ?PARSE_ERR(invalid_will_qos);
 %% MQTT-v3.1.1-[MQTT-3.1.2-15], MQTT-v5.0-[MQTT-3.1.2-13]
 validate_connect_will(false, WillRetain, _) when WillRetain -> ?PARSE_ERR(invalid_will_retain);
 validate_connect_will(_, _, _) -> ok.
+-compile({inline, [validate_connect_password_flag/4]}).
 %% MQTT-v3.1
 %% Username flag and password flag are not strongly related
 %% https://public.dhe.ibm.com/software/dw/webservices/ws-mqtt/mqtt-v3r1.html#connect
@@ -1183,6 +1229,7 @@ validate_connect_password_flag(true, ?MQTT_PROTO_V5, _, _) ->
 validate_connect_password_flag(_, _, _, _) ->
 ok.
+-compile({inline, [bool/1]}).
 bool(0) -> false;
 bool(1) -> true.

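With this refactor, a CONNECT that fails validation after the protocol name and version were read throws a `?FRAME_PARSE_ERROR` map enriched with `proto_ver`, `proto_name`, and a fresh `parse_state`; `enrich_state/2` in the connection diff above turns that `parse_state` into serializer options so the error reply matches the client's protocol version. A hedged usage sketch follows (it assumes `emqx_frame:parse/1` and the error shape shown in this diff; `parse_or_recover/1` is a hypothetical helper, not EMQX API):

```erlang
%% Parse a frame; on a parse error, recover enough context to serialize
%% a version-appropriate error reply.
parse_or_recover(Bin) ->
    try
        emqx_frame:parse(Bin)
    catch
        throw:{frame_parse_error, #{parse_state := PSt} = Reason} ->
            %% The parse state carries the detected proto_ver, so replies
            %% can be serialized for the protocol the client speaks.
            {error, Reason, emqx_frame:serialize_opts(PSt)};
        throw:{frame_parse_error, Reason} ->
            %% Version undetectable: no reply will be sent.
            {error, Reason, no_serialize_opts}
    end.
```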
View File

@@ -432,7 +432,7 @@ do_start_listener(Type, Name, Id, #{bind := ListenOn} = Opts) when ?ESOCKD_LISTE
 esockd:open(
 Id,
 ListenOn,
-merge_default(esockd_opts(Id, Type, Name, Opts))
+merge_default(esockd_opts(Id, Type, Name, Opts, _OldOpts = undefined))
 );
 %% Start MQTT/WS listener
 do_start_listener(Type, Name, Id, Opts) when ?COWBOY_LISTENER(Type) ->
@@ -476,7 +476,7 @@ do_update_listener(Type, Name, OldConf, NewConf = #{bind := ListenOn}) when
 Id = listener_id(Type, Name),
 case maps:get(bind, OldConf) of
 ListenOn ->
-esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf));
+esockd:set_options({Id, ListenOn}, esockd_opts(Id, Type, Name, NewConf, OldConf));
 _Different ->
 %% TODO
 %% Again, we're not strictly required to drop live connections in this case.
@@ -588,7 +588,7 @@ perform_listener_change(update, {{Type, Name, ConfOld}, {_, _, ConfNew}}) ->
 perform_listener_change(stop, {Type, Name, Conf}) ->
 stop_listener(Type, Name, Conf).
-esockd_opts(ListenerId, Type, Name, Opts0) ->
+esockd_opts(ListenerId, Type, Name, Opts0, OldOpts) ->
 Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
 Limiter = limiter(Opts0),
 Opts2 =
@@ -620,7 +620,7 @@ esockd_opts(ListenerId, Type, Name, Opts0) ->
 tcp ->
 Opts3#{tcp_options => tcp_opts(Opts0)};
 ssl ->
-OptsWithCRL = inject_crl_config(Opts0),
+OptsWithCRL = inject_crl_config(Opts0, OldOpts),
 OptsWithSNI = inject_sni_fun(ListenerId, OptsWithCRL),
 OptsWithRootFun = inject_root_fun(OptsWithSNI),
 OptsWithVerifyFun = inject_verify_fun(OptsWithRootFun),
@@ -996,7 +996,7 @@ inject_sni_fun(_ListenerId, Conf) ->
 Conf.
 inject_crl_config(
-Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}
+Conf = #{ssl_options := #{enable_crl_check := true} = SSLOpts}, _OldOpts
 ) ->
 HTTPTimeout = emqx_config:get([crl_cache, http_timeout], timer:seconds(15)),
 Conf#{
@@ -1006,7 +1006,16 @@ inject_crl_config(
 crl_cache => {emqx_ssl_crl_cache, {internal, [{http, HTTPTimeout}]}}
 }
 };
-inject_crl_config(Conf) ->
+inject_crl_config(#{ssl_options := SSLOpts0} = Conf0, #{} = OldOpts) ->
+%% Note: we must set crl options to `undefined' to unset them. Otherwise,
+%% `esockd' will retain such options when `esockd:merge_opts/2' is called and the SSL
+%% options were previously enabled.
+WasEnabled = emqx_utils_maps:deep_get([ssl_options, enable_crl_check], OldOpts, false),
+Undefine = fun(Acc, K) -> emqx_utils_maps:put_if(Acc, K, undefined, WasEnabled) end,
+SSLOpts1 = Undefine(SSLOpts0, crl_check),
+SSLOpts = Undefine(SSLOpts1, crl_cache),
+Conf0#{ssl_options := SSLOpts};
+inject_crl_config(Conf, undefined = _OldOpts) ->
 Conf.
 maybe_unregister_ocsp_stapling_refresh(

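The `undefined` trick in `inject_crl_config/2` matters because `esockd:merge_opts/2` merges the new listener options over the old ones, so a key that is merely absent keeps its stale value. A standalone illustration of the same pitfall using plain maps (values illustrative, not the esockd API):

```erlang
%% In an Erlang shell. Old SSL opts had CRL checking enabled:
Old = #{verify => verify_peer, crl_check => peer,
        crl_cache => {emqx_ssl_crl_cache, {internal, [{http, 15000}]}}},
%% An update that disables CRL checking simply omits the CRL keys ...
New = #{verify => verify_peer},
%% ... but merging keeps the stale CRL configuration alive:
maps:merge(Old, New).
%% => crl_check/crl_cache survive the update. Writing them as `undefined'
%%    in the new options overrides the stale values, which is the fix here.
```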
View File

@@ -105,7 +105,7 @@ format(Msg, Meta, Config) ->
 maybe_format_msg(undefined, _Meta, _Config) ->
 #{};
 maybe_format_msg({report, Report0} = Msg, #{report_cb := Cb} = Meta, Config) ->
-Report = emqx_logger_textfmt:try_encode_payload(Report0, Config),
+Report = emqx_logger_textfmt:try_encode_meta(Report0, Config),
 case is_map(Report) andalso Cb =:= ?DEFAULT_FORMATTER of
 true ->
 %% reporting a map without a customised format function
View File

@@ -20,7 +20,7 @@
 -export([format/2]).
 -export([check_config/1]).
--export([try_format_unicode/1, try_encode_payload/2]).
+-export([try_format_unicode/1, try_encode_meta/2]).
 %% Used in the other log formatters
 -export([evaluate_lazy_values_if_dbg_level/1, evaluate_lazy_values/1]).
@@ -111,7 +111,7 @@ is_list_report_acceptable(_) ->
 enrich_report(ReportRaw0, Meta, Config) ->
 %% clientid and peername always in emqx_conn's process metadata.
 %% topic and username can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2
-ReportRaw = try_encode_payload(ReportRaw0, Config),
+ReportRaw = try_encode_meta(ReportRaw0, Config),
 Topic =
 case maps:get(topic, Meta, undefined) of
 undefined -> maps:get(topic, ReportRaw, undefined);
@@ -180,9 +180,22 @@ enrich_topic({Fmt, Args}, #{topic := Topic}) when is_list(Fmt) ->
 enrich_topic(Msg, _) ->
 Msg.
-try_encode_payload(#{payload := Payload} = Report, #{payload_encode := Encode}) ->
+try_encode_meta(Report, Config) ->
+lists:foldl(
+fun(Meta, Acc) ->
+try_encode_meta(Meta, Acc, Config)
+end,
+Report,
+[payload, packet]
+).
+try_encode_meta(payload, #{payload := Payload} = Report, #{payload_encode := Encode}) ->
 Report#{payload := encode_payload(Payload, Encode)};
-try_encode_payload(Report, _Config) ->
+try_encode_meta(packet, #{packet := Packet} = Report, #{payload_encode := Encode}) when
+is_tuple(Packet)
+->
+Report#{packet := emqx_packet:format(Packet, Encode)};
+try_encode_meta(_, Report, _Config) ->
 Report.
@@ -190,4 +203,5 @@ encode_payload(Payload, text) ->
 encode_payload(_Payload, hidden) ->
 "******";
 encode_payload(Payload, hex) ->
-binary:encode_hex(Payload).
+Bin = emqx_utils_conv:bin(Payload),
+binary:encode_hex(Bin).

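After this change the formatter runs one encoding pass over both the `payload` and the `packet` keys of a log report, so raw `#mqtt_packet{}` records are rendered through `emqx_packet:format/2` instead of leaking into logs as bare tuples. A sketch of the observable effect (report contents illustrative; `try_encode_meta/2` is exported per the diff above):

```erlang
%% In an Erlang shell, with payload encoding set to `hidden':
Config = #{payload_encode => hidden},
Report = #{msg => "mqtt_packet_received", payload => <<"secret">>},
emqx_logger_textfmt:try_encode_meta(Report, Config).
%% => #{msg => "mqtt_packet_received", payload => "******"}
%% A `packet' key holding an #mqtt_packet{} record would likewise be
%% formatted via emqx_packet:format(Packet, hidden).
```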
View File

@ -51,7 +51,6 @@
]).
-export([
format/1,
format/2
]).
@ -481,10 +480,6 @@ will_msg(#mqtt_packet_connect{
headers = #{username => Username, properties => Props}
}.
%% @doc Format packet
-spec format(emqx_types:packet()) -> iolist().
format(Packet) -> format(Packet, emqx_trace_handler:payload_encode()).
%% @doc Format packet
-spec format(emqx_types:packet(), hex | text | hidden) -> iolist().
format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) ->
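With format/1 gone, call sites choose the encode mode explicitly; e.g. a sketch using the packet macros seen in the test suite below:
%% Sketch: the caller now decides how payloads are rendered.
Packet = ?PUBLISH_PACKET(?QOS_1, <<"t">>, 1, <<"secret">>),
io:format("~ts~n", [emqx_packet:format(Packet, hidden)]).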

View File

@ -56,6 +56,11 @@
cold_get_subscription/2
]).
-export([
format_lease_events/1,
format_stream_progresses/1
]).
-define(schedule_subscribe, schedule_subscribe).
-define(schedule_unsubscribe, schedule_unsubscribe).
@ -236,14 +241,14 @@ schedule_subscribe(
ScheduledActions1 = ScheduledActions0#{
ShareTopicFilter => ScheduledAction#{type => {?schedule_subscribe, SubOpts}}
},
?tp(warning, shared_subs_schedule_subscribe_override, #{
?tp(debug, shared_subs_schedule_subscribe_override, #{
share_topic_filter => ShareTopicFilter,
new_type => {?schedule_subscribe, SubOpts},
old_action => format_schedule_action(ScheduledAction)
}),
SharedSubS0#{scheduled_actions := ScheduledActions1};
_ ->
?tp(warning, shared_subs_schedule_subscribe_new, #{
?tp(debug, shared_subs_schedule_subscribe_new, #{
share_topic_filter => ShareTopicFilter, subopts => SubOpts
}),
Agent1 = emqx_persistent_session_ds_shared_subs_agent:on_subscribe(
@ -294,7 +299,7 @@ schedule_unsubscribe(
ScheduledActions1 = ScheduledActions0#{
ShareTopicFilter => ScheduledAction1
},
?tp(warning, shared_subs_schedule_unsubscribe_override, #{
?tp(debug, shared_subs_schedule_unsubscribe_override, #{
share_topic_filter => ShareTopicFilter,
new_type => ?schedule_unsubscribe,
old_action => format_schedule_action(ScheduledAction0)
@ -309,7 +314,7 @@ schedule_unsubscribe(
progresses => []
}
},
?tp(warning, shared_subs_schedule_unsubscribe_new, #{
?tp(debug, shared_subs_schedule_unsubscribe_new, #{
share_topic_filter => ShareTopicFilter,
stream_keys => format_stream_keys(StreamKeys)
}),
@ -334,7 +339,7 @@ renew_streams(S0, #{agent := Agent0, scheduled_actions := ScheduledActions} = Sh
Agent0
),
StreamLeaseEvents =/= [] andalso
?tp(warning, shared_subs_new_stream_lease_events, #{
?tp(debug, shared_subs_new_stream_lease_events, #{
stream_lease_events => format_lease_events(StreamLeaseEvents)
}),
S1 = lists:foldl(
@ -501,7 +506,7 @@ run_scheduled_action(
Progresses1 = stream_progresses(S, StreamKeysToWait0 -- StreamKeysToWait1) ++ Progresses0,
case StreamKeysToWait1 of
[] ->
?tp(warning, shared_subs_schedule_action_complete, #{
?tp(debug, shared_subs_schedule_action_complete, #{
share_topic_filter => ShareTopicFilter,
progresses => format_stream_progresses(Progresses1),
type => Type
@ -525,7 +530,7 @@ run_scheduled_action(
end;
_ ->
Action1 = Action#{stream_keys_to_wait => StreamKeysToWait1, progresses => Progresses1},
?tp(warning, shared_subs_schedule_action_continue, #{
?tp(debug, shared_subs_schedule_action_continue, #{
share_topic_filter => ShareTopicFilter,
new_action => format_schedule_action(Action1)
}),

View File

@ -62,7 +62,7 @@
streams := [{pid(), quicer:stream_handle()}],
%% New stream opts
stream_opts := map(),
%% If conneciton is resumed from session ticket
%% If connection is resumed from session ticket
is_resumed => boolean(),
%% mqtt message serializer config
serialize => undefined,
@ -70,8 +70,8 @@
}.
-type cb_ret() :: quicer_lib:cb_ret().
%% @doc Data streams initializions are started in parallel with control streams, data streams are blocked
%% for the activation from control stream after it is accepted as a legit conneciton.
%% @doc Data streams initializations are started in parallel with control streams, data streams are blocked
%% for the activation from control stream after it is accepted as a legit connection.
%% For security, the initial number of allowed data streams from client should be limited by
%% 'peer_bidi_stream_count` & 'peer_unidi_stream_count`
-spec activate_data_streams(pid(), {
@ -80,7 +80,7 @@
activate_data_streams(ConnOwner, {PS, Serialize, Channel}) ->
gen_server:call(ConnOwner, {activate_data_streams, {PS, Serialize, Channel}}, infinity).
%% @doc conneciton owner init callback
%% @doc connection owner init callback
-spec init(map()) -> {ok, cb_state()}.
init(#{stream_opts := SOpts} = S) when is_list(SOpts) ->
init(S#{stream_opts := maps:from_list(SOpts)});

View File

@ -589,6 +589,14 @@ ensure_valid_options(Options, Versions) ->
ensure_valid_options([], _, Acc) ->
lists:reverse(Acc);
ensure_valid_options([{K, undefined} | T], Versions, Acc) when
K =:= crl_check;
K =:= crl_cache
->
%% Note: we must set crl options to `undefined' to unset them. Otherwise,
%% `esockd' will retain such options when `esockd:merge_opts/2' is called and the SSL
%% options were previously enabled.
ensure_valid_options(T, Versions, [{K, undefined} | Acc]);
ensure_valid_options([{_, undefined} | T], Versions, Acc) ->
ensure_valid_options(T, Versions, Acc);
ensure_valid_options([{_, ""} | T], Versions, Acc) ->

View File

@ -17,7 +17,6 @@
-include("emqx_mqtt.hrl").
-export([format/2]).
-export([format_meta_map/1]).
%% logger_formatter:config/0 is not exported.
-type config() :: map().
@ -43,10 +42,6 @@ format(
format(Event, Config) ->
emqx_logger_textfmt:format(Event, Config).
format_meta_map(Meta) ->
Encode = emqx_trace_handler:payload_encode(),
format_meta_map(Meta, Encode).
format_meta_map(Meta, Encode) ->
format_meta_map(Meta, Encode, [
{packet, fun format_packet/2},

View File

@ -436,6 +436,7 @@ websocket_handle({Frame, _}, State) ->
%% TODO: should not close the ws connection
?LOG(error, #{msg => "unexpected_frame", frame => Frame}),
shutdown(unexpected_ws_frame, State).
websocket_info({call, From, Req}, State) ->
handle_call(From, Req, State);
websocket_info({cast, rate_limit}, State) ->
@ -737,7 +738,8 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
input_bytes => Data
}),
FrameError = {frame_error, Reason},
{[{incoming, FrameError} | Packets], State};
NState = enrich_state(Reason, State),
{[{incoming, FrameError} | Packets], NState};
error:Reason:Stacktrace ->
?LOG(error, #{
at_state => emqx_frame:describe_state(ParseState),
@ -830,7 +832,7 @@ serialize_and_inc_stats_fun(#state{serialize = Serialize}) ->
?LOG(warning, #{
msg => "packet_discarded",
reason => "frame_too_large",
packet => emqx_packet:format(Packet)
packet => Packet
}),
ok = emqx_metrics:inc('delivery.dropped.too_large'),
ok = emqx_metrics:inc('delivery.dropped'),
@ -1069,6 +1071,13 @@ check_max_connection(Type, Listener) ->
{denny, Reason}
end
end.
enrich_state(#{parse_state := NParseState}, State) ->
Serialize = emqx_frame:serialize_opts(NParseState),
State#state{parse_state = NParseState, serialize = Serialize};
enrich_state(_, State) ->
State.
%%--------------------------------------------------------------------
%% For CT tests
%%--------------------------------------------------------------------

View File

@ -414,24 +414,32 @@ t_handle_in_auth(_) ->
emqx_channel:handle_in(?AUTH_PACKET(), Channel).
t_handle_in_frame_error(_) ->
IdleChannel = channel(#{conn_state => idle}),
{shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan} =
emqx_channel:handle_in({frame_error, #{cause => frame_too_large}}, IdleChannel),
IdleChannelV5 = channel(#{conn_state => idle}),
%% no CONNACK packet for v4
?assertMatch(
{shutdown, #{shutdown_count := frame_too_large, cause := frame_too_large}, _Chan},
emqx_channel:handle_in(
{frame_error, #{cause => frame_too_large}}, v4(IdleChannelV5)
)
),
ConnectingChan = channel(#{conn_state => connecting}),
ConnackPacket = ?CONNACK_PACKET(?RC_PACKET_TOO_LARGE),
{shutdown,
#{
shutdown_count := frame_too_large,
cause := frame_too_large,
limit := 100,
received := 101
},
ConnackPacket,
_} =
?assertMatch(
{shutdown,
#{
shutdown_count := frame_too_large,
cause := frame_too_large,
limit := 100,
received := 101
},
ConnackPacket, _},
emqx_channel:handle_in(
{frame_error, #{cause => frame_too_large, received => 101, limit => 100}},
ConnectingChan
),
)
),
DisconnectPacket = ?DISCONNECT_PACKET(?RC_PACKET_TOO_LARGE),
ConnectedChan = channel(#{conn_state => connected}),
?assertMatch(

View File

@ -138,13 +138,14 @@ init_per_testcase(t_refresh_config = TestCase, Config) ->
];
init_per_testcase(TestCase, Config) when
TestCase =:= t_update_listener;
TestCase =:= t_update_listener_enable_disable;
TestCase =:= t_validations
->
ct:timetrap({seconds, 30}),
ok = snabbkaffe:start_trace(),
%% when running emqx standalone tests, we can't use those
%% features.
case does_module_exist(emqx_management) of
case does_module_exist(emqx_mgmt) of
true ->
DataDir = ?config(data_dir, Config),
CRLFile = filename:join([DataDir, "intermediate-revoked.crl.pem"]),
@ -165,7 +166,7 @@ init_per_testcase(TestCase, Config) when
{emqx_conf, #{config => #{listeners => #{ssl => #{default => ListenerConf}}}}},
emqx,
emqx_management,
{emqx_dashboard, "dashboard.listeners.http { enable = true, bind = 18083 }"}
emqx_mgmt_api_test_util:emqx_dashboard()
],
#{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}
),
@ -206,6 +207,7 @@ read_crl(Filename) ->
end_per_testcase(TestCase, Config) when
TestCase =:= t_update_listener;
TestCase =:= t_update_listener_enable_disable;
TestCase =:= t_validations
->
Skip = proplists:get_bool(skip_does_not_apply, Config),
@ -1057,3 +1059,104 @@ do_t_validations(_Config) ->
),
ok.
%% Checks that if CRL is ever enabled and then disabled, clients can connect, even if they
%% would otherwise not have their corresponding CRLs cached and fail with `{bad_crls,
%% no_relevant_crls}`.
t_update_listener_enable_disable(Config) ->
case proplists:get_bool(skip_does_not_apply, Config) of
true ->
ct:pal("skipping as this test does not apply in this profile"),
ok;
false ->
do_t_update_listener_enable_disable(Config)
end.
do_t_update_listener_enable_disable(Config) ->
DataDir = ?config(data_dir, Config),
Keyfile = filename:join([DataDir, "server.key.pem"]),
Certfile = filename:join([DataDir, "server.cert.pem"]),
Cacertfile = filename:join([DataDir, "ca-chain.cert.pem"]),
ClientCert = filename:join(DataDir, "client.cert.pem"),
ClientKey = filename:join(DataDir, "client.key.pem"),
ListenerId = "ssl:default",
%% Enable CRL
{ok, {{_, 200, _}, _, ListenerData0}} = get_listener_via_api(ListenerId),
CRLConfig0 =
#{
<<"ssl_options">> =>
#{
<<"keyfile">> => Keyfile,
<<"certfile">> => Certfile,
<<"cacertfile">> => Cacertfile,
<<"enable_crl_check">> => true,
<<"fail_if_no_peer_cert">> => true
}
},
ListenerData1 = emqx_utils_maps:deep_merge(ListenerData0, CRLConfig0),
{ok, {_, _, ListenerData2}} = update_listener_via_api(ListenerId, ListenerData1),
?assertMatch(
#{
<<"ssl_options">> :=
#{
<<"enable_crl_check">> := true,
<<"verify">> := <<"verify_peer">>,
<<"fail_if_no_peer_cert">> := true
}
},
ListenerData2
),
%% Disable CRL
CRLConfig1 =
#{
<<"ssl_options">> =>
#{
<<"keyfile">> => Keyfile,
<<"certfile">> => Certfile,
<<"cacertfile">> => Cacertfile,
<<"enable_crl_check">> => false,
<<"fail_if_no_peer_cert">> => true
}
},
ListenerData3 = emqx_utils_maps:deep_merge(ListenerData2, CRLConfig1),
redbug:start(
[
"esockd_server:get_listener_prop -> return",
"esockd_server:set_listener_prop -> return",
"esockd:merge_opts -> return",
"esockd_listener_sup:set_options -> return",
"emqx_listeners:inject_crl_config -> return"
],
[{msgs, 100}]
),
{ok, {_, _, ListenerData4}} = update_listener_via_api(ListenerId, ListenerData3),
?assertMatch(
#{
<<"ssl_options">> :=
#{
<<"enable_crl_check">> := false,
<<"verify">> := <<"verify_peer">>,
<<"fail_if_no_peer_cert">> := true
}
},
ListenerData4
),
%% Now the client that would be blocked tries to connect and should now be allowed.
{ok, C} = emqtt:start_link([
{ssl, true},
{ssl_opts, [
{certfile, ClientCert},
{keyfile, ClientKey},
{verify, verify_none}
]},
{port, 8883}
]),
?assertMatch({ok, _}, emqtt:connect(C)),
emqtt:stop(C),
?assertNotReceive({http_get, _}),
ok.

View File

@ -63,6 +63,7 @@ groups() ->
t_parse_malformed_properties,
t_malformed_connect_header,
t_malformed_connect_data,
t_malformed_connect_data_proto_ver,
t_reserved_connect_flag,
t_invalid_clientid,
t_undefined_password,
@ -167,6 +168,8 @@ t_parse_malformed_utf8_string(_) ->
ParseState = emqx_frame:initial_parse_state(#{strict_mode => true}),
?ASSERT_FRAME_THROW(utf8_string_invalid, emqx_frame:parse(MalformedPacket, ParseState)).
%% TODO: parse v3 with 0 length clientid
t_serialize_parse_v3_connect(_) ->
Bin =
<<16, 37, 0, 6, 77, 81, 73, 115, 100, 112, 3, 2, 0, 60, 0, 23, 109, 111, 115, 113, 112, 117,
@ -324,7 +327,7 @@ t_serialize_parse_bridge_connect(_) ->
header = #mqtt_packet_header{type = ?CONNECT},
variable = #mqtt_packet_connect{
clientid = <<"C_00:0C:29:2B:77:52">>,
proto_ver = 16#03,
proto_ver = ?MQTT_PROTO_V3,
proto_name = <<"MQIsdp">>,
is_bridge = true,
will_retain = true,
@ -686,15 +689,36 @@ t_malformed_connect_header(_) ->
).
t_malformed_connect_data(_) ->
ProtoNameWithLen = <<0, 6, "MQIsdp">>,
ConnectFlags = <<2#00000000>>,
ClientIdwithLen = <<0, 1, "a">>,
UnexpectedRestBin = <<0, 1, 2>>,
?ASSERT_FRAME_THROW(
#{cause := malformed_connect, unexpected_trailing_bytes := _},
emqx_frame:parse(<<16, 15, 0, 6, 77, 81, 73, 115, 100, 112, 3, 0, 0, 0, 0, 0, 0>>)
#{cause := malformed_connect, unexpected_trailing_bytes := 3},
emqx_frame:parse(
<<16, 18, ProtoNameWithLen/binary, ?MQTT_PROTO_V3, ConnectFlags/binary, 0, 0,
ClientIdwithLen/binary, UnexpectedRestBin/binary>>
)
).
t_malformed_connect_data_proto_ver(_) ->
Proto3NameWithLen = <<0, 6, "MQIsdp">>,
?ASSERT_FRAME_THROW(
#{cause := malformed_connect, header_bytes := <<>>},
emqx_frame:parse(<<16, 8, Proto3NameWithLen/binary>>)
),
ProtoNameWithLen = <<0, 4, "MQTT">>,
?ASSERT_FRAME_THROW(
#{cause := malformed_connect, header_bytes := <<>>},
emqx_frame:parse(<<16, 6, ProtoNameWithLen/binary>>)
).
t_reserved_connect_flag(_) ->
?assertException(
throw,
{frame_parse_error, reserved_connect_flag},
{frame_parse_error, #{
cause := reserved_connect_flag, proto_ver := ?MQTT_PROTO_V3, proto_name := <<"MQIsdp">>
}},
emqx_frame:parse(<<16, 15, 0, 6, 77, 81, 73, 115, 100, 112, 3, 1, 0, 0, 1, 0, 0>>)
).
@ -726,7 +750,7 @@ t_undefined_password(_) ->
},
variable = #mqtt_packet_connect{
proto_name = <<"MQTT">>,
proto_ver = 4,
proto_ver = ?MQTT_PROTO_V4,
is_bridge = false,
clean_start = true,
will_flag = false,
@ -774,7 +798,9 @@ t_invalid_will_retain(_) ->
54, 75, 78, 112, 57, 0, 6, 68, 103, 55, 87, 87, 87>>,
?assertException(
throw,
{frame_parse_error, invalid_will_retain},
{frame_parse_error, #{
cause := invalid_will_retain, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
}},
emqx_frame:parse(ConnectBin)
),
ok.
@ -796,22 +822,30 @@ t_invalid_will_qos(_) ->
),
?assertException(
throw,
{frame_parse_error, invalid_will_qos},
{frame_parse_error, #{
cause := invalid_will_qos, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
}},
emqx_frame:parse(ConnectBinFun(Will_F_WillQoS1))
),
?assertException(
throw,
{frame_parse_error, invalid_will_qos},
{frame_parse_error, #{
cause := invalid_will_qos, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
}},
emqx_frame:parse(ConnectBinFun(Will_F_WillQoS2))
),
?assertException(
throw,
{frame_parse_error, invalid_will_qos},
{frame_parse_error, #{
cause := invalid_will_qos, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
}},
emqx_frame:parse(ConnectBinFun(Will_F_WillQoS3))
),
?assertException(
throw,
{frame_parse_error, invalid_will_qos},
{frame_parse_error, #{
cause := invalid_will_qos, proto_ver := ?MQTT_PROTO_V5, proto_name := <<"MQTT">>
}},
emqx_frame:parse(ConnectBinFun(Will_T_WillQoS3))
),
ok.

View File

@ -377,42 +377,60 @@ t_will_msg(_) ->
t_format(_) ->
io:format("~ts", [
emqx_packet:format(#mqtt_packet{
header = #mqtt_packet_header{type = ?CONNACK, retain = true, dup = 0},
variable = undefined
})
]),
io:format("~ts", [
emqx_packet:format(#mqtt_packet{
header = #mqtt_packet_header{type = ?CONNACK}, variable = 1, payload = <<"payload">>
})
emqx_packet:format(
#mqtt_packet{
header = #mqtt_packet_header{type = ?CONNACK, retain = true, dup = 0},
variable = undefined
},
text
)
]),
io:format(
"~ts",
[
emqx_packet:format(
#mqtt_packet{
header = #mqtt_packet_header{type = ?CONNACK},
variable = 1,
payload = <<"payload">>
},
text
)
]
),
io:format("~ts", [
emqx_packet:format(
?CONNECT_PACKET(#mqtt_packet_connect{
will_flag = true,
will_retain = true,
will_qos = ?QOS_2,
will_topic = <<"topic">>,
will_payload = <<"payload">>
})
?CONNECT_PACKET(
#mqtt_packet_connect{
will_flag = true,
will_retain = true,
will_qos = ?QOS_2,
will_topic = <<"topic">>,
will_payload = <<"payload">>
}
),
text
)
]),
io:format("~ts", [
emqx_packet:format(?CONNECT_PACKET(#mqtt_packet_connect{password = password}))
emqx_packet:format(?CONNECT_PACKET(#mqtt_packet_connect{password = password}), text)
]),
io:format("~ts", [emqx_packet:format(?CONNACK_PACKET(?CONNACK_SERVER))]),
io:format("~ts", [emqx_packet:format(?PUBLISH_PACKET(?QOS_1, 1))]),
io:format("~ts", [emqx_packet:format(?PUBLISH_PACKET(?QOS_2, <<"topic">>, 10, <<"payload">>))]),
io:format("~ts", [emqx_packet:format(?PUBACK_PACKET(?PUBACK, 98))]),
io:format("~ts", [emqx_packet:format(?PUBREL_PACKET(99))]),
io:format("~ts", [emqx_packet:format(?CONNACK_PACKET(?CONNACK_SERVER), text)]),
io:format("~ts", [emqx_packet:format(?PUBLISH_PACKET(?QOS_1, 1), text)]),
io:format("~ts", [
emqx_packet:format(?SUBSCRIBE_PACKET(15, [{<<"topic">>, ?QOS_0}, {<<"topic1">>, ?QOS_1}]))
emqx_packet:format(?PUBLISH_PACKET(?QOS_2, <<"topic">>, 10, <<"payload">>), text)
]),
io:format("~ts", [emqx_packet:format(?SUBACK_PACKET(40, [?QOS_0, ?QOS_1]))]),
io:format("~ts", [emqx_packet:format(?UNSUBSCRIBE_PACKET(89, [<<"t">>, <<"t2">>]))]),
io:format("~ts", [emqx_packet:format(?UNSUBACK_PACKET(90))]),
io:format("~ts", [emqx_packet:format(?DISCONNECT_PACKET(128))]).
io:format("~ts", [emqx_packet:format(?PUBACK_PACKET(?PUBACK, 98), text)]),
io:format("~ts", [emqx_packet:format(?PUBREL_PACKET(99), text)]),
io:format("~ts", [
emqx_packet:format(
?SUBSCRIBE_PACKET(15, [{<<"topic">>, ?QOS_0}, {<<"topic1">>, ?QOS_1}]), text
)
]),
io:format("~ts", [emqx_packet:format(?SUBACK_PACKET(40, [?QOS_0, ?QOS_1]), text)]),
io:format("~ts", [emqx_packet:format(?UNSUBSCRIBE_PACKET(89, [<<"t">>, <<"t2">>]), text)]),
io:format("~ts", [emqx_packet:format(?UNSUBACK_PACKET(90), text)]),
io:format("~ts", [emqx_packet:format(?DISCONNECT_PACKET(128), text)]).
t_parse_empty_publish(_) ->
%% 52: 0011(type=PUBLISH) 0100 (QoS=2)

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auth, [
{description, "EMQX Authentication and authorization"},
{vsn, "0.3.3"},
{vsn, "0.3.4"},
{modules, []},
{registered, [emqx_auth_sup]},
{applications, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auth_http, [
{description, "EMQX External HTTP API Authentication and Authorization"},
{vsn, "0.3.0"},
{vsn, "0.3.1"},
{registered, []},
{mod, {emqx_auth_http_app, []}},
{applications, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auth_jwt, [
{description, "EMQX JWT Authentication and Authorization"},
{vsn, "0.3.2"},
{vsn, "0.3.3"},
{registered, []},
{mod, {emqx_auth_jwt_app, []}},
{applications, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auth_mnesia, [
{description, "EMQX Buitl-in Database Authentication and Authorization"},
{vsn, "0.1.6"},
{vsn, "0.1.7"},
{registered, []},
{mod, {emqx_auth_mnesia_app, []}},
{applications, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auth_mongodb, [
{description, "EMQX MongoDB Authentication and Authorization"},
{vsn, "0.2.1"},
{vsn, "0.2.2"},
{registered, []},
{mod, {emqx_auth_mongodb_app, []}},
{applications, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auth_mysql, [
{description, "EMQX MySQL Authentication and Authorization"},
{vsn, "0.2.1"},
{vsn, "0.2.2"},
{registered, []},
{mod, {emqx_auth_mysql_app, []}},
{applications, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auth_postgresql, [
{description, "EMQX PostgreSQL Authentication and Authorization"},
{vsn, "0.2.1"},
{vsn, "0.2.2"},
{registered, []},
{mod, {emqx_auth_postgresql_app, []}},
{applications, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auth_redis, [
{description, "EMQX Redis Authentication and Authorization"},
{vsn, "0.2.1"},
{vsn, "0.2.2"},
{registered, []},
{mod, {emqx_auth_redis_app, []}},
{applications, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge, [
{description, "EMQX bridges"},
{vsn, "0.2.3"},
{vsn, "0.2.4"},
{registered, [emqx_bridge_sup]},
{mod, {emqx_bridge_app, []}},
{applications, [

View File

@ -1154,7 +1154,7 @@ t_bridges_probe(Config) ->
?assertMatch(
{ok, 400, #{
<<"code">> := <<"TEST_FAILED">>,
<<"message">> := <<"Connection refused">>
<<"message">> := <<"Connection refused", _/binary>>
}},
request_json(
post,

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_gcp_pubsub, [
{description, "EMQX Enterprise GCP Pub/Sub Bridge"},
{vsn, "0.3.2"},
{vsn, "0.3.3"},
{registered, []},
{applications, [
kernel,

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_http, [
{description, "EMQX HTTP Bridge and Connector Application"},
{vsn, "0.3.3"},
{vsn, "0.3.4"},
{registered, []},
{applications, [kernel, stdlib, emqx_resource, ehttpc]},
{env, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge_kafka, [
{description, "EMQX Enterprise Kafka Bridge"},
{vsn, "0.3.3"},
{vsn, "0.3.4"},
{registered, [emqx_bridge_kafka_consumer_sup]},
{applications, [
kernel,

View File

@ -1918,13 +1918,14 @@ t_node_joins_existing_cluster(Config) ->
_Attempts2 = 50,
[] =/= erpc:call(N2, emqx_router, lookup_routes, [MQTTTopic])
),
NumMsgs = 50 * NPartitions,
{ok, SRef1} =
snabbkaffe:subscribe(
?match_event(#{
?snk_kind := kafka_consumer_handle_message,
?snk_span := {complete, _}
}),
NPartitions,
NumMsgs,
20_000
),
lists:foreach(
@ -1933,7 +1934,7 @@ t_node_joins_existing_cluster(Config) ->
Val = <<"v", (integer_to_binary(N))/binary>>,
publish(Config, KafkaTopic, [#{key => Key, value => Val}])
end,
lists:seq(1, 10 * NPartitions)
lists:seq(1, NumMsgs)
),
{ok, _} = snabbkaffe:receive_events(SRef1),

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge_mqtt, [
{description, "EMQX MQTT Broker Bridge"},
{vsn, "0.2.3"},
{vsn, "0.2.4"},
{registered, []},
{applications, [
kernel,

View File

@ -98,7 +98,7 @@ on_start(ResourceId, #{server := Server} = Conf) ->
server => Server
}};
{error, Reason} ->
{error, Reason}
{error, emqx_maybe:define(explain_error(Reason), Reason)}
end.
on_add_channel(
@ -200,7 +200,7 @@ on_get_channel_status(
} = _State
) when is_map_key(ChannelId, Channels) ->
%% The channel should be ok as long as the MQTT client is ok
connected.
?status_connected.
on_get_channels(ResId) ->
emqx_bridge_v2:get_channels_for_connector(ResId).
@ -356,10 +356,15 @@ on_get_status(_ResourceId, State) ->
Workers = [{Pool, Worker} || {Pool, PN} <- Pools, {_Name, Worker} <- ecpool:workers(PN)],
try emqx_utils:pmap(fun get_status/1, Workers, ?HEALTH_CHECK_TIMEOUT) of
Statuses ->
combine_status(Statuses)
case combine_status(Statuses) of
{Status, Msg} ->
{Status, State, Msg};
Status ->
Status
end
catch
exit:timeout ->
connecting
?status_connecting
end.
get_status({_Pool, Worker}) ->
@ -367,7 +372,7 @@ get_status({_Pool, Worker}) ->
{ok, Client} ->
emqx_bridge_mqtt_ingress:status(Client);
{error, _} ->
disconnected
?status_disconnected
end.
combine_status(Statuses) ->
@ -375,11 +380,25 @@ combine_status(Statuses) ->
%% Natural order of statuses: [connected, connecting, disconnected]
%% * `disconnected` wins over any other status
%% * `connecting` wins over `connected`
case lists:reverse(lists:usort(Statuses)) of
ToStatus = fun
({S, _Reason}) -> S;
(S) when is_atom(S) -> S
end,
CompareFn = fun(S1A, S2A) ->
S1 = ToStatus(S1A),
S2 = ToStatus(S2A),
S1 > S2
end,
case lists:usort(CompareFn, Statuses) of
[{Status, Reason} | _] ->
case explain_error(Reason) of
undefined -> Status;
Msg -> {Status, Msg}
end;
[Status | _] ->
Status;
[] ->
disconnected
?status_disconnected
end.
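The comparison works because the status atoms happen to sort naturally (connected < connecting < disconnected), so sorting descending puts the worst status first; `ToStatus' lets `{Status, Reason}' tuples take part in that order. Sketch (values illustrative):
%% Sketch: the unhealthiest status wins, and its reason is kept.
Statuses = [connected, {disconnected, econnrefused}, connecting],
%% lists:usort(CompareFn, Statuses) -> [{disconnected, econnrefused} | _],
%% so combine_status/1 returns {disconnected, <<"Connection refused. ...">>}
%% once explain_error/1 (below) translates the reason.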
mk_ingress_config(
@ -514,15 +533,54 @@ connect(Pid, Name) ->
{ok, Pid};
{error, Reason} = Error ->
IsDryRun = emqx_resource:is_dry_run(Name),
?SLOG(?LOG_LEVEL(IsDryRun), #{
msg => "ingress_client_connect_failed",
reason => Reason,
resource_id => Name
}),
log_connect_error_reason(?LOG_LEVEL(IsDryRun), Reason, Name),
_ = catch emqtt:stop(Pid),
Error
end.
log_connect_error_reason(Level, {tcp_closed, _} = Reason, Name) ->
?tp(emqx_bridge_mqtt_connector_tcp_closed, #{}),
?SLOG(Level, #{
msg => "ingress_client_connect_failed",
reason => Reason,
name => Name,
explain => explain_error(Reason)
});
log_connect_error_reason(Level, econnrefused = Reason, Name) ->
?tp(emqx_bridge_mqtt_connector_econnrefused_error, #{}),
?SLOG(Level, #{
msg => "ingress_client_connect_failed",
reason => Reason,
name => Name,
explain => explain_error(Reason)
});
log_connect_error_reason(Level, Reason, Name) ->
?SLOG(Level, #{
msg => "ingress_client_connect_failed",
reason => Reason,
name => Name
}).
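Each failure now logs a structured event plus a trace point; for a refused connection, the emitted log data looks roughly like this (connector name invented):
%% Sketch of the log data for econnrefused:
#{
    msg => "ingress_client_connect_failed",
    reason => econnrefused,
    name => <<"my_mqtt_connector">>,
    explain => <<"Connection refused. ...">>   %% from explain_error/1 below
}.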
explain_error(econnrefused) ->
<<
"Connection refused. "
"This error indicates that your connection attempt to the MQTT server was rejected. "
"In simpler terms, the server you tried to connect to refused your request. "
"There can be multiple reasons for this. "
"For example, the MQTT server you're trying to connect to might be down or not "
"running at all or you might have provided the wrong address "
"or port number for the server."
>>;
explain_error({tcp_closed, _}) ->
<<
"Your MQTT connection attempt was unsuccessful. "
"It might be at its maximum capacity for handling new connections. "
"To diagnose the issue further, you can check the server logs for "
"any specific messages related to the unavailability or connection limits."
>>;
explain_error(_Reason) ->
undefined.
handle_disconnect(_Reason) ->
ok.

View File

@ -19,6 +19,7 @@
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("emqx_resource/include/emqx_resource.hrl").
%% management APIs
-export([
@ -234,13 +235,13 @@ status(Pid) ->
try
case proplists:get_value(socket, info(Pid)) of
Socket when Socket /= undefined ->
connected;
?status_connected;
undefined ->
connecting
?status_connecting
end
catch
exit:{noproc, _} ->
disconnected
?status_disconnected
end.
%%

View File

@ -1025,31 +1025,39 @@ t_mqtt_conn_bridge_egress_async_reconnect(_) ->
ct:sleep(1000),
%% stop the listener 1883 to make the bridge disconnected
ok = emqx_listeners:stop_listener('tcp:default'),
ct:sleep(1500),
?assertMatch(
#{<<"status">> := Status} when
Status == <<"connecting">> orelse Status == <<"disconnected">>,
request_bridge(BridgeIDEgress)
?check_trace(
begin
ok = emqx_listeners:stop_listener('tcp:default'),
ct:sleep(1500),
?assertMatch(
#{<<"status">> := Status} when
Status == <<"connecting">> orelse Status == <<"disconnected">>,
request_bridge(BridgeIDEgress)
),
%% start the listener 1883 to make the bridge reconnected
ok = emqx_listeners:start_listener('tcp:default'),
timer:sleep(1500),
?assertMatch(
#{<<"status">> := <<"connected">>},
request_bridge(BridgeIDEgress)
),
N = stop_publisher(Publisher),
%% all those messages should eventually be delivered
[
assert_mqtt_msg_received(RemoteTopic, Payload)
|| I <- lists:seq(1, N),
Payload <- [integer_to_binary(I)]
],
ok
end,
fun(Trace) ->
?assertMatch([_ | _], ?of_kind(emqx_bridge_mqtt_connector_econnrefused_error, Trace)),
ok
end
),
%% start the listener 1883 to make the bridge reconnected
ok = emqx_listeners:start_listener('tcp:default'),
timer:sleep(1500),
?assertMatch(
#{<<"status">> := <<"connected">>},
request_bridge(BridgeIDEgress)
),
N = stop_publisher(Publisher),
%% all those messages should eventually be delivered
[
assert_mqtt_msg_received(RemoteTopic, Payload)
|| I <- lists:seq(1, N),
Payload <- [integer_to_binary(I)]
],
ok.
start_publisher(Topic, Interval, CtrlPid) ->

View File

@ -131,6 +131,9 @@ hookpoint(Config) ->
BridgeId = bridge_id(Config),
emqx_bridge_resource:bridge_hookpoint(BridgeId).
simplify_result(Res) ->
emqx_bridge_v2_testlib:simplify_result(Res).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
@ -246,3 +249,46 @@ t_receive_via_rule(Config) ->
end
),
ok.
t_connect_with_more_clients_than_the_broker_accepts(Config) ->
Name = ?config(connector_name, Config),
OrgConf = emqx_mgmt_listeners_conf:get_raw(tcp, default),
on_exit(fun() ->
emqx_mgmt_listeners_conf:update(tcp, default, OrgConf)
end),
NewConf = OrgConf#{<<"max_connections">> => 3},
{ok, _} = emqx_mgmt_listeners_conf:update(tcp, default, NewConf),
?check_trace(
#{timetrap => 10_000},
begin
?assertMatch(
{201, #{
<<"status">> := <<"disconnected">>,
<<"status_reason">> :=
<<"Your MQTT connection attempt was unsuccessful", _/binary>>
}},
simplify_result(
emqx_bridge_v2_testlib:create_connector_api(
Config,
#{<<"pool_size">> => 100}
)
)
),
?block_until(#{?snk_kind := emqx_bridge_mqtt_connector_tcp_closed}),
?assertMatch(
{200, #{
<<"status">> := <<"disconnected">>,
<<"status_reason">> :=
<<"Your MQTT connection attempt was unsuccessful", _/binary>>
}},
simplify_result(emqx_bridge_v2_testlib:get_connector_api(mqtt, Name))
),
ok
end,
fun(Trace) ->
?assertMatch([_ | _], ?of_kind(emqx_bridge_mqtt_connector_tcp_closed, Trace)),
ok
end
),
ok.

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_pulsar, [
{description, "EMQX Pulsar Bridge"},
{vsn, "0.2.3"},
{vsn, "0.2.4"},
{registered, []},
{applications, [
kernel,

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_rabbitmq, [
{description, "EMQX Enterprise RabbitMQ Bridge"},
{vsn, "0.2.2"},
{vsn, "0.2.3"},
{registered, []},
{mod, {emqx_bridge_rabbitmq_app, []}},
{applications, [

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_s3, [
{description, "EMQX Enterprise S3 Bridge"},
{vsn, "0.1.5"},
{vsn, "0.1.6"},
{registered, []},
{applications, [
kernel,

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_sqlserver, [
{description, "EMQX Enterprise SQL Server Bridge"},
{vsn, "0.2.3"},
{vsn, "0.2.4"},
{registered, []},
{applications, [kernel, stdlib, emqx_resource, odbc]},
{env, [

View File

@ -1,6 +1,6 @@
{application, emqx_bridge_syskeeper, [
{description, "EMQX Enterprise Data bridge for Syskeeper"},
{vsn, "0.1.4"},
{vsn, "0.1.5"},
{registered, []},
{applications, [
kernel,

View File

@ -1,6 +1,6 @@
{application, emqx_conf, [
{description, "EMQX configuration management"},
{vsn, "0.2.3"},
{vsn, "0.2.4"},
{registered, []},
{mod, {emqx_conf_app, []}},
{applications, [kernel, stdlib]},

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_connector, [
{description, "EMQX Data Integration Connectors"},
{vsn, "0.3.3"},
{vsn, "0.3.4"},
{registered, []},
{mod, {emqx_connector_app, []}},
{applications, [

View File

@ -125,6 +125,7 @@ create(Type, Name, Conf0, Opts) ->
TypeBin = bin(Type),
ResourceId = resource_id(Type, Name),
Conf = Conf0#{connector_type => TypeBin, connector_name => Name},
_ = emqx_alarm:ensure_deactivated(ResourceId),
{ok, _Data} = emqx_resource:create_local(
ResourceId,
?CONNECTOR_RESOURCE_GROUP,
@ -132,7 +133,6 @@ create(Type, Name, Conf0, Opts) ->
parse_confs(TypeBin, Name, Conf),
parse_opts(Conf, Opts)
),
_ = emqx_alarm:ensure_deactivated(ResourceId),
ok.
update(ConnectorId, {OldConf, Conf}) ->

View File

@ -2,7 +2,7 @@
{application, emqx_dashboard, [
{description, "EMQX Web Dashboard"},
% strict semver, bump manually!
{vsn, "5.1.3"},
{vsn, "5.1.4"},
{modules, []},
{registered, [emqx_dashboard_sup]},
{applications, [

View File

@ -1,6 +1,6 @@
{application, emqx_dashboard_sso, [
{description, "EMQX Dashboard Single Sign-On"},
{vsn, "0.1.5"},
{vsn, "0.1.6"},
{registered, [emqx_dashboard_sso_sup]},
{applications, [
kernel,

View File

@ -19,6 +19,7 @@
-compile(nowarn_export_all).
-include("../../emqx/include/emqx.hrl").
-include("../../emqx_durable_storage/include/emqx_ds.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("stdlib/include/assert.hrl").
-include("../../emqx/include/asserts.hrl").
@ -145,7 +146,7 @@ t_06_smoke_add_generation(Config) ->
?assertMatch(ok, emqx_ds:add_generation(DB)),
[
{Gen1, #{created_at := Created1, since := Since1, until := Until1}},
{Gen2, #{created_at := Created2, since := Since2, until := undefined}}
{_Gen2, #{created_at := Created2, since := Since2, until := undefined}}
] = maps:to_list(emqx_ds:list_generations_with_lifetimes(DB)),
%% Check units of the return values (+/- 10s from test begin time):
?give_or_take(BeginTime, 10_000, Created1),
@ -234,8 +235,8 @@ t_09_atomic_store_batch(Config) ->
DB = ?FUNCTION_NAME,
?check_trace(
begin
application:set_env(emqx_durable_storage, egress_batch_size, 1),
?assertMatch(ok, emqx_ds:open_db(DB, opts(Config))),
DBOpts = (opts(Config))#{atomic_batches => true},
?assertMatch(ok, emqx_ds:open_db(DB, DBOpts)),
Msgs = [
message(<<"1">>, <<"1">>, 0),
message(<<"2">>, <<"2">>, 1),
@ -243,13 +244,8 @@ t_09_atomic_store_batch(Config) ->
],
?assertEqual(
ok,
emqx_ds:store_batch(DB, Msgs, #{
atomic => true,
sync => true
})
),
{ok, Flush} = ?block_until(#{?snk_kind := emqx_ds_buffer_flush}),
?assertMatch(#{batch := [_, _, _]}, Flush)
emqx_ds:store_batch(DB, Msgs, #{sync => true})
)
end,
[]
),
@ -289,6 +285,124 @@ t_10_non_atomic_store_batch(Config) ->
),
ok.
t_11_batch_preconditions(Config) ->
DB = ?FUNCTION_NAME,
?check_trace(
begin
DBOpts = (opts(Config))#{
atomic_batches => true,
force_monotonic_timestamps => false
},
?assertMatch(ok, emqx_ds:open_db(DB, DBOpts)),
%% Conditional delete
TS = 42,
Batch1 = #dsbatch{
preconditions = [{if_exists, matcher(<<"c1">>, <<"t/a">>, '_', TS)}],
operations = [{delete, matcher(<<"c1">>, <<"t/a">>, '_', TS)}]
},
%% Conditional insert
M1 = message(<<"c1">>, <<"t/a">>, <<"M1">>, TS),
Batch2 = #dsbatch{
preconditions = [{unless_exists, matcher(<<"c1">>, <<"t/a">>, '_', TS)}],
operations = [M1]
},
%% No such message yet, precondition fails:
?assertEqual(
{error, unrecoverable, {precondition_failed, not_found}},
emqx_ds:store_batch(DB, Batch1)
),
%% No such message yet, `unless` precondition holds:
?assertEqual(
ok,
emqx_ds:store_batch(DB, Batch2)
),
%% Now there's such message, `unless` precondition now fails:
?assertEqual(
{error, unrecoverable, {precondition_failed, M1}},
emqx_ds:store_batch(DB, Batch2)
),
%% On the other hand, `if` precondition now holds:
?assertEqual(
ok,
emqx_ds:store_batch(DB, Batch1)
),
%% Wait at least until current epoch ends.
ct:sleep(1000),
%% There's no messages in the DB.
?assertEqual(
[],
emqx_ds_test_helpers:consume(DB, emqx_topic:words(<<"t/#">>))
)
end,
[]
).
t_12_batch_precondition_conflicts(Config) ->
DB = ?FUNCTION_NAME,
NBatches = 50,
NMessages = 10,
?check_trace(
begin
DBOpts = (opts(Config))#{
atomic_batches => true,
force_monotonic_timestamps => false
},
?assertMatch(ok, emqx_ds:open_db(DB, DBOpts)),
ConflictBatches = [
#dsbatch{
%% If the slot is free...
preconditions = [{if_exists, matcher(<<"c1">>, <<"t/slot">>, _Free = <<>>, 0)}],
%% Take it and write NMessages extra messages, so that batches take longer to
%% process and have higher chances to conflict with each other.
operations =
[
message(<<"c1">>, <<"t/slot">>, integer_to_binary(I), _TS = 0)
| [
message(<<"c1">>, {"t/owner/~p/~p", [I, J]}, <<>>, I * 100 + J)
|| J <- lists:seq(1, NMessages)
]
]
}
|| I <- lists:seq(1, NBatches)
],
%% Run those batches concurrently.
ok = emqx_ds:store_batch(DB, [message(<<"c1">>, <<"t/slot">>, <<>>, 0)]),
Results = emqx_utils:pmap(
fun(B) -> emqx_ds:store_batch(DB, B) end,
ConflictBatches,
infinity
),
%% Only one should have succeeded.
?assertEqual([ok], [Ok || Ok = ok <- Results]),
%% While other failed with an identical `precondition_failed`.
Failures = lists:usort([PreconditionFailed || {error, _, PreconditionFailed} <- Results]),
?assertMatch(
[{precondition_failed, #message{topic = <<"t/slot">>, payload = <<_/bytes>>}}],
Failures
),
%% Wait at least until current epoch ends.
ct:sleep(1000),
%% Storage should contain single batch's messages.
[{precondition_failed, #message{payload = IOwner}}] = Failures,
WinnerBatch = lists:nth(binary_to_integer(IOwner), ConflictBatches),
BatchMessages = lists:sort(WinnerBatch#dsbatch.operations),
DBMessages = emqx_ds_test_helpers:consume(DB, emqx_topic:words(<<"t/#">>)),
?assertEqual(
BatchMessages,
DBMessages
)
end,
[]
).
t_smoke_delete_next(Config) ->
DB = ?FUNCTION_NAME,
?check_trace(
@ -534,12 +648,25 @@ message(ClientId, Topic, Payload, PublishedAt) ->
message(Topic, Payload, PublishedAt) ->
#message{
topic = Topic,
payload = Payload,
topic = try_format(Topic),
payload = try_format(Payload),
timestamp = PublishedAt,
id = emqx_guid:gen()
}.
matcher(ClientID, Topic, Payload, Timestamp) ->
#message_matcher{
from = ClientID,
topic = try_format(Topic),
timestamp = Timestamp,
payload = Payload
}.
try_format({Fmt, Args}) ->
emqx_utils:format(Fmt, Args);
try_format(String) ->
String.
delete(DB, It, Selector, BatchSize) ->
delete(DB, It, Selector, BatchSize, 0).
@ -562,9 +689,18 @@ all() ->
groups() ->
TCs = emqx_common_test_helpers:all(?MODULE),
%% TODO: Remove once builtin-local supports preconditions + atomic batches.
BuiltinLocalTCs =
TCs --
[
t_09_atomic_store_batch,
t_11_batch_preconditions,
t_12_batch_precondition_conflicts
],
BuiltinRaftTCs = TCs,
[
{builtin_local, TCs},
{builtin_raft, TCs}
{builtin_local, BuiltinLocalTCs},
{builtin_raft, BuiltinRaftTCs}
].
init_per_group(builtin_local, Config) ->

View File

@ -43,7 +43,7 @@
%% `emqx_ds_buffer':
init_buffer/3,
flush_buffer/4,
shard_of_message/4
shard_of_operation/4
]).
%% Internal exports:
@ -55,6 +55,7 @@
-export_type([db_opts/0, shard/0, iterator/0, delete_iterator/0]).
-include_lib("emqx_utils/include/emqx_message.hrl").
-include_lib("emqx_durable_storage/include/emqx_ds.hrl").
%%================================================================================
%% Type declarations
@ -230,9 +231,9 @@ flush_buffer(DB, Shard, Messages, S0 = #bs{options = Options}) ->
make_batch(_ForceMonotonic = true, Latest, Messages) ->
assign_monotonic_timestamps(Latest, Messages, []);
make_batch(false, Latest, Messages) ->
assign_message_timestamps(Latest, Messages, []).
assign_operation_timestamps(Latest, Messages, []).
assign_monotonic_timestamps(Latest0, [Message | Rest], Acc0) ->
assign_monotonic_timestamps(Latest0, [Message = #message{} | Rest], Acc0) ->
case emqx_message:timestamp(Message, microsecond) of
TimestampUs when TimestampUs > Latest0 ->
Latest = TimestampUs;
@ -241,28 +242,43 @@ assign_monotonic_timestamps(Latest0, [Message | Rest], Acc0) ->
end,
Acc = [assign_timestamp(Latest, Message) | Acc0],
assign_monotonic_timestamps(Latest, Rest, Acc);
assign_monotonic_timestamps(Latest, [Operation | Rest], Acc0) ->
Acc = [Operation | Acc0],
assign_monotonic_timestamps(Latest, Rest, Acc);
assign_monotonic_timestamps(Latest, [], Acc) ->
{Latest, lists:reverse(Acc)}.
assign_message_timestamps(Latest0, [Message | Rest], Acc0) ->
TimestampUs = emqx_message:timestamp(Message, microsecond),
assign_operation_timestamps(Latest0, [Message = #message{} | Rest], Acc0) ->
TimestampUs = emqx_message:timestamp(Message),
Latest = max(TimestampUs, Latest0),
Acc = [assign_timestamp(TimestampUs, Message) | Acc0],
assign_message_timestamps(Latest, Rest, Acc);
assign_message_timestamps(Latest, [], Acc) ->
assign_operation_timestamps(Latest, Rest, Acc);
assign_operation_timestamps(Latest, [Operation | Rest], Acc0) ->
Acc = [Operation | Acc0],
assign_operation_timestamps(Latest, Rest, Acc);
assign_operation_timestamps(Latest, [], Acc) ->
{Latest, lists:reverse(Acc)}.
assign_timestamp(TimestampUs, Message) ->
{TimestampUs, Message}.
-spec shard_of_message(emqx_ds:db(), emqx_types:message(), clientid | topic, _Options) -> shard().
shard_of_message(DB, #message{from = From, topic = Topic}, SerializeBy, _Options) ->
-spec shard_of_operation(emqx_ds:db(), emqx_ds:operation(), clientid | topic, _Options) -> shard().
shard_of_operation(DB, #message{from = From, topic = Topic}, SerializeBy, _Options) ->
case SerializeBy of
clientid -> Key = From;
topic -> Key = Topic
end,
shard_of_key(DB, Key);
shard_of_operation(DB, {_, #message_matcher{from = From, topic = Topic}}, SerializeBy, _Options) ->
case SerializeBy of
clientid -> Key = From;
topic -> Key = Topic
end,
shard_of_key(DB, Key).
shard_of_key(DB, Key) ->
N = emqx_ds_builtin_local_meta:n_shards(DB),
Hash =
case SerializeBy of
clientid -> erlang:phash2(From, N);
topic -> erlang:phash2(Topic, N)
end,
Hash = erlang:phash2(Key, N),
integer_to_binary(Hash).
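Shard routing is a plain hash of the chosen key, so a given client id (or topic) always lands on the same shard. Sketch:
%% Sketch: with 4 shards, the key maps deterministically to <<"0">>..<<"3">>.
N = 4,   %% in real code: emqx_ds_builtin_local_meta:n_shards(DB)
Shard = integer_to_binary(erlang:phash2(<<"client-1">>, N)).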
-spec get_streams(emqx_ds:db(), emqx_ds:topic_filter(), emqx_ds:time()) ->
@ -288,7 +304,7 @@ get_streams(DB, TopicFilter, StartTime) ->
-spec make_iterator(
emqx_ds:db(), emqx_ds:ds_specific_stream(), emqx_ds:topic_filter(), emqx_ds:time()
) ->
emqx_ds:make_iterator_result(emqx_ds:ds_specific_iterator()).
emqx_ds:make_iterator_result(iterator()).
make_iterator(DB, ?stream(Shard, InnerStream), TopicFilter, StartTime) ->
ShardId = {DB, Shard},
case
@ -302,7 +318,7 @@ make_iterator(DB, ?stream(Shard, InnerStream), TopicFilter, StartTime) ->
Error
end.
-spec update_iterator(emqx_ds:db(), emqx_ds:ds_specific_iterator(), emqx_ds:message_key()) ->
-spec update_iterator(emqx_ds:db(), iterator(), emqx_ds:message_key()) ->
emqx_ds:make_iterator_result(iterator()).
update_iterator(DB, Iter0 = #{?tag := ?IT, ?shard := Shard, ?enc := StorageIter0}, Key) ->
case emqx_ds_storage_layer:update_iterator({DB, Shard}, StorageIter0, Key) of
@ -380,7 +396,7 @@ do_next(DB, Iter0 = #{?tag := ?IT, ?shard := Shard, ?enc := StorageIter0}, N) ->
end.
-spec do_delete_next(emqx_ds:db(), delete_iterator(), emqx_ds:delete_selector(), pos_integer()) ->
emqx_ds:delete_next_result(emqx_ds:delete_iterator()).
emqx_ds:delete_next_result(delete_iterator()).
do_delete_next(
DB, Iter = #{?tag := ?DELETE_IT, ?shard := Shard, ?enc := StorageIter0}, Selector, N
) ->

View File

@ -29,7 +29,7 @@
current_timestamp/2,
shard_of_message/4,
shard_of_operation/4,
flush_buffer/4,
init_buffer/3
]).
@ -83,6 +83,7 @@
ra_state/0
]).
-include_lib("emqx_durable_storage/include/emqx_ds.hrl").
-include_lib("emqx_utils/include/emqx_message.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-include("emqx_ds_replication_layer.hrl").
@ -100,7 +101,10 @@
n_shards => pos_integer(),
n_sites => pos_integer(),
replication_factor => pos_integer(),
replication_options => _TODO :: #{}
replication_options => _TODO :: #{},
%% Inherited from `emqx_ds:generic_db_opts()`.
force_monotonic_timestamps => boolean(),
atomic_batches => boolean()
}.
%% This encapsulates the stream entity from the replication level.
@ -135,11 +139,12 @@
?enc := emqx_ds_storage_layer:delete_iterator()
}.
%% TODO: this type is obsolete and is kept only for compatibility with
%% BPAPIs. Remove it when emqx_ds_proto_v4 is gone (EMQX 5.6)
%% Write batch.
%% Instances of this type currently form the majority of the Raft log.
-type batch() :: #{
?tag := ?BATCH,
?batch_messages := [emqx_types:message()]
?batch_operations := [emqx_ds:operation()],
?batch_preconditions => [emqx_ds:precondition()]
}.
-type generation_rank() :: {shard_id(), term()}.
@ -240,16 +245,45 @@ drop_db(DB) ->
_ = emqx_ds_proto_v4:drop_db(list_nodes(), DB),
emqx_ds_replication_layer_meta:drop_db(DB).
-spec store_batch(emqx_ds:db(), [emqx_types:message(), ...], emqx_ds:message_store_opts()) ->
-spec store_batch(emqx_ds:db(), emqx_ds:batch(), emqx_ds:message_store_opts()) ->
emqx_ds:store_batch_result().
store_batch(DB, Messages, Opts) ->
store_batch(DB, Batch = #dsbatch{preconditions = [_ | _]}, Opts) ->
%% NOTE: Atomic batch is implied, will not check with DB config.
store_batch_atomic(DB, Batch, Opts);
store_batch(DB, Batch, Opts) ->
case emqx_ds_replication_layer_meta:db_config(DB) of
#{atomic_batches := true} ->
store_batch_atomic(DB, Batch, Opts);
#{} ->
store_batch_buffered(DB, Batch, Opts)
end.
store_batch_buffered(DB, #dsbatch{operations = Operations}, Opts) ->
store_batch_buffered(DB, Operations, Opts);
store_batch_buffered(DB, Batch, Opts) ->
try
emqx_ds_buffer:store_batch(DB, Messages, Opts)
emqx_ds_buffer:store_batch(DB, Batch, Opts)
catch
error:{Reason, _Call} when Reason == timeout; Reason == noproc ->
{error, recoverable, Reason}
end.
store_batch_atomic(DB, Batch, _Opts) ->
Shards = shards_of_batch(DB, Batch),
case Shards of
[Shard] ->
case ra_store_batch(DB, Shard, Batch) of
{timeout, ServerId} ->
{error, recoverable, {timeout, ServerId}};
Result ->
Result
end;
[] ->
ok;
[_ | _] ->
{error, unrecoverable, atomic_batch_spans_multiple_shards}
end.
-spec get_streams(emqx_ds:db(), emqx_ds:topic_filter(), emqx_ds:time()) ->
[{emqx_ds:stream_rank(), stream()}].
get_streams(DB, TopicFilter, StartTime) ->
@ -392,17 +426,49 @@ flush_buffer(DB, Shard, Messages, State) ->
end,
{State, Result}.
-spec shard_of_message(emqx_ds:db(), emqx_types:message(), clientid | topic, _Options) ->
-spec shard_of_operation(
emqx_ds:db(),
emqx_ds:operation() | emqx_ds:precondition(),
clientid | topic,
_Options
) ->
emqx_ds_replication_layer:shard_id().
shard_of_message(DB, #message{from = From, topic = Topic}, SerializeBy, _Options) ->
shard_of_operation(DB, #message{from = From, topic = Topic}, SerializeBy, _Options) ->
case SerializeBy of
clientid -> Key = From;
topic -> Key = Topic
end,
shard_of_key(DB, Key);
shard_of_operation(DB, {_OpName, Matcher}, SerializeBy, _Options) ->
#message_matcher{from = From, topic = Topic} = Matcher,
case SerializeBy of
clientid -> Key = From;
topic -> Key = Topic
end,
shard_of_key(DB, Key).
shard_of_key(DB, Key) ->
N = emqx_ds_replication_shard_allocator:n_shards(DB),
Hash =
case SerializeBy of
clientid -> erlang:phash2(From, N);
topic -> erlang:phash2(Topic, N)
end,
Hash = erlang:phash2(Key, N),
integer_to_binary(Hash).
shards_of_batch(DB, #dsbatch{operations = Operations, preconditions = Preconditions}) ->
shards_of_batch(DB, Preconditions, shards_of_batch(DB, Operations, []));
shards_of_batch(DB, Operations) ->
shards_of_batch(DB, Operations, []).
shards_of_batch(DB, [Operation | Rest], Acc) ->
case shard_of_operation(DB, Operation, clientid, #{}) of
Shard when Shard =:= hd(Acc) ->
shards_of_batch(DB, Rest, Acc);
Shard when Acc =:= [] ->
shards_of_batch(DB, Rest, [Shard]);
ShardAnother ->
[ShardAnother | Acc]
end;
shards_of_batch(_DB, [], Acc) ->
Acc.
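Taken together with store_batch_atomic/3 above: an atomic batch is accepted only if every operation and precondition hashes to the same shard. A sketch using the message/matcher helpers from the test suite earlier in this changeset:
%% Sketch: both entries key on <<"c1">>, so shards_of_batch/2 yields one
%% shard and the batch is applied via ra_store_batch/3. Mixing client ids
%% would yield {error, unrecoverable, atomic_batch_spans_multiple_shards}.
%% DB: an already-open DB with atomic_batches => true.
Batch = #dsbatch{
    preconditions = [{unless_exists, matcher(<<"c1">>, <<"t/a">>, '_', 0)}],
    operations = [message(<<"c1">>, <<"t/a">>, <<"M1">>, 0)]
},
ok = emqx_ds:store_batch(DB, Batch, #{}).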
%%================================================================================
%% Internal exports (RPC targets)
%%================================================================================
@ -612,7 +678,7 @@ list_nodes() ->
-define(SHARD_RPC(DB, SHARD, NODE, BODY),
case
emqx_ds_replication_layer_shard:servers(
DB, SHARD, application:get_env(emqx_durable_storage, reads, leader_preferred)
DB, SHARD, application:get_env(emqx_ds_builtin_raft, reads, leader_preferred)
)
of
[{_, NODE} | _] ->
@ -624,13 +690,22 @@ list_nodes() ->
end
).
-spec ra_store_batch(emqx_ds:db(), emqx_ds_replication_layer:shard_id(), [emqx_types:message()]) ->
ok | {timeout, _} | {error, recoverable | unrecoverable, _Err}.
ra_store_batch(DB, Shard, Messages) ->
Command = #{
?tag => ?BATCH,
?batch_messages => Messages
},
-spec ra_store_batch(emqx_ds:db(), emqx_ds_replication_layer:shard_id(), emqx_ds:batch()) ->
ok | {timeout, _} | emqx_ds:error(_).
ra_store_batch(DB, Shard, Batch) ->
case Batch of
#dsbatch{operations = Operations, preconditions = Preconditions} ->
Command = #{
?tag => ?BATCH,
?batch_operations => Operations,
?batch_preconditions => Preconditions
};
Operations ->
Command = #{
?tag => ?BATCH,
?batch_operations => Operations
}
end,
Servers = emqx_ds_replication_layer_shard:servers(DB, Shard, leader_preferred),
case emqx_ds_replication_layer_shard:process_command(Servers, Command, ?RA_TIMEOUT) of
{ok, Result, _Leader} ->
@ -767,6 +842,7 @@ ra_drop_shard(DB, Shard) ->
-define(pd_ra_idx_need_release, '$emqx_ds_raft_idx_need_release').
-define(pd_ra_bytes_need_release, '$emqx_ds_raft_bytes_need_release').
-define(pd_ra_force_monotonic, '$emqx_ds_raft_force_monotonic').
-spec init(_Args :: map()) -> ra_state().
init(#{db := DB, shard := Shard}) ->
@ -776,18 +852,30 @@ init(#{db := DB, shard := Shard}) ->
{ra_state(), _Reply, _Effects}.
apply(
RaftMeta,
#{
Command = #{
?tag := ?BATCH,
?batch_messages := MessagesIn
?batch_operations := OperationsIn
},
#{db_shard := DBShard = {DB, Shard}, latest := Latest0} = State0
) ->
?tp(ds_ra_apply_batch, #{db => DB, shard => Shard, batch => MessagesIn, latest => Latest0}),
{Stats, Latest, Messages} = assign_timestamps(Latest0, MessagesIn),
Result = emqx_ds_storage_layer:store_batch(DBShard, Messages, #{durable => false}),
State = State0#{latest := Latest},
set_ts(DBShard, Latest),
Effects = try_release_log(Stats, RaftMeta, State),
?tp(ds_ra_apply_batch, #{db => DB, shard => Shard, batch => OperationsIn, latest => Latest0}),
Preconditions = maps:get(?batch_preconditions, Command, []),
{Stats, Latest, Operations} = assign_timestamps(DB, Latest0, OperationsIn),
%% FIXME
case emqx_ds_precondition:verify(emqx_ds_storage_layer, DBShard, Preconditions) of
ok ->
Result = emqx_ds_storage_layer:store_batch(DBShard, Operations, #{durable => false}),
State = State0#{latest := Latest},
set_ts(DBShard, Latest),
Effects = try_release_log(Stats, RaftMeta, State);
PreconditionFailed = {precondition_failed, _} ->
Result = {error, unrecoverable, PreconditionFailed},
State = State0,
Effects = [];
Result ->
State = State0,
Effects = []
end,
Effects =/= [] andalso ?tp(ds_ra_effects, #{effects => Effects, meta => RaftMeta}),
{State, Result, Effects};
apply(
@ -862,6 +950,21 @@ apply(
Effects = handle_custom_event(DBShard, Latest, CustomEvent),
{State#{latest => Latest}, ok, Effects}.
assign_timestamps(DB, Latest, Messages) ->
ForceMonotonic = force_monotonic_timestamps(DB),
assign_timestamps(ForceMonotonic, Latest, Messages, [], 0, 0).
force_monotonic_timestamps(DB) ->
case erlang:get(?pd_ra_force_monotonic) of
undefined ->
DBConfig = emqx_ds_replication_layer_meta:db_config(DB),
Flag = maps:get(force_monotonic_timestamps, DBConfig),
erlang:put(?pd_ra_force_monotonic, Flag);
Flag ->
ok
end,
Flag.
try_release_log({_N, BatchSize}, RaftMeta = #{index := CurrentIdx}, State) ->
%% NOTE
%% Because cursor release means storage flush (see
@ -924,10 +1027,7 @@ tick(TimeMs, #{db_shard := DBShard = {DB, Shard}, latest := Latest}) ->
?tp(emqx_ds_replication_layer_tick, #{db => DB, shard => Shard, timestamp => Timestamp}),
handle_custom_event(DBShard, Timestamp, tick).
assign_timestamps(Latest, Messages) ->
assign_timestamps(Latest, Messages, [], 0, 0).
assign_timestamps(Latest0, [Message0 | Rest], Acc, N, Sz) ->
assign_timestamps(true, Latest0, [Message0 = #message{} | Rest], Acc, N, Sz) ->
case emqx_message:timestamp(Message0, microsecond) of
TimestampUs when TimestampUs > Latest0 ->
Latest = TimestampUs,
@ -936,8 +1036,17 @@ assign_timestamps(Latest0, [Message0 | Rest], Acc, N, Sz) ->
Latest = Latest0 + 1,
Message = assign_timestamp(Latest, Message0)
end,
assign_timestamps(Latest, Rest, [Message | Acc], N + 1, Sz + approx_message_size(Message0));
assign_timestamps(Latest, [], Acc, N, Size) ->
MSize = approx_message_size(Message0),
assign_timestamps(true, Latest, Rest, [Message | Acc], N + 1, Sz + MSize);
assign_timestamps(false, Latest0, [Message0 = #message{} | Rest], Acc, N, Sz) ->
TimestampUs = emqx_message:timestamp(Message0),
Latest = max(Latest0, TimestampUs),
Message = assign_timestamp(TimestampUs, Message0),
MSize = approx_message_size(Message0),
assign_timestamps(false, Latest, Rest, [Message | Acc], N + 1, Sz + MSize);
assign_timestamps(ForceMonotonic, Latest, [Operation | Rest], Acc, N, Sz) ->
assign_timestamps(ForceMonotonic, Latest, Rest, [Operation | Acc], N + 1, Sz);
assign_timestamps(_ForceMonotonic, Latest, [], Acc, N, Size) ->
{{N, Size}, Latest, lists:reverse(Acc)}.
assign_timestamp(TimestampUs, Message) ->

View File

@ -19,7 +19,8 @@
-define(enc, 3).
%% ?BATCH
-define(batch_messages, 2).
-define(batch_operations, 2).
-define(batch_preconditions, 4).
-define(timestamp, 3).
%% add_generation / update_config

View File

@ -100,7 +100,7 @@ open(TopicSubscriptions, Opts) ->
State0 = init_state(Opts),
State1 = lists:foldl(
fun({ShareTopicFilter, #{}}, State) ->
?tp(warning, ds_agent_open_subscription, #{
?tp(debug, ds_agent_open_subscription, #{
topic_filter => ShareTopicFilter
}),
add_shared_subscription(State, ShareTopicFilter)
@ -120,7 +120,7 @@ can_subscribe(_State, _ShareTopicFilter, _SubOpts) ->
-spec on_subscribe(t(), share_topic_filter(), emqx_types:subopts()) -> t().
on_subscribe(State0, ShareTopicFilter, _SubOpts) ->
?tp(warning, ds_agent_on_subscribe, #{
?tp(debug, ds_agent_on_subscribe, #{
share_topic_filter => ShareTopicFilter
}),
add_shared_subscription(State0, ShareTopicFilter).
@ -163,7 +163,7 @@ on_disconnect(#{groups := Groups0} = State, StreamProgresses) ->
-spec on_info(t(), term()) -> t().
on_info(State, ?leader_lease_streams_match(GroupId, Leader, StreamProgresses, Version)) ->
?SLOG(info, #{
?SLOG(debug, #{
msg => leader_lease_streams,
group_id => GroupId,
streams => StreamProgresses,
@ -176,7 +176,7 @@ on_info(State, ?leader_lease_streams_match(GroupId, Leader, StreamProgresses, Ve
)
end);
on_info(State, ?leader_renew_stream_lease_match(GroupId, Version)) ->
?SLOG(info, #{
?SLOG(debug, #{
msg => leader_renew_stream_lease,
group_id => GroupId,
version => Version
@ -185,7 +185,7 @@ on_info(State, ?leader_renew_stream_lease_match(GroupId, Version)) ->
emqx_ds_shared_sub_group_sm:handle_leader_renew_stream_lease(GSM, Version)
end);
on_info(State, ?leader_renew_stream_lease_match(GroupId, VersionOld, VersionNew)) ->
?SLOG(info, #{
?SLOG(debug, #{
msg => leader_renew_stream_lease,
group_id => GroupId,
version_old => VersionOld,
@ -195,7 +195,7 @@ on_info(State, ?leader_renew_stream_lease_match(GroupId, VersionOld, VersionNew)
emqx_ds_shared_sub_group_sm:handle_leader_renew_stream_lease(GSM, VersionOld, VersionNew)
end);
on_info(State, ?leader_update_streams_match(GroupId, VersionOld, VersionNew, StreamsNew)) ->
?SLOG(info, #{
?SLOG(debug, #{
msg => leader_update_streams,
group_id => GroupId,
version_old => VersionOld,
@ -208,7 +208,7 @@ on_info(State, ?leader_update_streams_match(GroupId, VersionOld, VersionNew, Str
)
end);
on_info(State, ?leader_invalidate_match(GroupId)) ->
?SLOG(info, #{
?SLOG(debug, #{
msg => leader_invalidate,
group_id => GroupId
}),
@ -245,7 +245,7 @@ delete_shared_subscription(State, ShareTopicFilter, GroupProgress) ->
add_shared_subscription(
#{session_id := SessionId, groups := Groups0} = State0, ShareTopicFilter
) ->
?SLOG(info, #{
?SLOG(debug, #{
msg => agent_add_shared_subscription,
share_topic_filter => ShareTopicFilter
}),

View File

@ -120,7 +120,7 @@ new(#{
send_after := SendAfter
}) ->
?SLOG(
info,
debug,
#{
msg => group_sm_new,
agent => Agent,
@ -133,7 +133,7 @@ new(#{
agent => Agent,
send_after => SendAfter
},
?tp(warning, group_sm_new, #{
?tp(debug, group_sm_new, #{
agent => Agent,
share_topic_filter => ShareTopicFilter
}),
@ -176,7 +176,7 @@ handle_disconnect(
%% Connecting state
handle_connecting(#{agent := Agent, share_topic_filter := ShareTopicFilter} = GSM) ->
?tp(warning, group_sm_enter_connecting, #{
?tp(debug, group_sm_enter_connecting, #{
agent => Agent,
share_topic_filter => ShareTopicFilter
}),
@ -264,11 +264,13 @@ handle_leader_update_streams(
VersionNew,
StreamProgresses
) ->
?tp(warning, shared_sub_group_sm_leader_update_streams, #{
?tp(debug, shared_sub_group_sm_leader_update_streams, #{
id => Id,
version_old => VersionOld,
version_new => VersionNew,
stream_progresses => emqx_ds_shared_sub_proto:format_stream_progresses(StreamProgresses)
stream_progresses => emqx_persistent_session_ds_shared_subs:format_stream_progresses(
StreamProgresses
)
}),
{AddEvents, Streams1} = lists:foldl(
fun(#{stream := Stream, progress := Progress}, {AddEventAcc, StreamsAcc}) ->
@ -303,9 +305,11 @@ handle_leader_update_streams(
maps:keys(Streams1)
),
StreamLeaseEvents = AddEvents ++ RevokeEvents,
?tp(warning, shared_sub_group_sm_leader_update_streams, #{
?tp(debug, shared_sub_group_sm_leader_update_streams, #{
id => Id,
stream_lease_events => emqx_ds_shared_sub_proto:format_lease_events(StreamLeaseEvents)
stream_lease_events => emqx_persistent_session_ds_shared_subs:format_lease_events(
StreamLeaseEvents
)
}),
transition(
GSM,
@ -431,24 +435,11 @@ handle_leader_invalidate(#{agent := Agent, share_topic_filter := ShareTopicFilte
%% Internal API
%%-----------------------------------------------------------------------
handle_state_timeout(
#{state := ?connecting, share_topic_filter := ShareTopicFilter} = GSM,
find_leader_timeout,
_Message
) ->
?tp(debug, find_leader_timeout, #{share_topic_filter => ShareTopicFilter}),
handle_state_timeout(#{state := ?connecting} = GSM, find_leader_timeout, _Message) ->
handle_find_leader_timeout(GSM);
handle_state_timeout(
#{state := ?replaying} = GSM,
renew_lease_timeout,
_Message
) ->
handle_state_timeout(#{state := ?replaying} = GSM, renew_lease_timeout, _Message) ->
handle_renew_lease_timeout(GSM);
handle_state_timeout(
GSM,
update_stream_state_timeout,
_Message
) ->
handle_state_timeout(GSM, update_stream_state_timeout, _Message) ->
?tp(debug, update_stream_state_timeout, #{}),
handle_stream_progress(GSM, []).

View File

@ -164,7 +164,7 @@ handle_event({call, From}, #register{register_fun = Fun}, ?leader_waiting_regist
%%--------------------------------------------------------------------
%% replaying state
handle_event(enter, _OldState, ?leader_active, #{topic := Topic} = _Data) ->
?tp(warning, shared_sub_leader_enter_actve, #{topic => Topic}),
?tp(debug, shared_sub_leader_enter_actve, #{topic => Topic}),
{keep_state_and_data, [
{{timeout, #renew_streams{}}, 0, #renew_streams{}},
{{timeout, #renew_leases{}}, ?dq_config(leader_renew_lease_interval_ms), #renew_leases{}},
@ -174,7 +174,7 @@ handle_event(enter, _OldState, ?leader_active, #{topic := Topic} = _Data) ->
%% timers
%% renew_streams timer
handle_event({timeout, #renew_streams{}}, #renew_streams{}, ?leader_active, Data0) ->
% ?tp(warning, shared_sub_leader_timeout, #{timeout => renew_streams}),
?tp(debug, shared_sub_leader_timeout, #{timeout => renew_streams}),
Data1 = renew_streams(Data0),
{keep_state, Data1,
{
@ -184,7 +184,7 @@ handle_event({timeout, #renew_streams{}}, #renew_streams{}, ?leader_active, Data
}};
%% renew_leases timer
handle_event({timeout, #renew_leases{}}, #renew_leases{}, ?leader_active, Data0) ->
% ?tp(warning, shared_sub_leader_timeout, #{timeout => renew_leases}),
?tp(debug, shared_sub_leader_timeout, #{timeout => renew_leases}),
Data1 = renew_leases(Data0),
{keep_state, Data1,
{{timeout, #renew_leases{}}, ?dq_config(leader_renew_lease_interval_ms), #renew_leases{}}};
@ -279,7 +279,7 @@ renew_streams(
Data2 = Data1#{stream_states => NewStreamStates, rank_progress => RankProgress1},
Data3 = revoke_streams(Data2),
Data4 = assign_streams(Data3),
?SLOG(info, #{
?SLOG(debug, #{
msg => leader_renew_streams,
topic_filter => TopicFilter,
new_streams => length(NewStreamsWRanks)
@ -368,7 +368,7 @@ revoke_excess_streams_from_agent(Data0, Agent, DesiredCount) ->
false ->
AgentState0;
true ->
?tp(warning, shared_sub_leader_revoke_streams, #{
?tp(debug, shared_sub_leader_revoke_streams, #{
agent => Agent,
agent_stream_count => length(Streams0),
revoke_count => RevokeCount,
@ -421,7 +421,7 @@ assign_lacking_streams(Data0, Agent, DesiredCount) ->
false ->
Data0;
true ->
?tp(warning, shared_sub_leader_assign_streams, #{
?tp(debug, shared_sub_leader_assign_streams, #{
agent => Agent,
agent_stream_count => length(Streams0),
assign_count => AssignCount,
@ -449,7 +449,7 @@ select_streams_for_assign(Data0, _Agent, AssignCount) ->
%% renew_leases - send lease confirmations to agents
renew_leases(#{agents := AgentStates} = Data) ->
?tp(warning, shared_sub_leader_renew_leases, #{agents => maps:keys(AgentStates)}),
?tp(debug, shared_sub_leader_renew_leases, #{agents => maps:keys(AgentStates)}),
ok = lists:foreach(
fun({Agent, AgentState}) ->
renew_lease(Data, Agent, AgentState)
@ -492,7 +492,7 @@ drop_timeout_agents(#{agents := Agents} = Data) ->
(is_integer(NoReplayingDeadline) andalso NoReplayingDeadline < Now)
of
true ->
?SLOG(info, #{
?SLOG(debug, #{
msg => leader_agent_timeout,
now => Now,
update_deadline => UpdateDeadline,
@ -516,14 +516,14 @@ connect_agent(
Agent,
AgentMetadata
) ->
?SLOG(info, #{
?SLOG(debug, #{
msg => leader_agent_connected,
agent => Agent,
group_id => GroupId
}),
case Agents of
#{Agent := AgentState} ->
?tp(warning, shared_sub_leader_agent_already_connected, #{
?tp(debug, shared_sub_leader_agent_already_connected, #{
agent => Agent
}),
reconnect_agent(Data, Agent, AgentMetadata, AgentState);
@ -546,7 +546,7 @@ reconnect_agent(
AgentMetadata,
#{streams := OldStreams, revoked_streams := OldRevokedStreams} = _OldAgentState
) ->
?tp(warning, shared_sub_leader_agent_reconnect, #{
?tp(debug, shared_sub_leader_agent_reconnect, #{
agent => Agent,
agent_metadata => AgentMetadata,
inherited_streams => OldStreams
@ -767,7 +767,7 @@ update_agent_stream_states(Data0, Agent, AgentStreamProgresses, VersionOld, Vers
disconnect_agent(Data0, Agent, AgentStreamProgresses, Version) ->
case get_agent_state(Data0, Agent) of
#{version := Version} ->
?tp(warning, shared_sub_leader_disconnect_agent, #{
?tp(debug, shared_sub_leader_disconnect_agent, #{
agent => Agent,
version => Version
}),
@ -794,7 +794,7 @@ agent_transition_to_waiting_updating(
Streams,
RevokedStreams
) ->
?tp(warning, shared_sub_leader_agent_state_transition, #{
?tp(debug, shared_sub_leader_agent_state_transition, #{
agent => Agent,
old_state => OldState,
new_state => ?waiting_updating
@ -818,7 +818,7 @@ agent_transition_to_waiting_updating(
agent_transition_to_waiting_replaying(
#{group_id := GroupId} = _Data, Agent, #{state := OldState, version := Version} = AgentState0
) ->
?tp(warning, shared_sub_leader_agent_state_transition, #{
?tp(debug, shared_sub_leader_agent_state_transition, #{
agent => Agent,
old_state => OldState,
new_state => ?waiting_replaying
@ -833,7 +833,7 @@ agent_transition_to_waiting_replaying(
agent_transition_to_initial_waiting_replaying(
#{group_id := GroupId} = Data, Agent, AgentMetadata, InitialStreams
) ->
?tp(warning, shared_sub_leader_agent_state_transition, #{
?tp(debug, shared_sub_leader_agent_state_transition, #{
agent => Agent,
old_state => none,
new_state => ?waiting_replaying
@ -856,7 +856,7 @@ agent_transition_to_initial_waiting_replaying(
renew_no_replaying_deadline(AgentState).
agent_transition_to_replaying(Agent, #{state := ?waiting_replaying} = AgentState) ->
?tp(warning, shared_sub_leader_agent_state_transition, #{
?tp(debug, shared_sub_leader_agent_state_transition, #{
agent => Agent,
old_state => ?waiting_replaying,
new_state => ?replaying
@ -868,7 +868,7 @@ agent_transition_to_replaying(Agent, #{state := ?waiting_replaying} = AgentState
}.
agent_transition_to_updating(Agent, #{state := ?waiting_updating} = AgentState0) ->
?tp(warning, shared_sub_leader_agent_state_transition, #{
?tp(debug, shared_sub_leader_agent_state_transition, #{
agent => Agent,
old_state => ?waiting_updating,
new_state => ?updating
@ -995,7 +995,7 @@ drop_agent(#{agents := Agents} = Data0, Agent) ->
#{streams := Streams, revoked_streams := RevokedStreams} = AgentState,
AllStreams = Streams ++ RevokedStreams,
Data1 = unassign_streams(Data0, AllStreams),
?tp(warning, shared_sub_leader_drop_agent, #{agent => Agent}),
?tp(debug, shared_sub_leader_drop_agent, #{agent => Agent}),
Data1#{agents => maps:remove(Agent, Agents)}.
invalidate_agent(#{group_id := GroupId}, Agent) ->

View File

@ -55,7 +55,7 @@ set_replayed({{RankX, RankY}, Stream}, State) ->
State#{RankX => #{min_y => MinY, ys => Ys2}};
_ ->
?SLOG(
warning,
debug,
#{
msg => leader_rank_progress_double_or_invalid_update,
rank_x => RankX,

View File

@ -22,12 +22,6 @@
]).
-export([
format_stream_progresses/1,
format_stream_progress/1,
format_stream_key/1,
format_stream_keys/1,
format_lease_event/1,
format_lease_events/1,
agent/2
]).
@ -57,6 +51,20 @@
agent_metadata/0
]).
-define(log_agent_msg(ToLeader, Msg),
?tp(debug, shared_sub_proto_msg, #{
to_leader => ToLeader,
msg => emqx_ds_shared_sub_proto_format:format_agent_msg(Msg)
})
).
-define(log_leader_msg(ToAgent, Msg),
?tp(debug, shared_sub_proto_msg, #{
to_agent => ToAgent,
msg => emqx_ds_shared_sub_proto_format:format_leader_msg(Msg)
})
).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
@ -67,15 +75,7 @@
agent_connect_leader(ToLeader, FromAgent, AgentMetadata, ShareTopicFilter) when
?is_local_leader(ToLeader)
->
?tp(warning, shared_sub_proto_msg, #{
type => agent_connect_leader,
to_leader => ToLeader,
from_agent => FromAgent,
agent_metadata => AgentMetadata,
share_topic_filter => ShareTopicFilter
}),
_ = erlang:send(ToLeader, ?agent_connect_leader(FromAgent, AgentMetadata, ShareTopicFilter)),
ok;
send_agent_msg(ToLeader, ?agent_connect_leader(FromAgent, AgentMetadata, ShareTopicFilter));
agent_connect_leader(ToLeader, FromAgent, AgentMetadata, ShareTopicFilter) ->
emqx_ds_shared_sub_proto_v1:agent_connect_leader(
?leader_node(ToLeader), ToLeader, FromAgent, AgentMetadata, ShareTopicFilter
@ -85,15 +85,7 @@ agent_connect_leader(ToLeader, FromAgent, AgentMetadata, ShareTopicFilter) ->
agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, Version) when
?is_local_leader(ToLeader)
->
?tp(warning, shared_sub_proto_msg, #{
type => agent_update_stream_states,
to_leader => ToLeader,
from_agent => FromAgent,
stream_progresses => format_stream_progresses(StreamProgresses),
version => Version
}),
_ = erlang:send(ToLeader, ?agent_update_stream_states(FromAgent, StreamProgresses, Version)),
ok;
send_agent_msg(ToLeader, ?agent_update_stream_states(FromAgent, StreamProgresses, Version));
agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, Version) ->
emqx_ds_shared_sub_proto_v1:agent_update_stream_states(
?leader_node(ToLeader), ToLeader, FromAgent, StreamProgresses, Version
@ -105,18 +97,9 @@ agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, Version) ->
agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, VersionOld, VersionNew) when
?is_local_leader(ToLeader)
->
?tp(warning, shared_sub_proto_msg, #{
type => agent_update_stream_states,
to_leader => ToLeader,
from_agent => FromAgent,
stream_progresses => format_stream_progresses(StreamProgresses),
version_old => VersionOld,
version_new => VersionNew
}),
_ = erlang:send(
send_agent_msg(
ToLeader, ?agent_update_stream_states(FromAgent, StreamProgresses, VersionOld, VersionNew)
),
ok;
);
agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, VersionOld, VersionNew) ->
emqx_ds_shared_sub_proto_v1:agent_update_stream_states(
?leader_node(ToLeader), ToLeader, FromAgent, StreamProgresses, VersionOld, VersionNew
@ -125,15 +108,7 @@ agent_update_stream_states(ToLeader, FromAgent, StreamProgresses, VersionOld, Ve
agent_disconnect(ToLeader, FromAgent, StreamProgresses, Version) when
?is_local_leader(ToLeader)
->
?tp(warning, shared_sub_proto_msg, #{
type => agent_disconnect,
to_leader => ToLeader,
from_agent => FromAgent,
stream_progresses => format_stream_progresses(StreamProgresses),
version => Version
}),
_ = erlang:send(ToLeader, ?agent_disconnect(FromAgent, StreamProgresses, Version)),
ok;
send_agent_msg(ToLeader, ?agent_disconnect(FromAgent, StreamProgresses, Version));
agent_disconnect(ToLeader, FromAgent, StreamProgresses, Version) ->
emqx_ds_shared_sub_proto_v1:agent_disconnect(
?leader_node(ToLeader), ToLeader, FromAgent, StreamProgresses, Version
@ -144,19 +119,7 @@ agent_disconnect(ToLeader, FromAgent, StreamProgresses, Version) ->
-spec leader_lease_streams(agent(), group(), leader(), list(leader_stream_progress()), version()) ->
ok.
leader_lease_streams(ToAgent, OfGroup, Leader, Streams, Version) when ?is_local_agent(ToAgent) ->
?tp(warning, shared_sub_proto_msg, #{
type => leader_lease_streams,
to_agent => ToAgent,
of_group => OfGroup,
leader => Leader,
streams => format_stream_progresses(Streams),
version => Version
}),
_ = emqx_persistent_session_ds_shared_subs_agent:send(
?agent_pid(ToAgent),
?leader_lease_streams(OfGroup, Leader, Streams, Version)
),
ok;
send_leader_msg(ToAgent, ?leader_lease_streams(OfGroup, Leader, Streams, Version));
leader_lease_streams(ToAgent, OfGroup, Leader, Streams, Version) ->
emqx_ds_shared_sub_proto_v1:leader_lease_streams(
?agent_node(ToAgent), ToAgent, OfGroup, Leader, Streams, Version
@ -164,17 +127,7 @@ leader_lease_streams(ToAgent, OfGroup, Leader, Streams, Version) ->
-spec leader_renew_stream_lease(agent(), group(), version()) -> ok.
leader_renew_stream_lease(ToAgent, OfGroup, Version) when ?is_local_agent(ToAgent) ->
?tp(warning, shared_sub_proto_msg, #{
type => leader_renew_stream_lease,
to_agent => ToAgent,
of_group => OfGroup,
version => Version
}),
_ = emqx_persistent_session_ds_shared_subs_agent:send(
?agent_pid(ToAgent),
?leader_renew_stream_lease(OfGroup, Version)
),
ok;
send_leader_msg(ToAgent, ?leader_renew_stream_lease(OfGroup, Version));
leader_renew_stream_lease(ToAgent, OfGroup, Version) ->
emqx_ds_shared_sub_proto_v1:leader_renew_stream_lease(
?agent_node(ToAgent), ToAgent, OfGroup, Version
@ -182,18 +135,7 @@ leader_renew_stream_lease(ToAgent, OfGroup, Version) ->
-spec leader_renew_stream_lease(agent(), group(), version(), version()) -> ok.
leader_renew_stream_lease(ToAgent, OfGroup, VersionOld, VersionNew) when ?is_local_agent(ToAgent) ->
?tp(warning, shared_sub_proto_msg, #{
type => leader_renew_stream_lease,
to_agent => ToAgent,
of_group => OfGroup,
version_old => VersionOld,
version_new => VersionNew
}),
_ = emqx_persistent_session_ds_shared_subs_agent:send(
?agent_pid(ToAgent),
?leader_renew_stream_lease(OfGroup, VersionOld, VersionNew)
),
ok;
send_leader_msg(ToAgent, ?leader_renew_stream_lease(OfGroup, VersionOld, VersionNew));
leader_renew_stream_lease(ToAgent, OfGroup, VersionOld, VersionNew) ->
emqx_ds_shared_sub_proto_v1:leader_renew_stream_lease(
?agent_node(ToAgent), ToAgent, OfGroup, VersionOld, VersionNew
@ -204,19 +146,7 @@ leader_renew_stream_lease(ToAgent, OfGroup, VersionOld, VersionNew) ->
leader_update_streams(ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew) when
?is_local_agent(ToAgent)
->
?tp(warning, shared_sub_proto_msg, #{
type => leader_update_streams,
to_agent => ToAgent,
of_group => OfGroup,
version_old => VersionOld,
version_new => VersionNew,
streams_new => format_stream_progresses(StreamsNew)
}),
_ = emqx_persistent_session_ds_shared_subs_agent:send(
?agent_pid(ToAgent),
?leader_update_streams(OfGroup, VersionOld, VersionNew, StreamsNew)
),
ok;
send_leader_msg(ToAgent, ?leader_update_streams(OfGroup, VersionOld, VersionNew, StreamsNew));
leader_update_streams(ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew) ->
emqx_ds_shared_sub_proto_v1:leader_update_streams(
?agent_node(ToAgent), ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew
@ -224,16 +154,7 @@ leader_update_streams(ToAgent, OfGroup, VersionOld, VersionNew, StreamsNew) ->
-spec leader_invalidate(agent(), group()) -> ok.
leader_invalidate(ToAgent, OfGroup) when ?is_local_agent(ToAgent) ->
?tp(warning, shared_sub_proto_msg, #{
type => leader_invalidate,
to_agent => ToAgent,
of_group => OfGroup
}),
_ = emqx_persistent_session_ds_shared_subs_agent:send(
?agent_pid(ToAgent),
?leader_invalidate(OfGroup)
),
ok;
send_leader_msg(ToAgent, ?leader_invalidate(OfGroup));
leader_invalidate(ToAgent, OfGroup) ->
emqx_ds_shared_sub_proto_v1:leader_invalidate(
?agent_node(ToAgent), ToAgent, OfGroup
@ -247,41 +168,12 @@ agent(Id, Pid) ->
_ = Id,
?agent(Id, Pid).
format_stream_progresses(Streams) ->
lists:map(
fun format_stream_progress/1,
Streams
).
send_agent_msg(ToLeader, Msg) ->
?log_agent_msg(ToLeader, Msg),
_ = erlang:send(ToLeader, Msg),
ok.
format_stream_progress(#{stream := Stream, progress := Progress} = Value) ->
Value#{stream => format_opaque(Stream), progress => format_progress(Progress)}.
format_progress(#{iterator := Iterator} = Progress) ->
Progress#{iterator => format_opaque(Iterator)}.
format_stream_key({SubId, Stream}) ->
{SubId, format_opaque(Stream)}.
format_stream_keys(StreamKeys) ->
lists:map(
fun format_stream_key/1,
StreamKeys
).
format_lease_events(Events) ->
lists:map(
fun format_lease_event/1,
Events
).
format_lease_event(#{stream := Stream, progress := Progress} = Event) ->
Event#{stream => format_opaque(Stream), progress => format_progress(Progress)};
format_lease_event(#{stream := Stream} = Event) ->
Event#{stream => format_opaque(Stream)}.
%%--------------------------------------------------------------------
%% Helpers
%%--------------------------------------------------------------------
format_opaque(Opaque) ->
erlang:phash2(Opaque).
send_leader_msg(ToAgent, Msg) ->
?log_leader_msg(ToAgent, Msg),
_ = emqx_persistent_session_ds_shared_subs_agent:send(?agent_pid(ToAgent), Msg),
ok.

View File

@ -12,146 +12,167 @@
%% agent messages, sent from agent side to the leader
-define(agent_connect_leader_msg, agent_connect_leader).
-define(agent_update_stream_states_msg, agent_update_stream_states).
-define(agent_connect_leader_timeout_msg, agent_connect_leader_timeout).
-define(agent_renew_stream_lease_timeout_msg, agent_renew_stream_lease_timeout).
-define(agent_disconnect_msg, agent_disconnect).
-define(agent_connect_leader_msg, 1).
-define(agent_update_stream_states_msg, 2).
-define(agent_connect_leader_timeout_msg, 3).
-define(agent_renew_stream_lease_timeout_msg, 4).
-define(agent_disconnect_msg, 5).
%% message keys (numeric, used to avoid sending atoms over the network)
-define(agent_msg_type, 1).
-define(agent_msg_agent, 2).
-define(agent_msg_share_topic_filter, 3).
-define(agent_msg_agent_metadata, 4).
-define(agent_msg_stream_states, 5).
-define(agent_msg_version, 6).
-define(agent_msg_version_old, 7).
-define(agent_msg_version_new, 8).
%% Agent messages sent to the leader.
%% The leader talks to many agents; the `agent` field identifies the sender.
-define(agent_connect_leader(Agent, AgentMetadata, ShareTopicFilter), #{
type => ?agent_connect_leader_msg,
share_topic_filter => ShareTopicFilter,
agent_metadata => AgentMetadata,
agent => Agent
?agent_msg_type => ?agent_connect_leader_msg,
?agent_msg_share_topic_filter => ShareTopicFilter,
?agent_msg_agent_metadata => AgentMetadata,
?agent_msg_agent => Agent
}).
-define(agent_connect_leader_match(Agent, AgentMetadata, ShareTopicFilter), #{
type := ?agent_connect_leader_msg,
share_topic_filter := ShareTopicFilter,
agent_metadata := AgentMetadata,
agent := Agent
?agent_msg_type := ?agent_connect_leader_msg,
?agent_msg_share_topic_filter := ShareTopicFilter,
?agent_msg_agent_metadata := AgentMetadata,
?agent_msg_agent := Agent
}).
-define(agent_update_stream_states(Agent, StreamStates, Version), #{
type => ?agent_update_stream_states_msg,
stream_states => StreamStates,
version => Version,
agent => Agent
?agent_msg_type => ?agent_update_stream_states_msg,
?agent_msg_stream_states => StreamStates,
?agent_msg_version => Version,
?agent_msg_agent => Agent
}).
-define(agent_update_stream_states_match(Agent, StreamStates, Version), #{
type := ?agent_update_stream_states_msg,
stream_states := StreamStates,
version := Version,
agent := Agent
?agent_msg_type := ?agent_update_stream_states_msg,
?agent_msg_stream_states := StreamStates,
?agent_msg_version := Version,
?agent_msg_agent := Agent
}).
-define(agent_update_stream_states(Agent, StreamStates, VersionOld, VersionNew), #{
type => ?agent_update_stream_states_msg,
stream_states => StreamStates,
version_old => VersionOld,
version_new => VersionNew,
agent => Agent
?agent_msg_type => ?agent_update_stream_states_msg,
?agent_msg_stream_states => StreamStates,
?agent_msg_version_old => VersionOld,
?agent_msg_version_new => VersionNew,
?agent_msg_agent => Agent
}).
-define(agent_update_stream_states_match(Agent, StreamStates, VersionOld, VersionNew), #{
type := ?agent_update_stream_states_msg,
stream_states := StreamStates,
version_old := VersionOld,
version_new := VersionNew,
agent := Agent
?agent_msg_type := ?agent_update_stream_states_msg,
?agent_msg_stream_states := StreamStates,
?agent_msg_version_old := VersionOld,
?agent_msg_version_new := VersionNew,
?agent_msg_agent := Agent
}).
-define(agent_disconnect(Agent, StreamStates, Version), #{
type => ?agent_disconnect_msg,
stream_states => StreamStates,
version => Version,
agent => Agent
?agent_msg_type => ?agent_disconnect_msg,
?agent_msg_stream_states => StreamStates,
?agent_msg_version => Version,
?agent_msg_agent => Agent
}).
-define(agent_disconnect_match(Agent, StreamStates, Version), #{
type := ?agent_disconnect_msg,
stream_states := StreamStates,
version := Version,
agent := Agent
?agent_msg_type := ?agent_disconnect_msg,
?agent_msg_stream_states := StreamStates,
?agent_msg_version := Version,
?agent_msg_agent := Agent
}).
%% Leader messages, sent from the leader to the agent.
%% An agent may have several shared subscriptions, so it may talk to several leaders;
%% the `group_id` field identifies the leader.
-define(leader_lease_streams_msg, leader_lease_streams).
-define(leader_renew_stream_lease_msg, leader_renew_stream_lease).
-define(leader_lease_streams_msg, 101).
-define(leader_renew_stream_lease_msg, 102).
-define(leader_update_streams, 103).
-define(leader_invalidate, 104).
-define(leader_msg_type, 101).
-define(leader_msg_streams, 102).
-define(leader_msg_version, 103).
-define(leader_msg_version_old, 104).
-define(leader_msg_version_new, 105).
-define(leader_msg_streams_new, 106).
-define(leader_msg_leader, 107).
-define(leader_msg_group_id, 108).
-define(leader_lease_streams(GroupId, Leader, Streams, Version), #{
type => ?leader_lease_streams_msg,
streams => Streams,
version => Version,
leader => Leader,
group_id => GroupId
?leader_msg_type => ?leader_lease_streams_msg,
?leader_msg_streams => Streams,
?leader_msg_version => Version,
?leader_msg_leader => Leader,
?leader_msg_group_id => GroupId
}).
-define(leader_lease_streams_match(GroupId, Leader, Streams, Version), #{
type := ?leader_lease_streams_msg,
streams := Streams,
version := Version,
leader := Leader,
group_id := GroupId
?leader_msg_type := ?leader_lease_streams_msg,
?leader_msg_streams := Streams,
?leader_msg_version := Version,
?leader_msg_leader := Leader,
?leader_msg_group_id := GroupId
}).
-define(leader_renew_stream_lease(GroupId, Version), #{
type => ?leader_renew_stream_lease_msg,
version => Version,
group_id => GroupId
?leader_msg_type => ?leader_renew_stream_lease_msg,
?leader_msg_version => Version,
?leader_msg_group_id => GroupId
}).
-define(leader_renew_stream_lease_match(GroupId, Version), #{
type := ?leader_renew_stream_lease_msg,
version := Version,
group_id := GroupId
?leader_msg_type := ?leader_renew_stream_lease_msg,
?leader_msg_version := Version,
?leader_msg_group_id := GroupId
}).
-define(leader_renew_stream_lease(GroupId, VersionOld, VersionNew), #{
type => ?leader_renew_stream_lease_msg,
version_old => VersionOld,
version_new => VersionNew,
group_id => GroupId
?leader_msg_type => ?leader_renew_stream_lease_msg,
?leader_msg_version_old => VersionOld,
?leader_msg_version_new => VersionNew,
?leader_msg_group_id => GroupId
}).
-define(leader_renew_stream_lease_match(GroupId, VersionOld, VersionNew), #{
type := ?leader_renew_stream_lease_msg,
version_old := VersionOld,
version_new := VersionNew,
group_id := GroupId
?leader_msg_type := ?leader_renew_stream_lease_msg,
?leader_msg_version_old := VersionOld,
?leader_msg_version_new := VersionNew,
?leader_msg_group_id := GroupId
}).
-define(leader_update_streams(GroupId, VersionOld, VersionNew, StreamsNew), #{
type => leader_update_streams,
version_old => VersionOld,
version_new => VersionNew,
streams_new => StreamsNew,
group_id => GroupId
?leader_msg_type => ?leader_update_streams,
?leader_msg_version_old => VersionOld,
?leader_msg_version_new => VersionNew,
?leader_msg_streams_new => StreamsNew,
?leader_msg_group_id => GroupId
}).
-define(leader_update_streams_match(GroupId, VersionOld, VersionNew, StreamsNew), #{
type := leader_update_streams,
version_old := VersionOld,
version_new := VersionNew,
streams_new := StreamsNew,
group_id := GroupId
?leader_msg_type := ?leader_update_streams,
?leader_msg_version_old := VersionOld,
?leader_msg_version_new := VersionNew,
?leader_msg_streams_new := StreamsNew,
?leader_msg_group_id := GroupId
}).
-define(leader_invalidate(GroupId), #{
type => leader_invalidate,
group_id => GroupId
?leader_msg_type => ?leader_invalidate,
?leader_msg_group_id => GroupId
}).
-define(leader_invalidate_match(GroupId), #{
type := leader_invalidate,
group_id := GroupId
?leader_msg_type := ?leader_invalidate,
?leader_msg_group_id := GroupId
}).
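
For illustration, a minimal sketch of what these macros now put on the wire. Every key and message type is a small integer rather than an atom; the agent identity, topic filter, and metadata below are hypothetical:

%% ?agent_connect_leader(Agent, Meta, ShareTopicFilter) expands to:
%%   #{1 => 1,                    %% ?agent_msg_type => ?agent_connect_leader_msg
%%     2 => Agent,                %% ?agent_msg_agent
%%     3 => <<"$share/g1/t/#">>,  %% ?agent_msg_share_topic_filter (hypothetical)
%%     4 => #{}}                  %% ?agent_msg_agent_metadata (hypothetical)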
%% Helpers

View File

@ -0,0 +1,82 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ds_shared_sub_proto_format).
-include("emqx_ds_shared_sub_proto.hrl").
-export([format_agent_msg/1, format_leader_msg/1]).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
format_agent_msg(Msg) ->
maps:from_list(
lists:map(
fun({K, V}) ->
FormattedKey = agent_msg_key(K),
{FormattedKey, format_agent_msg_value(FormattedKey, V)}
end,
maps:to_list(Msg)
)
).
format_leader_msg(Msg) ->
maps:from_list(
lists:map(
fun({K, V}) ->
FormattedKey = leader_msg_key(K),
{FormattedKey, format_leader_msg_value(FormattedKey, V)}
end,
maps:to_list(Msg)
)
).
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
format_agent_msg_value(agent_msg_type, Type) ->
agent_msg_type(Type);
format_agent_msg_value(agent_msg_stream_states, StreamStates) ->
emqx_persistent_session_ds_shared_subs:format_stream_progresses(StreamStates);
format_agent_msg_value(_, Value) ->
Value.
format_leader_msg_value(leader_msg_type, Type) ->
leader_msg_type(Type);
format_leader_msg_value(leader_msg_streams, Streams) ->
emqx_persistent_session_ds_shared_subs:format_lease_events(Streams);
format_leader_msg_value(_, Value) ->
Value.
agent_msg_type(?agent_connect_leader_msg) -> agent_connect_leader_msg;
agent_msg_type(?agent_update_stream_states_msg) -> agent_update_stream_states_msg;
agent_msg_type(?agent_connect_leader_timeout_msg) -> agent_connect_leader_timeout_msg;
agent_msg_type(?agent_renew_stream_lease_timeout_msg) -> agent_renew_stream_lease_timeout_msg;
agent_msg_type(?agent_disconnect_msg) -> agent_disconnect_msg.
agent_msg_key(?agent_msg_type) -> agent_msg_type;
agent_msg_key(?agent_msg_agent) -> agent_msg_agent;
agent_msg_key(?agent_msg_share_topic_filter) -> agent_msg_share_topic_filter;
agent_msg_key(?agent_msg_agent_metadata) -> agent_msg_agent_metadata;
agent_msg_key(?agent_msg_stream_states) -> agent_msg_stream_states;
agent_msg_key(?agent_msg_version) -> agent_msg_version;
agent_msg_key(?agent_msg_version_old) -> agent_msg_version_old;
agent_msg_key(?agent_msg_version_new) -> agent_msg_version_new.
leader_msg_type(?leader_lease_streams_msg) -> leader_lease_streams_msg;
leader_msg_type(?leader_renew_stream_lease_msg) -> leader_renew_stream_lease_msg;
leader_msg_type(?leader_update_streams) -> leader_update_streams;
leader_msg_type(?leader_invalidate) -> leader_invalidate.
leader_msg_key(?leader_msg_type) -> leader_msg_type;
leader_msg_key(?leader_msg_streams) -> leader_msg_streams;
leader_msg_key(?leader_msg_version) -> leader_msg_version;
leader_msg_key(?leader_msg_version_old) -> leader_msg_version_old;
leader_msg_key(?leader_msg_version_new) -> leader_msg_version_new;
leader_msg_key(?leader_msg_streams_new) -> leader_msg_streams_new;
leader_msg_key(?leader_msg_leader) -> leader_msg_leader;
leader_msg_key(?leader_msg_group_id) -> leader_msg_group_id.
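
Putting the two mappings together, a hedged example of what the formatter yields for a leader_invalidate message (the group id is made up):

%% In: #{101 => 104, 108 => <<"g1">>}
%%     i.e. ?leader_msg_type => ?leader_invalidate, ?leader_msg_group_id => <<"g1">>
format_leader_msg(#{101 => 104, 108 => <<"g1">>}).
%% => #{leader_msg_type => leader_invalidate, leader_msg_group_id => <<"g1">>}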

View File

@ -113,7 +113,7 @@ do_lookup_leader(Agent, AgentMetadata, ShareTopicFilter, State) ->
Pid ->
Pid
end,
?SLOG(info, #{
?SLOG(debug, #{
msg => lookup_leader,
agent => Agent,
share_topic_filter => ShareTopicFilter,

View File

@ -417,7 +417,7 @@ t_lease_reconnect(_Config) ->
?assertWaitEvent(
{ok, _, _} = emqtt:subscribe(ConnShared, <<"$share/gr2/topic2/#">>, 1),
#{?snk_kind := find_leader_timeout},
#{?snk_kind := group_sm_find_leader_timeout},
5_000
),

View File

@ -56,6 +56,7 @@
topic/0,
batch/0,
operation/0,
deletion/0,
precondition/0,
stream/0,
delete_stream/0,
@ -110,7 +111,9 @@
message()
%% Delete a message.
%% Does nothing if the message does not exist.
| {delete, message_matcher('_')}.
| deletion().
-type deletion() :: {delete, message_matcher('_')}.
%% Precondition.
%% Fails whole batch if the storage already has the matching message (`if_exists'),
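
A hedged sketch of how the extended operation() type is meant to be used; the record fields follow #message{} and #message_matcher{} as constructed elsewhere in this changeset, while the DB name and values are hypothetical:

Batch = [
    #message{id = <<>>, qos = 0, from = <<"c1">>, topic = <<"t/1">>,
             timestamp = 100, payload = <<"M1">>, headers = #{}},
    %% Deletion: matches on client, topic and timestamp; '_' matches any payload.
    {delete, #message_matcher{from = <<"c1">>, topic = <<"t/0">>,
                              timestamp = 50, payload = '_', headers = #{}}}
],
ok = emqx_ds:store_batch(my_db, Batch, #{atomic => true}).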

View File

@ -21,7 +21,7 @@
-behaviour(gen_server).
%% API:
-export([start_link/4, store_batch/3, shard_of_message/3]).
-export([start_link/4, store_batch/3, shard_of_operation/3]).
-export([ls/0]).
%% behavior callbacks:
@ -46,19 +46,18 @@
-define(cbm(DB), {?MODULE, DB}).
-record(enqueue_req, {
messages :: [emqx_types:message()],
operations :: [emqx_ds:operation()],
sync :: boolean(),
atomic :: boolean(),
n_messages :: non_neg_integer(),
n_operations :: non_neg_integer(),
payload_bytes :: non_neg_integer()
}).
-callback init_buffer(emqx_ds:db(), _Shard, _Options) -> {ok, _State}.
-callback flush_buffer(emqx_ds:db(), _Shard, [emqx_types:message()], State) ->
-callback flush_buffer(emqx_ds:db(), _Shard, [emqx_ds:operation()], State) ->
{State, ok | {error, recoverable | unrecoverable, _}}.
-callback shard_of_message(emqx_ds:db(), emqx_types:message(), topic | clientid, _Options) ->
-callback shard_of_operation(emqx_ds:db(), emqx_ds:operation(), topic | clientid, _Options) ->
_Shard.
%%================================================================================
@ -77,39 +76,33 @@ start_link(CallbackModule, CallbackOptions, DB, Shard) ->
?via(DB, Shard), ?MODULE, [CallbackModule, CallbackOptions, DB, Shard], []
).
-spec store_batch(emqx_ds:db(), [emqx_types:message()], emqx_ds:message_store_opts()) ->
-spec store_batch(emqx_ds:db(), [emqx_ds:operation()], emqx_ds:message_store_opts()) ->
emqx_ds:store_batch_result().
store_batch(DB, Messages, Opts) ->
store_batch(DB, Operations, Opts) ->
Sync = maps:get(sync, Opts, true),
Atomic = maps:get(atomic, Opts, false),
%% Usually we expect all messages in the batch to go into a
%% single shard, so this function is optimized for the happy case.
case shards_of_batch(DB, Messages) of
[{Shard, {NMsgs, NBytes}}] ->
case shards_of_batch(DB, Operations) of
[{Shard, {NOps, NBytes}}] ->
%% Happy case:
enqueue_call_or_cast(
?via(DB, Shard),
#enqueue_req{
messages = Messages,
operations = Operations,
sync = Sync,
atomic = Atomic,
n_messages = NMsgs,
n_operations = NOps,
payload_bytes = NBytes
}
);
[_, _ | _] when Atomic ->
%% It's impossible to commit a batch to multiple shards
%% atomically
{error, unrecoverable, atomic_commit_to_multiple_shards};
_Shards ->
%% Use a slower implementation for the unlikely case:
repackage_messages(DB, Messages, Sync)
repackage_messages(DB, Operations, Sync)
end.
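
A hedged usage sketch of the reworked buffer entry point; my_db and the options are illustrative:

%% Operations for a single shard take the happy path; an atomic batch
%% spanning several shards is rejected with
%%   {error, unrecoverable, atomic_commit_to_multiple_shards}.
ok = emqx_ds_buffer:store_batch(my_db, Operations, #{sync => true, atomic => true}).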
-spec shard_of_message(emqx_ds:db(), emqx_types:message(), clientid | topic) -> _Shard.
shard_of_message(DB, Message, ShardBy) ->
-spec shard_of_operation(emqx_ds:db(), emqx_ds:operation(), clientid | topic) -> _Shard.
shard_of_operation(DB, Operation, ShardBy) ->
{CBM, Options} = persistent_term:get(?cbm(DB)),
CBM:shard_of_message(DB, Message, ShardBy, Options).
CBM:shard_of_operation(DB, Operation, ShardBy, Options).
%%================================================================================
%% behavior callbacks
@ -129,7 +122,7 @@ shard_of_message(DB, Message, ShardBy) ->
n = 0 :: non_neg_integer(),
n_bytes = 0 :: non_neg_integer(),
tref :: undefined | reference(),
queue :: queue:queue(emqx_types:message()),
queue :: queue:queue(emqx_ds:operation()),
pending_replies = [] :: [gen_server:from()]
}).
@ -168,31 +161,29 @@ format_status(Status) ->
handle_call(
#enqueue_req{
messages = Msgs,
operations = Operations,
sync = Sync,
atomic = Atomic,
n_messages = NMsgs,
n_operations = NOps,
payload_bytes = NBytes
},
From,
S0 = #s{pending_replies = Replies0}
) ->
S = S0#s{pending_replies = [From | Replies0]},
{noreply, enqueue(Sync, Atomic, Msgs, NMsgs, NBytes, S)};
{noreply, enqueue(Sync, Operations, NOps, NBytes, S)};
handle_call(_Call, _From, S) ->
{reply, {error, unknown_call}, S}.
handle_cast(
#enqueue_req{
messages = Msgs,
operations = Operations,
sync = Sync,
atomic = Atomic,
n_messages = NMsgs,
n_operations = NOps,
payload_bytes = NBytes
},
S
) ->
{noreply, enqueue(Sync, Atomic, Msgs, NMsgs, NBytes, S)};
{noreply, enqueue(Sync, Operations, NOps, NBytes, S)};
handle_cast(_Cast, S) ->
{noreply, S}.
@ -215,11 +206,10 @@ terminate(_Reason, #s{db = DB}) ->
enqueue(
Sync,
Atomic,
Msgs,
Ops,
BatchSize,
BatchBytes,
S0 = #s{n = NMsgs0, n_bytes = NBytes0, queue = Q0}
S0 = #s{n = NOps0, n_bytes = NBytes0, queue = Q0}
) ->
%% At this point we don't split the batches, even when they aren't
%% atomic. It wouldn't win us anything in terms of memory, and
@ -227,18 +217,18 @@ enqueue(
%% granularity should be fine enough.
NMax = application:get_env(emqx_durable_storage, egress_batch_size, 1000),
NBytesMax = application:get_env(emqx_durable_storage, egress_batch_bytes, infinity),
NMsgs = NMsgs0 + BatchSize,
NMsgs = NOps0 + BatchSize,
NBytes = NBytes0 + BatchBytes,
case (NMsgs >= NMax orelse NBytes >= NBytesMax) andalso (NMsgs0 > 0) of
case (NMsgs >= NMax orelse NBytes >= NBytesMax) andalso (NOps0 > 0) of
true ->
%% Adding this batch would cause buffer to overflow. Flush
%% it now, and retry:
S1 = flush(S0),
enqueue(Sync, Atomic, Msgs, BatchSize, BatchBytes, S1);
enqueue(Sync, Ops, BatchSize, BatchBytes, S1);
false ->
%% The buffer is empty, we enqueue the atomic batch in its
%% entirety:
Q1 = lists:foldl(fun queue:in/2, Q0, Msgs),
Q1 = lists:foldl(fun queue:in/2, Q0, Ops),
S1 = S0#s{n = NMsgs, n_bytes = NBytes, queue = Q1},
case NMsgs >= NMax orelse NBytes >= NBytesMax of
true ->
@ -336,18 +326,18 @@ do_flush(
}
end.
-spec shards_of_batch(emqx_ds:db(), [emqx_types:message()]) ->
-spec shards_of_batch(emqx_ds:db(), [emqx_ds:operation()]) ->
[{_ShardId, {NMessages, NBytes}}]
when
NMessages :: non_neg_integer(),
NBytes :: non_neg_integer().
shards_of_batch(DB, Messages) ->
shards_of_batch(DB, Batch) ->
maps:to_list(
lists:foldl(
fun(Message, Acc) ->
fun(Operation, Acc) ->
%% TODO: sharding strategy must be part of the DS DB schema:
Shard = shard_of_message(DB, Message, clientid),
Size = payload_size(Message),
Shard = shard_of_operation(DB, Operation, clientid),
Size = payload_size(Operation),
maps:update_with(
Shard,
fun({N, S}) ->
@ -358,36 +348,35 @@ shards_of_batch(DB, Messages) ->
)
end,
#{},
Messages
Batch
)
).
repackage_messages(DB, Messages, Sync) ->
repackage_messages(DB, Batch, Sync) ->
Batches = lists:foldl(
fun(Message, Acc) ->
Shard = shard_of_message(DB, Message, clientid),
Size = payload_size(Message),
fun(Operation, Acc) ->
Shard = shard_of_operation(DB, Operation, clientid),
Size = payload_size(Operation),
maps:update_with(
Shard,
fun({N, S, Msgs}) ->
{N + 1, S + Size, [Message | Msgs]}
{N + 1, S + Size, [Operation | Msgs]}
end,
{1, Size, [Message]},
{1, Size, [Operation]},
Acc
)
end,
#{},
Messages
Batch
),
maps:fold(
fun(Shard, {NMsgs, ByteSize, RevMessages}, ErrAcc) ->
fun(Shard, {NOps, ByteSize, RevOperations}, ErrAcc) ->
Err = enqueue_call_or_cast(
?via(DB, Shard),
#enqueue_req{
messages = lists:reverse(RevMessages),
operations = lists:reverse(RevOperations),
sync = Sync,
atomic = false,
n_messages = NMsgs,
n_operations = NOps,
payload_bytes = ByteSize
}
),
@ -427,4 +416,6 @@ cancel_timer(S = #s{tref = TRef}) ->
%% @doc Return approximate size of the MQTT message (it doesn't take
%% all things into account, for example headers and extras)
payload_size(#message{payload = P, topic = T}) ->
size(P) + size(T).
size(P) + size(T);
payload_size({_OpName, _}) ->
0.
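
Since deletions now flow through the same sharding path as writes, a backend's shard_of_operation/4 callback has to map a {delete, Matcher} to the same shard as the original message. A minimal sketch, assuming a hypothetical backend that hashes the client id into 16 shards:

shard_of_operation(_DB, #message{from = ClientId}, clientid, _Options) ->
    erlang:phash2(ClientId, 16);
shard_of_operation(_DB, {delete, #message_matcher{from = ClientId}}, clientid, _Options) ->
    %% A deletion must land on the shard that holds the message it targets.
    erlang:phash2(ClientId, 16).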

View File

@ -0,0 +1,184 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2024 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_ds_precondition).
-include_lib("emqx_utils/include/emqx_message.hrl").
-include_lib("emqx_durable_storage/include/emqx_ds.hrl").
-export([verify/3]).
-export([matches/2]).
-export_type([matcher/0, mismatch/0]).
-type matcher() :: #message_matcher{}.
-type mismatch() :: emqx_types:message() | not_found.
-callback lookup_message(_Ctx, matcher()) ->
emqx_types:message() | not_found | emqx_ds:error(_).
%%
-spec verify(module(), _Ctx, [emqx_ds:precondition()]) ->
ok | {precondition_failed, mismatch()} | emqx_ds:error(_).
verify(Mod, Ctx, [_Precondition = {Cond, Msg} | Rest]) ->
case verify_precondition(Mod, Ctx, Cond, Msg) of
ok ->
verify(Mod, Ctx, Rest);
Failed ->
Failed
end;
verify(_Mod, _Ctx, []) ->
ok.
verify_precondition(Mod, Ctx, if_exists, Matcher) ->
case Mod:lookup_message(Ctx, Matcher) of
Msg = #message{} ->
verify_match(Msg, Matcher);
not_found ->
{precondition_failed, not_found};
Error = {error, _, _} ->
Error
end;
verify_precondition(Mod, Ctx, unless_exists, Matcher) ->
case Mod:lookup_message(Ctx, Matcher) of
Msg = #message{} ->
verify_nomatch(Msg, Matcher);
not_found ->
ok;
Error = {error, _, _} ->
Error
end.
verify_match(Msg, Matcher) ->
case matches(Msg, Matcher) of
true -> ok;
false -> {precondition_failed, Msg}
end.
verify_nomatch(Msg, Matcher) ->
case matches(Msg, Matcher) of
false -> ok;
true -> {precondition_failed, Msg}
end.
-spec matches(emqx_types:message(), matcher()) -> boolean().
matches(
Message,
#message_matcher{from = From, topic = Topic, payload = Pat, headers = Headers}
) ->
case Message of
#message{from = From, topic = Topic} when Pat =:= '_' ->
matches_headers(Message, Headers);
#message{from = From, topic = Topic, payload = Pat} ->
matches_headers(Message, Headers);
_ ->
false
end.
matches_headers(_Message, MatchHeaders) when map_size(MatchHeaders) =:= 0 ->
true;
matches_headers(#message{headers = Headers}, MatchHeaders) ->
maps:intersect(MatchHeaders, Headers) =:= MatchHeaders.
%% Basic tests
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").
-compile(export_all).
conjunction_test() ->
%% Contradictory preconditions, always false.
Preconditions = [
{if_exists, matcher(<<"c1">>, <<"t/1">>, 0, '_')},
{unless_exists, matcher(<<"c1">>, <<"t/1">>, 0, '_')}
],
?assertEqual(
{precondition_failed, not_found},
verify(?MODULE, [], Preconditions)
),
%% Check that the order does not matter.
?assertEqual(
{precondition_failed, not_found},
verify(?MODULE, [], lists:reverse(Preconditions))
),
?assertEqual(
{precondition_failed, message(<<"c1">>, <<"t/1">>, 0, <<>>)},
verify(
?MODULE,
[message(<<"c1">>, <<"t/1">>, 0, <<>>)],
Preconditions
)
).
matches_test() ->
?assert(
matches(
message(<<"mtest1">>, <<"t/same">>, 12345, <<?MODULE_STRING>>),
matcher(<<"mtest1">>, <<"t/same">>, 12345, '_')
)
).
matches_headers_test() ->
?assert(
matches(
message(<<"mtest2">>, <<"t/same">>, 23456, <<?MODULE_STRING>>, #{h1 => 42, h2 => <<>>}),
matcher(<<"mtest2">>, <<"t/same">>, 23456, '_', #{h2 => <<>>})
)
).
mismatches_headers_test() ->
?assertNot(
matches(
message(<<"mtest3">>, <<"t/same">>, 23456, <<?MODULE_STRING>>, #{h1 => 42, h2 => <<>>}),
matcher(<<"mtest3">>, <<"t/same">>, 23456, '_', #{h2 => <<>>, h3 => <<"required">>})
)
).
matcher(ClientID, Topic, TS, Payload) ->
matcher(ClientID, Topic, TS, Payload, #{}).
matcher(ClientID, Topic, TS, Payload, Headers) ->
#message_matcher{
from = ClientID,
topic = Topic,
timestamp = TS,
payload = Payload,
headers = Headers
}.
message(ClientID, Topic, TS, Payload) ->
message(ClientID, Topic, TS, Payload, #{}).
message(ClientID, Topic, TS, Payload, Headers) ->
#message{
id = <<>>,
qos = 0,
from = ClientID,
topic = Topic,
timestamp = TS,
payload = Payload,
headers = Headers
}.
lookup_message(Messages, Matcher) ->
case lists:search(fun(M) -> matches(M, Matcher) end, Messages) of
{value, Message} ->
Message;
false ->
not_found
end.
-endif.
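
Outside of the eunit cases above, any module exporting a compatible lookup_message/2 can serve as the backend. A hedged sketch of gating a commit on preconditions, reusing the storage layer's lookup_message/2 from later in this changeset; commit/1 is a placeholder:

maybe_commit(ShardId, Preconditions, Batch) ->
    case emqx_ds_precondition:verify(emqx_ds_storage_layer, ShardId, Preconditions) of
        ok ->
            commit(Batch);
        {precondition_failed, Mismatch} ->
            {error, unrecoverable, {precondition_failed, Mismatch}};
        Error = {error, _, _} ->
            Error
    end.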

View File

@ -37,6 +37,7 @@
update_iterator/4,
next/6,
delete_next/7,
lookup_message/3,
handle_event/4
]).
@ -46,6 +47,7 @@
-export_type([options/0]).
-include("emqx_ds.hrl").
-include("emqx_ds_metrics.hrl").
-include_lib("emqx_utils/include/emqx_message.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
@ -68,10 +70,13 @@
-define(start_time, 3).
-define(storage_key, 4).
-define(last_seen_key, 5).
-define(cooked_payloads, 6).
-define(cooked_msg_ops, 6).
-define(cooked_lts_ops, 7).
-define(cooked_ts, 8).
%% atoms:
-define(delete, 100).
-type options() ::
#{
bits_per_wildcard_level => pos_integer(),
@ -110,7 +115,7 @@
-type cooked_batch() ::
#{
?cooked_payloads := [{binary(), binary()}],
?cooked_msg_ops := [{binary(), binary() | ?delete}],
?cooked_lts_ops := [{binary(), binary()}],
?cooked_ts := integer()
}.
@ -271,24 +276,28 @@ drop(_Shard, DBHandle, GenId, CFRefs, #s{trie = Trie, gvars = GVars}) ->
-spec prepare_batch(
emqx_ds_storage_layer:shard_id(),
s(),
[{emqx_ds:time(), emqx_types:message()}, ...],
emqx_ds_storage_layer:batch(),
emqx_ds_storage_layer:batch_store_opts()
) ->
{ok, cooked_batch()}.
prepare_batch(_ShardId, S, Messages, _Options) ->
prepare_batch(_ShardId, S, Batch, _Options) ->
_ = erase(?lts_persist_ops),
{Payloads, MaxTs} =
{Operations, MaxTs} =
lists:mapfoldl(
fun({Timestamp, Msg}, Acc) ->
{Key, _} = make_key(S, Timestamp, Msg),
Payload = {Key, message_to_value_v1(Msg)},
{Payload, max(Acc, Timestamp)}
fun
({Timestamp, Msg = #message{topic = Topic}}, Acc) ->
{Key, _} = make_key(S, Timestamp, Topic),
Op = {Key, message_to_value_v1(Msg)},
{Op, max(Acc, Timestamp)};
({delete, #message_matcher{topic = Topic, timestamp = Timestamp}}, Acc) ->
{Key, _} = make_key(S, Timestamp, Topic),
{_Op = {Key, ?delete}, Acc}
end,
0,
Messages
Batch
),
{ok, #{
?cooked_payloads => Payloads,
?cooked_msg_ops => Operations,
?cooked_lts_ops => pop_lts_persist_ops(),
?cooked_ts => MaxTs
}}.
@ -302,7 +311,7 @@ prepare_batch(_ShardId, S, Messages, _Options) ->
commit_batch(
_ShardId,
_Data,
#{?cooked_payloads := [], ?cooked_lts_ops := LTS},
#{?cooked_msg_ops := [], ?cooked_lts_ops := LTS},
_Options
) ->
%% Assert:
@ -311,7 +320,7 @@ commit_batch(
commit_batch(
_ShardId,
#s{db = DB, data = DataCF, trie = Trie, trie_cf = TrieCF, gvars = Gvars},
#{?cooked_lts_ops := LtsOps, ?cooked_payloads := Payloads, ?cooked_ts := MaxTs},
#{?cooked_lts_ops := LtsOps, ?cooked_msg_ops := Operations, ?cooked_ts := MaxTs},
Options
) ->
{ok, Batch} = rocksdb:batch(),
@ -326,10 +335,13 @@ commit_batch(
_ = emqx_ds_lts:trie_update(Trie, LtsOps),
%% Commit payloads:
lists:foreach(
fun({Key, Val}) ->
ok = rocksdb:batch_put(Batch, DataCF, Key, term_to_binary(Val))
fun
({Key, Val}) when is_tuple(Val) ->
ok = rocksdb:batch_put(Batch, DataCF, Key, term_to_binary(Val));
({Key, ?delete}) ->
ok = rocksdb:batch_delete(Batch, DataCF, Key)
end,
Payloads
Operations
),
Result = rocksdb:write_batch(DB, Batch, write_batch_opts(Options)),
rocksdb:release_batch(Batch),
@ -556,6 +568,23 @@ delete_next_until(
rocksdb:iterator_close(ITHandle)
end.
-spec lookup_message(emqx_ds_storage_layer:shard_id(), s(), emqx_ds_precondition:matcher()) ->
emqx_types:message() | not_found | emqx_ds:error(_).
lookup_message(
_ShardId,
S = #s{db = DB, data = CF},
#message_matcher{topic = Topic, timestamp = Timestamp}
) ->
{Key, _} = make_key(S, Timestamp, Topic),
case rocksdb:get(DB, CF, Key, _ReadOpts = []) of
{ok, Blob} ->
deserialize(Blob);
not_found ->
not_found;
Error ->
{error, unrecoverable, {rocksdb, Error}}
end.
handle_event(_ShardId, State = #s{gvars = Gvars}, Time, tick) ->
%% If the last message was published more than one epoch ago, and
%% the shard remains idle, we need to advance safety cutoff
@ -811,9 +840,9 @@ format_key(KeyMapper, Key) ->
Vec = [integer_to_list(I, 16) || I <- emqx_ds_bitmask_keymapper:key_to_vector(KeyMapper, Key)],
lists:flatten(io_lib:format("~.16B (~s)", [Key, string:join(Vec, ",")])).
-spec make_key(s(), emqx_ds:time(), emqx_types:message()) -> {binary(), [binary()]}.
make_key(#s{keymappers = KeyMappers, trie = Trie}, Timestamp, #message{topic = TopicBin}) ->
Tokens = emqx_topic:words(TopicBin),
-spec make_key(s(), emqx_ds:time(), emqx_types:topic()) -> {binary(), [binary()]}.
make_key(#s{keymappers = KeyMappers, trie = Trie}, Timestamp, Topic) ->
Tokens = emqx_topic:words(Topic),
{TopicIndex, Varying} = emqx_ds_lts:topic_key(Trie, fun threshold_fun/1, Tokens),
VaryingHashes = [hash_topic_level(I) || I <- Varying],
KeyMapper = array:get(length(Varying), KeyMappers),

View File

@ -37,6 +37,9 @@
next/4,
delete_next/5,
%% Preconditions
lookup_message/2,
%% Generations
update_config/3,
add_generation/2,
@ -61,6 +64,7 @@
-export_type([
gen_id/0,
generation/0,
batch/0,
cf_refs/0,
stream/0,
delete_stream/0,
@ -74,6 +78,7 @@
batch_store_opts/0
]).
-include("emqx_ds.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(REF(ShardId), {via, gproc, {n, l, {?MODULE, ShardId}}}).
@ -115,6 +120,11 @@
-type gen_id() :: 0..16#ffff.
-type batch() :: [
{emqx_ds:time(), emqx_types:message()}
| emqx_ds:deletion()
].
%% Options affecting how batches should be stored.
%% See also: `emqx_ds:message_store_opts()'.
-type batch_store_opts() ::
@ -294,6 +304,10 @@
| {ok, end_of_stream}
| emqx_ds:error(_).
%% Lookup a single message, for preconditions to work.
-callback lookup_message(shard_id(), generation_data(), emqx_ds_precondition:matcher()) ->
emqx_types:message() | not_found | emqx_ds:error(_).
-callback handle_event(shard_id(), generation_data(), emqx_ds:time(), CustomEvent | tick) ->
[CustomEvent].
@ -317,14 +331,10 @@ drop_shard(Shard) ->
%% @doc This is a convenience wrapper that combines `prepare' and
%% `commit' operations.
-spec store_batch(
shard_id(),
[{emqx_ds:time(), emqx_types:message()}],
batch_store_opts()
) ->
-spec store_batch(shard_id(), batch(), batch_store_opts()) ->
emqx_ds:store_batch_result().
store_batch(Shard, Messages, Options) ->
case prepare_batch(Shard, Messages, #{}) of
store_batch(Shard, Batch, Options) ->
case prepare_batch(Shard, Batch, #{}) of
{ok, CookedBatch} ->
commit_batch(Shard, CookedBatch, Options);
ignore ->
@ -342,23 +352,21 @@ store_batch(Shard, Messages, Options) ->
%%
%% The underlying storage layout MAY use timestamp as a unique message
%% ID.
-spec prepare_batch(
shard_id(),
[{emqx_ds:time(), emqx_types:message()}],
batch_prepare_opts()
) -> {ok, cooked_batch()} | ignore | emqx_ds:error(_).
prepare_batch(Shard, Messages = [{Time, _} | _], Options) ->
-spec prepare_batch(shard_id(), batch(), batch_prepare_opts()) ->
{ok, cooked_batch()} | ignore | emqx_ds:error(_).
prepare_batch(Shard, Batch, Options) ->
%% NOTE
%% We assume that batches do not span generations. Callers should enforce this.
?tp(emqx_ds_storage_layer_prepare_batch, #{
shard => Shard, messages => Messages, options => Options
shard => Shard, batch => Batch, options => Options
}),
%% FIXME: always store messages in the current generation
case generation_at(Shard, Time) of
Time = batch_starts_at(Batch),
case is_integer(Time) andalso generation_at(Shard, Time) of
{GenId, #{module := Mod, data := GenData}} ->
T0 = erlang:monotonic_time(microsecond),
Result =
case Mod:prepare_batch(Shard, GenData, Messages, Options) of
case Mod:prepare_batch(Shard, GenData, Batch, Options) of
{ok, CookedBatch} ->
{ok, #{?tag => ?COOKED_BATCH, ?generation => GenId, ?enc => CookedBatch}};
Error = {error, _, _} ->
@ -368,11 +376,21 @@ prepare_batch(Shard, Messages = [{Time, _} | _], Options) ->
%% TODO store->prepare
emqx_ds_builtin_metrics:observe_store_batch_time(Shard, T1 - T0),
Result;
false ->
%% No write operations in this batch.
ignore;
not_found ->
%% Generation is likely already GCed.
ignore
end;
prepare_batch(_Shard, [], _Options) ->
ignore.
end.
-spec batch_starts_at(batch()) -> emqx_ds:time() | undefined.
batch_starts_at([{Time, _Message} | _]) when is_integer(Time) ->
Time;
batch_starts_at([{delete, #message_matcher{timestamp = Time}} | _]) ->
Time;
batch_starts_at([]) ->
undefined.
%% @doc Commit cooked batch to the storage.
%%
@ -559,6 +577,16 @@ update_config(ShardId, Since, Options) ->
add_generation(ShardId, Since) ->
gen_server:call(?REF(ShardId), #call_add_generation{since = Since}, infinity).
-spec lookup_message(shard_id(), emqx_ds_precondition:matcher()) ->
emqx_types:message() | not_found | emqx_ds:error(_).
lookup_message(ShardId, Matcher = #message_matcher{timestamp = Time}) ->
case generation_at(ShardId, Time) of
{_GenId, #{module := Mod, data := GenData}} ->
Mod:lookup_message(ShardId, GenData, Matcher);
not_found ->
not_found
end.
-spec list_generations_with_lifetimes(shard_id()) ->
#{
gen_id() => #{

View File

@ -21,6 +21,8 @@
%% used for testing.
-module(emqx_ds_storage_reference).
-include("emqx_ds.hrl").
-behaviour(emqx_ds_storage_layer).
%% API:
@ -39,7 +41,8 @@
make_delete_iterator/5,
update_iterator/4,
next/6,
delete_next/7
delete_next/7,
lookup_message/3
]).
%% internal exports:
@ -49,6 +52,8 @@
-include_lib("emqx_utils/include/emqx_message.hrl").
-define(DB_KEY(TIMESTAMP), <<TIMESTAMP:64>>).
%%================================================================================
%% Type declarations
%%================================================================================
@ -102,23 +107,22 @@ drop(_ShardId, DBHandle, _GenId, _CFRefs, #s{cf = CFHandle}) ->
ok = rocksdb:drop_column_family(DBHandle, CFHandle),
ok.
prepare_batch(_ShardId, _Data, Messages, _Options) ->
{ok, Messages}.
prepare_batch(_ShardId, _Data, Batch, _Options) ->
{ok, Batch}.
commit_batch(_ShardId, #s{db = DB, cf = CF}, Messages, Options) ->
{ok, Batch} = rocksdb:batch(),
lists:foreach(
fun({TS, Msg}) ->
Key = <<TS:64>>,
Val = term_to_binary(Msg),
rocksdb:batch_put(Batch, CF, Key, Val)
end,
Messages
),
Res = rocksdb:write_batch(DB, Batch, write_batch_opts(Options)),
rocksdb:release_batch(Batch),
commit_batch(_ShardId, S = #s{db = DB}, Batch, Options) ->
{ok, BatchHandle} = rocksdb:batch(),
lists:foreach(fun(Op) -> process_batch_operation(S, Op, BatchHandle) end, Batch),
Res = rocksdb:write_batch(DB, BatchHandle, write_batch_opts(Options)),
rocksdb:release_batch(BatchHandle),
Res.
process_batch_operation(S, {TS, Msg = #message{}}, BatchHandle) ->
Val = encode_message(Msg),
rocksdb:batch_put(BatchHandle, S#s.cf, ?DB_KEY(TS), Val);
process_batch_operation(S, {delete, #message_matcher{timestamp = TS}}, BatchHandle) ->
rocksdb:batch_delete(BatchHandle, S#s.cf, ?DB_KEY(TS)).
get_streams(_Shard, _Data, _TopicFilter, _StartTime) ->
[#stream{}].
@ -205,6 +209,16 @@ delete_next(_Shard, #s{db = DB, cf = CF}, It0, Selector, BatchSize, _Now, IsCurr
{ok, It, NumDeleted, NumIterated}
end.
lookup_message(_ShardId, #s{db = DB, cf = CF}, #message_matcher{timestamp = TS}) ->
case rocksdb:get(DB, CF, ?DB_KEY(TS), _ReadOpts = []) of
{ok, Val} ->
decode_message(Val);
not_found ->
not_found;
{error, Reason} ->
{error, unrecoverable, Reason}
end.
%%================================================================================
%% Internal functions
%%================================================================================
@ -214,7 +228,7 @@ do_next(_, _, _, _, 0, Key, Acc) ->
do_next(TopicFilter, StartTime, IT, Action, NLeft, Key0, Acc) ->
case rocksdb:iterator_move(IT, Action) of
{ok, Key = <<TS:64>>, Blob} ->
Msg = #message{topic = Topic} = binary_to_term(Blob),
Msg = #message{topic = Topic} = decode_message(Blob),
TopicWords = emqx_topic:words(Topic),
case emqx_topic:match(TopicWords, TopicFilter) andalso TS >= StartTime of
true ->
@ -234,7 +248,7 @@ do_delete_next(
) ->
case rocksdb:iterator_move(IT, Action) of
{ok, Key, Blob} ->
Msg = #message{topic = Topic, timestamp = TS} = binary_to_term(Blob),
Msg = #message{topic = Topic, timestamp = TS} = decode_message(Blob),
TopicWords = emqx_topic:words(Topic),
case emqx_topic:match(TopicWords, TopicFilter) andalso TS >= StartTime of
true ->
@ -285,6 +299,12 @@ do_delete_next(
{Key0, {AccDel, AccIter}}
end.
encode_message(Msg) ->
term_to_binary(Msg).
decode_message(Val) ->
binary_to_term(Val).
%% @doc Generate a column family ID for the MQTT messages
-spec data_cf(emqx_ds_storage_layer:gen_id()) -> [char()].
data_cf(GenId) ->

View File

@ -33,7 +33,8 @@
make_delete_iterator/5,
update_iterator/4,
next/6,
delete_next/7
delete_next/7,
lookup_message/3
]).
%% internal exports:
@ -43,6 +44,7 @@
-include_lib("emqx_utils/include/emqx_message.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-include("emqx_ds.hrl").
-include("emqx_ds_metrics.hrl").
-ifdef(TEST).
@ -56,11 +58,12 @@
%%================================================================================
%% TLOG entry
%% keys:
-define(cooked_payloads, 6).
%% Keys:
-define(cooked_msg_ops, 6).
-define(cooked_lts_ops, 7).
%% Payload:
-define(cooked_payload(TIMESTAMP, STATIC, VARYING, VALUE),
-define(cooked_delete, 100).
-define(cooked_msg_op(TIMESTAMP, STATIC, VARYING, VALUE),
{TIMESTAMP, STATIC, VARYING, VALUE}
).
@ -176,25 +179,39 @@ drop(_ShardId, DBHandle, _GenId, _CFRefs, #s{data_cf = DataCF, trie_cf = TrieCF,
ok = rocksdb:drop_column_family(DBHandle, TrieCF),
ok.
prepare_batch(_ShardId, S = #s{trie = Trie}, Messages, _Options) ->
prepare_batch(
_ShardId,
S = #s{trie = Trie},
Operations,
_Options
) ->
_ = erase(?lts_persist_ops),
Payloads = [
begin
Tokens = words(Topic),
{Static, Varying} = emqx_ds_lts:topic_key(Trie, fun threshold_fun/1, Tokens),
?cooked_payload(Timestamp, Static, Varying, serialize(S, Varying, Msg))
end
|| {Timestamp, Msg = #message{topic = Topic}} <- Messages
],
OperationsCooked = emqx_utils:flattermap(
fun
({Timestamp, Msg = #message{topic = Topic}}) ->
Tokens = words(Topic),
{Static, Varying} = emqx_ds_lts:topic_key(Trie, fun threshold_fun/1, Tokens),
?cooked_msg_op(Timestamp, Static, Varying, serialize(S, Varying, Msg));
({delete, #message_matcher{topic = Topic, timestamp = Timestamp}}) ->
case emqx_ds_lts:lookup_topic_key(Trie, words(Topic)) of
{ok, {Static, Varying}} ->
?cooked_msg_op(Timestamp, Static, Varying, ?cooked_delete);
undefined ->
%% Topic is unknown, nothing to delete.
[]
end
end,
Operations
),
{ok, #{
?cooked_payloads => Payloads,
?cooked_msg_ops => OperationsCooked,
?cooked_lts_ops => pop_lts_persist_ops()
}}.
commit_batch(
_ShardId,
#s{db = DB, trie_cf = TrieCF, data_cf = DataCF, trie = Trie, hash_bytes = HashBytes},
#{?cooked_lts_ops := LtsOps, ?cooked_payloads := Payloads},
#{?cooked_lts_ops := LtsOps, ?cooked_msg_ops := Operations},
Options
) ->
{ok, Batch} = rocksdb:batch(),
@ -210,12 +227,17 @@ commit_batch(
_ = emqx_ds_lts:trie_update(Trie, LtsOps),
%% Commit payloads:
lists:foreach(
fun(?cooked_payload(Timestamp, Static, Varying, ValBlob)) ->
MasterKey = mk_key(Static, 0, <<>>, Timestamp),
ok = rocksdb:batch_put(Batch, DataCF, MasterKey, ValBlob),
mk_index(Batch, DataCF, HashBytes, Static, Varying, Timestamp)
fun
(?cooked_msg_op(Timestamp, Static, Varying, ValBlob = <<_/bytes>>)) ->
MasterKey = mk_key(Static, 0, <<>>, Timestamp),
ok = rocksdb:batch_put(Batch, DataCF, MasterKey, ValBlob),
mk_index(Batch, DataCF, HashBytes, Static, Varying, Timestamp);
(?cooked_msg_op(Timestamp, Static, Varying, ?cooked_delete)) ->
MasterKey = mk_key(Static, 0, <<>>, Timestamp),
ok = rocksdb:batch_delete(Batch, DataCF, MasterKey),
delete_index(Batch, DataCF, HashBytes, Static, Varying, Timestamp)
end,
Payloads
Operations
),
Result = rocksdb:write_batch(DB, Batch, [
{disable_wal, not maps:get(durable, Options, true)}
@ -285,6 +307,28 @@ delete_next(Shard, S, It0, Selector, BatchSize, Now, IsCurrent) ->
Ret
end.
lookup_message(
Shard,
S = #s{db = DB, data_cf = CF, trie = Trie},
#message_matcher{topic = Topic, timestamp = Timestamp}
) ->
case emqx_ds_lts:lookup_topic_key(Trie, words(Topic)) of
{ok, {StaticIdx, _Varying}} ->
DSKey = mk_key(StaticIdx, 0, <<>>, Timestamp),
case rocksdb:get(DB, CF, DSKey, _ReadOpts = []) of
{ok, Val} ->
{ok, TopicStructure} = emqx_ds_lts:reverse_lookup(Trie, StaticIdx),
Msg = deserialize(S, Val),
enrich(Shard, S, TopicStructure, DSKey, Msg);
not_found ->
not_found;
{error, Reason} ->
{error, unrecoverable, Reason}
end;
undefined ->
not_found
end.
%%================================================================================
%% Internal exports
%%================================================================================
@ -330,12 +374,18 @@ serialize(#s{serialization_schema = SSchema, with_guid = WithGuid}, Varying, Msg
},
emqx_ds_msg_serializer:serialize(SSchema, Msg).
enrich(#ctx{shard = Shard, s = S, topic_structure = TopicStructure}, DSKey, Msg0) ->
enrich(Shard, S, TopicStructure, DSKey, Msg0).
enrich(
#ctx{shard = Shard, topic_structure = Structure, s = #s{with_guid = WithGuid}},
Shard,
#s{with_guid = WithGuid},
TopicStructure,
DSKey,
Msg0
) ->
Topic = emqx_topic:join(emqx_ds_lts:decompress_topic(Structure, words(Msg0#message.topic))),
Tokens = words(Msg0#message.topic),
Topic = emqx_topic:join(emqx_ds_lts:decompress_topic(TopicStructure, Tokens)),
Msg0#message{
topic = Topic,
id =
@ -584,6 +634,16 @@ mk_index(Batch, CF, HashBytes, Static, Timestamp, N, [TopicLevel | Varying]) ->
mk_index(_Batch, _CF, _HashBytes, _Static, _Timestamp, _N, []) ->
ok.
+delete_index(Batch, CF, HashBytes, Static, Varying, Timestamp) ->
+    delete_index(Batch, CF, HashBytes, Static, Timestamp, 1, Varying).
+
+delete_index(Batch, CF, HashBytes, Static, Timestamp, N, [TopicLevel | Varying]) ->
+    Key = mk_key(Static, N, hash(HashBytes, TopicLevel), Timestamp),
+    ok = rocksdb:batch_delete(Batch, CF, Key),
+    delete_index(Batch, CF, HashBytes, Static, Timestamp, N + 1, Varying);
+delete_index(_Batch, _CF, _HashBytes, _Static, _Timestamp, _N, []) ->
+    ok.
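Note that `delete_index/6` mirrors `mk_index/6` key for key: one `rocksdb:batch_delete/3` per index entry that `mk_index` wrote. As a sketch, a record with static index S, varying levels [L1, L2] and timestamp T occupies the following keys (per the `mk_key/4` calls in this changeset):

%% mk_key(S, 0, <<>>, T)                  master record (batch_put / batch_delete)
%% mk_key(S, 1, hash(HashBytes, L1), T)   index entry for the 1st varying level
%% mk_key(S, 2, hash(HashBytes, L2), T)   index entry for the 2nd varying level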
%%%%%%%% Keys %%%%%%%%%%
get_key_range(StaticIdx, WildcardIdx, Hash) ->


@@ -18,11 +18,14 @@
-compile(export_all).
-compile(nowarn_export_all).
-include("emqx_ds.hrl").
-include("../../emqx/include/emqx.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("stdlib/include/assert.hrl").
+-define(assertSameSet(A, B), ?assertEqual(lists:sort(A), lists:sort(B))).
+-define(FUTURE, (1 bsl 64 - 1)).
-define(SHARD, shard(?FUNCTION_NAME)).
@@ -66,6 +69,30 @@ t_store(_Config) ->
},
?assertMatch(ok, emqx_ds:store_batch(?FUNCTION_NAME, [Msg])).
+%% Smoke test of applying batch operations
+t_operations(db_config, _Config) ->
+    #{force_monotonic_timestamps => false}.
+
+t_operations(_Config) ->
+    Batch1 = [
+        make_message(100, <<"t/1">>, <<"M1">>),
+        make_message(200, <<"t/2">>, <<"M2">>),
+        make_message(300, <<"t/3">>, <<"M3">>)
+    ],
+    Batch2 = [
+        make_deletion(200, <<"t/2">>, <<"M2">>),
+        make_deletion(300, <<"t/3">>, '_'),
+        make_deletion(400, <<"t/4">>, '_')
+    ],
+    ?assertEqual(ok, emqx_ds:store_batch(?FUNCTION_NAME, Batch1)),
+    ?assertEqual(ok, emqx_ds:store_batch(?FUNCTION_NAME, Batch2)),
+    ?assertMatch(
+        [
+            #message{timestamp = 100, topic = <<"t/1">>, payload = <<"M1">>}
+        ],
+        dump_messages(?SHARD, <<"t/#">>, 0)
+    ).
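Reading of the assertion above (hedged): each deletion targets a (topic, timestamp) pair, with the matcher payload as an extra constraint and '_' reading as "match any payload". M2 is removed by exact payload match, M3 by wildcard, the deletion at t/4 matches nothing and is a no-op, so only M1 survives the two batches.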
%% Smoke test for iteration through a concrete topic
t_iterate(_Config) ->
%% Prepare data:
@@ -124,8 +151,6 @@ t_delete(_Config) ->
?assertNot(is_map_key(TopicToDelete, MessagesByTopic), #{msgs => MessagesByTopic}),
?assertEqual(20, length(Messages)).
--define(assertSameSet(A, B), ?assertEqual(lists:sort(A), lists:sort(B))).
%% Smoke test that verifies that concrete topics are mapped to
%% individual streams, unless there's too many of them.
t_get_streams(Config) ->
@@ -417,79 +442,26 @@ dump_stream(Shard, Stream, TopicFilter, StartTime) ->
%% || Topic <- Topics, PublishedAt <- Timestamps
%% ].
-%% t_iterate_multigen(_Config) ->
-%%     {ok, 1} = emqx_ds_storage_layer:create_generation(?SHARD, 10, ?COMPACT_CONFIG),
-%%     {ok, 2} = emqx_ds_storage_layer:create_generation(?SHARD, 50, ?DEFAULT_CONFIG),
-%%     {ok, 3} = emqx_ds_storage_layer:create_generation(?SHARD, 1000, ?DEFAULT_CONFIG),
-%%     Topics = ["foo/bar", "foo/bar/baz", "a", "a/bar"],
-%%     Timestamps = lists:seq(1, 100),
-%%     _ = [
-%%         store(?SHARD, PublishedAt, Topic, term_to_binary({Topic, PublishedAt}))
-%%         || Topic <- Topics, PublishedAt <- Timestamps
-%%     ],
-%%     ?assertEqual(
-%%         lists:sort([
-%%             {Topic, PublishedAt}
-%%             || Topic <- ["foo/bar", "foo/bar/baz"], PublishedAt <- Timestamps
-%%         ]),
-%%         lists:sort([binary_to_term(Payload) || Payload <- iterate(?SHARD, "foo/#", 0)])
-%%     ),
-%%     ?assertEqual(
-%%         lists:sort([
-%%             {Topic, PublishedAt}
-%%             || Topic <- ["a", "a/bar"], PublishedAt <- lists:seq(60, 100)
-%%         ]),
-%%         lists:sort([binary_to_term(Payload) || Payload <- iterate(?SHARD, "a/#", 60)])
-%%     ).
-%% t_iterate_multigen_preserve_restore(_Config) ->
-%%     ReplayID = atom_to_binary(?FUNCTION_NAME),
-%%     {ok, 1} = emqx_ds_storage_layer:create_generation(?SHARD, 10, ?COMPACT_CONFIG),
-%%     {ok, 2} = emqx_ds_storage_layer:create_generation(?SHARD, 50, ?DEFAULT_CONFIG),
-%%     {ok, 3} = emqx_ds_storage_layer:create_generation(?SHARD, 100, ?DEFAULT_CONFIG),
-%%     Topics = ["foo/bar", "foo/bar/baz", "a/bar"],
-%%     Timestamps = lists:seq(1, 100),
-%%     TopicFilter = "foo/#",
-%%     TopicsMatching = ["foo/bar", "foo/bar/baz"],
-%%     _ = [
-%%         store(?SHARD, TS, Topic, term_to_binary({Topic, TS}))
-%%         || Topic <- Topics, TS <- Timestamps
-%%     ],
-%%     It0 = iterator(?SHARD, TopicFilter, 0),
-%%     {It1, Res10} = iterate(It0, 10),
-%%     % preserve mid-generation
-%%     ok = emqx_ds_storage_layer:preserve_iterator(It1, ReplayID),
-%%     {ok, It2} = emqx_ds_storage_layer:restore_iterator(?SHARD, ReplayID),
-%%     {It3, Res100} = iterate(It2, 88),
-%%     % preserve on the generation boundary
-%%     ok = emqx_ds_storage_layer:preserve_iterator(It3, ReplayID),
-%%     {ok, It4} = emqx_ds_storage_layer:restore_iterator(?SHARD, ReplayID),
-%%     {It5, Res200} = iterate(It4, 1000),
-%%     ?assertEqual({end_of_stream, []}, iterate(It5, 1)),
-%%     ?assertEqual(
-%%         lists:sort([{Topic, TS} || Topic <- TopicsMatching, TS <- Timestamps]),
-%%         lists:sort([binary_to_term(Payload) || Payload <- Res10 ++ Res100 ++ Res200])
-%%     ),
-%%     ?assertEqual(
-%%         ok,
-%%         emqx_ds_storage_layer:discard_iterator(?SHARD, ReplayID)
-%%     ),
-%%     ?assertEqual(
-%%         {error, not_found},
-%%         emqx_ds_storage_layer:restore_iterator(?SHARD, ReplayID)
-%%     ).
make_message(PublishedAt, Topic, Payload) when is_list(Topic) ->
make_message(PublishedAt, list_to_binary(Topic), Payload);
make_message(PublishedAt, Topic, Payload) when is_binary(Topic) ->
ID = emqx_guid:gen(),
#message{
id = ID,
from = <<?MODULE_STRING>>,
topic = Topic,
timestamp = PublishedAt,
payload = Payload
}.
+make_deletion(Timestamp, Topic, Payload) ->
+    {delete, #message_matcher{
+        from = <<?MODULE_STRING>>,
+        topic = Topic,
+        timestamp = Timestamp,
+        payload = Payload
+    }}.
make_topic(Tokens = [_ | _]) ->
emqx_topic:join([bin(T) || T <- Tokens]).
@@ -535,13 +507,23 @@ end_per_suite(Config) ->
ok.
init_per_testcase(TC, Config) ->
-    ok = emqx_ds:open_db(TC, ?DB_CONFIG(Config)),
+    ok = emqx_ds:open_db(TC, db_config(TC, Config)),
Config.
end_per_testcase(TC, _Config) ->
emqx_ds:drop_db(TC),
ok.
+db_config(TC, Config) ->
+    ConfigBase = ?DB_CONFIG(Config),
+    SpecificConfig =
+        try
+            ?MODULE:TC(?FUNCTION_NAME, Config)
+        catch
+            error:undef -> #{}
+        end,
+    maps:merge(ConfigBase, SpecificConfig).
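With this hook, a testcase overrides DB options by exporting a `TC(db_config, Config)` clause, as `t_operations/2` does above; if the clause is missing, the `error:undef` catch falls back to `#{}`. A hypothetical sketch (`t_my_case` is illustrative only):

%% Per-testcase options, merged over ?DB_CONFIG(Config) by db_config/2:
t_my_case(db_config, _Config) ->
    #{force_monotonic_timestamps => false}.

t_my_case(_Config) ->
    %% The body runs against a DB opened with the merged options.
    ok.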
shard(TC) ->
{TC, <<"0">>}.


@@ -377,7 +377,7 @@ nodes_of_clientid(DB, ClientId, Nodes = [N0 | _]) ->
shard_of_clientid(DB, Node, ClientId) ->
?ON(
Node,
-        emqx_ds_buffer:shard_of_message(DB, #message{from = ClientId}, clientid)
+        emqx_ds_buffer:shard_of_operation(DB, #message{from = ClientId}, clientid)
).
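The rename follows the buffer's switch to `emqx_ds:operation()`: batches may now carry deletions as well as messages, so routing is phrased in terms of operations, and this helper presumably relies on a bare `#message{}` still being a valid operation for shard resolution.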
%% Consume eagerly:


@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_gateway_coap, [
{description, "CoAP Gateway"},
{vsn, "0.1.9"},
{vsn, "0.1.10"},
{registered, []},
{applications, [kernel, stdlib, emqx, emqx_gateway]},
{env, []},


@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_gateway_exproto, [
{description, "ExProto Gateway"},
{vsn, "0.1.12"},
{vsn, "0.1.13"},
{registered, []},
{applications, [kernel, stdlib, grpc, emqx, emqx_gateway]},
{env, []},


@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_gateway_gbt32960, [
{description, "GBT32960 Gateway"},
{vsn, "0.1.4"},
{vsn, "0.1.5"},
{registered, []},
{applications, [kernel, stdlib, emqx, emqx_gateway]},
{env, []},


@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_gateway_jt808, [
{description, "JT/T 808 Gateway"},
{vsn, "0.1.0"},
{vsn, "0.1.1"},
{registered, []},
{applications, [kernel, stdlib, emqx, emqx_gateway]},
{env, []},


@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_gateway_mqttsn, [
{description, "MQTT-SN Gateway"},
{vsn, "0.2.2"},
{vsn, "0.2.3"},
{registered, []},
{applications, [kernel, stdlib, emqx, emqx_gateway]},
{env, []},


@@ -3,7 +3,7 @@
{id, "emqx_machine"},
{description, "The EMQX Machine"},
% strict semver, bump manually!
{vsn, "0.3.3"},
{vsn, "0.3.4"},
{modules, []},
{registered, []},
{applications, [kernel, stdlib, emqx_ctl, redbug]},

Some files were not shown because too many files have changed in this diff.