Merge remote-tracking branch 'origin/master' into 0907-sync-master-to-release-52
commit f3bebb0ba2
@@ -1,4 +0,0 @@
-ARG CASSANDRA_TAG=3.11.6
-FROM cassandra:${CASSANDRA_TAG}
-COPY cassandra.yaml /etc/cassandra/cassandra.yaml
-CMD ["cassandra", "-f"]
File diff suppressed because it is too large
@@ -1,32 +1,38 @@
 version: '3.9'

+x-cassandra: &cassandra
+  restart: always
+  image: cassandra:${CASSANDRA_TAG:-3.11.6}
+  environment:
+    CASSANDRA_BROADCAST_ADDRESS: "1.2.3.4"
+    CASSANDRA_RPC_ADDRESS: "0.0.0.0"
+    HEAP_NEWSIZE: "128M"
+    MAX_HEAP_SIZE: "2048M"
+  #ports:
+  # - "9042:9042"
+  # - "9142:9142"
+  command:
+    - /bin/bash
+    - -c
+    - |
+      /opt/cassandra/bin/cassandra -f -R > /cassandra.log &
+      /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"
+      while [[ $$? -ne 0 ]];do sleep 5; /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"; done
+      /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "describe keyspaces;"
+      tail -f /cassandra.log
+  networks:
+    - emqx_bridge
+
 services:
   cassandra_server:
+    <<: *cassandra
     container_name: cassandra
-    build:
-      context: ./cassandra
-      args:
-        CASSANDRA_TAG: ${CASSANDRA_TAG}
-    image: emqx-cassandra
-    restart: always
-    environment:
-      CASSANDRA_BROADCAST_ADDRESS: "1.2.3.4"
-      CASSANDRA_RPC_ADDRESS: "0.0.0.0"
-      HEAP_NEWSIZE: "128M"
-      MAX_HEAP_SIZE: "2048M"
     volumes:
       - ./certs:/certs
-    #ports:
-    # - "9042:9042"
-    # - "9142:9142"
-    command:
-      - /bin/bash
-      - -c
-      - |
-        /opt/cassandra/bin/cassandra -f -R > /cassandra.log &
-        /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"
-        while [[ $$? -ne 0 ]];do sleep 5; /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "CREATE KEYSPACE mqtt WITH REPLICATION = { 'class':'SimpleStrategy','replication_factor':1};"; done
-        /opt/cassandra/bin/cqlsh -u cassandra -p cassandra -e "describe keyspaces;"
-        tail -f /cassandra.log
-    networks:
-      - emqx_bridge
+      - ./cassandra/cassandra.yaml:/etc/cassandra/cassandra.yaml
+  cassandra_noauth_server:
+    <<: *cassandra
+    container_name: cassandra_noauth
+    volumes:
+      - ./certs:/certs
+      - ./cassandra/cassandra_noauth.yaml:/etc/cassandra/cassandra.yaml
@@ -18,7 +18,7 @@ services:
       - /tmp/emqx-ci/emqx-shared-secret:/var/lib/secret
   kdc:
     hostname: kdc.emqx.net
-    image: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04
+    image: ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu20.04
     container_name: kdc.emqx.net
     expose:
       - 88 # kdc
@@ -3,7 +3,7 @@ version: '3.9'
 services:
   erlang:
     container_name: erlang
-    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04}
+    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu20.04}
     env_file:
       - conf.env
     environment:
@@ -3,7 +3,7 @@ inputs:
   profile: # emqx, emqx-enterprise
     required: true
    type: string
-  otp: # 25.3.2-1
+  otp: # 25.3.2-2
    required: true
    type: string
  os:
@@ -17,7 +17,7 @@ env:
 jobs:
   sanity-checks:
     runs-on: ${{ github.repository_owner == 'emqx' && 'aws-amd64' || 'ubuntu-22.04' }}
-    container: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
+    container: "ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04"
     outputs:
       ct-matrix: ${{ steps.matrix.outputs.ct-matrix }}
       ct-host: ${{ steps.matrix.outputs.ct-host }}
@@ -25,9 +25,9 @@ jobs:
       version-emqx: ${{ steps.matrix.outputs.version-emqx }}
       version-emqx-enterprise: ${{ steps.matrix.outputs.version-emqx-enterprise }}
       runner: ${{ github.repository_owner == 'emqx' && 'aws-amd64' || 'ubuntu-22.04' }}
-      builder: "ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04"
-      builder_vsn: "5.1-3"
-      otp_vsn: "25.3.2-1"
+      builder: "ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04"
+      builder_vsn: "5.1-4"
+      otp_vsn: "25.3.2-2"
       elixir_vsn: "1.14.5"

     steps:
@@ -93,13 +93,13 @@ jobs:
           MATRIX="$(echo "${APPS}" | jq -c '
             [
               (.[] | select(.profile == "emqx") | . + {
-                builder: "5.1-3",
-                otp: "25.3.2-1",
+                builder: "5.1-4",
+                otp: "25.3.2-2",
                 elixir: "1.14.5"
               }),
               (.[] | select(.profile == "emqx-enterprise") | . + {
-                builder: "5.1-3",
-                otp: ["25.3.2-1"][],
+                builder: "5.1-4",
+                otp: ["25.3.2-2"][],
                 elixir: "1.14.5"
               })
             ]
@@ -21,7 +21,7 @@ env:
 jobs:
   prepare:
     runs-on: ${{ github.repository_owner == 'emqx' && 'aws-amd64' || 'ubuntu-22.04' }}
-    container: 'ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04'
+    container: 'ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04'
     outputs:
       profile: ${{ steps.parse-git-ref.outputs.profile }}
       release: ${{ steps.parse-git-ref.outputs.release }}
@@ -31,9 +31,9 @@ jobs:
       ct-host: ${{ steps.matrix.outputs.ct-host }}
       ct-docker: ${{ steps.matrix.outputs.ct-docker }}
       runner: ${{ github.repository_owner == 'emqx' && 'aws-amd64' || 'ubuntu-22.04' }}
-      builder: 'ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04'
-      builder_vsn: '5.1-3'
-      otp_vsn: '25.3.2-1'
+      builder: 'ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04'
+      builder_vsn: '5.1-4'
+      otp_vsn: '25.3.2-2'
       elixir_vsn: '1.14.5'

     steps:
@@ -64,13 +64,13 @@ jobs:
           MATRIX="$(echo "${APPS}" | jq -c '
             [
               (.[] | select(.profile == "emqx") | . + {
-                builder: "5.1-3",
-                otp: "25.3.2-1",
+                builder: "5.1-4",
+                otp: "25.3.2-2",
                 elixir: "1.14.5"
               }),
               (.[] | select(.profile == "emqx-enterprise") | . + {
-                builder: "5.1-3",
-                otp: ["25.3.2-1"][],
+                builder: "5.1-4",
+                otp: ["25.3.2-2"][],
                 elixir: "1.14.5"
               })
             ]
@@ -61,7 +61,7 @@ on:
       otp_vsn:
         required: false
         type: string
-        default: '25.3.2-1'
+        default: '25.3.2-2'
       elixir_vsn:
         required: false
         type: string
|
||||||
builder_vsn:
|
builder_vsn:
|
||||||
required: false
|
required: false
|
||||||
type: string
|
type: string
|
||||||
default: '5.1-3'
|
default: '5.1-4'
|
||||||
runner:
|
runner:
|
||||||
required: false
|
required: false
|
||||||
type: string
|
type: string
|
||||||
|
|
|
@@ -57,7 +57,7 @@ on:
       otp_vsn:
         required: false
         type: string
-        default: '25.3.2-1'
+        default: '25.3.2-2'
       elixir_vsn:
         required: false
         type: string
@@ -69,7 +69,7 @@ on:
       builder_vsn:
         required: false
         type: string
-        default: '5.1-3'
+        default: '5.1-4'

 jobs:
   windows:
@@ -24,7 +24,7 @@ jobs:
           - ['emqx-enterprise', 'release-51']
           - ['emqx-enterprise', 'release-52']
         otp:
-          - 25.3.2-1
+          - 25.3.2-2
         arch:
           - amd64
         os:
@@ -32,7 +32,7 @@ jobs:
          - ubuntu22.04
          - amzn2023
        builder:
-          - 5.1-3
+          - 5.1-4
        elixir:
          - 1.14.5

@@ -99,7 +99,7 @@ jobs:
         branch:
           - master
         otp:
-          - 25.3.2-1
+          - 25.3.2-2
         os:
           - macos-13
           - macos-12-arm64
@@ -34,15 +34,15 @@ on:
       builder:
         required: false
         type: string
-        default: 'ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu22.04'
+        default: 'ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04'
       builder_vsn:
         required: false
         type: string
-        default: '5.1-3'
+        default: '5.1-4'
       otp_vsn:
         required: false
         type: string
-        default: '25.3.2-1'
+        default: '25.3.2-2'
       elixir_vsn:
         required: false
         type: string
@@ -58,8 +58,8 @@ jobs:
       fail-fast: false
       matrix:
         profile:
-          - ["emqx", "25.3.2-1", "ubuntu20.04", "elixir"]
-          - ["emqx-enterprise", "25.3.2-1", "ubuntu20.04", "erlang"]
+          - ["emqx", "25.3.2-2", "ubuntu20.04", "elixir"]
+          - ["emqx-enterprise", "25.3.2-2", "ubuntu20.04", "erlang"]

     container: "ghcr.io/emqx/emqx-builder/${{ inputs.builder_vsn }}:${{ inputs.elixir_vsn }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}"

@@ -18,7 +18,7 @@ jobs:
       contents: read
       security-events: write
     container:
-      image: ghcr.io/emqx/emqx-builder/5.1-1:1.14.5-25.3.2-1-ubuntu22.04
+      image: ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu22.04

     strategy:
       fail-fast: false
@@ -23,7 +23,7 @@ jobs:
   prepare:
     runs-on: ubuntu-latest
     if: github.repository_owner == 'emqx'
-    container: ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-ubuntu20.04
     outputs:
       BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }}
       PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }}
@@ -29,6 +29,7 @@ env:
 jobs:
   eunit_and_proper:
     runs-on: ${{ inputs.runner }}
+    name: "eunit_and_proper (${{ matrix.profile }})"
     strategy:
       fail-fast: false
       matrix:
@@ -69,6 +70,7 @@ jobs:

   ct_docker:
     runs-on: ${{ inputs.runner }}
+    name: "ct_docker (${{ matrix.app }}-${{ matrix.suitegroup }})"
     strategy:
       fail-fast: false
       matrix:
@@ -116,6 +118,7 @@ jobs:

   ct:
     runs-on: ${{ inputs.runner }}
+    name: "ct (${{ matrix.app }}-${{ matrix.suitegroup }})"
     strategy:
       fail-fast: false
       matrix:
@@ -155,6 +158,17 @@ jobs:
           name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }}-sg${{ matrix.suitegroup }}
           path: _build/test/logs

+  tests_passed:
+    needs:
+      - eunit_and_proper
+      - ct
+      - ct_docker
+    runs-on: ${{ inputs.runner }}
+    strategy:
+      fail-fast: false
+    steps:
+      - run: echo "All tests passed"
+
   make_cover:
     needs:
       - eunit_and_proper
@@ -23,6 +23,7 @@ env:
 jobs:
   static_checks:
     runs-on: ${{ inputs.runner }}
+    name: "static_checks (${{ matrix.profile }})"
     strategy:
       fail-fast: false
       matrix:
@@ -1,2 +1,2 @@
-erlang 25.3.2-1
+erlang 25.3.2-2
 elixir 1.14.5-otp-25
Makefile
@@ -2,7 +2,7 @@ REBAR = $(CURDIR)/rebar3
 BUILD = $(CURDIR)/build
 SCRIPTS = $(CURDIR)/scripts
 export EMQX_RELUP ?= true
-export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.1-3:1.14.5-25.3.2-1-debian11
+export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.1-4:1.14.5-25.3.2-2-debian11
 export EMQX_DEFAULT_RUNNER = debian:11-slim
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
@@ -296,7 +296,7 @@ $(foreach tt,$(ALL_ELIXIR_TGZS),$(eval $(call gen-elixir-tgz-target,$(tt))))

 .PHONY: fmt
 fmt: $(REBAR)
-	@$(SCRIPTS)/erlfmt -w '{apps,lib-ee}/*/{src,include,priv,test}/**/*.{erl,hrl,app.src,eterm}'
+	@$(SCRIPTS)/erlfmt -w '{apps,lib-ee}/*/{src,include,priv,test,integration_test}/**/*.{erl,hrl,app.src,eterm}'
 	@$(SCRIPTS)/erlfmt -w 'rebar.config.erl'
 	@mix format

@@ -23,7 +23,6 @@
 -define(SHARED_SUB_SHARD, emqx_shared_sub_shard).
 -define(CM_SHARD, emqx_cm_shard).
 -define(ROUTE_SHARD, route_shard).
--define(PERSISTENT_SESSION_SHARD, emqx_persistent_session_shard).

 %% Banner
 %%--------------------------------------------------------------------
@@ -92,7 +91,7 @@

 -record(route, {
     topic :: binary(),
-    dest :: node() | {binary(), node()} | emqx_session:sessionID()
+    dest :: node() | {binary(), node()} | emqx_session:session_id()
 }).

 %%--------------------------------------------------------------------
@@ -41,4 +41,9 @@
     will_msg
 ]).

+-define(REPLY_OUTGOING(Packets), {outgoing, Packets}).
+-define(REPLY_CONNACK(Packet), {connack, Packet}).
+-define(REPLY_EVENT(StateOrEvent), {event, StateOrEvent}).
+-define(REPLY_CLOSE(Reason), {close, Reason}).
+
 -define(EXPIRE_INTERVAL_INFINITE, 4294967295000).
@@ -32,7 +32,7 @@
 %% `apps/emqx/src/bpapi/README.md'

 %% Opensource edition
--define(EMQX_RELEASE_CE, "5.1.5-build.3").
+-define(EMQX_RELEASE_CE, "5.1.6").

 %% Enterprise edition
 -define(EMQX_RELEASE_EE, "5.2.0").
@@ -0,0 +1,59 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2017-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-ifndef(EMQX_SESSION_HRL).
+-define(EMQX_SESSION_HRL, true).
+
+-record(session, {
+    %% Client's id
+    clientid :: emqx_types:clientid(),
+    id :: emqx_session:session_id(),
+    %% Is this session a persistent session i.e. was it started with Session-Expiry > 0
+    is_persistent :: boolean(),
+    %% Client’s Subscriptions.
+    subscriptions :: map(),
+    %% Max subscriptions allowed
+    max_subscriptions :: non_neg_integer() | infinity,
+    %% Upgrade QoS?
+    upgrade_qos :: boolean(),
+    %% Client <- Broker: QoS1/2 messages sent to the client but
+    %% have not been unacked.
+    inflight :: emqx_inflight:inflight(),
+    %% All QoS1/2 messages published to when client is disconnected,
+    %% or QoS1/2 messages pending transmission to the Client.
+    %%
+    %% Optionally, QoS0 messages pending transmission to the Client.
+    mqueue :: emqx_mqueue:mqueue(),
+    %% Next packet id of the session
+    next_pkt_id = 1 :: emqx_types:packet_id(),
+    %% Retry interval for redelivering QoS1/2 messages (Unit: millisecond)
+    retry_interval :: timeout(),
+    %% Client -> Broker: QoS2 messages received from the client, but
+    %% have not been completely acknowledged
+    awaiting_rel :: map(),
+    %% Maximum number of awaiting QoS2 messages allowed
+    max_awaiting_rel :: non_neg_integer() | infinity,
+    %% Awaiting PUBREL Timeout (Unit: millisecond)
+    await_rel_timeout :: timeout(),
+    %% Created at
+    created_at :: pos_integer(),
+    %% Topic filter to iterator ID mapping.
+    %% Note: we shouldn't serialize this when persisting sessions, as this information
+    %% also exists in the `?ITERATOR_REF_TAB' table.
+    iterators = #{} :: #{emqx_topic:topic() => emqx_ds:iterator_id()}
+}).
+
+-endif.
@@ -0,0 +1,340 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_ds_SUITE).
+
+-compile(export_all).
+-compile(nowarn_export_all).
+
+-include_lib("stdlib/include/assert.hrl").
+-include_lib("common_test/include/ct.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+-include_lib("emqx/include/emqx_mqtt.hrl").
+
+-define(DS_SHARD, <<"local">>).
+-define(ITERATOR_REF_TAB, emqx_ds_iterator_ref).
+
+%%------------------------------------------------------------------------------
+%% CT boilerplate
+%%------------------------------------------------------------------------------
+
+all() ->
+    emqx_common_test_helpers:all(?MODULE).
+
+init_per_suite(Config) ->
+    TCApps = emqx_cth_suite:start(
+        app_specs(),
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
+    ),
+    [{tc_apps, TCApps} | Config].
+
+end_per_suite(Config) ->
+    TCApps = ?config(tc_apps, Config),
+    emqx_cth_suite:stop(TCApps),
+    ok.
+
+init_per_testcase(TestCase, Config) when
+    TestCase =:= t_session_subscription_idempotency;
+    TestCase =:= t_session_unsubscription_idempotency
+->
+    Cluster = cluster(#{n => 1}),
+    ClusterOpts = #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)},
+    NodeSpecs = emqx_cth_cluster:mk_nodespecs(Cluster, ClusterOpts),
+    Nodes = emqx_cth_cluster:start(Cluster, ClusterOpts),
+    [
+        {cluster, Cluster},
+        {node_specs, NodeSpecs},
+        {cluster_opts, ClusterOpts},
+        {nodes, Nodes}
+        | Config
+    ];
+init_per_testcase(_TestCase, Config) ->
+    Config.
+
+end_per_testcase(TestCase, Config) when
+    TestCase =:= t_session_subscription_idempotency;
+    TestCase =:= t_session_unsubscription_idempotency
+->
+    Nodes = ?config(nodes, Config),
+    ok = emqx_cth_cluster:stop(Nodes),
+    ok;
+end_per_testcase(_TestCase, _Config) ->
+    ok.
+
+%%------------------------------------------------------------------------------
+%% Helper fns
+%%------------------------------------------------------------------------------
+
+cluster(#{n := N}) ->
+    Node1 = ds_SUITE1,
+    Spec = #{
+        role => core,
+        join_to => emqx_cth_cluster:node_name(Node1),
+        apps => app_specs()
+    },
+    [
+        {Node1, Spec}
+        | lists:map(
+            fun(M) ->
+                Name = binary_to_atom(<<"ds_SUITE", (integer_to_binary(M))/binary>>),
+                {Name, Spec}
+            end,
+            lists:seq(2, N)
+        )
+    ].
+
+app_specs() ->
+    [
+        emqx_durable_storage,
+        {emqx, #{
+            before_start => fun() ->
+                emqx_app:set_config_loader(?MODULE)
+            end,
+            config => #{persistent_session_store => #{ds => true}},
+            override_env => [{boot_modules, [broker, listeners]}]
+        }}
+    ].
+
+get_mqtt_port(Node, Type) ->
+    {_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]),
+    Port.
+
+get_all_iterator_refs(Node) ->
+    erpc:call(Node, mnesia, dirty_all_keys, [?ITERATOR_REF_TAB]).
+
+get_all_iterator_ids(Node) ->
+    Fn = fun(K, _V, Acc) -> [K | Acc] end,
+    erpc:call(Node, fun() ->
+        emqx_ds_storage_layer:foldl_iterator_prefix(?DS_SHARD, <<>>, Fn, [])
+    end).
+
+get_session_iterators(Node, ClientId) ->
+    erpc:call(Node, fun() ->
+        [ConnPid] = emqx_cm:lookup_channels(ClientId),
+        emqx_connection:info({channel, {session, iterators}}, sys:get_state(ConnPid))
+    end).
+
+wait_nodeup(Node) ->
+    ?retry(
+        _Sleep0 = 500,
+        _Attempts0 = 50,
+        pong = net_adm:ping(Node)
+    ).
+
+wait_gen_rpc_down(_NodeSpec = #{apps := Apps}) ->
+    #{override_env := Env} = proplists:get_value(gen_rpc, Apps),
+    Port = proplists:get_value(tcp_server_port, Env),
+    ?retry(
+        _Sleep0 = 500,
+        _Attempts0 = 50,
+        false = emqx_common_test_helpers:is_tcp_server_available("127.0.0.1", Port)
+    ).
+
+%%------------------------------------------------------------------------------
+%% Testcases
+%%------------------------------------------------------------------------------
+
+t_session_subscription_idempotency(Config) ->
+    [Node1Spec | _] = ?config(node_specs, Config),
+    [Node1] = ?config(nodes, Config),
+    Port = get_mqtt_port(Node1, tcp),
+    SubTopicFilter = <<"t/+">>,
+    ClientId = <<"myclientid">>,
+    ?check_trace(
+        begin
+            ?force_ordering(
+                #{?snk_kind := persistent_session_ds_iterator_added},
+                _NEvents0 = 1,
+                #{?snk_kind := will_restart_node},
+                _Guard0 = true
+            ),
+            ?force_ordering(
+                #{?snk_kind := restarted_node},
+                _NEvents1 = 1,
+                #{?snk_kind := persistent_session_ds_open_iterators, ?snk_span := start},
+                _Guard1 = true
+            ),
+
+            spawn_link(fun() ->
+                ?tp(will_restart_node, #{}),
+                ct:pal("restarting node ~p", [Node1]),
+                true = monitor_node(Node1, true),
+                ok = erpc:call(Node1, init, restart, []),
+                receive
+                    {nodedown, Node1} ->
+                        ok
+                after 10_000 ->
+                    ct:fail("node ~p didn't stop", [Node1])
+                end,
+                ct:pal("waiting for nodeup ~p", [Node1]),
+                wait_nodeup(Node1),
+                wait_gen_rpc_down(Node1Spec),
+                ct:pal("restarting apps on ~p", [Node1]),
+                Apps = maps:get(apps, Node1Spec),
+                ok = erpc:call(Node1, emqx_cth_suite, load_apps, [Apps]),
+                _ = erpc:call(Node1, emqx_cth_suite, start_apps, [Apps, Node1Spec]),
+                %% have to re-inject this so that we may stop the node succesfully at the
+                %% end....
+                ok = emqx_cth_cluster:set_node_opts(Node1, Node1Spec),
+                ok = snabbkaffe:forward_trace(Node1),
+                ct:pal("node ~p restarted", [Node1]),
+                ?tp(restarted_node, #{}),
+                ok
+            end),
+
+            ct:pal("starting 1"),
+            {ok, Client0} = emqtt:start_link([
+                {port, Port},
+                {clientid, ClientId},
+                {proto_ver, v5}
+            ]),
+            {ok, _} = emqtt:connect(Client0),
+            ct:pal("subscribing 1"),
+            process_flag(trap_exit, true),
+            catch emqtt:subscribe(Client0, SubTopicFilter, qos2),
+            receive
+                {'EXIT', {shutdown, _}} ->
+                    ok
+            after 0 -> ok
+            end,
+            process_flag(trap_exit, false),
+
+            {ok, _} = ?block_until(#{?snk_kind := restarted_node}, 15_000),
+            ct:pal("starting 2"),
+            {ok, Client1} = emqtt:start_link([
+                {port, Port},
+                {clientid, ClientId},
+                {proto_ver, v5}
+            ]),
+            {ok, _} = emqtt:connect(Client1),
+            ct:pal("subscribing 2"),
+            {ok, _, [2]} = emqtt:subscribe(Client1, SubTopicFilter, qos2),
+            SessionIterators = get_session_iterators(Node1, ClientId),
+
+            ok = emqtt:stop(Client1),
+
+            #{session_iterators => SessionIterators}
+        end,
+        fun(Res, Trace) ->
+            ct:pal("trace:\n ~p", [Trace]),
+            #{session_iterators := SessionIterators} = Res,
+            %% Exactly one iterator should have been opened.
+            ?assertEqual(1, map_size(SessionIterators), #{iterators => SessionIterators}),
+            ?assertMatch(#{SubTopicFilter := _}, SessionIterators),
+            SubTopicFilterWords = emqx_topic:words(SubTopicFilter),
+            ?assertEqual([{ClientId, SubTopicFilterWords}], get_all_iterator_refs(Node1)),
+            ?assertMatch({ok, [_]}, get_all_iterator_ids(Node1)),
+            ?assertMatch(
+                {_IsNew = false, ClientId},
+                erpc:call(Node1, emqx_ds, session_open, [ClientId])
+            ),
+            ok
+        end
+    ),
+    ok.
+
+%% Check that we close the iterators before deleting the iterator id entry.
+t_session_unsubscription_idempotency(Config) ->
+    [Node1Spec | _] = ?config(node_specs, Config),
+    [Node1] = ?config(nodes, Config),
+    Port = get_mqtt_port(Node1, tcp),
+    SubTopicFilter = <<"t/+">>,
+    ClientId = <<"myclientid">>,
+    ?check_trace(
+        begin
+            ?force_ordering(
+                #{?snk_kind := persistent_session_ds_close_iterators, ?snk_span := {complete, _}},
+                _NEvents0 = 1,
+                #{?snk_kind := will_restart_node},
+                _Guard0 = true
+            ),
+            ?force_ordering(
+                #{?snk_kind := restarted_node},
+                _NEvents1 = 1,
+                #{?snk_kind := persistent_session_ds_iterator_delete, ?snk_span := start},
+                _Guard1 = true
+            ),
+
+            spawn_link(fun() ->
+                ?tp(will_restart_node, #{}),
+                ct:pal("restarting node ~p", [Node1]),
+                true = monitor_node(Node1, true),
+                ok = erpc:call(Node1, init, restart, []),
+                receive
+                    {nodedown, Node1} ->
+                        ok
+                after 10_000 ->
+                    ct:fail("node ~p didn't stop", [Node1])
+                end,
+                ct:pal("waiting for nodeup ~p", [Node1]),
+                wait_nodeup(Node1),
+                wait_gen_rpc_down(Node1Spec),
+                ct:pal("restarting apps on ~p", [Node1]),
+                Apps = maps:get(apps, Node1Spec),
+                ok = erpc:call(Node1, emqx_cth_suite, load_apps, [Apps]),
+                _ = erpc:call(Node1, emqx_cth_suite, start_apps, [Apps, Node1Spec]),
+                %% have to re-inject this so that we may stop the node succesfully at the
+                %% end....
+                ok = emqx_cth_cluster:set_node_opts(Node1, Node1Spec),
+                ok = snabbkaffe:forward_trace(Node1),
+                ct:pal("node ~p restarted", [Node1]),
+                ?tp(restarted_node, #{}),
+                ok
+            end),
+
+            ct:pal("starting 1"),
+            {ok, Client0} = emqtt:start_link([
+                {port, Port},
+                {clientid, ClientId},
+                {proto_ver, v5}
+            ]),
+            {ok, _} = emqtt:connect(Client0),
+            ct:pal("subscribing 1"),
+            {ok, _, [?RC_GRANTED_QOS_2]} = emqtt:subscribe(Client0, SubTopicFilter, qos2),
+            ct:pal("unsubscribing 1"),
+            process_flag(trap_exit, true),
+            catch emqtt:unsubscribe(Client0, SubTopicFilter),
+            receive
+                {'EXIT', {shutdown, _}} ->
+                    ok
+            after 0 -> ok
+            end,
+            process_flag(trap_exit, false),
+
+            {ok, _} = ?block_until(#{?snk_kind := restarted_node}, 15_000),
+            ct:pal("starting 2"),
+            {ok, Client1} = emqtt:start_link([
+                {port, Port},
+                {clientid, ClientId},
+                {proto_ver, v5}
+            ]),
+            {ok, _} = emqtt:connect(Client1),
+            ct:pal("subscribing 2"),
+            {ok, _, [?RC_GRANTED_QOS_2]} = emqtt:subscribe(Client1, SubTopicFilter, qos2),
+            ct:pal("unsubscribing 2"),
+            {{ok, _, [?RC_SUCCESS]}, {ok, _}} =
+                ?wait_async_action(
+                    emqtt:unsubscribe(Client1, SubTopicFilter),
+                    #{
+                        ?snk_kind := persistent_session_ds_iterator_delete,
+                        ?snk_span := {complete, _}
+                    },
+                    15_000
+                ),
+            SessionIterators = get_session_iterators(Node1, ClientId),
+
+            ok = emqtt:stop(Client1),
+
+            #{session_iterators => SessionIterators}
+        end,
+        fun(Res, Trace) ->
+            ct:pal("trace:\n ~p", [Trace]),
+            #{session_iterators := SessionIterators} = Res,
+            %% No iterators remaining
+            ?assertEqual(#{}, SessionIterators),
+            ?assertEqual([], get_all_iterator_refs(Node1)),
+            ?assertEqual({ok, []}, get_all_iterator_ids(Node1)),
+            ok
+        end
+    ),
+    ok.
@@ -47,6 +47,7 @@
 {emqx_node_rebalance_status,1}.
 {emqx_node_rebalance_status,2}.
 {emqx_persistent_session,1}.
+{emqx_persistent_session_ds,1}.
 {emqx_plugins,1}.
 {emqx_prometheus,1}.
 {emqx_resource,1}.
@@ -28,10 +28,10 @@
     {gproc, {git, "https://github.com/emqx/gproc", {tag, "0.9.0.1"}}},
     {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.2"}}},
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}},
-    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.10"}}},
+    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.11"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
     {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.16"}}},
-    {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}},
+    {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.3"}}},
    {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
    {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
    {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.8"}}}
@@ -2,7 +2,7 @@
 {application, emqx, [
     {id, "emqx"},
     {description, "EMQX Core"},
-    {vsn, "5.1.8"},
+    {vsn, "5.1.9"},
     {modules, []},
     {registered, []},
     {applications, [
@@ -18,9 +18,9 @@

 -export([is_enabled/1]).

--define(BOOT_MODULES, [router, broker, listeners]).
+-define(BOOT_MODULES, [broker, listeners]).

--spec is_enabled(all | router | broker | listeners) -> boolean().
+-spec is_enabled(all | broker | listeners) -> boolean().
 is_enabled(Mod) ->
     (BootMods = boot_modules()) =:= all orelse lists:member(Mod, BootMods).

@@ -122,6 +122,7 @@
 -type reply() ::
     {outgoing, emqx_types:packet()}
     | {outgoing, [emqx_types:packet()]}
+    | {connack, emqx_types:packet()}
     | {event, conn_state() | updated}
     | {close, Reason :: atom()}.

|
@ -1023,7 +1024,7 @@ handle_out(publish, [], Channel) ->
|
||||||
{ok, Channel};
|
{ok, Channel};
|
||||||
handle_out(publish, Publishes, Channel) ->
|
handle_out(publish, Publishes, Channel) ->
|
||||||
{Packets, NChannel} = do_deliver(Publishes, Channel),
|
{Packets, NChannel} = do_deliver(Publishes, Channel),
|
||||||
{ok, {outgoing, Packets}, NChannel};
|
{ok, ?REPLY_OUTGOING(Packets), NChannel};
|
||||||
handle_out(puback, {PacketId, ReasonCode}, Channel) ->
|
handle_out(puback, {PacketId, ReasonCode}, Channel) ->
|
||||||
{ok, ?PUBACK_PACKET(PacketId, ReasonCode), Channel};
|
{ok, ?PUBACK_PACKET(PacketId, ReasonCode), Channel};
|
||||||
handle_out(pubrec, {PacketId, ReasonCode}, Channel) ->
|
handle_out(pubrec, {PacketId, ReasonCode}, Channel) ->
|
||||||
|
@@ -1048,7 +1049,7 @@ handle_out(disconnect, {ReasonCode, ReasonName}, Channel) ->
     handle_out(disconnect, {ReasonCode, ReasonName, #{}}, Channel);
 handle_out(disconnect, {ReasonCode, ReasonName, Props}, Channel = ?IS_MQTT_V5) ->
     Packet = ?DISCONNECT_PACKET(ReasonCode, Props),
-    {ok, [{outgoing, Packet}, {close, ReasonName}], Channel};
+    {ok, [?REPLY_OUTGOING(Packet), ?REPLY_CLOSE(ReasonName)], Channel};
 handle_out(disconnect, {_ReasonCode, ReasonName, _Props}, Channel) ->
     {ok, {close, ReasonName}, Channel};
 handle_out(auth, {ReasonCode, Properties}, Channel) ->
@@ -1062,7 +1063,7 @@ handle_out(Type, Data, Channel) ->
 %%--------------------------------------------------------------------

 return_connack(AckPacket, Channel) ->
-    Replies = [{event, connected}, {connack, AckPacket}],
+    Replies = [?REPLY_EVENT(connected), ?REPLY_CONNACK(AckPacket)],
     case maybe_resume_session(Channel) of
         ignore ->
             {ok, Replies, Channel};
@@ -1073,7 +1074,7 @@ return_connack(AckPacket, Channel) ->
                 session = NSession
             },
             {Packets, NChannel2} = do_deliver(Publishes, NChannel1),
-            Outgoing = [{outgoing, Packets} || length(Packets) > 0],
+            Outgoing = [?REPLY_OUTGOING(Packets) || length(Packets) > 0],
             {ok, Replies ++ Outgoing, NChannel2}
     end.

@@ -1121,7 +1122,7 @@ do_deliver(Publishes, Channel) when is_list(Publishes) ->
 %%--------------------------------------------------------------------

 return_sub_unsub_ack(Packet, Channel) ->
-    {ok, [{outgoing, Packet}, {event, updated}], Channel}.
+    {ok, [?REPLY_OUTGOING(Packet), ?REPLY_EVENT(updated)], Channel}.

 %%--------------------------------------------------------------------
 %% Handle call
@@ -1235,7 +1236,7 @@ handle_info(
 ->
     Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(Channel)),
     case maybe_shutdown(Reason, Channel1) of
-        {ok, Channel2} -> {ok, {event, disconnected}, Channel2};
+        {ok, Channel2} -> {ok, ?REPLY_EVENT(disconnected), Channel2};
         Shutdown -> Shutdown
     end;
 handle_info({sock_closed, Reason}, Channel = #channel{conn_state = disconnected}) ->
@@ -1252,6 +1253,11 @@ handle_info({disconnect, ReasonCode, ReasonName, Props}, Channel) ->
     handle_out(disconnect, {ReasonCode, ReasonName, Props}, Channel);
 handle_info({puback, PacketId, PubRes, RC}, Channel) ->
     do_finish_publish(PacketId, PubRes, RC, Channel);
+handle_info({'DOWN', Ref, process, Pid, Reason}, Channel) ->
+    case emqx_hooks:run_fold('client.monitored_process_down', [Ref, Pid, Reason], []) of
+        [] -> {ok, Channel};
+        Msgs -> {ok, Msgs, Channel}
+    end;
 handle_info(Info, Channel) ->
     ?SLOG(error, #{msg => "unexpected_info", info => Info}),
     {ok, Channel}.
@@ -1358,9 +1364,13 @@ handle_timeout(
         {_, Quota2} ->
             {ok, clean_timer(quota_timer, Channel#channel{quota = Quota2})}
     end;
-handle_timeout(_TRef, Msg, Channel) ->
-    ?SLOG(error, #{msg => "unexpected_timeout", timeout_msg => Msg}),
-    {ok, Channel}.
+handle_timeout(TRef, Msg, Channel) ->
+    case emqx_hooks:run_fold('client.timeout', [TRef, Msg], []) of
+        [] ->
+            {ok, Channel};
+        Msgs ->
+            {ok, Msgs, Channel}
+    end.

 %%--------------------------------------------------------------------
 %% Ensure timers
@@ -21,6 +21,7 @@

 -include("emqx.hrl").
 -include("emqx_cm.hrl").
+-include("emqx_session.hrl").
 -include("logger.hrl").
 -include("types.hrl").
 -include_lib("snabbkaffe/include/snabbkaffe.hrl").
@@ -188,7 +189,7 @@ do_unregister_channel({_ClientId, ChanPid} = Chan) ->
     true = ets:delete(?CHAN_CONN_TAB, Chan),
     true = ets:delete(?CHAN_INFO_TAB, Chan),
     ets:delete_object(?CHAN_TAB, Chan),
-    ok = emqx_hooks:run('channel.unregistered', [ChanPid]),
+    ok = emqx_hooks:run('cm.channel.unregistered', [ChanPid]),
     true.

 %% @doc Get info of a channel.
@@ -292,7 +293,7 @@ open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) ->

 create_session(ClientInfo, ConnInfo) ->
     Options = get_session_confs(ClientInfo, ConnInfo),
-    Session = emqx_session:init(Options),
+    Session = emqx_session:init_and_open(Options),
     ok = emqx_metrics:inc('session.created'),
     ok = emqx_hooks:run('session.created', [ClientInfo, emqx_session:info(Session)]),
     Session.
@@ -472,8 +472,8 @@ parse_packet(
 ) ->
     {Properties, <<>>} = parse_properties(Rest, ?MQTT_PROTO_V5, StrictMode),
     #mqtt_packet_auth{reason_code = ReasonCode, properties = Properties};
-parse_packet(_Header, _FrameBin, _Options) ->
-    ?PARSE_ERR(malformed_packet).
+parse_packet(Header, _FrameBin, _Options) ->
+    ?PARSE_ERR(#{hint => malformed_packet, header_type => Header#mqtt_packet_header.type}).

 parse_will_message(
     Packet = #mqtt_packet_connect{
@@ -512,8 +512,16 @@ parse_properties(<<0, Rest/binary>>, ?MQTT_PROTO_V5, _StrictMode) ->
     {#{}, Rest};
 parse_properties(Bin, ?MQTT_PROTO_V5, StrictMode) ->
     {Len, Rest} = parse_variable_byte_integer(Bin),
-    <<PropsBin:Len/binary, Rest1/binary>> = Rest,
-    {parse_property(PropsBin, #{}, StrictMode), Rest1}.
+    case Rest of
+        <<PropsBin:Len/binary, Rest1/binary>> ->
+            {parse_property(PropsBin, #{}, StrictMode), Rest1};
+        _ ->
+            ?PARSE_ERR(#{
+                hint => user_property_not_enough_bytes,
+                parsed_key_length => Len,
+                remaining_bytes_length => byte_size(Rest)
+            })
+    end.

 parse_property(<<>>, Props, _StrictMode) ->
     Props;
@@ -16,15 +16,33 @@

 -module(emqx_persistent_session_ds).

+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+
 -export([init/0]).

--export([persist_message/1]).
+-export([
+    persist_message/1,
+    open_session/1,
+    add_subscription/2,
+    del_subscription/3
+]).

 -export([
     serialize_message/1,
     deserialize_message/1
 ]).

+%% RPC
+-export([
+    ensure_iterator_closed_on_all_shards/1,
+    ensure_all_iterators_closed/1
+]).
+-export([
+    do_open_iterator/3,
+    do_ensure_iterator_closed/1,
+    do_ensure_all_iterators_closed/1
+]).
+
 %% FIXME
 -define(DS_SHARD, <<"local">>).

|
||||||
find_subscribers(_Msg) ->
|
find_subscribers(_Msg) ->
|
||||||
[node()].
|
[node()].
|
||||||
|
|
||||||
|
open_session(ClientID) ->
|
||||||
|
?WHEN_ENABLED(emqx_ds:session_open(ClientID)).
|
||||||
|
|
||||||
|
-spec add_subscription(emqx_types:topic(), emqx_ds:session_id()) ->
|
||||||
|
{ok, emqx_ds:iterator_id(), IsNew :: boolean()} | {skipped, disabled}.
|
||||||
|
add_subscription(TopicFilterBin, DSSessionID) ->
|
||||||
|
?WHEN_ENABLED(
|
||||||
|
begin
|
||||||
|
TopicFilter = emqx_topic:words(TopicFilterBin),
|
||||||
|
{ok, IteratorID, StartMS, IsNew} = emqx_ds:session_add_iterator(
|
||||||
|
DSSessionID, TopicFilter
|
||||||
|
),
|
||||||
|
Ctx = #{
|
||||||
|
iterator_id => IteratorID,
|
||||||
|
start_time => StartMS,
|
||||||
|
is_new => IsNew
|
||||||
|
},
|
||||||
|
?tp(persistent_session_ds_iterator_added, Ctx),
|
||||||
|
?tp_span(
|
||||||
|
persistent_session_ds_open_iterators,
|
||||||
|
Ctx,
|
||||||
|
ok = open_iterator_on_all_shards(TopicFilter, StartMS, IteratorID)
|
||||||
|
),
|
||||||
|
{ok, IteratorID, IsNew}
|
||||||
|
end
|
||||||
|
).
|
||||||
|
|
||||||
|
-spec open_iterator_on_all_shards(emqx_topic:words(), emqx_ds:time(), emqx_ds:iterator_id()) -> ok.
|
||||||
|
open_iterator_on_all_shards(TopicFilter, StartMS, IteratorID) ->
|
||||||
|
?tp(persistent_session_ds_will_open_iterators, #{
|
||||||
|
iterator_id => IteratorID,
|
||||||
|
start_time => StartMS
|
||||||
|
}),
|
||||||
|
%% Note: currently, shards map 1:1 to nodes, but this will change in the future.
|
||||||
|
Nodes = emqx:running_nodes(),
|
||||||
|
Results = emqx_persistent_session_ds_proto_v1:open_iterator(
|
||||||
|
Nodes, TopicFilter, StartMS, IteratorID
|
||||||
|
),
|
||||||
|
%% TODO: handle errors
|
||||||
|
true = lists:all(fun(Res) -> Res =:= {ok, ok} end, Results),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
%% RPC target.
|
||||||
|
-spec do_open_iterator(emqx_topic:words(), emqx_ds:time(), emqx_ds:iterator_id()) -> ok.
|
||||||
|
do_open_iterator(TopicFilter, StartMS, IteratorID) ->
|
||||||
|
Replay = {TopicFilter, StartMS},
|
||||||
|
{ok, _It} = emqx_ds_storage_layer:ensure_iterator(?DS_SHARD, IteratorID, Replay),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
-spec del_subscription(emqx_ds:iterator_id() | undefined, emqx_types:topic(), emqx_ds:session_id()) ->
|
||||||
|
ok | {skipped, disabled}.
|
||||||
|
del_subscription(IteratorID, TopicFilterBin, DSSessionID) ->
|
||||||
|
?WHEN_ENABLED(
|
||||||
|
begin
|
||||||
|
TopicFilter = emqx_topic:words(TopicFilterBin),
|
||||||
|
Ctx = #{iterator_id => IteratorID},
|
||||||
|
?tp_span(
|
||||||
|
persistent_session_ds_close_iterators,
|
||||||
|
Ctx,
|
||||||
|
ok = ensure_iterator_closed_on_all_shards(IteratorID)
|
||||||
|
),
|
||||||
|
?tp_span(
|
||||||
|
persistent_session_ds_iterator_delete,
|
||||||
|
Ctx,
|
||||||
|
emqx_ds:session_del_iterator(DSSessionID, TopicFilter)
|
||||||
|
)
|
||||||
|
end
|
||||||
|
).
|
||||||
|
|
||||||
|
-spec ensure_iterator_closed_on_all_shards(emqx_ds:iterator_id()) -> ok.
|
||||||
|
ensure_iterator_closed_on_all_shards(IteratorID) ->
|
||||||
|
%% Note: currently, shards map 1:1 to nodes, but this will change in the future.
|
||||||
|
Nodes = emqx:running_nodes(),
|
||||||
|
Results = emqx_persistent_session_ds_proto_v1:close_iterator(Nodes, IteratorID),
|
||||||
|
%% TODO: handle errors
|
||||||
|
true = lists:all(fun(Res) -> Res =:= {ok, ok} end, Results),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
%% RPC target.
|
||||||
|
-spec do_ensure_iterator_closed(emqx_ds:iterator_id()) -> ok.
|
||||||
|
do_ensure_iterator_closed(IteratorID) ->
|
||||||
|
ok = emqx_ds_storage_layer:discard_iterator(?DS_SHARD, IteratorID),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
-spec ensure_all_iterators_closed(emqx_ds:session_id()) -> ok.
|
||||||
|
ensure_all_iterators_closed(DSSessionID) ->
|
||||||
|
%% Note: currently, shards map 1:1 to nodes, but this will change in the future.
|
||||||
|
Nodes = emqx:running_nodes(),
|
||||||
|
Results = emqx_persistent_session_ds_proto_v1:close_all_iterators(Nodes, DSSessionID),
|
||||||
|
%% TODO: handle errors
|
||||||
|
true = lists:all(fun(Res) -> Res =:= {ok, ok} end, Results),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
%% RPC target.
|
||||||
|
-spec do_ensure_all_iterators_closed(emqx_ds:session_id()) -> ok.
|
||||||
|
do_ensure_all_iterators_closed(DSSessionID) ->
|
||||||
|
ok = emqx_ds_storage_layer:discard_iterator_prefix(?DS_SHARD, DSSessionID),
|
||||||
|
ok.
|
||||||
|
|
||||||
%%
|
%%
|
||||||
|
|
||||||
serialize_message(Msg) ->
|
serialize_message(Msg) ->
|
||||||
|
|
|
@@ -44,6 +44,7 @@
 -module(emqx_session).

 -include("emqx.hrl").
+-include("emqx_session.hrl").
 -include("emqx_mqtt.hrl").
 -include("logger.hrl").
 -include("types.hrl").
@@ -59,7 +60,7 @@
     unpersist/1
 ]).

--export([init/1]).
+-export([init/1, init_and_open/1]).

 -export([
     info/1,
|
||||||
%% Export for CT
|
%% Export for CT
|
||||||
-export([set_field/3]).
|
-export([set_field/3]).
|
||||||
|
|
||||||
-type sessionID() :: emqx_guid:guid().
|
-type session_id() :: emqx_guid:guid().
|
||||||
|
|
||||||
-export_type([
|
-export_type([
|
||||||
session/0,
|
session/0,
|
||||||
sessionID/0
|
session_id/0
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-record(session, {
|
|
||||||
%% Client's id
|
|
||||||
clientid :: emqx_types:clientid(),
|
|
||||||
id :: sessionID(),
|
|
||||||
%% Is this session a persistent session i.e. was it started with Session-Expiry > 0
|
|
||||||
is_persistent :: boolean(),
|
|
||||||
%% Client’s Subscriptions.
|
|
||||||
subscriptions :: map(),
|
|
||||||
%% Max subscriptions allowed
|
|
||||||
max_subscriptions :: non_neg_integer() | infinity,
|
|
||||||
%% Upgrade QoS?
|
|
||||||
upgrade_qos :: boolean(),
|
|
||||||
%% Client <- Broker: QoS1/2 messages sent to the client but
|
|
||||||
%% have not been unacked.
|
|
||||||
inflight :: emqx_inflight:inflight(),
|
|
||||||
%% All QoS1/2 messages published to when client is disconnected,
|
|
||||||
%% or QoS1/2 messages pending transmission to the Client.
|
|
||||||
%%
|
|
||||||
%% Optionally, QoS0 messages pending transmission to the Client.
|
|
||||||
mqueue :: emqx_mqueue:mqueue(),
|
|
||||||
%% Next packet id of the session
|
|
||||||
next_pkt_id = 1 :: emqx_types:packet_id(),
|
|
||||||
%% Retry interval for redelivering QoS1/2 messages (Unit: millisecond)
|
|
||||||
retry_interval :: timeout(),
|
|
||||||
%% Client -> Broker: QoS2 messages received from the client, but
|
|
||||||
%% have not been completely acknowledged
|
|
||||||
awaiting_rel :: map(),
|
|
||||||
%% Maximum number of awaiting QoS2 messages allowed
|
|
||||||
max_awaiting_rel :: non_neg_integer() | infinity,
|
|
||||||
%% Awaiting PUBREL Timeout (Unit: millisecond)
|
|
||||||
await_rel_timeout :: timeout(),
|
|
||||||
%% Created at
|
|
||||||
created_at :: pos_integer()
|
|
||||||
%% Message deliver latency stats
|
|
||||||
}).
|
|
||||||
|
|
||||||
-type inflight_data_phase() :: wait_ack | wait_comp.
|
-type inflight_data_phase() :: wait_ack | wait_comp.
|
||||||
|
|
||||||
-record(inflight_data, {
|
-record(inflight_data, {
|
||||||
|
@ -201,6 +166,13 @@
|
||||||
%% Init a Session
|
%% Init a Session
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
|
-spec init_and_open(options()) -> session().
|
||||||
|
init_and_open(Options) ->
|
||||||
|
#{clientid := ClientID} = Options,
|
||||||
|
Session0 = emqx_session:init(Options),
|
||||||
|
_ = emqx_persistent_session_ds:open_session(ClientID),
|
||||||
|
Session0.
|
||||||
|
|
||||||
-spec init(options()) -> session().
|
-spec init(options()) -> session().
|
||||||
init(Opts) ->
|
init(Opts) ->
|
||||||
MaxInflight = maps:get(max_inflight, Opts),
|
MaxInflight = maps:get(max_inflight, Opts),
|
||||||
|
@ -297,7 +269,9 @@ info(awaiting_rel_max, #session{max_awaiting_rel = Max}) ->
|
||||||
info(await_rel_timeout, #session{await_rel_timeout = Timeout}) ->
|
info(await_rel_timeout, #session{await_rel_timeout = Timeout}) ->
|
||||||
Timeout;
|
Timeout;
|
||||||
info(created_at, #session{created_at = CreatedAt}) ->
|
info(created_at, #session{created_at = CreatedAt}) ->
|
||||||
CreatedAt.
|
CreatedAt;
|
||||||
|
info(iterators, #session{iterators = Iterators}) ->
|
||||||
|
Iterators.
|
||||||
|
|
||||||
%% @doc Get stats of the session.
|
%% @doc Get stats of the session.
|
||||||
-spec stats(session()) -> emqx_types:stats().
|
-spec stats(session()) -> emqx_types:stats().
|
||||||
|
@ -324,11 +298,13 @@ subscribe(
|
||||||
case IsNew andalso is_subscriptions_full(Session) of
|
case IsNew andalso is_subscriptions_full(Session) of
|
||||||
false ->
|
false ->
|
||||||
ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts),
|
ok = emqx_broker:subscribe(TopicFilter, ClientId, SubOpts),
|
||||||
|
Session1 = Session#session{subscriptions = maps:put(TopicFilter, SubOpts, Subs)},
|
||||||
|
Session2 = add_persistent_subscription(TopicFilter, ClientId, Session1),
|
||||||
ok = emqx_hooks:run(
|
ok = emqx_hooks:run(
|
||||||
'session.subscribed',
|
'session.subscribed',
|
||||||
[ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}]
|
[ClientInfo, TopicFilter, SubOpts#{is_new => IsNew}]
|
||||||
),
|
),
|
||||||
{ok, Session#session{subscriptions = maps:put(TopicFilter, SubOpts, Subs)}};
|
{ok, Session2};
|
||||||
true ->
|
true ->
|
||||||
{error, ?RC_QUOTA_EXCEEDED}
|
{error, ?RC_QUOTA_EXCEEDED}
|
||||||
end.
|
end.
|
||||||
|
@ -341,6 +317,17 @@ is_subscriptions_full(#session{
|
||||||
}) ->
|
}) ->
|
||||||
maps:size(Subs) >= MaxLimit.
|
maps:size(Subs) >= MaxLimit.
|
||||||
|
|
||||||
|
-spec add_persistent_subscription(emqx_types:topic(), emqx_types:clientid(), session()) ->
|
||||||
|
session().
|
||||||
|
add_persistent_subscription(TopicFilterBin, ClientId, Session) ->
|
||||||
|
case emqx_persistent_session_ds:add_subscription(TopicFilterBin, ClientId) of
|
||||||
|
{ok, IteratorId, _IsNew} ->
|
||||||
|
Iterators = Session#session.iterators,
|
||||||
|
Session#session{iterators = Iterators#{TopicFilterBin => IteratorId}};
|
||||||
|
_ ->
|
||||||
|
Session
|
||||||
|
end.
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% Client -> Broker: UNSUBSCRIBE
|
%% Client -> Broker: UNSUBSCRIBE
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
@ -348,23 +335,37 @@ is_subscriptions_full(#session{
|
||||||
-spec unsubscribe(emqx_types:clientinfo(), emqx_types:topic(), emqx_types:subopts(), session()) ->
|
-spec unsubscribe(emqx_types:clientinfo(), emqx_types:topic(), emqx_types:subopts(), session()) ->
|
||||||
{ok, session()} | {error, emqx_types:reason_code()}.
|
{ok, session()} | {error, emqx_types:reason_code()}.
|
||||||
unsubscribe(
|
unsubscribe(
|
||||||
ClientInfo,
|
ClientInfo = #{clientid := ClientId},
|
||||||
TopicFilter,
|
TopicFilter,
|
||||||
UnSubOpts,
|
UnSubOpts,
|
||||||
Session = #session{subscriptions = Subs}
|
Session0 = #session{subscriptions = Subs}
|
||||||
) ->
|
) ->
|
||||||
case maps:find(TopicFilter, Subs) of
|
case maps:find(TopicFilter, Subs) of
|
||||||
{ok, SubOpts} ->
|
{ok, SubOpts} ->
|
||||||
ok = emqx_broker:unsubscribe(TopicFilter),
|
ok = emqx_broker:unsubscribe(TopicFilter),
|
||||||
|
Session1 = remove_persistent_subscription(Session0, TopicFilter, ClientId),
|
||||||
ok = emqx_hooks:run(
|
ok = emqx_hooks:run(
|
||||||
'session.unsubscribed',
|
'session.unsubscribed',
|
||||||
[ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)]
|
[ClientInfo, TopicFilter, maps:merge(SubOpts, UnSubOpts)]
|
||||||
),
|
),
|
||||||
{ok, Session#session{subscriptions = maps:remove(TopicFilter, Subs)}};
|
{ok, Session1#session{subscriptions = maps:remove(TopicFilter, Subs)}};
|
||||||
error ->
|
error ->
|
||||||
{error, ?RC_NO_SUBSCRIPTION_EXISTED}
|
{error, ?RC_NO_SUBSCRIPTION_EXISTED}
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
-spec remove_persistent_subscription(session(), emqx_types:topic(), emqx_types:clientid()) ->
|
||||||
|
session().
|
||||||
|
remove_persistent_subscription(Session, TopicFilterBin, ClientId) ->
|
||||||
|
Iterators = Session#session.iterators,
|
||||||
|
case maps:get(TopicFilterBin, Iterators, undefined) of
|
||||||
|
undefined ->
|
||||||
|
ok;
|
||||||
|
IteratorId ->
|
||||||
|
_ = emqx_persistent_session_ds:del_subscription(IteratorId, TopicFilterBin, ClientId),
|
||||||
|
ok
|
||||||
|
end,
|
||||||
|
Session#session{iterators = maps:remove(TopicFilterBin, Iterators)}.
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% Client -> Broker: PUBLISH
|
%% Client -> Broker: PUBLISH
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
|
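For orientation, here is a minimal sketch of how the new init_and_open/1 entry point could be driven. Only the clientid and max_inflight keys are taken from the diff above; any other option keys a real caller would pass are assumptions, not part of this change.

    %% Illustrative only: the full options map used by channels carries more keys.
    Opts = #{
        clientid => <<"client-1">>,
        max_inflight => 32
    },
    Session = emqx_session:init_and_open(Opts),
    %% init_and_open/1 builds the in-memory session via init/1 and, in addition,
    %% opens a durable-storage session for the client in emqx_persistent_session_ds.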
@@ -21,6 +21,7 @@
 -include("emqx.hrl").
 -include("logger.hrl").
 -include("types.hrl").
+-include("persistent_session/emqx_persistent_session.hrl").

 -include_lib("snabbkaffe/include/snabbkaffe.hrl").
@@ -74,7 +74,7 @@ init([]) ->
     Children =
         [KernelSup] ++
             [SessionSup || emqx_persistent_session:is_store_enabled()] ++
-            [RouterSup || emqx_boot:is_enabled(router)] ++
+            [RouterSup || emqx_boot:is_enabled(broker)] ++
             [BrokerSup || emqx_boot:is_enabled(broker)] ++
             [CMSup || emqx_boot:is_enabled(broker)] ++
             [SysSup, Limiter],
@@ -160,7 +160,7 @@ create(Trace) ->
             end;
         false ->
             {error,
-                "The number of traces created has reache the maximum"
+                "The number of traces created has reached the maximum"
                 " please delete the useless ones first"}
     end.
@@ -371,10 +371,16 @@ start_trace(Trace) ->

 stop_trace(Finished, Started) ->
     lists:foreach(
-        fun(#{name := Name, type := Type, filter := Filter}) ->
+        fun(#{name := Name, id := HandlerID, dst := FilePath, type := Type, filter := Filter}) ->
             case lists:member(Name, Finished) of
                 true ->
-                    ?TRACE("API", "trace_stopping", #{Type => Filter}),
+                    _ = maybe_sync_logfile(HandlerID),
+                    case file:read_file_info(FilePath) of
+                        {ok, #file_info{size = Size}} when Size > 0 ->
+                            ?TRACE("API", "trace_stopping", #{Type => Filter});
+                        _ ->
+                            ok
+                    end,
                     emqx_trace_handler:uninstall(Type, Name);
                 false ->
                     ok

@@ -383,6 +389,19 @@ stop_trace(Finished, Started) ->
         Started
     ).

+maybe_sync_logfile(HandlerID) ->
+    case logger:get_handler_config(HandlerID) of
+        {ok, #{module := Mod}} ->
+            case erlang:function_exported(Mod, filesync, 1) of
+                true ->
+                    Mod:filesync(HandlerID);
+                false ->
+                    ok
+            end;
+        _ ->
+            ok
+    end.
+
 clean_stale_trace_files() ->
     TraceDir = trace_dir(),
     case file:list_dir(TraceDir) of
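The change above only emits the trace_stopping event when the trace file actually contains data, and it syncs the logger handler first so the size check is not racing buffered writes. A standalone sketch of that size check, under the assumption that the caller has `-include_lib("kernel/include/file.hrl").` for the #file_info{} record (the helper name is made up for illustration):

    %% Returns true only when the file exists and has a non-zero size.
    non_empty_file(FilePath) ->
        case file:read_file_info(FilePath) of
            {ok, #file_info{size = Size}} when Size > 0 -> true;
            _ -> false
        end.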
@@ -115,10 +115,10 @@ storage_backend() ->
 %% Session message ADT API
 %%--------------------------------------------------------------------

--spec session_message_info('timestamp' | 'sessionID', sess_msg_key()) -> term().
+-spec session_message_info('timestamp' | 'session_id', sess_msg_key()) -> term().
 session_message_info(timestamp, {_, <<>>, <<TS:64>>, ?ABANDONED}) -> TS;
 session_message_info(timestamp, {_, GUID, _, _}) -> emqx_guid:timestamp(GUID);
-session_message_info(sessionID, {SessionID, _, _, _}) -> SessionID.
+session_message_info(session_id, {SessionID, _, _, _}) -> SessionID.

 %%--------------------------------------------------------------------
 %% DB API

@@ -243,7 +243,7 @@ discard_opt(true, ClientID, Session) ->
     emqx_session_router:delete_routes(SessionID, Subscriptions),
     emqx_session:set_field(is_persistent, false, Session).

--spec mark_resume_begin(emqx_session:sessionID()) -> emqx_guid:guid().
+-spec mark_resume_begin(emqx_session:session_id()) -> emqx_guid:guid().
 mark_resume_begin(SessionID) ->
     MarkerID = emqx_guid:gen(),
     put_session_message({SessionID, MarkerID, <<>>, ?MARKER}),

@@ -396,12 +396,12 @@ do_mark_as_delivered(SessionID, [{deliver, STopic, Msg} | Left]) ->
 do_mark_as_delivered(_SessionID, []) ->
     ok.

--spec pending(emqx_session:sessionID()) ->
+-spec pending(emqx_session:session_id()) ->
     [{emqx_types:message(), STopic :: binary()}].
 pending(SessionID) ->
     pending_messages_in_db(SessionID, []).

--spec pending(emqx_session:sessionID(), MarkerIDs :: [emqx_guid:guid()]) ->
+-spec pending(emqx_session:session_id(), MarkerIDs :: [emqx_guid:guid()]) ->
     [{emqx_types:message(), STopic :: binary()}].
 pending(SessionID, MarkerIds) ->
     %% TODO: Handle lost MarkerIDs

@@ -460,8 +460,8 @@ read_pending_msgs([], Acc) ->
     lists:reverse(Acc).

 %% The keys are ordered by
-%%     {sessionID(), <<>>, bin_timestamp(), ?ABANDONED} For abandoned sessions (clean started or expired).
-%%     {sessionID(), emqx_guid:guid(), STopic :: binary(), ?DELIVERED | ?UNDELIVERED | ?MARKER}
+%%     {session_id(), <<>>, bin_timestamp(), ?ABANDONED} For abandoned sessions (clean started or expired).
+%%     {session_id(), emqx_guid:guid(), STopic :: binary(), ?DELIVERED | ?UNDELIVERED | ?MARKER}
 %% where
 %%     <<>> < emqx_guid:guid()
 %%     <<>> < bin_timestamp()

@@ -491,7 +491,7 @@ pending_messages({SessionID, PrevMsgId, PrevSTopic, PrevTag} = PrevKey, Acc, Mar
             false -> pending_messages(Key, Acc, MarkerIds);
             true -> pending_messages(Key, [{PrevMsgId, PrevSTopic} | Acc], MarkerIds)
         end;
-    %% Next sessionID or '$end_of_table'
+    %% Next session_id or '$end_of_table'
     _What ->
         case PrevTag =:= ?UNDELIVERED of
             false -> {lists:reverse(Acc), MarkerIds};
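The key layout in the comment above relies on standard Erlang term ordering of binaries, in which the empty binary sorts before any non-empty binary. A quick illustration (the timestamp construction is only an example):

    true = <<>> < emqx_guid:gen(),
    true = <<>> < <<(erlang:system_time(millisecond)):64>>.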
@@ -14,6 +14,8 @@
 %% limitations under the License.
 %%--------------------------------------------------------------------

+-define(PERSISTENT_SESSION_SHARD, emqx_persistent_session_shard).
+
 -record(session_store, {
     client_id :: binary(),
     expiry_interval :: non_neg_integer(),
@@ -56,6 +56,7 @@ start_link() ->

 init([]) ->
     process_flag(trap_exit, true),
+    mria_rlog:ensure_shard(?PERSISTENT_SESSION_SHARD),
     {ok, start_message_gc_timer(start_session_gc_timer(#{}))}.

 handle_call(_Request, _From, State) ->
@@ -0,0 +1,79 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-module(emqx_persistent_session_ds_proto_v1).
+
+-behaviour(emqx_bpapi).
+
+-export([
+    introduced_in/0,
+
+    open_iterator/4,
+    close_iterator/2,
+    close_all_iterators/2
+]).
+
+-include_lib("emqx/include/bpapi.hrl").
+
+-define(TIMEOUT, 30_000).
+
+introduced_in() ->
+    %% FIXME
+    "5.3.0".
+
+-spec open_iterator(
+    [node()],
+    emqx_topic:words(),
+    emqx_ds:time(),
+    emqx_ds:iterator_id()
+) ->
+    emqx_rpc:erpc_multicall(ok).
+open_iterator(Nodes, TopicFilter, StartMS, IteratorID) ->
+    erpc:multicall(
+        Nodes,
+        emqx_persistent_session_ds,
+        do_open_iterator,
+        [TopicFilter, StartMS, IteratorID],
+        ?TIMEOUT
+    ).
+
+-spec close_iterator(
+    [node()],
+    emqx_ds:iterator_id()
+) ->
+    emqx_rpc:erpc_multicall(ok).
+close_iterator(Nodes, IteratorID) ->
+    erpc:multicall(
+        Nodes,
+        emqx_persistent_session_ds,
+        do_ensure_iterator_closed,
+        [IteratorID],
+        ?TIMEOUT
+    ).
+
+-spec close_all_iterators(
+    [node()],
+    emqx_ds:session_id()
+) ->
+    emqx_rpc:erpc_multicall(ok).
+close_all_iterators(Nodes, DSSessionID) ->
+    erpc:multicall(
+        Nodes,
+        emqx_persistent_session_ds,
+        do_ensure_all_iterators_closed,
+        [DSSessionID],
+        ?TIMEOUT
+    ).
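A hedged sketch of how this new BPAPI module might be called from the durable-session code; the node list, topic filter, and iterator id below are placeholders. erpc:multicall/5 returns one result per node, in the same order as the node list, with a successful call showing up as {ok, Value}:

    Nodes = [node() | nodes()],
    Results = emqx_persistent_session_ds_proto_v1:open_iterator(
        Nodes, [<<"t">>, '+'], _StartTime = 0, _IteratorID = <<"iterator-1">>
    ),
    %% Expect every node to have opened the iterator successfully.
    true = lists:all(fun({ok, _}) -> true; (_) -> false end, Results).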
@@ -26,6 +26,7 @@
 all() -> emqx_common_test_helpers:all(?MODULE).

 init_per_suite(Config) ->
-    emqx_common_test_helpers:boot_modules([router, broker]),
+    emqx_common_test_helpers:boot_modules([broker]),
     emqx_common_test_helpers:start_apps([]),
     Config.
@@ -26,19 +26,12 @@ all() -> emqx_common_test_helpers:all(?MODULE).
 t_is_enabled(_) ->
     try
         ok = application:set_env(emqx, boot_modules, all),
-        ?assert(emqx_boot:is_enabled(router)),
         ?assert(emqx_boot:is_enabled(broker)),
         ?assert(emqx_boot:is_enabled(listeners)),
-        ok = application:set_env(emqx, boot_modules, [router]),
+        ok = application:set_env(emqx, boot_modules, [broker]),
-        ?assert(emqx_boot:is_enabled(router)),
-        ?assertNot(emqx_boot:is_enabled(broker)),
-        ?assertNot(emqx_boot:is_enabled(listeners)),
-        ok = application:set_env(emqx, boot_modules, [router, broker]),
-        ?assert(emqx_boot:is_enabled(router)),
         ?assert(emqx_boot:is_enabled(broker)),
         ?assertNot(emqx_boot:is_enabled(listeners)),
-        ok = application:set_env(emqx, boot_modules, [router, broker, listeners]),
+        ok = application:set_env(emqx, boot_modules, [broker, listeners]),
-        ?assert(emqx_boot:is_enabled(router)),
         ?assert(emqx_boot:is_enabled(broker)),
         ?assert(emqx_boot:is_enabled(listeners))
     after
@@ -59,39 +59,54 @@ groups() ->
 init_per_group(connected_client_count_group, Config) ->
     Config;
 init_per_group(tcp, Config) ->
-    emqx_common_test_helpers:boot_modules(all),
-    emqx_common_test_helpers:start_apps([]),
-    [{conn_fun, connect} | Config];
+    Apps = emqx_cth_suite:start(
+        [emqx],
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
+    ),
+    [{conn_fun, connect}, {group_apps, Apps} | Config];
 init_per_group(ws, Config) ->
-    emqx_common_test_helpers:boot_modules(all),
-    emqx_common_test_helpers:start_apps([]),
+    Apps = emqx_cth_suite:start(
+        [emqx],
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
+    ),
     [
         {ssl, false},
         {enable_websocket, true},
         {conn_fun, ws_connect},
         {port, 8083},
-        {host, "localhost"}
+        {host, "localhost"},
+        {group_apps, Apps}
         | Config
     ];
 init_per_group(quic, Config) ->
-    emqx_common_test_helpers:boot_modules(all),
-    emqx_common_test_helpers:start_apps([]),
-    UdpPort = 14567,
-    ok = emqx_common_test_helpers:ensure_quic_listener(?MODULE, UdpPort),
+    Apps = emqx_cth_suite:start(
+        [
+            {emqx,
+                "listeners.quic.test {"
+                "\n enable = true"
+                "\n max_connections = 1024000"
+                "\n idle_timeout = 15s"
+                "\n }"}
+        ],
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
+    ),
     [
         {conn_fun, quic_connect},
-        {port, UdpPort}
+        {port, emqx_config:get([listeners, quic, test, bind])},
+        {group_apps, Apps}
         | Config
     ];
 init_per_group(_Group, Config) ->
-    emqx_common_test_helpers:boot_modules(all),
-    emqx_common_test_helpers:start_apps([]),
-    Config.
+    Apps = emqx_cth_suite:start(
+        [emqx],
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
+    ),
+    [{group_apps, Apps} | Config].

 end_per_group(connected_client_count_group, _Config) ->
     ok;
-end_per_group(_Group, _Config) ->
-    emqx_common_test_helpers:stop_apps([]).
+end_per_group(_Group, Config) ->
    emqx_cth_suite:stop(?config(group_apps, Config)).

 init_per_suite(Config) ->
     Config.
@@ -49,7 +49,7 @@ init_per_suite(Config) ->
     %% Meck Hooks
     ok = meck:new(emqx_hooks, [passthrough, no_history, no_link]),
     ok = meck:expect(emqx_hooks, run, fun(_Hook, _Args) -> ok end),
-    ok = meck:expect(emqx_hooks, run_fold, fun(_Hook, _Args, Acc) -> {ok, Acc} end),
+    ok = meck:expect(emqx_hooks, run_fold, fun(_Hook, _Args, Acc) -> Acc end),

     ok = meck:expect(emqx_channel, ensure_disconnected, fun(_, Channel) -> Channel end),
@@ -41,6 +41,7 @@ init_per_suite(Config) ->
     Config.

 end_per_suite(_Config) ->
+    emqx_config:erase_all(),
     ok.

 init_per_testcase(TestCase, Config) when
@@ -14,17 +14,38 @@
 %% limitations under the License.
 %%--------------------------------------------------------------------

+%% @doc Common Test Helper / Running tests in a cluster
+%%
+%% This module allows setting up and tearing down clusters of EMQX nodes with
+%% the purpose of running integration tests in a distributed environment, but
+%% with the same isolation measures that `emqx_cth_suite` provides.
+%%
+%% In addition to what `emqx_cth_suite` does with respect to isolation, each
+%% node in the cluster is started with a separate, unique working directory.
+%%
+%% What should be started on each node is defined by the same appspecs that are
+%% used by `emqx_cth_suite` to start applications on the CT node. However, there
+%% is an additional set of defaults applied to appspecs to make sure that the
+%% cluster is started in a consistent, interconnected state, with no conflicts
+%% between applications.
+%%
+%% Most of the time, you just need to:
+%% 1. Describe the cluster with one or more _nodespecs_.
+%% 2. Call `emqx_cth_cluster:start/2` before the testrun (e.g. in `init_per_suite/1`
+%%    or `init_per_group/2`), providing unique work dir (e.g.
+%%    `emqx_cth_suite:work_dir/1`). Save the result in a context.
+%% 3. Call `emqx_cth_cluster:stop/1` after the testrun concludes (e.g.
+%%    in `end_per_suite/1` or `end_per_group/2`) with the result from step 2.
 -module(emqx_cth_cluster).

 -export([start/2]).
--export([stop/1]).
+-export([stop/1, stop_node/1]).

 -export([start_bare_node/2]).

 -export([share_load_module/2]).
--export([node_name/1]).
+-export([node_name/1, mk_nodespecs/2]).
+-export([start_apps/2, set_node_opts/2]).
--export([node_name/1]).

 -define(APPS_CLUSTERING, [gen_rpc, mria, ekka]).

@@ -88,7 +109,7 @@ when
 }.
 start(Nodes, ClusterOpts) ->
     NodeSpecs = mk_nodespecs(Nodes, ClusterOpts),
-    ct:pal("Starting cluster: ~p", [NodeSpecs]),
+    ct:pal("Starting cluster:\n ~p", [NodeSpecs]),
     % 1. Start bare nodes with only basic applications running
     _ = emqx_utils:pmap(fun start_node_init/1, NodeSpecs, ?TIMEOUT_NODE_START_MS),
     % 2. Start applications needed to enable clustering

@@ -289,17 +310,20 @@ load_apps(Node, #{apps := Apps}) ->
     erpc:call(Node, emqx_cth_suite, load_apps, [Apps]).

 start_apps_clustering(Node, #{apps := Apps} = Spec) ->
-    SuiteOpts = maps:with([work_dir], Spec),
+    SuiteOpts = suite_opts(Spec),
     AppsClustering = [lists:keyfind(App, 1, Apps) || App <- ?APPS_CLUSTERING],
     _Started = erpc:call(Node, emqx_cth_suite, start, [AppsClustering, SuiteOpts]),
     ok.

 start_apps(Node, #{apps := Apps} = Spec) ->
-    SuiteOpts = maps:with([work_dir], Spec),
+    SuiteOpts = suite_opts(Spec),
     AppsRest = [AppSpec || AppSpec = {App, _} <- Apps, not lists:member(App, ?APPS_CLUSTERING)],
     _Started = erpc:call(Node, emqx_cth_suite, start_apps, [AppsRest, SuiteOpts]),
     ok.

+suite_opts(Spec) ->
+    maps:with([work_dir], Spec).
+
 maybe_join_cluster(_Node, #{role := replicant}) ->
     ok;
 maybe_join_cluster(Node, Spec) ->
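The numbered steps in the module doc above map onto a suite skeleton roughly like the following; the node names and appspecs are placeholders, not taken from this change:

    init_per_suite(Config) ->
        Nodes = emqx_cth_cluster:start(
            [
                {example_suite_node1, #{role => core, apps => [emqx]}},
                {example_suite_node2, #{role => core, apps => [emqx]}}
            ],
            #{work_dir => emqx_cth_suite:work_dir(Config)}
        ),
        [{cluster_nodes, Nodes} | Config].

    end_per_suite(Config) ->
        emqx_cth_cluster:stop(?config(cluster_nodes, Config)).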
@@ -14,6 +14,47 @@
 %% limitations under the License.
 %%--------------------------------------------------------------------

+%% @doc Common Test Helper / Running test suites
+%%
+%% The purpose of this module is to run application-level, integration
+%% tests in an isolated fashion.
+%%
+%% Isolation in this context means that each testrun does not leave any
+%% persistent state accessible to following testruns. The goal is to
+%% make testruns completely independent of each other, of the order in
+%% which they are executed, and of the testrun granularity, i.e. whether
+%% they are executed individually or as part of a larger suite. This
+%% should help to increase reproducibility and reduce the risk of false
+%% positives.
+%%
+%% Isolation is achieved through the following measures:
+%% * Each testrun completely terminates and unloads all applications
+%%   started during the testrun.
+%% * Each testrun is executed in a separate directory, usually under
+%%   common_test's private directory, where all persistent state should
+%%   be stored.
+%% * Additionally, each testrun cleans out a few bits of persistent state that
+%%   survive the above measures, namely persistent VM terms related
+%%   to configuration and authentication (see `clean_suite_state/0`).
+%%
+%% Integration test in this context means a test that works with applications
+%% as a whole, and needs to start and stop them as part of the test run.
+%% For this, there's an abstraction called _appspec_ that describes how to
+%% configure and start an application.
+%%
+%% The module also provides a set of default appspecs for some applications
+%% that hide details and quirks of how to start them, to make it easier to
+%% write test suites.
+%%
+%% Most of the time, you just need to:
+%% 1. Describe the appspecs for the applications you want to test.
+%% 2. Call `emqx_cth_suite:start/2` to start the applications before the testrun
+%%    (e.g. in `init_per_suite/1` / `init_per_group/2`), providing the appspecs
+%%    and unique work dir for the testrun (e.g. `work_dir/1`). Save the result
+%%    in a context.
+%% 3. Call `emqx_cth_suite:stop/1` to stop the applications after the testrun
+%%    finishes (e.g. in `end_per_suite/1` / `end_per_group/2`), providing the
+%%    result from step 2.
 -module(emqx_cth_suite).

 -include_lib("common_test/include/ct.hrl").

@@ -22,6 +63,9 @@
 -export([start/2]).
 -export([stop/1]).

+-export([work_dir/1]).
+-export([work_dir/2]).
+
 -export([load_apps/1]).
 -export([start_apps/2]).
 -export([start_app/2]).

@@ -98,7 +142,8 @@ when
 SuiteOpts :: #{
     %% Working directory
     %% Everything a test produces should go here. If this directory is not empty,
-    %% function will raise an error.
+    %% function will raise an error. Most of the time, the result of `work_dir/1`
+    %% or `work_dir/2` (if used in a testcase) should be fine here.
     work_dir := file:name()
 }.
 start(Apps, SuiteOpts = #{work_dir := WorkDir}) ->

@@ -333,6 +378,45 @@ default_config(App, SuiteOpts) ->

 %%

+%% @doc Determine the unique work directory for the current test run.
+%% Takes into account name of the test suite, and all test groups the current run
+%% is part of.
+-spec work_dir(CTConfig :: proplists:proplist()) ->
+    file:filename_all().
+work_dir(CTConfig) ->
+    % Directory specific to the current test run.
+    [PrivDir] = proplists:get_all_values(priv_dir, CTConfig),
+    % Directory specific to the currently executing test suite.
+    [DataDir] = proplists:get_all_values(data_dir, CTConfig),
+    % NOTE: Contains the name of the current test group, if executed as part of a group.
+    GroupProps = proplists:get_value(tc_group_properties, CTConfig, []),
+    % NOTE: Contains names of outer test groups, if any.
+    GroupPathOuter = proplists:get_value(tc_group_path, CTConfig, []),
+    SuiteDir = filename:basename(DataDir),
+    GroupPath = lists:append([GroupProps | GroupPathOuter]),
+    GroupLevels = [atom_to_list(Name) || {name, Name} <- GroupPath],
+    WorkDir1 = filename:join(PrivDir, SuiteDir),
+    WorkDir2 =
+        case GroupLevels of
+            [] ->
+                WorkDir1;
+            [_ | _] ->
+                GroupDir = string:join(lists:reverse(GroupLevels), "."),
+                filename:join(WorkDir1, GroupDir)
+        end,
+    WorkDir2.
+
+%% @doc Determine the unique work directory for the current testcase run.
+%% Be careful when testcase runs under no groups, and its name matches the name of a
+%% previously executed test group, it's best to avoid such naming.
+-spec work_dir(TestCaseName :: atom(), CTConfig :: proplists:proplist()) ->
+    file:filename_all().
+work_dir(TCName, CTConfig) ->
+    WorkDir = work_dir(CTConfig),
+    filename:join(WorkDir, TCName).
+
+%%
+
 start_ekka() ->
     ok = emqx_common_test_helpers:start_ekka(),
     {ok, [mnesia, ekka]}.

@@ -35,7 +35,7 @@ init_per_suite(Config) ->
             "\n ban_time = 2s"
             "\n }"}
     ],
-    #{work_dir => ?config(priv_dir, Config)}
+    #{work_dir => emqx_cth_suite:work_dir(Config)}
 ),
 [{suite_apps, Apps} | Config].
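Putting the pieces above together, a suite that wants a single isolated EMQX instance typically follows this shape (the appspec below is the minimal one, as used by several suites touched in this commit):

    init_per_suite(Config) ->
        Apps = emqx_cth_suite:start(
            [emqx],
            #{work_dir => emqx_cth_suite:work_dir(Config)}
        ),
        [{suite_apps, Apps} | Config].

    end_per_suite(Config) ->
        emqx_cth_suite:stop(?config(suite_apps, Config)).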
|
|
@ -17,32 +17,45 @@
|
||||||
-module(emqx_persistent_messages_SUITE).
|
-module(emqx_persistent_messages_SUITE).
|
||||||
|
|
||||||
-include_lib("stdlib/include/assert.hrl").
|
-include_lib("stdlib/include/assert.hrl").
|
||||||
|
-include_lib("common_test/include/ct.hrl").
|
||||||
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
|
||||||
-compile(export_all).
|
-compile(export_all).
|
||||||
-compile(nowarn_export_all).
|
-compile(nowarn_export_all).
|
||||||
|
|
||||||
-define(NOW,
|
-define(DS_SHARD, <<"local">>).
|
||||||
(calendar:system_time_to_rfc3339(erlang:system_time(millisecond), [{unit, millisecond}]))
|
|
||||||
).
|
|
||||||
|
|
||||||
all() ->
|
all() ->
|
||||||
emqx_common_test_helpers:all(?MODULE).
|
emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
{ok, _} = application:ensure_all_started(emqx_durable_storage),
|
%% avoid inter-suite flakiness...
|
||||||
ok = emqx_common_test_helpers:start_apps([], fun
|
%% TODO: remove after other suites start to use `emx_cth_suite'
|
||||||
(emqx) ->
|
application:stop(emqx),
|
||||||
emqx_common_test_helpers:boot_modules(all),
|
application:stop(emqx_durable_storage),
|
||||||
emqx_config:init_load(emqx_schema, <<"persistent_session_store.ds = true">>),
|
TCApps = emqx_cth_suite:start(
|
||||||
emqx_app:set_config_loader(?MODULE);
|
app_specs(),
|
||||||
(_) ->
|
#{work_dir => emqx_cth_suite:work_dir(Config)}
|
||||||
ok
|
),
|
||||||
end),
|
[{tc_apps, TCApps} | Config].
|
||||||
|
|
||||||
|
end_per_suite(Config) ->
|
||||||
|
TCApps = ?config(tc_apps, Config),
|
||||||
|
emqx_cth_suite:stop(TCApps),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
init_per_testcase(t_session_subscription_iterators, Config) ->
|
||||||
|
Cluster = cluster(),
|
||||||
|
Nodes = emqx_cth_cluster:start(Cluster, #{work_dir => ?config(priv_dir, Config)}),
|
||||||
|
[{nodes, Nodes} | Config];
|
||||||
|
init_per_testcase(_TestCase, Config) ->
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
end_per_suite(_Config) ->
|
end_per_testcase(t_session_subscription_iterators, Config) ->
|
||||||
emqx_common_test_helpers:stop_apps([]),
|
Nodes = ?config(nodes, Config),
|
||||||
application:stop(emqx_durable_storage),
|
ok = emqx_cth_cluster:stop(Nodes),
|
||||||
|
ok;
|
||||||
|
end_per_testcase(_TestCase, _Config) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_messages_persisted(_Config) ->
|
t_messages_persisted(_Config) ->
|
||||||
|
@ -76,7 +89,7 @@ t_messages_persisted(_Config) ->
|
||||||
|
|
||||||
ct:pal("Results = ~p", [Results]),
|
ct:pal("Results = ~p", [Results]),
|
||||||
|
|
||||||
Persisted = consume(<<"local">>, {['#'], 0}),
|
Persisted = consume(?DS_SHARD, {['#'], 0}),
|
||||||
|
|
||||||
ct:pal("Persisted = ~p", [Persisted]),
|
ct:pal("Persisted = ~p", [Persisted]),
|
||||||
|
|
||||||
|
@ -88,6 +101,97 @@ t_messages_persisted(_Config) ->
|
||||||
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
%% TODO: test quic and ws too
|
||||||
|
t_session_subscription_iterators(Config) ->
|
||||||
|
[Node1, Node2] = ?config(nodes, Config),
|
||||||
|
Port = get_mqtt_port(Node1, tcp),
|
||||||
|
Topic = <<"t/topic">>,
|
||||||
|
SubTopicFilter = <<"t/+">>,
|
||||||
|
AnotherTopic = <<"u/another-topic">>,
|
||||||
|
ClientId = <<"myclientid">>,
|
||||||
|
?check_trace(
|
||||||
|
begin
|
||||||
|
[
|
||||||
|
Payload1,
|
||||||
|
Payload2,
|
||||||
|
Payload3,
|
||||||
|
Payload4
|
||||||
|
] = lists:map(
|
||||||
|
fun(N) -> <<"hello", (integer_to_binary(N))/binary>> end,
|
||||||
|
lists:seq(1, 4)
|
||||||
|
),
|
||||||
|
ct:pal("starting"),
|
||||||
|
{ok, Client} = emqtt:start_link([
|
||||||
|
{port, Port},
|
||||||
|
{clientid, ClientId},
|
||||||
|
{proto_ver, v5}
|
||||||
|
]),
|
||||||
|
{ok, _} = emqtt:connect(Client),
|
||||||
|
ct:pal("publishing 1"),
|
||||||
|
Message1 = emqx_message:make(Topic, Payload1),
|
||||||
|
publish(Node1, Message1),
|
||||||
|
ct:pal("subscribing 1"),
|
||||||
|
{ok, _, [2]} = emqtt:subscribe(Client, SubTopicFilter, qos2),
|
||||||
|
ct:pal("publishing 2"),
|
||||||
|
Message2 = emqx_message:make(Topic, Payload2),
|
||||||
|
publish(Node1, Message2),
|
||||||
|
[_] = receive_messages(1),
|
||||||
|
ct:pal("subscribing 2"),
|
||||||
|
{ok, _, [1]} = emqtt:subscribe(Client, SubTopicFilter, qos1),
|
||||||
|
ct:pal("publishing 3"),
|
||||||
|
Message3 = emqx_message:make(Topic, Payload3),
|
||||||
|
publish(Node1, Message3),
|
||||||
|
[_] = receive_messages(1),
|
||||||
|
ct:pal("publishing 4"),
|
||||||
|
Message4 = emqx_message:make(AnotherTopic, Payload4),
|
||||||
|
publish(Node1, Message4),
|
||||||
|
emqtt:stop(Client),
|
||||||
|
#{
|
||||||
|
messages => [Message1, Message2, Message3, Message4]
|
||||||
|
}
|
||||||
|
end,
|
||||||
|
fun(Results, Trace) ->
|
||||||
|
ct:pal("trace:\n ~p", [Trace]),
|
||||||
|
#{
|
||||||
|
messages := [_Message1, Message2, Message3 | _]
|
||||||
|
} = Results,
|
||||||
|
case ?of_kind(ds_session_subscription_added, Trace) of
|
||||||
|
[] ->
|
||||||
|
%% Since `emqx_durable_storage' is a dependency of `emqx', it gets
|
||||||
|
%% compiled in "prod" mode when running emqx standalone tests.
|
||||||
|
ok;
|
||||||
|
[_ | _] ->
|
||||||
|
?assertMatch(
|
||||||
|
[
|
||||||
|
#{?snk_kind := ds_session_subscription_added},
|
||||||
|
#{?snk_kind := ds_session_subscription_present}
|
||||||
|
],
|
||||||
|
?of_kind(
|
||||||
|
[
|
||||||
|
ds_session_subscription_added,
|
||||||
|
ds_session_subscription_present
|
||||||
|
],
|
||||||
|
Trace
|
||||||
|
)
|
||||||
|
),
|
||||||
|
ok
|
||||||
|
end,
|
||||||
|
?assertMatch({ok, [_]}, get_all_iterator_ids(Node1)),
|
||||||
|
{ok, [IteratorId]} = get_all_iterator_ids(Node1),
|
||||||
|
?assertMatch({ok, [IteratorId]}, get_all_iterator_ids(Node2)),
|
||||||
|
ReplayMessages1 = erpc:call(Node1, fun() -> consume(?DS_SHARD, IteratorId) end),
|
||||||
|
ExpectedMessages = [Message2, Message3],
|
||||||
|
%% Note: it is expected that this will break after replayers are in place.
|
||||||
|
%% They might have consumed all the messages by this time.
|
||||||
|
?assertEqual(ExpectedMessages, ReplayMessages1),
|
||||||
|
%% Different DS shard
|
||||||
|
ReplayMessages2 = erpc:call(Node2, fun() -> consume(?DS_SHARD, IteratorId) end),
|
||||||
|
?assertEqual([], ReplayMessages2),
|
||||||
|
ok
|
||||||
|
end
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
||||||
%%
|
%%
|
||||||
|
|
||||||
connect(ClientId, CleanStart, EI) ->
|
connect(ClientId, CleanStart, EI) ->
|
||||||
|
@ -103,8 +207,11 @@ connect(ClientId, CleanStart, EI) ->
|
||||||
{ok, _} = emqtt:connect(Client),
|
{ok, _} = emqtt:connect(Client),
|
||||||
Client.
|
Client.
|
||||||
|
|
||||||
consume(Shard, Replay) ->
|
consume(Shard, Replay = {_TopicFiler, _StartMS}) ->
|
||||||
{ok, It} = emqx_ds_storage_layer:make_iterator(Shard, Replay),
|
{ok, It} = emqx_ds_storage_layer:make_iterator(Shard, Replay),
|
||||||
|
consume(It);
|
||||||
|
consume(Shard, IteratorId) when is_binary(IteratorId) ->
|
||||||
|
{ok, It} = emqx_ds_storage_layer:restore_iterator(Shard, IteratorId),
|
||||||
consume(It).
|
consume(It).
|
||||||
|
|
||||||
consume(It) ->
|
consume(It) ->
|
||||||
|
@ -114,3 +221,54 @@ consume(It) ->
|
||||||
none ->
|
none ->
|
||||||
[]
|
[]
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
receive_messages(Count) ->
|
||||||
|
receive_messages(Count, []).
|
||||||
|
|
||||||
|
receive_messages(0, Msgs) ->
|
||||||
|
Msgs;
|
||||||
|
receive_messages(Count, Msgs) ->
|
||||||
|
receive
|
||||||
|
{publish, Msg} ->
|
||||||
|
receive_messages(Count - 1, [Msg | Msgs])
|
||||||
|
after 5_000 ->
|
||||||
|
Msgs
|
||||||
|
end.
|
||||||
|
|
||||||
|
publish(Node, Message) ->
|
||||||
|
erpc:call(Node, emqx, publish, [Message]).
|
||||||
|
|
||||||
|
get_iterator_ids(Node, ClientId) ->
|
||||||
|
Channel = erpc:call(Node, fun() ->
|
||||||
|
[ConnPid] = emqx_cm:lookup_channels(ClientId),
|
||||||
|
sys:get_state(ConnPid)
|
||||||
|
end),
|
||||||
|
emqx_connection:info({channel, {session, iterators}}, Channel).
|
||||||
|
|
||||||
|
app_specs() ->
|
||||||
|
[
|
||||||
|
emqx_durable_storage,
|
||||||
|
{emqx, #{
|
||||||
|
config => #{persistent_session_store => #{ds => true}},
|
||||||
|
override_env => [{boot_modules, [broker, listeners]}]
|
||||||
|
}}
|
||||||
|
].
|
||||||
|
|
||||||
|
cluster() ->
|
||||||
|
Node1 = persistent_messages_SUITE1,
|
||||||
|
Spec = #{
|
||||||
|
role => core,
|
||||||
|
join_to => emqx_cth_cluster:node_name(Node1),
|
||||||
|
apps => app_specs()
|
||||||
|
},
|
||||||
|
[
|
||||||
|
{Node1, Spec},
|
||||||
|
{persistent_messages_SUITE2, Spec}
|
||||||
|
].
|
||||||
|
|
||||||
|
get_mqtt_port(Node, Type) ->
|
||||||
|
{_IP, Port} = erpc:call(Node, emqx_config, get, [[listeners, Type, default, bind]]),
|
||||||
|
Port.
|
||||||
|
|
||||||
|
get_all_iterator_ids(Node) ->
|
||||||
|
erpc:call(Node, emqx_ds_storage_layer, list_iterator_prefix, [?DS_SHARD, <<>>]).
|
||||||
|
@@ -20,6 +20,7 @@

 -include_lib("proper/include/proper.hrl").
 -include("emqx.hrl").
+-include("emqx_session.hrl").
 -include("emqx_access_control.hrl").

 %% High level Types

@@ -132,33 +133,22 @@ clientinfo() ->
 sessioninfo() ->
     ?LET(
         Session,
-        {session, clientid(),
-            % id
-            sessionid(),
-            % is_persistent
-            boolean(),
-            % subscriptions
-            subscriptions(),
-            % max_subscriptions
-            non_neg_integer(),
-            % upgrade_qos
-            boolean(),
-            % emqx_inflight:inflight()
-            inflight(),
-            % emqx_mqueue:mqueue()
-            mqueue(),
-            % next_pkt_id
-            packet_id(),
-            % retry_interval
-            safty_timeout(),
-            % awaiting_rel
-            awaiting_rel(),
-            % max_awaiting_rel
-            non_neg_integer(),
-            % await_rel_timeout
-            safty_timeout(),
-            % created_at
-            timestamp()},
+        #session{
+            clientid = clientid(),
+            id = sessionid(),
+            is_persistent = boolean(),
+            subscriptions = subscriptions(),
+            max_subscriptions = non_neg_integer(),
+            upgrade_qos = boolean(),
+            inflight = inflight(),
+            mqueue = mqueue(),
+            next_pkt_id = packet_id(),
+            retry_interval = safty_timeout(),
+            awaiting_rel = awaiting_rel(),
+            max_awaiting_rel = non_neg_integer(),
+            await_rel_timeout = safty_timeout(),
+            created_at = timestamp()
+        },
         emqx_session:info(Session)
     ).
@@ -44,7 +44,7 @@ init_per_group(GroupName, Config) ->
     AppSpecs = [
         {emqx, #{
             config => mk_config(GroupName),
-            override_env => [{boot_modules, [router]}]
+            override_env => [{boot_modules, [broker]}]
         }}
     ],
     Apps = emqx_cth_suite:start(AppSpecs, #{work_dir => WorkDir}),

@@ -51,12 +51,12 @@ end_per_group(_GroupName, Config) ->
 mk_config(routing_schema_v1) ->
     #{
         config => "broker.routing.storage_schema = v1",
-        override_env => [{boot_modules, [router]}]
+        override_env => [{boot_modules, [broker]}]
     };
 mk_config(routing_schema_v2) ->
     #{
         config => "broker.routing.storage_schema = v2",
-        override_env => [{boot_modules, [router]}]
+        override_env => [{boot_modules, [broker]}]
     }.

 init_per_testcase(_TestCase, Config) ->
@@ -24,6 +24,7 @@
 -include_lib("emqx/include/emqx.hrl").
 -include_lib("emqx/include/emqx_trace.hrl").
 -include_lib("snabbkaffe/include/snabbkaffe.hrl").
+-include_lib("kernel/include/file.hrl").

 %%--------------------------------------------------------------------
 %% Setups

@@ -52,6 +53,7 @@ init_per_testcase(_, Config) ->
     Config.

 end_per_testcase(_) ->
+    snabbkaffe:stop(),
     ok.

 t_base_create_delete(_Config) ->

@@ -454,6 +456,36 @@ t_migrate_trace(_Config) ->
     ),
     ok.

+%% If no relevant event occurred, the log file size must be exactly 0 after stopping the trace.
+t_empty_trace_log_file(_Config) ->
+    ?check_trace(
+        begin
+            Now = erlang:system_time(second),
+            Name = <<"empty_trace_log">>,
+            Trace = [
+                {<<"name">>, Name},
+                {<<"type">>, clientid},
+                {<<"clientid">>, <<"test_trace_no_clientid_1">>},
+                {<<"start_at">>, Now},
+                {<<"end_at">>, Now + 100}
+            ],
+            ?wait_async_action(
+                ?assertMatch({ok, _}, emqx_trace:create(Trace)),
+                #{?snk_kind := update_trace_done}
+            ),
+            ok = emqx_trace_handler_SUITE:filesync(Name, clientid),
+            {ok, Filename} = emqx_trace:get_trace_filename(Name),
+            ?assertMatch({ok, #{size := 0}}, emqx_trace:trace_file_detail(Filename)),
+            ?wait_async_action(
+                ?assertEqual(ok, emqx_trace:update(Name, false)),
+                #{?snk_kind := update_trace_done}
+            ),
+            ?assertMatch({ok, #{size := 0}}, emqx_trace:trace_file_detail(Filename)),
+            ?assertEqual(ok, emqx_trace:delete(Name))
+        end,
+        []
+    ).
+
 build_new_trace_data() ->
     Now = erlang:system_time(second),
     {ok, _} = emqx_trace:create([
@@ -539,8 +539,17 @@ t_parse_incoming_order(_) ->

 t_parse_incoming_frame_error(_) ->
     {Packets, _St} = ?ws_conn:parse_incoming(<<3, 2, 1, 0>>, [], st()),
-    FrameError = {frame_error, malformed_packet},
-    [{incoming, FrameError}] = Packets.
+    ?assertMatch(
+        [
+            {incoming,
+                {frame_error, #{
+                    header_type := _,
+                    hint := malformed_packet
+                }}}
+        ],
+        Packets
+    ).

 t_handle_incomming_frame_error(_) ->
     FrameError = {frame_error, bad_qos},
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_authn, [
     {description, "EMQX Authentication"},
-    {vsn, "0.1.25"},
+    {vsn, "0.1.26"},
     {modules, []},
     {registered, [emqx_authn_sup, emqx_authn_registry]},
     {applications, [
@@ -63,6 +63,9 @@
     check_password/4
 ]).

+-define(SALT_ROUNDS_MIN, 5).
+-define(SALT_ROUNDS_MAX, 10).
+
 namespace() -> "authn-hash".
 roots() -> [pbkdf2, bcrypt, bcrypt_rw, simple].

@@ -71,11 +74,12 @@ fields(bcrypt_rw) ->
     [
         {salt_rounds,
             sc(
-                integer(),
+                range(?SALT_ROUNDS_MIN, ?SALT_ROUNDS_MAX),
                 #{
-                    default => 10,
-                    example => 10,
-                    desc => "Salt rounds for BCRYPT password generation."
+                    default => ?SALT_ROUNDS_MAX,
+                    example => ?SALT_ROUNDS_MAX,
+                    desc => "Work factor for BCRYPT password generation.",
+                    converter => fun salt_rounds_converter/2
                 }
             )}
     ];

@@ -106,6 +110,13 @@ fields(simple) ->
     {salt_position, fun salt_position/1}
 ].

+salt_rounds_converter(undefined, _) ->
+    undefined;
+salt_rounds_converter(I, _) when is_integer(I) ->
+    emqx_utils:clamp(I, ?SALT_ROUNDS_MIN, ?SALT_ROUNDS_MAX);
+salt_rounds_converter(X, _) ->
+    X.
+
 desc(bcrypt_rw) ->
     "Settings for bcrypt password hashing algorithm (for DB backends with write capability).";
 desc(bcrypt) ->
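The converter above clamps user-supplied bcrypt work factors into the [5, 10] range. emqx_utils:clamp/3 itself is not shown in this diff; assuming it is the usual min/max clamp, its effect is equivalent to this sketch:

    %% Hypothetical stand-in for emqx_utils:clamp/3, shown only to illustrate
    %% what the converter does with out-of-range values.
    clamp(I, Min, _Max) when I < Min -> Min;
    clamp(I, _Min, Max) when I > Max -> Max;
    clamp(I, _Min, _Max) -> I.
    %% clamp(3, 5, 10)  -> 5
    %% clamp(8, 5, 10)  -> 8
    %% clamp(12, 5, 10) -> 10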
@@ -44,7 +44,7 @@ init_per_testcase(TestCase, Config) ->
             {emqx_conf, "authorization.no_match = deny, authorization.cache.enable = false"},
             emqx_authz
         ],
-        #{work_dir => filename:join(?config(priv_dir, Config), TestCase)}
+        #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}
     ),
     [{tc_apps, Apps} | Config].

@@ -37,7 +37,7 @@ init_per_testcase(TestCase, Config) ->
             {emqx_conf, "authorization.no_match = deny, authorization.cache.enable = false"},
             emqx_authz
         ],
-        #{work_dir => filename:join(?config(priv_dir, Config), TestCase)}
+        #{work_dir => emqx_cth_suite:work_dir(TestCase, Config)}
     ),
     [{tc_apps, Apps} | Config].
@@ -116,13 +116,13 @@ end_per_suite(_Config) ->
     ok.

 init_per_group(cluster = Name, Config) ->
-    Nodes = [NodePrimary | _] = mk_cluster(Name, Config),
+    Nodes = [NodePrimary | _] = mk_cluster(Config),
     init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
 init_per_group(cluster_later_join = Name, Config) ->
-    Nodes = [NodePrimary | _] = mk_cluster(Name, Config, #{join_to => undefined}),
+    Nodes = [NodePrimary | _] = mk_cluster(Config, #{join_to => undefined}),
     init_api([{group, Name}, {cluster_nodes, Nodes}, {node, NodePrimary} | Config]);
-init_per_group(Name, Config) ->
-    WorkDir = filename:join(?config(priv_dir, Config), Name),
+init_per_group(_Name, Config) ->
+    WorkDir = emqx_cth_suite:work_dir(Config),
     Apps = emqx_cth_suite:start(?APPSPECS ++ [?APPSPEC_DASHBOARD], #{work_dir => WorkDir}),
     init_api([{group, single}, {group_apps, Apps}, {node, node()} | Config]).

@@ -131,10 +131,10 @@ init_api(Config) ->
     {ok, App} = erpc:call(APINode, emqx_common_test_http, create_default_app, []),
     [{api, App} | Config].

-mk_cluster(Name, Config) ->
-    mk_cluster(Name, Config, #{}).
+mk_cluster(Config) ->
+    mk_cluster(Config, #{}).

-mk_cluster(Name, Config, Opts) ->
+mk_cluster(Config, Opts) ->
     Node1Apps = ?APPSPECS ++ [?APPSPEC_DASHBOARD],
     Node2Apps = ?APPSPECS,
     emqx_cth_cluster:start(

@@ -142,7 +142,7 @@ mk_cluster(Name, Config, Opts) ->
             {emqx_bridge_api_SUITE1, Opts#{role => core, apps => Node1Apps}},
             {emqx_bridge_api_SUITE2, Opts#{role => core, apps => Node2Apps}}
         ],
-        #{work_dir => filename:join(?config(priv_dir, Config), Name)}
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
     ).

 end_per_group(Group, Config) when
@@ -1,6 +1,6 @@
 %% -*- mode: erlang; -*-
 {erl_opts, [debug_info]}.
-{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.6"}}}
+{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.7"}}}
        , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}
        , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
        , {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}
@@ -1,6 +1,6 @@
 {application, emqx_bridge_cassandra, [
     {description, "EMQX Enterprise Cassandra Bridge"},
-    {vsn, "0.1.3"},
+    {vsn, "0.1.4"},
     {registered, []},
     {applications, [
         kernel,
@@ -94,7 +94,6 @@ on_start(
     #{
         servers := Servers0,
         keyspace := Keyspace,
-        username := Username,
         pool_size := PoolSize,
         ssl := SSL
     } = Config

@@ -114,12 +113,12 @@ on_start(

     Options = [
         {nodes, Servers},
-        {username, Username},
-        {password, emqx_secret:wrap(maps:get(password, Config, ""))},
         {keyspace, Keyspace},
         {auto_reconnect, ?AUTO_RECONNECT_INTERVAL},
         {pool_size, PoolSize}
     ],
+    Options1 = maybe_add_opt(username, Config, Options),
+    Options2 = maybe_add_opt(password, Config, Options1, _IsSensitive = true),

     SslOpts =
         case maps:get(enable, SSL) of

@@ -132,7 +131,7 @@ on_start(
             []
         end,
     State = parse_prepare_cql(Config),
-    case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of
+    case emqx_resource_pool:start(InstId, ?MODULE, Options2 ++ SslOpts) of
         ok ->
             {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})};
         {error, Reason} ->

@@ -513,3 +512,19 @@ maybe_assign_type(V) when is_integer(V) ->
 maybe_assign_type(V) when is_float(V) -> {double, V};
 maybe_assign_type(V) ->
     V.
+
+maybe_add_opt(Key, Conf, Opts) ->
+    maybe_add_opt(Key, Conf, Opts, _IsSensitive = false).
+
+maybe_add_opt(Key, Conf, Opts, IsSensitive) ->
+    case Conf of
+        #{Key := Val} ->
+            [{Key, maybe_wrap(IsSensitive, Val)} | Opts];
+        _ ->
+            Opts
+    end.
+
+maybe_wrap(false = _IsSensitive, Val) ->
+    Val;
+maybe_wrap(true, Val) ->
+    emqx_secret:wrap(Val).
|
@ -7,15 +7,17 @@
|
||||||
-compile(nowarn_export_all).
|
-compile(nowarn_export_all).
|
||||||
-compile(export_all).
|
-compile(export_all).
|
||||||
|
|
||||||
|
-include_lib("common_test/include/ct.hrl").
|
||||||
-include("emqx_bridge_cassandra.hrl").
|
-include("emqx_bridge_cassandra.hrl").
|
||||||
-include("emqx_connector/include/emqx_connector.hrl").
|
-include("emqx_connector/include/emqx_connector.hrl").
|
||||||
-include_lib("eunit/include/eunit.hrl").
|
-include_lib("eunit/include/eunit.hrl").
|
||||||
-include_lib("emqx/include/emqx.hrl").
|
-include_lib("emqx/include/emqx.hrl").
|
||||||
-include_lib("stdlib/include/assert.hrl").
|
-include_lib("stdlib/include/assert.hrl").
|
||||||
|
|
||||||
%% Cassandra server defined at `.ci/docker-compose-file/docker-compose-cassandra-tcp.yaml`
|
%% Cassandra servers are defined at `.ci/docker-compose-file/docker-compose-cassandra.yaml`
|
||||||
%% You can change it to `127.0.0.1`, if you run this SUITE locally
|
%% You can change it to `127.0.0.1`, if you run this SUITE locally
|
||||||
-define(CASSANDRA_HOST, "cassandra").
|
-define(CASSANDRA_HOST, "cassandra").
|
||||||
|
-define(CASSANDRA_HOST_NOAUTH, "cassandra_noauth").
|
||||||
-define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector).
|
-define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector).
|
||||||
|
|
||||||
%% This test SUITE requires a running cassandra instance. If you don't want to
|
%% This test SUITE requires a running cassandra instance. If you don't want to
|
||||||
|
@ -32,40 +34,58 @@
|
||||||
-define(CASSA_PASSWORD, <<"cassandra">>).
|
-define(CASSA_PASSWORD, <<"cassandra">>).
|
||||||
|
|
||||||
all() ->
|
all() ->
|
||||||
emqx_common_test_helpers:all(?MODULE).
|
[
|
||||||
|
{group, auth},
|
||||||
|
{group, noauth}
|
||||||
|
].
|
||||||
|
|
||||||
groups() ->
|
groups() ->
|
||||||
[].
|
TCs = emqx_common_test_helpers:all(?MODULE),
|
||||||
|
[
|
||||||
|
{auth, TCs},
|
||||||
|
{noauth, TCs}
|
||||||
|
].
|
||||||
|
|
||||||
cassandra_servers() ->
|
cassandra_servers(CassandraHost) ->
|
||||||
lists:map(
|
lists:map(
|
||||||
fun(#{hostname := Host, port := Port}) ->
|
fun(#{hostname := Host, port := Port}) ->
|
||||||
{Host, Port}
|
{Host, Port}
|
||||||
end,
|
end,
|
||||||
emqx_schema:parse_servers(
|
emqx_schema:parse_servers(
|
||||||
iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
|
iolist_to_binary([CassandraHost, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]),
|
||||||
#{default_port => ?CASSANDRA_DEFAULT_PORT}
|
#{default_port => ?CASSANDRA_DEFAULT_PORT}
|
||||||
)
|
)
|
||||||
).
|
).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
case
|
ok = emqx_common_test_helpers:start_apps([emqx_conf]),
|
||||||
emqx_common_test_helpers:is_tcp_server_available(?CASSANDRA_HOST, ?CASSANDRA_DEFAULT_PORT)
|
ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
|
||||||
of
|
{ok, _} = application:ensure_all_started(emqx_connector),
|
||||||
|
Config.
|
||||||
|
|
||||||
|
init_per_group(Group, Config) ->
|
||||||
|
{CassandraHost, AuthOpts} =
|
||||||
|
case Group of
|
||||||
|
auth ->
|
||||||
|
{?CASSANDRA_HOST, [{username, ?CASSA_USERNAME}, {password, ?CASSA_PASSWORD}]};
|
||||||
|
noauth ->
|
||||||
|
{?CASSANDRA_HOST_NOAUTH, []}
|
||||||
|
end,
|
||||||
|
case emqx_common_test_helpers:is_tcp_server_available(CassandraHost, ?CASSANDRA_DEFAULT_PORT) of
|
||||||
true ->
|
true ->
|
||||||
ok = emqx_common_test_helpers:start_apps([emqx_conf]),
|
|
||||||
ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
|
|
||||||
{ok, _} = application:ensure_all_started(emqx_connector),
|
|
||||||
%% keyspace `mqtt` must be created in advance
|
%% keyspace `mqtt` must be created in advance
|
||||||
{ok, Conn} =
|
{ok, Conn} =
|
||||||
ecql:connect([
|
ecql:connect([
|
||||||
{nodes, cassandra_servers()},
|
{nodes, cassandra_servers(CassandraHost)},
|
||||||
{username, ?CASSA_USERNAME},
|
|
||||||
{password, ?CASSA_PASSWORD},
|
|
||||||
{keyspace, "mqtt"}
|
{keyspace, "mqtt"}
|
||||||
|
| AuthOpts
|
||||||
]),
|
]),
|
||||||
ecql:close(Conn),
|
ecql:close(Conn),
|
||||||
Config;
|
[
|
||||||
|
{cassa_host, CassandraHost},
|
||||||
|
{cassa_auth_opts, AuthOpts}
|
||||||
|
| Config
|
||||||
|
];
|
||||||
false ->
|
false ->
|
||||||
case os:getenv("IS_CI") of
|
case os:getenv("IS_CI") of
|
||||||
"yes" ->
|
"yes" ->
|
||||||
|
@ -75,6 +95,9 @@ init_per_suite(Config) ->
|
||||||
end
|
end
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
end_per_group(_Group, _Config) ->
|
||||||
|
ok.
|
||||||
|
|
||||||
end_per_suite(_Config) ->
|
end_per_suite(_Config) ->
|
||||||
ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
|
ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
|
||||||
ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
|
ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
|
||||||
|
@ -90,10 +113,10 @@ end_per_testcase(_, _Config) ->
|
||||||
%% cases
|
%% cases
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
t_lifecycle(_Config) ->
|
t_lifecycle(Config) ->
|
||||||
perform_lifecycle_check(
|
perform_lifecycle_check(
|
||||||
<<"emqx_connector_cassandra_SUITE">>,
|
<<"emqx_connector_cassandra_SUITE">>,
|
||||||
cassandra_config()
|
cassandra_config(Config)
|
||||||
).
|
).
|
||||||
|
|
||||||
show(X) ->
|
show(X) ->
|
||||||
|
@ -168,25 +191,25 @@ perform_lifecycle_check(ResourceId, InitialConfig) ->
|
||||||
%% utils
|
%% utils
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
cassandra_config() ->
|
cassandra_config(Config) ->
|
||||||
Config =
|
Host = ?config(cassa_host, Config),
|
||||||
#{
|
AuthOpts = maps:from_list(?config(cassa_auth_opts, Config)),
|
||||||
|
CassConfig =
|
||||||
|
AuthOpts#{
|
||||||
auto_reconnect => true,
|
auto_reconnect => true,
|
||||||
keyspace => <<"mqtt">>,
|
keyspace => <<"mqtt">>,
|
||||||
username => ?CASSA_USERNAME,
|
|
||||||
password => ?CASSA_PASSWORD,
|
|
||||||
pool_size => 8,
|
pool_size => 8,
|
||||||
servers => iolist_to_binary(
|
servers => iolist_to_binary(
|
||||||
io_lib:format(
|
io_lib:format(
|
||||||
"~s:~b",
|
"~s:~b",
|
||||||
[
|
[
|
||||||
?CASSANDRA_HOST,
|
Host,
|
||||||
?CASSANDRA_DEFAULT_PORT
|
?CASSANDRA_DEFAULT_PORT
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
#{<<"config">> => Config}.
|
#{<<"config">> => CassConfig}.
|
||||||
|
|
||||||
test_query_no_params() ->
|
test_query_no_params() ->
|
||||||
{query, <<"SELECT count(1) AS T FROM system.local">>}.
|
{query, <<"SELECT count(1) AS T FROM system.local">>}.
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_gcp_pubsub, [
|
{application, emqx_bridge_gcp_pubsub, [
|
||||||
{description, "EMQX Enterprise GCP Pub/Sub Bridge"},
|
{description, "EMQX Enterprise GCP Pub/Sub Bridge"},
|
||||||
{vsn, "0.1.7"},
|
{vsn, "0.1.8"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -577,7 +577,7 @@ cluster(Config) ->
|
||||||
{schema_mod, emqx_enterprise_schema},
|
{schema_mod, emqx_enterprise_schema},
|
||||||
{env_handler, fun
|
{env_handler, fun
|
||||||
(emqx) ->
|
(emqx) ->
|
||||||
application:set_env(emqx, boot_modules, [broker, router]),
|
application:set_env(emqx, boot_modules, [broker]),
|
||||||
ok;
|
ok;
|
||||||
(emqx_conf) ->
|
(emqx_conf) ->
|
||||||
ok;
|
ok;
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
%% -*- mode: erlang; -*-
|
%% -*- mode: erlang; -*-
|
||||||
{erl_opts, [debug_info]}.
|
{erl_opts, [debug_info]}.
|
||||||
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.6"}}}
|
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.7"}}}
|
||||||
, {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}
|
, {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.3"}}}
|
||||||
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
|
, {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}}
|
||||||
, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}
|
, {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}}
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_bridge_kafka, [
|
{application, emqx_bridge_kafka, [
|
||||||
{description, "EMQX Enterprise Kafka Bridge"},
|
{description, "EMQX Enterprise Kafka Bridge"},
|
||||||
{vsn, "0.1.8"},
|
{vsn, "0.1.9"},
|
||||||
{registered, [emqx_bridge_kafka_consumer_sup]},
|
{registered, [emqx_bridge_kafka_consumer_sup]},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -1101,7 +1101,7 @@ cluster(Config) ->
|
||||||
{load_apps, [emqx_machine]},
|
{load_apps, [emqx_machine]},
|
||||||
{env_handler, fun
|
{env_handler, fun
|
||||||
(emqx) ->
|
(emqx) ->
|
||||||
application:set_env(emqx, boot_modules, [broker, router]),
|
application:set_env(emqx, boot_modules, [broker]),
|
||||||
ExtraEnvHandlerHook(),
|
ExtraEnvHandlerHook(),
|
||||||
ok;
|
ok;
|
||||||
(emqx_conf) ->
|
(emqx_conf) ->
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_kinesis, [
|
{application, emqx_bridge_kinesis, [
|
||||||
{description, "EMQX Enterprise Amazon Kinesis Bridge"},
|
{description, "EMQX Enterprise Amazon Kinesis Bridge"},
|
||||||
{vsn, "0.1.1"},
|
{vsn, "0.1.2"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [
|
{applications, [
|
||||||
kernel,
|
kernel,
|
||||||
|
|
|
@ -536,7 +536,7 @@ cluster(Config) ->
|
||||||
{schema_mod, emqx_enterprise_schema},
|
{schema_mod, emqx_enterprise_schema},
|
||||||
{env_handler, fun
|
{env_handler, fun
|
||||||
(emqx) ->
|
(emqx) ->
|
||||||
application:set_env(emqx, boot_modules, [broker, router]),
|
application:set_env(emqx, boot_modules, [broker]),
|
||||||
ok;
|
ok;
|
||||||
(emqx_conf) ->
|
(emqx_conf) ->
|
||||||
ok;
|
ok;
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_bridge_sqlserver, [
|
{application, emqx_bridge_sqlserver, [
|
||||||
{description, "EMQX Enterprise SQL Server Bridge"},
|
{description, "EMQX Enterprise SQL Server Bridge"},
|
||||||
{vsn, "0.1.2"},
|
{vsn, "0.1.3"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [kernel, stdlib, emqx_resource, odbc]},
|
{applications, [kernel, stdlib, emqx_resource, odbc]},
|
||||||
{env, []},
|
{env, []},
|
||||||
|
|
|
@ -173,7 +173,6 @@ on_start(
|
||||||
#{
|
#{
|
||||||
server := Server,
|
server := Server,
|
||||||
username := Username,
|
username := Username,
|
||||||
password := Password,
|
|
||||||
driver := Driver,
|
driver := Driver,
|
||||||
database := Database,
|
database := Database,
|
||||||
pool_size := PoolSize,
|
pool_size := PoolSize,
|
||||||
|
@ -200,7 +199,7 @@ on_start(
|
||||||
Options = [
|
Options = [
|
||||||
{server, to_bin(Server)},
|
{server, to_bin(Server)},
|
||||||
{username, Username},
|
{username, Username},
|
||||||
{password, Password},
|
{password, emqx_secret:wrap(maps:get(password, Config, ""))},
|
||||||
{driver, Driver},
|
{driver, Driver},
|
||||||
{database, Database},
|
{database, Database},
|
||||||
{pool_size, PoolSize}
|
{pool_size, PoolSize}
|
||||||
|
@ -320,7 +319,7 @@ conn_str([{database, Database} | Opts], Acc) ->
|
||||||
conn_str([{username, Username} | Opts], Acc) ->
|
conn_str([{username, Username} | Opts], Acc) ->
|
||||||
conn_str(Opts, ["UID=" ++ str(Username) | Acc]);
|
conn_str(Opts, ["UID=" ++ str(Username) | Acc]);
|
||||||
conn_str([{password, Password} | Opts], Acc) ->
|
conn_str([{password, Password} | Opts], Acc) ->
|
||||||
conn_str(Opts, ["PWD=" ++ str(Password) | Acc]);
|
conn_str(Opts, ["PWD=" ++ str(emqx_secret:unwrap(Password)) | Acc]);
|
||||||
conn_str([{_, _} | Opts], Acc) ->
|
conn_str([{_, _} | Opts], Acc) ->
|
||||||
conn_str(Opts, Acc).
|
conn_str(Opts, Acc).
|
||||||
|
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
%% -*- mode: erlang -*-
|
%% -*- mode: erlang -*-
|
||||||
{application, emqx_connector, [
|
{application, emqx_connector, [
|
||||||
{description, "EMQX Data Integration Connectors"},
|
{description, "EMQX Data Integration Connectors"},
|
||||||
{vsn, "0.1.30"},
|
{vsn, "0.1.31"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_connector_app, []}},
|
{mod, {emqx_connector_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -85,13 +85,10 @@ server() ->
|
||||||
adjust_fields(Fields) ->
|
adjust_fields(Fields) ->
|
||||||
lists:map(
|
lists:map(
|
||||||
fun
|
fun
|
||||||
({username, OrigUsernameFn}) ->
|
({username, Sc}) ->
|
||||||
{username, fun
|
%% to please dialyzer...
|
||||||
(required) ->
|
Override = #{type => hocon_schema:field_schema(Sc, type), required => true},
|
||||||
true;
|
{username, hocon_schema:override(Sc, Override)};
|
||||||
(Any) ->
|
|
||||||
OrigUsernameFn(Any)
|
|
||||||
end};
|
|
||||||
(Field) ->
|
(Field) ->
|
||||||
Field
|
Field
|
||||||
end,
|
end,
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_ctl, [
|
{application, emqx_ctl, [
|
||||||
{description, "Backend for emqx_ctl script"},
|
{description, "Backend for emqx_ctl script"},
|
||||||
{vsn, "0.1.2"},
|
{vsn, "0.1.3"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_ctl_app, []}},
|
{mod, {emqx_ctl_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -119,8 +119,7 @@ run_command(Cmd, Args) when is_atom(Cmd) ->
|
||||||
case lookup_command(Cmd) of
|
case lookup_command(Cmd) of
|
||||||
[{Mod, Fun}] ->
|
[{Mod, Fun}] ->
|
||||||
try
|
try
|
||||||
_ = apply(Mod, Fun, [Args]),
|
apply(Mod, Fun, [Args])
|
||||||
ok
|
|
||||||
catch
|
catch
|
||||||
_:Reason:Stacktrace ->
|
_:Reason:Stacktrace ->
|
||||||
?LOG_ERROR(#{
|
?LOG_ERROR(#{
|
||||||
|
|
|
@ -15,6 +15,9 @@
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
-module(emqx_ds).
|
-module(emqx_ds).
|
||||||
|
|
||||||
|
-include_lib("stdlib/include/ms_transform.hrl").
|
||||||
|
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||||
|
|
||||||
%% API:
|
%% API:
|
||||||
-export([ensure_shard/2]).
|
-export([ensure_shard/2]).
|
||||||
%% Messages:
|
%% Messages:
|
||||||
|
@ -27,6 +30,7 @@
|
||||||
session_drop/1,
|
session_drop/1,
|
||||||
session_suspend/1,
|
session_suspend/1,
|
||||||
session_add_iterator/2,
|
session_add_iterator/2,
|
||||||
|
session_get_iterator_id/2,
|
||||||
session_del_iterator/2,
|
session_del_iterator/2,
|
||||||
session_stats/0
|
session_stats/0
|
||||||
]).
|
]).
|
||||||
|
@ -39,6 +43,8 @@
|
||||||
message_stats/0,
|
message_stats/0,
|
||||||
message_store_opts/0,
|
message_store_opts/0,
|
||||||
session_id/0,
|
session_id/0,
|
||||||
|
replay/0,
|
||||||
|
replay_id/0,
|
||||||
iterator_id/0,
|
iterator_id/0,
|
||||||
iterator/0,
|
iterator/0,
|
||||||
shard/0,
|
shard/0,
|
||||||
|
@ -52,11 +58,13 @@
|
||||||
%% Type declarations
|
%% Type declarations
|
||||||
%%================================================================================
|
%%================================================================================
|
||||||
|
|
||||||
-type session_id() :: emqx_types:clientid().
|
%% Currently, this is the clientid. We avoid `emqx_types:clientid()' because that can be
|
||||||
|
%% an atom, in theory (?).
|
||||||
|
-type session_id() :: binary().
|
||||||
|
|
||||||
-type iterator() :: term().
|
-type iterator() :: term().
|
||||||
|
|
||||||
-opaque iterator_id() :: binary().
|
-type iterator_id() :: binary().
|
||||||
|
|
||||||
%%-type session() :: #session{}.
|
%%-type session() :: #session{}.
|
||||||
|
|
||||||
|
@ -73,9 +81,17 @@
|
||||||
|
|
||||||
%% Timestamp
|
%% Timestamp
|
||||||
%% Earliest possible timestamp is 0.
|
%% Earliest possible timestamp is 0.
|
||||||
%% TODO granularity?
|
%% TODO granularity? Currently, we should always use micro second, as that's the unit we
|
||||||
|
%% use in emqx_guid. Otherwise, the iterators won't match the message timestamps.
|
||||||
-type time() :: non_neg_integer().
|
-type time() :: non_neg_integer().
|
||||||
|
|
||||||
|
-type replay_id() :: binary().
|
||||||
|
|
||||||
|
-type replay() :: {
|
||||||
|
_TopicFilter :: emqx_topic:words(),
|
||||||
|
_StartTime :: time()
|
||||||
|
}.
|
||||||
|
|
||||||
%%================================================================================
|
%%================================================================================
|
||||||
%% API funcions
|
%% API funcions
|
||||||
%%================================================================================
|
%%================================================================================
|
||||||
|
@ -121,23 +137,20 @@ message_stats() ->
|
||||||
%%
|
%%
|
||||||
%% Note: session API doesn't handle session takeovers, it's the job of
|
%% Note: session API doesn't handle session takeovers, it's the job of
|
||||||
%% the broker.
|
%% the broker.
|
||||||
-spec session_open(emqx_types:clientid()) -> {_New :: boolean(), session_id(), [iterator_id()]}.
|
-spec session_open(emqx_types:clientid()) -> {_New :: boolean(), session_id()}.
|
||||||
session_open(ClientID) ->
|
session_open(ClientID) ->
|
||||||
{atomic, Ret} =
|
{atomic, Res} =
|
||||||
mria:transaction(
|
mria:transaction(?DS_SHARD, fun() ->
|
||||||
?DS_SHARD,
|
case mnesia:read(?SESSION_TAB, ClientID, write) of
|
||||||
fun() ->
|
[#session{}] ->
|
||||||
case mnesia:read(?SESSION_TAB, ClientID) of
|
{false, ClientID};
|
||||||
[#session{iterators = Iterators}] ->
|
[] ->
|
||||||
{false, ClientID, Iterators};
|
Session = #session{id = ClientID},
|
||||||
[] ->
|
mnesia:write(?SESSION_TAB, Session, write),
|
||||||
Session = #session{id = ClientID, iterators = []},
|
{true, ClientID}
|
||||||
mnesia:write(?SESSION_TAB, Session, write),
|
|
||||||
{true, ClientID, []}
|
|
||||||
end
|
|
||||||
end
|
end
|
||||||
),
|
end),
|
||||||
Ret.
|
Res.
|
||||||
|
|
||||||
%% @doc Called when a client reconnects with `clean session=true' or
|
%% @doc Called when a client reconnects with `clean session=true' or
|
||||||
%% during session GC
|
%% during session GC
|
||||||
|
@ -146,6 +159,7 @@ session_drop(ClientID) ->
|
||||||
{atomic, ok} = mria:transaction(
|
{atomic, ok} = mria:transaction(
|
||||||
?DS_SHARD,
|
?DS_SHARD,
|
||||||
fun() ->
|
fun() ->
|
||||||
|
%% TODO: ensure all iterators from this clientid are closed?
|
||||||
mnesia:delete({?SESSION_TAB, ClientID})
|
mnesia:delete({?SESSION_TAB, ClientID})
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
|
@ -160,19 +174,57 @@ session_suspend(_SessionId) ->
|
||||||
|
|
||||||
%% @doc Called when a client subscribes to a topic. Idempotent.
|
%% @doc Called when a client subscribes to a topic. Idempotent.
|
||||||
-spec session_add_iterator(session_id(), emqx_topic:words()) ->
|
-spec session_add_iterator(session_id(), emqx_topic:words()) ->
|
||||||
{ok, iterator_id()} | {error, session_not_found}.
|
{ok, iterator_id(), time(), _IsNew :: boolean()}.
|
||||||
session_add_iterator(_SessionId, _TopicFilter) ->
|
session_add_iterator(DSSessionId, TopicFilter) ->
|
||||||
%% TODO
|
IteratorRefId = {DSSessionId, TopicFilter},
|
||||||
{ok, <<"">>}.
|
{atomic, Res} =
|
||||||
|
mria:transaction(?DS_SHARD, fun() ->
|
||||||
|
case mnesia:read(?ITERATOR_REF_TAB, IteratorRefId, write) of
|
||||||
|
[] ->
|
||||||
|
{IteratorId, StartMS} = new_iterator_id(DSSessionId),
|
||||||
|
IteratorRef = #iterator_ref{
|
||||||
|
ref_id = IteratorRefId,
|
||||||
|
it_id = IteratorId,
|
||||||
|
start_time = StartMS
|
||||||
|
},
|
||||||
|
ok = mnesia:write(?ITERATOR_REF_TAB, IteratorRef, write),
|
||||||
|
?tp(
|
||||||
|
ds_session_subscription_added,
|
||||||
|
#{iterator_id => IteratorId, session_id => DSSessionId}
|
||||||
|
),
|
||||||
|
IsNew = true,
|
||||||
|
{ok, IteratorId, StartMS, IsNew};
|
||||||
|
[#iterator_ref{it_id = IteratorId, start_time = StartMS}] ->
|
||||||
|
?tp(
|
||||||
|
ds_session_subscription_present,
|
||||||
|
#{iterator_id => IteratorId, session_id => DSSessionId}
|
||||||
|
),
|
||||||
|
IsNew = false,
|
||||||
|
{ok, IteratorId, StartMS, IsNew}
|
||||||
|
end
|
||||||
|
end),
|
||||||
|
Res.
|
||||||
|
|
||||||
%% @doc Called when a client unsubscribes from a topic. Returns `true'
|
-spec session_get_iterator_id(session_id(), emqx_topic:words()) ->
|
||||||
%% if the session contained the subscription or `false' if it wasn't
|
{ok, iterator_id()} | {error, not_found}.
|
||||||
%% subscribed.
|
session_get_iterator_id(DSSessionId, TopicFilter) ->
|
||||||
-spec session_del_iterator(session_id(), emqx_topic:words()) ->
|
IteratorRefId = {DSSessionId, TopicFilter},
|
||||||
{ok, boolean()} | {error, session_not_found}.
|
case mnesia:dirty_read(?ITERATOR_REF_TAB, IteratorRefId) of
|
||||||
session_del_iterator(_SessionId, _TopicFilter) ->
|
[] ->
|
||||||
%% TODO
|
{error, not_found};
|
||||||
{ok, false}.
|
[#iterator_ref{it_id = IteratorId}] ->
|
||||||
|
{ok, IteratorId}
|
||||||
|
end.
|
||||||
|
|
||||||
|
%% @doc Called when a client unsubscribes from a topic.
|
||||||
|
-spec session_del_iterator(session_id(), emqx_topic:words()) -> ok.
|
||||||
|
session_del_iterator(DSSessionId, TopicFilter) ->
|
||||||
|
IteratorRefId = {DSSessionId, TopicFilter},
|
||||||
|
{atomic, ok} =
|
||||||
|
mria:transaction(?DS_SHARD, fun() ->
|
||||||
|
mnesia:delete(?ITERATOR_REF_TAB, IteratorRefId, write)
|
||||||
|
end),
|
||||||
|
ok.
|
||||||
|
|
||||||
-spec session_stats() -> #{}.
|
-spec session_stats() -> #{}.
|
||||||
session_stats() ->
|
session_stats() ->
|
||||||
|
@ -201,3 +253,9 @@ iterator_stats() ->
|
||||||
%%================================================================================
|
%%================================================================================
|
||||||
%% Internal functions
|
%% Internal functions
|
||||||
%%================================================================================
|
%%================================================================================
|
||||||
|
|
||||||
|
-spec new_iterator_id(session_id()) -> {iterator_id(), time()}.
|
||||||
|
new_iterator_id(DSSessionId) ->
|
||||||
|
NowMS = erlang:system_time(microsecond),
|
||||||
|
IteratorId = <<DSSessionId/binary, (emqx_guid:gen())/binary>>,
|
||||||
|
{IteratorId, NowMS}.
|
||||||
|
|
|
@ -25,7 +25,18 @@ init_mnesia() ->
|
||||||
{record_name, session},
|
{record_name, session},
|
||||||
{attributes, record_info(fields, session)}
|
{attributes, record_info(fields, session)}
|
||||||
]
|
]
|
||||||
).
|
),
|
||||||
|
ok = mria:create_table(
|
||||||
|
?ITERATOR_REF_TAB,
|
||||||
|
[
|
||||||
|
{rlog_shard, ?DS_SHARD},
|
||||||
|
{type, ordered_set},
|
||||||
|
{storage, storage()},
|
||||||
|
{record_name, iterator_ref},
|
||||||
|
{attributes, record_info(fields, iterator_ref)}
|
||||||
|
]
|
||||||
|
),
|
||||||
|
ok.
|
||||||
|
|
||||||
storage() ->
|
storage() ->
|
||||||
case mria:rocksdb_backend_available() of
|
case mria:rocksdb_backend_available() of
|
||||||
|
|
|
@ -17,11 +17,20 @@
|
||||||
-define(EMQX_DS_HRL, true).
|
-define(EMQX_DS_HRL, true).
|
||||||
|
|
||||||
-define(SESSION_TAB, emqx_ds_session).
|
-define(SESSION_TAB, emqx_ds_session).
|
||||||
|
-define(ITERATOR_REF_TAB, emqx_ds_iterator_ref).
|
||||||
-define(DS_SHARD, emqx_ds_shard).
|
-define(DS_SHARD, emqx_ds_shard).
|
||||||
|
|
||||||
-record(session, {
|
-record(session, {
|
||||||
|
%% same as clientid
|
||||||
id :: emqx_ds:session_id(),
|
id :: emqx_ds:session_id(),
|
||||||
iterators :: [{emqx_topic:words(), emqx_ds:iterator_id()}]
|
%% for future usage
|
||||||
|
props = #{} :: map()
|
||||||
|
}).
|
||||||
|
|
||||||
|
-record(iterator_ref, {
|
||||||
|
ref_id :: {emqx_ds:session_id(), emqx_topic:words()},
|
||||||
|
it_id :: emqx_ds:iterator_id(),
|
||||||
|
start_time :: emqx_ds:time()
|
||||||
}).
|
}).
|
||||||
|
|
||||||
-endif.
|
-endif.
|
||||||
|
|
|
@ -1,36 +0,0 @@
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
-module(emqx_ds_replay).
|
|
||||||
|
|
||||||
%% API:
|
|
||||||
-export([]).
|
|
||||||
|
|
||||||
-export_type([replay_id/0, replay/0]).
|
|
||||||
|
|
||||||
%%================================================================================
|
|
||||||
%% Type declarations
|
|
||||||
%%================================================================================
|
|
||||||
|
|
||||||
-type replay_id() :: binary().
|
|
||||||
|
|
||||||
-type replay() :: {
|
|
||||||
_TopicFilter :: emqx_ds:topic(),
|
|
||||||
_StartTime :: emqx_ds:time()
|
|
||||||
}.
|
|
||||||
|
|
||||||
%%================================================================================
|
|
||||||
%% API funcions
|
|
||||||
%%================================================================================
|
|
||||||
|
|
||||||
%%================================================================================
|
|
||||||
%% behaviour callbacks
|
|
||||||
%%================================================================================
|
|
||||||
|
|
||||||
%%================================================================================
|
|
||||||
%% Internal exports
|
|
||||||
%%================================================================================
|
|
||||||
|
|
||||||
%%================================================================================
|
|
||||||
%% Internal functions
|
|
||||||
%%================================================================================
|
|
|
@ -13,7 +13,15 @@
|
||||||
|
|
||||||
-export([make_iterator/2, next/1]).
|
-export([make_iterator/2, next/1]).
|
||||||
|
|
||||||
-export([preserve_iterator/2, restore_iterator/2, discard_iterator/2]).
|
-export([
|
||||||
|
preserve_iterator/2,
|
||||||
|
restore_iterator/2,
|
||||||
|
discard_iterator/2,
|
||||||
|
ensure_iterator/3,
|
||||||
|
discard_iterator_prefix/2,
|
||||||
|
list_iterator_prefix/2,
|
||||||
|
foldl_iterator_prefix/4
|
||||||
|
]).
|
||||||
|
|
||||||
%% behaviour callbacks:
|
%% behaviour callbacks:
|
||||||
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
|
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
|
||||||
|
@ -160,10 +168,10 @@ next(It = #it{module = Mod, data = ItData}) ->
|
||||||
end
|
end
|
||||||
end.
|
end.
|
||||||
|
|
||||||
-spec preserve_iterator(iterator(), emqx_ds:replay_id()) ->
|
-spec preserve_iterator(iterator(), emqx_ds:iterator_id()) ->
|
||||||
ok | {error, _TODO}.
|
ok | {error, _TODO}.
|
||||||
preserve_iterator(It = #it{}, ReplayID) ->
|
preserve_iterator(It = #it{}, IteratorID) ->
|
||||||
iterator_put_state(ReplayID, It).
|
iterator_put_state(IteratorID, It).
|
||||||
|
|
||||||
-spec restore_iterator(emqx_ds:shard(), emqx_ds:replay_id()) ->
|
-spec restore_iterator(emqx_ds:shard(), emqx_ds:replay_id()) ->
|
||||||
{ok, iterator()} | {error, _TODO}.
|
{ok, iterator()} | {error, _TODO}.
|
||||||
|
@ -177,11 +185,50 @@ restore_iterator(Shard, ReplayID) ->
|
||||||
Error
|
Error
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
-spec ensure_iterator(emqx_ds:shard(), emqx_ds:iterator_id(), emqx_ds:replay()) ->
|
||||||
|
{ok, iterator()} | {error, _TODO}.
|
||||||
|
ensure_iterator(Shard, IteratorID, Replay = {_TopicFilter, _StartMS}) ->
|
||||||
|
case restore_iterator(Shard, IteratorID) of
|
||||||
|
{ok, It} ->
|
||||||
|
{ok, It};
|
||||||
|
{error, not_found} ->
|
||||||
|
{ok, It} = make_iterator(Shard, Replay),
|
||||||
|
ok = emqx_ds_storage_layer:preserve_iterator(It, IteratorID),
|
||||||
|
{ok, It};
|
||||||
|
Error ->
|
||||||
|
Error
|
||||||
|
end.
|
||||||
|
|
||||||
-spec discard_iterator(emqx_ds:shard(), emqx_ds:replay_id()) ->
|
-spec discard_iterator(emqx_ds:shard(), emqx_ds:replay_id()) ->
|
||||||
ok | {error, _TODO}.
|
ok | {error, _TODO}.
|
||||||
discard_iterator(Shard, ReplayID) ->
|
discard_iterator(Shard, ReplayID) ->
|
||||||
iterator_delete(Shard, ReplayID).
|
iterator_delete(Shard, ReplayID).
|
||||||
|
|
||||||
|
-spec discard_iterator_prefix(emqx_ds:shard(), binary()) ->
|
||||||
|
ok | {error, _TODO}.
|
||||||
|
discard_iterator_prefix(Shard, KeyPrefix) ->
|
||||||
|
case do_discard_iterator_prefix(Shard, KeyPrefix) of
|
||||||
|
{ok, _} -> ok;
|
||||||
|
Error -> Error
|
||||||
|
end.
|
||||||
|
|
||||||
|
-spec list_iterator_prefix(
|
||||||
|
emqx_ds:shard(),
|
||||||
|
binary()
|
||||||
|
) -> {ok, [emqx_ds:iterator_id()]} | {error, _TODO}.
|
||||||
|
list_iterator_prefix(Shard, KeyPrefix) ->
|
||||||
|
do_list_iterator_prefix(Shard, KeyPrefix).
|
||||||
|
|
||||||
|
-spec foldl_iterator_prefix(
|
||||||
|
emqx_ds:shard(),
|
||||||
|
binary(),
|
||||||
|
fun((_Key :: binary(), _Value :: binary(), Acc) -> Acc),
|
||||||
|
Acc
|
||||||
|
) -> {ok, Acc} | {error, _TODO} when
|
||||||
|
Acc :: term().
|
||||||
|
foldl_iterator_prefix(Shard, KeyPrefix, Fn, Acc) ->
|
||||||
|
do_foldl_iterator_prefix(Shard, KeyPrefix, Fn, Acc).
|
||||||
|
|
||||||
%%================================================================================
|
%%================================================================================
|
||||||
%% behaviour callbacks
|
%% behaviour callbacks
|
||||||
%%================================================================================
|
%%================================================================================
|
||||||
|
@ -344,7 +391,11 @@ open_restore_iterator(#{module := Mod, data := Data}, It = #it{replay = Replay},
|
||||||
|
|
||||||
%%
|
%%
|
||||||
|
|
||||||
-define(KEY_REPLAY_STATE(ReplayID), <<(ReplayID)/binary, "rs">>).
|
-define(KEY_REPLAY_STATE(IteratorId), <<(IteratorId)/binary, "rs">>).
|
||||||
|
-define(KEY_REPLAY_STATE_PAT(KeyReplayState), begin
|
||||||
|
<<IteratorId:(size(KeyReplayState) - 2)/binary, "rs">> = (KeyReplayState),
|
||||||
|
IteratorId
|
||||||
|
end).
|
||||||
|
|
||||||
-define(ITERATION_WRITE_OPTS, []).
|
-define(ITERATION_WRITE_OPTS, []).
|
||||||
-define(ITERATION_READ_OPTS, []).
|
-define(ITERATION_READ_OPTS, []).
|
||||||
|
@ -391,6 +442,44 @@ restore_iterator_state(
|
||||||
It = #it{shard = Shard, gen = Gen, replay = {TopicFilter, StartTime}},
|
It = #it{shard = Shard, gen = Gen, replay = {TopicFilter, StartTime}},
|
||||||
open_restore_iterator(meta_get_gen(Shard, Gen), It, State).
|
open_restore_iterator(meta_get_gen(Shard, Gen), It, State).
|
||||||
|
|
||||||
|
do_list_iterator_prefix(Shard, KeyPrefix) ->
|
||||||
|
Fn = fun(K0, _V, Acc) ->
|
||||||
|
K = ?KEY_REPLAY_STATE_PAT(K0),
|
||||||
|
[K | Acc]
|
||||||
|
end,
|
||||||
|
do_foldl_iterator_prefix(Shard, KeyPrefix, Fn, []).
|
||||||
|
|
||||||
|
do_discard_iterator_prefix(Shard, KeyPrefix) ->
|
||||||
|
#db{handle = DBHandle, cf_iterator = CF} = meta_lookup(Shard, db),
|
||||||
|
Fn = fun(K, _V, _Acc) -> ok = rocksdb:delete(DBHandle, CF, K, ?ITERATION_WRITE_OPTS) end,
|
||||||
|
do_foldl_iterator_prefix(Shard, KeyPrefix, Fn, ok).
|
||||||
|
|
||||||
|
do_foldl_iterator_prefix(Shard, KeyPrefix, Fn, Acc) ->
|
||||||
|
#db{handle = Handle, cf_iterator = CF} = meta_lookup(Shard, db),
|
||||||
|
case rocksdb:iterator(Handle, CF, ?ITERATION_READ_OPTS) of
|
||||||
|
{ok, It} ->
|
||||||
|
NextAction = {seek, KeyPrefix},
|
||||||
|
do_foldl_iterator_prefix(Handle, CF, It, KeyPrefix, NextAction, Fn, Acc);
|
||||||
|
Error ->
|
||||||
|
Error
|
||||||
|
end.
|
||||||
|
|
||||||
|
do_foldl_iterator_prefix(DBHandle, CF, It, KeyPrefix, NextAction, Fn, Acc) ->
|
||||||
|
case rocksdb:iterator_move(It, NextAction) of
|
||||||
|
{ok, K = <<KeyPrefix:(size(KeyPrefix))/binary, _/binary>>, V} ->
|
||||||
|
NewAcc = Fn(K, V, Acc),
|
||||||
|
do_foldl_iterator_prefix(DBHandle, CF, It, KeyPrefix, next, Fn, NewAcc);
|
||||||
|
{ok, _K, _V} ->
|
||||||
|
ok = rocksdb:iterator_close(It),
|
||||||
|
{ok, Acc};
|
||||||
|
{error, invalid_iterator} ->
|
||||||
|
ok = rocksdb:iterator_close(It),
|
||||||
|
{ok, Acc};
|
||||||
|
Error ->
|
||||||
|
ok = rocksdb:iterator_close(It),
|
||||||
|
Error
|
||||||
|
end.
|
||||||
|
|
||||||
%% Functions for dealing with the metadata stored persistently in rocksdb
|
%% Functions for dealing with the metadata stored persistently in rocksdb
|
||||||
|
|
||||||
-define(CURRENT_GEN, <<"current">>).
|
-define(CURRENT_GEN, <<"current">>).
|
||||||
|
|
|
@ -2,7 +2,7 @@
|
||||||
{application, emqx_durable_storage, [
|
{application, emqx_durable_storage, [
|
||||||
{description, "Message persistence and subscription replays for EMQX"},
|
{description, "Message persistence and subscription replays for EMQX"},
|
||||||
% strict semver, bump manually!
|
% strict semver, bump manually!
|
||||||
{vsn, "0.1.2"},
|
{vsn, "0.1.3"},
|
||||||
{modules, []},
|
{modules, []},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{applications, [kernel, stdlib, rocksdb, gproc, mria]},
|
{applications, [kernel, stdlib, rocksdb, gproc, mria]},
|
||||||
|
|
|
@ -14,6 +14,8 @@
|
||||||
|
|
||||||
-opaque t() :: ets:tid().
|
-opaque t() :: ets:tid().
|
||||||
|
|
||||||
|
-export_type([t/0]).
|
||||||
|
|
||||||
-spec open() -> t().
|
-spec open() -> t().
|
||||||
open() ->
|
open() ->
|
||||||
ets:new(?MODULE, [ordered_set, {keypos, 1}]).
|
ets:new(?MODULE, [ordered_set, {keypos, 1}]).
|
||||||
|
|
|
@ -642,7 +642,7 @@ unsub_properties() ->
|
||||||
#{}.
|
#{}.
|
||||||
|
|
||||||
shutdown_reason() ->
|
shutdown_reason() ->
|
||||||
oneof([utf8(), {shutdown, emqx_proper_types:limited_atom()}]).
|
oneof([utf8(), {shutdown, emqx_proper_types:limited_latin_atom()}]).
|
||||||
|
|
||||||
authresult() ->
|
authresult() ->
|
||||||
?LET(
|
?LET(
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
{application, emqx_ft, [
|
{application, emqx_ft, [
|
||||||
{description, "EMQX file transfer over MQTT"},
|
{description, "EMQX file transfer over MQTT"},
|
||||||
{vsn, "0.1.5"},
|
{vsn, "0.1.6"},
|
||||||
{registered, []},
|
{registered, []},
|
||||||
{mod, {emqx_ft_app, []}},
|
{mod, {emqx_ft_app, []}},
|
||||||
{applications, [
|
{applications, [
|
||||||
|
|
|
@ -18,7 +18,9 @@
|
||||||
|
|
||||||
-include_lib("emqx/include/emqx.hrl").
|
-include_lib("emqx/include/emqx.hrl").
|
||||||
-include_lib("emqx/include/emqx_mqtt.hrl").
|
-include_lib("emqx/include/emqx_mqtt.hrl").
|
||||||
|
-include_lib("emqx/include/emqx_channel.hrl").
|
||||||
-include_lib("emqx/include/emqx_hooks.hrl").
|
-include_lib("emqx/include/emqx_hooks.hrl").
|
||||||
|
|
||||||
-include_lib("snabbkaffe/include/trace.hrl").
|
-include_lib("snabbkaffe/include/trace.hrl").
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
|
@ -28,7 +30,10 @@
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
on_message_publish/1,
|
on_message_publish/1,
|
||||||
on_message_puback/4
|
on_message_puback/4,
|
||||||
|
on_client_timeout/3,
|
||||||
|
on_process_down/4,
|
||||||
|
on_channel_unregistered/1
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
|
@ -36,8 +41,6 @@
|
||||||
encode_filemeta/1
|
encode_filemeta/1
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-export([on_complete/4]).
|
|
||||||
|
|
||||||
-export_type([
|
-export_type([
|
||||||
clientid/0,
|
clientid/0,
|
||||||
transfer/0,
|
transfer/0,
|
||||||
|
@ -85,17 +88,29 @@
|
||||||
checksum => checksum()
|
checksum => checksum()
|
||||||
}.
|
}.
|
||||||
|
|
||||||
|
-define(FT_EVENT(EVENT), {?MODULE, EVENT}).
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% API for app
|
%% API for app
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
hook() ->
|
hook() ->
|
||||||
ok = emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_LOWEST),
|
ok = emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_LOWEST),
|
||||||
ok = emqx_hooks:put('message.puback', {?MODULE, on_message_puback, []}, ?HP_LOWEST).
|
ok = emqx_hooks:put('message.puback', {?MODULE, on_message_puback, []}, ?HP_LOWEST),
|
||||||
|
ok = emqx_hooks:put('client.timeout', {?MODULE, on_client_timeout, []}, ?HP_LOWEST),
|
||||||
|
ok = emqx_hooks:put(
|
||||||
|
'client.monitored_process_down', {?MODULE, on_process_down, []}, ?HP_LOWEST
|
||||||
|
),
|
||||||
|
ok = emqx_hooks:put(
|
||||||
|
'cm.channel.unregistered', {?MODULE, on_channel_unregistered, []}, ?HP_LOWEST
|
||||||
|
).
|
||||||
|
|
||||||
unhook() ->
|
unhook() ->
|
||||||
ok = emqx_hooks:del('message.publish', {?MODULE, on_message_publish}),
|
ok = emqx_hooks:del('message.publish', {?MODULE, on_message_publish}),
|
||||||
ok = emqx_hooks:del('message.puback', {?MODULE, on_message_puback}).
|
ok = emqx_hooks:del('message.puback', {?MODULE, on_message_puback}),
|
||||||
|
ok = emqx_hooks:del('client.timeout', {?MODULE, on_client_timeout}),
|
||||||
|
ok = emqx_hooks:del('client.monitored_process_down', {?MODULE, on_process_down}),
|
||||||
|
ok = emqx_hooks:del('cm.channel.unregistered', {?MODULE, on_channel_unregistered}).
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% API
|
%% API
|
||||||
|
@ -145,6 +160,25 @@ on_message_puback(PacketId, #message{topic = Topic} = Msg, _PubRes, _RC) ->
|
||||||
ignore
|
ignore
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
on_channel_unregistered(ChannelPid) ->
|
||||||
|
ok = emqx_ft_async_reply:deregister_all(ChannelPid).
|
||||||
|
|
||||||
|
on_client_timeout(_TRef, ?FT_EVENT({MRef, PacketId}), Acc) ->
|
||||||
|
_ = erlang:demonitor(MRef, [flush]),
|
||||||
|
_ = emqx_ft_async_reply:take_by_mref(MRef),
|
||||||
|
{stop, [?REPLY_OUTGOING(?PUBACK_PACKET(PacketId, ?RC_UNSPECIFIED_ERROR)) | Acc]};
|
||||||
|
on_client_timeout(_TRef, _Event, Acc) ->
|
||||||
|
{ok, Acc}.
|
||||||
|
|
||||||
|
on_process_down(MRef, _Pid, Reason, Acc) ->
|
||||||
|
case emqx_ft_async_reply:take_by_mref(MRef) of
|
||||||
|
{ok, PacketId, TRef} ->
|
||||||
|
_ = emqx_utils:cancel_timer(TRef),
|
||||||
|
{stop, [?REPLY_OUTGOING(?PUBACK_PACKET(PacketId, reason_to_rc(Reason))) | Acc]};
|
||||||
|
not_found ->
|
||||||
|
{ok, Acc}
|
||||||
|
end.
|
||||||
|
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% Handlers for transfer messages
|
%% Handlers for transfer messages
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
@ -208,24 +242,13 @@ on_init(PacketId, Msg, Transfer, Meta) ->
|
||||||
transfer => Transfer,
|
transfer => Transfer,
|
||||||
filemeta => Meta
|
filemeta => Meta
|
||||||
}),
|
}),
|
||||||
PacketKey = {self(), PacketId},
|
%% Currently synchronous.
|
||||||
Callback = fun(Result) ->
|
%% If we want to make it async, we need to use `emqx_ft_async_reply`,
|
||||||
?MODULE:on_complete("store_filemeta", PacketKey, Transfer, Result)
|
%% like in `on_fin`.
|
||||||
end,
|
case store_filemeta(Transfer, Meta) of
|
||||||
with_responder(PacketKey, Callback, emqx_ft_conf:init_timeout(), fun() ->
|
ok -> ?RC_SUCCESS;
|
||||||
case store_filemeta(Transfer, Meta) of
|
{error, _} -> ?RC_UNSPECIFIED_ERROR
|
||||||
% Stored, ack through the responder right away
|
end.
|
||||||
ok ->
|
|
||||||
emqx_ft_responder:ack(PacketKey, ok);
|
|
||||||
% Storage operation started, packet will be acked by the responder
|
|
||||||
% {async, Pid} ->
|
|
||||||
% ok = emqx_ft_responder:kickoff(PacketKey, Pid),
|
|
||||||
% ok;
|
|
||||||
%% Storage operation failed, ack through the responder
|
|
||||||
{error, _} = Error ->
|
|
||||||
emqx_ft_responder:ack(PacketKey, Error)
|
|
||||||
end
|
|
||||||
end).
|
|
||||||
|
|
||||||
on_abort(_Msg, _FileId) ->
|
on_abort(_Msg, _FileId) ->
|
||||||
%% TODO
|
%% TODO
|
||||||
|
@ -240,21 +263,13 @@ on_segment(PacketId, Msg, Transfer, Offset, Checksum) ->
|
||||||
checksum => Checksum
|
checksum => Checksum
|
||||||
}),
|
}),
|
||||||
Segment = {Offset, Msg#message.payload},
|
Segment = {Offset, Msg#message.payload},
|
||||||
PacketKey = {self(), PacketId},
|
%% Currently synchronous.
|
||||||
Callback = fun(Result) ->
|
%% If we want to make it async, we need to use `emqx_ft_async_reply`,
|
||||||
?MODULE:on_complete("store_segment", PacketKey, Transfer, Result)
|
%% like in `on_fin`.
|
||||||
end,
|
case store_segment(Transfer, Segment) of
|
||||||
with_responder(PacketKey, Callback, emqx_ft_conf:store_segment_timeout(), fun() ->
|
ok -> ?RC_SUCCESS;
|
||||||
case store_segment(Transfer, Segment) of
|
{error, _} -> ?RC_UNSPECIFIED_ERROR
|
||||||
ok ->
|
end.
|
||||||
emqx_ft_responder:ack(PacketKey, ok);
|
|
||||||
% {async, Pid} ->
|
|
||||||
% ok = emqx_ft_responder:kickoff(PacketKey, Pid),
|
|
||||||
% ok;
|
|
||||||
{error, _} = Error ->
|
|
||||||
emqx_ft_responder:ack(PacketKey, Error)
|
|
||||||
end
|
|
||||||
end).
|
|
||||||
|
|
||||||
on_fin(PacketId, Msg, Transfer, FinalSize, FinalChecksum) ->
|
on_fin(PacketId, Msg, Transfer, FinalSize, FinalChecksum) ->
|
||||||
?tp(info, "file_transfer_fin", #{
|
?tp(info, "file_transfer_fin", #{
|
||||||
|
@ -265,37 +280,30 @@ on_fin(PacketId, Msg, Transfer, FinalSize, FinalChecksum) ->
|
||||||
checksum => FinalChecksum
|
checksum => FinalChecksum
|
||||||
}),
|
}),
|
||||||
%% TODO: handle checksum? Do we need it?
|
%% TODO: handle checksum? Do we need it?
|
||||||
FinPacketKey = {self(), PacketId},
|
emqx_ft_async_reply:with_new_packet(
|
||||||
Callback = fun(Result) ->
|
PacketId,
|
||||||
?MODULE:on_complete("assemble", FinPacketKey, Transfer, Result)
|
fun() ->
|
||||||
end,
|
case assemble(Transfer, FinalSize, FinalChecksum) of
|
||||||
with_responder(FinPacketKey, Callback, emqx_ft_conf:assemble_timeout(), fun() ->
|
ok ->
|
||||||
case assemble(Transfer, FinalSize, FinalChecksum) of
|
?RC_SUCCESS;
|
||||||
%% Assembling completed, ack through the responder right away
|
%% Assembling started, packet will be acked by monitor or timeout
|
||||||
ok ->
|
{async, Pid} ->
|
||||||
emqx_ft_responder:ack(FinPacketKey, ok);
|
ok = register_async_reply(Pid, PacketId),
|
||||||
%% Assembling started, packet will be acked by the responder
|
ok = emqx_ft_storage:kickoff(Pid),
|
||||||
{async, Pid} ->
|
undefined;
|
||||||
ok = emqx_ft_responder:kickoff(FinPacketKey, Pid),
|
{error, _} ->
|
||||||
ok;
|
?RC_UNSPECIFIED_ERROR
|
||||||
%% Assembling failed, ack through the responder
|
end
|
||||||
{error, _} = Error ->
|
end,
|
||||||
emqx_ft_responder:ack(FinPacketKey, Error)
|
undefined
|
||||||
end
|
).
|
||||||
end).
|
|
||||||
|
|
||||||
with_responder(Key, Callback, Timeout, CriticalSection) ->
|
register_async_reply(Pid, PacketId) ->
|
||||||
case emqx_ft_responder:start(Key, Callback, Timeout) of
|
MRef = erlang:monitor(process, Pid),
|
||||||
%% We have new packet
|
TRef = erlang:start_timer(
|
||||||
{ok, _} ->
|
emqx_ft_conf:assemble_timeout(), self(), ?FT_EVENT({MRef, PacketId})
|
||||||
CriticalSection();
|
),
|
||||||
%% Packet already received.
|
ok = emqx_ft_async_reply:register(PacketId, MRef, TRef).
|
||||||
%% Since we are still handling the previous one,
|
|
||||||
%% we probably have retransmit here
|
|
||||||
{error, {already_started, _}} ->
|
|
||||||
ok
|
|
||||||
end,
|
|
||||||
undefined.
|
|
||||||
|
|
||||||
store_filemeta(Transfer, Segment) ->
|
store_filemeta(Transfer, Segment) ->
|
||||||
try
|
try
|
||||||
|
@ -335,28 +343,6 @@ transfer(Msg, FileId) ->
|
||||||
ClientId = Msg#message.from,
|
ClientId = Msg#message.from,
|
||||||
{clientid_to_binary(ClientId), FileId}.
|
{clientid_to_binary(ClientId), FileId}.
|
||||||
|
|
||||||
on_complete(Op, {ChanPid, PacketId}, Transfer, Result) ->
|
|
||||||
?tp(debug, "on_complete", #{
|
|
||||||
operation => Op,
|
|
||||||
packet_id => PacketId,
|
|
||||||
transfer => Transfer
|
|
||||||
}),
|
|
||||||
case Result of
|
|
||||||
{Mode, ok} when Mode == ack orelse Mode == down ->
|
|
||||||
erlang:send(ChanPid, {puback, PacketId, [], ?RC_SUCCESS});
|
|
||||||
{Mode, {error, _} = Reason} when Mode == ack orelse Mode == down ->
|
|
||||||
?tp(error, Op ++ "_failed", #{
|
|
||||||
transfer => Transfer,
|
|
||||||
reason => Reason
|
|
||||||
}),
|
|
||||||
erlang:send(ChanPid, {puback, PacketId, [], ?RC_UNSPECIFIED_ERROR});
|
|
||||||
timeout ->
|
|
||||||
?tp(error, Op ++ "_timed_out", #{
|
|
||||||
transfer => Transfer
|
|
||||||
}),
|
|
||||||
erlang:send(ChanPid, {puback, PacketId, [], ?RC_UNSPECIFIED_ERROR})
|
|
||||||
end.
|
|
||||||
|
|
||||||
validate(Validations, Fun) ->
|
validate(Validations, Fun) ->
|
||||||
case do_validate(Validations, []) of
|
case do_validate(Validations, []) of
|
||||||
{ok, Parsed} ->
|
{ok, Parsed} ->
|
||||||
|
@ -429,3 +415,20 @@ clientid_to_binary(A) when is_atom(A) ->
|
||||||
atom_to_binary(A);
|
atom_to_binary(A);
|
||||||
clientid_to_binary(B) when is_binary(B) ->
|
clientid_to_binary(B) when is_binary(B) ->
|
||||||
B.
|
B.
|
||||||
|
|
||||||
|
reason_to_rc(Reason) ->
|
||||||
|
case map_down_reason(Reason) of
|
||||||
|
ok -> ?RC_SUCCESS;
|
||||||
|
{error, _} -> ?RC_UNSPECIFIED_ERROR
|
||||||
|
end.
|
||||||
|
|
||||||
|
map_down_reason(normal) ->
|
||||||
|
ok;
|
||||||
|
map_down_reason(shutdown) ->
|
||||||
|
ok;
|
||||||
|
map_down_reason({shutdown, Result}) ->
|
||||||
|
Result;
|
||||||
|
map_down_reason(noproc) ->
|
||||||
|
{error, noproc};
|
||||||
|
map_down_reason(Error) ->
|
||||||
|
{error, {internal_error, Error}}.
|
||||||
|
|
|
@ -0,0 +1,114 @@
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
||||||
|
%%
|
||||||
|
%% Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
%% you may not use this file except in compliance with the License.
|
||||||
|
%% You may obtain a copy of the License at
|
||||||
|
%%
|
||||||
|
%% http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
%%
|
||||||
|
%% Unless required by applicable law or agreed to in writing, software
|
||||||
|
%% distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
%% See the License for the specific language governing permissions and
|
||||||
|
%% limitations under the License.
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
|
-module(emqx_ft_async_reply).
|
||||||
|
|
||||||
|
-include_lib("emqx/include/logger.hrl").
|
||||||
|
-include_lib("emqx/include/types.hrl").
|
||||||
|
-include_lib("stdlib/include/ms_transform.hrl").
|
||||||
|
|
||||||
|
-export([
|
||||||
|
create_tables/0,
|
||||||
|
info/0
|
||||||
|
]).
|
||||||
|
|
||||||
|
-export([
|
||||||
|
register/3,
|
||||||
|
take_by_mref/1,
|
||||||
|
with_new_packet/3,
|
||||||
|
deregister_all/1
|
||||||
|
]).
|
||||||
|
|
||||||
|
-type channel_pid() :: pid().
|
||||||
|
-type mon_ref() :: reference().
|
||||||
|
-type timer_ref() :: reference().
|
||||||
|
-type packet_id() :: emqx_types:packet_id().
|
||||||
|
|
||||||
|
%% packets waiting for async workers
|
||||||
|
|
||||||
|
-define(MON_TAB, emqx_ft_async_mons).
|
||||||
|
-define(MON_KEY(MRef), ?MON_KEY(self(), MRef)).
|
||||||
|
-define(MON_KEY(ChannelPid, MRef), {ChannelPid, MRef}).
|
||||||
|
|
||||||
|
%% async worker monitors by packet ids
|
||||||
|
|
||||||
|
-define(PACKET_TAB, emqx_ft_async_packets).
|
||||||
|
-define(PACKET_KEY(PacketId), ?PACKET_KEY(self(), PacketId)).
|
||||||
|
-define(PACKET_KEY(ChannelPid, PacketId), {ChannelPid, PacketId}).
|
||||||
|
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% API
|
||||||
|
%% -------------------------------------------------------------------
|
||||||
|
|
||||||
|
-spec create_tables() -> ok.
|
||||||
|
create_tables() ->
|
||||||
|
EtsOptions = [
|
||||||
|
named_table,
|
||||||
|
public,
|
||||||
|
ordered_set,
|
||||||
|
{read_concurrency, true},
|
||||||
|
{write_concurrency, true}
|
||||||
|
],
|
||||||
|
ok = emqx_utils_ets:new(?MON_TAB, EtsOptions),
|
||||||
|
ok = emqx_utils_ets:new(?PACKET_TAB, EtsOptions),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
-spec register(packet_id(), mon_ref(), timer_ref()) -> ok.
|
||||||
|
register(PacketId, MRef, TRef) ->
|
||||||
|
_ = ets:insert(?PACKET_TAB, {?PACKET_KEY(PacketId), MRef}),
|
||||||
|
_ = ets:insert(?MON_TAB, {?MON_KEY(MRef), PacketId, TRef}),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
-spec with_new_packet(packet_id(), fun(() -> any()), any()) -> any().
|
||||||
|
with_new_packet(PacketId, Fun, Default) ->
|
||||||
|
case ets:member(?PACKET_TAB, ?PACKET_KEY(PacketId)) of
|
||||||
|
true -> Default;
|
||||||
|
false -> Fun()
|
||||||
|
end.
|
||||||
|
|
||||||
|
-spec take_by_mref(mon_ref()) -> {ok, packet_id(), timer_ref()} | not_found.
|
||||||
|
take_by_mref(MRef) ->
|
||||||
|
case ets:take(?MON_TAB, ?MON_KEY(MRef)) of
|
||||||
|
[{_, PacketId, TRef}] ->
|
||||||
|
_ = ets:delete(?PACKET_TAB, ?PACKET_KEY(PacketId)),
|
||||||
|
{ok, PacketId, TRef};
|
||||||
|
[] ->
|
||||||
|
not_found
|
||||||
|
end.
|
||||||
|
|
||||||
|
-spec deregister_all(channel_pid()) -> ok.
|
||||||
|
deregister_all(ChannelPid) ->
|
||||||
|
ok = deregister_packets(ChannelPid),
|
||||||
|
ok = deregister_mons(ChannelPid),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
-spec info() -> {non_neg_integer(), non_neg_integer()}.
|
||||||
|
info() ->
|
||||||
|
{ets:info(?MON_TAB, size), ets:info(?PACKET_TAB, size)}.
|
||||||
|
|
||||||
|
%%--------------------------------------------------------------------
|
||||||
|
%% Internal
|
||||||
|
%%-------------------------------------------------------------------
|
||||||
|
|
||||||
|
deregister_packets(ChannelPid) when is_pid(ChannelPid) ->
|
||||||
|
MS = [{{?PACKET_KEY(ChannelPid, '_'), '_'}, [], [true]}],
|
||||||
|
_ = ets:select_delete(?PACKET_TAB, MS),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
deregister_mons(ChannelPid) ->
|
||||||
|
MS = [{{?MON_KEY(ChannelPid, '_'), '_', '_'}, [], [true]}],
|
||||||
|
_ = ets:select_delete(?MON_TAB, MS),
|
||||||
|
ok.
|
|
@ -1,116 +0,0 @@
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
|
|
||||||
%%
|
|
||||||
%% Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
%% you may not use this file except in compliance with the License.
|
|
||||||
%% You may obtain a copy of the License at
|
|
||||||
%%
|
|
||||||
%% http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
%%
|
|
||||||
%% Unless required by applicable law or agreed to in writing, software
|
|
||||||
%% distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
%% See the License for the specific language governing permissions and
|
|
||||||
%% limitations under the License.
|
|
||||||
%%--------------------------------------------------------------------
|
|
||||||
|
|
||||||
-module(emqx_ft_responder).
|
|
||||||
|
|
||||||
-behaviour(gen_server).
|
|
||||||
|
|
||||||
-include_lib("emqx/include/logger.hrl").
|
|
||||||
-include_lib("emqx/include/types.hrl").
|
|
||||||
|
|
||||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
|
||||||
|
|
||||||
%% API
|
|
||||||
-export([start/3]).
|
|
||||||
-export([kickoff/2]).
|
|
||||||
-export([ack/2]).
|
|
||||||
|
|
||||||
%% Supervisor API
|
|
||||||
-export([start_link/3]).
|
|
||||||
|
|
||||||
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
|
|
||||||
|
|
||||||
-define(REF(Key), {via, gproc, {n, l, {?MODULE, Key}}}).

-type key() :: term().
-type respfun() :: fun(({ack, _Result} | {down, _Result} | timeout) -> _SideEffect).

%%--------------------------------------------------------------------
%% API
%% -------------------------------------------------------------------

-spec start(key(), respfun(), timeout()) -> startlink_ret().
start(Key, RespFun, Timeout) ->
    emqx_ft_responder_sup:start_child(Key, RespFun, Timeout).

-spec kickoff(key(), pid()) -> ok.
kickoff(Key, Pid) ->
    gen_server:call(?REF(Key), {kickoff, Pid}).

-spec ack(key(), _Result) -> _Return.
ack(Key, Result) ->
    % TODO: it's possible to avoid term copy
    gen_server:call(?REF(Key), {ack, Result}, infinity).

-spec start_link(key(), respfun(), timeout()) -> startlink_ret().
start_link(Key, RespFun, Timeout) ->
    gen_server:start_link(?REF(Key), ?MODULE, {Key, RespFun, Timeout}, []).

%%--------------------------------------------------------------------
%% gen_server callbacks
%% -------------------------------------------------------------------

init({Key, RespFun, Timeout}) ->
    _ = erlang:process_flag(trap_exit, true),
    _TRef = erlang:send_after(Timeout, self(), timeout),
    {ok, {Key, RespFun}}.

handle_call({kickoff, Pid}, _From, St) ->
    % TODO: more state?
    _MRef = erlang:monitor(process, Pid),
    _ = Pid ! kickoff,
    {reply, ok, St};
handle_call({ack, Result}, _From, {Key, RespFun}) ->
    Ret = apply(RespFun, [{ack, Result}]),
    ?tp(debug, ft_responder_ack, #{key => Key, result => Result, return => Ret}),
    {stop, {shutdown, Ret}, Ret, undefined};
handle_call(Msg, _From, State) ->
    ?SLOG(warning, #{msg => "unknown_call", call_msg => Msg}),
    {reply, {error, unknown_call}, State}.

handle_cast(Msg, State) ->
    ?SLOG(warning, #{msg => "unknown_cast", cast_msg => Msg}),
    {noreply, State}.

handle_info(timeout, {Key, RespFun}) ->
    Ret = apply(RespFun, [timeout]),
    ?tp(debug, ft_responder_timeout, #{key => Key, return => Ret}),
    {stop, {shutdown, Ret}, undefined};
handle_info({'DOWN', _MRef, process, _Pid, Reason}, {Key, RespFun}) ->
    Ret = apply(RespFun, [{down, map_down_reason(Reason)}]),
    ?tp(debug, ft_responder_procdown, #{key => Key, reason => Reason, return => Ret}),
    {stop, {shutdown, Ret}, undefined};
handle_info(Msg, State) ->
    ?SLOG(warning, #{msg => "unknown_message", info_msg => Msg}),
    {noreply, State}.

terminate(_Reason, undefined) ->
    ok;
terminate(Reason, {Key, RespFun}) ->
    Ret = apply(RespFun, [timeout]),
    ?tp(debug, ft_responder_shutdown, #{key => Key, reason => Reason, return => Ret}),
    ok.

map_down_reason(normal) ->
    ok;
map_down_reason(shutdown) ->
    ok;
map_down_reason({shutdown, Result}) ->
    Result;
map_down_reason(noproc) ->
    {error, noproc};
map_down_reason(Error) ->
    {error, {internal_error, Error}}.
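A minimal usage sketch of the responder API above, modeled on the emqx_ft_responder_SUITE that is removed further down in this diff; the key, callback, and timeout values are illustrative only and not part of the original code:

%% Illustrative only: start a keyed responder, then acknowledge it before the timeout fires.
Key = <<"transfer-1">>,
RespFun = fun
    ({ack, Result}) -> {acked, Result};
    ({down, Result}) -> {peer_down, Result};
    (timeout) -> timed_out
end,
{ok, _Pid} = emqx_ft_responder:start(Key, RespFun, 5000),
%% ack/2 applies RespFun to {ack, Result}, stops the responder, and returns the callback's result:
{acked, done} = emqx_ft_responder:ack(Key, done).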
@@ -1,48 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_ft_responder_sup).

-export([start_link/0]).
-export([start_child/3]).

-behaviour(supervisor).
-export([init/1]).

-define(SUPERVISOR, ?MODULE).

%%

-spec start_link() -> {ok, pid()}.
start_link() ->
    supervisor:start_link({local, ?SUPERVISOR}, ?MODULE, []).

start_child(Key, RespFun, Timeout) ->
    supervisor:start_child(?SUPERVISOR, [Key, RespFun, Timeout]).

-spec init(_) -> {ok, {supervisor:sup_flags(), [supervisor:child_spec()]}}.
init(_) ->
    Flags = #{
        strategy => simple_one_for_one,
        intensity => 100,
        period => 100
    },
    ChildSpec = #{
        id => responder,
        start => {emqx_ft_responder, start_link, []},
        restart => temporary
    },
    {ok, {Flags, [ChildSpec]}}.
@@ -23,6 +23,7 @@
     store_filemeta/2,
     store_segment/2,
     assemble/3,
+    kickoff/1,
 
     files/0,
     files/1,
@@ -121,6 +122,13 @@ store_segment(Transfer, Segment) ->
 assemble(Transfer, Size, FinOpts) ->
     dispatch(assemble, [Transfer, Size, FinOpts]).
 
+-spec kickoff(pid()) -> ok.
+kickoff(Pid) ->
+    _ = erlang:send(Pid, kickoff),
+    ok.
+
+%%
+
 -spec files() ->
     {ok, page(file_info(), _)} | {error, term()}.
 files() ->
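The new kickoff/1 above only sends the kickoff atom to the target process; the receiving side is expected to block until it arrives (the emqx_ft_async_reply_SUITE added later in this diff uses the same pattern). A hedged sketch, where do_assemble/0 is a placeholder rather than a real API:

%% Illustrative only: a worker that waits for the kickoff signal before doing any work.
Worker = spawn_link(fun() ->
    receive
        kickoff -> do_assemble()  %% do_assemble/0 is hypothetical, standing in for the real work
    end
end),
ok = emqx_ft_storage:kickoff(Worker).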
@@ -28,6 +28,8 @@ start_link() ->
     supervisor:start_link({local, ?SERVER}, ?MODULE, []).
 
 init([]) ->
+    ok = emqx_ft_async_reply:create_tables(),
+
     SupFlags = #{
         strategy => one_for_one,
         intensity => 100,
@@ -52,14 +54,5 @@ init([]) ->
         modules => [emqx_ft_storage_fs_reader_sup]
     },
 
-    Responder = #{
-        id => emqx_ft_responder_sup,
-        start => {emqx_ft_responder_sup, start_link, []},
-        restart => permanent,
-        shutdown => infinity,
-        type => worker,
-        modules => [emqx_ft_responder_sup]
-    },
-
-    ChildSpecs = [Responder, AssemblerSup, FileReaderSup],
+    ChildSpecs = [AssemblerSup, FileReaderSup],
     {ok, {SupFlags, ChildSpecs}}.
@@ -37,7 +37,7 @@ all() ->
 
 groups() ->
     [
-        {single_node, [parallel], [
+        {single_node, [], [
             t_assemble_crash,
             t_corrupted_segment_retry,
             t_invalid_checksum,
@@ -76,7 +76,7 @@ init_per_suite(Config) ->
         [
             {emqx_ft, #{config => emqx_ft_test_helpers:config(Storage)}}
         ],
-        #{work_dir => ?config(priv_dir, Config)}
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
     ),
     [{suite_apps, Apps} | Config].
 
@@ -0,0 +1,247 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_ft_async_reply_SUITE).

-compile(export_all).
-compile(nowarn_export_all).

-include_lib("common_test/include/ct.hrl").
-include_lib("stdlib/include/assert.hrl").
-include_lib("emqx/include/asserts.hrl").

all() -> emqx_common_test_helpers:all(?MODULE).

init_per_suite(Config) ->
    Apps = emqx_cth_suite:start(
        [
            {emqx, #{override_env => [{boot_modules, [broker, listeners]}]}},
            {emqx_ft, "file_transfer { enable = true, assemble_timeout = 1s }"}
        ],
        #{work_dir => ?config(priv_dir, Config)}
    ),
    [{suite_apps, Apps} | Config].

end_per_suite(Config) ->
    ok = emqx_cth_suite:stop(?config(suite_apps, Config)),
    ok.

init_per_testcase(_Case, Config) ->
    ok = snabbkaffe:start_trace(),
    Config.

end_per_testcase(_Case, _Config) ->
    ok = snabbkaffe:stop(),
    ok.

%%--------------------------------------------------------------------
%% Tests
%%--------------------------------------------------------------------

t_register(_Config) ->
    PacketId = 1,
    MRef = make_ref(),
    TRef = make_ref(),
    ok = emqx_ft_async_reply:register(PacketId, MRef, TRef),

    ?assertEqual(
        undefined,
        emqx_ft_async_reply:with_new_packet(PacketId, fun() -> ok end, undefined)
    ),

    ?assertEqual(
        ok,
        emqx_ft_async_reply:with_new_packet(2, fun() -> ok end, undefined)
    ),

    ?assertEqual(
        {ok, PacketId, TRef},
        emqx_ft_async_reply:take_by_mref(MRef)
    ).

t_process_independence(_Config) ->
    PacketId = 1,
    MRef = make_ref(),
    TRef = make_ref(),
    ok = emqx_ft_async_reply:register(PacketId, MRef, TRef),

    Self = self(),

    spawn_link(fun() ->
        Self ! emqx_ft_async_reply:take_by_mref(MRef)
    end),

    Res1 =
        receive
            Msg1 -> Msg1
        end,

    ?assertEqual(
        not_found,
        Res1
    ),

    spawn_link(fun() ->
        Self ! emqx_ft_async_reply:with_new_packet(PacketId, fun() -> ok end, undefined)
    end),

    Res2 =
        receive
            Msg2 -> Msg2
        end,

    ?assertEqual(
        ok,
        Res2
    ).

t_take(_Config) ->
    PacketId = 1,
    MRef = make_ref(),
    TRef = make_ref(),
    ok = emqx_ft_async_reply:register(PacketId, MRef, TRef),

    ?assertEqual(
        {ok, PacketId, TRef},
        emqx_ft_async_reply:take_by_mref(MRef)
    ),

    ?assertEqual(
        not_found,
        emqx_ft_async_reply:take_by_mref(MRef)
    ),

    ?assertEqual(
        ok,
        emqx_ft_async_reply:with_new_packet(2, fun() -> ok end, undefined)
    ).

t_cleanup(_Config) ->
    PacketId = 1,
    MRef0 = make_ref(),
    TRef0 = make_ref(),
    MRef1 = make_ref(),
    TRef1 = make_ref(),
    ok = emqx_ft_async_reply:register(PacketId, MRef0, TRef0),

    Self = self(),

    Pid = spawn_link(fun() ->
        ok = emqx_ft_async_reply:register(PacketId, MRef1, TRef1),
        receive
            kickoff ->
                ?assertEqual(
                    undefined,
                    emqx_ft_async_reply:with_new_packet(PacketId, fun() -> ok end, undefined)
                ),

                ?assertEqual(
                    {ok, PacketId, TRef1},
                    emqx_ft_async_reply:take_by_mref(MRef1)
                ),

                Self ! done
        end
    end),

    ?assertEqual(
        undefined,
        emqx_ft_async_reply:with_new_packet(PacketId, fun() -> ok end, undefined)
    ),

    ok = emqx_ft_async_reply:deregister_all(Self),

    ?assertEqual(
        ok,
        emqx_ft_async_reply:with_new_packet(PacketId, fun() -> ok end, undefined)
    ),

    Pid ! kickoff,

    receive
        done -> ok
    end.

t_reply_by_tiemout(_Config) ->
    process_flag(trap_exit, true),
    ClientId = atom_to_binary(?FUNCTION_NAME),
    C = emqx_ft_test_helpers:start_client(ClientId, node()),

    SleepForever = fun() ->
        Ref = make_ref(),
        receive
            Ref -> ok
        end
    end,

    ok = meck:new(emqx_ft_storage, [passthrough]),
    meck:expect(emqx_ft_storage, assemble, fun(_, _, _) -> {async, spawn_link(SleepForever)} end),

    FinTopic = <<"$file/fakeid/fin/999999">>,

    ?assertMatch(
        {ok, #{reason_code_name := unspecified_error}},
        emqtt:publish(C, FinTopic, <<>>, 1)
    ),

    meck:unload(emqx_ft_storage),
    emqtt:stop(C).

t_cleanup_by_cm(_Config) ->
    process_flag(trap_exit, true),
    ClientId = atom_to_binary(?FUNCTION_NAME),
    C = emqx_ft_test_helpers:start_client(ClientId, node()),

    ok = meck:new(emqx_ft_storage, [passthrough]),
    meck:expect(emqx_ft_storage, kickoff, fun(_) -> meck:exception(error, oops) end),

    FinTopic = <<"$file/fakeid/fin/999999">>,

    [ClientPid] = emqx_cm:lookup_channels(ClientId),

    ?assertWaitEvent(
        begin
            emqtt:publish(C, FinTopic, <<>>, 1),
            exit(ClientPid, kill)
        end,
        #{?snk_kind := emqx_cm_clean_down, client_id := ClientId},
        1000
    ),

    ?assertEqual(
        {0, 0},
        emqx_ft_async_reply:info()
    ),

    meck:unload(emqx_ft_storage).

t_unrelated_events(_Config) ->
    process_flag(trap_exit, true),
    ClientId = atom_to_binary(?FUNCTION_NAME),
    C = emqx_ft_test_helpers:start_client(ClientId, node()),
    [ClientPid] = emqx_cm:lookup_channels(ClientId),

    erlang:monitor(process, ClientPid),

    ClientPid ! {'DOWN', make_ref(), process, self(), normal},
    ClientPid ! {timeout, make_ref(), unknown_timer_event},

    ?assertNotReceive(
        {'DOWN', _Ref, process, ClientPid, _Reason},
        500
    ),

    emqtt:stop(C).
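The emqx_ft_async_reply module exercised above is not itself part of this excerpt; judging only from the suite, its core calls behave roughly as follows (inferred from the tests, not authoritative):

%% Illustrative only: entries are registered once and looked up by packet id or by monitor ref.
PacketId = 1,
MRef = make_ref(),
TRef = make_ref(),
ok = emqx_ft_async_reply:register(PacketId, MRef, TRef),
%% For an already-registered packet id the callback is skipped and the default is returned:
undefined = emqx_ft_async_reply:with_new_packet(PacketId, fun() -> ok end, undefined),
%% Taking by monitor ref yields the packet id and timer ref exactly once:
{ok, PacketId, TRef} = emqx_ft_async_reply:take_by_mref(MRef),
not_found = emqx_ft_async_reply:take_by_mref(MRef).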
@@ -32,13 +32,12 @@ end_per_suite(_Config) ->
     ok.
 
 init_per_testcase(Case, Config) ->
-    WorkDir = filename:join(?config(priv_dir, Config), Case),
     Apps = emqx_cth_suite:start(
         [
            {emqx_conf, #{}},
            {emqx_ft, #{config => "file_transfer {}"}}
         ],
-        #{work_dir => WorkDir}
+        #{work_dir => emqx_cth_suite:work_dir(Case, Config)}
     ),
     [{suite_apps, Apps} | Config].
 
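Several suites in this diff swap hand-rolled priv_dir handling for emqx_cth_suite:work_dir/1,2. Presumably those helpers derive a per-suite (or per-testcase) working directory much like the code they replace; a sketch of that assumption only, not the real implementation:

%% Assumed behaviour, mirroring the removed lines above; the actual emqx_cth_suite
%% helpers may nest further (e.g. per test group).
work_dir(Config) ->
    ?config(priv_dir, Config).

work_dir(Case, Config) ->
    filename:join(?config(priv_dir, Config), Case).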
@@ -1,84 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%%     http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_ft_responder_SUITE).

-compile(export_all).
-compile(nowarn_export_all).

-include_lib("stdlib/include/assert.hrl").

all() -> emqx_common_test_helpers:all(?MODULE).

init_per_suite(Config) ->
    ok = emqx_common_test_helpers:start_apps([emqx_ft], emqx_ft_test_helpers:env_handler(Config)),
    Config.

end_per_suite(_Config) ->
    ok = emqx_common_test_helpers:stop_apps([emqx_ft]),
    ok.

init_per_testcase(_Case, Config) ->
    Config.

end_per_testcase(_Case, _Config) ->
    ok.

t_start_ack(_Config) ->
    Key = <<"test">>,
    DefaultAction = fun({ack, Ref}) -> Ref end,
    ?assertMatch(
        {ok, _Pid},
        emqx_ft_responder:start(Key, DefaultAction, 1000)
    ),
    ?assertMatch(
        {error, {already_started, _Pid}},
        emqx_ft_responder:start(Key, DefaultAction, 1000)
    ),
    Ref = make_ref(),
    ?assertEqual(
        Ref,
        emqx_ft_responder:ack(Key, Ref)
    ),
    ?assertExit(
        {noproc, _},
        emqx_ft_responder:ack(Key, Ref)
    ).

t_timeout(_Config) ->
    Key = <<"test">>,
    Self = self(),
    DefaultAction = fun(timeout) -> Self ! {timeout, Key} end,
    {ok, _Pid} = emqx_ft_responder:start(Key, DefaultAction, 20),
    receive
        {timeout, Key} ->
            ok
    after 100 ->
        ct:fail("emqx_ft_responder not called")
    end,
    ?assertExit(
        {noproc, _},
        emqx_ft_responder:ack(Key, oops)
    ).

t_unknown_msgs(_Config) ->
    {ok, Pid} = emqx_ft_responder:start(make_ref(), fun(_) -> ok end, 100),
    Pid ! {unknown_msg, <<"test">>},
    ok = gen_server:cast(Pid, {unknown_msg, <<"test">>}),
    ?assertEqual(
        {error, unknown_call},
        gen_server:call(Pid, {unknown_call, <<"test">>})
    ).
@@ -36,12 +36,11 @@ groups() ->
 
 init_per_suite(Config) ->
     Storage = emqx_ft_test_helpers:local_storage(Config),
-    WorkDir = ?config(priv_dir, Config),
     Apps = emqx_cth_suite:start(
         [
             {emqx_ft, #{config => emqx_ft_test_helpers:config(Storage)}}
         ],
-        #{work_dir => WorkDir}
+        #{work_dir => emqx_cth_suite:work_dir(Config)}
     ),
     [{suite_apps, Apps} | Config].
 
@@ -28,7 +28,7 @@ all() ->
     emqx_common_test_helpers:all(?MODULE).
 
 init_per_suite(Config) ->
-    Apps = emqx_cth_suite:start([emqx], #{work_dir => ?config(priv_dir, Config)}),
+    Apps = emqx_cth_suite:start([emqx], #{work_dir => emqx_cth_suite:work_dir(Config)}),
     [{suite_apps, Apps} | Config].
 
 end_per_suite(Config) ->
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_gateway, [
     {description, "The Gateway management application"},
-    {vsn, "0.1.23"},
+    {vsn, "0.1.24"},
     {registered, []},
     {mod, {emqx_gateway_app, []}},
     {applications, [kernel, stdlib, emqx, emqx_authn, emqx_ctl]},
@@ -1,6 +1,6 @@
 {application, emqx_ldap, [
     {description, "EMQX LDAP Connector"},
-    {vsn, "0.1.1"},
+    {vsn, "0.1.2"},
     {registered, []},
     {applications, [
         kernel,
@@ -3,7 +3,7 @@
     {id, "emqx_machine"},
     {description, "The EMQX Machine"},
     % strict semver, bump manually!
-    {vsn, "0.2.12"},
+    {vsn, "0.2.13"},
     {modules, []},
     {registered, []},
     {applications, [kernel, stdlib, emqx_ctl]},
@@ -30,12 +30,19 @@
 -export([sorted_reboot_apps/1, reboot_apps/0]).
 -endif.
 
-%% these apps are always (re)started by emqx_machine
+%% These apps are always (re)started by emqx_machine:
 -define(BASIC_REBOOT_APPS, [gproc, esockd, ranch, cowboy, emqx]).
 
-%% If any of these applications crash, the entire EMQX node shuts down
+%% If any of these applications crash, the entire EMQX node shuts down:
 -define(BASIC_PERMANENT_APPS, [mria, ekka, esockd, emqx]).
 
+%% These apps should NOT be (re)started automatically:
+-define(EXCLUDED_APPS, [system_monitor, observer_cli, jq]).
+
+%% These apps are optional, they may or may not be present in the
+%% release, depending on the build flags:
+-define(OPTIONAL_APPS, [bcrypt, observer]).
+
 post_boot() ->
     ok = ensure_apps_started(),
     ok = print_vsn(),
@@ -150,9 +157,9 @@ basic_reboot_apps() ->
     ?BASIC_REBOOT_APPS ++ (BusinessApps -- excluded_apps()).
 
 excluded_apps() ->
-    OptionalApps = [bcrypt, jq, observer],
-    [system_monitor, observer_cli] ++
-        [App || App <- OptionalApps, not is_app(App)].
+    %% Optional apps _should_ be (re)started automatically, but only
+    %% when they are found in the release:
+    ?EXCLUDED_APPS ++ [App || App <- ?OPTIONAL_APPS, not is_app(App)].
 
 is_app(Name) ->
     case application:load(Name) of
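The hunk above cuts off inside is_app/1. Presumably it reports whether the named application is present in the release, along these lines (a sketch of the assumed remainder, not the verbatim tail of the function):

is_app(Name) ->
    case application:load(Name) of
        ok -> true;
        {error, {already_loaded, Name}} -> true;
        _ -> false
    end.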
Some files were not shown because too many files have changed in this diff.