Merge branch 'master' into 0213-port-blackbox-cover-report-from-v4

Commit 3458aee296 by Zaiming (Stone) Shi, committed via GitHub on 2023-02-22 22:21:44 +01:00
195 changed files with 6189 additions and 933 deletions


@@ -4,5 +4,6 @@ MONGO_TAG=5
 PGSQL_TAG=13
 LDAP_TAG=2.4.50
 INFLUXDB_TAG=2.5.0
+TDENGINE_TAG=3.0.2.4
 TARGET=emqx/emqx


@ -1,64 +0,0 @@
.PHONY: help up down ct ct-all bash run
define usage
make -f .ci/docker-compose-file/Makefile.local up
make -f .ci/docker-compose-file/Makefile.local ct CONTAINER=erlang SUITE=apps/emqx_authn/test/emqx_authn_mnesia_SUITE.erl
make -f .ci/docker-compose-file/Makefile.local down
endef
export usage
help:
@echo "$$usage"
up:
env \
MYSQL_TAG=8 \
REDIS_TAG=7.0 \
MONGO_TAG=5 \
PGSQL_TAG=13 \
docker-compose \
-f .ci/docker-compose-file/docker-compose.yaml \
-f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
-f .ci/docker-compose-file/docker-compose-mysql-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-mysql-tls.yaml \
-f .ci/docker-compose-file/docker-compose-pgsql-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \
-f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
up -d --build --remove-orphans
down:
docker-compose \
-f .ci/docker-compose-file/docker-compose.yaml \
-f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
-f .ci/docker-compose-file/docker-compose-mysql-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-mysql-tls.yaml \
-f .ci/docker-compose-file/docker-compose-pgsql-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \
-f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
down --remove-orphans
ct:
docker exec -i "$(CONTAINER)" bash -c "rebar3 ct --name 'test@127.0.0.1' --readable true -v --suite $(SUITE)"
ct-all:
docker exec -i "$(CONTAINER)" bash -c "make ct"
bash:
docker exec -it "$(CONTAINER)" bash
run:
docker exec -it "$(CONTAINER)" bash -c "make run";


@@ -19,7 +19,7 @@ services:
     command: /bin/generate-certs.sh
   kdc:
     hostname: kdc.emqx.net
-    image: ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04
+    image: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04
     container_name: kdc.emqx.net
     networks:
       emqx_bridge:
@@ -39,9 +39,12 @@ services:
     container_name: kafka-1.emqx.net
     hostname: kafka-1.emqx.net
     depends_on:
-      - "kdc"
-      - "zookeeper"
-      - "ssl_cert_gen"
+      kdc:
+        condition: service_started
+      zookeeper:
+        condition: service_started
+      ssl_cert_gen:
+        condition: service_completed_successfully
     environment:
       KAFKA_BROKER_ID: 1
       KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
@@ -52,7 +55,7 @@ services:
       KAFKA_SASL_ENABLED_MECHANISMS: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512,GSSAPI
       KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
       KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
-      KAFKA_JMX_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas.conf"
+      KAFKA_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas.conf"
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
      KAFKA_CREATE_TOPICS_NG: test-topic-one-partition:1:1,test-topic-two-partitions:2:1,test-topic-three-partitions:3:1,
      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer


@ -0,0 +1,11 @@
version: '3.9'
services:
tdengine_server:
container_name: tdengine
image: tdengine/tdengine:${TDENGINE_TAG}
restart: always
ports:
- "6041:6041"
networks:
- emqx_bridge


@@ -17,6 +17,7 @@ services:
       - 13307:3307
       - 15432:5432
       - 15433:5433
+      - 16041:6041
     command:
       - "-host=0.0.0.0"
       - "-config=/config/toxiproxy.json"


@@ -3,7 +3,7 @@ version: '3.9'
 services:
   erlang:
     container_name: erlang
-    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-ubuntu20.04}
+    image: ${DOCKER_CT_RUNNER_IMAGE:-ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04}
     env_file:
       - conf.env
     environment:


@@ -1,7 +1,7 @@
 ARG BUILD_FROM=postgres:13
 FROM ${BUILD_FROM}
 ARG POSTGRES_USER=postgres
-COPY --chown=$POSTGRES_USER ./pgsql/pg_hba.conf /var/lib/postgresql/pg_hba.conf
+COPY --chown=$POSTGRES_USER ./pgsql/pg_hba_tls.conf /var/lib/postgresql/pg_hba.conf
 COPY --chown=$POSTGRES_USER certs/server.key /var/lib/postgresql/server.key
 COPY --chown=$POSTGRES_USER certs/server.crt /var/lib/postgresql/server.crt
 COPY --chown=$POSTGRES_USER certs/ca.crt /var/lib/postgresql/root.crt


@ -0,0 +1,8 @@
# TYPE DATABASE USER CIDR-ADDRESS METHOD
local all all trust
# TODO: also test with `cert`? will require client certs
hostssl all all 0.0.0.0/0 password
hostssl all all ::/0 password
hostssl all www-data 0.0.0.0/0 cert clientcert=1
hostssl all postgres 0.0.0.0/0 cert clientcert=1


@@ -41,5 +41,11 @@
     "listen": "0.0.0.0:5433",
     "upstream": "pgsql-tls:5432",
     "enabled": true
+  },
+  {
+    "name": "tdengine_restful",
+    "listen": "0.0.0.0:6041",
+    "upstream": "tdengine:6041",
+    "enabled": true
   }
 ]


@@ -3,7 +3,7 @@ inputs:
   profile: # emqx, emqx-enterprise
     required: true
     type: string
-  otp: # 25.1.2-2, 24.3.4.2-1
+  otp: # 25.1.2-2, 24.3.4.2-2
     required: true
     type: string
   os:


@@ -25,7 +25,7 @@ jobs:
   prepare:
     runs-on: ubuntu-20.04
     # prepare source with any OTP version, no need for a matrix
-    container: "ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04"
     outputs:
       PROFILE: ${{ steps.get_profile.outputs.PROFILE }}
@@ -125,9 +125,9 @@ jobs:
         # NOTE: 'otp' and 'elixir' are to configure emqx-builder image
         # only support latest otp and elixir, not a matrix
         builder:
-          - 5.0-27 # update to latest
+          - 5.0-28 # update to latest
         otp:
-          - 24.3.4.2-1 # switch to 25 once ready to release 5.1
+          - 24.3.4.2-2 # switch to 25 once ready to release 5.1
         elixir:
           - 'no_elixir'
           - '1.13.4' # update to latest


@@ -23,7 +23,7 @@ on:
 jobs:
   prepare:
     runs-on: ubuntu-20.04
-    container: ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-24.3.4.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04
     outputs:
       BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
       IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }}
@@ -150,7 +150,7 @@ jobs:
       profile:
         - ${{ needs.prepare.outputs.BUILD_PROFILE }}
       otp:
-        - 24.3.4.2-1
+        - 24.3.4.2-2
       os:
         - macos-11
         - macos-12-arm64
@@ -201,7 +201,7 @@ jobs:
       profile:
         - ${{ needs.prepare.outputs.BUILD_PROFILE }}
       otp:
-        - 24.3.4.2-1
+        - 24.3.4.2-2
       arch:
         - amd64
         - arm64
@@ -218,7 +218,7 @@ jobs:
         - aws-arm64
         - ubuntu-20.04
       builder:
-        - 5.0-27
+        - 5.0-28
       elixir:
         - 1.13.4
       exclude:
@@ -232,7 +232,7 @@ jobs:
         arch: amd64
         os: ubuntu22.04
         build_machine: ubuntu-22.04
-        builder: 5.0-27
+        builder: 5.0-28
         elixir: 1.13.4
         release_with: elixir
       - profile: emqx
@@ -240,7 +240,7 @@ jobs:
         arch: amd64
         os: amzn2
         build_machine: ubuntu-22.04
-        builder: 5.0-27
+        builder: 5.0-28
         elixir: 1.13.4
         release_with: elixir


@@ -29,13 +29,13 @@ jobs:
       fail-fast: false
       matrix:
         profile:
-          - ["emqx", "24.3.4.2-1", "el7"]
-          - ["emqx", "24.3.4.2-1", "ubuntu20.04"]
+          - ["emqx", "24.3.4.2-2", "el7"]
+          - ["emqx", "24.3.4.2-2", "ubuntu20.04"]
           - ["emqx", "25.1.2-2", "ubuntu22.04"]
-          - ["emqx-enterprise", "24.3.4.2-1", "ubuntu20.04"]
+          - ["emqx-enterprise", "24.3.4.2-2", "ubuntu20.04"]
           - ["emqx-enterprise", "25.1.2-2", "ubuntu22.04"]
         builder:
-          - 5.0-27
+          - 5.0-28
         elixir:
           - 1.13.4
@@ -128,7 +128,7 @@ jobs:
       - emqx
       - emqx-enterprise
     otp:
-      - 24.3.4.2-1
+      - 24.3.4.2-2
     os:
       - macos-11
       - macos-12-arm64
@@ -154,6 +154,50 @@ jobs:
           name: ${{ matrix.os }}
           path: _packages/**/*
 
+  docker:
+    runs-on: ubuntu-22.04
+    strategy:
+      fail-fast: false
+      matrix:
+        profile:
+          - emqx
+          - emqx-enterprise
+    steps:
+      - uses: actions/checkout@v3
+      - name: prepare
+        run: |
+          EMQX_NAME=${{ matrix.profile }}
+          PKG_VSN=${PKG_VSN:-$(./pkg-vsn.sh $EMQX_NAME)}
+          EMQX_IMAGE_TAG=emqx/$EMQX_NAME:test
+          echo "EMQX_NAME=$EMQX_NAME" >> $GITHUB_ENV
+          echo "PKG_VSN=$PKG_VSN" >> $GITHUB_ENV
+          echo "EMQX_IMAGE_TAG=$EMQX_IMAGE_TAG" >> $GITHUB_ENV
+      - uses: docker/setup-buildx-action@v2
+      - name: build and export to Docker
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          file: ./deploy/docker/Dockerfile
+          load: true
+          tags: ${{ env.EMQX_IMAGE_TAG }}
+          build-args: |
+            EMQX_NAME=${{ env.EMQX_NAME }}
+      - name: test docker image
+        run: |
+          CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG)
+          HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID)
+          ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT
+          docker stop $CID
+      - name: export docker image
+        run: |
+          docker save $EMQX_IMAGE_TAG | gzip > $EMQX_NAME-$PKG_VSN.tar.gz
+      - uses: actions/upload-artifact@v3
+        with:
+          name: "${{ matrix.profile }}-docker"
+          path: "${{ env.EMQX_NAME }}-${{ env.PKG_VSN }}.tar.gz"
+
   spellcheck:
     needs: linux
     strategy:


@@ -5,7 +5,7 @@ on: [pull_request, push]
 jobs:
   check_deps_integrity:
     runs-on: ubuntu-20.04
-    container: ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-25.1.2-2-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04
     steps:
       - uses: actions/checkout@v3


@@ -5,7 +5,7 @@ on: [pull_request]
 jobs:
   code_style_check:
     runs-on: ubuntu-20.04
-    container: "ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-25.1.2-2-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04"
     steps:
       - uses: actions/checkout@v3
         with:


@@ -8,7 +8,7 @@ jobs:
   elixir_apps_check:
     runs-on: ubuntu-latest
     # just use the latest builder
-    container: "ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-25.1.2-2-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04"
     strategy:
       fail-fast: false


@@ -7,7 +7,7 @@ on: [pull_request, push]
 jobs:
   elixir_deps_check:
     runs-on: ubuntu-20.04
-    container: ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-25.1.2-2-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04
     steps:
       - name: Checkout


@@ -17,7 +17,7 @@ jobs:
       profile:
         - emqx
         - emqx-enterprise
-    container: ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-25.1.2-2-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04
     steps:
       - name: Checkout
         uses: actions/checkout@v3


@@ -54,7 +54,7 @@ jobs:
           OUTPUT_DIR=${{ steps.profile.outputs.s3dir }}
           aws s3 cp --recursive s3://$BUCKET/$OUTPUT_DIR/${{ github.ref_name }} packages
           cd packages
-          DEFAULT_BEAM_PLATFORM='otp24.3.4.2-1'
+          DEFAULT_BEAM_PLATFORM='otp24.3.4.2-2'
           # all packages including full-name and default-name are uploaded to s3
           # but we only upload default-name packages (and elixir) as github artifacts
           # so we rename (overwrite) non-default packages before uploading


@@ -12,9 +12,9 @@ jobs:
     strategy:
       matrix:
         builder:
-          - 5.0-27
+          - 5.0-28
         otp:
-          - 24.3.4.2-1
+          - 24.3.4.2-2
           - 25.1.2-2
         # no need to use more than 1 version of Elixir, since tests
         # run using only Erlang code. This is needed just to specify


@@ -16,7 +16,7 @@ jobs:
   prepare:
     runs-on: ubuntu-20.04
     # prepare source with any OTP version, no need for a matrix
-    container: ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-24.3.4.2-1-debian11
+    container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11
     steps:
       - uses: actions/checkout@v3
@@ -49,9 +49,9 @@ jobs:
       os:
         - ["debian11", "debian:11-slim"]
       builder:
-        - 5.0-27
+        - 5.0-28
       otp:
-        - 24.3.4.2-1
+        - 24.3.4.2-2
       elixir:
         - 1.13.4
       arch:
@@ -122,9 +122,9 @@ jobs:
       os:
         - ["debian11", "debian:11-slim"]
       builder:
-        - 5.0-27
+        - 5.0-28
       otp:
-        - 24.3.4.2-1
+        - 24.3.4.2-2
       elixir:
         - 1.13.4
       arch:


@@ -15,7 +15,7 @@ concurrency:
 jobs:
   relup_test_plan:
     runs-on: ubuntu-20.04
-    container: "ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04"
     outputs:
       CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
       OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }}


@@ -30,13 +30,13 @@ jobs:
           MATRIX="$(echo "${APPS}" | jq -c '
             [
               (.[] | select(.profile == "emqx") | . + {
-                builder: "5.0-27",
+                builder: "5.0-28",
                 otp: "25.1.2-2",
                 elixir: "1.13.4"
               }),
               (.[] | select(.profile == "emqx-enterprise") | . + {
-                builder: "5.0-27",
-                otp: ["24.3.4.2-1", "25.1.2-2"][],
+                builder: "5.0-28",
+                otp: ["24.3.4.2-2", "25.1.2-2"][],
                 elixir: "1.13.4"
               })
             ]
@@ -56,7 +56,7 @@ jobs:
           echo "runs-on=${RUNS_ON}" | tee -a $GITHUB_OUTPUT
   prepare:
-    runs-on: aws-amd64
+    runs-on: ${{ needs.build-matrix.outputs.runs-on }}
     needs: [build-matrix]
     strategy:
       fail-fast: false
@@ -161,6 +161,7 @@ jobs:
           PGSQL_TAG: "13"
           REDIS_TAG: "7.0"
           INFLUXDB_TAG: "2.5.0"
+          TDENGINE_TAG: "3.0.2.4"
           PROFILE: ${{ matrix.profile }}
           CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
         run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }}
@@ -223,12 +224,12 @@ jobs:
       - ct
       - ct_docker
     runs-on: ubuntu-20.04
-    container: "ghcr.io/emqx/emqx-builder/5.0-27:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04"
     steps:
       - uses: AutoModality/action-clean@v1
       - uses: actions/download-artifact@v3
         with:
-          name: source-emqx-enterprise-24.3.4.2-1
+          name: source-emqx-enterprise-24.3.4.2-2
          path: .
       - name: unzip source code
         run: unzip -q source.zip


@@ -1,2 +1,2 @@
-erlang 24.3.4.2-1
+erlang 24.3.4.2-2
 elixir 1.13.4-otp-24


@@ -2,12 +2,12 @@ REBAR = $(CURDIR)/rebar3
 BUILD = $(CURDIR)/build
 SCRIPTS = $(CURDIR)/scripts
 export EMQX_RELUP ?= true
-export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1-debian11
+export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11
 export EMQX_DEFAULT_RUNNER = debian:11-slim
 export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
 export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
-export EMQX_DASHBOARD_VERSION ?= v1.1.7
-export EMQX_EE_DASHBOARD_VERSION ?= e1.0.3
+export EMQX_DASHBOARD_VERSION ?= v1.1.8
+export EMQX_EE_DASHBOARD_VERSION ?= e1.0.4-beta.3
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
 ifeq ($(OS),Windows_NT)


@@ -34,6 +34,10 @@ listeners.wss.default {
 #    enabled = true
 #    bind = "0.0.0.0:14567"
 #    max_connections = 1024000
-#    keyfile = "{{ platform_etc_dir }}/certs/key.pem"
-#    certfile = "{{ platform_etc_dir }}/certs/cert.pem"
-#}
+#    ssl_options {
+#      verify = verify_none
+#      keyfile = "{{ platform_etc_dir }}/certs/key.pem"
+#      certfile = "{{ platform_etc_dir }}/certs/cert.pem"
+#      cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
+#    }
+#}


@@ -1495,6 +1495,17 @@ In case PSK cipher suites are intended, make sure to configure
     }
 }
 
+common_ssl_opts_schema_hibernate_after {
+    desc {
+        en: """ Hibernate the SSL process after idling for amount of time reducing its memory footprint. """
+        zh: """ 在闲置一定时间后休眠 SSL 进程,减少其内存占用。"""
+    }
+    label: {
+        en: "hibernate after"
+        zh: "闲置多久后休眠"
+    }
+}
+
 ciphers_schema_common {
     desc {
         en: """This config holds TLS cipher suite names separated by comma,
@@ -1804,8 +1815,8 @@ fields_listener_enabled {
 fields_mqtt_quic_listener_certfile {
     desc {
-        en: """Path to the certificate file."""
-        zh: """证书文件。"""
+        en: """Path to the certificate file. Will be deprecated in 5.1, use .ssl_options.certfile instead."""
+        zh: """证书文件。在 5.1 中会被废弃,使用 .ssl_options.certfile 代替。"""
     }
     label: {
         en: "Certificate file"
@@ -1815,8 +1826,8 @@ fields_mqtt_quic_listener_certfile {
 fields_mqtt_quic_listener_keyfile {
     desc {
-        en: """Path to the secret key file."""
-        zh: """私钥文件。"""
+        en: """Path to the secret key file. Will be deprecated in 5.1, use .ssl_options.keyfile instead."""
+        zh: """私钥文件。在 5.1 中会被废弃,使用 .ssl_options.keyfile 代替。"""
     }
     label: {
         en: "Key file"
@@ -1857,6 +1868,17 @@ fields_mqtt_quic_listener_keep_alive_interval {
     }
 }
 
+fields_mqtt_quic_listener_ssl_options {
+    desc {
+        en: """TLS options for QUIC transport"""
+        zh: """QUIC 传输层的 TLS 选项"""
+    }
+    label: {
+        en: "TLS Options"
+        zh: "TLS 选项"
+    }
+}
+
 base_listener_bind {
     desc {
         en: """IP address and port for the listening socket."""


@ -0,0 +1,25 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-ifndef(EMQX_QUIC_HRL).
-define(EMQX_QUIC_HRL, true).
%% MQTT Over QUIC Shutdown Error code.
-define(MQTT_QUIC_CONN_NOERROR, 0).
-define(MQTT_QUIC_CONN_ERROR_CTRL_STREAM_DOWN, 1).
-define(MQTT_QUIC_CONN_ERROR_OVERLOADED, 2).
-endif.
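These application-defined codes are carried as the error code when the QUIC connection is shut down. A minimal sketch, built on the quicer call used later in this commit, of how the connection owner maps a control-stream exit reason onto one of the codes (the wrapper function name is illustrative):

    -include("emqx_quic.hrl").
    -include_lib("quicer/include/quicer.hrl").

    %% Illustrative wrapper: close the QUIC connection with an MQTT-over-QUIC code.
    shutdown_with_code(Conn, ExitReason) ->
        Code =
            case ExitReason of
                normal -> ?MQTT_QUIC_CONN_NOERROR;
                _Other -> ?MQTT_QUIC_CONN_ERROR_CTRL_STREAM_DOWN
            end,
        quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, Code).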


@@ -43,7 +43,7 @@
         {meck, "0.9.2"},
         {proper, "1.4.0"},
         {bbmustache, "1.10.0"},
-        {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.7.0"}}}
+        {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.8.2"}}}
     ]},
     {extra_src_dirs, [{"test", [recursive]}]}
 ]}


@@ -24,7 +24,20 @@ IsQuicSupp = fun() ->
 end,
 
 Bcrypt = {bcrypt, {git, "https://github.com/emqx/erlang-bcrypt.git", {tag, "0.6.0"}}},
-Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.16"}}}.
+Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.111"}}}.
+
+Dialyzer = fun(Config) ->
+    {dialyzer, OldDialyzerConfig} = lists:keyfind(dialyzer, 1, Config),
+    {plt_extra_apps, OldExtra} = lists:keyfind(plt_extra_apps, 1, OldDialyzerConfig),
+    Extra = OldExtra ++ [quicer || IsQuicSupp()],
+    NewDialyzerConfig = [{plt_extra_apps, Extra} | OldDialyzerConfig],
+    lists:keystore(
+        dialyzer,
+        1,
+        Config,
+        {dialyzer, NewDialyzerConfig}
+    )
+end.
 
 ExtraDeps = fun(C) ->
     {deps, Deps0} = lists:keyfind(deps, 1, C),
@@ -43,4 +56,4 @@ ExtraDeps = fun(C) ->
     )
 end,
 
-ExtraDeps(CONFIG).
+Dialyzer(ExtraDeps(CONFIG)).
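The new Dialyzer fun rewrites the dialyzer section of the rebar config so that quicer is listed in plt_extra_apps only when QUIC support is compiled in. A toy sketch of the same keyfind/keystore rewrite; the app name is made up and the IsQuicSupp() condition is dropped for brevity:

    %% Toy illustration of the rewrite performed by the Dialyzer fun above.
    add_quicer_to_plt(Config0) ->
        {dialyzer, DialyzerCfg} = lists:keyfind(dialyzer, 1, Config0),
        {plt_extra_apps, Extra0} = lists:keyfind(plt_extra_apps, 1, DialyzerCfg),
        %% the extended entry is prepended ahead of the old one
        NewDialyzerCfg = [{plt_extra_apps, Extra0 ++ [quicer]} | DialyzerCfg],
        lists:keystore(dialyzer, 1, Config0, {dialyzer, NewDialyzerCfg}).

    %% add_quicer_to_plt([{dialyzer, [{plt_extra_apps, [some_app]}]}]).
    %% => [{dialyzer, [{plt_extra_apps, [some_app, quicer]},
    %%                 {plt_extra_apps, [some_app]}]}]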


@@ -3,7 +3,7 @@
     {id, "emqx"},
     {description, "EMQX Core"},
     % strict semver, bump manually!
-    {vsn, "5.0.17"},
+    {vsn, "5.0.18"},
     {modules, []},
     {registered, []},
     {applications, [


@@ -1,33 +1,5 @@
 %% -*- mode: erlang -*-
 %% Unless you know what you are doing, DO NOT edit manually!!
 {VSN,
-  [{"5.0.0",
-    [{load_module,emqx_quic_connection,brutal_purge,soft_purge,[]},
-     {load_module,emqx_config,brutal_purge,soft_purge,[]},
-     {load_module,emqx_channel,brutal_purge,soft_purge,[]},
-     {load_module,emqx_schema,brutal_purge,soft_purge,[]},
-     {load_module,emqx_release,brutal_purge,soft_purge,[]},
-     {load_module,emqx_authentication,brutal_purge,soft_purge,[]},
-     {load_module,emqx_metrics,brutal_purge,soft_purge,[]},
-     {add_module,emqx_exclusive_subscription},
-     {apply,{emqx_exclusive_subscription,on_add_module,[]}},
-     {load_module,emqx_broker,brutal_purge,soft_purge,[]},
-     {load_module,emqx_mqtt_caps,brutal_purge,soft_purge,[]},
-     {load_module,emqx_topic,brutal_purge,soft_purge,[]},
-     {load_module,emqx_relup}]},
-   {<<".*">>,[]}],
-  [{"5.0.0",
-    [{load_module,emqx_quic_connection,brutal_purge,soft_purge,[]},
-     {load_module,emqx_config,brutal_purge,soft_purge,[]},
-     {load_module,emqx_channel,brutal_purge,soft_purge,[]},
-     {load_module,emqx_schema,brutal_purge,soft_purge,[]},
-     {load_module,emqx_release,brutal_purge,soft_purge,[]},
-     {load_module,emqx_authentication,brutal_purge,soft_purge,[]},
-     {load_module,emqx_metrics,brutal_purge,soft_purge,[]},
-     {load_module,emqx_broker,brutal_purge,soft_purge,[]},
-     {load_module,emqx_mqtt_caps,brutal_purge,soft_purge,[]},
-     {load_module,emqx_topic,brutal_purge,soft_purge,[]},
-     {apply,{emqx_exclusive_subscription,on_delete_module,[]}},
-     {delete_module,emqx_exclusive_subscription},
-     {load_module,emqx_relup}]},
-   {<<".*">>,[]}]}.
+ [{<<".*">>,[]}],
+ [{<<".*">>,[]}]}.


@ -14,7 +14,13 @@
%% limitations under the License. %% limitations under the License.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% MQTT/TCP|TLS Connection %% This module interacts with the transport layer of MQTT
%% Transport:
%% - TCP connection
%% - TCP/TLS connection
%% - QUIC Stream
%%
%% for WebSocket @see emqx_ws_connection.erl
-module(emqx_connection). -module(emqx_connection).
-include("emqx.hrl"). -include("emqx.hrl").
@ -111,7 +117,10 @@
limiter_buffer :: queue:queue(pending_req()), limiter_buffer :: queue:queue(pending_req()),
%% limiter timers %% limiter timers
limiter_timer :: undefined | reference() limiter_timer :: undefined | reference(),
%% QUIC conn owner pid if in use.
quic_conn_pid :: maybe(pid())
}). }).
-record(retry, { -record(retry, {
@ -189,12 +198,16 @@
]} ]}
). ).
-spec start_link( -spec start_link
esockd:transport(), (esockd:transport(), esockd:socket(), emqx_channel:opts()) ->
esockd:socket() | {pid(), quicer:connection_handler()}, {ok, pid()};
emqx_channel:opts() (
) -> emqx_quic_stream,
{ok, pid()}. {ConnOwner :: pid(), quicer:connection_handle(), quicer:new_conn_props()},
emqx_quic_connection:cb_state()
) ->
{ok, pid()}.
start_link(Transport, Socket, Options) -> start_link(Transport, Socket, Options) ->
Args = [self(), Transport, Socket, Options], Args = [self(), Transport, Socket, Options],
CPid = proc_lib:spawn_link(?MODULE, init, Args), CPid = proc_lib:spawn_link(?MODULE, init, Args),
@ -329,6 +342,7 @@ init_state(
}, },
ParseState = emqx_frame:initial_parse_state(FrameOpts), ParseState = emqx_frame:initial_parse_state(FrameOpts),
Serialize = emqx_frame:serialize_opts(), Serialize = emqx_frame:serialize_opts(),
%% Init Channel
Channel = emqx_channel:init(ConnInfo, Opts), Channel = emqx_channel:init(ConnInfo, Opts),
GcState = GcState =
case emqx_config:get_zone_conf(Zone, [force_gc]) of case emqx_config:get_zone_conf(Zone, [force_gc]) of
@ -359,7 +373,9 @@ init_state(
zone = Zone, zone = Zone,
listener = Listener, listener = Listener,
limiter_buffer = queue:new(), limiter_buffer = queue:new(),
limiter_timer = undefined limiter_timer = undefined,
%% for quic streams to inherit
quic_conn_pid = maps:get(conn_pid, Opts, undefined)
}. }.
run_loop( run_loop(
@ -476,7 +492,9 @@ process_msg([Msg | More], State) ->
{ok, Msgs, NState} -> {ok, Msgs, NState} ->
process_msg(append_msg(More, Msgs), NState); process_msg(append_msg(More, Msgs), NState);
{stop, Reason, NState} -> {stop, Reason, NState} ->
{stop, Reason, NState} {stop, Reason, NState};
{stop, Reason} ->
{stop, Reason, State}
end end
catch catch
exit:normal -> exit:normal ->
@ -507,7 +525,6 @@ append_msg(Q, Msg) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Handle a Msg %% Handle a Msg
handle_msg({'$gen_call', From, Req}, State) -> handle_msg({'$gen_call', From, Req}, State) ->
case handle_call(From, Req, State) of case handle_call(From, Req, State) of
{reply, Reply, NState} -> {reply, Reply, NState} ->
@ -525,11 +542,10 @@ handle_msg({Inet, _Sock, Data}, State) when Inet == tcp; Inet == ssl ->
inc_counter(incoming_bytes, Oct), inc_counter(incoming_bytes, Oct),
ok = emqx_metrics:inc('bytes.received', Oct), ok = emqx_metrics:inc('bytes.received', Oct),
when_bytes_in(Oct, Data, State); when_bytes_in(Oct, Data, State);
handle_msg({quic, Data, _Sock, _, _, _}, State) -> handle_msg({quic, Data, _Stream, #{len := Len}}, State) when is_binary(Data) ->
Oct = iolist_size(Data), inc_counter(incoming_bytes, Len),
inc_counter(incoming_bytes, Oct), ok = emqx_metrics:inc('bytes.received', Len),
ok = emqx_metrics:inc('bytes.received', Oct), when_bytes_in(Len, Data, State);
when_bytes_in(Oct, Data, State);
handle_msg(check_cache, #state{limiter_buffer = Cache} = State) -> handle_msg(check_cache, #state{limiter_buffer = Cache} = State) ->
case queue:peek(Cache) of case queue:peek(Cache) of
empty -> empty ->
@ -595,9 +611,20 @@ handle_msg({inet_reply, _Sock, {error, Reason}}, State) ->
handle_msg({connack, ConnAck}, State) -> handle_msg({connack, ConnAck}, State) ->
handle_outgoing(ConnAck, State); handle_outgoing(ConnAck, State);
handle_msg({close, Reason}, State) -> handle_msg({close, Reason}, State) ->
%% @FIXME here it could be close due to appl error.
?TRACE("SOCKET", "socket_force_closed", #{reason => Reason}), ?TRACE("SOCKET", "socket_force_closed", #{reason => Reason}),
handle_info({sock_closed, Reason}, close_socket(State)); handle_info({sock_closed, Reason}, close_socket(State));
handle_msg({event, connected}, State = #state{channel = Channel}) -> handle_msg(
{event, connected},
State = #state{
channel = Channel,
serialize = Serialize,
parse_state = PS,
quic_conn_pid = QuicConnPid
}
) ->
QuicConnPid =/= undefined andalso
emqx_quic_connection:activate_data_streams(QuicConnPid, {PS, Serialize, Channel}),
ClientId = emqx_channel:info(clientid, Channel), ClientId = emqx_channel:info(clientid, Channel),
emqx_cm:insert_channel_info(ClientId, info(State), stats(State)); emqx_cm:insert_channel_info(ClientId, info(State), stats(State));
handle_msg({event, disconnected}, State = #state{channel = Channel}) -> handle_msg({event, disconnected}, State = #state{channel = Channel}) ->
@ -654,6 +681,12 @@ maybe_raise_exception(#{
stacktrace := Stacktrace stacktrace := Stacktrace
}) -> }) ->
erlang:raise(Exception, Context, Stacktrace); erlang:raise(Exception, Context, Stacktrace);
maybe_raise_exception({shutdown, normal}) ->
ok;
maybe_raise_exception(normal) ->
ok;
maybe_raise_exception(shutdown) ->
ok;
maybe_raise_exception(Reason) -> maybe_raise_exception(Reason) ->
exit(Reason). exit(Reason).
@ -748,6 +781,7 @@ when_bytes_in(Oct, Data, State) ->
NState NState
). ).
%% @doc: return a reversed Msg list
-compile({inline, [next_incoming_msgs/3]}). -compile({inline, [next_incoming_msgs/3]}).
next_incoming_msgs([Packet], Msgs, State) -> next_incoming_msgs([Packet], Msgs, State) ->
{ok, [{incoming, Packet} | Msgs], State}; {ok, [{incoming, Packet} | Msgs], State};
@ -870,6 +904,7 @@ send(IoData, #state{transport = Transport, socket = Socket, channel = Channel})
ok; ok;
Error = {error, _Reason} -> Error = {error, _Reason} ->
%% Send an inet_reply to postpone handling the error %% Send an inet_reply to postpone handling the error
%% @FIXME: why not just return error?
self() ! {inet_reply, Socket, Error}, self() ! {inet_reply, Socket, Error},
ok ok
end. end.
@ -893,12 +928,14 @@ handle_info({sock_error, Reason}, State) ->
false -> ok false -> ok
end, end,
handle_info({sock_closed, Reason}, close_socket(State)); handle_info({sock_closed, Reason}, close_socket(State));
handle_info({quic, peer_send_shutdown, _Stream}, State) -> %% handle QUIC control stream events
handle_info({sock_closed, force}, close_socket(State)); handle_info({quic, Event, Handle, Prop}, State) when is_atom(Event) ->
handle_info({quic, closed, _Channel, ReasonFlag}, State) -> case emqx_quic_stream:Event(Handle, Prop, State) of
handle_info({sock_closed, ReasonFlag}, State); {{continue, Msgs}, NewState} ->
handle_info({quic, closed, _Stream}, State) -> {ok, Msgs, NewState};
handle_info({sock_closed, force}, State); Other ->
Other
end;
handle_info(Info, State) -> handle_info(Info, State) ->
with_channel(handle_info, [Info], State). with_channel(handle_info, [Info], State).
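In the hunks above, the old catch-all `{quic, Data, _Sock, _, _, _}` clause is split in two: with quicer's active-N streams, payloads now arrive as `{quic, Data, Stream, #{len := Len}}` with the byte count precomputed, while connection and control-stream events arrive as `{quic, Event, Handle, Props}` with an atom event name that is dispatched to the emqx_quic_stream callbacks. A condensed restatement, in one place, of the two new clauses (the wrapper name handle_quic_msg is illustrative):

    %% Condensed sketch of the new QUIC message handling in emqx_connection:
    %% binary payloads are accounted as incoming bytes, atom-tagged events are
    %% forwarded to the emqx_quic_stream callback of the same name.
    handle_quic_msg({quic, Data, _Stream, #{len := Len}}, State) when is_binary(Data) ->
        inc_counter(incoming_bytes, Len),
        ok = emqx_metrics:inc('bytes.received', Len),
        when_bytes_in(Len, Data, State);
    handle_quic_msg({quic, Event, Handle, Prop}, State) when is_atom(Event) ->
        case emqx_quic_stream:Event(Handle, Prop, State) of
            {{continue, Msgs}, NewState} -> {ok, Msgs, NewState};
            Other -> Other
        end.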


@@ -35,7 +35,6 @@ init([]) ->
             child_spec(emqx_hooks, worker),
             child_spec(emqx_stats, worker),
             child_spec(emqx_metrics, worker),
-            child_spec(emqx_ctl, worker),
             child_spec(emqx_authn_authz_metrics_sup, supervisor)
         ]
     }}.


@@ -110,11 +110,11 @@ fields(limiter) ->
     ];
 fields(node_opts) ->
     [
-        {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
+        {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})},
         {burst,
             ?HOCON(burst_rate(), #{
                 desc => ?DESC(burst),
-                default => 0
+                default => <<"0">>
             })}
     ];
 fields(client_fields) ->
@@ -128,14 +128,14 @@ fields(client_fields) ->
     ];
 fields(bucket_opts) ->
     [
-        {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
-        {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => "infinity"})},
-        {initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})}
+        {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})},
+        {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"infinity">>})},
+        {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}
     ];
 fields(client_opts) ->
     [
-        {rate, ?HOCON(rate(), #{default => "infinity", desc => ?DESC(rate)})},
-        {initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})},
+        {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})},
+        {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})},
         %% low_watermark add for emqx_channel and emqx_session
         %% both modules consume first and then check
         %% so we need to use this value to prevent excessive consumption
@@ -145,13 +145,13 @@ fields(client_opts) ->
                 initial(),
                 #{
                     desc => ?DESC(low_watermark),
-                    default => "0"
+                    default => <<"0">>
                 }
             )},
         {capacity,
             ?HOCON(capacity(), #{
                 desc => ?DESC(client_bucket_capacity),
-                default => "infinity"
+                default => <<"infinity">>
             })},
         {divisible,
             ?HOCON(
@@ -166,7 +166,7 @@ fields(client_opts) ->
                 emqx_schema:duration(),
                 #{
                     desc => ?DESC(max_retry_time),
-                    default => "10s"
+                    default => <<"10s">>
                 }
             )},
         {failure_strategy,


@@ -72,9 +72,7 @@ id_example() -> 'tcp:default'.
 list_raw() ->
     [
         {listener_id(Type, LName), Type, LConf}
-        || %% FIXME: quic is not supported update vi dashboard yet
-        {Type, LName, LConf} <- do_list_raw(),
-        Type =/= <<"quic">>
+        || {Type, LName, LConf} <- do_list_raw()
     ].
 
 list() ->
@@ -170,6 +168,11 @@ current_conns(Type, Name, ListenOn) when Type == tcp; Type == ssl ->
     esockd:get_current_connections({listener_id(Type, Name), ListenOn});
 current_conns(Type, Name, _ListenOn) when Type =:= ws; Type =:= wss ->
     proplists:get_value(all_connections, ranch:info(listener_id(Type, Name)));
+current_conns(quic, _Name, _ListenOn) ->
+    case quicer:perf_counters() of
+        {ok, PerfCnts} -> proplists:get_value(conn_active, PerfCnts);
+        _ -> 0
+    end;
 current_conns(_, _, _) ->
     {error, not_support}.
@@ -367,16 +370,26 @@ do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) ->
     case [A || {quicer, _, _} = A <- application:which_applications()] of
         [_] ->
             DefAcceptors = erlang:system_info(schedulers_online) * 8,
-            ListenOpts = [
-                {cert, maps:get(certfile, Opts)},
-                {key, maps:get(keyfile, Opts)},
-                {alpn, ["mqtt"]},
-                {conn_acceptors, lists:max([DefAcceptors, maps:get(acceptors, Opts, 0)])},
-                {keep_alive_interval_ms, maps:get(keep_alive_interval, Opts, 0)},
-                {idle_timeout_ms, maps:get(idle_timeout, Opts, 0)},
-                {handshake_idle_timeout_ms, maps:get(handshake_idle_timeout, Opts, 10000)},
-                {server_resumption_level, 2}
-            ],
+            SSLOpts = maps:merge(
+                maps:with([certfile, keyfile], Opts),
+                maps:get(ssl_options, Opts, #{})
+            ),
+            ListenOpts =
+                [
+                    {certfile, str(maps:get(certfile, SSLOpts))},
+                    {keyfile, str(maps:get(keyfile, SSLOpts))},
+                    {alpn, ["mqtt"]},
+                    {conn_acceptors, lists:max([DefAcceptors, maps:get(acceptors, Opts, 0)])},
+                    {keep_alive_interval_ms, maps:get(keep_alive_interval, Opts, 0)},
+                    {idle_timeout_ms, maps:get(idle_timeout, Opts, 0)},
+                    {handshake_idle_timeout_ms, maps:get(handshake_idle_timeout, Opts, 10000)},
+                    {server_resumption_level, 2},
+                    {verify, maps:get(verify, SSLOpts, verify_none)}
+                ] ++
+                    case maps:get(cacertfile, SSLOpts, undefined) of
+                        undefined -> [];
+                        CaCertFile -> [{cacertfile, binary_to_list(CaCertFile)}]
+                    end,
             ConnectionOpts = #{
                 conn_callback => emqx_quic_connection,
                 peer_unidi_stream_count => 1,
@@ -385,13 +398,16 @@ do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) ->
                 listener => {quic, ListenerName},
                 limiter => limiter(Opts)
             },
-            StreamOpts = [{stream_callback, emqx_quic_stream}],
+            StreamOpts = #{
+                stream_callback => emqx_quic_stream,
+                active => 1
+            },
             Id = listener_id(quic, ListenerName),
             add_limiter_bucket(Id, Opts),
             quicer:start_listener(
                 Id,
                 ListenOn,
-                {ListenOpts, ConnectionOpts, StreamOpts}
+                {maps:from_list(ListenOpts), ConnectionOpts, StreamOpts}
             );
         [] ->
             {ok, {skipped, quic_app_missing}}
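With this hunk, the QUIC listener derives its TLS material from the listener's ssl_options (falling back to the top-level certfile/keyfile) and hands maps, not proplists, to quicer. A condensed sketch of the resulting quicer:start_listener/3 call with purely illustrative values (listener id, port, and option values are not taken from the commit):

    %% Sketch of what do_start_listener(quic, ...) now passes to quicer.
    ListenOpts = #{
        certfile => "etc/certs/cert.pem",
        keyfile => "etc/certs/key.pem",
        alpn => ["mqtt"],
        conn_acceptors => 64,
        verify => verify_none,
        server_resumption_level => 2
    },
    ConnectionOpts = #{
        conn_callback => emqx_quic_connection,
        peer_unidi_stream_count => 1,
        listener => {quic, default}
    },
    StreamOpts = #{stream_callback => emqx_quic_stream, active => 1},
    {ok, _Listener} =
        quicer:start_listener('quic:default', 14567, {ListenOpts, ConnectionOpts, StreamOpts}).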


@ -14,60 +14,282 @@
%% limitations under the License. %% limitations under the License.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% @doc impl. the quic connection owner process.
-module(emqx_quic_connection). -module(emqx_quic_connection).
-ifndef(BUILD_WITHOUT_QUIC). -ifndef(BUILD_WITHOUT_QUIC).
-include_lib("quicer/include/quicer.hrl").
-else.
-define(QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0).
-endif.
%% Callbacks -include("logger.hrl").
-include_lib("quicer/include/quicer.hrl").
-include_lib("emqx/include/emqx_quic.hrl").
-behaviour(quicer_connection).
-export([ -export([
init/1, init/1,
new_conn/2, new_conn/3,
connected/2, connected/3,
shutdown/2 transport_shutdown/3,
shutdown/3,
closed/3,
local_address_changed/3,
peer_address_changed/3,
streams_available/3,
peer_needs_streams/3,
resumed/3,
new_stream/3
]). ]).
-type cb_state() :: map() | proplists:proplist(). -export([activate_data_streams/2]).
-spec init(cb_state()) -> cb_state(). -export([
init(ConnOpts) when is_list(ConnOpts) -> handle_call/3,
init(maps:from_list(ConnOpts)); handle_info/2
]).
-type cb_state() :: #{
%% connecion owner pid
conn_pid := pid(),
%% Pid of ctrl stream
ctrl_pid := undefined | pid(),
%% quic connecion handle
conn := undefined | quicer:conneciton_handle(),
%% Data streams that handoff from this process
%% these streams could die/close without effecting the connecion/session.
%@TODO type?
streams := [{pid(), quicer:stream_handle()}],
%% New stream opts
stream_opts := map(),
%% If conneciton is resumed from session ticket
is_resumed => boolean(),
%% mqtt message serializer config
serialize => undefined,
_ => _
}.
-type cb_ret() :: quicer_lib:cb_ret().
%% @doc Data streams initializions are started in parallel with control streams, data streams are blocked
%% for the activation from control stream after it is accepted as a legit conneciton.
%% For security, the initial number of allowed data streams from client should be limited by
%% 'peer_bidi_stream_count` & 'peer_unidi_stream_count`
-spec activate_data_streams(pid(), {
emqx_frame:parse_state(), emqx_frame:serialize_opts(), emqx_channel:channel()
}) -> ok.
activate_data_streams(ConnOwner, {PS, Serialize, Channel}) ->
gen_server:call(ConnOwner, {activate_data_streams, {PS, Serialize, Channel}}, infinity).
%% @doc conneciton owner init callback
-spec init(map()) -> {ok, cb_state()}.
init(#{stream_opts := SOpts} = S) when is_list(SOpts) ->
init(S#{stream_opts := maps:from_list(SOpts)});
init(ConnOpts) when is_map(ConnOpts) -> init(ConnOpts) when is_map(ConnOpts) ->
ConnOpts. {ok, init_cb_state(ConnOpts)}.
-spec new_conn(quicer:connection_handler(), cb_state()) -> {ok, cb_state()} | {error, any()}. -spec closed(quicer:conneciton_handle(), quicer:conn_closed_props(), cb_state()) ->
new_conn(Conn, #{zone := Zone} = S) -> {stop, normal, cb_state()}.
closed(_Conn, #{is_peer_acked := _} = Prop, S) ->
?SLOG(debug, Prop),
{stop, normal, S}.
%% @doc handle the new incoming connecion as the connecion acceptor.
-spec new_conn(quicer:connection_handle(), quicer:new_conn_props(), cb_state()) ->
{ok, cb_state()} | {error, any(), cb_state()}.
new_conn(
Conn,
#{version := _Vsn} = ConnInfo,
#{zone := Zone, conn := undefined, ctrl_pid := undefined} = S
) ->
process_flag(trap_exit, true), process_flag(trap_exit, true),
?SLOG(debug, ConnInfo),
case emqx_olp:is_overloaded() andalso is_zone_olp_enabled(Zone) of case emqx_olp:is_overloaded() andalso is_zone_olp_enabled(Zone) of
false -> false ->
{ok, Pid} = emqx_connection:start_link(emqx_quic_stream, {self(), Conn}, S), %% Start control stream process
StartOption = S,
{ok, CtrlPid} = emqx_connection:start_link(
emqx_quic_stream,
{self(), Conn, maps:without([crypto_buffer], ConnInfo)},
StartOption
),
receive receive
{Pid, stream_acceptor_ready} -> {CtrlPid, stream_acceptor_ready} ->
ok = quicer:async_handshake(Conn), ok = quicer:async_handshake(Conn),
{ok, S}; {ok, S#{conn := Conn, ctrl_pid := CtrlPid}};
{'EXIT', Pid, _Reason} -> {'EXIT', _Pid, _Reason} ->
{error, stream_accept_error} {stop, stream_accept_error, S}
end; end;
true -> true ->
emqx_metrics:inc('olp.new_conn'), emqx_metrics:inc('olp.new_conn'),
{error, overloaded} _ = quicer:async_shutdown_connection(
Conn,
?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE,
?MQTT_QUIC_CONN_ERROR_OVERLOADED
),
{stop, normal, S}
end. end.
-spec connected(quicer:connection_handler(), cb_state()) -> {ok, cb_state()} | {error, any()}. %% @doc callback when connection is connected.
connected(Conn, #{slow_start := false} = S) -> -spec connected(quicer:connection_handle(), quicer:connected_props(), cb_state()) ->
{ok, _Pid} = emqx_connection:start_link(emqx_quic_stream, Conn, S), {ok, cb_state()} | {error, any(), cb_state()}.
connected(_Conn, Props, S) ->
?SLOG(debug, Props),
{ok, S}.
%% @doc callback when connection is resumed from 0-RTT
-spec resumed(quicer:connection_handle(), SessionData :: binary() | false, cb_state()) -> cb_ret().
%% reserve resume conn with callback.
%% resumed(Conn, Data, #{resumed_callback := ResumeFun} = S) when
%% is_function(ResumeFun)
%% ->
%% ResumeFun(Conn, Data, S);
resumed(_Conn, _Data, S) ->
{ok, S#{is_resumed := true}}.
%% @doc callback for handling orphan data streams
%% depends on the connecion state and control stream state.
-spec new_stream(quicer:stream_handle(), quicer:new_stream_props(), cb_state()) -> cb_ret().
new_stream(
Stream,
#{is_orphan := true, flags := _Flags} = Props,
#{
conn := Conn,
streams := Streams,
stream_opts := SOpts,
zone := Zone,
limiter := Limiter,
parse_state := PS,
channel := Channel,
serialize := Serialize
} = S
) ->
%% Cherry pick options for data streams
SOpts1 = SOpts#{
is_local => false,
zone => Zone,
% unused
limiter => Limiter,
parse_state => PS,
channel => Channel,
serialize => Serialize,
quic_event_mask => ?QUICER_STREAM_EVENT_MASK_START_COMPLETE
},
{ok, NewStreamOwner} = quicer_stream:start_link(
emqx_quic_data_stream,
Stream,
Conn,
SOpts1,
Props
),
case quicer:handoff_stream(Stream, NewStreamOwner, {PS, Serialize, Channel}) of
ok ->
ok;
E ->
%% Only log, keep connecion alive.
?SLOG(error, #{message => "new stream handoff failed", stream => Stream, error => E})
end,
%% @TODO maybe keep them in `inactive_streams'
{ok, S#{streams := [{NewStreamOwner, Stream} | Streams]}}.
%% @doc callback for handling remote connecion shutdown.
-spec shutdown(quicer:connection_handle(), quicer:error_code(), cb_state()) -> cb_ret().
shutdown(Conn, ErrorCode, S) ->
ErrorCode =/= 0 andalso ?SLOG(debug, #{error_code => ErrorCode, state => S}),
_ = quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0),
{ok, S}.
%% @doc callback for handling transport error, such as idle timeout
-spec transport_shutdown(quicer:connection_handle(), quicer:transport_shutdown_props(), cb_state()) ->
cb_ret().
transport_shutdown(_C, DownInfo, S) when is_map(DownInfo) ->
?SLOG(debug, DownInfo),
{ok, S}.
%% @doc callback for handling for peer addr changed.
-spec peer_address_changed(quicer:connection_handle(), quicer:quicer_addr(), cb_state) -> cb_ret().
peer_address_changed(_C, _NewAddr, S) ->
%% @TODO update conn info in emqx_quic_stream
{ok, S}.
%% @doc callback for handling local addr change, currently unused
-spec local_address_changed(quicer:connection_handle(), quicer:quicer_addr(), cb_state()) ->
cb_ret().
local_address_changed(_C, _NewAddr, S) ->
{ok, S}.
%% @doc callback for handling remote stream limit updates
-spec streams_available(
quicer:connection_handle(),
{BidirStreams :: non_neg_integer(), UnidirStreams :: non_neg_integer()},
cb_state()
) -> cb_ret().
streams_available(_C, {BidirCnt, UnidirCnt}, S) ->
{ok, S#{
peer_bidi_stream_count => BidirCnt,
peer_unidi_stream_count => UnidirCnt
}}.
%% @doc callback for handling request when remote wants for more streams
%% should cope with rate limiting
%% @TODO this is not going to get triggered in current version
%% ref: https://github.com/microsoft/msquic/issues/3120
-spec peer_needs_streams(quicer:connection_handle(), undefined, cb_state()) -> cb_ret().
peer_needs_streams(_C, undefined, S) ->
?SLOG(info, #{
msg => "ignore: peer need more streames", info => maps:with([conn_pid, ctrl_pid], S)
}),
{ok, S}.
%% @doc handle API calls
-spec handle_call(Req :: term(), gen_server:from(), cb_state()) -> cb_ret().
handle_call(
{activate_data_streams, {PS, Serialize, Channel} = ActivateData},
_From,
#{streams := Streams} = S
) ->
_ = [
%% Try to activate streams individually if failed, stream will shutdown on its own.
%% we dont care about the return val here.
%% note, this is only used after control stream pass the validation. The data streams
%% that are called here are assured to be inactived (data processing hasn't been started).
catch emqx_quic_data_stream:activate_data(OwnerPid, ActivateData)
|| {OwnerPid, _Stream} <- Streams
],
{reply, ok, S#{
channel := Channel,
serialize := Serialize,
parse_state := PS
}};
handle_call(_Req, _From, S) ->
{reply, {error, unimpl}, S}.
%% @doc handle DOWN messages from streams.
handle_info({'EXIT', Pid, Reason}, #{ctrl_pid := Pid, conn := Conn} = S) ->
Code =
case Reason of
normal ->
?MQTT_QUIC_CONN_NOERROR;
_ ->
?MQTT_QUIC_CONN_ERROR_CTRL_STREAM_DOWN
end,
_ = quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, Code),
{ok, S}; {ok, S};
connected(_Conn, S) -> handle_info({'EXIT', Pid, Reason}, #{streams := Streams} = S) ->
{ok, S}. case proplists:is_defined(Pid, Streams) of
true when
-spec shutdown(quicer:connection_handler(), cb_state()) -> {ok, cb_state()} | {error, any()}. Reason =:= normal orelse
shutdown(Conn, S) -> Reason =:= {shutdown, protocol_error} orelse
quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0), Reason =:= killed
{ok, S}. ->
{ok, S};
true ->
?SLOG(info, #{message => "Data stream unexpected exit", reason => Reason}),
{ok, S};
false ->
{stop, unknown_pid_down, S}
end.
%%%
%%% Internals
%%%
-spec is_zone_olp_enabled(emqx_types:zone()) -> boolean(). -spec is_zone_olp_enabled(emqx_types:zone()) -> boolean().
is_zone_olp_enabled(Zone) -> is_zone_olp_enabled(Zone) ->
case emqx_config:get_zone_conf(Zone, [overload_protection]) of case emqx_config:get_zone_conf(Zone, [overload_protection]) of
@ -76,3 +298,20 @@ is_zone_olp_enabled(Zone) ->
_ -> _ ->
false false
end. end.
-spec init_cb_state(map()) -> cb_state().
init_cb_state(#{zone := _Zone} = Map) ->
Map#{
conn_pid => self(),
ctrl_pid => undefined,
conn => undefined,
streams => [],
parse_state => undefined,
channel => undefined,
serialize => undefined,
is_resumed => false
}.
%% BUILD_WITHOUT_QUIC
-else.
-endif.
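The rewritten emqx_quic_connection is the connection-owner process for MQTT over QUIC: it accepts the connection, starts the control-stream process, completes the handshake, parks any early data streams, and only activates them after the control stream has validated the client. A condensed sketch of that accept path as implemented above (the function name new_conn_sketch is illustrative, and the EXIT branch of the receive is omitted):

    %% 1. new_conn/3: start the control-stream process, wait for it to be ready,
    %%    then kick off the TLS handshake asynchronously.
    new_conn_sketch(Conn, ConnInfo, S) ->
        {ok, CtrlPid} =
            emqx_connection:start_link(emqx_quic_stream, {self(), Conn, ConnInfo}, S),
        receive
            {CtrlPid, stream_acceptor_ready} ->
                ok = quicer:async_handshake(Conn),
                {ok, S#{conn := Conn, ctrl_pid := CtrlPid}}
        end.

    %% 2. new_stream/3 parks incoming data streams in the owner state; once the
    %%    control stream has authenticated the client, activate_data_streams/2
    %%    hands the parse state, serializer and channel to every parked
    %%    emqx_quic_data_stream process.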


@ -0,0 +1,469 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%%
%% @doc QUIC data stream
%% Following the behaviour of emqx_connection:
%% The MQTT packets and their side effects are handled *atomically*.
%%
-module(emqx_quic_data_stream).
-ifndef(BUILD_WITHOUT_QUIC).
-behaviour(quicer_remote_stream).
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include_lib("quicer/include/quicer.hrl").
-include("emqx_mqtt.hrl").
-include("logger.hrl").
%% Connection Callbacks
-export([
init_handoff/4,
post_handoff/3,
send_complete/3,
peer_send_shutdown/3,
peer_send_aborted/3,
peer_receive_aborted/3,
send_shutdown_complete/3,
stream_closed/3,
passive/3
]).
-export([handle_stream_data/4]).
%% gen_server API
-export([activate_data/2]).
-export([
handle_call/3,
handle_info/2,
handle_continue/2
]).
-type cb_ret() :: quicer_stream:cb_ret().
-type cb_state() :: quicer_stream:cb_state().
-type error_code() :: quicer:error_code().
-type connection_handle() :: quicer:connection_handle().
-type stream_handle() :: quicer:stream_handle().
-type handoff_data() :: {
emqx_frame:parse_state() | undefined,
emqx_frame:serialize_opts() | undefined,
emqx_channel:channel() | undefined
}.
%%
%% @doc Activate the data handling.
%% Note, data handling is disabled before finishing the validation over control stream.
-spec activate_data(pid(), {
emqx_frame:parse_state(), emqx_frame:serialize_opts(), emqx_channel:channel()
}) -> ok.
activate_data(StreamPid, {PS, Serialize, Channel}) ->
gen_server:call(StreamPid, {activate, {PS, Serialize, Channel}}, infinity).
%%
%% @doc Handoff from previous owner, from the connection owner.
%% Note, unlike control stream, there is no acceptor for data streams.
%% The connection owner get new stream, spawn new proc and then handover to it.
%%
-spec init_handoff(stream_handle(), map(), connection_handle(), quicer:new_stream_props()) ->
{ok, cb_state()}.
init_handoff(
Stream,
_StreamOpts,
Connection,
#{is_orphan := true, flags := Flags}
) ->
{ok, init_state(Stream, Connection, Flags)}.
%%
%% @doc Post handoff data stream
%%
-spec post_handoff(stream_handle(), handoff_data(), cb_state()) -> cb_ret().
post_handoff(_Stream, {undefined = _PS, undefined = _Serialize, undefined = _Channel}, S) ->
%% When the channel isn't ready yet.
%% Data stream should wait for activate call with ?MODULE:activate_data/2
{ok, S};
post_handoff(Stream, {PS, Serialize, Channel}, S) ->
?tp(debug, ?FUNCTION_NAME, #{channel => Channel, serialize => Serialize}),
_ = quicer:setopt(Stream, active, 10),
{ok, S#{channel := Channel, serialize := Serialize, parse_state := PS}}.
-spec peer_receive_aborted(stream_handle(), error_code(), cb_state()) -> cb_ret().
peer_receive_aborted(Stream, ErrorCode, #{is_unidir := _} = S) ->
%% we abort send with same reason
_ = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT, ErrorCode),
{ok, S}.
-spec peer_send_aborted(stream_handle(), error_code(), cb_state()) -> cb_ret().
peer_send_aborted(Stream, ErrorCode, #{is_unidir := _} = S) ->
%% we abort receive with same reason
_ = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT_RECEIVE, ErrorCode),
{ok, S}.
-spec peer_send_shutdown(stream_handle(), undefined, cb_state()) -> cb_ret().
peer_send_shutdown(Stream, undefined, S) ->
ok = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL, 0),
{ok, S}.
-spec send_complete(stream_handle(), IsCanceled :: boolean(), cb_state()) -> cb_ret().
send_complete(_Stream, false, S) ->
{ok, S};
send_complete(_Stream, true = _IsCanceled, S) ->
{ok, S}.
-spec send_shutdown_complete(stream_handle(), error_code(), cb_state()) -> cb_ret().
send_shutdown_complete(_Stream, _Flags, S) ->
{ok, S}.
-spec handle_stream_data(stream_handle(), binary(), quicer:recv_data_props(), cb_state()) ->
cb_ret().
handle_stream_data(
_Stream,
Bin,
_Flags,
#{
is_unidir := false,
channel := Channel,
parse_state := PS,
data_queue := QueuedData,
task_queue := TQ
} = State
) when
%% assert get stream data only after channel is created
Channel =/= undefined
->
{MQTTPackets, NewPS} = parse_incoming(list_to_binary(lists:reverse([Bin | QueuedData])), PS),
NewTQ = lists:foldl(
fun(Item, Acc) ->
queue:in(Item, Acc)
end,
TQ,
[{incoming, P} || P <- lists:reverse(MQTTPackets)]
),
{{continue, handle_appl_msg}, State#{parse_state := NewPS, task_queue := NewTQ}}.
-spec passive(stream_handle(), undefined, cb_state()) -> cb_ret().
passive(Stream, undefined, S) ->
_ = quicer:setopt(Stream, active, 10),
{ok, S}.
-spec stream_closed(stream_handle(), quicer:stream_closed_props(), cb_state()) -> cb_ret().
stream_closed(
_Stream,
#{
is_conn_shutdown := IsConnShutdown,
is_app_closing := IsAppClosing,
is_shutdown_by_app := IsAppShutdown,
is_closed_remotely := IsRemote,
status := Status,
error := Code
},
S
) when
is_boolean(IsConnShutdown) andalso
is_boolean(IsAppClosing) andalso
is_boolean(IsAppShutdown) andalso
is_boolean(IsRemote) andalso
is_atom(Status) andalso
is_integer(Code)
->
{stop, normal, S}.
-spec handle_call(Request :: term(), From :: {pid(), term()}, cb_state()) -> cb_ret().
handle_call(Call, _From, S) ->
do_handle_call(Call, S).
-spec handle_continue(Continue :: term(), cb_state()) -> cb_ret().
handle_continue(handle_appl_msg, #{task_queue := Q} = S) ->
case queue:out(Q) of
{{value, Item}, Q2} ->
do_handle_appl_msg(Item, S#{task_queue := Q2});
{empty, _Q} ->
{ok, S}
end.
%%% Internals
do_handle_appl_msg(
{outgoing, Packets},
#{
channel := Channel,
stream := _Stream,
serialize := _Serialize
} = S
) when
Channel =/= undefined
->
case handle_outgoing(Packets, S) of
{ok, Size} ->
ok = emqx_metrics:inc('bytes.sent', Size),
{{continue, handle_appl_msg}, S};
{error, E1, E2} ->
{stop, {E1, E2}, S};
{error, E} ->
{stop, E, S}
end;
do_handle_appl_msg({incoming, #mqtt_packet{} = Packet}, #{channel := Channel} = S) when
Channel =/= undefined
->
ok = inc_incoming_stats(Packet),
with_channel(handle_in, [Packet], S);
do_handle_appl_msg({incoming, {frame_error, _} = FE}, #{channel := Channel} = S) when
Channel =/= undefined
->
with_channel(handle_in, [FE], S);
do_handle_appl_msg({close, Reason}, S) ->
%% @TODO shall we abort the stream here or shut it down gracefully?
with_channel(handle_info, [{sock_closed, Reason}], S);
do_handle_appl_msg({event, updated}, S) ->
%% Data streams don't care about connection state changes.
{{continue, handle_appl_msg}, S}.
handle_info(Deliver = {deliver, _, _}, S) ->
Delivers = [Deliver],
with_channel(handle_deliver, [Delivers], S);
handle_info({timeout, Ref, Msg}, S) ->
with_channel(handle_timeout, [Ref, Msg], S);
handle_info(Info, State) ->
with_channel(handle_info, [Info], State).
with_channel(Fun, Args, #{channel := Channel, task_queue := Q} = S) when
Channel =/= undefined
->
case apply(emqx_channel, Fun, Args ++ [Channel]) of
ok ->
{{continue, handle_appl_msg}, S};
{ok, Msgs, NewChannel} when is_list(Msgs) ->
{{continue, handle_appl_msg}, S#{
task_queue := queue:join(Q, queue:from_list(Msgs)),
channel := NewChannel
}};
{ok, Msg, NewChannel} when is_record(Msg, mqtt_packet) ->
{{continue, handle_appl_msg}, S#{
task_queue := queue:in({outgoing, Msg}, Q), channel := NewChannel
}};
%% @FIXME WTH?
{ok, {outgoing, _} = Msg, NewChannel} ->
{{continue, handle_appl_msg}, S#{task_queue := queue:in(Msg, Q), channel := NewChannel}};
{ok, NewChannel} ->
{{continue, handle_appl_msg}, S#{channel := NewChannel}};
%% @TODO optimisation for shutdown wrap
{shutdown, Reason, NewChannel} ->
{stop, {shutdown, Reason}, S#{channel := NewChannel}};
{shutdown, Reason, Msgs, NewChannel} when is_list(Msgs) ->
%% @TODO handle outgoing?
{stop, {shutdown, Reason}, S#{
channel := NewChannel,
task_queue := queue:join(Q, queue:from_list(Msgs))
}};
{shutdown, Reason, Msg, NewChannel} ->
{stop, {shutdown, Reason}, S#{
channel := NewChannel,
task_queue := queue:in(Msg, Q)
}}
end.
handle_outgoing(#mqtt_packet{} = P, S) ->
handle_outgoing([P], S);
handle_outgoing(Packets, #{serialize := Serialize, stream := Stream, is_unidir := false}) when
is_list(Packets)
->
OutBin = [serialize_packet(P, Serialize) || P <- filter_disallowed_out(Packets)],
%% Send data asynchronously, but still get send feedback via {quic, send_complete, ...}
Res = quicer:async_send(Stream, OutBin, ?QUICER_SEND_FLAG_SYNC),
?TRACE("MQTT", "mqtt_packet_sent", #{packets => Packets}),
[ok = inc_outgoing_stats(P) || P <- Packets],
Res.
serialize_packet(Packet, Serialize) ->
try emqx_frame:serialize_pkt(Packet, Serialize) of
<<>> ->
?SLOG(warning, #{
msg => "packet_is_discarded",
reason => "frame_is_too_large",
packet => emqx_packet:format(Packet, hidden)
}),
ok = emqx_metrics:inc('delivery.dropped.too_large'),
ok = emqx_metrics:inc('delivery.dropped'),
ok = inc_outgoing_stats({error, message_too_large}),
<<>>;
Data ->
Data
catch
%% Should never happen.
throw:{?FRAME_SERIALIZE_ERROR, Reason} ->
?SLOG(info, #{
reason => Reason,
input_packet => Packet
}),
erlang:error({?FRAME_SERIALIZE_ERROR, Reason});
error:Reason:Stacktrace ->
?SLOG(error, #{
input_packet => Packet,
exception => Reason,
stacktrace => Stacktrace
}),
erlang:error(?FRAME_SERIALIZE_ERROR)
end.
-spec init_state(
quicer:stream_handle(),
quicer:connection_handle(),
quicer:new_stream_props()
) ->
% @TODO
map().
init_state(Stream, Connection, OpenFlags) ->
init_state(Stream, Connection, OpenFlags, undefined).
init_state(Stream, Connection, OpenFlags, PS) ->
%% quic stream handle
#{
stream => Stream,
%% quic connection handle
conn => Connection,
%% whether it is a QUIC unidirectional stream
is_unidir => quicer:is_unidirectional(OpenFlags),
%% Frame Parse State
parse_state => PS,
%% Peer stream handle (paired with this one); only used for unidirectional streams
peer_stream => undefined,
%% whether the stream is locally initiated.
is_local => false,
%% binary data queued while the channel is NOT yet connected, in reverse order.
data_queue => [],
%% Channel from connection
%% `undefined' means the connection is not connected.
channel => undefined,
%% serialize opts for connection
serialize => undefined,
%% Current working queue
task_queue => queue:new()
}.
-spec do_handle_call(term(), cb_state()) -> cb_ret().
do_handle_call(
{activate, {PS, Serialize, Channel}},
#{
channel := undefined,
stream := Stream,
serialize := undefined
} = S
) ->
NewS = S#{channel := Channel, serialize := Serialize, parse_state := PS},
%% We rely on the QUIC protocol for flow control.
case quicer:setopt(Stream, active, true) of
ok ->
{reply, ok, NewS};
{error, E} ->
?SLOG(error, #{msg => "set stream active failed", error => E}),
{stop, E, NewS}
end;
do_handle_call(_Call, _S) ->
{error, unimpl}.
%% @doc Parse incoming data; returns the Packets in reverse order.
parse_incoming(Data, PS) ->
try
do_parse_incoming(Data, [], PS)
catch
throw:{?FRAME_PARSE_ERROR, Reason} ->
?SLOG(info, #{
reason => Reason,
input_bytes => Data
}),
{[{frame_error, Reason}], PS};
error:Reason:Stacktrace ->
?SLOG(error, #{
input_bytes => Data,
reason => Reason,
stacktrace => Stacktrace
}),
{[{frame_error, Reason}], PS}
end.
do_parse_incoming(<<>>, Packets, ParseState) ->
{Packets, ParseState};
do_parse_incoming(Data, Packets, ParseState) ->
case emqx_frame:parse(Data, ParseState) of
{more, NParseState} ->
{Packets, NParseState};
{ok, Packet, Rest, NParseState} ->
do_parse_incoming(Rest, [Packet | Packets], NParseState)
end.
%% The following functions are copied from emqx_connection.
-compile({inline, [inc_incoming_stats/1]}).
inc_incoming_stats(Packet = ?PACKET(Type)) ->
inc_counter(recv_pkt, 1),
case Type =:= ?PUBLISH of
true ->
inc_counter(recv_msg, 1),
inc_qos_stats(recv_msg, Packet),
inc_counter(incoming_pubs, 1);
false ->
ok
end,
emqx_metrics:inc_recv(Packet).
-compile({inline, [inc_outgoing_stats/1]}).
inc_outgoing_stats({error, message_too_large}) ->
inc_counter('send_msg.dropped', 1),
inc_counter('send_msg.dropped.too_large', 1);
inc_outgoing_stats(Packet = ?PACKET(Type)) ->
inc_counter(send_pkt, 1),
case Type of
?PUBLISH ->
inc_counter(send_msg, 1),
inc_counter(outgoing_pubs, 1),
inc_qos_stats(send_msg, Packet);
_ ->
ok
end,
emqx_metrics:inc_sent(Packet).
inc_counter(Key, Inc) ->
_ = emqx_pd:inc_counter(Key, Inc),
ok.
inc_qos_stats(Type, Packet) ->
case inc_qos_stats_key(Type, emqx_packet:qos(Packet)) of
undefined ->
ignore;
Key ->
inc_counter(Key, 1)
end.
inc_qos_stats_key(send_msg, ?QOS_0) -> 'send_msg.qos0';
inc_qos_stats_key(send_msg, ?QOS_1) -> 'send_msg.qos1';
inc_qos_stats_key(send_msg, ?QOS_2) -> 'send_msg.qos2';
inc_qos_stats_key(recv_msg, ?QOS_0) -> 'recv_msg.qos0';
inc_qos_stats_key(recv_msg, ?QOS_1) -> 'recv_msg.qos1';
inc_qos_stats_key(recv_msg, ?QOS_2) -> 'recv_msg.qos2';
%% for bad qos
inc_qos_stats_key(_, _) -> undefined.
filter_disallowed_out(Packets) ->
lists:filter(fun is_datastream_out_pkt/1, Packets).
%% Only application packets (PUBLISH/PUBACK/PUBREC/PUBREL/PUBCOMP/SUBSCRIBE/SUBACK/
%% UNSUBSCRIBE/UNSUBACK, i.e. packet types 3..11) may be sent on a data stream.
is_datastream_out_pkt(#mqtt_packet{header = #mqtt_packet_header{type = Type}}) when
Type > 2 andalso Type < 12
->
true;
is_datastream_out_pkt(_) ->
false.
%% BUILD_WITHOUT_QUIC
-else.
-endif.

@ -14,9 +14,18 @@
%% limitations under the License. %% limitations under the License.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% MQTT/QUIC Stream %% MQTT over QUIC
%% multistreams: This is the control stream.
%% single stream: This is the only main stream.
%% callbacks are from emqx_connection process rather than quicer_stream
-module(emqx_quic_stream). -module(emqx_quic_stream).
-ifndef(BUILD_WITHOUT_QUIC).
-behaviour(quicer_remote_stream).
-include("logger.hrl").
%% emqx transport Callbacks %% emqx transport Callbacks
-export([ -export([
type/1, type/1,
@ -31,44 +40,84 @@
sockname/1, sockname/1,
peercert/1 peercert/1
]). ]).
-include_lib("quicer/include/quicer.hrl").
-include_lib("emqx/include/emqx_quic.hrl").
wait({ConnOwner, Conn}) -> -type cb_ret() :: quicer_stream:cb_ret().
-type cb_data() :: quicer_stream:cb_state().
-type connection_handle() :: quicer:connection_handle().
-type stream_handle() :: quicer:stream_handle().
-export([
send_complete/3,
peer_send_shutdown/3,
peer_send_aborted/3,
peer_receive_aborted/3,
send_shutdown_complete/3,
stream_closed/3,
passive/3
]).
-export_type([socket/0]).
-opaque socket() :: {quic, connection_handle(), stream_handle(), socket_info()}.
-type socket_info() :: #{
is_orphan => boolean(),
ctrl_stream_start_flags => quicer:stream_open_flags(),
%% and quicer:new_conn_props()
_ => _
}.
%%% For Accepting New Remote Stream
-spec wait({pid(), connection_handle(), socket_info()}) ->
{ok, socket()} | {error, enotconn}.
wait({ConnOwner, Conn, ConnInfo}) ->
{ok, Conn} = quicer:async_accept_stream(Conn, []), {ok, Conn} = quicer:async_accept_stream(Conn, []),
ConnOwner ! {self(), stream_acceptor_ready}, ConnOwner ! {self(), stream_acceptor_ready},
receive receive
%% from msquic %% New incoming stream, this is a *control* stream
{quic, new_stream, Stream} -> {quic, new_stream, Stream, #{is_orphan := IsOrphan, flags := StartFlags}} ->
{ok, {quic, Conn, Stream}}; SocketInfo = ConnInfo#{
is_orphan => IsOrphan,
ctrl_stream_start_flags => StartFlags
},
{ok, socket(Conn, Stream, SocketInfo)};
%% connection closed event for stream acceptor
{quic, closed, undefined, undefined} ->
{error, enotconn};
%% Connection owner process down
{'EXIT', ConnOwner, _Reason} -> {'EXIT', ConnOwner, _Reason} ->
{error, enotconn} {error, enotconn}
end. end.
-spec type(_) -> quic.
type(_) -> type(_) ->
quic. quic.
peername({quic, Conn, _Stream}) -> peername({quic, Conn, _Stream, _Info}) ->
quicer:peername(Conn). quicer:peername(Conn).
sockname({quic, Conn, _Stream}) -> sockname({quic, Conn, _Stream, _Info}) ->
quicer:sockname(Conn). quicer:sockname(Conn).
peercert(_S) -> peercert(_S) ->
%% @todo but unsupported by msquic %% @todo but unsupported by msquic
nossl. nossl.
getstat({quic, Conn, _Stream}, Stats) -> getstat({quic, Conn, _Stream, _Info}, Stats) ->
case quicer:getstat(Conn, Stats) of case quicer:getstat(Conn, Stats) of
{error, _} -> {error, closed}; {error, _} -> {error, closed};
Res -> Res Res -> Res
end. end.
setopts(Socket, Opts) -> setopts({quic, _Conn, Stream, _Info}, Opts) ->
lists:foreach( lists:foreach(
fun fun
({Opt, V}) when is_atom(Opt) -> ({Opt, V}) when is_atom(Opt) ->
quicer:setopt(Socket, Opt, V); quicer:setopt(Stream, Opt, V);
(Opt) when is_atom(Opt) -> (Opt) when is_atom(Opt) ->
quicer:setopt(Socket, Opt, true) quicer:setopt(Stream, Opt, true)
end, end,
Opts Opts
), ),
@ -84,9 +133,18 @@ getopts(_Socket, _Opts) ->
{buffer, 80000} {buffer, 80000}
]}. ]}.
fast_close({quic, _Conn, Stream}) -> %% @TODO supply some App Error Code from caller
%% Flush send buffer, gracefully shutdown fast_close({ConnOwner, Conn, _ConnInfo}) when is_pid(ConnOwner) ->
quicer:async_shutdown_stream(Stream), %% handshake aborted.
_ = quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0),
ok;
fast_close({quic, _Conn, Stream, _Info}) ->
%% Force flush
_ = quicer:async_shutdown_stream(Stream),
%% @FIXME Since we shut down the control stream, we shut down the connection as well.
%% *BUT* MsQuic does not flush the send buffer if we shut down the connection after
%% gracefully shutting down the stream.
% quicer:async_shutdown_connection(Conn, ?QUIC_CONNECTION_SHUTDOWN_FLAG_NONE, 0),
ok. ok.
-spec ensure_ok_or_exit(atom(), list(term())) -> term(). -spec ensure_ok_or_exit(atom(), list(term())) -> term().
@ -102,8 +160,92 @@ ensure_ok_or_exit(Fun, Args = [Sock | _]) when is_atom(Fun), is_list(Args) ->
Result Result
end. end.
async_send({quic, _Conn, Stream}, Data, _Options) -> async_send({quic, _Conn, Stream, _Info}, Data, _Options) ->
case quicer:send(Stream, Data) of case quicer:async_send(Stream, Data, ?QUICER_SEND_FLAG_SYNC) of
{ok, _Len} -> ok; {ok, _Len} -> ok;
{error, X, Y} -> {error, {X, Y}};
Other -> Other Other -> Other
end. end.
%%%
%%% quicer stream callbacks
%%%
-spec peer_receive_aborted(stream_handle(), non_neg_integer(), cb_data()) -> cb_ret().
peer_receive_aborted(Stream, ErrorCode, S) ->
_ = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT, ErrorCode),
{ok, S}.
-spec peer_send_aborted(stream_handle(), non_neg_integer(), cb_data()) -> cb_ret().
peer_send_aborted(Stream, ErrorCode, S) ->
%% abort the receive side with the same reason
_ = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_ABORT, ErrorCode),
{ok, S}.
-spec peer_send_shutdown(stream_handle(), undefined, cb_data()) -> cb_ret().
peer_send_shutdown(Stream, undefined, S) ->
ok = quicer:async_shutdown_stream(Stream, ?QUIC_STREAM_SHUTDOWN_FLAG_GRACEFUL, 0),
{ok, S}.
-spec send_complete(stream_handle(), boolean(), cb_data()) -> cb_ret().
send_complete(_Stream, false, S) ->
{ok, S};
send_complete(_Stream, true = _IsCancelled, S) ->
?SLOG(error, #{message => "send cancelled"}),
{ok, S}.
-spec send_shutdown_complete(stream_handle(), boolean(), cb_data()) -> cb_ret().
send_shutdown_complete(_Stream, _IsGraceful, S) ->
{ok, S}.
-spec passive(stream_handle(), undefined, cb_data()) -> cb_ret().
passive(Stream, undefined, S) ->
case quicer:setopt(Stream, active, 10) of
ok -> ok;
Error -> ?SLOG(error, #{message => "set active error", error => Error})
end,
{ok, S}.
-spec stream_closed(stream_handle(), quicer:stream_closed_props(), cb_data()) ->
{{continue, term()}, cb_data()}.
stream_closed(
_Stream,
#{
is_conn_shutdown := IsConnShutdown,
is_app_closing := IsAppClosing,
is_shutdown_by_app := IsAppShutdown,
is_closed_remotely := IsRemote,
status := Status,
error := Code
},
S
) when
is_boolean(IsConnShutdown) andalso
is_boolean(IsAppClosing) andalso
is_boolean(IsAppShutdown) andalso
is_boolean(IsRemote) andalso
is_atom(Status) andalso
is_integer(Code)
->
%% For now we fake a sock_closed event so that emqx_connection:process_msg
%% appends a message to be processed
Reason =
case Code of
?MQTT_QUIC_CONN_NOERROR ->
normal;
_ ->
Status
end,
{{continue, {sock_closed, Reason}}, S}.
%%%
%%% Internals
%%%
-spec socket(connection_handle(), stream_handle(), socket_info()) -> socket().
socket(Conn, CtrlStream, Info) when is_map(Info) ->
{quic, Conn, CtrlStream, Info}.
%% BUILD_WITHOUT_QUIC
-else.
-endif.

View File

@ -268,7 +268,7 @@ fields("persistent_session_store") ->
sc( sc(
duration(), duration(),
#{ #{
default => "1h", default => <<"1h">>,
desc => ?DESC(persistent_session_store_max_retain_undelivered) desc => ?DESC(persistent_session_store_max_retain_undelivered)
} }
)}, )},
@ -276,7 +276,7 @@ fields("persistent_session_store") ->
sc( sc(
duration(), duration(),
#{ #{
default => "1h", default => <<"1h">>,
desc => ?DESC(persistent_session_store_message_gc_interval) desc => ?DESC(persistent_session_store_message_gc_interval)
} }
)}, )},
@ -284,7 +284,7 @@ fields("persistent_session_store") ->
sc( sc(
duration(), duration(),
#{ #{
default => "1m", default => <<"1m">>,
desc => ?DESC(persistent_session_store_session_message_gc_interval) desc => ?DESC(persistent_session_store_session_message_gc_interval)
} }
)} )}
@ -352,7 +352,7 @@ fields("authz_cache") ->
sc( sc(
duration(), duration(),
#{ #{
default => "1m", default => <<"1m">>,
desc => ?DESC(fields_cache_ttl) desc => ?DESC(fields_cache_ttl)
} }
)} )}
@ -363,7 +363,7 @@ fields("mqtt") ->
sc( sc(
hoconsc:union([infinity, duration()]), hoconsc:union([infinity, duration()]),
#{ #{
default => "15s", default => <<"15s">>,
desc => ?DESC(mqtt_idle_timeout) desc => ?DESC(mqtt_idle_timeout)
} }
)}, )},
@ -371,7 +371,7 @@ fields("mqtt") ->
sc( sc(
bytesize(), bytesize(),
#{ #{
default => "1MB", default => <<"1MB">>,
desc => ?DESC(mqtt_max_packet_size) desc => ?DESC(mqtt_max_packet_size)
} }
)}, )},
@ -507,7 +507,7 @@ fields("mqtt") ->
sc( sc(
duration(), duration(),
#{ #{
default => "30s", default => <<"30s">>,
desc => ?DESC(mqtt_retry_interval) desc => ?DESC(mqtt_retry_interval)
} }
)}, )},
@ -523,7 +523,7 @@ fields("mqtt") ->
sc( sc(
duration(), duration(),
#{ #{
default => "300s", default => <<"300s">>,
desc => ?DESC(mqtt_await_rel_timeout) desc => ?DESC(mqtt_await_rel_timeout)
} }
)}, )},
@ -531,7 +531,7 @@ fields("mqtt") ->
sc( sc(
duration(), duration(),
#{ #{
default => "2h", default => <<"2h">>,
desc => ?DESC(mqtt_session_expiry_interval) desc => ?DESC(mqtt_session_expiry_interval)
} }
)}, )},
@ -617,7 +617,7 @@ fields("flapping_detect") ->
sc( sc(
duration(), duration(),
#{ #{
default => "1m", default => <<"1m">>,
desc => ?DESC(flapping_detect_window_time) desc => ?DESC(flapping_detect_window_time)
} }
)}, )},
@ -625,7 +625,7 @@ fields("flapping_detect") ->
sc( sc(
duration(), duration(),
#{ #{
default => "5m", default => <<"5m">>,
desc => ?DESC(flapping_detect_ban_time) desc => ?DESC(flapping_detect_ban_time)
} }
)} )}
@ -652,7 +652,7 @@ fields("force_shutdown") ->
sc( sc(
wordsize(), wordsize(),
#{ #{
default => "32MB", default => <<"32MB">>,
desc => ?DESC(force_shutdown_max_heap_size), desc => ?DESC(force_shutdown_max_heap_size),
validator => fun ?MODULE:validate_heap_size/1 validator => fun ?MODULE:validate_heap_size/1
} }
@ -715,7 +715,7 @@ fields("conn_congestion") ->
sc( sc(
duration(), duration(),
#{ #{
default => "1m", default => <<"1m">>,
desc => ?DESC(conn_congestion_min_alarm_sustain_duration) desc => ?DESC(conn_congestion_min_alarm_sustain_duration)
} }
)} )}
@ -739,7 +739,7 @@ fields("force_gc") ->
sc( sc(
bytesize(), bytesize(),
#{ #{
default => "16MB", default => <<"16MB">>,
desc => ?DESC(force_gc_bytes) desc => ?DESC(force_gc_bytes)
} }
)} )}
@ -845,16 +845,21 @@ fields("mqtt_wss_listener") ->
]; ];
fields("mqtt_quic_listener") -> fields("mqtt_quic_listener") ->
[ [
%% TODO: ensure cacertfile is configurable
{"certfile", {"certfile",
sc( sc(
string(), string(),
#{desc => ?DESC(fields_mqtt_quic_listener_certfile)} #{
%% TODO: deprecated => {since, "5.1.0"}
desc => ?DESC(fields_mqtt_quic_listener_certfile)
}
)}, )},
{"keyfile", {"keyfile",
sc( sc(
string(), string(),
#{desc => ?DESC(fields_mqtt_quic_listener_keyfile)} %% TODO: deprecated => {since, "5.1.0"}
#{
desc => ?DESC(fields_mqtt_quic_listener_keyfile)
}
)}, )},
{"ciphers", ciphers_schema(quic)}, {"ciphers", ciphers_schema(quic)},
{"idle_timeout", {"idle_timeout",
@ -869,7 +874,7 @@ fields("mqtt_quic_listener") ->
sc( sc(
duration_ms(), duration_ms(),
#{ #{
default => "10s", default => <<"10s">>,
desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout) desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout)
} }
)}, )},
@ -880,6 +885,14 @@ fields("mqtt_quic_listener") ->
default => 0, default => 0,
desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval) desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval)
} }
)},
{"ssl_options",
sc(
ref("listener_quic_ssl_opts"),
#{
required => false,
desc => ?DESC(fields_mqtt_quic_listener_ssl_options)
}
)} )}
] ++ base_listener(14567); ] ++ base_listener(14567);
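%% The `ssl_options' field above groups the TLS settings of a QUIC listener under one
%% sub-config. A hedged illustration of what this could look like in a HOCON config file
%% (the listener name and file paths are examples, not defaults taken from this schema):
%%
%%   listeners.quic.default {
%%     bind = "0.0.0.0:14567"
%%     ssl_options {
%%       certfile = "etc/certs/cert.pem"
%%       keyfile  = "etc/certs/key.pem"
%%     }
%%   }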
fields("ws_opts") -> fields("ws_opts") ->
@ -888,7 +901,7 @@ fields("ws_opts") ->
sc( sc(
string(), string(),
#{ #{
default => "/mqtt", default => <<"/mqtt">>,
desc => ?DESC(fields_ws_opts_mqtt_path) desc => ?DESC(fields_ws_opts_mqtt_path)
} }
)}, )},
@ -912,7 +925,7 @@ fields("ws_opts") ->
sc( sc(
duration(), duration(),
#{ #{
default => "7200s", default => <<"7200s">>,
desc => ?DESC(fields_ws_opts_idle_timeout) desc => ?DESC(fields_ws_opts_idle_timeout)
} }
)}, )},
@ -936,7 +949,7 @@ fields("ws_opts") ->
sc( sc(
comma_separated_list(), comma_separated_list(),
#{ #{
default => "mqtt, mqtt-v3, mqtt-v3.1.1, mqtt-v5", default => <<"mqtt, mqtt-v3, mqtt-v3.1.1, mqtt-v5">>,
desc => ?DESC(fields_ws_opts_supported_subprotocols) desc => ?DESC(fields_ws_opts_supported_subprotocols)
} }
)}, )},
@ -968,7 +981,7 @@ fields("ws_opts") ->
sc( sc(
string(), string(),
#{ #{
default => "x-forwarded-for", default => <<"x-forwarded-for">>,
desc => ?DESC(fields_ws_opts_proxy_address_header) desc => ?DESC(fields_ws_opts_proxy_address_header)
} }
)}, )},
@ -976,7 +989,7 @@ fields("ws_opts") ->
sc( sc(
string(), string(),
#{ #{
default => "x-forwarded-port", default => <<"x-forwarded-port">>,
desc => ?DESC(fields_ws_opts_proxy_port_header) desc => ?DESC(fields_ws_opts_proxy_port_header)
} }
)}, )},
@ -1008,7 +1021,7 @@ fields("tcp_opts") ->
sc( sc(
duration(), duration(),
#{ #{
default => "15s", default => <<"15s">>,
desc => ?DESC(fields_tcp_opts_send_timeout) desc => ?DESC(fields_tcp_opts_send_timeout)
} }
)}, )},
@ -1049,7 +1062,7 @@ fields("tcp_opts") ->
sc( sc(
bytesize(), bytesize(),
#{ #{
default => "1MB", default => <<"1MB">>,
desc => ?DESC(fields_tcp_opts_high_watermark) desc => ?DESC(fields_tcp_opts_high_watermark)
} }
)}, )},
@ -1090,6 +1103,8 @@ fields("listener_wss_opts") ->
}, },
true true
); );
fields("listener_quic_ssl_opts") ->
server_ssl_opts_schema(#{}, false);
fields("ssl_client_opts") -> fields("ssl_client_opts") ->
client_ssl_opts_schema(#{}); client_ssl_opts_schema(#{});
fields("deflate_opts") -> fields("deflate_opts") ->
@ -1260,7 +1275,7 @@ fields("sys_topics") ->
sc( sc(
hoconsc:union([disabled, duration()]), hoconsc:union([disabled, duration()]),
#{ #{
default => "1m", default => <<"1m">>,
desc => ?DESC(sys_msg_interval) desc => ?DESC(sys_msg_interval)
} }
)}, )},
@ -1268,7 +1283,7 @@ fields("sys_topics") ->
sc( sc(
hoconsc:union([disabled, duration()]), hoconsc:union([disabled, duration()]),
#{ #{
default => "30s", default => <<"30s">>,
desc => ?DESC(sys_heartbeat_interval) desc => ?DESC(sys_heartbeat_interval)
} }
)}, )},
@ -1337,7 +1352,7 @@ fields("sysmon_vm") ->
sc( sc(
duration(), duration(),
#{ #{
default => "30s", default => <<"30s">>,
desc => ?DESC(sysmon_vm_process_check_interval) desc => ?DESC(sysmon_vm_process_check_interval)
} }
)}, )},
@ -1345,7 +1360,7 @@ fields("sysmon_vm") ->
sc( sc(
percent(), percent(),
#{ #{
default => "80%", default => <<"80%">>,
desc => ?DESC(sysmon_vm_process_high_watermark) desc => ?DESC(sysmon_vm_process_high_watermark)
} }
)}, )},
@ -1353,7 +1368,7 @@ fields("sysmon_vm") ->
sc( sc(
percent(), percent(),
#{ #{
default => "60%", default => <<"60%">>,
desc => ?DESC(sysmon_vm_process_low_watermark) desc => ?DESC(sysmon_vm_process_low_watermark)
} }
)}, )},
@ -1369,7 +1384,7 @@ fields("sysmon_vm") ->
sc( sc(
hoconsc:union([disabled, duration()]), hoconsc:union([disabled, duration()]),
#{ #{
default => "240ms", default => <<"240ms">>,
desc => ?DESC(sysmon_vm_long_schedule) desc => ?DESC(sysmon_vm_long_schedule)
} }
)}, )},
@ -1377,7 +1392,7 @@ fields("sysmon_vm") ->
sc( sc(
hoconsc:union([disabled, bytesize()]), hoconsc:union([disabled, bytesize()]),
#{ #{
default => "32MB", default => <<"32MB">>,
desc => ?DESC(sysmon_vm_large_heap) desc => ?DESC(sysmon_vm_large_heap)
} }
)}, )},
@ -1404,7 +1419,7 @@ fields("sysmon_os") ->
sc( sc(
duration(), duration(),
#{ #{
default => "60s", default => <<"60s">>,
desc => ?DESC(sysmon_os_cpu_check_interval) desc => ?DESC(sysmon_os_cpu_check_interval)
} }
)}, )},
@ -1412,7 +1427,7 @@ fields("sysmon_os") ->
sc( sc(
percent(), percent(),
#{ #{
default => "80%", default => <<"80%">>,
desc => ?DESC(sysmon_os_cpu_high_watermark) desc => ?DESC(sysmon_os_cpu_high_watermark)
} }
)}, )},
@ -1420,7 +1435,7 @@ fields("sysmon_os") ->
sc( sc(
percent(), percent(),
#{ #{
default => "60%", default => <<"60%">>,
desc => ?DESC(sysmon_os_cpu_low_watermark) desc => ?DESC(sysmon_os_cpu_low_watermark)
} }
)}, )},
@ -1428,7 +1443,7 @@ fields("sysmon_os") ->
sc( sc(
hoconsc:union([disabled, duration()]), hoconsc:union([disabled, duration()]),
#{ #{
default => "60s", default => <<"60s">>,
desc => ?DESC(sysmon_os_mem_check_interval) desc => ?DESC(sysmon_os_mem_check_interval)
} }
)}, )},
@ -1436,7 +1451,7 @@ fields("sysmon_os") ->
sc( sc(
percent(), percent(),
#{ #{
default => "70%", default => <<"70%">>,
desc => ?DESC(sysmon_os_sysmem_high_watermark) desc => ?DESC(sysmon_os_sysmem_high_watermark)
} }
)}, )},
@ -1444,7 +1459,7 @@ fields("sysmon_os") ->
sc( sc(
percent(), percent(),
#{ #{
default => "5%", default => <<"5%">>,
desc => ?DESC(sysmon_os_procmem_high_watermark) desc => ?DESC(sysmon_os_procmem_high_watermark)
} }
)} )}
@ -1465,7 +1480,7 @@ fields("sysmon_top") ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
mapping => "system_monitor.top_sample_interval", mapping => "system_monitor.top_sample_interval",
default => "2s", default => <<"2s">>,
desc => ?DESC(sysmon_top_sample_interval) desc => ?DESC(sysmon_top_sample_interval)
} }
)}, )},
@ -1484,7 +1499,7 @@ fields("sysmon_top") ->
#{ #{
mapping => "system_monitor.db_hostname", mapping => "system_monitor.db_hostname",
desc => ?DESC(sysmon_top_db_hostname), desc => ?DESC(sysmon_top_db_hostname),
default => "" default => <<>>
} }
)}, )},
{"db_port", {"db_port",
@ -1501,7 +1516,7 @@ fields("sysmon_top") ->
string(), string(),
#{ #{
mapping => "system_monitor.db_username", mapping => "system_monitor.db_username",
default => "system_monitor", default => <<"system_monitor">>,
desc => ?DESC(sysmon_top_db_username) desc => ?DESC(sysmon_top_db_username)
} }
)}, )},
@ -1510,7 +1525,7 @@ fields("sysmon_top") ->
binary(), binary(),
#{ #{
mapping => "system_monitor.db_password", mapping => "system_monitor.db_password",
default => "system_monitor_password", default => <<"system_monitor_password">>,
desc => ?DESC(sysmon_top_db_password), desc => ?DESC(sysmon_top_db_password),
converter => fun password_converter/2, converter => fun password_converter/2,
sensitive => true sensitive => true
@ -1521,7 +1536,7 @@ fields("sysmon_top") ->
string(), string(),
#{ #{
mapping => "system_monitor.db_name", mapping => "system_monitor.db_name",
default => "postgres", default => <<"postgres">>,
desc => ?DESC(sysmon_top_db_name) desc => ?DESC(sysmon_top_db_name)
} }
)} )}
@ -1551,7 +1566,7 @@ fields("alarm") ->
sc( sc(
duration(), duration(),
#{ #{
default => "24h", default => <<"24h">>,
example => "24h", example => "24h",
desc => ?DESC(alarm_validity_period) desc => ?DESC(alarm_validity_period)
} }
@ -1590,7 +1605,7 @@ mqtt_listener(Bind) ->
duration(), duration(),
#{ #{
desc => ?DESC(mqtt_listener_proxy_protocol_timeout), desc => ?DESC(mqtt_listener_proxy_protocol_timeout),
default => "3s" default => <<"3s">>
} }
)}, )},
{?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(listener)} {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(listener)}
@ -1769,6 +1784,12 @@ desc("listener_ssl_opts") ->
"Socket options for SSL connections."; "Socket options for SSL connections.";
desc("listener_wss_opts") -> desc("listener_wss_opts") ->
"Socket options for WebSocket/SSL connections."; "Socket options for WebSocket/SSL connections.";
desc("fields_mqtt_quic_listener_certfile") ->
"Path to the certificate file. Will be deprecated in 5.1, use '.ssl_options.certfile' instead.";
desc("fields_mqtt_quic_listener_keyfile") ->
"Path to the secret key file. Will be deprecated in 5.1, use '.ssl_options.keyfile' instead.";
desc("listener_quic_ssl_opts") ->
"TLS options for QUIC transport.";
desc("ssl_client_opts") -> desc("ssl_client_opts") ->
"Socket options for SSL clients."; "Socket options for SSL clients.";
desc("deflate_opts") -> desc("deflate_opts") ->
@ -1929,6 +1950,15 @@ common_ssl_opts_schema(Defaults) ->
default => Df("secure_renegotiate", true), default => Df("secure_renegotiate", true),
desc => ?DESC(common_ssl_opts_schema_secure_renegotiate) desc => ?DESC(common_ssl_opts_schema_secure_renegotiate)
} }
)},
{"hibernate_after",
sc(
duration(),
#{
default => Df("hibernate_after", <<"5s">>),
desc => ?DESC(common_ssl_opts_schema_hibernate_after)
}
)} )}
]. ].
@ -1976,7 +2006,7 @@ server_ssl_opts_schema(Defaults, IsRanchListener) ->
sc( sc(
duration(), duration(),
#{ #{
default => Df("handshake_timeout", "15s"), default => Df("handshake_timeout", <<"15s">>),
desc => ?DESC(server_ssl_opts_schema_handshake_timeout) desc => ?DESC(server_ssl_opts_schema_handshake_timeout)
} }
)} )}

@ -24,7 +24,6 @@
get_system_info/1, get_system_info/1,
get_memory/0, get_memory/0,
get_memory/2, get_memory/2,
mem_info/0,
loads/0 loads/0
]). ]).
@ -226,12 +225,6 @@ convert_allocated_areas({Key, Value1, Value2}) ->
convert_allocated_areas({Key, Value}) -> convert_allocated_areas({Key, Value}) ->
{Key, Value}. {Key, Value}.
mem_info() ->
Dataset = memsup:get_system_memory_data(),
Total = proplists:get_value(total_memory, Dataset),
Free = proplists:get_value(free_memory, Dataset),
[{total_memory, Total}, {used_memory, Total - Free}].
%%%% erlang vm scheduler_usage fun copied from recon %%%% erlang vm scheduler_usage fun copied from recon
scheduler_usage(Interval) when is_integer(Interval) -> scheduler_usage(Interval) when is_integer(Interval) ->
%% We start and stop the scheduler_wall_time system flag %% We start and stop the scheduler_wall_time system flag

@ -22,6 +22,8 @@
-export([ -export([
all/1, all/1,
init_per_testcase/3,
end_per_testcase/3,
boot_modules/1, boot_modules/1,
start_apps/1, start_apps/1,
start_apps/2, start_apps/2,
@ -150,6 +152,19 @@ all(Suite) ->
string:substr(atom_to_list(F), 1, 2) == "t_" string:substr(atom_to_list(F), 1, 2) == "t_"
]). ]).
init_per_testcase(Module, TestCase, Config) ->
case erlang:function_exported(Module, TestCase, 2) of
true -> Module:TestCase(init, Config);
false -> Config
end.
end_per_testcase(Module, TestCase, Config) ->
case erlang:function_exported(Module, TestCase, 2) of
true -> Module:TestCase('end', Config);
false -> ok
end,
Config.
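%% With these helpers a suite can define optional per-testcase setup/teardown clauses
%% next to the test case itself. A sketch of a hypothetical suite using this convention
%% (the suite delegates to the helpers; t_example and its clauses are illustrative):
%%
%%   init_per_testcase(TestCase, Config) ->
%%       emqx_common_test_helpers:init_per_testcase(?MODULE, TestCase, Config).
%%
%%   end_per_testcase(TestCase, Config) ->
%%       emqx_common_test_helpers:end_per_testcase(?MODULE, TestCase, Config).
%%
%%   %% optional hook clauses, only called when t_example/2 is exported:
%%   t_example(init, Config) ->
%%       [{started_at, erlang:monotonic_time()} | Config];
%%   t_example('end', _Config) ->
%%       ok.
%%
%%   %% the test case proper:
%%   t_example(Config) ->
%%       true = proplists:is_defined(started_at, Config).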
%% set emqx app boot modules %% set emqx app boot modules
-spec boot_modules(all | list(atom())) -> ok. -spec boot_modules(all | list(atom())) -> ok.
boot_modules(Mods) -> boot_modules(Mods) ->
@ -499,8 +514,8 @@ ensure_quic_listener(Name, UdpPort) ->
application:ensure_all_started(quicer), application:ensure_all_started(quicer),
Conf = #{ Conf = #{
acceptors => 16, acceptors => 16,
bind => {{0, 0, 0, 0}, UdpPort}, bind => UdpPort,
certfile => filename:join(code:lib_dir(emqx), "etc/certs/cert.pem"),
ciphers => ciphers =>
[ [
"TLS_AES_256_GCM_SHA384", "TLS_AES_256_GCM_SHA384",
@ -509,7 +524,10 @@ ensure_quic_listener(Name, UdpPort) ->
], ],
enabled => true, enabled => true,
idle_timeout => 15000, idle_timeout => 15000,
keyfile => filename:join(code:lib_dir(emqx), "etc/certs/key.pem"), ssl_options => #{
certfile => filename:join(code:lib_dir(emqx), "etc/certs/cert.pem"),
keyfile => filename:join(code:lib_dir(emqx), "etc/certs/key.pem")
},
limiter => #{}, limiter => #{},
max_connections => 1024000, max_connections => 1024000,
mountpoint => <<>>, mountpoint => <<>>,

@ -138,6 +138,41 @@ t_restart_listeners(_) ->
ok = emqx_listeners:restart(), ok = emqx_listeners:restart(),
ok = emqx_listeners:stop(). ok = emqx_listeners:stop().
t_restart_listeners_with_hibernate_after_disabled(_Config) ->
OldLConf = emqx_config:get([listeners]),
maps:foreach(
fun(LType, Listeners) ->
maps:foreach(
fun(Name, Opts) ->
case maps:is_key(ssl_options, Opts) of
true ->
emqx_config:put(
[
listeners,
LType,
Name,
ssl_options,
hibernate_after
],
undefined
);
_ ->
skip
end
end,
Listeners
)
end,
OldLConf
),
ok = emqx_listeners:start(),
ok = emqx_listeners:stop(),
%% sleep to avoid eaddrinuse flakiness when restarting the listeners
timer:sleep(timer:seconds(2)),
ok = emqx_listeners:restart(),
ok = emqx_listeners:stop(),
emqx_config:put([listeners], OldLConf).
t_max_conns_tcp(_) -> t_max_conns_tcp(_) ->
%% Note: Using a string representation for the bind address like %% Note: Using a string representation for the bind address like
%% "127.0.0.1" does not work %% "127.0.0.1" does not work

@ -905,7 +905,7 @@ t_shared_subscriptions_client_terminates_when_qos_eq_2(Config) ->
emqtt, emqtt,
connected, connected,
fun fun
(cast, ?PUBLISH_PACKET(?QOS_2, _PacketId), _State) -> (cast, {?PUBLISH_PACKET(?QOS_2, _PacketId), _Via}, _State) ->
ok = counters:add(CRef, 1, 1), ok = counters:add(CRef, 1, 1),
{stop, {shutdown, for_testing}}; {stop, {shutdown, for_testing}};
(Arg1, ARg2, Arg3) -> (Arg1, ARg2, Arg3) ->

File diff suppressed because it is too large
@ -50,12 +50,6 @@ t_systeminfo(_Config) ->
), ),
?assertEqual(undefined, emqx_vm:get_system_info(undefined)). ?assertEqual(undefined, emqx_vm:get_system_info(undefined)).
t_mem_info(_Config) ->
application:ensure_all_started(os_mon),
MemInfo = emqx_vm:mem_info(),
[{total_memory, _}, {used_memory, _}] = MemInfo,
application:stop(os_mon).
t_process_info(_Config) -> t_process_info(_Config) ->
ProcessInfo = emqx_vm:get_process_info(), ProcessInfo = emqx_vm:get_process_info(),
?assertEqual(emqx_vm:process_info_keys(), [K || {K, _V} <- ProcessInfo]). ?assertEqual(emqx_vm:process_info_keys(), [K || {K, _V} <- ProcessInfo]).

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_authn, [ {application, emqx_authn, [
{description, "EMQX Authentication"}, {description, "EMQX Authentication"},
{vsn, "0.1.13"}, {vsn, "0.1.14"},
{modules, []}, {modules, []},
{registered, [emqx_authn_sup, emqx_authn_registry]}, {registered, [emqx_authn_sup, emqx_authn_registry]},
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]}, {applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},

@ -74,7 +74,7 @@ query(_) -> undefined.
query_timeout(type) -> emqx_schema:duration_ms(); query_timeout(type) -> emqx_schema:duration_ms();
query_timeout(desc) -> ?DESC(?FUNCTION_NAME); query_timeout(desc) -> ?DESC(?FUNCTION_NAME);
query_timeout(default) -> "5s"; query_timeout(default) -> <<"5s">>;
query_timeout(_) -> undefined. query_timeout(_) -> undefined.
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_authz, [ {application, emqx_authz, [
{description, "An OTP application"}, {description, "An OTP application"},
{vsn, "0.1.13"}, {vsn, "0.1.14"},
{registered, []}, {registered, []},
{mod, {emqx_authz_app, []}}, {mod, {emqx_authz_app, []}},
{applications, [ {applications, [

View File

@ -108,7 +108,7 @@ authz_http_common_fields() ->
})}, })},
{request_timeout, {request_timeout,
mk_duration("Request timeout", #{ mk_duration("Request timeout", #{
required => false, default => "30s", desc => ?DESC(request_timeout) required => false, default => <<"30s">>, desc => ?DESC(request_timeout)
})} })}
] ++ ] ++
maps:to_list( maps:to_list(

@ -223,7 +223,7 @@ http_common_fields() ->
{url, fun url/1}, {url, fun url/1},
{request_timeout, {request_timeout,
mk_duration("Request timeout", #{ mk_duration("Request timeout", #{
required => false, default => "30s", desc => ?DESC(request_timeout) required => false, default => <<"30s">>, desc => ?DESC(request_timeout)
})}, })},
{body, ?HOCON(map(), #{required => false, desc => ?DESC(body)})} {body, ?HOCON(map(), #{required => false, desc => ?DESC(body)})}
] ++ ] ++

@ -1,9 +1,9 @@
{application, emqx_conf, [ {application, emqx_conf, [
{description, "EMQX configuration management"}, {description, "EMQX configuration management"},
{vsn, "0.1.12"}, {vsn, "0.1.13"},
{registered, []}, {registered, []},
{mod, {emqx_conf_app, []}}, {mod, {emqx_conf_app, []}},
{applications, [kernel, stdlib]}, {applications, [kernel, stdlib, emqx_ctl]},
{env, []}, {env, []},
{modules, []} {modules, []}
]}. ]}.

@ -145,7 +145,7 @@ fields("cluster") ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
mapping => "ekka.cluster_autoclean", mapping => "ekka.cluster_autoclean",
default => "5m", default => <<"5m">>,
desc => ?DESC(cluster_autoclean), desc => ?DESC(cluster_autoclean),
'readOnly' => true 'readOnly' => true
} }
@ -214,7 +214,7 @@ fields(cluster_mcast) ->
sc( sc(
string(), string(),
#{ #{
default => "239.192.0.1", default => <<"239.192.0.1">>,
desc => ?DESC(cluster_mcast_addr), desc => ?DESC(cluster_mcast_addr),
'readOnly' => true 'readOnly' => true
} }
@ -232,7 +232,7 @@ fields(cluster_mcast) ->
sc( sc(
string(), string(),
#{ #{
default => "0.0.0.0", default => <<"0.0.0.0">>,
desc => ?DESC(cluster_mcast_iface), desc => ?DESC(cluster_mcast_iface),
'readOnly' => true 'readOnly' => true
} }
@ -259,7 +259,7 @@ fields(cluster_mcast) ->
sc( sc(
emqx_schema:bytesize(), emqx_schema:bytesize(),
#{ #{
default => "16KB", default => <<"16KB">>,
desc => ?DESC(cluster_mcast_sndbuf), desc => ?DESC(cluster_mcast_sndbuf),
'readOnly' => true 'readOnly' => true
} }
@ -268,7 +268,7 @@ fields(cluster_mcast) ->
sc( sc(
emqx_schema:bytesize(), emqx_schema:bytesize(),
#{ #{
default => "16KB", default => <<"16KB">>,
desc => ?DESC(cluster_mcast_recbuf), desc => ?DESC(cluster_mcast_recbuf),
'readOnly' => true 'readOnly' => true
} }
@ -277,7 +277,7 @@ fields(cluster_mcast) ->
sc( sc(
emqx_schema:bytesize(), emqx_schema:bytesize(),
#{ #{
default => "32KB", default => <<"32KB">>,
desc => ?DESC(cluster_mcast_buffer), desc => ?DESC(cluster_mcast_buffer),
'readOnly' => true 'readOnly' => true
} }
@ -289,7 +289,7 @@ fields(cluster_dns) ->
sc( sc(
string(), string(),
#{ #{
default => "localhost", default => <<"localhost">>,
desc => ?DESC(cluster_dns_name), desc => ?DESC(cluster_dns_name),
'readOnly' => true 'readOnly' => true
} }
@ -318,7 +318,7 @@ fields(cluster_etcd) ->
sc( sc(
string(), string(),
#{ #{
default => "emqxcl", default => <<"emqxcl">>,
desc => ?DESC(cluster_etcd_prefix), desc => ?DESC(cluster_etcd_prefix),
'readOnly' => true 'readOnly' => true
} }
@ -327,7 +327,7 @@ fields(cluster_etcd) ->
sc( sc(
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
default => "1m", default => <<"1m">>,
'readOnly' => true, 'readOnly' => true,
desc => ?DESC(cluster_etcd_node_ttl) desc => ?DESC(cluster_etcd_node_ttl)
} }
@ -347,7 +347,7 @@ fields(cluster_k8s) ->
sc( sc(
string(), string(),
#{ #{
default => "http://10.110.111.204:8080", default => <<"http://10.110.111.204:8080">>,
desc => ?DESC(cluster_k8s_apiserver), desc => ?DESC(cluster_k8s_apiserver),
'readOnly' => true 'readOnly' => true
} }
@ -356,7 +356,7 @@ fields(cluster_k8s) ->
sc( sc(
string(), string(),
#{ #{
default => "emqx", default => <<"emqx">>,
desc => ?DESC(cluster_k8s_service_name), desc => ?DESC(cluster_k8s_service_name),
'readOnly' => true 'readOnly' => true
} }
@ -374,7 +374,7 @@ fields(cluster_k8s) ->
sc( sc(
string(), string(),
#{ #{
default => "default", default => <<"default">>,
desc => ?DESC(cluster_k8s_namespace), desc => ?DESC(cluster_k8s_namespace),
'readOnly' => true 'readOnly' => true
} }
@ -383,7 +383,7 @@ fields(cluster_k8s) ->
sc( sc(
string(), string(),
#{ #{
default => "pod.local", default => <<"pod.local">>,
'readOnly' => true, 'readOnly' => true,
desc => ?DESC(cluster_k8s_suffix) desc => ?DESC(cluster_k8s_suffix)
} }
@ -395,7 +395,7 @@ fields("node") ->
sc( sc(
string(), string(),
#{ #{
default => "emqx@127.0.0.1", default => <<"emqx@127.0.0.1">>,
'readOnly' => true, 'readOnly' => true,
desc => ?DESC(node_name) desc => ?DESC(node_name)
} }
@ -477,7 +477,7 @@ fields("node") ->
hoconsc:union([disabled, emqx_schema:duration()]), hoconsc:union([disabled, emqx_schema:duration()]),
#{ #{
mapping => "emqx_machine.global_gc_interval", mapping => "emqx_machine.global_gc_interval",
default => "15m", default => <<"15m">>,
desc => ?DESC(node_global_gc_interval), desc => ?DESC(node_global_gc_interval),
'readOnly' => true 'readOnly' => true
} }
@ -497,7 +497,7 @@ fields("node") ->
emqx_schema:duration_s(), emqx_schema:duration_s(),
#{ #{
mapping => "vm_args.-env ERL_CRASH_DUMP_SECONDS", mapping => "vm_args.-env ERL_CRASH_DUMP_SECONDS",
default => "30s", default => <<"30s">>,
desc => ?DESC(node_crash_dump_seconds), desc => ?DESC(node_crash_dump_seconds),
'readOnly' => true 'readOnly' => true
} }
@ -507,7 +507,7 @@ fields("node") ->
emqx_schema:bytesize(), emqx_schema:bytesize(),
#{ #{
mapping => "vm_args.-env ERL_CRASH_DUMP_BYTES", mapping => "vm_args.-env ERL_CRASH_DUMP_BYTES",
default => "100MB", default => <<"100MB">>,
desc => ?DESC(node_crash_dump_bytes), desc => ?DESC(node_crash_dump_bytes),
'readOnly' => true 'readOnly' => true
} }
@ -517,7 +517,7 @@ fields("node") ->
emqx_schema:duration_s(), emqx_schema:duration_s(),
#{ #{
mapping => "vm_args.-kernel net_ticktime", mapping => "vm_args.-kernel net_ticktime",
default => "2m", default => <<"2m">>,
'readOnly' => true, 'readOnly' => true,
desc => ?DESC(node_dist_net_ticktime) desc => ?DESC(node_dist_net_ticktime)
} }
@ -624,7 +624,7 @@ fields("cluster_call") ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
desc => ?DESC(cluster_call_retry_interval), desc => ?DESC(cluster_call_retry_interval),
default => "1m" default => <<"1m">>
} }
)}, )},
{"max_history", {"max_history",
@ -640,7 +640,7 @@ fields("cluster_call") ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
desc => ?DESC(cluster_call_cleanup_interval), desc => ?DESC(cluster_call_cleanup_interval),
default => "5m" default => <<"5m">>
} }
)} )}
]; ];
@ -712,7 +712,7 @@ fields("rpc") ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
mapping => "gen_rpc.connect_timeout", mapping => "gen_rpc.connect_timeout",
default => "5s", default => <<"5s">>,
desc => ?DESC(rpc_connect_timeout) desc => ?DESC(rpc_connect_timeout)
} }
)}, )},
@ -745,7 +745,7 @@ fields("rpc") ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
mapping => "gen_rpc.send_timeout", mapping => "gen_rpc.send_timeout",
default => "5s", default => <<"5s">>,
desc => ?DESC(rpc_send_timeout) desc => ?DESC(rpc_send_timeout)
} }
)}, )},
@ -754,7 +754,7 @@ fields("rpc") ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
mapping => "gen_rpc.authentication_timeout", mapping => "gen_rpc.authentication_timeout",
default => "5s", default => <<"5s">>,
desc => ?DESC(rpc_authentication_timeout) desc => ?DESC(rpc_authentication_timeout)
} }
)}, )},
@ -763,7 +763,7 @@ fields("rpc") ->
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
mapping => "gen_rpc.call_receive_timeout", mapping => "gen_rpc.call_receive_timeout",
default => "15s", default => <<"15s">>,
desc => ?DESC(rpc_call_receive_timeout) desc => ?DESC(rpc_call_receive_timeout)
} }
)}, )},
@ -772,7 +772,7 @@ fields("rpc") ->
emqx_schema:duration_s(), emqx_schema:duration_s(),
#{ #{
mapping => "gen_rpc.socket_keepalive_idle", mapping => "gen_rpc.socket_keepalive_idle",
default => "15m", default => <<"15m">>,
desc => ?DESC(rpc_socket_keepalive_idle) desc => ?DESC(rpc_socket_keepalive_idle)
} }
)}, )},
@ -781,7 +781,7 @@ fields("rpc") ->
emqx_schema:duration_s(), emqx_schema:duration_s(),
#{ #{
mapping => "gen_rpc.socket_keepalive_interval", mapping => "gen_rpc.socket_keepalive_interval",
default => "75s", default => <<"75s">>,
desc => ?DESC(rpc_socket_keepalive_interval) desc => ?DESC(rpc_socket_keepalive_interval)
} }
)}, )},
@ -799,7 +799,7 @@ fields("rpc") ->
emqx_schema:bytesize(), emqx_schema:bytesize(),
#{ #{
mapping => "gen_rpc.socket_sndbuf", mapping => "gen_rpc.socket_sndbuf",
default => "1MB", default => <<"1MB">>,
desc => ?DESC(rpc_socket_sndbuf) desc => ?DESC(rpc_socket_sndbuf)
} }
)}, )},
@ -808,7 +808,7 @@ fields("rpc") ->
emqx_schema:bytesize(), emqx_schema:bytesize(),
#{ #{
mapping => "gen_rpc.socket_recbuf", mapping => "gen_rpc.socket_recbuf",
default => "1MB", default => <<"1MB">>,
desc => ?DESC(rpc_socket_recbuf) desc => ?DESC(rpc_socket_recbuf)
} }
)}, )},
@ -817,7 +817,7 @@ fields("rpc") ->
emqx_schema:bytesize(), emqx_schema:bytesize(),
#{ #{
mapping => "gen_rpc.socket_buffer", mapping => "gen_rpc.socket_buffer",
default => "1MB", default => <<"1MB">>,
desc => ?DESC(rpc_socket_buffer) desc => ?DESC(rpc_socket_buffer)
} }
)}, )},
@ -861,7 +861,7 @@ fields("log_file_handler") ->
sc( sc(
hoconsc:union([infinity, emqx_schema:bytesize()]), hoconsc:union([infinity, emqx_schema:bytesize()]),
#{ #{
default => "50MB", default => <<"50MB">>,
desc => ?DESC("log_file_handler_max_size") desc => ?DESC("log_file_handler_max_size")
} }
)} )}
@ -899,7 +899,7 @@ fields("log_overload_kill") ->
sc( sc(
emqx_schema:bytesize(), emqx_schema:bytesize(),
#{ #{
default => "30MB", default => <<"30MB">>,
desc => ?DESC("log_overload_kill_mem_size") desc => ?DESC("log_overload_kill_mem_size")
} }
)}, )},
@ -915,7 +915,7 @@ fields("log_overload_kill") ->
sc( sc(
hoconsc:union([emqx_schema:duration_ms(), infinity]), hoconsc:union([emqx_schema:duration_ms(), infinity]),
#{ #{
default => "5s", default => <<"5s">>,
desc => ?DESC("log_overload_kill_restart_after") desc => ?DESC("log_overload_kill_restart_after")
} }
)} )}
@ -942,7 +942,7 @@ fields("log_burst_limit") ->
sc( sc(
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
default => "1s", default => <<"1s">>,
desc => ?DESC("log_burst_limit_window_time") desc => ?DESC("log_burst_limit_window_time")
} }
)} )}
@ -1092,7 +1092,7 @@ log_handler_common_confs(Enable) ->
sc( sc(
string(), string(),
#{ #{
default => "system", default => <<"system">>,
desc => ?DESC("common_handler_time_offset"), desc => ?DESC("common_handler_time_offset"),
validator => fun validate_time_offset/1 validator => fun validate_time_offset/1
} }
@ -1169,9 +1169,9 @@ crash_dump_file_default() ->
case os:getenv("RUNNER_LOG_DIR") of case os:getenv("RUNNER_LOG_DIR") of
false -> false ->
%% testing, or running emqx app as deps %% testing, or running emqx app as deps
"log/erl_crash.dump"; <<"log/erl_crash.dump">>;
Dir -> Dir ->
[filename:join([Dir, "erl_crash.dump"])] unicode:characters_to_binary(filename:join([Dir, "erl_crash.dump"]), utf8)
end. end.
%% utils %% utils

@ -114,9 +114,13 @@ topic filters for <code>remote.topic</code> of ingress connections."""
desc { desc {
en: """If enable bridge mode. en: """If enable bridge mode.
NOTE: This setting is only for MQTT protocol version older than 5.0, and the remote MQTT NOTE: This setting is only for MQTT protocol version older than 5.0, and the remote MQTT
broker MUST support this feature.""" broker MUST support this feature.
If bridge_mode is set to true, the bridge will indicate to the remote broker that it is a bridge, not an ordinary client.
This means that loop detection will be more effective and that retained messages will be propagated correctly."""
zh: """是否启用 Bridge Mode。 zh: """是否启用 Bridge Mode。
注意:此设置只针对 MQTT 协议版本 < 5.0 有效,并且需要远程 MQTT Broker 支持 Bridge Mode。""" 注意:此设置只针对 MQTT 协议版本 < 5.0 有效,并且需要远程 MQTT Broker 支持 Bridge Mode。
如果设置为 true ,桥接会告诉远端服务器当前连接是一个桥接而不是一个普通的客户端。
这意味着消息回环检测会更加高效,并且远端服务器收到的保留消息的标志位会透传给本地。"""
} }
label { label {
en: "Bridge Mode" en: "Bridge Mode"

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_connector, [ {application, emqx_connector, [
{description, "EMQX Data Integration Connectors"}, {description, "EMQX Data Integration Connectors"},
{vsn, "0.1.14"}, {vsn, "0.1.15"},
{registered, []}, {registered, []},
{mod, {emqx_connector_app, []}}, {mod, {emqx_connector_app, []}},
{applications, [ {applications, [

@ -87,7 +87,7 @@ fields(config) ->
sc( sc(
emqx_schema:duration_ms(), emqx_schema:duration_ms(),
#{ #{
default => "15s", default => <<"15s">>,
desc => ?DESC("connect_timeout") desc => ?DESC("connect_timeout")
} }
)}, )},

@ -391,22 +391,7 @@ proc_sql_params(TypeOrKey, SQLOrData, Params, #{params_tokens := ParamsTokens})
end. end.
on_batch_insert(InstId, BatchReqs, InsertPart, Tokens, State) -> on_batch_insert(InstId, BatchReqs, InsertPart, Tokens, State) ->
JoinFun = fun SQL = emqx_plugin_libs_rule:proc_batch_sql(BatchReqs, InsertPart, Tokens),
([Msg]) ->
emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg);
([H | T]) ->
lists:foldl(
fun(Msg, Acc) ->
Value = emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg),
<<Acc/binary, ", ", Value/binary>>
end,
emqx_plugin_libs_rule:proc_sql_param_str(Tokens, H),
T
)
end,
{_, Msgs} = lists:unzip(BatchReqs),
JoinPart = JoinFun(Msgs),
SQL = <<InsertPart/binary, " values ", JoinPart/binary>>,
on_sql_query(InstId, query, SQL, [], default_timeout, State). on_sql_query(InstId, query, SQL, [], default_timeout, State).
on_sql_query( on_sql_query(

@ -100,7 +100,11 @@ on_start(
case maps:get(enable, SSL) of case maps:get(enable, SSL) of
true -> true ->
[ [
{ssl, required}, %% note: this is converted to `required' in
%% `conn_opts/2', and there's a boolean guard
%% there; if this is set to `required' here,
%% that'll require changing `conn_opts/2''s guard.
{ssl, true},
{ssl_opts, emqx_tls_lib:to_client_opts(SSL)} {ssl_opts, emqx_tls_lib:to_client_opts(SSL)}
]; ];
false -> false ->

@ -115,12 +115,12 @@ fields("server_configs") ->
desc => ?DESC("clean_start") desc => ?DESC("clean_start")
} }
)}, )},
{keepalive, mk_duration("MQTT Keepalive.", #{default => "300s"})}, {keepalive, mk_duration("MQTT Keepalive.", #{default => <<"300s">>})},
{retry_interval, {retry_interval,
mk_duration( mk_duration(
"Message retry interval. Delay for the MQTT bridge to retry sending the QoS1/QoS2 " "Message retry interval. Delay for the MQTT bridge to retry sending the QoS1/QoS2 "
"messages in case of ACK not received.", "messages in case of ACK not received.",
#{default => "15s"} #{default => <<"15s">>}
)}, )},
{max_inflight, {max_inflight,
mk( mk(

apps/emqx_ctl/README.md (new file)
@ -0,0 +1,4 @@
emqx_ctl
=====
Backend module for the `emqx_ctl` command.

@ -0,0 +1,2 @@
{erl_opts, [debug_info]}.
{deps, []}.

@ -0,0 +1,15 @@
{application, emqx_ctl, [
{description, "Backend for emqx_ctl script"},
{vsn, "0.1.0"},
{registered, []},
{mod, {emqx_ctl_app, []}},
{applications, [
kernel,
stdlib
]},
{env, []},
{modules, []},
{licenses, ["Apache-2.0"]},
{links, []}
]}.

@ -18,8 +18,7 @@
-behaviour(gen_server). -behaviour(gen_server).
-include("types.hrl"). -include_lib("kernel/include/logger.hrl").
-include("logger.hrl").
-export([start_link/0, stop/0]). -export([start_link/0, stop/0]).
@ -70,7 +69,7 @@
-define(SERVER, ?MODULE). -define(SERVER, ?MODULE).
-define(CMD_TAB, emqx_command). -define(CMD_TAB, emqx_command).
-spec start_link() -> startlink_ret(). -spec start_link() -> {ok, pid()}.
start_link() -> start_link() ->
gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
@ -103,7 +102,7 @@ cast(Msg) -> gen_server:cast(?SERVER, Msg).
run_command([]) -> run_command([]) ->
run_command(help, []); run_command(help, []);
run_command([Cmd | Args]) -> run_command([Cmd | Args]) ->
case emqx_misc:safe_to_existing_atom(Cmd) of case safe_to_existing_atom(Cmd) of
{ok, Cmd1} -> {ok, Cmd1} ->
run_command(Cmd1, Args); run_command(Cmd1, Args);
_ -> _ ->
@ -122,7 +121,7 @@ run_command(Cmd, Args) when is_atom(Cmd) ->
ok ok
catch catch
_:Reason:Stacktrace -> _:Reason:Stacktrace ->
?SLOG(error, #{ ?LOG_ERROR(#{
msg => "ctl_command_crashed", msg => "ctl_command_crashed",
stacktrace => Stacktrace, stacktrace => Stacktrace,
reason => Reason reason => Reason
@ -220,7 +219,7 @@ format_usage(CmdParams, Desc, Width) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
init([]) -> init([]) ->
ok = emqx_tables:new(?CMD_TAB, [protected, ordered_set]), _ = ets:new(?CMD_TAB, [named_table, protected, ordered_set]),
{ok, #state{seq = 0}}. {ok, #state{seq = 0}}.
handle_call({register_command, Cmd, MF, Opts}, _From, State = #state{seq = Seq}) -> handle_call({register_command, Cmd, MF, Opts}, _From, State = #state{seq = Seq}) ->
@ -229,23 +228,23 @@ handle_call({register_command, Cmd, MF, Opts}, _From, State = #state{seq = Seq})
ets:insert(?CMD_TAB, {{Seq, Cmd}, MF, Opts}), ets:insert(?CMD_TAB, {{Seq, Cmd}, MF, Opts}),
{reply, ok, next_seq(State)}; {reply, ok, next_seq(State)};
[[OriginSeq] | _] -> [[OriginSeq] | _] ->
?SLOG(warning, #{msg => "CMD_overidden", cmd => Cmd, mf => MF}), ?LOG_WARNING(#{msg => "CMD_overidden", cmd => Cmd, mf => MF}),
true = ets:insert(?CMD_TAB, {{OriginSeq, Cmd}, MF, Opts}), true = ets:insert(?CMD_TAB, {{OriginSeq, Cmd}, MF, Opts}),
{reply, ok, State} {reply, ok, State}
end; end;
handle_call(Req, _From, State) -> handle_call(Req, _From, State) ->
?SLOG(error, #{msg => "unexpected_call", call => Req}), ?LOG_ERROR(#{msg => "unexpected_call", call => Req}),
{reply, ignored, State}. {reply, ignored, State}.
handle_cast({unregister_command, Cmd}, State) -> handle_cast({unregister_command, Cmd}, State) ->
ets:match_delete(?CMD_TAB, {{'_', Cmd}, '_', '_'}), ets:match_delete(?CMD_TAB, {{'_', Cmd}, '_', '_'}),
noreply(State); noreply(State);
handle_cast(Msg, State) -> handle_cast(Msg, State) ->
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), ?LOG_ERROR(#{msg => "unexpected_cast", cast => Msg}),
noreply(State). noreply(State).
handle_info(Info, State) -> handle_info(Info, State) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}), ?LOG_ERROR(#{msg => "unexpected_info", info => Info}),
noreply(State). noreply(State).
terminate(_Reason, _State) -> terminate(_Reason, _State) ->
@ -272,3 +271,11 @@ zip_cmd([X | Xs], [Y | Ys]) -> [{X, Y} | zip_cmd(Xs, Ys)];
zip_cmd([X | Xs], []) -> [{X, ""} | zip_cmd(Xs, [])]; zip_cmd([X | Xs], []) -> [{X, ""} | zip_cmd(Xs, [])];
zip_cmd([], [Y | Ys]) -> [{"", Y} | zip_cmd([], Ys)]; zip_cmd([], [Y | Ys]) -> [{"", Y} | zip_cmd([], Ys)];
zip_cmd([], []) -> []. zip_cmd([], []) -> [].
safe_to_existing_atom(Str) ->
try
{ok, list_to_existing_atom(Str)}
catch
_:badarg ->
undefined
end.
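%% Example usage of this module from another application. A hedged sketch only: the
%% `demo' command and the `demo_cli' callback module are hypothetical, shown to
%% illustrate how run_command/1 dispatches to a registered {Module, Function}.
%%
%%   %% register at application start:
%%   ok = emqx_ctl:register_command(demo, {demo_cli, cmd}, []).
%%
%%   %% the callback module:
%%   -module(demo_cli).
%%   -export([cmd/1]).
%%
%%   cmd(["status"]) ->
%%       emqx_ctl:print("demo is running~n");
%%   cmd(_) ->
%%       emqx_ctl:usage([{"demo status", "Print demo status"}]).
%%
%% Calling `emqx_ctl:run_command(["demo", "status"])' then resolves the `demo' atom and
%% invokes `demo_cli:cmd(["status"])'.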

@ -0,0 +1,18 @@
%%%-------------------------------------------------------------------
%% @doc emqx_ctl public API
%% @end
%%%-------------------------------------------------------------------
-module(emqx_ctl_app).
-behaviour(application).
-export([start/2, stop/1]).
start(_StartType, _StartArgs) ->
emqx_ctl_sup:start_link().
stop(_State) ->
ok.
%% internal functions

@ -0,0 +1,33 @@
%%%-------------------------------------------------------------------
%% @doc emqx_ctl top level supervisor.
%% @end
%%%-------------------------------------------------------------------
-module(emqx_ctl_sup).
-behaviour(supervisor).
-export([start_link/0]).
-export([init/1]).
-define(SERVER, ?MODULE).
start_link() ->
supervisor:start_link({local, ?SERVER}, ?MODULE, []).
init([]) ->
SupFlags = #{
strategy => one_for_all,
intensity => 0,
period => 1
},
ChildSpecs = [
#{
id => emqx_ctl,
start => {emqx_ctl, start_link, []},
type => worker,
restart => permanent
}
],
{ok, {SupFlags, ChildSpecs}}.

@ -22,12 +22,10 @@
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl"). -include_lib("common_test/include/ct.hrl").
all() -> emqx_common_test_helpers:all(?MODULE). all() -> [t_reg_unreg_command, t_run_commands, t_print, t_usage, t_unexpected].
init_per_suite(Config) -> init_per_suite(Config) ->
%% ensure stopped, this suite tests emqx_ctl process independently application:stop(emqx_ctl),
application:stop(emqx),
ok = emqx_logger:set_log_level(emergency),
Config. Config.
end_per_suite(_Config) -> end_per_suite(_Config) ->

@ -2,10 +2,10 @@
{application, emqx_dashboard, [ {application, emqx_dashboard, [
{description, "EMQX Web Dashboard"}, {description, "EMQX Web Dashboard"},
% strict semver, bump manually! % strict semver, bump manually!
{vsn, "5.0.13"}, {vsn, "5.0.14"},
{modules, []}, {modules, []},
{registered, [emqx_dashboard_sup]}, {registered, [emqx_dashboard_sup]},
{applications, [kernel, stdlib, mnesia, minirest, emqx]}, {applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]},
{mod, {emqx_dashboard_app, []}}, {mod, {emqx_dashboard_app, []}},
{env, []}, {env, []},
{licenses, ["Apache-2.0"]}, {licenses, ["Apache-2.0"]},

@ -55,7 +55,7 @@ schema("/monitor/nodes/:node") ->
parameters => [parameter_node(), parameter_latest()], parameters => [parameter_node(), parameter_latest()],
responses => #{ responses => #{
200 => hoconsc:mk(hoconsc:array(hoconsc:ref(sampler)), #{}), 200 => hoconsc:mk(hoconsc:array(hoconsc:ref(sampler)), #{}),
400 => emqx_dashboard_swagger:error_codes(['BAD_RPC'], <<"Bad RPC">>) 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Node not found">>)
} }
} }
}; };
@ -79,7 +79,7 @@ schema("/monitor_current/nodes/:node") ->
parameters => [parameter_node()], parameters => [parameter_node()],
responses => #{ responses => #{
200 => hoconsc:mk(hoconsc:ref(sampler_current), #{}), 200 => hoconsc:mk(hoconsc:ref(sampler_current), #{}),
400 => emqx_dashboard_swagger:error_codes(['BAD_RPC'], <<"Bad RPC">>) 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Node not found">>)
} }
} }
}. }.
@ -122,38 +122,31 @@ fields(sampler_current) ->
 monitor(get, #{query_string := QS, bindings := Bindings}) ->
     Latest = maps:get(<<"latest">>, QS, infinity),
     RawNode = maps:get(node, Bindings, all),
-    case emqx_misc:safe_to_existing_atom(RawNode, utf8) of
-        {ok, Node} ->
-            case emqx_dashboard_monitor:samplers(Node, Latest) of
-                {badrpc, {Node, Reason}} ->
-                    Message = list_to_binary(
-                        io_lib:format("Bad node ~p, rpc failed ~p", [Node, Reason])
-                    ),
-                    {400, 'BAD_RPC', Message};
-                Samplers ->
-                    {200, Samplers}
-            end;
-        _ ->
-            Message = list_to_binary(io_lib:format("Bad node ~p", [RawNode])),
-            {400, 'BAD_RPC', Message}
+    with_node(RawNode, dashboard_samplers_fun(Latest)).
+
+dashboard_samplers_fun(Latest) ->
+    fun(NodeOrCluster) ->
+        case emqx_dashboard_monitor:samplers(NodeOrCluster, Latest) of
+            {badrpc, _} = Error -> Error;
+            Samplers -> {ok, Samplers}
+        end
     end.
 
 monitor_current(get, #{bindings := Bindings}) ->
     RawNode = maps:get(node, Bindings, all),
+    with_node(RawNode, fun emqx_dashboard_monitor:current_rate/1).
+
+with_node(RawNode, Fun) ->
     case emqx_misc:safe_to_existing_atom(RawNode, utf8) of
         {ok, NodeOrCluster} ->
-            case emqx_dashboard_monitor:current_rate(NodeOrCluster) of
-                {ok, CurrentRate} ->
-                    {200, CurrentRate};
+            case Fun(NodeOrCluster) of
                 {badrpc, {Node, Reason}} ->
-                    Message = list_to_binary(
-                        io_lib:format("Bad node ~p, rpc failed ~p", [Node, Reason])
-                    ),
-                    {400, 'BAD_RPC', Message}
+                    {404, 'NOT_FOUND', io_lib:format("Node not found: ~p (~p)", [Node, Reason])};
+                {ok, Result} ->
+                    {200, Result}
             end;
-        {error, _} ->
-            Message = list_to_binary(io_lib:format("Bad node ~p", [RawNode])),
-            {400, 'BAD_RPC', Message}
+        _Error ->
+            {404, 'NOT_FOUND', io_lib:format("Node not found: ~p", [RawNode])}
     end.
 %% -------------------------------------------------------------------------------------------------
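Both endpoints now go through the internal with_node/2 wrapper, so an unknown or unreachable node is reported the same way everywhere. Roughly, for illustration (with_node/2 is not exported; return values are sketched):

    with_node(<<"emqx@127.0.0.1">>, fun emqx_dashboard_monitor:current_rate/1),
    %% -> {200, CurrentRate} when the node resolves and the RPC succeeds
    with_node(<<"emqx@nonexistent">>, fun emqx_dashboard_monitor:current_rate/1).
    %% -> {404, 'NOT_FOUND', "Node not found: ..."}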


@ -40,7 +40,7 @@ fields("dashboard") ->
?HOCON( ?HOCON(
emqx_schema:duration_s(), emqx_schema:duration_s(),
#{ #{
default => "10s", default => <<"10s">>,
desc => ?DESC(sample_interval), desc => ?DESC(sample_interval),
validator => fun validate_sample_interval/1 validator => fun validate_sample_interval/1
} }
@ -49,7 +49,7 @@ fields("dashboard") ->
?HOCON( ?HOCON(
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
default => "60m", default => <<"60m">>,
desc => ?DESC(token_expired_time) desc => ?DESC(token_expired_time)
} }
)}, )},
@ -141,7 +141,7 @@ common_listener_fields() ->
?HOCON( ?HOCON(
emqx_schema:duration(), emqx_schema:duration(),
#{ #{
default => "10s", default => <<"10s">>,
desc => ?DESC(send_timeout) desc => ?DESC(send_timeout)
} }
)}, )},
@ -206,14 +206,14 @@ desc(_) ->
undefined. undefined.
default_username(type) -> binary(); default_username(type) -> binary();
default_username(default) -> "admin"; default_username(default) -> <<"admin">>;
default_username(required) -> true; default_username(required) -> true;
default_username(desc) -> ?DESC(default_username); default_username(desc) -> ?DESC(default_username);
default_username('readOnly') -> true; default_username('readOnly') -> true;
default_username(_) -> undefined. default_username(_) -> undefined.
default_password(type) -> binary(); default_password(type) -> binary();
default_password(default) -> "public"; default_password(default) -> <<"public">>;
default_password(required) -> true; default_password(required) -> true;
default_password('readOnly') -> true; default_password('readOnly') -> true;
default_password(sensitive) -> true; default_password(sensitive) -> true;
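The schema edits above all follow one pattern: defaults for binary()- and duration-typed fields are now written as Erlang binaries instead of charlists, so the default literal has the same type the field declares. A minimal hedged example of the convention:

    %% sketch of the pattern; based on the send_timeout field above
    {send_timeout, hoconsc:mk(emqx_schema:duration(), #{default => <<"10s">>})}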


@ -22,8 +22,6 @@
-import(emqx_dashboard_SUITE, [auth_header_/0]). -import(emqx_dashboard_SUITE, [auth_header_/0]).
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("emqx/include/emqx.hrl").
-include("emqx_dashboard.hrl"). -include("emqx_dashboard.hrl").
-define(SERVER, "http://127.0.0.1:18083"). -define(SERVER, "http://127.0.0.1:18083").
@ -114,9 +112,9 @@ t_monitor_reset(_) ->
ok. ok.
t_monitor_api_error(_) -> t_monitor_api_error(_) ->
{error, {400, #{<<"code">> := <<"BAD_RPC">>}}} = {error, {404, #{<<"code">> := <<"NOT_FOUND">>}}} =
request(["monitor", "nodes", 'emqx@127.0.0.2']), request(["monitor", "nodes", 'emqx@127.0.0.2']),
{error, {400, #{<<"code">> := <<"BAD_RPC">>}}} = {error, {404, #{<<"code">> := <<"NOT_FOUND">>}}} =
request(["monitor_current", "nodes", 'emqx@127.0.0.2']), request(["monitor_current", "nodes", 'emqx@127.0.0.2']),
{error, {400, #{<<"code">> := <<"BAD_REQUEST">>}}} = {error, {400, #{<<"code">> := <<"BAD_REQUEST">>}}} =
request(["monitor"], "latest=0"), request(["monitor"], "latest=0"),


@ -32,8 +32,8 @@ fields("root") ->
)}, )},
{default_username, fun default_username/1}, {default_username, fun default_username/1},
{default_password, fun default_password/1}, {default_password, fun default_password/1},
{sample_interval, mk(emqx_schema:duration_s(), #{default => "10s"})}, {sample_interval, mk(emqx_schema:duration_s(), #{default => <<"10s">>})},
{token_expired_time, mk(emqx_schema:duration(), #{default => "30m"})} {token_expired_time, mk(emqx_schema:duration(), #{default => <<"30m">>})}
]; ];
fields("ref1") -> fields("ref1") ->
[ [
@ -52,7 +52,7 @@ fields("ref3") ->
]. ].
default_username(type) -> string(); default_username(type) -> string();
default_username(default) -> "admin"; default_username(default) -> <<"admin">>;
default_username(required) -> true; default_username(required) -> true;
default_username(_) -> undefined. default_username(_) -> undefined.


@ -790,7 +790,7 @@ to_schema(Body) ->
fields(good_ref) -> fields(good_ref) ->
[ [
{'webhook-host', mk(emqx_schema:ip_port(), #{default => "127.0.0.1:80"})}, {'webhook-host', mk(emqx_schema:ip_port(), #{default => <<"127.0.0.1:80">>})},
{log_dir, mk(emqx_schema:file(), #{example => "var/log/emqx"})}, {log_dir, mk(emqx_schema:file(), #{example => "var/log/emqx"})},
{tag, mk(binary(), #{desc => <<"tag">>})} {tag, mk(binary(), #{desc => <<"tag">>})}
]; ];


@ -689,7 +689,7 @@ to_schema(Object) ->
fields(good_ref) -> fields(good_ref) ->
[ [
{'webhook-host', mk(emqx_schema:ip_port(), #{default => "127.0.0.1:80"})}, {'webhook-host', mk(emqx_schema:ip_port(), #{default => <<"127.0.0.1:80">>})},
{log_dir, mk(emqx_schema:file(), #{example => "var/log/emqx"})}, {log_dir, mk(emqx_schema:file(), #{example => "var/log/emqx"})},
{tag, mk(binary(), #{desc => <<"tag">>})} {tag, mk(binary(), #{desc => <<"tag">>})}
]; ];


@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_exhook, [ {application, emqx_exhook, [
{description, "EMQX Extension for Hook"}, {description, "EMQX Extension for Hook"},
{vsn, "5.0.9"}, {vsn, "5.0.10"},
{modules, []}, {modules, []},
{registered, []}, {registered, []},
{mod, {emqx_exhook_app, []}}, {mod, {emqx_exhook_app, []}},


@ -229,9 +229,9 @@ server_conf_schema() ->
name => "default", name => "default",
enable => true, enable => true,
url => <<"http://127.0.0.1:8081">>, url => <<"http://127.0.0.1:8081">>,
request_timeout => "5s", request_timeout => <<"5s">>,
failed_action => deny, failed_action => deny,
auto_reconnect => "60s", auto_reconnect => <<"60s">>,
pool_size => 8, pool_size => 8,
ssl => SSL ssl => SSL
} }


@ -63,7 +63,7 @@ fields(server) ->
})}, })},
{request_timeout, {request_timeout,
?HOCON(emqx_schema:duration(), #{ ?HOCON(emqx_schema:duration(), #{
default => "5s", default => <<"5s">>,
desc => ?DESC(request_timeout) desc => ?DESC(request_timeout)
})}, })},
{failed_action, failed_action()}, {failed_action, failed_action()},
@ -74,7 +74,7 @@ fields(server) ->
})}, })},
{auto_reconnect, {auto_reconnect,
?HOCON(hoconsc:union([false, emqx_schema:duration()]), #{ ?HOCON(hoconsc:union([false, emqx_schema:duration()]), #{
default => "60s", default => <<"60s">>,
desc => ?DESC(auto_reconnect) desc => ?DESC(auto_reconnect)
})}, })},
{pool_size, {pool_size,


@ -1,10 +1,10 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_gateway, [ {application, emqx_gateway, [
{description, "The Gateway management application"}, {description, "The Gateway management application"},
{vsn, "0.1.11"}, {vsn, "0.1.12"},
{registered, []}, {registered, []},
{mod, {emqx_gateway_app, []}}, {mod, {emqx_gateway_app, []}},
{applications, [kernel, stdlib, grpc, emqx, emqx_authn]}, {applications, [kernel, stdlib, grpc, emqx, emqx_authn, emqx_ctl]},
{env, []}, {env, []},
{modules, []}, {modules, []},
{licenses, ["Apache 2.0"]}, {licenses, ["Apache 2.0"]},


@ -19,7 +19,6 @@
-include("emqx_gateway_http.hrl"). -include("emqx_gateway_http.hrl").
-include_lib("typerefl/include/types.hrl"). -include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl"). -include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/emqx_placeholder.hrl").
-include_lib("emqx/include/logger.hrl"). -include_lib("emqx/include/logger.hrl").
-behaviour(minirest_api). -behaviour(minirest_api).
@ -464,7 +463,12 @@ schema("/gateways/:name/clients") ->
summary => <<"List Gateway's Clients">>, summary => <<"List Gateway's Clients">>,
parameters => params_client_query(), parameters => params_client_query(),
responses => responses =>
?STANDARD_RESP(#{200 => schema_client_list()}) ?STANDARD_RESP(#{
200 => [
{data, schema_client_list()},
{meta, mk(hoconsc:ref(emqx_dashboard_swagger, meta), #{})}
]
})
} }
}; };
schema("/gateways/:name/clients/:clientid") -> schema("/gateways/:name/clients/:clientid") ->


@ -267,7 +267,7 @@ fields(lwm2m) ->
sc( sc(
duration(), duration(),
#{ #{
default => "15s", default => <<"15s">>,
desc => ?DESC(lwm2m_lifetime_min) desc => ?DESC(lwm2m_lifetime_min)
} }
)}, )},
@ -275,7 +275,7 @@ fields(lwm2m) ->
sc( sc(
duration(), duration(),
#{ #{
default => "86400s", default => <<"86400s">>,
desc => ?DESC(lwm2m_lifetime_max) desc => ?DESC(lwm2m_lifetime_max)
} }
)}, )},
@ -283,7 +283,7 @@ fields(lwm2m) ->
sc( sc(
duration_s(), duration_s(),
#{ #{
default => "22s", default => <<"22s">>,
desc => ?DESC(lwm2m_qmode_time_window) desc => ?DESC(lwm2m_qmode_time_window)
} }
)}, )},
@ -624,7 +624,7 @@ mountpoint(Default) ->
sc( sc(
binary(), binary(),
#{ #{
default => Default, default => iolist_to_binary(Default),
desc => ?DESC(gateway_common_mountpoint) desc => ?DESC(gateway_common_mountpoint)
} }
). ).
@ -707,7 +707,7 @@ proxy_protocol_opts() ->
sc( sc(
duration(), duration(),
#{ #{
default => "15s", default => <<"15s">>,
desc => ?DESC(tcp_listener_proxy_protocol_timeout) desc => ?DESC(tcp_listener_proxy_protocol_timeout)
} }
)} )}


@ -6,7 +6,7 @@
{vsn, "0.2.0"}, {vsn, "0.2.0"},
{modules, []}, {modules, []},
{registered, []}, {registered, []},
{applications, [kernel, stdlib]}, {applications, [kernel, stdlib, emqx_ctl]},
{mod, {emqx_machine_app, []}}, {mod, {emqx_machine_app, []}},
{env, []}, {env, []},
{licenses, ["Apache-2.0"]}, {licenses, ["Apache-2.0"]},


@ -29,6 +29,7 @@
%% @doc EMQX boot entrypoint. %% @doc EMQX boot entrypoint.
start() -> start() ->
emqx_mgmt_cli:load(),
case os:type() of case os:type() of
{win32, nt} -> {win32, nt} ->
ok; ok;
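Loading the management CLI from emqx_machine:start/0, instead of from the emqx_management application (see the emqx_mgmt_app change further down), makes the commands available as soon as the node boots. Conceptually emqx_mgmt_cli:load/0 just registers each command with the emqx_ctl registry, along these lines (simplified sketch, not the literal implementation):

    lists:foreach(
        fun(Cmd) -> emqx_ctl:register_command(Cmd, {emqx_mgmt_cli, Cmd}, []) end,
        [status, broker, cluster]
    ).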


@ -16,4 +16,4 @@
-define(MANAGEMENT_SHARD, emqx_management_shard). -define(MANAGEMENT_SHARD, emqx_management_shard).
-define(MAX_ROW_LIMIT, 100). -define(DEFAULT_ROW_LIMIT, 100).


@ -2,10 +2,10 @@
{application, emqx_management, [ {application, emqx_management, [
{description, "EMQX Management API and CLI"}, {description, "EMQX Management API and CLI"},
% strict semver, bump manually! % strict semver, bump manually!
{vsn, "5.0.14"}, {vsn, "5.0.15"},
{modules, []}, {modules, []},
{registered, [emqx_management_sup]}, {registered, [emqx_management_sup]},
{applications, [kernel, stdlib, emqx_plugins, minirest, emqx]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx, emqx_ctl]},
{mod, {emqx_mgmt_app, []}}, {mod, {emqx_mgmt_app, []}},
{env, []}, {env, []},
{licenses, ["Apache-2.0"]}, {licenses, ["Apache-2.0"]},


@ -21,8 +21,6 @@
-elvis([{elvis_style, god_modules, disable}]). -elvis([{elvis_style, god_modules, disable}]).
-include_lib("stdlib/include/qlc.hrl"). -include_lib("stdlib/include/qlc.hrl").
-include_lib("emqx/include/emqx.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl").
%% Nodes and Brokers API %% Nodes and Brokers API
-export([ -export([
@ -71,8 +69,6 @@
list_subscriptions/1, list_subscriptions/1,
list_subscriptions_via_topic/2, list_subscriptions_via_topic/2,
list_subscriptions_via_topic/3, list_subscriptions_via_topic/3,
lookup_subscriptions/1,
lookup_subscriptions/2,
do_list_subscriptions/0 do_list_subscriptions/0
]). ]).
@ -104,9 +100,10 @@
]). ]).
%% Common Table API %% Common Table API
--export([max_row_limit/0]).
-
--define(APP, emqx_management).
+-export([
+    default_row_limit/0,
+    vm_stats/0
+]).
-elvis([{elvis_style, god_modules, disable}]). -elvis([{elvis_style, god_modules, disable}]).
@ -159,7 +156,24 @@ node_info(Nodes) ->
emqx_rpc:unwrap_erpc(emqx_management_proto_v3:node_info(Nodes)). emqx_rpc:unwrap_erpc(emqx_management_proto_v3:node_info(Nodes)).
stopped_node_info(Node) -> stopped_node_info(Node) ->
#{name => Node, node_status => 'stopped'}. {Node, #{node => Node, node_status => 'stopped'}}.
vm_stats() ->
Idle =
case cpu_sup:util([detailed]) of
%% Not support for Windows
{_, 0, 0, _} -> 0;
{_Num, _Use, IdleList, _} -> proplists:get_value(idle, IdleList, 0)
end,
RunQueue = erlang:statistics(run_queue),
{MemUsedRatio, MemTotal} = get_sys_memory(),
[
{run_queue, RunQueue},
{cpu_idle, Idle},
{cpu_use, 100 - Idle},
{total_memory, MemTotal},
{used_memory, erlang:round(MemTotal * MemUsedRatio)}
].
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Brokers %% Brokers
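vm_stats/0 is new here and returns a flat proplist; on Windows, where cpu_sup cannot report utilisation, the idle figure falls back to 0. An illustrative return value (numbers invented):

    [
        {run_queue, 1},
        {cpu_idle, 92},
        {cpu_use, 8},
        {total_memory, 8589934592},
        {used_memory, 3221225472}
    ]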
@ -174,8 +188,13 @@ lookup_broker(Node) ->
Broker. Broker.
broker_info() -> broker_info() ->
Info = maps:from_list([{K, iolist_to_binary(V)} || {K, V} <- emqx_sys:info()]), Info = lists:foldl(fun convert_broker_info/2, #{}, emqx_sys:info()),
Info#{node => node(), otp_release => otp_rel(), node_status => 'Running'}. Info#{node => node(), otp_release => otp_rel(), node_status => 'running'}.
convert_broker_info({uptime, Uptime}, M) ->
M#{uptime => emqx_datetime:human_readable_duration_string(Uptime)};
convert_broker_info({K, V}, M) ->
M#{K => iolist_to_binary(V)}.
broker_info(Nodes) -> broker_info(Nodes) ->
emqx_rpc:unwrap_erpc(emqx_management_proto_v3:broker_info(Nodes)). emqx_rpc:unwrap_erpc(emqx_management_proto_v3:broker_info(Nodes)).
@ -245,7 +264,7 @@ lookup_client({username, Username}, FormatFun) ->
|| Node <- mria_mnesia:running_nodes() || Node <- mria_mnesia:running_nodes()
]). ]).
-lookup_client(Node, Key, {M, F}) ->
+lookup_client(Node, Key, FormatFun) ->
     case unwrap_rpc(emqx_cm_proto_v1:lookup_client(Node, Key)) of
         {error, Err} ->
             {error, Err};
@ -253,18 +272,23 @@ lookup_client(Node, Key, {M, F}) ->
             lists:map(
                 fun({Chan, Info0, Stats}) ->
                     Info = Info0#{node => Node},
-                    M:F({Chan, Info, Stats})
+                    maybe_format(FormatFun, {Chan, Info, Stats})
                 end,
                 L
             )
     end.
 
-kickout_client({ClientID, FormatFun}) ->
-    case lookup_client({clientid, ClientID}, FormatFun) of
+maybe_format(undefined, A) ->
+    A;
+maybe_format({M, F}, A) ->
+    M:F(A).
+
+kickout_client(ClientId) ->
+    case lookup_client({clientid, ClientId}, undefined) of
         [] ->
             {error, not_found};
         _ ->
-            Results = [kickout_client(Node, ClientID) || Node <- mria_mnesia:running_nodes()],
+            Results = [kickout_client(Node, ClientId) || Node <- mria_mnesia:running_nodes()],
             check_results(Results)
     end.
@ -275,17 +299,22 @@ list_authz_cache(ClientId) ->
call_client(ClientId, list_authz_cache). call_client(ClientId, list_authz_cache).
 list_client_subscriptions(ClientId) ->
-    Results = [client_subscriptions(Node, ClientId) || Node <- mria_mnesia:running_nodes()],
-    Filter =
-        fun
-            ({error, _}) ->
-                false;
-            ({_Node, List}) ->
-                erlang:is_list(List) andalso 0 < erlang:length(List)
-        end,
-    case lists:filter(Filter, Results) of
-        [] -> [];
-        [Result | _] -> Result
+    case lookup_client({clientid, ClientId}, undefined) of
+        [] ->
+            {error, not_found};
+        _ ->
+            Results = [client_subscriptions(Node, ClientId) || Node <- mria_mnesia:running_nodes()],
+            Filter =
+                fun
+                    ({error, _}) ->
+                        false;
+                    ({_Node, List}) ->
+                        erlang:is_list(List) andalso 0 < erlang:length(List)
+                end,
+            case lists:filter(Filter, Results) of
+                [] -> [];
+                [Result | _] -> Result
+            end
     end.
client_subscriptions(Node, ClientId) -> client_subscriptions(Node, ClientId) ->
@ -368,17 +397,11 @@ call_client(Node, ClientId, Req) ->
%% Subscriptions %% Subscriptions
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
--spec do_list_subscriptions() -> [map()].
+-spec do_list_subscriptions() -> no_return().
 do_list_subscriptions() ->
-    case check_row_limit([mqtt_subproperty]) of
-        false ->
-            throw(max_row_limit);
-        ok ->
-            [
-                #{topic => Topic, clientid => ClientId, options => Options}
-                || {{Topic, ClientId}, Options} <- ets:tab2list(mqtt_subproperty)
-            ]
-    end.
+    %% [FIXME] Add function to `emqx_broker` that returns list of subscriptions
+    %% and either redirect from here or bpapi directly (EMQX-8993).
+    throw(not_implemented).
list_subscriptions(Node) -> list_subscriptions(Node) ->
unwrap_rpc(emqx_management_proto_v3:list_subscriptions(Node)). unwrap_rpc(emqx_management_proto_v3:list_subscriptions(Node)).
@ -395,12 +418,6 @@ list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) ->
Result -> M:F(Result) Result -> M:F(Result)
end. end.
lookup_subscriptions(ClientId) ->
lists:append([lookup_subscriptions(Node, ClientId) || Node <- mria_mnesia:running_nodes()]).
lookup_subscriptions(Node, ClientId) ->
unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId)).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% PubSub %% PubSub
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -536,24 +553,11 @@ unwrap_rpc(Res) ->
otp_rel() -> otp_rel() ->
iolist_to_binary([emqx_vm:get_otp_version(), "/", erlang:system_info(version)]). iolist_to_binary([emqx_vm:get_otp_version(), "/", erlang:system_info(version)]).
check_row_limit(Tables) ->
check_row_limit(Tables, max_row_limit()).
check_row_limit([], _Limit) ->
ok;
check_row_limit([Tab | Tables], Limit) ->
case table_size(Tab) > Limit of
true -> false;
false -> check_row_limit(Tables, Limit)
end.
check_results(Results) -> check_results(Results) ->
case lists:any(fun(Item) -> Item =:= ok end, Results) of case lists:any(fun(Item) -> Item =:= ok end, Results) of
true -> ok; true -> ok;
false -> unwrap_rpc(lists:last(Results)) false -> unwrap_rpc(lists:last(Results))
end. end.
max_row_limit() -> default_row_limit() ->
?MAX_ROW_LIMIT. ?DEFAULT_ROW_LIMIT.
table_size(Tab) -> ets:info(Tab, size).
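After this refactor the client-facing helpers take a bare client ID and report a missing client uniformly, which the new emqx_mgmt_SUITE below exercises. A rough usage sketch (client IDs invented):

    ok = emqx_mgmt:kickout_client(<<"client1">>),
    {error, not_found} = emqx_mgmt:kickout_client(<<"unknown">>),
    {error, not_found} = emqx_mgmt:list_client_subscriptions(<<"unknown">>),
    {error, not_found} = emqx_mgmt:list_authz_cache(<<"unknown">>).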


@ -23,8 +23,7 @@
-define(LONG_QUERY_TIMEOUT, 50000). -define(LONG_QUERY_TIMEOUT, 50000).
-export([ -export([
paginate/3, paginate/3
paginate/4
]). ]).
%% first_next query APIs %% first_next query APIs
@ -34,6 +33,10 @@
b2i/1 b2i/1
]). ]).
-ifdef(TEST).
-export([paginate_test_format/1]).
-endif.
-export_type([ -export_type([
match_spec_and_filter/0 match_spec_and_filter/0
]). ]).
@ -58,14 +61,14 @@
-export([do_query/2, apply_total_query/1]). -export([do_query/2, apply_total_query/1]).
-paginate(Tables, Params, {Module, FormatFun}) ->
-    Qh = query_handle(Tables),
-    Count = count(Tables),
-    do_paginate(Qh, Count, Params, {Module, FormatFun}).
-
-paginate(Tables, MatchSpec, Params, {Module, FormatFun}) ->
-    Qh = query_handle(Tables, MatchSpec),
-    Count = count(Tables, MatchSpec),
+-spec paginate(atom(), map(), {atom(), atom()}) ->
+    #{
+        meta => #{page => pos_integer(), limit => pos_integer(), count => pos_integer()},
+        data => list(term())
+    }.
+paginate(Table, Params, {Module, FormatFun}) ->
+    Qh = query_handle(Table),
+    Count = count(Table),
     do_paginate(Qh, Count, Params, {Module, FormatFun}).
do_paginate(Qh, Count, Params, {Module, FormatFun}) -> do_paginate(Qh, Count, Params, {Module, FormatFun}) ->
@ -86,57 +89,17 @@ do_paginate(Qh, Count, Params, {Module, FormatFun}) ->
data => [erlang:apply(Module, FormatFun, [Row]) || Row <- Rows] data => [erlang:apply(Module, FormatFun, [Row]) || Row <- Rows]
}. }.
query_handle(Table) when is_atom(Table) -> query_handle(Table) ->
qlc:q([R || R <- ets:table(Table)]); qlc:q([R || R <- ets:table(Table)]).
query_handle({Table, Opts}) when is_atom(Table) ->
qlc:q([R || R <- ets:table(Table, Opts)]);
query_handle([Table]) when is_atom(Table) ->
qlc:q([R || R <- ets:table(Table)]);
query_handle([{Table, Opts}]) when is_atom(Table) ->
qlc:q([R || R <- ets:table(Table, Opts)]);
query_handle(Tables) ->
%
qlc:append([query_handle(T) || T <- Tables]).
query_handle(Table, MatchSpec) when is_atom(Table) -> count(Table) ->
Options = {traverse, {select, MatchSpec}}, ets:info(Table, size).
qlc:q([R || R <- ets:table(Table, Options)]);
query_handle([Table], MatchSpec) when is_atom(Table) ->
Options = {traverse, {select, MatchSpec}},
qlc:q([R || R <- ets:table(Table, Options)]);
query_handle(Tables, MatchSpec) ->
Options = {traverse, {select, MatchSpec}},
qlc:append([qlc:q([E || E <- ets:table(T, Options)]) || T <- Tables]).
count(Table) when is_atom(Table) ->
ets:info(Table, size);
count({Table, _}) when is_atom(Table) ->
ets:info(Table, size);
count([Table]) when is_atom(Table) ->
ets:info(Table, size);
count([{Table, _}]) when is_atom(Table) ->
ets:info(Table, size);
count(Tables) ->
lists:sum([count(T) || T <- Tables]).
count(Table, MatchSpec) when is_atom(Table) ->
[{MatchPattern, Where, _Re}] = MatchSpec,
NMatchSpec = [{MatchPattern, Where, [true]}],
ets:select_count(Table, NMatchSpec);
count([Table], MatchSpec) when is_atom(Table) ->
count(Table, MatchSpec);
count(Tables, MatchSpec) ->
lists:sum([count(T, MatchSpec) || T <- Tables]).
page(Params) when is_map(Params) ->
maps:get(<<"page">>, Params, 1);
page(Params) -> page(Params) ->
proplists:get_value(<<"page">>, Params, <<"1">>). maps:get(<<"page">>, Params, 1).
limit(Params) when is_map(Params) -> limit(Params) when is_map(Params) ->
maps:get(<<"limit">>, Params, emqx_mgmt:max_row_limit()); maps:get(<<"limit">>, Params, emqx_mgmt:default_row_limit()).
limit(Params) ->
proplists:get_value(<<"limit">>, Params, emqx_mgmt:max_row_limit()).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Node Query %% Node Query
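paginate/3 now works on a single ETS table and reads page and limit from a map with binary keys, defaulting the limit to emqx_mgmt:default_row_limit/0. A hedged usage sketch; demo_tab and {my_mod, format} (an identity formatter) are made-up stand-ins:

    _ = ets:new(demo_tab, [named_table, ordered_set]),
    true = ets:insert(demo_tab, [{N, val} || N <- lists:seq(1, 25)]),
    #{meta := #{page := 2, limit := 10, count := 25}, data := Rows} =
        paginate(demo_tab, #{<<"page">> => 2, <<"limit">> => 10}, {my_mod, format}),
    10 = length(Rows).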
@ -210,8 +173,6 @@ cluster_query(Tab, QString, QSchema, MsFun, FmtFun) ->
end. end.
%% @private %% @private
do_cluster_query([], QueryState, ResultAcc) ->
finalize_query(ResultAcc, mark_complete(QueryState));
do_cluster_query( do_cluster_query(
[Node | Tail] = Nodes, [Node | Tail] = Nodes,
QueryState, QueryState,
@ -605,7 +566,7 @@ to_type(V, TargetType) ->
to_type_(V, atom) -> to_atom(V); to_type_(V, atom) -> to_atom(V);
to_type_(V, integer) -> to_integer(V); to_type_(V, integer) -> to_integer(V);
to_type_(V, timestamp) -> to_timestamp(V); to_type_(V, timestamp) -> to_timestamp(V);
to_type_(V, ip) -> aton(V); to_type_(V, ip) -> to_ip(V);
to_type_(V, ip_port) -> to_ip_port(V); to_type_(V, ip_port) -> to_ip_port(V);
to_type_(V, _) -> V. to_type_(V, _) -> V.
@ -624,14 +585,16 @@ to_timestamp(I) when is_integer(I) ->
to_timestamp(B) when is_binary(B) -> to_timestamp(B) when is_binary(B) ->
binary_to_integer(B). binary_to_integer(B).
-aton(B) when is_binary(B) ->
-    list_to_tuple([binary_to_integer(T) || T <- re:split(B, "[.]")]).
+to_ip(IP0) when is_binary(IP0) ->
+    ensure_ok(inet:parse_address(binary_to_list(IP0))).
 
 to_ip_port(IPAddress) ->
-    [IP0, Port0] = string:tokens(binary_to_list(IPAddress), ":"),
-    {ok, IP} = inet:parse_address(IP0),
-    Port = list_to_integer(Port0),
-    {IP, Port}.
+    ensure_ok(emqx_schema:to_ip_port(IPAddress)).
+
+ensure_ok({ok, V}) ->
+    V;
+ensure_ok({error, _R} = E) ->
+    throw(E).
b2i(Bin) when is_binary(Bin) -> b2i(Bin) when is_binary(Bin) ->
binary_to_integer(Bin); binary_to_integer(Bin);
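The coercion helpers now delegate to the inet and emqx_schema parsers, so IPv6 values are accepted and malformed input is thrown as an {error, _} for the API layer to reject. Roughly (internal helpers, shown for illustration):

    {127, 0, 0, 1} = to_ip(<<"127.0.0.1">>),
    {0, 0, 0, 0, 0, 0, 0, 1} = to_ip(<<"::1">>),
    {{127, 0, 0, 1}, 8888} = to_ip_port(<<"127.0.0.1:8888">>),
    {error, _} = (catch to_ip(<<"helloworld">>)).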
@ -645,40 +608,115 @@ b2i(Any) ->
-ifdef(TEST). -ifdef(TEST).
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
params2qs_test() -> params2qs_test_() ->
QSchema = [ QSchema = [
{<<"str">>, binary}, {<<"str">>, binary},
{<<"int">>, integer}, {<<"int">>, integer},
{<<"binatom">>, atom},
{<<"atom">>, atom}, {<<"atom">>, atom},
{<<"ts">>, timestamp}, {<<"ts">>, timestamp},
{<<"gte_range">>, integer}, {<<"gte_range">>, integer},
{<<"lte_range">>, integer}, {<<"lte_range">>, integer},
{<<"like_fuzzy">>, binary}, {<<"like_fuzzy">>, binary},
{<<"match_topic">>, binary} {<<"match_topic">>, binary},
{<<"ip">>, ip},
{<<"ip_port">>, ip_port}
], ],
QString = [ QString = [
{<<"str">>, <<"abc">>}, {<<"str">>, <<"abc">>},
{<<"int">>, <<"123">>}, {<<"int">>, <<"123">>},
{<<"atom">>, <<"connected">>}, {<<"binatom">>, <<"connected">>},
{<<"atom">>, ok},
{<<"ts">>, <<"156000">>}, {<<"ts">>, <<"156000">>},
{<<"gte_range">>, <<"1">>}, {<<"gte_range">>, <<"1">>},
{<<"lte_range">>, <<"5">>}, {<<"lte_range">>, <<"5">>},
{<<"like_fuzzy">>, <<"user">>}, {<<"like_fuzzy">>, <<"user">>},
{<<"match_topic">>, <<"t/#">>} {<<"match_topic">>, <<"t/#">>},
{<<"ip">>, <<"127.0.0.1">>},
{<<"ip_port">>, <<"127.0.0.1:8888">>}
], ],
ExpectedQs = [ ExpectedQs = [
{str, '=:=', <<"abc">>}, {str, '=:=', <<"abc">>},
{int, '=:=', 123}, {int, '=:=', 123},
{atom, '=:=', connected}, {binatom, '=:=', connected},
{atom, '=:=', ok},
{ts, '=:=', 156000}, {ts, '=:=', 156000},
{range, '>=', 1, '=<', 5} {range, '>=', 1, '=<', 5},
{ip, '=:=', {127, 0, 0, 1}},
{ip_port, '=:=', {{127, 0, 0, 1}, 8888}}
], ],
FuzzyNQString = [ FuzzyNQString = [
{fuzzy, like, <<"user">>}, {fuzzy, like, <<"user">>},
{topic, match, <<"t/#">>} {topic, match, <<"t/#">>}
], ],
?assertEqual({7, {ExpectedQs, FuzzyNQString}}, parse_qstring(QString, QSchema)),
{0, {[], []}} = parse_qstring([{not_a_predefined_params, val}], QSchema). [
?_assertEqual({10, {ExpectedQs, FuzzyNQString}}, parse_qstring(QString, QSchema)),
?_assertEqual({0, {[], []}}, parse_qstring([{not_a_predefined_params, val}], QSchema)),
?_assertEqual(
{1, {[{ip, '=:=', {0, 0, 0, 0, 0, 0, 0, 1}}], []}},
parse_qstring([{<<"ip">>, <<"::1">>}], QSchema)
),
?_assertEqual(
{1, {[{ip_port, '=:=', {{0, 0, 0, 0, 0, 0, 0, 1}, 8888}}], []}},
parse_qstring([{<<"ip_port">>, <<"::1:8888">>}], QSchema)
),
?_assertThrow(
{bad_value_type, {<<"ip">>, ip, <<"helloworld">>}},
parse_qstring([{<<"ip">>, <<"helloworld">>}], QSchema)
),
?_assertThrow(
{bad_value_type, {<<"ip_port">>, ip_port, <<"127.0.0.1">>}},
parse_qstring([{<<"ip_port">>, <<"127.0.0.1">>}], QSchema)
),
?_assertThrow(
{bad_value_type, {<<"ip_port">>, ip_port, <<"helloworld:abcd">>}},
parse_qstring([{<<"ip_port">>, <<"helloworld:abcd">>}], QSchema)
)
].
paginate_test_format(Row) ->
Row.
paginate_test_() ->
_ = ets:new(?MODULE, [named_table]),
Size = 1000,
MyLimit = 10,
ets:insert(?MODULE, [{I, foo} || I <- lists:seq(1, Size)]),
DefaultLimit = emqx_mgmt:default_row_limit(),
NoParamsResult = paginate(?MODULE, #{}, {?MODULE, paginate_test_format}),
PaginateResults = [
paginate(
?MODULE, #{<<"page">> => I, <<"limit">> => MyLimit}, {?MODULE, paginate_test_format}
)
|| I <- lists:seq(1, floor(Size / MyLimit))
],
[
?_assertMatch(
#{meta := #{count := Size, page := 1, limit := DefaultLimit}}, NoParamsResult
),
?_assertEqual(DefaultLimit, length(maps:get(data, NoParamsResult))),
?_assertEqual(
#{data => [], meta => #{count => Size, limit => DefaultLimit, page => 100}},
paginate(?MODULE, #{<<"page">> => <<"100">>}, {?MODULE, paginate_test_format})
)
] ++ assert_paginate_results(PaginateResults, Size, MyLimit).
assert_paginate_results(Results, Size, Limit) ->
AllData = lists:flatten([Data || #{data := Data} <- Results]),
[
begin
Result = lists:nth(I, Results),
[
?_assertMatch(#{meta := #{count := Size, limit := Limit, page := I}}, Result),
?_assertEqual(Limit, length(maps:get(data, Result)))
]
end
|| I <- lists:seq(1, floor(Size / Limit))
] ++
[
?_assertEqual(floor(Size / Limit), length(Results)),
?_assertEqual(Size, length(AllData)),
?_assertEqual(Size, sets:size(sets:from_list(AllData)))
].
-endif. -endif.


@ -76,9 +76,10 @@
-define(FORMAT_FUN, {?MODULE, format_channel_info}). -define(FORMAT_FUN, {?MODULE, format_channel_info}).
--define(CLIENT_ID_NOT_FOUND,
-    <<"{\"code\": \"RESOURCE_NOT_FOUND\", \"reason\": \"Client id not found\"}">>
-).
+-define(CLIENTID_NOT_FOUND, #{
+    code => 'CLIENTID_NOT_FOUND',
+    message => <<"Client ID not found">>
+}).
api_spec() -> api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true, translate_body => true}). emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true, translate_body => true}).
@ -219,7 +220,7 @@ schema("/clients/:clientid") ->
responses => #{ responses => #{
200 => hoconsc:mk(hoconsc:ref(?MODULE, client), #{}), 200 => hoconsc:mk(hoconsc:ref(?MODULE, client), #{}),
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
}, },
@ -232,7 +233,7 @@ schema("/clients/:clientid") ->
responses => #{ responses => #{
204 => <<"Kick out client successfully">>, 204 => <<"Kick out client successfully">>,
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
} }
@ -247,7 +248,7 @@ schema("/clients/:clientid/authorization/cache") ->
responses => #{ responses => #{
200 => hoconsc:mk(hoconsc:ref(?MODULE, authz_cache), #{}), 200 => hoconsc:mk(hoconsc:ref(?MODULE, authz_cache), #{}),
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
}, },
@ -256,9 +257,9 @@ schema("/clients/:clientid/authorization/cache") ->
tags => ?TAGS, tags => ?TAGS,
parameters => [{clientid, hoconsc:mk(binary(), #{in => path})}], parameters => [{clientid, hoconsc:mk(binary(), #{in => path})}],
responses => #{ responses => #{
204 => <<"Kick out client successfully">>, 204 => <<"Clean client authz cache successfully">>,
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
} }
@ -275,7 +276,7 @@ schema("/clients/:clientid/subscriptions") ->
hoconsc:array(hoconsc:ref(emqx_mgmt_api_subscriptions, subscription)), #{} hoconsc:array(hoconsc:ref(emqx_mgmt_api_subscriptions, subscription)), #{}
), ),
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
} }
@ -291,7 +292,7 @@ schema("/clients/:clientid/subscribe") ->
responses => #{ responses => #{
200 => hoconsc:ref(emqx_mgmt_api_subscriptions, subscription), 200 => hoconsc:ref(emqx_mgmt_api_subscriptions, subscription),
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
} }
@ -307,7 +308,7 @@ schema("/clients/:clientid/subscribe/bulk") ->
responses => #{ responses => #{
200 => hoconsc:array(hoconsc:ref(emqx_mgmt_api_subscriptions, subscription)), 200 => hoconsc:array(hoconsc:ref(emqx_mgmt_api_subscriptions, subscription)),
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
} }
@ -323,7 +324,7 @@ schema("/clients/:clientid/unsubscribe") ->
responses => #{ responses => #{
204 => <<"Unsubscribe OK">>, 204 => <<"Unsubscribe OK">>,
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
} }
@ -339,7 +340,7 @@ schema("/clients/:clientid/unsubscribe/bulk") ->
responses => #{ responses => #{
204 => <<"Unsubscribe OK">>, 204 => <<"Unsubscribe OK">>,
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
} }
@ -355,7 +356,7 @@ schema("/clients/:clientid/keepalive") ->
responses => #{ responses => #{
200 => hoconsc:mk(hoconsc:ref(?MODULE, client), #{}), 200 => hoconsc:mk(hoconsc:ref(?MODULE, client), #{}),
404 => emqx_dashboard_swagger:error_codes( 404 => emqx_dashboard_swagger:error_codes(
['CLIENTID_NOT_FOUND'], <<"Client id not found">> ['CLIENTID_NOT_FOUND'], <<"Client ID not found">>
) )
} }
} }
@ -597,6 +598,8 @@ unsubscribe_batch(post, #{bindings := #{clientid := ClientID}, body := TopicInfo
subscriptions(get, #{bindings := #{clientid := ClientID}}) -> subscriptions(get, #{bindings := #{clientid := ClientID}}) ->
case emqx_mgmt:list_client_subscriptions(ClientID) of case emqx_mgmt:list_client_subscriptions(ClientID) of
{error, not_found} ->
{404, ?CLIENTID_NOT_FOUND};
[] -> [] ->
{200, []}; {200, []};
{Node, Subs} -> {Node, Subs} ->
@ -621,7 +624,7 @@ set_keepalive(put, #{bindings := #{clientid := ClientID}, body := Body}) ->
{ok, Interval} -> {ok, Interval} ->
case emqx_mgmt:set_keepalive(emqx_mgmt_util:urldecode(ClientID), Interval) of case emqx_mgmt:set_keepalive(emqx_mgmt_util:urldecode(ClientID), Interval) of
ok -> lookup(#{clientid => ClientID}); ok -> lookup(#{clientid => ClientID});
{error, not_found} -> {404, ?CLIENT_ID_NOT_FOUND}; {error, not_found} -> {404, ?CLIENTID_NOT_FOUND};
{error, Reason} -> {400, #{code => 'PARAMS_ERROR', message => Reason}} {error, Reason} -> {400, #{code => 'PARAMS_ERROR', message => Reason}}
end end
end. end.
@ -669,15 +672,15 @@ list_clients(QString) ->
lookup(#{clientid := ClientID}) -> lookup(#{clientid := ClientID}) ->
case emqx_mgmt:lookup_client({clientid, ClientID}, ?FORMAT_FUN) of case emqx_mgmt:lookup_client({clientid, ClientID}, ?FORMAT_FUN) of
[] -> [] ->
{404, ?CLIENT_ID_NOT_FOUND}; {404, ?CLIENTID_NOT_FOUND};
ClientInfo -> ClientInfo ->
{200, hd(ClientInfo)} {200, hd(ClientInfo)}
end. end.
kickout(#{clientid := ClientID}) -> kickout(#{clientid := ClientID}) ->
case emqx_mgmt:kickout_client({ClientID, ?FORMAT_FUN}) of case emqx_mgmt:kickout_client(ClientID) of
{error, not_found} -> {error, not_found} ->
{404, ?CLIENT_ID_NOT_FOUND}; {404, ?CLIENTID_NOT_FOUND};
_ -> _ ->
{204} {204}
end. end.
@ -685,7 +688,7 @@ kickout(#{clientid := ClientID}) ->
get_authz_cache(#{clientid := ClientID}) -> get_authz_cache(#{clientid := ClientID}) ->
case emqx_mgmt:list_authz_cache(ClientID) of case emqx_mgmt:list_authz_cache(ClientID) of
{error, not_found} -> {error, not_found} ->
{404, ?CLIENT_ID_NOT_FOUND}; {404, ?CLIENTID_NOT_FOUND};
{error, Reason} -> {error, Reason} ->
Message = list_to_binary(io_lib:format("~p", [Reason])), Message = list_to_binary(io_lib:format("~p", [Reason])),
{500, #{code => <<"UNKNOW_ERROR">>, message => Message}}; {500, #{code => <<"UNKNOW_ERROR">>, message => Message}};
@ -699,7 +702,7 @@ clean_authz_cache(#{clientid := ClientID}) ->
ok -> ok ->
{204}; {204};
{error, not_found} -> {error, not_found} ->
{404, ?CLIENT_ID_NOT_FOUND}; {404, ?CLIENTID_NOT_FOUND};
{error, Reason} -> {error, Reason} ->
Message = list_to_binary(io_lib:format("~p", [Reason])), Message = list_to_binary(io_lib:format("~p", [Reason])),
{500, #{code => <<"UNKNOW_ERROR">>, message => Message}} {500, #{code => <<"UNKNOW_ERROR">>, message => Message}}
@ -709,7 +712,7 @@ subscribe(#{clientid := ClientID, topic := Topic} = Sub) ->
Opts = maps:with([qos, nl, rap, rh], Sub), Opts = maps:with([qos, nl, rap, rh], Sub),
case do_subscribe(ClientID, Topic, Opts) of case do_subscribe(ClientID, Topic, Opts) of
{error, channel_not_found} -> {error, channel_not_found} ->
{404, ?CLIENT_ID_NOT_FOUND}; {404, ?CLIENTID_NOT_FOUND};
{error, Reason} -> {error, Reason} ->
Message = list_to_binary(io_lib:format("~p", [Reason])), Message = list_to_binary(io_lib:format("~p", [Reason])),
{500, #{code => <<"UNKNOW_ERROR">>, message => Message}}; {500, #{code => <<"UNKNOW_ERROR">>, message => Message}};
@ -723,7 +726,7 @@ subscribe_batch(#{clientid := ClientID, topics := Topics}) ->
%% has returned. So if one want to subscribe topics in this hook, it will fail. %% has returned. So if one want to subscribe topics in this hook, it will fail.
case ets:lookup(emqx_channel, ClientID) of case ets:lookup(emqx_channel, ClientID) of
[] -> [] ->
{404, ?CLIENT_ID_NOT_FOUND}; {404, ?CLIENTID_NOT_FOUND};
_ -> _ ->
ArgList = [ ArgList = [
[ClientID, Topic, maps:with([qos, nl, rap, rh], Sub)] [ClientID, Topic, maps:with([qos, nl, rap, rh], Sub)]
@ -735,7 +738,7 @@ subscribe_batch(#{clientid := ClientID, topics := Topics}) ->
unsubscribe(#{clientid := ClientID, topic := Topic}) -> unsubscribe(#{clientid := ClientID, topic := Topic}) ->
case do_unsubscribe(ClientID, Topic) of case do_unsubscribe(ClientID, Topic) of
{error, channel_not_found} -> {error, channel_not_found} ->
{404, ?CLIENT_ID_NOT_FOUND}; {404, ?CLIENTID_NOT_FOUND};
{unsubscribe, [{Topic, #{}}]} -> {unsubscribe, [{Topic, #{}}]} ->
{204} {204}
end. end.
@ -745,8 +748,8 @@ unsubscribe_batch(#{clientid := ClientID, topics := Topics}) ->
{200, _} -> {200, _} ->
_ = emqx_mgmt:unsubscribe_batch(ClientID, Topics), _ = emqx_mgmt:unsubscribe_batch(ClientID, Topics),
{204}; {204};
{404, ?CLIENT_ID_NOT_FOUND} -> {404, NotFound} ->
{404, ?CLIENT_ID_NOT_FOUND} {404, NotFound}
end. end.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
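Every 404 branch above now returns the same structured body via ?CLIENTID_NOT_FOUND; serialised, clients see roughly:

    %% Erlang term returned by the handler
    #{code => 'CLIENTID_NOT_FOUND', message => <<"Client ID not found">>}
    %% JSON on the wire: {"code":"CLIENTID_NOT_FOUND","message":"Client ID not found"}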


@ -47,9 +47,11 @@
get_trace_size/0 get_trace_size/0
]). ]).
-define(MAX_SINT32, 2147483647).
-define(TO_BIN(_B_), iolist_to_binary(_B_)). -define(TO_BIN(_B_), iolist_to_binary(_B_)).
-define(NOT_FOUND(N), {404, #{code => 'NOT_FOUND', message => ?TO_BIN([N, " NOT FOUND"])}}). -define(NOT_FOUND(N), {404, #{code => 'NOT_FOUND', message => ?TO_BIN([N, " NOT FOUND"])}}).
-define(BAD_REQUEST(C, M), {400, #{code => C, message => ?TO_BIN(M)}}). -define(SERVICE_UNAVAILABLE(C, M), {503, #{code => C, message => ?TO_BIN(M)}}).
-define(TAGS, [<<"Trace">>]). -define(TAGS, [<<"Trace">>]).
namespace() -> "trace". namespace() -> "trace".
@ -148,8 +150,9 @@ schema("/trace/:name/download") ->
#{schema => #{type => "string", format => "binary"}} #{schema => #{type => "string", format => "binary"}}
} }
}, },
400 => emqx_dashboard_swagger:error_codes(['NODE_ERROR'], <<"Node Not Found">>), 404 => emqx_dashboard_swagger:error_codes(
404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Trace Name Not Found">>) ['NOT_FOUND', 'NODE_ERROR'], <<"Trace Name or Node Not Found">>
)
} }
} }
}; };
@ -184,8 +187,15 @@ schema("/trace/:name/log") ->
{items, hoconsc:mk(binary(), #{example => "TEXT-LOG-ITEMS"})}, {items, hoconsc:mk(binary(), #{example => "TEXT-LOG-ITEMS"})},
{meta, fields(bytes) ++ fields(position)} {meta, fields(bytes) ++ fields(position)}
], ],
400 => emqx_dashboard_swagger:error_codes(['NODE_ERROR'], <<"Trace Log Failed">>), 400 => emqx_dashboard_swagger:error_codes(
404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Trace Name Not Found">>) ['BAD_REQUEST'], <<"Bad input parameter">>
),
404 => emqx_dashboard_swagger:error_codes(
['NOT_FOUND', 'NODE_ERROR'], <<"Trace Name or Node Not Found">>
),
503 => emqx_dashboard_swagger:error_codes(
['SERVICE_UNAVAILABLE'], <<"Requested chunk size too big">>
)
} }
} }
}. }.
@ -313,12 +323,16 @@ fields(bytes) ->
[ [
{bytes, {bytes,
hoconsc:mk( hoconsc:mk(
integer(), %% This seems to be the minimum max value we may encounter
%% across different OS
range(0, ?MAX_SINT32),
#{ #{
desc => "Maximum number of bytes to store in request", desc => "Maximum number of bytes to send in response",
in => query, in => query,
required => false, required => false,
default => 1000 default => 1000,
minimum => 0,
maximum => ?MAX_SINT32
} }
)} )}
]; ];
@ -495,7 +509,7 @@ download_trace_log(get, #{bindings := #{name := Name}, query_string := Query}) -
}, },
{200, Headers, {file_binary, ZipName, Binary}}; {200, Headers, {file_binary, ZipName, Binary}};
{error, not_found} -> {error, not_found} ->
?BAD_REQUEST('NODE_ERROR', <<"Node not found">>) ?NOT_FOUND(<<"Node">>)
end; end;
{error, not_found} -> {error, not_found} ->
?NOT_FOUND(Name) ?NOT_FOUND(Name)
@ -579,11 +593,19 @@ stream_log_file(get, #{bindings := #{name := Name}, query_string := Query}) ->
{200, #{meta => Meta, items => <<"">>}}; {200, #{meta => Meta, items => <<"">>}};
{error, not_found} -> {error, not_found} ->
?NOT_FOUND(Name); ?NOT_FOUND(Name);
{error, enomem} ->
?SLOG(warning, #{
code => not_enough_mem,
msg => "Requested chunk size too big",
bytes => Bytes,
name => Name
}),
?SERVICE_UNAVAILABLE('SERVICE_UNAVAILABLE', <<"Requested chunk size too big">>);
{badrpc, nodedown} -> {badrpc, nodedown} ->
?BAD_REQUEST('NODE_ERROR', <<"Node not found">>) ?NOT_FOUND(<<"Node">>)
end; end;
{error, not_found} -> {error, not_found} ->
?BAD_REQUEST('NODE_ERROR', <<"Node not found">>) ?NOT_FOUND(<<"Node">>)
end. end.
-spec get_trace_size() -> #{{node(), file:name_all()} => non_neg_integer()}. -spec get_trace_size() -> #{{node(), file:name_all()} => non_neg_integer()}.
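?MAX_SINT32 is the largest signed 32-bit integer, 2^31 - 1; the bytes query parameter is capped there because, as the comment above notes, it is the smallest OS-level maximum the API may run into. As a quick sanity check:

    2147483647 = (1 bsl 31) - 1.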


@ -31,9 +31,7 @@ start(_Type, _Args) ->
ok = mria_rlog:wait_for_shards([?MANAGEMENT_SHARD], infinity), ok = mria_rlog:wait_for_shards([?MANAGEMENT_SHARD], infinity),
case emqx_mgmt_auth:init_bootstrap_file() of case emqx_mgmt_auth:init_bootstrap_file() of
ok -> ok ->
{ok, Sup} = emqx_mgmt_sup:start_link(), emqx_mgmt_sup:start_link();
ok = emqx_mgmt_cli:load(),
{ok, Sup};
{error, Reason} -> {error, Reason} ->
{error, Reason} {error, Reason}
end. end.


@ -302,7 +302,7 @@ page_params() ->
name => limit, name => limit,
in => query, in => query,
description => <<"Page size">>, description => <<"Page size">>,
schema => #{type => integer, default => emqx_mgmt:max_row_limit()} schema => #{type => integer, default => emqx_mgmt:default_row_limit()}
} }
]. ].


@ -0,0 +1,387 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_mgmt_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-export([ident/1]).
-define(FORMATFUN, {?MODULE, ident}).
all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_management]),
Config.
end_per_suite(_) ->
emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]).
init_per_testcase(TestCase, Config) ->
meck:expect(mria_mnesia, running_nodes, 0, [node()]),
emqx_common_test_helpers:init_per_testcase(?MODULE, TestCase, Config).
end_per_testcase(TestCase, Config) ->
meck:unload(mria_mnesia),
emqx_common_test_helpers:end_per_testcase(?MODULE, TestCase, Config).
t_list_nodes(init, Config) ->
meck:expect(
mria_mnesia,
cluster_nodes,
fun
(running) -> [node()];
(stopped) -> ['stopped@node']
end
),
Config;
t_list_nodes('end', _Config) ->
ok.
t_list_nodes(_) ->
NodeInfos = emqx_mgmt:list_nodes(),
Node = node(),
?assertMatch(
[
{Node, #{node := Node, node_status := 'running'}},
{'stopped@node', #{node := 'stopped@node', node_status := 'stopped'}}
],
NodeInfos
).
t_lookup_node(init, Config) ->
meck:new(os, [passthrough, unstick, no_link]),
OsType = os:type(),
meck:expect(os, type, 0, {win32, winME}),
[{os_type, OsType} | Config];
t_lookup_node('end', Config) ->
%% We need to restore the original behavior so that rebar3 doesn't crash. If
%% we'd `meck:unload(os)` or not set `no_link` then `ct` crashes calling
%% `os` with "The code server called the unloaded module `os'".
OsType = ?config(os_type, Config),
meck:expect(os, type, 0, OsType),
ok.
t_lookup_node(_) ->
Node = node(),
?assertMatch(
#{node := Node, node_status := 'running', memory_total := 0},
emqx_mgmt:lookup_node(node())
),
?assertMatch(
{error, _},
emqx_mgmt:lookup_node('fake@nohost')
),
ok.
t_list_brokers(_) ->
Node = node(),
?assertMatch(
[{Node, #{node := Node, node_status := running, uptime := _}}],
emqx_mgmt:list_brokers()
).
t_lookup_broker(_) ->
Node = node(),
?assertMatch(
#{node := Node, node_status := running, uptime := _},
emqx_mgmt:lookup_broker(Node)
).
t_get_metrics(_) ->
Metrics = emqx_mgmt:get_metrics(),
?assert(maps:size(Metrics) > 0),
?assertMatch(
Metrics, maps:from_list(emqx_mgmt:get_metrics(node()))
).
t_lookup_client(init, Config) ->
setup_clients(Config);
t_lookup_client('end', Config) ->
disconnect_clients(Config).
t_lookup_client(_Config) ->
[{Chan, Info, Stats}] = emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN),
?assertEqual(
[{Chan, Info, Stats}],
emqx_mgmt:lookup_client({username, <<"user1">>}, ?FORMATFUN)
),
?assertEqual([], emqx_mgmt:lookup_client({clientid, <<"notfound">>}, ?FORMATFUN)),
meck:expect(mria_mnesia, running_nodes, 0, [node(), 'fake@nonode']),
?assertMatch(
[_ | {error, nodedown}], emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN)
).
t_kickout_client(init, Config) ->
process_flag(trap_exit, true),
setup_clients(Config);
t_kickout_client('end', _Config) ->
ok.
t_kickout_client(Config) ->
[C | _] = ?config(clients, Config),
ok = emqx_mgmt:kickout_client(<<"client1">>),
receive
{'EXIT', C, Reason} ->
?assertEqual({shutdown, tcp_closed}, Reason);
Foo ->
error({unexpected, Foo})
after 1000 ->
error(timeout)
end,
?assertEqual({error, not_found}, emqx_mgmt:kickout_client(<<"notfound">>)).
t_list_authz_cache(init, Config) ->
setup_clients(Config);
t_list_authz_cache('end', Config) ->
disconnect_clients(Config).
t_list_authz_cache(_) ->
?assertNotMatch({error, _}, emqx_mgmt:list_authz_cache(<<"client1">>)),
?assertMatch({error, not_found}, emqx_mgmt:list_authz_cache(<<"notfound">>)).
t_list_client_subscriptions(init, Config) ->
setup_clients(Config);
t_list_client_subscriptions('end', Config) ->
disconnect_clients(Config).
t_list_client_subscriptions(Config) ->
[Client | _] = ?config(clients, Config),
?assertEqual([], emqx_mgmt:list_client_subscriptions(<<"client1">>)),
emqtt:subscribe(Client, <<"t/#">>),
?assertMatch({_, [{<<"t/#">>, _Opts}]}, emqx_mgmt:list_client_subscriptions(<<"client1">>)),
?assertEqual({error, not_found}, emqx_mgmt:list_client_subscriptions(<<"notfound">>)).
t_clean_cache(init, Config) ->
setup_clients(Config);
t_clean_cache('end', Config) ->
disconnect_clients(Config).
t_clean_cache(_Config) ->
?assertNotMatch(
{error, _},
emqx_mgmt:clean_authz_cache(<<"client1">>)
),
?assertNotMatch(
{error, _},
emqx_mgmt:clean_authz_cache_all()
),
?assertNotMatch(
{error, _},
emqx_mgmt:clean_pem_cache_all()
),
meck:expect(mria_mnesia, running_nodes, 0, [node(), 'fake@nonode']),
?assertMatch(
{error, [{'fake@nonode', {error, _}}]},
emqx_mgmt:clean_authz_cache_all()
),
?assertMatch(
{error, [{'fake@nonode', {error, _}}]},
emqx_mgmt:clean_pem_cache_all()
).
t_set_client_props(init, Config) ->
setup_clients(Config);
t_set_client_props('end', Config) ->
disconnect_clients(Config).
t_set_client_props(_Config) ->
?assertEqual(
% [FIXME] not implemented at this point?
ignored,
emqx_mgmt:set_ratelimit_policy(<<"client1">>, foo)
),
?assertEqual(
{error, not_found},
emqx_mgmt:set_ratelimit_policy(<<"notfound">>, foo)
),
?assertEqual(
% [FIXME] not implemented at this point?
ignored,
emqx_mgmt:set_quota_policy(<<"client1">>, foo)
),
?assertEqual(
{error, not_found},
emqx_mgmt:set_quota_policy(<<"notfound">>, foo)
),
?assertEqual(
ok,
emqx_mgmt:set_keepalive(<<"client1">>, 3600)
),
?assertMatch(
{error, _},
emqx_mgmt:set_keepalive(<<"client1">>, true)
),
?assertEqual(
{error, not_found},
emqx_mgmt:set_keepalive(<<"notfound">>, 3600)
),
ok.
t_list_subscriptions_via_topic(init, Config) ->
setup_clients(Config);
t_list_subscriptions_via_topic('end', Config) ->
disconnect_clients(Config).
t_list_subscriptions_via_topic(Config) ->
[Client | _] = ?config(clients, Config),
?assertEqual([], emqx_mgmt:list_subscriptions_via_topic(<<"t/#">>, ?FORMATFUN)),
emqtt:subscribe(Client, <<"t/#">>),
?assertMatch(
[{{<<"t/#">>, _SubPid}, _Opts}],
emqx_mgmt:list_subscriptions_via_topic(<<"t/#">>, ?FORMATFUN)
).
t_pubsub_api(init, Config) ->
setup_clients(Config);
t_pubsub_api('end', Config) ->
disconnect_clients(Config).
-define(TT(Topic), {Topic, #{qos => 0}}).
t_pubsub_api(Config) ->
[Client | _] = ?config(clients, Config),
?assertEqual([], emqx_mgmt:list_subscriptions_via_topic(<<"t/#">>, ?FORMATFUN)),
?assertMatch(
{subscribe, _, _},
emqx_mgmt:subscribe(<<"client1">>, [?TT(<<"t/#">>), ?TT(<<"t1/#">>), ?TT(<<"t2/#">>)])
),
timer:sleep(100),
?assertMatch(
[{{<<"t/#">>, _SubPid}, _Opts}],
emqx_mgmt:list_subscriptions_via_topic(<<"t/#">>, ?FORMATFUN)
),
Message = emqx_message:make(?MODULE, 0, <<"t/foo">>, <<"helloworld">>, #{}, #{}),
emqx_mgmt:publish(Message),
Recv =
receive
{publish, #{client_pid := Client, payload := <<"helloworld">>}} ->
ok
after 100 ->
timeout
end,
?assertEqual(ok, Recv),
?assertEqual({error, channel_not_found}, emqx_mgmt:subscribe(<<"notfound">>, [?TT(<<"t/#">>)])),
?assertNotMatch({error, _}, emqx_mgmt:unsubscribe(<<"client1">>, <<"t/#">>)),
?assertEqual({error, channel_not_found}, emqx_mgmt:unsubscribe(<<"notfound">>, <<"t/#">>)),
Node = node(),
?assertMatch(
{Node, [{<<"t1/#">>, _}, {<<"t2/#">>, _}]},
emqx_mgmt:list_client_subscriptions(<<"client1">>)
),
?assertMatch(
{unsubscribe, [{<<"t1/#">>, _}, {<<"t2/#">>, _}]},
emqx_mgmt:unsubscribe_batch(<<"client1">>, [<<"t1/#">>, <<"t2/#">>])
),
timer:sleep(100),
?assertMatch([], emqx_mgmt:list_client_subscriptions(<<"client1">>)),
?assertEqual(
{error, channel_not_found},
emqx_mgmt:unsubscribe_batch(<<"notfound">>, [<<"t1/#">>, <<"t2/#">>])
).
t_alarms(init, Config) ->
[
emqx_mgmt:deactivate(Node, Name)
|| {Node, ActiveAlarms} <- emqx_mgmt:get_alarms(activated), #{name := Name} <- ActiveAlarms
],
emqx_mgmt:delete_all_deactivated_alarms(),
Config;
t_alarms('end', Config) ->
Config.
t_alarms(_) ->
Node = node(),
?assertEqual(
[{node(), []}],
emqx_mgmt:get_alarms(all)
),
emqx_alarm:activate(foo),
?assertMatch(
[{Node, [#{name := foo, activated := true, duration := _}]}],
emqx_mgmt:get_alarms(all)
),
emqx_alarm:activate(bar),
?assertMatch(
[{Node, [#{name := foo, activated := true}, #{name := bar, activated := true}]}],
sort_alarms(emqx_mgmt:get_alarms(all))
),
?assertEqual(
ok,
emqx_mgmt:deactivate(node(), bar)
),
?assertMatch(
[{Node, [#{name := foo, activated := true}, #{name := bar, activated := false}]}],
sort_alarms(emqx_mgmt:get_alarms(all))
),
?assertMatch(
[{Node, [#{name := foo, activated := true}]}],
emqx_mgmt:get_alarms(activated)
),
?assertMatch(
[{Node, [#{name := bar, activated := false}]}],
emqx_mgmt:get_alarms(deactivated)
),
?assertEqual(
[ok],
emqx_mgmt:delete_all_deactivated_alarms()
),
?assertMatch(
[{Node, [#{name := foo, activated := true}]}],
emqx_mgmt:get_alarms(all)
),
?assertEqual(
{error, not_found},
emqx_mgmt:deactivate(node(), bar)
).
t_banned(_) ->
Banned = #{
who => {clientid, <<"TestClient">>},
by => <<"banned suite">>,
reason => <<"test">>,
at => erlang:system_time(second),
until => erlang:system_time(second) + 1
},
?assertMatch(
{ok, _},
emqx_mgmt:create_banned(Banned)
),
?assertEqual(
ok,
emqx_mgmt:delete_banned({clientid, <<"TestClient">>})
).
%%% helpers
ident(Arg) ->
Arg.
sort_alarms([{Node, Alarms}]) ->
[{Node, lists:sort(fun(#{activate_at := A}, #{activate_at := B}) -> A < B end, Alarms)}].
setup_clients(Config) ->
{ok, C} = emqtt:start_link([{clientid, <<"client1">>}, {username, <<"user1">>}]),
{ok, _} = emqtt:connect(C),
[{clients, [C]} | Config].
disconnect_clients(Config) ->
Clients = ?config(clients, Config),
lists:foreach(fun emqtt:disconnect/1, Clients).


@ -67,7 +67,7 @@ t_cluster_query(_Config) ->
%% assert: AllPage = Page1 + Page2 + Page3 + Page4 %% assert: AllPage = Page1 + Page2 + Page3 + Page4
%% !!!Note: this equation requires that the queried tables must be ordered_set %% !!!Note: this equation requires that the queried tables must be ordered_set
{200, ClientsPage2} = query_clients(Node1, #{<<"page">> => 2, <<"limit">> => 5}), {200, ClientsPage2} = query_clients(Node1, #{<<"page">> => <<"2">>, <<"limit">> => 5}),
{200, ClientsPage3} = query_clients(Node2, #{<<"page">> => 3, <<"limit">> => 5}), {200, ClientsPage3} = query_clients(Node2, #{<<"page">> => 3, <<"limit">> => 5}),
{200, ClientsPage4} = query_clients(Node1, #{<<"page">> => 4, <<"limit">> => 5}), {200, ClientsPage4} = query_clients(Node1, #{<<"page">> => 4, <<"limit">> => 5}),
GetClientIds = fun(L) -> lists:map(fun(#{clientid := Id}) -> Id end, L) end, GetClientIds = fun(L) -> lists:map(fun(#{clientid := Id}) -> Id end, L) end,
@ -79,6 +79,78 @@ t_cluster_query(_Config) ->
) )
), ),
%% Scroll past count
{200, ClientsPage10} = query_clients(Node1, #{<<"page">> => <<"10">>, <<"limit">> => 5}),
?assertEqual(
#{data => [], meta => #{page => 10, limit => 5, count => 20, hasnext => false}},
ClientsPage10
),
%% Node queries
{200, ClientsNode2} = query_clients(Node1, #{<<"node">> => Node2}),
?assertEqual({200, ClientsNode2}, query_clients(Node2, #{<<"node">> => Node2})),
?assertMatch(
#{page := 1, limit := 100, count := 10},
maps:get(meta, ClientsNode2)
),
?assertMatch(10, length(maps:get(data, ClientsNode2))),
{200, ClientsNode2Page1} = query_clients(Node2, #{<<"node">> => Node2, <<"limit">> => 5}),
{200, ClientsNode2Page2} = query_clients(Node1, #{
<<"node">> => Node2, <<"page">> => <<"2">>, <<"limit">> => 5
}),
{200, ClientsNode2Page3} = query_clients(Node2, #{
<<"node">> => Node2, <<"page">> => 3, <<"limit">> => 5
}),
{200, ClientsNode2Page4} = query_clients(Node1, #{
<<"node">> => Node2, <<"page">> => 4, <<"limit">> => 5
}),
?assertEqual(
GetClientIds(maps:get(data, ClientsNode2)),
GetClientIds(
lists:append([
maps:get(data, Page)
|| Page <- [
ClientsNode2Page1,
ClientsNode2Page2,
ClientsNode2Page3,
ClientsNode2Page4
]
])
)
),
%% Scroll past count
{200, ClientsNode2Page10} = query_clients(Node1, #{
<<"node">> => Node2, <<"page">> => <<"10">>, <<"limit">> => 5
}),
?assertEqual(
#{data => [], meta => #{page => 10, limit => 5, count => 10, hasnext => false}},
ClientsNode2Page10
),
%% Query with bad params
?assertEqual(
{400, #{
code => <<"INVALID_PARAMETER">>,
message => <<"page_limit_invalid">>
}},
query_clients(Node1, #{<<"page">> => -1})
),
?assertEqual(
{400, #{
code => <<"INVALID_PARAMETER">>,
message => <<"page_limit_invalid">>
}},
query_clients(Node1, #{<<"node">> => Node1, <<"page">> => -1})
),
%% Query bad node
?assertMatch(
{500, #{code := <<"NODE_DOWN">>}},
query_clients(Node1, #{<<"node">> => 'nonode@nohost'})
),
%% exact match can return non-zero total %% exact match can return non-zero total
{200, ClientsNode1} = query_clients(Node2, #{<<"username">> => <<"corenode1@127.0.0.1">>}), {200, ClientsNode1} = query_clients(Node2, #{<<"username">> => <<"corenode1@127.0.0.1">>}),
?assertMatch( ?assertMatch(
@ -87,11 +159,11 @@ t_cluster_query(_Config) ->
), ),
%% fuzzy searching can't return total %% fuzzy searching can't return total
{200, ClientsNode2} = query_clients(Node2, #{<<"like_username">> => <<"corenode2">>}), {200, ClientsFuzzyNode2} = query_clients(Node2, #{<<"like_username">> => <<"corenode2">>}),
MetaNode2 = maps:get(meta, ClientsNode2), MetaNode2 = maps:get(meta, ClientsFuzzyNode2),
?assertNotMatch(#{count := _}, MetaNode2), ?assertNotMatch(#{count := _}, MetaNode2),
?assertMatch(#{hasnext := false}, MetaNode2), ?assertMatch(#{hasnext := false}, MetaNode2),
?assertMatch(10, length(maps:get(data, ClientsNode2))), ?assertMatch(10, length(maps:get(data, ClientsFuzzyNode2))),
_ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1), _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1),
_ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs2) _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs2)
@ -101,6 +173,23 @@ t_cluster_query(_Config) ->
end, end,
ok. ok.
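For reference, a minimal sketch of the paginated response shape these assertions rely on; query_clients/2 is this suite's helper, and the concrete numbers (20 clients, 5 per page) mirror the fixture above but are otherwise illustrative:

%% Illustrative sketch only: page 2 of 4 when 20 clients are connected.
{200, #{data := Page2Data, meta := Page2Meta}} =
    query_clients(Node1, #{<<"page">> => 2, <<"limit">> => 5}),
5 = length(Page2Data),
#{page := 2, limit := 5, count := 20, hasnext := true} = Page2Meta.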
t_bad_rpc(_) ->
emqx_mgmt_api_test_util:init_suite(),
process_flag(trap_exit, true),
ClientLs1 = [start_emqtt_client(node(), I, 1883) || I <- lists:seq(1, 10)],
Path = emqx_mgmt_api_test_util:api_path(["clients?limit=2&page=2"]),
try
meck:expect(mria_mnesia, running_nodes, 0, ['fake@nohost']),
{error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path),
%% one real node plus one fake node: the query should still fail
meck:expect(mria_mnesia, running_nodes, 0, [node(), 'fake@nohost']),
{error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path)
after
_ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1),
meck:unload(mria_mnesia),
emqx_mgmt_api_test_util:end_suite()
end.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% helpers %% helpers
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -62,5 +62,5 @@ get_alarms(AssertCount, Activated) ->
Limit = maps:get(<<"limit">>, Meta), Limit = maps:get(<<"limit">>, Meta),
Count = maps:get(<<"count">>, Meta), Count = maps:get(<<"count">>, Meta),
?assertEqual(Page, 1), ?assertEqual(Page, 1),
?assertEqual(Limit, emqx_mgmt:max_row_limit()), ?assertEqual(Limit, emqx_mgmt:default_row_limit()),
?assert(Count >= AssertCount). ?assert(Count >= AssertCount).
@ -64,7 +64,7 @@ t_clients(_) ->
ClientsLimit = maps:get(<<"limit">>, ClientsMeta), ClientsLimit = maps:get(<<"limit">>, ClientsMeta),
ClientsCount = maps:get(<<"count">>, ClientsMeta), ClientsCount = maps:get(<<"count">>, ClientsMeta),
?assertEqual(ClientsPage, 1), ?assertEqual(ClientsPage, 1),
?assertEqual(ClientsLimit, emqx_mgmt:max_row_limit()), ?assertEqual(ClientsLimit, emqx_mgmt:default_row_limit()),
?assertEqual(ClientsCount, 2), ?assertEqual(ClientsCount, 2),
%% get /clients/:clientid %% get /clients/:clientid
@ -78,7 +78,14 @@ t_clients(_) ->
%% delete /clients/:clientid kickout %% delete /clients/:clientid kickout
Client2Path = emqx_mgmt_api_test_util:api_path(["clients", binary_to_list(ClientId2)]), Client2Path = emqx_mgmt_api_test_util:api_path(["clients", binary_to_list(ClientId2)]),
{ok, _} = emqx_mgmt_api_test_util:request_api(delete, Client2Path), {ok, _} = emqx_mgmt_api_test_util:request_api(delete, Client2Path),
timer:sleep(300), Kick =
receive
{'EXIT', C2, _} ->
ok
after 300 ->
timeout
end,
?assertEqual(ok, Kick),
AfterKickoutResponse2 = emqx_mgmt_api_test_util:request_api(get, Client2Path), AfterKickoutResponse2 = emqx_mgmt_api_test_util:request_api(get, Client2Path),
?assertEqual({error, {"HTTP/1.1", 404, "Not Found"}}, AfterKickoutResponse2), ?assertEqual({error, {"HTTP/1.1", 404, "Not Found"}}, AfterKickoutResponse2),
@ -107,7 +114,7 @@ t_clients(_) ->
SubscribeBody SubscribeBody
), ),
timer:sleep(100), timer:sleep(100),
[{AfterSubTopic, #{qos := AfterSubQos}}] = emqx_mgmt:lookup_subscriptions(ClientId1), {_, [{AfterSubTopic, #{qos := AfterSubQos}}]} = emqx_mgmt:list_client_subscriptions(ClientId1),
?assertEqual(AfterSubTopic, Topic), ?assertEqual(AfterSubTopic, Topic),
?assertEqual(AfterSubQos, Qos), ?assertEqual(AfterSubQos, Qos),
@ -152,7 +159,7 @@ t_clients(_) ->
UnSubscribeBody UnSubscribeBody
), ),
timer:sleep(100), timer:sleep(100),
?assertEqual([], emqx_mgmt:lookup_subscriptions(Client1)), ?assertEqual([], emqx_mgmt:list_client_subscriptions(ClientId1)),
%% testcase cleanup, kickout client1 %% testcase cleanup, kickout client1
{ok, _} = emqx_mgmt_api_test_util:request_api(delete, Client1Path), {ok, _} = emqx_mgmt_api_test_util:request_api(delete, Client1Path),
@ -247,6 +254,49 @@ t_keepalive(_Config) ->
emqtt:disconnect(C1), emqtt:disconnect(C1),
ok. ok.
t_client_id_not_found(_Config) ->
AuthHeader = emqx_mgmt_api_test_util:auth_header_(),
Http = {"HTTP/1.1", 404, "Not Found"},
Body = "{\"code\":\"CLIENTID_NOT_FOUND\",\"message\":\"Client ID not found\"}",
PathFun = fun(Suffix) ->
emqx_mgmt_api_test_util:api_path(["clients", "no_existed_clientid"] ++ Suffix)
end,
ReqFun = fun(Method, Path) ->
emqx_mgmt_api_test_util:request_api(
Method, Path, "", AuthHeader, [], #{return_all => true}
)
end,
PostFun = fun(Method, Path, Data) ->
emqx_mgmt_api_test_util:request_api(
Method, Path, "", AuthHeader, Data, #{return_all => true}
)
end,
%% Client lookup
?assertMatch({error, {Http, _, Body}}, ReqFun(get, PathFun([]))),
%% Client kickout
?assertMatch({error, {Http, _, Body}}, ReqFun(delete, PathFun([]))),
%% Client Subscription list
?assertMatch({error, {Http, _, Body}}, ReqFun(get, PathFun(["subscriptions"]))),
%% AuthZ Cache lookup
?assertMatch({error, {Http, _, Body}}, ReqFun(get, PathFun(["authorization", "cache"]))),
%% AuthZ Cache clean
?assertMatch({error, {Http, _, Body}}, ReqFun(delete, PathFun(["authorization", "cache"]))),
%% Client Subscribe
SubBody = #{topic => <<"testtopic">>, qos => 1, nl => 1, rh => 1},
?assertMatch({error, {Http, _, Body}}, PostFun(post, PathFun(["subscribe"]), SubBody)),
?assertMatch(
{error, {Http, _, Body}}, PostFun(post, PathFun(["subscribe", "bulk"]), [SubBody])
),
%% Client Unsubscribe
UnsubBody = #{topic => <<"testtopic">>},
?assertMatch({error, {Http, _, Body}}, PostFun(post, PathFun(["unsubscribe"]), UnsubBody)),
?assertMatch(
{error, {Http, _, Body}}, PostFun(post, PathFun(["unsubscribe", "bulk"]), [UnsubBody])
).
time_string_to_epoch_millisecond(DateTime) -> time_string_to_epoch_millisecond(DateTime) ->
time_string_to_epoch(DateTime, millisecond). time_string_to_epoch(DateTime, millisecond).
@ -57,7 +57,7 @@ t_subscription_api(Config) ->
Data = emqx_json:decode(Response, [return_maps]), Data = emqx_json:decode(Response, [return_maps]),
Meta = maps:get(<<"meta">>, Data), Meta = maps:get(<<"meta">>, Data),
?assertEqual(1, maps:get(<<"page">>, Meta)), ?assertEqual(1, maps:get(<<"page">>, Meta)),
?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, Meta)), ?assertEqual(emqx_mgmt:default_row_limit(), maps:get(<<"limit">>, Meta)),
?assertEqual(2, maps:get(<<"count">>, Meta)), ?assertEqual(2, maps:get(<<"count">>, Meta)),
Subscriptions = maps:get(<<"data">>, Data), Subscriptions = maps:get(<<"data">>, Data),
?assertEqual(length(Subscriptions), 2), ?assertEqual(length(Subscriptions), 2),
@ -95,7 +95,7 @@ t_subscription_api(Config) ->
DataTopic2 = #{<<"meta">> := Meta2} = request_json(get, QS, Headers), DataTopic2 = #{<<"meta">> := Meta2} = request_json(get, QS, Headers),
?assertEqual(1, maps:get(<<"page">>, Meta2)), ?assertEqual(1, maps:get(<<"page">>, Meta2)),
?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, Meta2)), ?assertEqual(emqx_mgmt:default_row_limit(), maps:get(<<"limit">>, Meta2)),
?assertEqual(1, maps:get(<<"count">>, Meta2)), ?assertEqual(1, maps:get(<<"count">>, Meta2)),
SubscriptionsList2 = maps:get(<<"data">>, DataTopic2), SubscriptionsList2 = maps:get(<<"data">>, DataTopic2),
?assertEqual(length(SubscriptionsList2), 1). ?assertEqual(length(SubscriptionsList2), 1).
@ -120,7 +120,7 @@ t_subscription_fuzzy_search(Config) ->
MatchData1 = #{<<"meta">> := MatchMeta1} = request_json(get, MatchQs, Headers), MatchData1 = #{<<"meta">> := MatchMeta1} = request_json(get, MatchQs, Headers),
?assertEqual(1, maps:get(<<"page">>, MatchMeta1)), ?assertEqual(1, maps:get(<<"page">>, MatchMeta1)),
?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, MatchMeta1)), ?assertEqual(emqx_mgmt:default_row_limit(), maps:get(<<"limit">>, MatchMeta1)),
%% count is undefined in fuzzy searching %% count is undefined in fuzzy searching
?assertNot(maps:is_key(<<"count">>, MatchMeta1)), ?assertNot(maps:is_key(<<"count">>, MatchMeta1)),
?assertMatch(3, length(maps:get(<<"data">>, MatchData1))), ?assertMatch(3, length(maps:get(<<"data">>, MatchData1))),
@ -52,7 +52,7 @@ t_nodes_api(Config) ->
RoutesData = emqx_json:decode(Response, [return_maps]), RoutesData = emqx_json:decode(Response, [return_maps]),
Meta = maps:get(<<"meta">>, RoutesData), Meta = maps:get(<<"meta">>, RoutesData),
?assertEqual(1, maps:get(<<"page">>, Meta)), ?assertEqual(1, maps:get(<<"page">>, Meta)),
?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, Meta)), ?assertEqual(emqx_mgmt:default_row_limit(), maps:get(<<"limit">>, Meta)),
?assertEqual(1, maps:get(<<"count">>, Meta)), ?assertEqual(1, maps:get(<<"count">>, Meta)),
Data = maps:get(<<"data">>, RoutesData), Data = maps:get(<<"data">>, RoutesData),
Route = erlang:hd(Data), Route = erlang:hd(Data),
@ -19,9 +19,7 @@
-compile(export_all). -compile(export_all).
-compile(nowarn_export_all). -compile(nowarn_export_all).
-include_lib("common_test/include/ct.hrl").
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-include_lib("emqx/include/emqx.hrl").
-include_lib("kernel/include/file.hrl"). -include_lib("kernel/include/file.hrl").
-include_lib("stdlib/include/zip.hrl"). -include_lib("stdlib/include/zip.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl").
@ -225,12 +223,12 @@ t_log_file(_Config) ->
]}, ]},
zip:table(Binary2) zip:table(Binary2)
), ),
{error, {_, 400, _}} = {error, {_, 404, _}} =
request_api( request_api(
get, get,
api_path("trace/test_client_id/download?node=unknonwn_node") api_path("trace/test_client_id/download?node=unknown_node")
), ),
{error, {_, 400, _}} = {error, {_, 404, _}} =
request_api( request_api(
get, get,
% known atom but unknown node % known atom but unknown node
@ -296,12 +294,21 @@ t_stream_log(_Config) ->
#{<<"meta">> := Meta1, <<"items">> := Bin1} = json(Binary1), #{<<"meta">> := Meta1, <<"items">> := Bin1} = json(Binary1),
?assertEqual(#{<<"position">> => 30, <<"bytes">> => 10}, Meta1), ?assertEqual(#{<<"position">> => 30, <<"bytes">> => 10}, Meta1),
?assertEqual(10, byte_size(Bin1)), ?assertEqual(10, byte_size(Bin1)),
{error, {_, 400, _}} = ct:pal("~p vs ~p", [Bin, Bin1]),
%% in theory the two chunks could be equal, but with this fixture they should differ
?assertNotEqual(Bin, Bin1),
BadReqPath = api_path("trace/test_stream_log/log?&bytes=1000000000000"),
{error, {_, 400, _}} = request_api(get, BadReqPath),
meck:new(file, [passthrough, unstick]),
meck:expect(file, read, 2, {error, enomem}),
{error, {_, 503, _}} = request_api(get, Path),
meck:unload(file),
{error, {_, 404, _}} =
request_api( request_api(
get, get,
api_path("trace/test_stream_log/log?node=unknonwn_node") api_path("trace/test_stream_log/log?node=unknown_node")
), ),
{error, {_, 400, _}} = {error, {_, 404, _}} =
request_api( request_api(
get, get,
% known atom but not a node % known atom but not a node
@ -1,9 +1,9 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_modules, [ {application, emqx_modules, [
{description, "EMQX Modules"}, {description, "EMQX Modules"},
{vsn, "5.0.9"}, {vsn, "5.0.10"},
{modules, []}, {modules, []},
{applications, [kernel, stdlib, emqx]}, {applications, [kernel, stdlib, emqx, emqx_ctl]},
{mod, {emqx_modules_app, []}}, {mod, {emqx_modules_app, []}},
{registered, [emqx_modules_sup]}, {registered, [emqx_modules_sup]},
{env, []} {env, []}
@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_plugin_libs, [ {application, emqx_plugin_libs, [
{description, "EMQX Plugin utility libs"}, {description, "EMQX Plugin utility libs"},
{vsn, "4.3.5"}, {vsn, "4.3.6"},
{modules, []}, {modules, []},
{applications, [kernel, stdlib]}, {applications, [kernel, stdlib]},
{env, []} {env, []}
@ -31,7 +31,8 @@
proc_sql_param_str/2, proc_sql_param_str/2,
proc_cql_param_str/2, proc_cql_param_str/2,
split_insert_sql/1, split_insert_sql/1,
detect_sql_type/1 detect_sql_type/1,
proc_batch_sql/3
]). ]).
%% type converting %% type converting
@ -164,6 +165,20 @@ detect_sql_type(SQL) ->
{error, invalid_sql} {error, invalid_sql}
end. end.
-spec proc_batch_sql(
BatchReqs :: list({atom(), map()}),
InsertPart :: binary(),
Tokens :: tmpl_token()
) -> InsertSQL :: binary().
proc_batch_sql(BatchReqs, InsertPart, Tokens) ->
ValuesPart = erlang:iolist_to_binary(
lists:join(", ", [
emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg)
|| {_, Msg} <- BatchReqs
])
),
<<InsertPart/binary, " values ", ValuesPart/binary>>.
unsafe_atom_key(Key) when is_atom(Key) -> unsafe_atom_key(Key) when is_atom(Key) ->
Key; Key;
unsafe_atom_key(Key) when is_binary(Key) -> unsafe_atom_key(Key) when is_binary(Key) ->
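As context for the new export above, here is a rough, hypothetical sketch of how proc_batch_sql/3 could be combined with the existing template helpers to build a batched INSERT; the table, template, and messages are made up for illustration and do not come from this changeset:

%% Hypothetical usage sketch (not part of the diff).
InsertPart = <<"insert into t_mqtt_msg(msgid, payload)">>,
Tokens = emqx_plugin_libs_rule:preproc_tmpl(<<"(${id}, ${payload})">>),
Batch = [
    {send_message, #{id => <<"0001">>, payload => <<"hello">>}},
    {send_message, #{id => <<"0002">>, payload => <<"world">>}}
],
SQL = emqx_plugin_libs_rule:proc_batch_sql(Batch, InsertPart, Tokens),
%% SQL is then roughly:
%% <<"insert into t_mqtt_msg(msgid, payload) values ('0001', 'hello'), ('0002', 'world')">>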
@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_plugins, [ {application, emqx_plugins, [
{description, "EMQX Plugin Management"}, {description, "EMQX Plugin Management"},
{vsn, "0.1.1"}, {vsn, "0.1.2"},
{modules, []}, {modules, []},
{mod, {emqx_plugins_app, []}}, {mod, {emqx_plugins_app, []}},
{applications, [kernel, stdlib, emqx]}, {applications, [kernel, stdlib, emqx]},