Merge tag 'v5.0.10' into dev/ee5.0

This commit is contained in:
Zaiming (Stone) Shi 2022-11-16 15:18:19 +01:00
commit 09455edae8
207 changed files with 4672 additions and 1988 deletions

View File

@ -23,6 +23,7 @@ services:
- ./kerberos/krb5.conf:/etc/krb5.conf
working_dir: /emqx
tty: true
user: "${UID_GID}"
networks:
emqx_bridge:

View File

@ -0,0 +1,19 @@
<!-- Please describe the current behavior and link to a relevant issue. -->
Fixes <issue-number>
**If your build fails** due to your commit message not passing the build checks, please review the guidelines here: https://github.com/emqx/emqx/blob/master/CONTRIBUTING.md.
## PR Checklist
Please convert this PR to a draft if any of the following conditions are not met. Reviewers may postpone the review until all of the items are checked:
- [ ] Added tests for the changes
- [ ] Changed lines covered in coverage report
- [ ] Change log has been added to `changes/` dir
- [ ] For EMQX 4.x: `appup` files updated (execute `scripts/update-appup.sh emqx`)
- [ ] For internal contributor: there is a jira ticket to track this change
- [ ] If there should be document changes, a PR to emqx-docs.git is sent, or a jira ticket is created to follow up
- [ ] In case of non-backward compatible changes, reviewer should check this item as a write-off, and add details in **Backward Compatibility** section
## Backward Compatibility
## More information

81
.github/actions/docker-meta/action.yaml vendored Normal file
View File

@ -0,0 +1,81 @@
name: 'Docker meta'
inputs:
profile:
required: true
type: string
registry:
required: true
type: string
arch:
required: true
type: string
otp:
required: true
type: string
elixir:
required: false
type: string
default: ''
builder_base:
required: true
type: string
owner:
required: true
type: string
docker_tags:
required: true
type: string
outputs:
emqx_name:
description: "EMQX name"
value: ${{ steps.pre-meta.outputs.emqx_name }}
version:
description: "docker image version"
value: ${{ steps.meta.outputs.version }}
tags:
description: "docker image tags"
value: ${{ steps.meta.outputs.tags }}
labels:
description: "docker image labels"
value: ${{ steps.meta.outputs.labels }}
runs:
using: composite
steps:
- name: prepare for docker/metadata-action
id: pre-meta
shell: bash
run: |
emqx_name=${{ inputs.profile }}
img_suffix=${{ inputs.arch }}
img_labels="org.opencontainers.image.otp.version=${{ inputs.otp }}"
if [ -n "${{ inputs.elixir }}" ]; then
emqx_name="emqx-elixir"
img_suffix="elixir-${{ inputs.arch }}"
img_labels="org.opencontainers.image.elixir.version=${{ inputs.elixir }}\n${img_labels}"
fi
if [ "${{ inputs.profile }}" = "emqx" ]; then
img_labels="org.opencontainers.image.edition=Opensource\n${img_labels}"
fi
if [ "${{ inputs.profile }}" = "emqx-enterprise" ]; then
img_labels="org.opencontainers.image.edition=Enterprise\n${img_labels}"
fi
if [[ "${{ inputs.builder_base }}" =~ "alpine" ]]; then
img_suffix="${img_suffix}-alpine"
fi
echo "emqx_name=${emqx_name}" >> $GITHUB_OUTPUT
echo "img_suffix=${img_suffix}" >> $GITHUB_OUTPUT
echo "img_labels=${img_labels}" >> $GITHUB_OUTPUT
echo "img_name=${{ inputs.registry }}/${{ inputs.owner }}/${{ inputs.profile }}" >> $GITHUB_OUTPUT
- uses: docker/metadata-action@v4
id: meta
with:
images:
${{ steps.pre-meta.outputs.img_name }}
flavor: |
suffix=-${{ steps.pre-meta.outputs.img_suffix }}
tags: |
type=raw,value=${{ inputs.docker_tags }}
labels:
${{ steps.pre-meta.outputs.img_labels }}

View File

@ -33,7 +33,7 @@ runs:
brew install curl zip unzip kerl coreutils openssl@1.1
echo "/usr/local/opt/bison/bin" >> $GITHUB_PATH
echo "/usr/local/bin" >> $GITHUB_PATH
- uses: actions/cache@v2
- uses: actions/cache@v3
id: cache
with:
path: ~/.kerl/${{ inputs.otp }}

View File

@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Check apps version

View File

@ -20,7 +20,7 @@ jobs:
prepare:
runs-on: ubuntu-20.04
# prepare source with any OTP version, no need for a matrix
container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04"
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
outputs:
BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
@ -29,7 +29,7 @@ jobs:
DOCKER_TAG_VERSION: ${{ steps.get_profile.outputs.DOCKER_TAG_VERSION }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.branch_or_tag }} # when input is not given, the event tag is used
path: source
@ -46,7 +46,6 @@ jobs:
else
docker_latest=false
fi
echo "::set-output name=IS_DOCKER_LATEST::${docker_latest}"
if git describe --tags --match "[v|e]*" --exact; then
echo "This is an exact git tag, will publish images"
is_exact='true'
@ -54,7 +53,6 @@ jobs:
echo "This is NOT an exact git tag, will not publish images"
is_exact='false'
fi
echo "::set-output name=IS_EXACT_TAG::${is_exact}"
case $tag in
refs/tags/v*)
PROFILE='emqx'
@ -78,94 +76,69 @@ jobs:
esac
;;
esac
echo "::set-output name=BUILD_PROFILE::$PROFILE"
VSN="$(./pkg-vsn.sh "$PROFILE")"
echo "Building $PROFILE image with tag $VSN (latest=$docker_latest)"
echo "::set-output name=DOCKER_TAG_VERSION::$VSN"
echo "IS_DOCKER_LATEST=$docker_latest" >> $GITHUB_OUTPUT
echo "IS_EXACT_TAG=$is_exact" >> $GITHUB_OUTPUT
echo "BUILD_PROFILE=$PROFILE" >> $GITHUB_OUTPUT
echo "DOCKER_TAG_VERSION=$VSN" >> $GITHUB_OUTPUT
- name: get_all_deps
run: |
make -C source deps-all
zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: source
path: source.zip
docker:
runs-on: ${{ matrix.build_machine }}
runs-on: ${{ matrix.arch[1] }}
needs: prepare
strategy:
fail-fast: false
matrix:
arch:
- amd64
- arm64
- [amd64, ubuntu-20.04]
- [arm64, aws-arm64]
profile:
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
build_elixir:
- no_elixir
registry:
- 'docker.io'
- 'public.ecr.aws'
os:
- [alpine3.15.1, "alpine:3.15.1", "deploy/docker/Dockerfile.alpine"]
- [debian11, "debian:11-slim", "deploy/docker/Dockerfile"]
# NOTE: for docker, only support latest otp and elixir
# versions, not a matrix
# NOTE: 'otp' and 'elixir' are to configure emqx-builder image
# only support latest otp and elixir, not a matrix
otp:
- 24.2.1-1 # update to latest
- 24.3.4.2-1 # update to latest
elixir:
- 1.13.4 # update to latest
build_machine:
- aws-arm64
- ubuntu-20.04
exclude:
exclude: # TODO: publish enterprise to ecr too?
- registry: 'public.ecr.aws'
profile: emqx-enterprise
- arch: arm64
build_machine: ubuntu-20.04
- arch: amd64
build_machine: aws-arm64
include:
- arch: amd64
profile: emqx
build_elixir: with_elixir
registry: 'docker.io'
os: [debian11, "debian:11-slim", "deploy/docker/Dockerfile"]
otp: 24.2.1-1
elixir: 1.13.4
build_machine: ubuntu-20.04
- arch: arm64
profile: emqx
build_elixir: with_elixir
registry: 'docker.io'
os: [debian11, "debian:11-slim", "deploy/docker/Dockerfile"]
otp: 24.2.1-1
elixir: 1.13.4
build_machine: aws-arm64
steps:
- uses: AutoModality/action-clean@v1
if: matrix.build_machine == 'aws-arm64'
- uses: actions/download-artifact@v2
if: matrix.arch[1] == 'aws-arm64'
- uses: actions/download-artifact@v3
with:
name: source
path: .
- name: unzip source code
run: unzip -q source.zip
- uses: docker/setup-buildx-action@v1
- uses: docker/setup-buildx-action@v2
- name: Login for docker.
uses: docker/login-action@v1
uses: docker/login-action@v2
if: matrix.registry == 'docker.io'
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- name: Login for AWS ECR
uses: docker/login-action@v1
uses: docker/login-action@v2
if: matrix.registry == 'public.ecr.aws'
with:
registry: public.ecr.aws
@ -173,113 +146,129 @@ jobs:
password: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
ecr: true
- name: prepare for docker-action-parms
id: pre-meta
run: |
emqx_name=${{ matrix.profile }}
img_suffix=${{ matrix.arch }}
img_labels="org.opencontainers.image.otp.version=${{ matrix.otp }}"
if [ ${{ matrix.build_elixir }} = "with_elixir" ]; then
emqx_name="emqx-elixir"
img_suffix="elixir-${{ matrix.arch }}"
img_labels="org.opencontainers.image.elixir.version=${{ matrix.elixir }}\n${img_labels}"
fi
if [ ${{ matrix.profile }} = "emqx" ]; then
img_labels="org.opencontainers.image.edition=Opensource\n${img_labels}"
fi
if [ ${{ matrix.profile }} = "emqx-enterprise" ]; then
img_labels="org.opencontainers.image.edition=Enterprise\n${img_labels}"
fi
if [[ ${{ matrix.os[0] }} =~ "alpine" ]]; then
img_suffix="${img_suffix}-alpine"
fi
echo "::set-output name=emqx_name::${emqx_name}"
echo "::set-output name=img_suffix::${img_suffix}"
echo "::set-output name=img_labels::${img_labels}"
# NOTE, Pls make sure this is identical as the one in job 'docker-push-multi-arch-manifest'
- uses: docker/metadata-action@v3
- uses: ./source/.github/actions/docker-meta
id: meta
with:
images: ${{ matrix.registry }}/${{ github.repository_owner }}/${{ matrix.profile }}
flavor: |
suffix=-${{ steps.pre-meta.outputs.img_suffix }}
tags: |
type=raw,value=${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}
labels:
${{ steps.pre-meta.outputs.img_labels }}
profile: ${{ matrix.profile }}
registry: ${{ matrix.registry }}
arch: ${{ matrix.arch[0] }}
otp: ${{ matrix.otp }}
builder_base: ${{ matrix.os[0] }}
owner: ${{ github.repository_owner }}
docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}
- uses: docker/build-push-action@v2
- uses: docker/build-push-action@v3
with:
push: ${{ needs.prepare.outputs.IS_EXACT_TAG }}
pull: true
no-cache: true
platforms: linux/${{ matrix.arch }}
platforms: linux/${{ matrix.arch[0] }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
RUN_FROM=${{ matrix.os[1] }}
EMQX_NAME=${{ steps.pre-meta.outputs.emqx_name }}
EMQX_NAME=${{ steps.meta.outputs.emqx_name }}
file: source/${{ matrix.os[2] }}
context: source
docker-elixir:
runs-on: ${{ matrix.arch[1] }}
needs: prepare
# do not build elixir images for ee for now
if: needs.prepare.outputs.BUILD_PROFILE == 'emqx'
strategy:
fail-fast: false
matrix:
arch:
- [amd64, ubuntu-20.04]
- [arm64, aws-arm64]
profile:
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
registry:
- 'docker.io'
os:
- [debian11, "debian:11-slim", "deploy/docker/Dockerfile"]
otp:
- 24.3.4.2-1 # update to latest
elixir:
- 1.13.4 # update to latest
steps:
- uses: AutoModality/action-clean@v1
if: matrix.arch[1] == 'aws-arm64'
- uses: actions/download-artifact@v3
with:
name: source
path: .
- name: unzip source code
run: unzip -q source.zip
- uses: docker/setup-buildx-action@v2
- name: Login for docker.
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- uses: ./source/.github/actions/docker-meta
id: meta
with:
profile: ${{ matrix.profile }}
registry: ${{ matrix.registry }}
arch: ${{ matrix.arch[0] }}
otp: ${{ matrix.otp }}
elixir: ${{ matrix.elixir }}
builder_base: ${{ matrix.os[0] }}
owner: ${{ github.repository_owner }}
docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}
- uses: docker/build-push-action@v3
with:
push: ${{ needs.prepare.outputs.IS_EXACT_TAG }}
pull: true
no-cache: true
platforms: linux/${{ matrix.arch[0] }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
build-args: |
BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
RUN_FROM=${{ matrix.os[1] }}
EMQX_NAME=${{ steps.meta.outputs.emqx_name }}
file: source/${{ matrix.os[2] }}
context: source
docker-push-multi-arch-manifest:
# note, we only run on amd64
if: needs.prepare.outputs.IS_EXACT_TAG == 'true'
if: needs.prepare.outputs.IS_EXACT_TAG
needs:
- prepare
- docker
runs-on: ubuntu-latest
runs-on: ${{ matrix.arch[1] }}
strategy:
fail-fast: false
matrix:
arch:
- [amd64, ubuntu-20.04]
profile:
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
build_elixir:
- no_elixir
os:
- [alpine3.15.1, "alpine:3.15.1", "deploy/docker/Dockerfile.alpine"]
- [debian11, "debian:11-slim", "deploy/docker/Dockerfile"]
# NOTE: for docker, only support latest otp version, not a matrix
# NOTE: only support latest otp version, not a matrix
otp:
- 24.2.1-1 # update to latest
#
elixir:
- 1.13.4 # update to latest
arch:
- amd64
#- arm64
build_machine:
- aws-arm64
- ubuntu-20.04
- 24.3.4.2-1 # update to latest
registry:
- 'docker.io'
- 'public.ecr.aws'
exclude:
- registry: 'public.ecr.aws'
profile: emqx-enterprise
- arch: arm64
build_machine: ubuntu-20.04
- arch: amd64
build_machine: aws-arm64
include:
- arch: amd64
profile: emqx
build_elixir: with_elixir
os: [debian11, "debian:11-slim", "deploy/docker/Dockerfile"]
otp: 24.2.1-1
elixir: 1.13.4
build_machine: ubuntu-20.04
registry: docker.io
steps:
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
@ -287,13 +276,13 @@ jobs:
- name: unzip source code
run: unzip -q source.zip
- uses: docker/login-action@v1
- uses: docker/login-action@v2
if: matrix.registry == 'docker.io'
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- uses: docker/login-action@v1
- uses: docker/login-action@v2
if: matrix.registry == 'public.ecr.aws'
with:
registry: public.ecr.aws
@ -301,54 +290,73 @@ jobs:
password: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
ecr: true
- name: prepare for docker-action-parms
id: pre-meta
run: |
emqx_name=${{ matrix.profile }}
img_suffix=${{ matrix.arch }}
img_labels="org.opencontainers.image.otp.version=${{ matrix.otp }}"
if [ ${{ matrix.build_elixir }} = 'with_elixir' ]; then
emqx_name="emqx-elixir"
img_suffix="elixir-${{ matrix.arch }}"
img_labels="org.opencontainers.image.elixir.version=${{ matrix.elixir }}\n$img_labels"
fi
if [ ${{ matrix.profile }} = "emqx" ]; then
img_labels="org.opencontainers.image.edition=Opensource\n${img_labels}"
fi
if [ ${{ matrix.profile }} = "emqx-enterprise" ]; then
img_labels="org.opencontainers.image.edition=Enterprise\n${img_labels}"
fi
if [[ ${{ matrix.os[0] }} =~ "alpine" ]]; then
img_suffix="${img_suffix}-alpine"
fi
echo "::set-output name=emqx_name::${emqx_name}"
echo "::set-output name=img_suffix::${img_suffix}"
echo "::set-output name=img_labels::${img_labels}"
# NOTE, Pls make sure this is identical as the one in job 'docker'
- uses: docker/metadata-action@v3
- uses: ./source/.github/actions/docker-meta
id: meta
with:
images: ${{ matrix.registry }}/${{ github.repository_owner }}/${{ matrix.profile }}
flavor: |
suffix=-${{ steps.pre-meta.outputs.img_suffix }}
tags: |
type=raw,value=${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}
labels:
${{ steps.pre-meta.outputs.img_labels }}
profile: ${{ matrix.profile }}
registry: ${{ matrix.registry }}
arch: ${{ matrix.arch[0] }}
otp: ${{ matrix.otp }}
builder_base: ${{ matrix.os[0] }}
owner: ${{ github.repository_owner }}
docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}
- name: update manifest for multiarch image
if: needs.prepare.outputs.IS_EXACT_TAG == 'true'
working-directory: source
run: |
if [ ${{ matrix.build_elixir }} = 'with_elixir' ]; then
is_latest=false
else
is_latest="${{ needs.prepare.outputs.IS_DOCKER_LATEST }}"
fi
scripts/docker-create-push-manifests.sh "${{ steps.meta.outputs.tags }}" "$is_latest"
docker-elixir-push-multi-arch-manifest:
# note, we only run on amd64
# do not build enterprise elixir images for now
if: needs.prepare.outputs.IS_EXACT_TAG && needs.prepare.outputs.BUILD_PROFILE == 'emqx'
needs:
- prepare
- docker-elixir
runs-on: ${{ matrix.arch[1] }}
strategy:
fail-fast: false
matrix:
arch:
- [amd64, ubuntu-20.04]
profile:
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
# NOTE: for docker, only support latest otp version, not a matrix
otp:
- 24.3.4.2-1 # update to latest
elixir:
- 1.13.4 # update to latest
registry:
- 'docker.io'
steps:
- uses: actions/download-artifact@v3
with:
name: source
path: .
- name: unzip source code
run: unzip -q source.zip
- uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKER_HUB_USER }}
password: ${{ secrets.DOCKER_HUB_TOKEN }}
- uses: ./source/.github/actions/docker-meta
id: meta
with:
profile: ${{ matrix.profile }}
registry: ${{ matrix.registry }}
arch: ${{ matrix.arch[0] }}
otp: ${{ matrix.otp }}
elixir: ${{ matrix.elixir }}
builder_base: ${{ matrix.os[0] }}
owner: ${{ github.repository_owner }}
docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }}
- name: update manifest for multiarch image
working-directory: source
run: |
scripts/docker-create-push-manifests.sh "${{ steps.meta.outputs.tags }}" false

View File

@ -23,13 +23,13 @@ on:
jobs:
prepare:
runs-on: ubuntu-20.04
container: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04
container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
outputs:
BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.branch_or_tag }} # when input is not given, the event tag is used
path: source
@ -79,7 +79,7 @@ jobs:
run: |
make -C source deps-all
zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: source
path: source.zip
@ -95,13 +95,13 @@ jobs:
otp:
- 24.2.1
steps:
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
- name: unzip source code
run: Expand-Archive -Path source.zip -DestinationPath ./
- uses: ilammy/msvc-dev-cmd@v1
- uses: ilammy/msvc-dev-cmd@v1.12.0
- uses: erlef/setup-beam@v1
with:
otp-version: ${{ matrix.otp }}
@ -127,10 +127,10 @@ jobs:
echo "EMQX installed"
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall
echo "EMQX uninstalled"
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.profile }}-windows
path: source/_packages/${{ matrix.profile }}/.
path: source/_packages/${{ matrix.profile }}/
mac:
needs: prepare
@ -140,12 +140,12 @@ jobs:
profile:
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
otp:
- 24.2.1-1
- 24.3.4.2-1
os:
- macos-11
runs-on: ${{ matrix.os }}
steps:
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
@ -166,16 +166,16 @@ jobs:
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.profile }}-${{ matrix.otp }}
path: _packages/${{ matrix.profile }}/.
path: _packages/${{ matrix.profile }}/
linux:
needs: prepare
runs-on: ${{ matrix.build_machine }}
container:
image: "ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
image: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
strategy:
fail-fast: false
@ -183,7 +183,7 @@ jobs:
profile:
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
otp:
- 24.2.1-1 # we test with OTP 23, but only build package on OTP 24 versions
- 24.3.4.2-1 # we test with OTP 23, but only build package on OTP 24 versions
elixir:
- 1.13.4
# used to split elixir packages into a separate job, since the
@ -232,14 +232,14 @@ jobs:
profile: emqx-enterprise
include:
- profile: emqx
otp: 24.2.1-1
otp: 24.3.4.2-1
elixir: 1.13.4
build_elixir: with_elixir
arch: amd64
os: ubuntu20.04
build_machine: ubuntu-20.04
- profile: emqx
otp: 24.2.1-1
otp: 24.3.4.2-1
elixir: 1.13.4
build_elixir: with_elixir
arch: amd64
@ -253,7 +253,7 @@ jobs:
steps:
- uses: AutoModality/action-clean@v1
if: matrix.build_machine == 'aws-arm64'
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
@ -290,12 +290,12 @@ jobs:
--pkgtype "${PKGTYPE}" \
--arch "${ARCH}" \
--elixir "${IsElixir}" \
--builder "ghcr.io/emqx/emqx-builder/5.0-17:${ELIXIR}-${OTP}-${SYSTEM}"
--builder "ghcr.io/emqx/emqx-builder/5.0-18:${ELIXIR}-${OTP}-${SYSTEM}"
done
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.profile }}-${{ matrix.otp }}
path: source/_packages/${{ matrix.profile }}/.
path: source/_packages/${{ matrix.profile }}/
publish_artifacts:
runs-on: ubuntu-20.04
@ -307,12 +307,12 @@ jobs:
profile:
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
otp:
- 24.2.1-1
- 24.3.4.2-1
include:
- profile: emqx
otp: windows # otp version on windows is rather fixed
steps:
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: ${{ matrix.profile }}-${{ matrix.otp }}
path: packages/${{ matrix.profile }}
@ -320,7 +320,7 @@ jobs:
run: sudo apt-get update && sudo apt install -y dos2unix
- name: get packages
run: |
DEFAULT_BEAM_PLATFORM='otp24.2.1-1'
DEFAULT_BEAM_PLATFORM='otp24.3.4.2-1'
set -e -u
cd packages/${{ matrix.profile }}
# Make a copy of the default OTP version package to a file without OTP version infix
@ -334,7 +334,7 @@ jobs:
echo "$(cat $var.sha256) $var" | sha256sum -c || exit 1
done
cd -
- uses: aws-actions/configure-aws-credentials@v1
- uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

View File

@ -32,18 +32,18 @@ jobs:
- emqx
- emqx-enterprise
otp:
- 24.2.1-1
- 24.3.4.2-1
elixir:
- 1.13.4
os:
- ubuntu20.04
- el8
container: "ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
container: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: prepare
@ -73,11 +73,11 @@ jobs:
run: |
make ${EMQX_NAME}-elixir-pkg
./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-pkg
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.profile}}-${{ matrix.otp }}-${{ matrix.os }}
path: _packages/${{ matrix.profile}}/*
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: "${{ matrix.profile }}_schema_dump"
path: |
@ -94,8 +94,8 @@ jobs:
otp:
- 24.2.1
steps:
- uses: actions/checkout@v2
- uses: ilammy/msvc-dev-cmd@v1
- uses: actions/checkout@v3
- uses: ilammy/msvc-dev-cmd@v1.12.0
- uses: erlef/setup-beam@v1
with:
otp-version: ${{ matrix.otp }}
@ -119,7 +119,7 @@ jobs:
echo "EMQX installed"
./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall
echo "EMQX uninstalled"
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: windows
path: _packages/${{ matrix.profile}}/*
@ -132,14 +132,14 @@ jobs:
- emqx
- emqx-enterprise
otp:
- 24.2.1-1
- 24.3.4.2-1
os:
- macos-11
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: prepare
run: |
echo "EMQX_NAME=${{ matrix.profile }}" >> $GITHUB_ENV
@ -153,7 +153,7 @@ jobs:
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: macos
path: _packages/**/*
@ -167,7 +167,7 @@ jobs:
- emqx-enterprise
runs-on: aws-amd64
steps:
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
name: Download schema dump
with:
name: "${{ matrix.profile }}_schema_dump"

View File

@ -5,9 +5,9 @@ on: [pull_request, push]
jobs:
check_deps_integrity:
runs-on: ubuntu-20.04
container: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04
container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: Run check-deps-integrity.escript
run: ./scripts/check-deps-integrity.escript

View File

@ -5,9 +5,9 @@ on: [pull_request]
jobs:
code_style_check:
runs-on: ubuntu-20.04
container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04"
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 1000
- name: Work around https://github.com/actions/checkout/issues/766

View File

@ -8,7 +8,7 @@ jobs:
elixir_apps_check:
runs-on: ubuntu-latest
# just use the latest builder
container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04"
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
strategy:
fail-fast: false
@ -23,7 +23,7 @@ jobs:
- name: fix_git_permission
run: git config --global --add safe.directory '/__w/emqx/emqx'
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: ensure rebar

View File

@ -7,11 +7,11 @@ on: [pull_request, push]
jobs:
elixir_deps_check:
runs-on: ubuntu-20.04
container: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04
container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: ensure rebar
run: ./scripts/ensure-rebar3.sh
- name: Work around https://github.com/actions/checkout/issues/766

View File

@ -12,22 +12,11 @@ on:
jobs:
elixir_release_build:
runs-on: ubuntu-latest
strategy:
matrix:
otp:
- 24.2.1-1
elixir:
- 1.13.4
os:
- ubuntu20.04
profile:
- emqx
- emqx-enterprise
container: ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}
container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v3
- name: install tools
run: apt update && apt install netcat-openbsd
- name: Work around https://github.com/actions/checkout/issues/766

View File

@ -10,7 +10,7 @@ jobs:
strategy:
fail-fast: false
steps:
- uses: aws-actions/configure-aws-credentials@v1
- uses: aws-actions/configure-aws-credentials@v1-node16
with:
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
@ -32,7 +32,7 @@ jobs:
esac
aws s3 cp --recursive s3://${{ secrets.AWS_S3_BUCKET }}/$s3dir/${{ github.ref_name }} packages
cd packages
DEFAULT_BEAM_PLATFORM='otp24.2.1-1'
DEFAULT_BEAM_PLATFORM='otp24.3.4.2-1'
# all packages including full-name and default-name are uploaded to s3
# but we only upload default-name packages (and elixir) as github artifacts
# so we rename (overwrite) non-default packages before uploading
@ -41,7 +41,7 @@ jobs:
echo "$fname -> $default_fname"
mv -f "$fname" "$default_fname"
done < <(find . -maxdepth 1 -type f | grep -E "emqx(-enterprise)?-5\.[0-9]+\.[0-9]+.*-${DEFAULT_BEAM_PLATFORM}" | grep -v elixir)
- uses: alexellis/upload-assets@0.2.2
- uses: alexellis/upload-assets@0.4.0
env:
GITHUB_TOKEN: ${{ github.token }}
with:
@ -57,24 +57,6 @@ jobs:
-X POST \
-d "{\"repo\":\"emqx/emqx\", \"tag\": \"${{ github.ref_name }}\" }" \
${{ secrets.EMQX_IO_RELEASE_API }}
- uses: emqx/push-helm-action@v1
if: github.event_name == 'release' && startsWith(github.ref_name, 'v')
with:
charts_dir: "${{ github.workspace }}/deploy/charts/emqx"
version: ${{ github.ref_name }}
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws_region: "us-west-2"
aws_bucket_name: "repos-emqx-io"
- uses: emqx/push-helm-action@v1
if: github.event_name == 'release' && startsWith(github.ref_name, 'e')
with:
charts_dir: "${{ github.workspace }}/deploy/charts/emqx-enterprise"
version: ${{ github.ref_name }}
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws_region: "us-west-2"
aws_bucket_name: "repos-emqx-io"
- name: update homebrew packages
if: github.event_name == 'release'
run: |
@ -96,3 +78,31 @@ jobs:
-d "{\"ref\":\"v1.0.4\",\"inputs\":{\"version\": \"${{ github.ref_name }}\"}}" \
"https://api.github.com/repos/emqx/emqx-ci-helper/actions/workflows/update_emqx_homebrew.yaml/dispatches"
fi
upload-helm:
runs-on: ubuntu-20.04
if: github.event_name == 'release'
strategy:
fail-fast: false
steps:
- uses: actions/checkout@v3
with:
ref: ${{ github.ref }}
- uses: emqx/push-helm-action@v1
if: startsWith(github.ref_name, 'v')
with:
charts_dir: "${{ github.workspace }}/deploy/charts/emqx"
version: ${{ github.ref_name }}
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws_region: "us-west-2"
aws_bucket_name: "repos-emqx-io"
- uses: emqx/push-helm-action@v1
if: startsWith(github.ref_name, 'e')
with:
charts_dir: "${{ github.workspace }}/deploy/charts/emqx-enterprise"
version: ${{ github.ref_name }}
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
aws_region: "us-west-2"
aws_bucket_name: "repos-emqx-io"

View File

@ -12,7 +12,7 @@ jobs:
strategy:
matrix:
otp:
- 24.2.1-1
- 24.3.4.2-1
# no need to use more than 1 version of Elixir, since tests
# run using only Erlang code. This is needed just to specify
# the base image.
@ -24,14 +24,14 @@ jobs:
- amd64
runs-on: aws-amd64
container: "ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir}}-${{ matrix.otp }}-${{ matrix.os }}"
container: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir}}-${{ matrix.otp }}-${{ matrix.os }}"
defaults:
run:
shell: bash
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
fetch-depth: 0
- name: run
@ -61,7 +61,7 @@ jobs:
./rebar3 eunit -v
./rebar3 ct -v
./rebar3 proper -d test/props
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
if: failure()
with:
name: logs

View File

@ -16,10 +16,10 @@ jobs:
prepare:
runs-on: ubuntu-20.04
# prepare source with any OTP version, no need for a matrix
container: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-alpine3.15.1
container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-alpine3.15.1
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
path: source
fetch-depth: 0
@ -27,7 +27,7 @@ jobs:
run: |
make -C source deps-all
zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: source
path: source.zip
@ -49,7 +49,7 @@ jobs:
os:
- ["alpine3.15.1", "alpine:3.15.1"]
otp:
- 24.2.1-1
- 24.3.4.2-1
elixir:
- 1.13.4
arch:
@ -58,7 +58,7 @@ jobs:
- uses: erlef/setup-beam@v1
with:
otp-version: "24.2"
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
@ -68,7 +68,7 @@ jobs:
- name: make docker image
working-directory: source
env:
EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
EMQX_RUNNER: ${{ matrix.os[1] }}
run: |
make ${{ matrix.profile }}-docker
@ -120,7 +120,7 @@ jobs:
os:
- ["debian11", "debian:11-slim"]
otp:
- 24.2.1-1
- 24.3.4.2-1
elixir:
- 1.13.4
arch:
@ -131,7 +131,7 @@ jobs:
- uses: erlef/setup-beam@v1
with:
otp-version: "24.2"
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
@ -141,7 +141,7 @@ jobs:
- name: make docker image
working-directory: source
env:
EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
EMQX_RUNNER: ${{ matrix.os[1] }}
run: |
make ${{ matrix.profile }}-docker
@ -207,7 +207,7 @@ jobs:
echo "waiting ${{ matrix.profile }} cluster scale"
sleep 1
done
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: emqx/paho.mqtt.testing
ref: develop-4.0

View File

@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout source code
uses: actions/checkout@master
uses: actions/checkout@v3
- name: Install gitlint
run: |
sudo apt-get update

View File

@ -5,7 +5,7 @@ on:
tags:
- "v5.*"
pull_request:
branchs:
branches:
- "master"
jobs:
@ -23,11 +23,11 @@ jobs:
JMETER_VERSION: 5.4.3
run: |
wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz https://downloads.apache.org/jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: apache-jmeter.tgz
path: /tmp/apache-jmeter.tgz
- uses: actions/checkout@v2
- uses: actions/checkout@v3
- name: zip emqx docker image
id: build_docker
if: endsWith(github.repository, 'emqx')
@ -38,7 +38,7 @@ jobs:
VSN="$(./pkg-vsn.sh $PROFILE)"
echo "::set-output name=version::${VSN}"
docker save -o emqx.tar emqx/emqx:${VSN}
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: emqx.tar
path: ./emqx.tar
@ -60,8 +60,8 @@ jobs:
- uses: erlef/setup-beam@v1
with:
otp-version: "24.2"
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
name: emqx.tar
path: /tmp
@ -89,17 +89,19 @@ jobs:
done
docker ps -a
echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: emqx/emqx-fvt
ref: broker-autotest
path: scripts
- uses: actions/setup-java@v1
- uses: actions/setup-java@v3
with:
java-version: '8.0.282' # The JDK version to make available on the path.
java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
architecture: x64 # (x64 or x86) - defaults to x64
- uses: actions/download-artifact@v2
# https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md
distribution: 'zulu'
- uses: actions/download-artifact@v3
with:
name: apache-jmeter.tgz
path: /tmp
@ -127,7 +129,7 @@ jobs:
echo "check logs filed"
exit 1
fi
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
if: always()
with:
name: jmeter_logs
@ -154,8 +156,8 @@ jobs:
- uses: erlef/setup-beam@v1
with:
otp-version: "24.2"
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
name: emqx.tar
path: /tmp
@ -186,17 +188,19 @@ jobs:
docker ps -a
echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV
echo PGSQL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' pgsql-tls) >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: emqx/emqx-fvt
ref: broker-autotest
path: scripts
- uses: actions/setup-java@v1
- uses: actions/setup-java@v3
with:
java-version: '8.0.282' # The JDK version to make available on the path.
java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
architecture: x64 # (x64 or x86) - defaults to x64
- uses: actions/download-artifact@v2
# https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md
distribution: 'zulu'
- uses: actions/download-artifact@v3
with:
name: apache-jmeter.tgz
path: /tmp
@ -234,7 +238,7 @@ jobs:
echo "check logs filed"
exit 1
fi
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
if: always()
with:
name: jmeter_logs
@ -258,8 +262,8 @@ jobs:
- uses: erlef/setup-beam@v1
with:
otp-version: "24.2"
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
name: emqx.tar
path: /tmp
@ -290,17 +294,19 @@ jobs:
docker ps -a
echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV
echo MYSQL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mysql-tls) >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: emqx/emqx-fvt
ref: broker-autotest
path: scripts
- uses: actions/setup-java@v1
- uses: actions/setup-java@v3
with:
java-version: '8.0.282' # The JDK version to make available on the path.
java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
architecture: x64 # (x64 or x86) - defaults to x64
- uses: actions/download-artifact@v2
# https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md
distribution: 'zulu'
- uses: actions/download-artifact@v3
with:
name: apache-jmeter.tgz
path: /tmp
@ -338,7 +344,7 @@ jobs:
echo "check logs filed"
exit 1
fi
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
if: always()
with:
name: jmeter_logs
@ -358,8 +364,8 @@ jobs:
- uses: erlef/setup-beam@v1
with:
otp-version: "24.2"
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
name: emqx.tar
path: /tmp
@ -387,7 +393,7 @@ jobs:
done
docker ps -a
echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: emqx/emqx-fvt
ref: broker-autotest
@ -400,12 +406,14 @@ jobs:
cd target
docker run --name jwks_server --network emqx_bridge --ip 172.100.239.88 -d -v $(pwd)/jwkserver-0.0.1.jar:/jwks_server/jwkserver-0.0.1.jar --workdir /jwks_server openjdk:8-jdk bash \
-c "java -jar jwkserver-0.0.1.jar"
- uses: actions/setup-java@v1
- uses: actions/setup-java@v3
with:
java-version: '8.0.282' # The JDK version to make available on the path.
java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
architecture: x64 # (x64 or x86) - defaults to x64
- uses: actions/download-artifact@v2
# https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md
distribution: 'zulu'
- uses: actions/download-artifact@v3
with:
name: apache-jmeter.tgz
path: /tmp
@ -434,7 +442,7 @@ jobs:
echo "check logs filed"
exit 1
fi
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
if: always()
with:
name: jmeter_logs
@ -455,8 +463,8 @@ jobs:
- uses: erlef/setup-beam@v1
with:
otp-version: "24.2"
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
name: emqx.tar
path: /tmp
@ -485,17 +493,19 @@ jobs:
done
docker ps -a
echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: emqx/emqx-fvt
ref: broker-autotest
path: scripts
- uses: actions/setup-java@v1
- uses: actions/setup-java@v3
with:
java-version: '8.0.282' # The JDK version to make available on the path.
java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk
architecture: x64 # (x64 or x86) - defaults to x64
- uses: actions/download-artifact@v2
# https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md
distribution: 'zulu'
- uses: actions/download-artifact@v3
with:
name: apache-jmeter.tgz
path: /tmp
@ -524,7 +534,7 @@ jobs:
echo "check logs filed"
exit 1
fi
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
if: always()
with:
name: jmeter_logs
@ -534,7 +544,7 @@ jobs:
runs-on: ubuntu-latest
needs: [advanced_feat,pgsql_authn_authz,JWT_authn,mysql_authn_authz,built_in_database_authn_authz]
steps:
- uses: geekyeggo/delete-artifact@v1
- uses: geekyeggo/delete-artifact@v2
with:
name: emqx.tar

View File

@ -16,7 +16,7 @@ on:
jobs:
relup_test_plan:
runs-on: ubuntu-20.04
container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04"
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
outputs:
CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }}
@ -24,7 +24,7 @@ jobs:
run:
shell: bash
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
name: Checkout
with:
path: emqx
@ -45,7 +45,7 @@ jobs:
cd emqx
make emqx-tgz
make emqx-enterprise-tgz
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
name: Upload built emqx and test scenario
with:
name: emqx_built
@ -75,7 +75,7 @@ jobs:
- uses: erlef/setup-beam@v1
with:
otp-version: "24.2"
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
repository: hawk/lux
ref: lux-2.8.1
@ -88,7 +88,7 @@ jobs:
./configure
make
echo "$(pwd)/bin" >> $GITHUB_PATH
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
name: Download built emqx and test scenario
with:
name: emqx_built
@ -114,7 +114,7 @@ jobs:
docker logs node2.emqx.io | tee lux_logs/emqx2.log
exit 1
fi
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
name: Save debug data
if: failure()
with:

View File

@ -17,12 +17,12 @@ jobs:
prepare:
runs-on: ubuntu-20.04
# prepare source with any OTP version, no need for a matrix
container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04"
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
outputs:
fast_ct_apps: ${{ steps.run_find_apps.outputs.fast_ct_apps }}
docker_ct_apps: ${{ steps.run_find_apps.outputs.docker_ct_apps }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v3
with:
path: source
fetch-depth: 0
@ -43,7 +43,7 @@ jobs:
./rebar3 as test compile
cd ..
zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: source
path: source.zip
@ -60,11 +60,11 @@ jobs:
defaults:
run:
shell: bash
container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04"
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
@ -86,7 +86,7 @@ jobs:
working-directory: source
run: make proper
- uses: actions/upload-artifact@v2
- uses: actions/upload-artifact@v3
with:
name: coverdata
path: source/_build/test/cover
@ -107,7 +107,7 @@ jobs:
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
@ -121,12 +121,13 @@ jobs:
PGSQL_TAG: 13
REDIS_TAG: 6
run: |
rm _build/default/lib/rocksdb/_build/cmake/CMakeCache.txt
./scripts/ct/run.sh --app ${{ matrix.app_name }}
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
with:
name: coverdata
path: source/_build/test/cover
- uses: actions/upload-artifact@v1
- uses: actions/upload-artifact@v3
if: failure()
with:
name: logs_${{ matrix.otp_release }}-${{ matrix.profile }}
@ -143,14 +144,14 @@ jobs:
- emqx-enterprise
runs-on: aws-amd64
container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04"
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
defaults:
run:
shell: bash
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
@ -200,17 +201,17 @@ jobs:
- ct
- ct_docker
runs-on: ubuntu-20.04
container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04"
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
with:
name: source
path: .
- name: unzip source code
run: unzip -q source.zip
- uses: actions/download-artifact@v2
- uses: actions/download-artifact@v3
name: download coverdata
with:
name: coverdata

View File

@ -7,7 +7,7 @@ jobs:
runs-on: ubuntu-20.04
steps:
- name: Checkout source code
uses: actions/checkout@master
uses: actions/checkout@v3
- name: Install shellcheck
run: |
sudo apt-get update

View File

@ -17,7 +17,7 @@ jobs:
steps:
- name: Close Stale Issues
uses: actions/stale@v4.1.0
uses: actions/stale@v6
with:
days-before-stale: 7
days-before-close: 7

1
.gitignore vendored
View File

@ -68,3 +68,4 @@ apps/emqx/test/emqx_static_checks_data/master.bpapi
# rendered configurations
*.conf.rendered
lux_logs/
/.prepare

View File

@ -1,2 +1,2 @@
erlang 24.2.1-1
erlang 24.3.4.2-1
elixir 1.13.4-otp-24

View File

@ -1,199 +0,0 @@
# 5.0.9
## Enhancements
* Add `cert_common_name` and `cert_subject` placeholder support for authz_http and authz_mongo.[#8973](https://github.com/emqx/emqx/pull/8973)
## Bug fixes
* Check ACLs for last will testament topic before publishing the message. [#8930](https://github.com/emqx/emqx/pull/8930)
* Fix GET /listeners API crash When some nodes still in initial configuration. [#9002](https://github.com/emqx/emqx/pull/9002)
* Fix empty variable interpolation in authentication and authorization. Placeholders for undefined variables are rendered now as empty strings and do not cause errors anymore. [#8963](https://github.com/emqx/emqx/pull/8963)
* Fix the latency statistics error of the slow subscription module when `stats_type` is `internal` or `response`. [#8986](https://github.com/emqx/emqx/pull/8986)
# 5.0.8
## Bug fixes
* Fix exhook `client.authorize` never being executed. [#8780](https://github.com/emqx/emqx/pull/8780)
* Fix JWT plugin don't support non-integer timestamp claims. [#8867](https://github.com/emqx/emqx/pull/8867)
* Avoid publishing will message when client fails to authenticate. [#8887](https://github.com/emqx/emqx/pull/8887)
* Speed up dispatching of shared subscription messages in a cluster [#8893](https://github.com/emqx/emqx/pull/8893)
* Fix the extra / prefix when CoAP gateway parsing client topics. [#8658](https://github.com/emqx/emqx/pull/8658)
* Speed up updating the configuration, When some nodes in the cluster are down. [#8857](https://github.com/emqx/emqx/pull/8857)
* Fix delayed publish inaccurate caused by os time change. [#8926](https://github.com/emqx/emqx/pull/8926)
* Fix that EMQX can't start when the retainer is disabled [#8911](https://github.com/emqx/emqx/pull/8911)
* Fix that redis authn will deny the unknown users [#8934](https://github.com/emqx/emqx/pull/8934)
* Fix ExProto UDP client keepalive checking error.
This causes the clients to not expire as long as a new UDP packet arrives [#8866](https://github.com/emqx/emqx/pull/8866)
* Fix that MQTT Bridge message payload could be empty string. [#8949](https://github.com/emqx/emqx/pull/8949)
## Enhancements
* Print a warning message when boot with the default (insecure) Erlang cookie. [#8905](https://github.com/emqx/emqx/pull/8905)
* Change the `/gateway` API path to plural form. [#8823](https://github.com/emqx/emqx/pull/8823)
* Don't allow updating config items when they already exist in `local-override.conf`. [#8851](https://github.com/emqx/emqx/pull/8851)
* Remove `node.etc_dir` from emqx.conf, because it is never used.
Also allow user to customize the logging directory [#8892](https://github.com/emqx/emqx/pull/8892)
* Added a new API `POST /listeners` for creating listener. [#8876](https://github.com/emqx/emqx/pull/8876)
* Close ExProto client process immediately if it's keepalive timeouted. [#8866](https://github.com/emqx/emqx/pull/8866)
* Upgrade grpc-erl driver to 0.6.7 to support batch operation in sending stream. [#8866](https://github.com/emqx/emqx/pull/8866)
# 5.0.7
## Bug fixes
* Remove `will_msg` (not used) field from the client API. [#8721](https://github.com/emqx/emqx/pull/8721)
* Fix `$queue` topic name error in management API return. [#8728](https://github.com/emqx/emqx/pull/8728)
* Fix race condition which may cause `client.connected` and `client.disconnected` out of order. [#8625](https://github.com/emqx/emqx/pull/8625)
* Fix quic listener default idle timeout's type. [#8826](https://github.com/emqx/emqx/pull/8826)
## Enhancements
* Do not auto-populate default SSL cipher suites, so that the configs are less bloated. [#8769](https://github.com/emqx/emqx/pull/8769)
# 5.0.6
## Bug fixes
* Upgrade Dashboard version to fix an issue where the node status was not displayed correctly. [#8771](https://github.com/emqx/emqx/pull/8771)
# 5.0.5
## Bug fixes
* Allow changing the license type from key to file (and vice-versa). [#8598](https://github.com/emqx/emqx/pull/8598)
* Add back http connector config keys `max_retries` `retry_interval` as deprecated fields [#8672](https://github.com/emqx/emqx/issues/8672)
This caused upgrade failure in 5.0.4, because it would fail to boot on configs created from older version.
## Enhancements
* Add `bootstrap_users_file` configuration to add default Dashboard username list, which is only added when EMQX is first started.
* The license is now copied to all nodes in the cluster when it's reloaded. [#8598](https://github.com/emqx/emqx/pull/8598)
* Added a HTTP API to manage licenses. [#8610](https://github.com/emqx/emqx/pull/8610)
* Updated `/nodes` API node_status from `Running/Stopped` to `running/stopped`. [#8642](https://github.com/emqx/emqx/pull/8642)
* Improve handling of placeholder interpolation errors [#8635](https://github.com/emqx/emqx/pull/8635)
* Better logging on unknown object IDs. [#8670](https://github.com/emqx/emqx/pull/8670)
* The bind option support `:1883` style. [#8758](https://github.com/emqx/emqx/pull/8758)
# 5.0.4
## Bug fixes
* The `data/configs/cluster-override.conf` is cleared to 0KB if `hocon_pp:do/2` failed [commits/71f64251](https://github.com/emqx/emqx/pull/8443/commits/71f642518a683cc91a32fd542aafaac6ef915720)
* Improve the health_check for webhooks.
Prior to this change, the webhook only checks the connectivity of the TCP port using `gen_tcp:connect/2`, so
if it's a HTTPs server, we didn't check if TLS handshake was successful.
[commits/6b45d2ea](https://github.com/emqx/emqx/commit/6b45d2ea9fde6d3b4a5b007f7a8c5a1c573d141e)
* The `created_at` field of rules is missing after emqx restarts. [commits/5fc09e6b](https://github.com/emqx/emqx/commit/5fc09e6b950c340243d7be627a0ce1700691221c)
* The rule engine's jq function now works even when the path to the EMQX install dir contains spaces [jq#35](https://github.com/emqx/jq/pull/35) [#8455](https://github.com/emqx/emqx/pull/8455)
* Avoid applying any ACL checks on superusers [#8452](https://github.com/emqx/emqx/pull/8452)
* Fix statistics related system topic name error
* Fix AuthN JWKS SSL schema. Using schema in `emqx_schema`. [#8458](https://github.com/emqx/emqx/pull/8458)
* `sentinel` field should be required when AuthN/AuthZ Redis using sentinel mode. [#8458](https://github.com/emqx/emqx/pull/8458)
* Fix bad swagger format. [#8517](https://github.com/emqx/emqx/pull/8517)
* Fix `chars_limit` is not working when `formatter` is `json`. [#8518](http://github.com/emqx/emqx/pull/8518)
* Ensuring that exhook dispatches the client events are sequential. [#8530](https://github.com/emqx/emqx/pull/8530)
* Avoid using RocksDB backend for persistent sessions when such backend is unavailable. [#8528](https://github.com/emqx/emqx/pull/8528)
* Fix AuthN `cert_subject` and `cert_common_name` placeholder rendering failure. [#8531](https://github.com/emqx/emqx/pull/8531)
* Support listen on an IPv6 address, e.g: [::1]:1883 or ::1:1883. [#8547](https://github.com/emqx/emqx/pull/8547)
* GET '/rules' support for pagination and fuzzy search. [#8472](https://github.com/emqx/emqx/pull/8472)
**‼️ Note** : The previous API only returns array: `[RuleObj1,RuleObj2]`, after updating, it will become
`{"data": [RuleObj1,RuleObj2], "meta":{"count":2, "limit":100, "page":1}`,
which will carry the paging meta information.
* Fix the issue that webhook leaks TCP connections. [ehttpc#34](https://github.com/emqx/ehttpc/pull/34), [#8580](https://github.com/emqx/emqx/pull/8580)
## Enhancements
* Improve the dashboard listener startup log, the listener name is no longer spliced with port information,
and the colon(:) is no longer displayed when IP is not specified. [#8480](https://github.com/emqx/emqx/pull/8480)
* Remove `/configs/listeners` API, use `/listeners/` instead. [#8485](https://github.com/emqx/emqx/pull/8485)
* Optimize performance of builtin database operations in processes with long message queue [#8439](https://github.com/emqx/emqx/pull/8439)
* Improve authentication tracing. [#8554](https://github.com/emqx/emqx/pull/8554)
* Standardize the '/listeners' and `/gateway/<name>/listeners` API fields.
It will introduce some incompatible updates, see [#8571](https://github.com/emqx/emqx/pull/8571)
* Add option to perform GC on connection process after TLS/SSL handshake is performed. [#8637](https://github.com/emqx/emqx/pull/8637)
# 5.0.3
## Bug fixes
* Websocket listener failed to read headers `X-Forwarded-For` and `X-Forwarded-Port` [#8415](https://github.com/emqx/emqx/pull/8415)
* Deleted `cluster_singleton` from MQTT bridge config document. This config is no longer applicable in 5.0 [#8407](https://github.com/emqx/emqx/pull/8407)
* Fix `emqx/emqx:latest` docker image publish to use the Erlang flavor, but not Elixir flavor [#8414](https://github.com/emqx/emqx/pull/8414)
* Changed the `exp` field in JWT auth to be optional rather than required to fix backwards compatibility with 4.X releases. [#8425](https://github.com/emqx/emqx/pull/8425)
## Enhancements
* Improve the speed of dashboard's HTTP API routing rule generation, which sometimes causes timeout [#8438](https://github.com/emqx/emqx/pull/8438)
# 5.0.2
Announcement: EMQX team has decided to stop supporting relup for opensource edition.
Going forward, it will be an enterprise-only feature.
Main reason: relup requires carefully crafted upgrade instructions from ALL previous versions.
For example, 4.3 is now at 4.3.16, we have `4.3.0->4.3.16`, `4.3.1->4.3.16`, ... 16 such upgrade paths in total to maintain.
This had been the biggest obstacle for EMQX team to act agile enough in delivering enhancements and fixes.
## Enhancements
## Bug fixes
* Fixed a typo in `bin/emqx` which affects MacOs release when trying to enable Erlang distribution over TLS [#8398](https://github.com/emqx/emqx/pull/8398)
* Restricted shell was accidentally disabled in 5.0.1, it has been added back. [#8396](https://github.com/emqx/emqx/pull/8396)
# 5.0.1
5.0.1 is built on [Erlang/OTP 24.2.1-1](https://github.com/emqx/otp/tree/OTP-24.2.1-1). Same as 5.0.0.
5.0.0 (like 4.4.x) had Erlang/OTP version number in the package name.
This is because we wanted to release different flavor packages (on different Elixir/Erlang/OTP platforms).
However the long package names also causes confusion, as users may not know which to choose if there were more than
one presented at the same time.
Going forward, (starting from 5.0.1), packages will be released in both default (short) and flavored (long) package names.
For example: `emqx-5.0.1-otp24.2.1-1-ubuntu20.04-amd64.tar.gz`,
but only the default one is presented to the users: `emqx-5.0.1-ubuntu20.04-amd64.tar.gz`.
In case anyone wants to try a different flavor package, it can be downloaded from the public s3 bucket,
for example:
https://s3.us-west-2.amazonaws.com/packages.emqx/emqx-ce/v5.0.1/emqx-5.0.1-otp24.2.1-1-ubuntu20.04-arm64.tar.gz
Exceptions:
* Windows package is always presented with short name (currently on Erlang/OTP 24.2.1).
* Elixir package name is flavored with both Elixir and Erlang/OTP version numbers,
for example: `emqx-5.0.1-elixir1.13.4-otp24.2.1-1-ubuntu20.04-amd64.tar.gz`
## Enhancements
* Removed management API auth for prometheus scraping endpoint /api/v5/prometheus/stats [#8299](https://github.com/emqx/emqx/pull/8299)
* Added more TCP options for exhook (gRPC) connections. [#8317](https://github.com/emqx/emqx/pull/8317)
* HTTP Servers used for authentication and authorization will now indicate the result via the response body. [#8374](https://github.com/emqx/emqx/pull/8374) [#8377](https://github.com/emqx/emqx/pull/8377)
* Bulk subscribe/unsubscribe APIs [#8356](https://github.com/emqx/emqx/pull/8356)
* Added exclusive subscription [#8315](https://github.com/emqx/emqx/pull/8315)
* Provide authentication counter metrics [#8352](https://github.com/emqx/emqx/pull/8352) [#8375](https://github.com/emqx/emqx/pull/8375)
* Do not allow admin user self-deletion [#8286](https://github.com/emqx/emqx/pull/8286)
* After restart, ensure to copy `cluster-override.conf` from the clustered node which has the greatest `tnxid`. [#8333](https://github.com/emqx/emqx/pull/8333)
## Bug fixes
* A bug fix ported from 4.x: allow deleting subscriptions from `client.subscribe` hookpoint callback result. [#8304](https://github.com/emqx/emqx/pull/8304) [#8347](https://github.com/emqx/emqx/pull/8377)
* Fixed Erlang distribution over TLS [#8309](https://github.com/emqx/emqx/pull/8309)
* Made possible to override authentication configs from environment variables [#8323](https://github.com/emqx/emqx/pull/8309)
* Made authentication passwords in Mnesia database backward compatible to 4.x, so we can support data migration better. [#8351](https://github.com/emqx/emqx/pull/8351)
* Fix plugins upload for rpm/deb installations [#8379](https://github.com/emqx/emqx/pull/8379)
* Sync data/authz/acl.conf and data/certs from clustered nodes after a new node joins the cluster [#8369](https://github.com/emqx/emqx/pull/8369)
* Ensure auto-retry of failed resources [#8371](https://github.com/emqx/emqx/pull/8371)
* Fix the issue that the count of `packets.connack.auth_error` is inaccurate when the client uses a protocol version below MQTT v5.0 to access [#8178](https://github.com/emqx/emqx/pull/8178)
## Others
* Rate limiter interface is hidden so far, it's subject to a UX redesign.
* QUIC library upgraded to 0.0.14.
* Now the default packages will be released without otp version number in the package name.
* Renamed config example file name in `etc` dir.

View File

@ -27,15 +27,14 @@ VOLUME ["/opt/emqx/log", "/opt/emqx/data"]
# emqx will occupy these port:
# - 1883 port for MQTT
# - 8081 for mgmt API
# - 8083 for WebSocket/HTTP
# - 8084 for WSS/HTTPS
# - 8883 port for MQTT(SSL)
# - 11883 port for internal MQTT/TCP
# - 18083 for dashboard
# - 18083 for dashboard and API
# - 4370 default Erlang distrbution port
# - 5369 for backplain gen_rpc
EXPOSE 1883 8081 8083 8084 8883 11883 18083 4370 5369
EXPOSE 1883 8083 8084 8883 11883 18083 4370 5369
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]

View File

@ -6,7 +6,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d
export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.0.9
export EMQX_DASHBOARD_VERSION ?= v1.1.1
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.5
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
@ -30,12 +30,10 @@ export REBAR_GIT_CLONE_OPTIONS += --depth=1
.PHONY: default
default: $(REBAR) $(PROFILE)
.PHONY: prepare
prepare: FORCE
.prepare:
@$(SCRIPTS)/git-hooks-init.sh # this is no longer needed since 5.0 but we keep it anyway
@$(SCRIPTS)/prepare-build-deps.sh
FORCE:
@touch .prepare
.PHONY: all
all: $(REBAR) $(PROFILES)
@ -44,7 +42,23 @@ all: $(REBAR) $(PROFILES)
ensure-rebar3:
@$(SCRIPTS)/ensure-rebar3.sh
$(REBAR): prepare ensure-rebar3
$(REBAR): .prepare ensure-rebar3
.PHONY: ensure-hex
ensure-hex:
@mix local.hex --if-missing --force
.PHONY: ensure-mix-rebar3
ensure-mix-rebar3: $(REBAR)
@mix local.rebar rebar3 $(CURDIR)/rebar3 --if-missing --force
.PHONY: ensure-mix-rebar
ensure-mix-rebar: $(REBAR)
@mix local.rebar --if-missing --force
.PHONY: mix-deps-get
mix-deps-get: $(ELIXIR_COMMON_DEPS)
@mix deps.get
.PHONY: eunit
eunit: $(REBAR) conf-segs

View File

@ -32,12 +32,6 @@ EMQX 自 2013 年在 GitHub 发布开源版本以来,获得了来自 50 多个
docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx:latest
```
或直接试用 EMQX 企业版(已内置 10 个并发连接的永不过期 License
```
docker run -d --name emqx-ee -p 1883:1883 -p 8081:8081 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx-ee:latest
```
接下来请参考 [入门指南](https://www.emqx.io/docs/zh/v5.0/getting-started/getting-started.html#启动-emqx) 开启您的 EMQX 之旅。
#### 在 Kubernetes 上运行 EMQX 集群

View File

@ -28,13 +28,7 @@
#### Установка EMQX с помощью Docker
```
docker run -d --name emqx -p 1883:1883 -p 8081:8081 -p 8083:8083 -p 8883:8883 -p 8084:8084 -p 18083:18083 emqx/emqx
```
Или запустите EMQX Enterprise со встроенной бессрочной лицензией на 10 соединений.
```
docker run -d --name emqx-ee -p 1883:1883 -p 8081:8081 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx-ee:latest
docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8883:8883 -p 8084:8084 -p 18083:18083 emqx/emqx
```
Чтобы ознакомиться с функциональностью EMQX, пожалуйста, следуйте [руководству по началу работы](https://www.emqx.io/docs/en/v5.0/getting-started/getting-started.html#start-emqx).

View File

@ -33,12 +33,6 @@ The simplest way to set up EMQX is to create a managed deployment with EMQX Clou
docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx:latest
```
Or install EMQX Enterprise with a built-in license for ten connections that never expire.
```
docker run -d --name emqx-ee -p 1883:1883 -p 8081:8081 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx-ee:latest
```
Next, please follow the [getting started guide](https://www.emqx.io/docs/en/v5.0/getting-started/getting-started.html#start-emqx) to tour the EMQX features.
#### Run EMQX cluster on kubernetes

View File

@ -113,7 +113,7 @@ the check/consume will succeed, but it will be forced to wait for a short period
burst {
desc {
en: """The burst, This value is based on rate.</br>
en: """The burst, This value is based on rate.<br/>
This value + rate = the maximum limit that can be achieved when limiter burst."""
zh: """突发速率。
突发速率允许短时间内速率超过设置的速率值,突发速率 + 速率 = 当前桶能达到的最大速率值"""
@ -171,7 +171,7 @@ Once the limit is reached, the restricted client will be slow down even be hung
en: """The bytes_in limiter.
This is used to limit the inbound bytes rate for this EMQX node.
Once the limit is reached, the restricted client will be slow down even be hung for a while."""
zh: """流入字节率控制器.
zh: """流入字节率控制器
这个是用来控制当前节点上的数据流入的字节率,每条消息将会消耗和其二进制大小等量的令牌,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间"""
}
label: {

File diff suppressed because it is too large Load Diff

View File

@ -32,7 +32,7 @@
%% `apps/emqx/src/bpapi/README.md'
%% Community edition
-define(EMQX_RELEASE_CE, "5.0.8").
-define(EMQX_RELEASE_CE, "5.0.10").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.0.0-beta.4").

View File

@ -17,9 +17,11 @@
{emqx_license,2}.
{emqx_management,1}.
{emqx_management,2}.
{emqx_management,3}.
{emqx_mgmt_api_plugins,1}.
{emqx_mgmt_cluster,1}.
{emqx_mgmt_trace,1}.
{emqx_mgmt_trace,2}.
{emqx_persistent_session,1}.
{emqx_plugin_libs,1}.
{emqx_prometheus,1}.

View File

@ -27,7 +27,7 @@
{jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.5"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.6"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.30.0"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},

View File

@ -3,7 +3,7 @@
{id, "emqx"},
{description, "EMQX Core"},
% strict semver, bump manually!
{vsn, "5.0.9"},
{vsn, "5.0.10"},
{modules, []},
{registered, []},
{applications, [

View File

@ -24,6 +24,11 @@
authorize/3
]).
-ifdef(TEST).
-compile(export_all).
-compile(nowarn_export_all).
-endif.
%%--------------------------------------------------------------------
%% APIs
%%--------------------------------------------------------------------
@ -45,6 +50,19 @@ authenticate(Credential) ->
%% @doc Check Authorization
-spec authorize(emqx_types:clientinfo(), emqx_types:pubsub(), emqx_types:topic()) ->
allow | deny.
authorize(ClientInfo, PubSub, <<"$delayed/", Data/binary>> = RawTopic) ->
case binary:split(Data, <<"/">>) of
[_, Topic] ->
authorize(ClientInfo, PubSub, Topic);
_ ->
?SLOG(warning, #{
msg => "invalid_dealyed_topic_format",
expected_example => "$delayed/1/t/foo",
got => RawTopic
}),
inc_authz_metrics(deny),
deny
end;
authorize(ClientInfo, PubSub, Topic) ->
Result =
case emqx_authz_cache:is_enabled() of

View File

@ -64,7 +64,7 @@
pre_config_update(_, UpdateReq, OldConfig) ->
try do_pre_config_update(UpdateReq, to_list(OldConfig)) of
{error, Reason} -> {error, Reason};
{ok, NewConfig} -> {ok, return_map(NewConfig)}
{ok, NewConfig} -> {ok, NewConfig}
catch
throw:Reason ->
{error, Reason}
@ -225,9 +225,6 @@ do_check_config(Type, Config, Module) ->
throw({bad_authenticator_config, #{type => Type, reason => E}})
end.
return_map([L]) -> L;
return_map(L) -> L.
to_list(undefined) -> [];
to_list(M) when M =:= #{} -> [];
to_list(M) when is_map(M) -> [M];

View File

@ -345,7 +345,8 @@ handle_in(?CONNECT_PACKET(ConnPkt) = Packet, Channel) ->
fun check_connect/2,
fun enrich_client/2,
fun set_log_meta/2,
fun check_banned/2
fun check_banned/2,
fun count_flapping_event/2
],
ConnPkt,
Channel#channel{conn_state = connecting}
@ -997,8 +998,13 @@ maybe_nack(Delivers) ->
lists:filter(fun not_nacked/1, Delivers).
not_nacked({deliver, _Topic, Msg}) ->
not (emqx_shared_sub:is_ack_required(Msg) andalso
(ok == emqx_shared_sub:nack_no_connection(Msg))).
case emqx_shared_sub:is_ack_required(Msg) of
true ->
ok = emqx_shared_sub:nack_no_connection(Msg),
false;
false ->
true
end.
maybe_mark_as_delivered(Session, Delivers) ->
case emqx_session:info(is_persistent, Session) of
@ -1222,6 +1228,8 @@ handle_call(
ChanInfo1 = info(NChannel),
emqx_cm:set_chan_info(ClientId, ChanInfo1#{sockinfo => SockInfo}),
reply(ok, reset_timer(alive_timer, NChannel));
handle_call(get_mqueue, Channel) ->
reply({ok, get_mqueue(Channel)}, Channel);
handle_call(Req, Channel) ->
?SLOG(error, #{msg => "unexpected_call", call => Req}),
reply(ignored, Channel).
@ -1253,14 +1261,11 @@ handle_info(
{sock_closed, Reason},
Channel =
#channel{
conn_state = ConnState,
clientinfo = ClientInfo = #{zone := Zone}
conn_state = ConnState
}
) when
ConnState =:= connected orelse ConnState =:= reauthenticating
->
emqx_config:get_zone_conf(Zone, [flapping_detect, enable]) andalso
emqx_flapping:detect(ClientInfo),
Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(Channel)),
case maybe_shutdown(Reason, Channel1) of
{ok, Channel2} -> {ok, {event, disconnected}, Channel2};
@ -1629,6 +1634,14 @@ check_banned(_ConnPkt, #channel{clientinfo = ClientInfo}) ->
false -> ok
end.
%%--------------------------------------------------------------------
%% Flapping
count_flapping_event(_ConnPkt, Channel = #channel{clientinfo = ClientInfo = #{zone := Zone}}) ->
emqx_config:get_zone_conf(Zone, [flapping_detect, enable]) andalso
emqx_flapping:detect(ClientInfo),
{ok, Channel}.
%%--------------------------------------------------------------------
%% Authenticate
@ -2085,7 +2098,7 @@ parse_topic_filters(TopicFilters) ->
lists:map(fun emqx_topic:parse/1, TopicFilters).
%%--------------------------------------------------------------------
%% Ensure disconnected
%% Maybe & Ensure disconnected
ensure_disconnected(
Reason,
@ -2130,11 +2143,7 @@ publish_will_msg(ClientInfo, Msg = #message{topic = Topic}) ->
?tp(
warning,
last_will_testament_publish_denied,
#{
client_info => ClientInfo,
topic => Topic,
message => Msg
}
#{topic => Topic}
),
ok
end.
@ -2196,6 +2205,7 @@ shutdown(success, Reply, Packet, Channel) ->
shutdown(Reason, Reply, Packet, Channel) ->
{shutdown, Reason, Reply, Packet, Channel}.
%% mqtt v5 connected sessions
disconnect_and_shutdown(
Reason,
Reply,
@ -2205,9 +2215,12 @@ disconnect_and_shutdown(
) when
ConnState =:= connected orelse ConnState =:= reauthenticating
->
shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), Channel);
NChannel = ensure_disconnected(Reason, Channel),
shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), NChannel);
%% mqtt v3/v4 sessions, mqtt v5 other conn_state sessions
disconnect_and_shutdown(Reason, Reply, Channel) ->
shutdown(Reason, Reply, Channel).
NChannel = ensure_disconnected(Reason, Channel),
shutdown(Reason, Reply, NChannel).
sp(true) -> 1;
sp(false) -> 0.
@ -2228,3 +2241,6 @@ get_mqtt_conf(Zone, Key, Default) ->
set_field(Name, Value, Channel) ->
Pos = emqx_misc:index_of(Name, record_info(fields, channel)),
setelement(Pos + 1, Channel, Value).
get_mqueue(#channel{session = Session}) ->
emqx_session:get_mqueue(Session).

View File

@ -414,9 +414,9 @@ check_config(SchemaMod, RawConf) ->
check_config(SchemaMod, RawConf, Opts0) ->
Opts1 = #{
return_plain => true,
%% TODO: evil, remove, required should be declared in schema
required => false,
format => map
format => map,
%% Don't check lazy types, such as authenticate
check_lazy => false
},
Opts = maps:merge(Opts0, Opts1),
{AppEnvs, CheckedConf} =

View File

@ -49,7 +49,8 @@
-export([
listener_id/2,
parse_listener_id/1,
ensure_override_limiter_conf/2
ensure_override_limiter_conf/2,
esockd_access_rules/1
]).
-export([pre_config_update/3, post_config_update/5]).
@ -497,17 +498,28 @@ ip_port({Addr, Port}) ->
[{ip, Addr}, {port, Port}].
esockd_access_rules(StrRules) ->
Access = fun(S) ->
Access = fun(S, Acc) ->
[A, CIDR] = string:tokens(S, " "),
%% esockd rules only use words 'allow' and 'deny', both are existing
%% comparison of strings may be better, but there is a loss of backward compatibility
case emqx_misc:safe_to_existing_atom(A) of
{ok, Action} ->
[
{
list_to_atom(A),
Action,
case CIDR of
"all" -> all;
_ -> CIDR
end
}
| Acc
];
_ ->
?SLOG(warning, #{msg => "invalid esockd access rule", rule => S}),
Acc
end
end,
[Access(R) || R <- StrRules].
lists:foldr(Access, [], StrRules).
merge_default(Options) ->
case lists:keytake(tcp_options, 1, Options) of
@ -521,12 +533,16 @@ merge_default(Options) ->
integer() | {tuple(), integer()} | string() | binary()
) -> io_lib:chars().
format_bind(Port) when is_integer(Port) ->
%% **Note**:
%% 'For TCP, UDP and IP networks, if the host is empty or a literal
%% unspecified IP address, as in ":80", "0.0.0.0:80" or "[::]:80" for
%% TCP and UDP, "", "0.0.0.0" or "::" for IP, the local system is
%% assumed.'
%%
%% Quoted from: https://pkg.go.dev/net
%% Decided to use this format to display the bind for all interfaces and
%% IPv4/IPv6 support
io_lib:format(":~w", [Port]);
%% Print only the port number when bound on all interfaces
format_bind({{0, 0, 0, 0}, Port}) ->
format_bind(Port);
format_bind({{0, 0, 0, 0, 0, 0, 0, 0}, Port}) ->
format_bind(Port);
format_bind({Addr, Port}) when is_list(Addr) ->
io_lib:format("~ts:~w", [Addr, Port]);
format_bind({Addr, Port}) when is_tuple(Addr), tuple_size(Addr) == 4 ->
@ -538,6 +554,8 @@ format_bind(Str) when is_list(Str) ->
case emqx_schema:to_ip_port(Str) of
{ok, {Ip, Port}} ->
format_bind({Ip, Port});
{ok, Port} ->
format_bind(Port);
{error, _} ->
format_bind(list_to_integer(Str))
end;

View File

@ -74,7 +74,8 @@
to_map/1,
to_log_map/1,
to_list/1,
from_map/1
from_map/1,
estimate_size/1
]).
-export_type([message_map/0]).
@ -175,6 +176,18 @@ make(MsgId, From, QoS, Topic, Payload, Flags, Headers) when
timestamp = Now
}.
%% optimistic esitmation of a message size after serialization
%% not including MQTT v5 message headers/user properties etc.
-spec estimate_size(emqx_types:message()) -> non_neg_integer().
estimate_size(#message{topic = Topic, payload = Payload}) ->
FixedHeaderSize = 1,
VarLenSize = 4,
TopicSize = iolist_size(Topic),
PayloadSize = iolist_size(Payload),
PacketIdSize = 2,
TopicLengthSize = 2,
FixedHeaderSize + VarLenSize + TopicLengthSize + TopicSize + PacketIdSize + PayloadSize.
-spec id(emqx_types:message()) -> maybe(binary()).
id(#message{id = Id}) -> Id.

View File

@ -52,7 +52,9 @@
explain_posix/1,
pmap/2,
pmap/3,
readable_error_msg/1
readable_error_msg/1,
safe_to_existing_atom/1,
safe_to_existing_atom/2
]).
-export([
@ -463,6 +465,18 @@ nolink_apply(Fun, Timeout) when is_function(Fun, 0) ->
exit(timeout)
end.
safe_to_existing_atom(In) ->
safe_to_existing_atom(In, utf8).
safe_to_existing_atom(Bin, Encoding) when is_binary(Bin) ->
try_to_existing_atom(fun erlang:binary_to_existing_atom/2, Bin, Encoding);
safe_to_existing_atom(List, Encoding) when is_list(List) ->
try_to_existing_atom(fun(In, _) -> erlang:list_to_existing_atom(In) end, List, Encoding);
safe_to_existing_atom(Atom, _Encoding) when is_atom(Atom) ->
{ok, Atom};
safe_to_existing_atom(_Any, _Encoding) ->
{error, invalid_type}.
%%------------------------------------------------------------------------------
%% Internal Functions
%%------------------------------------------------------------------------------
@ -533,6 +547,14 @@ readable_error_msg(Error) ->
end
end.
try_to_existing_atom(Convert, Data, Encoding) ->
try Convert(Data, Encoding) of
Atom ->
{ok, Atom}
catch
_:Reason -> {error, Reason}
end.
-ifdef(TEST).
-include_lib("eunit/include/eunit.hrl").

View File

@ -66,7 +66,8 @@
in/2,
out/1,
stats/1,
dropped/1
dropped/1,
to_list/1
]).
-define(NO_PRIORITY_TABLE, disabled).
@ -109,7 +110,7 @@
dropped = 0 :: count(),
p_table = ?NO_PRIORITY_TABLE :: p_table(),
default_p = ?LOWEST_PRIORITY :: priority(),
q = ?PQUEUE:new() :: pq(),
q = emqx_pqueue:new() :: pq(),
shift_opts :: #shift_opts{},
last_prio :: non_neg_integer() | undefined,
p_credit :: non_neg_integer() | undefined
@ -118,7 +119,7 @@
-type mqueue() :: #mqueue{}.
-spec init(options()) -> mqueue().
init(Opts = #{max_len := MaxLen0, store_qos0 := QoS_0}) ->
init(Opts = #{max_len := MaxLen0, store_qos0 := Qos0}) ->
MaxLen =
case (is_integer(MaxLen0) andalso MaxLen0 > ?MAX_LEN_INFINITY) of
true -> MaxLen0;
@ -126,7 +127,7 @@ init(Opts = #{max_len := MaxLen0, store_qos0 := QoS_0}) ->
end,
#mqueue{
max_len = MaxLen,
store_qos0 = QoS_0,
store_qos0 = Qos0,
p_table = get_opt(priorities, Opts, ?NO_PRIORITY_TABLE),
default_p = get_priority_opt(Opts),
shift_opts = get_shift_opt(Opts)
@ -152,6 +153,19 @@ len(#mqueue{len = Len}) -> Len.
max_len(#mqueue{max_len = MaxLen}) -> MaxLen.
%% @doc Return all queued items in a list.
-spec to_list(mqueue()) -> list().
to_list(MQ) ->
to_list(MQ, []).
to_list(MQ, Acc) ->
case out(MQ) of
{empty, _MQ} ->
lists:reverse(Acc);
{{value, Msg}, Q1} ->
to_list(Q1, [Msg | Acc])
end.
%% @doc Return number of dropped messages.
-spec dropped(mqueue()) -> count().
dropped(#mqueue{dropped = Dropped}) -> Dropped.

View File

@ -124,7 +124,10 @@ filter_result(Delivery) ->
max_client_num() ->
emqx:get_config([rpc, tcp_client_num], ?DefaultClientNum).
-spec unwrap_erpc(emqx_rpc:erpc(A)) -> A | {error, _Err}.
-spec unwrap_erpc(emqx_rpc:erpc(A) | [emqx_rpc:erpc(A)]) -> A | {error, _Err} | list().
unwrap_erpc(Res) when is_list(Res) ->
[unwrap_erpc(R) || R <- Res];
unwrap_erpc({ok, A}) ->
A;
unwrap_erpc({throw, A}) ->

View File

@ -39,7 +39,8 @@
-type comma_separated_binary() :: [binary()].
-type comma_separated_atoms() :: [atom()].
-type bar_separated_list() :: list().
-type ip_port() :: tuple().
-type ip_port() :: tuple() | integer().
-type host_port() :: tuple().
-type cipher() :: map().
-typerefl_from_string({duration/0, emqx_schema, to_duration}).
@ -52,6 +53,7 @@
-typerefl_from_string({comma_separated_binary/0, emqx_schema, to_comma_separated_binary}).
-typerefl_from_string({bar_separated_list/0, emqx_schema, to_bar_separated_list}).
-typerefl_from_string({ip_port/0, emqx_schema, to_ip_port}).
-typerefl_from_string({host_port/0, emqx_schema, to_host_port}).
-typerefl_from_string({cipher/0, emqx_schema, to_erl_cipher_suite}).
-typerefl_from_string({comma_separated_atoms/0, emqx_schema, to_comma_separated_atoms}).
@ -78,6 +80,7 @@
to_comma_separated_binary/1,
to_bar_separated_list/1,
to_ip_port/1,
to_host_port/1,
to_erl_cipher_suite/1,
to_comma_separated_atoms/1
]).
@ -96,6 +99,7 @@
comma_separated_binary/0,
bar_separated_list/0,
ip_port/0,
host_port/0,
cipher/0,
comma_separated_atoms/0
]).
@ -1686,7 +1690,7 @@ desc("stats") ->
desc("authorization") ->
"Settings for client authorization.";
desc("mqtt") ->
"Global MQTT configuration.</br>\n"
"Global MQTT configuration.<br/>"
"The configs here work as default values which can be overridden\n"
"in <code>zone</code> configs";
desc("cache") ->
@ -2104,11 +2108,11 @@ ref(Module, StructName) -> hoconsc:ref(Module, StructName).
mk_duration(Desc, OverrideMeta) ->
DefaultMeta = #{
desc => Desc ++
" Time interval is a string that contains a number followed by time unit:</br>\n"
" Time interval is a string that contains a number followed by time unit:<br/>"
"- `ms` for milliseconds,\n"
"- `s` for seconds,\n"
"- `m` for minutes,\n"
"- `h` for hours;\n</br>"
"- `h` for hours;\n<br/>"
"or combination of whereof: `1h5m0s`"
},
hoconsc:mk(typerefl:alias("string", duration()), maps:merge(DefaultMeta, OverrideMeta)).
@ -2168,33 +2172,60 @@ to_bar_separated_list(Str) ->
%% - :1883
%% - :::1883
to_ip_port(Str) ->
case split_ip_port(Str) of
{"", Port} ->
{ok, {{0, 0, 0, 0}, list_to_integer(Port)}};
{Ip, Port} ->
to_host_port(Str, ip_addr).
%% @doc support the following format:
%% - 127.0.0.1:1883
%% - ::1:1883
%% - [::1]:1883
%% - :1883
%% - :::1883
%% - example.com:80
to_host_port(Str) ->
to_host_port(Str, hostname).
%% - example.com:80
to_host_port(Str, IpOrHost) ->
case split_host_port(Str) of
{"", Port} when IpOrHost =:= ip_addr ->
%% this is a local address
{ok, list_to_integer(Port)};
{"", _Port} ->
%% must specify host part when it's a remote endpoint
{error, bad_host_port};
{MaybeIp, Port} ->
PortVal = list_to_integer(Port),
case inet:parse_address(Ip) of
{ok, R} ->
{ok, {R, PortVal}};
_ ->
case inet:parse_address(MaybeIp) of
{ok, IpTuple} ->
{ok, {IpTuple, PortVal}};
_ when IpOrHost =:= hostname ->
%% check is a rfc1035's hostname
case inet_parse:domain(Ip) of
case inet_parse:domain(MaybeIp) of
true ->
{ok, {Ip, PortVal}};
{ok, {MaybeIp, PortVal}};
_ ->
{error, Str}
end
{error, bad_hostname}
end;
_ ->
{error, Str}
{error, bad_ip_port}
end;
_ ->
{error, bad_ip_port}
end.
split_ip_port(Str0) ->
split_host_port(Str0) ->
Str = re:replace(Str0, " ", "", [{return, list}, global]),
case lists:split(string:rchr(Str, $:), Str) of
%% no port
%% no colon
{[], Str} ->
error;
try
%% if it's just a port number, then return as-is
_ = list_to_integer(Str),
{"", Str}
catch
_:_ ->
error
end;
{IpPlusColon, PortString} ->
IpStr0 = lists:droplast(IpPlusColon),
case IpStr0 of
@ -2246,6 +2277,7 @@ validate_alarm_actions(Actions) ->
Error -> {error, Error}
end.
parse_user_lookup_fun({Fun, _} = Lookup) when is_function(Fun, 3) -> Lookup;
parse_user_lookup_fun(StrConf) ->
[ModStr, FunStr] = string:tokens(str(StrConf), ": "),
Mod = list_to_atom(ModStr),

View File

@ -47,6 +47,7 @@
-include("emqx_mqtt.hrl").
-include("logger.hrl").
-include("types.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-ifdef(TEST).
-compile(export_all).
@ -60,7 +61,8 @@
info/2,
is_session/1,
stats/1,
obtain_next_pkt_id/1
obtain_next_pkt_id/1,
get_mqueue/1
]).
-export([
@ -801,7 +803,7 @@ replay(ClientInfo, Session = #session{inflight = Inflight}) ->
-spec terminate(emqx_types:clientinfo(), Reason :: term(), session()) -> ok.
terminate(ClientInfo, Reason, Session) ->
run_terminate_hooks(ClientInfo, Reason, Session),
redispatch_shared_messages(Session),
maybe_redispatch_shared_messages(Reason, Session),
ok.
run_terminate_hooks(ClientInfo, discarded, Session) ->
@ -811,29 +813,27 @@ run_terminate_hooks(ClientInfo, takenover, Session) ->
run_terminate_hooks(ClientInfo, Reason, Session) ->
run_hook('session.terminated', [ClientInfo, Reason, info(Session)]).
redispatch_shared_messages(#session{inflight = Inflight}) ->
InflightList = emqx_inflight:to_list(Inflight),
lists:foreach(
fun
%% Only QoS1 messages get redispatched, because QoS2 messages
%% must be sent to the same client, once they're in flight
({_, #inflight_data{message = #message{qos = ?QOS_2} = Msg}}) ->
?SLOG(warning, #{msg => qos2_lost_no_redispatch}, #{message => Msg});
({_, #inflight_data{message = #message{topic = Topic, qos = ?QOS_1} = Msg}}) ->
case emqx_shared_sub:get_group(Msg) of
{ok, Group} ->
%% Note that dispatch is called with self() in failed subs
%% This is done to avoid dispatching back to caller
Delivery = #delivery{sender = self(), message = Msg},
emqx_shared_sub:dispatch(Group, Topic, Delivery, [self()]);
_ ->
maybe_redispatch_shared_messages(takenover, _Session) ->
ok;
maybe_redispatch_shared_messages(kicked, _Session) ->
ok;
maybe_redispatch_shared_messages(_Reason, Session) ->
redispatch_shared_messages(Session).
redispatch_shared_messages(#session{inflight = Inflight, mqueue = Q}) ->
AllInflights = emqx_inflight:to_list(fun sort_fun/2, Inflight),
F = fun
({_PacketId, #inflight_data{message = #message{qos = ?QOS_1} = Msg}}) ->
%% For QoS 2, here is what the spec says:
%% If the Client's Session terminates before the Client reconnects,
%% the Server MUST NOT send the Application Message to any other
%% subscribed Client [MQTT-4.8.2-5].
{true, Msg};
({_PacketId, #inflight_data{}}) ->
false
end;
(_) ->
ok
end,
InflightList
).
InflightList = lists:filtermap(F, AllInflights),
emqx_shared_sub:redispatch(InflightList ++ emqx_mqueue:to_list(Q)).
-compile({inline, [run_hook/2]}).
run_hook(Name, Args) ->
@ -925,3 +925,6 @@ age(Now, Ts) -> Now - Ts.
set_field(Name, Value, Session) ->
Pos = emqx_misc:index_of(Name, record_info(fields, session)),
setelement(Pos + 1, Session, Value).
get_mqueue(#session{mqueue = Q}) ->
emqx_mqueue:to_list(Q).

View File

@ -39,15 +39,15 @@
-export([
dispatch/3,
dispatch/4,
do_dispatch_with_ack/4
do_dispatch_with_ack/4,
redispatch/1
]).
-export([
maybe_ack/1,
maybe_nack_dropped/1,
nack_no_connection/1,
is_ack_required/1,
get_group/1
is_ack_required/1
]).
%% for testing
@ -96,6 +96,9 @@
-define(ACK, shared_sub_ack).
-define(NACK(Reason), {shared_sub_nack, Reason}).
-define(NO_ACK, no_ack).
-define(REDISPATCH_TO(GROUP, TOPIC), {GROUP, TOPIC}).
-type redispatch_to() :: ?REDISPATCH_TO(emqx_topic:group(), emqx_topic:topic()).
-record(state, {pmon}).
@ -144,7 +147,8 @@ dispatch(Group, Topic, Delivery = #delivery{message = Msg}, FailedSubs) ->
false ->
{error, no_subscribers};
{Type, SubPid} ->
case do_dispatch(SubPid, Group, Topic, Msg, Type) of
Msg1 = with_redispatch_to(Msg, Group, Topic),
case do_dispatch(SubPid, Group, Topic, Msg1, Type) of
ok ->
{ok, 1};
{error, _Reason} ->
@ -223,16 +227,53 @@ without_group_ack(Msg) ->
get_group_ack(Msg) ->
emqx_message:get_header(shared_dispatch_ack, Msg, ?NO_ACK).
with_redispatch_to(#message{qos = ?QOS_0} = Msg, _Group, _Topic) ->
Msg;
with_redispatch_to(Msg, Group, Topic) ->
emqx_message:set_headers(#{redispatch_to => ?REDISPATCH_TO(Group, Topic)}, Msg).
%% @hidden Redispatch is neede only for the messages with redispatch_to header added.
is_redispatch_needed(#message{} = Msg) ->
case get_redispatch_to(Msg) of
?REDISPATCH_TO(_, _) ->
true;
_ ->
false
end.
%% @doc Redispatch shared deliveries to other members in the group.
redispatch(Messages0) ->
Messages = lists:filter(fun is_redispatch_needed/1, Messages0),
case length(Messages) of
L when L > 0 ->
?SLOG(info, #{
msg => "redispatching_shared_subscription_message",
count => L
}),
lists:foreach(fun redispatch_shared_message/1, Messages);
_ ->
ok
end.
redispatch_shared_message(#message{} = Msg) ->
%% As long as it's still a #message{} record in inflight,
%% we should try to re-dispatch
?REDISPATCH_TO(Group, Topic) = get_redispatch_to(Msg),
%% Note that dispatch is called with self() in failed subs
%% This is done to avoid dispatching back to caller
Delivery = #delivery{sender = self(), message = Msg},
dispatch(Group, Topic, Delivery, [self()]).
%% @hidden Return the `redispatch_to` group-topic in the message header.
%% `false` is returned if the message is not a shared dispatch.
%% or when it's a QoS 0 message.
-spec get_redispatch_to(emqx_types:message()) -> redispatch_to() | false.
get_redispatch_to(Msg) ->
emqx_message:get_header(redispatch_to, Msg, false).
-spec is_ack_required(emqx_types:message()) -> boolean().
is_ack_required(Msg) -> ?NO_ACK =/= get_group_ack(Msg).
-spec get_group(emqx_types:message()) -> {ok, any()} | error.
get_group(Msg) ->
case get_group_ack(Msg) of
?NO_ACK -> error;
{Group, _Sender, _Ref} -> {ok, Group}
end.
%% @doc Negative ack dropped message due to inflight window or message queue being full.
-spec maybe_nack_dropped(emqx_types:message()) -> boolean().
maybe_nack_dropped(Msg) ->

View File

@ -19,6 +19,7 @@
-include_lib("emqx/include/emqx.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("kernel/include/file.hrl").
-include_lib("snabbkaffe/include/trace.hrl").
-export([
@ -46,6 +47,7 @@
filename/2,
trace_dir/0,
trace_file/1,
trace_file_detail/1,
delete_files_after_send/2
]).
@ -193,6 +195,16 @@ trace_file(File) ->
{error, Reason} -> {error, Node, Reason}
end.
trace_file_detail(File) ->
FileName = filename:join(trace_dir(), File),
Node = atom_to_binary(node()),
case file:read_file_info(FileName, [{'time', 'posix'}]) of
{ok, #file_info{size = Size, mtime = Mtime}} ->
{ok, #{size => Size, mtime => Mtime, node => Node}};
{error, Reason} ->
{error, #{reason => Reason, node => Node, file => File}}
end.
delete_files_after_send(TraceLog, Zips) ->
gen_server:cast(?MODULE, {delete_tag, self(), [TraceLog | Zips]}).

View File

@ -32,6 +32,12 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([]).
end_per_testcase(t_delayed_authorize, Config) ->
meck:unload(emqx_access_control),
Config;
end_per_testcase(_, Config) ->
Config.
t_authenticate(_) ->
?assertMatch({ok, _}, emqx_access_control:authenticate(clientinfo())).
@ -39,6 +45,28 @@ t_authorize(_) ->
Publish = ?PUBLISH_PACKET(?QOS_0, <<"t">>, 1, <<"payload">>),
?assertEqual(allow, emqx_access_control:authorize(clientinfo(), Publish, <<"t">>)).
t_delayed_authorize(_) ->
RawTopic = "$dealyed/1/foo/2",
InvalidTopic = "$dealyed/1/foo/3",
Topic = "foo/2",
ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]),
ok = meck:expect(
emqx_access_control,
do_authorize,
fun
(_, _, Topic) -> allow;
(_, _, _) -> deny
end
),
Publish1 = ?PUBLISH_PACKET(?QOS_0, RawTopic, 1, <<"payload">>),
?assertEqual(allow, emqx_access_control:authorize(clientinfo(), Publish1, RawTopic)),
Publish2 = ?PUBLISH_PACKET(?QOS_0, InvalidTopic, 1, <<"payload">>),
?assertEqual(allow, emqx_access_control:authorize(clientinfo(), Publish2, InvalidTopic)),
ok.
%%--------------------------------------------------------------------
%% Helper functions
%%--------------------------------------------------------------------

View File

@ -207,14 +207,6 @@ init_per_suite(Config) ->
ok = meck:new(emqx_cm, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end),
%% Access Control Meck
ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]),
ok = meck:expect(
emqx_access_control,
authenticate,
fun(_) -> {ok, #{is_superuser => false}} end
),
ok = meck:expect(emqx_access_control, authorize, fun(_, _, _) -> allow end),
%% Broker Meck
ok = meck:new(emqx_broker, [passthrough, no_history, no_link]),
%% Hooks Meck
@ -234,7 +226,6 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
meck:unload([
emqx_access_control,
emqx_metrics,
emqx_session,
emqx_broker,
@ -244,11 +235,21 @@ end_per_suite(_Config) ->
]).
init_per_testcase(_TestCase, Config) ->
%% Access Control Meck
ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]),
ok = meck:expect(
emqx_access_control,
authenticate,
fun(_) -> {ok, #{is_superuser => false}} end
),
ok = meck:expect(emqx_access_control, authorize, fun(_, _, _) -> allow end),
%% Set confs
OldConf = set_test_listener_confs(),
emqx_common_test_helpers:start_apps([]),
[{config, OldConf} | Config].
end_per_testcase(_TestCase, Config) ->
meck:unload([emqx_access_control]),
emqx_config:put(?config(config, Config)),
emqx_common_test_helpers:stop_apps([]),
Config.
@ -1115,6 +1116,32 @@ t_ws_cookie_init(_) ->
),
?assertMatch(#{ws_cookie := WsCookie}, emqx_channel:info(clientinfo, Channel)).
%%--------------------------------------------------------------------
%% Test cases for other mechnisms
%%--------------------------------------------------------------------
t_flapping_detect(_) ->
emqx_config:put_zone_conf(default, [flapping_detect, enable], true),
Parent = self(),
ok = meck:expect(
emqx_cm,
open_session,
fun(true, _ClientInfo, _ConnInfo) ->
{ok, #{session => session(), present => false}}
end
),
ok = meck:expect(emqx_access_control, authenticate, fun(_) -> {error, not_authorized} end),
ok = meck:expect(emqx_flapping, detect, fun(_) -> Parent ! flapping_detect end),
IdleChannel = channel(#{conn_state => idle}),
{shutdown, not_authorized, _ConnAck, _Channel} =
emqx_channel:handle_in(?CONNECT_PACKET(connpkt()), IdleChannel),
receive
flapping_detect -> ok
after 2000 ->
?assert(false, "Flapping detect should be exected in connecting progress")
end,
meck:unload([emqx_flapping]).
%%--------------------------------------------------------------------
%% Helper functions
%%--------------------------------------------------------------------

View File

@ -148,6 +148,32 @@ t_wss_conn(_) ->
{ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000),
ok = ssl:close(Socket).
t_format_bind(_) ->
?assertEqual(
":1883",
lists:flatten(emqx_listeners:format_bind(1883))
),
?assertEqual(
"0.0.0.0:1883",
lists:flatten(emqx_listeners:format_bind({{0, 0, 0, 0}, 1883}))
),
?assertEqual(
"[::]:1883",
lists:flatten(emqx_listeners:format_bind({{0, 0, 0, 0, 0, 0, 0, 0}, 1883}))
),
?assertEqual(
"127.0.0.1:1883",
lists:flatten(emqx_listeners:format_bind({{127, 0, 0, 1}, 1883}))
),
?assertEqual(
":1883",
lists:flatten(emqx_listeners:format_bind("1883"))
),
?assertEqual(
":1883",
lists:flatten(emqx_listeners:format_bind(":1883"))
).
render_config_file() ->
Path = local_path(["etc", "emqx.conf"]),
{ok, Temp} = file:read_file(Path),

View File

@ -175,3 +175,30 @@ ssl_opts_gc_after_handshake_test_not_rancher_listener_test() ->
Checked
),
ok.
to_ip_port_test_() ->
Ip = fun emqx_schema:to_ip_port/1,
Host = fun(Str) ->
case Ip(Str) of
{ok, {_, _} = Res} ->
%% assert
{ok, Res} = emqx_schema:to_host_port(Str);
_ ->
emqx_schema:to_host_port(Str)
end
end,
[
?_assertEqual({ok, 80}, Ip("80")),
?_assertEqual({error, bad_host_port}, Host("80")),
?_assertEqual({ok, 80}, Ip(":80")),
?_assertEqual({error, bad_host_port}, Host(":80")),
?_assertEqual({error, bad_ip_port}, Ip("localhost:80")),
?_assertEqual({ok, {"localhost", 80}}, Host("localhost:80")),
?_assertEqual({ok, {"example.com", 80}}, Host("example.com:80")),
?_assertEqual({ok, {{127, 0, 0, 1}, 80}}, Ip("127.0.0.1:80")),
?_assertEqual({error, bad_ip_port}, Ip("$:1900")),
?_assertEqual({error, bad_hostname}, Host("$:1900")),
?_assertMatch({ok, {_, 1883}}, Ip("[::1]:1883")),
?_assertMatch({ok, {_, 1883}}, Ip("::1:1883")),
?_assertMatch({ok, {_, 1883}}, Ip(":::1883"))
].

View File

@ -22,13 +22,24 @@
-include_lib("emqx/include/emqx.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-define(SUITE, ?MODULE).
-define(wait(For, Timeout),
emqx_common_test_helpers:wait_for(
?FUNCTION_NAME, ?LINE, fun() -> For end, Timeout
)
-define(WAIT(TIMEOUT, PATTERN, Res),
(fun() ->
receive
PATTERN ->
Res;
Other ->
ct:fail(#{
expected => ??PATTERN,
got => Other
})
after TIMEOUT ->
ct:fail({timeout, ??PATTERN})
end
end)()
).
-define(ack, shared_sub_ack).
@ -45,10 +56,26 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([]).
t_is_ack_required(_) ->
init_per_testcase(Case, Config) ->
try
?MODULE:Case({'init', Config})
catch
error:function_clause ->
Config
end.
end_per_testcase(Case, Config) ->
try
?MODULE:Case({'end', Config})
catch
error:function_clause ->
ok
end.
t_is_ack_required(Config) when is_list(Config) ->
?assertEqual(false, emqx_shared_sub:is_ack_required(#message{headers = #{}})).
t_maybe_nack_dropped(_) ->
t_maybe_nack_dropped(Config) when is_list(Config) ->
?assertEqual(false, emqx_shared_sub:maybe_nack_dropped(#message{headers = #{}})),
Msg = #message{headers = #{shared_dispatch_ack => {<<"group">>, self(), for_test}}},
?assertEqual(true, emqx_shared_sub:maybe_nack_dropped(Msg)),
@ -60,7 +87,7 @@ t_maybe_nack_dropped(_) ->
end
).
t_nack_no_connection(_) ->
t_nack_no_connection(Config) when is_list(Config) ->
Msg = #message{headers = #{shared_dispatch_ack => {<<"group">>, self(), for_test}}},
?assertEqual(ok, emqx_shared_sub:nack_no_connection(Msg)),
?assertEqual(
@ -71,7 +98,7 @@ t_nack_no_connection(_) ->
end
).
t_maybe_ack(_) ->
t_maybe_ack(Config) when is_list(Config) ->
?assertEqual(#message{headers = #{}}, emqx_shared_sub:maybe_ack(#message{headers = #{}})),
Msg = #message{headers = #{shared_dispatch_ack => {<<"group">>, self(), for_test}}},
?assertEqual(
@ -86,10 +113,7 @@ t_maybe_ack(_) ->
end
).
% t_subscribers(_) ->
% error('TODO').
t_random_basic(_) ->
t_random_basic(Config) when is_list(Config) ->
ok = ensure_config(random),
ClientId = <<"ClientId">>,
Topic = <<"foo">>,
@ -121,7 +145,7 @@ t_random_basic(_) ->
%% After the connection for the 2nd session is also closed,
%% i.e. when all clients are offline, the following message(s)
%% should be delivered randomly.
t_no_connection_nack(_) ->
t_no_connection_nack(Config) when is_list(Config) ->
ok = ensure_config(sticky),
Publisher = <<"publisher">>,
Subscriber1 = <<"Subscriber1">>,
@ -153,54 +177,22 @@ t_no_connection_nack(_) ->
%% This is the connection which was picked by broker to dispatch (sticky) for 1st message
?assertMatch([#{packet_id := 1}], recv_msgs(1)),
%% Now kill the connection, expect all following messages to be delivered to the other
%% subscriber.
%emqx_mock_client:stop(ConnPid),
%% sleep then make synced calls to session processes to ensure that
%% the connection pid's 'EXIT' message is propagated to the session process
%% also to be sure sessions are still alive
% timer:sleep(2),
% _ = emqx_session:info(SPid1),
% _ = emqx_session:info(SPid2),
% %% Now we know what is the other still alive connection
% [TheOtherConnPid] = [SubConnPid1, SubConnPid2] -- [ConnPid],
% %% Send some more messages
% PacketIdList = lists:seq(2, 10),
% lists:foreach(fun(Id) ->
% SendF(Id),
% ?wait(Received(Id, TheOtherConnPid), 1000)
% end, PacketIdList),
% %% Now close the 2nd (last connection)
% emqx_mock_client:stop(TheOtherConnPid),
% timer:sleep(2),
% %% both sessions should have conn_pid = undefined
% ?assertEqual({conn_pid, undefined}, lists:keyfind(conn_pid, 1, emqx_session:info(SPid1))),
% ?assertEqual({conn_pid, undefined}, lists:keyfind(conn_pid, 1, emqx_session:info(SPid2))),
% %% send more messages, but all should be queued in session state
% lists:foreach(fun(Id) -> SendF(Id) end, PacketIdList),
% {_, L1} = lists:keyfind(mqueue_len, 1, emqx_session:info(SPid1)),
% {_, L2} = lists:keyfind(mqueue_len, 1, emqx_session:info(SPid2)),
% ?assertEqual(length(PacketIdList), L1 + L2),
% %% clean up
% emqx_mock_client:close_session(PubConnPid),
% emqx_sm:close_session(SPid1),
% emqx_sm:close_session(SPid2),
ok.
t_random(_) ->
t_random(Config) when is_list(Config) ->
ok = ensure_config(random, true),
test_two_messages(random).
t_round_robin(_) ->
t_round_robin(Config) when is_list(Config) ->
ok = ensure_config(round_robin, true),
test_two_messages(round_robin).
t_round_robin_per_group(_) ->
t_round_robin_per_group(Config) when is_list(Config) ->
ok = ensure_config(round_robin_per_group, true),
test_two_messages(round_robin_per_group).
%% this would fail if executed with the standard round_robin strategy
t_round_robin_per_group_even_distribution_one_group(_) ->
t_round_robin_per_group_even_distribution_one_group(Config) when is_list(Config) ->
ok = ensure_config(round_robin_per_group, true),
Topic = <<"foo/bar">>,
Group = <<"group1">>,
@ -264,7 +256,7 @@ t_round_robin_per_group_even_distribution_one_group(_) ->
),
ok.
t_round_robin_per_group_even_distribution_two_groups(_) ->
t_round_robin_per_group_even_distribution_two_groups(Config) when is_list(Config) ->
ok = ensure_config(round_robin_per_group, true),
Topic = <<"foo/bar">>,
{ok, ConnPid1} = emqtt:start_link([{clientid, <<"C0">>}]),
@ -350,19 +342,19 @@ t_round_robin_per_group_even_distribution_two_groups(_) ->
),
ok.
t_sticky(_) ->
t_sticky(Config) when is_list(Config) ->
ok = ensure_config(sticky, true),
test_two_messages(sticky).
t_hash(_) ->
t_hash(Config) when is_list(Config) ->
ok = ensure_config(hash, false),
test_two_messages(hash).
t_hash_clinetid(_) ->
t_hash_clinetid(Config) when is_list(Config) ->
ok = ensure_config(hash_clientid, false),
test_two_messages(hash_clientid).
t_hash_topic(_) ->
t_hash_topic(Config) when is_list(Config) ->
ok = ensure_config(hash_topic, false),
ClientId1 = <<"ClientId1">>,
ClientId2 = <<"ClientId2">>,
@ -407,7 +399,7 @@ t_hash_topic(_) ->
ok.
%% if the original subscriber dies, change to another one alive
t_not_so_sticky(_) ->
t_not_so_sticky(Config) when is_list(Config) ->
ok = ensure_config(sticky),
ClientId1 = <<"ClientId1">>,
ClientId2 = <<"ClientId2">>,
@ -474,14 +466,14 @@ last_message(ExpectedPayload, Pids) ->
last_message(ExpectedPayload, Pids, Timeout) ->
receive
{publish, #{client_pid := Pid, payload := ExpectedPayload}} ->
ct:pal("~p ====== ~p", [Pids, Pid]),
?assert(lists:member(Pid, Pids)),
{true, Pid}
after Timeout ->
ct:pal("not yet"),
<<"not yet?">>
end.
t_dispatch(_) ->
t_dispatch(Config) when is_list(Config) ->
ok = ensure_config(random),
Topic = <<"foo">>,
?assertEqual(
@ -494,13 +486,13 @@ t_dispatch(_) ->
emqx_shared_sub:dispatch(<<"group1">>, Topic, #delivery{message = #message{}})
).
t_uncovered_func(_) ->
t_uncovered_func(Config) when is_list(Config) ->
ignored = gen_server:call(emqx_shared_sub, ignored),
ok = gen_server:cast(emqx_shared_sub, ignored),
ignored = emqx_shared_sub ! ignored,
{mnesia_table_event, []} = emqx_shared_sub ! {mnesia_table_event, []}.
t_per_group_config(_) ->
t_per_group_config(Config) when is_list(Config) ->
ok = ensure_group_config(#{
<<"local_group">> => local,
<<"round_robin_group">> => round_robin,
@ -521,7 +513,7 @@ t_per_group_config(_) ->
test_two_messages(round_robin_per_group, <<"round_robin_per_group_group">>),
test_two_messages(round_robin_per_group, <<"round_robin_per_group_group">>).
t_local(_) ->
t_local(Config) when is_list(Config) ->
GroupConfig = #{
<<"local_group">> => local,
<<"round_robin_group">> => round_robin,
@ -567,7 +559,7 @@ t_local(_) ->
?assertNotEqual(UsedSubPid1, UsedSubPid2),
ok.
t_remote(_) ->
t_remote(Config) when is_list(Config) ->
%% This testcase verifies dispatching of shared messages to the remote nodes via backplane API.
%%
%% In this testcase we start two EMQX nodes: local and remote.
@ -594,7 +586,7 @@ t_remote(_) ->
try
{ok, ClientPidLocal} = emqtt:connect(ConnPidLocal),
{ok, ClientPidRemote} = emqtt:connect(ConnPidRemote),
{ok, _ClientPidRemote} = emqtt:connect(ConnPidRemote),
emqtt:subscribe(ConnPidRemote, {<<"$share/remote_group/", Topic/binary>>, 0}),
@ -620,7 +612,7 @@ t_remote(_) ->
stop_slave(Node)
end.
t_local_fallback(_) ->
t_local_fallback(Config) when is_list(Config) ->
ok = ensure_group_config(#{
<<"local_group">> => local,
<<"round_robin_group">> => round_robin,
@ -653,9 +645,14 @@ t_local_fallback(_) ->
%% This one tests that broker tries to select another shared subscriber
%% If the first one doesn't return an ACK
t_redispatch(_) ->
ok = ensure_config(sticky, true),
t_redispatch_qos1_with_ack(Config) when is_list(Config) ->
test_redispatch_qos1(Config, true).
t_redispatch_qos1_no_ack(Config) when is_list(Config) ->
test_redispatch_qos1(Config, false).
test_redispatch_qos1(_Config, AckEnabled) ->
ok = ensure_config(sticky, AckEnabled),
Group = <<"group1">>,
Topic = <<"foo/bar">>,
ClientId1 = <<"ClientId1">>,
@ -682,10 +679,292 @@ t_redispatch(_) ->
emqtt:stop(UsedSubPid2),
ok.
%% QoS 1 messages published to a shared group whose members are all
%% offline — but with persistent sessions — must still be dispatched to
%% one of the live session processes and queued there (random fallback,
%% even though the configured strategy is 'sticky').
t_qos1_random_dispatch_if_all_members_are_down(Config) when is_list(Config) ->
    ok = ensure_config(sticky, true),
    Group = <<"group1">>,
    Topic = <<"foo/bar">>,
    ClientId1 = <<"ClientId1">>,
    ClientId2 = <<"ClientId2">>,
    %% clean_start = false keeps the session processes alive after the
    %% MQTT connections are stopped below.
    SubOpts = [{clean_start, false}],
    {ok, ConnPub} = emqtt:start_link([{clientid, <<"pub">>}]),
    {ok, _} = emqtt:connect(ConnPub),
    {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1} | SubOpts]),
    {ok, ConnPid2} = emqtt:start_link([{clientid, ClientId2} | SubOpts]),
    {ok, _} = emqtt:connect(ConnPid1),
    {ok, _} = emqtt:connect(ConnPid2),
    emqtt:subscribe(ConnPid1, {<<"$share/", Group/binary, "/foo/bar">>, 1}),
    emqtt:subscribe(ConnPid2, {<<"$share/", Group/binary, "/foo/bar">>, 1}),
    %% stop both connections; only the (persistent) sessions remain
    ok = emqtt:stop(ConnPid1),
    ok = emqtt:stop(ConnPid2),
    [Pid1, Pid2] = emqx_shared_sub:subscribers(Group, Topic),
    %% the shared-sub members are the session pids, still alive
    ?assert(is_process_alive(Pid1)),
    ?assert(is_process_alive(Pid2)),
    {ok, _} = emqtt:publish(ConnPub, Topic, <<"hello11">>, 1),
    ct:sleep(100),
    {ok, Msgs1} = gen_server:call(Pid1, get_mqueue),
    {ok, Msgs2} = gen_server:call(Pid2, get_mqueue),
    %% assert the message is in mqueue (because socket is closed)
    ?assertMatch([#message{payload = <<"hello11">>}], Msgs1 ++ Msgs2),
    emqtt:stop(ConnPub),
    ok.
%% No ack, QoS 2 subscriptions,
%% client1 receives one message, sends PUBREC, then is suspended,
%% client2 acts normally (auto_ack=true).
%% Expected behaviour:
%% the messages sent to client1's inflight and mq are re-dispatched after client1 is down
t_dispatch_qos2({init, Config}) when is_list(Config) ->
    %% Limit inflight to 1 so the suspended client can hold at most one
    %% unacked message; the rest queue up in its session.
    emqx_config:put_zone_conf(default, [mqtt, max_inflight], 1),
    Config;
t_dispatch_qos2({'end', Config}) when is_list(Config) ->
    %% restore the default (0 = unlimited) after the test case
    emqx_config:put_zone_conf(default, [mqtt, max_inflight], 0);
t_dispatch_qos2(Config) when is_list(Config) ->
    ok = ensure_config(round_robin, _AckEnabled = false),
    Topic = <<"foo/bar/1">>,
    ClientId1 = <<"ClientId1">>,
    ClientId2 = <<"ClientId2">>,
    %% client1 does not auto-ack: it will send PUBREC but never PUBCOMP
    {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}, {auto_ack, false}]),
    {ok, ConnPid2} = emqtt:start_link([{clientid, ClientId2}, {auto_ack, true}]),
    {ok, _} = emqtt:connect(ConnPid1),
    {ok, _} = emqtt:connect(ConnPid2),
    emqtt:subscribe(ConnPid1, {<<"$share/group/foo/bar/#">>, 2}),
    emqtt:subscribe(ConnPid2, {<<"$share/group/foo/bar/#">>, 2}),
    Message1 = emqx_message:make(ClientId1, 2, Topic, <<"hello1">>),
    Message2 = emqx_message:make(ClientId1, 2, Topic, <<"hello2">>),
    Message3 = emqx_message:make(ClientId1, 2, Topic, <<"hello3">>),
    Message4 = emqx_message:make(ClientId1, 2, Topic, <<"hello4">>),
    ct:sleep(100),
    ok = sys:suspend(ConnPid1),
    %% One message is inflight
    ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message1)),
    ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message2)),
    ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message3)),
    ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message4)),
    %% assert client 2 receives two messages, they are either 1,3 or 2,4 depending
    %% on if it's picked as the first one for round_robin
    MsgRec1 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P1}}, P1),
    MsgRec2 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P2}}, P2),
    case MsgRec2 of
        <<"hello3">> ->
            ?assertEqual(<<"hello1">>, MsgRec1);
        <<"hello4">> ->
            ?assertEqual(<<"hello2">>, MsgRec1)
    end,
    sys:resume(ConnPid1),
    %% emqtt subscriber automatically sends PUBREC, but since auto_ack is set to false
    %% it will never send PUBCOMP, hence EMQX should not attempt to send
    %% the 4th message yet since max_inflight is 1.
    MsgRec3 = ?WAIT(2000, {publish, #{client_pid := ConnPid1, payload := P3}}, P3),
    ct:sleep(100),
    %% no message expected
    ?assertEqual([], collect_msgs(0)),
    %% now kill client 1
    kill_process(ConnPid1),
    %% client 2 should receive the message
    MsgRec4 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P4}}, P4),
    %% the message stuck in client1's session is re-dispatched to client2;
    %% which payloads pair up depends on which pair client2 got initially
    case MsgRec2 of
        <<"hello3">> ->
            ?assertEqual(<<"hello2">>, MsgRec3),
            ?assertEqual(<<"hello4">>, MsgRec4);
        <<"hello4">> ->
            ?assertEqual(<<"hello1">>, MsgRec3),
            ?assertEqual(<<"hello3">>, MsgRec4)
    end,
    emqtt:stop(ConnPid2),
    ok.
%% QoS 0 subscriptions: delivery is downgraded to QoS 0, messages are not
%% tracked by the session, so when the receiving subscriber dies nothing
%% is re-dispatched to the remaining group member.
t_dispatch_qos0({init, Config}) when is_list(Config) ->
    Config;
t_dispatch_qos0({'end', Config}) when is_list(Config) ->
    ok;
t_dispatch_qos0(Config) when is_list(Config) ->
    ok = ensure_config(round_robin, _AckEnabled = false),
    Topic = <<"foo/bar/1">>,
    ClientId1 = <<"ClientId1">>,
    ClientId2 = <<"ClientId2">>,
    {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}, {auto_ack, false}]),
    {ok, ConnPid2} = emqtt:start_link([{clientid, ClientId2}, {auto_ack, true}]),
    {ok, _} = emqtt:connect(ConnPid1),
    {ok, _} = emqtt:connect(ConnPid2),
    %% subscribe with QoS 0
    emqtt:subscribe(ConnPid1, {<<"$share/group/foo/bar/#">>, 0}),
    emqtt:subscribe(ConnPid2, {<<"$share/group/foo/bar/#">>, 0}),
    %% publish with QoS 2, but should be downgraded to 0 as the subscribers
    %% subscribe with QoS 0
    Message1 = emqx_message:make(ClientId1, 2, Topic, <<"hello1">>),
    Message2 = emqx_message:make(ClientId1, 2, Topic, <<"hello2">>),
    Message3 = emqx_message:make(ClientId1, 2, Topic, <<"hello3">>),
    Message4 = emqx_message:make(ClientId1, 2, Topic, <<"hello4">>),
    ct:sleep(100),
    %% suspend client1 so it cannot consume its share of the messages
    ok = sys:suspend(ConnPid1),
    ?assertMatch([_], emqx:publish(Message1)),
    ?assertMatch([_], emqx:publish(Message2)),
    ?assertMatch([_], emqx:publish(Message3)),
    ?assertMatch([_], emqx:publish(Message4)),
    %% client2 receives its two round-robin messages
    MsgRec1 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P1}}, P1),
    MsgRec2 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P2}}, P2),
    %% assert hello2 > hello1 or hello4 > hello3 (in-order delivery)
    ?assert(MsgRec2 > MsgRec1),
    kill_process(ConnPid1),
    %% expect no redispatch
    ?assertEqual([], collect_msgs(timer:seconds(2))),
    emqtt:stop(ConnPid2),
    ok.
%% A persistent session (clean_start = false, Session-Expiry-Interval > 0)
%% keeps its shared subscription across a session takeover: a message
%% published while the client is offline is delivered to the new
%% connection that takes over the session, with no duplicates.
t_session_takeover({init, Config}) when is_list(Config) ->
    Config;
t_session_takeover({'end', Config}) when is_list(Config) ->
    ok;
t_session_takeover(Config) when is_list(Config) ->
    Topic = <<"t1/a">>,
    %% unique client ID so repeated runs do not pick up a stale session
    ClientId = iolist_to_binary("c" ++ integer_to_list(erlang:system_time())),
    Opts = [
        {clientid, ClientId},
        {auto_ack, true},
        {proto_ver, v5},
        {clean_start, false},
        {properties, #{'Session-Expiry-Interval' => 60}}
    ],
    {ok, ConnPid1} = emqtt:start_link(Opts),
    %% with the same client ID, start another client
    {ok, ConnPid2} = emqtt:start_link(Opts),
    {ok, _} = emqtt:connect(ConnPid1),
    emqtt:subscribe(ConnPid1, {<<"$share/t1/", Topic/binary>>, _QoS = 1}),
    Message1 = emqx_message:make(<<"dummypub">>, 2, Topic, <<"hello1">>),
    Message2 = emqx_message:make(<<"dummypub">>, 2, Topic, <<"hello2">>),
    Message3 = emqx_message:make(<<"dummypub">>, 2, Topic, <<"hello3">>),
    Message4 = emqx_message:make(<<"dummypub">>, 2, Topic, <<"hello4">>),
    %% Make sure client1 is functioning
    ?assertMatch([_], emqx:publish(Message1)),
    {true, _} = last_message(<<"hello1">>, [ConnPid1]),
    %% Kill client1
    emqtt:stop(ConnPid1),
    %% publish another message (should end up in client1's session)
    ?assertMatch([_], emqx:publish(Message2)),
    %% connect client2 (with the same clientid)
    %% should trigger session take over
    {ok, _} = emqtt:connect(ConnPid2),
    ?assertMatch([_], emqx:publish(Message3)),
    ?assertMatch([_], emqx:publish(Message4)),
    %% the queued message and the new ones all arrive at client2
    {true, _} = last_message(<<"hello2">>, [ConnPid2]),
    {true, _} = last_message(<<"hello3">>, [ConnPid2]),
    {true, _} = last_message(<<"hello4">>, [ConnPid2]),
    %% no duplicate or leftover deliveries
    ?assertEqual([], collect_msgs(timer:seconds(2))),
    emqtt:stop(ConnPid2),
    ok.
%% Same setup as t_dispatch_qos2, but client1 is kicked via
%% emqx_cm:kick_session/1 instead of killed: messages stuck in the
%% kicked session must NOT be re-dispatched to the remaining member.
t_session_kicked({init, Config}) when is_list(Config) ->
    %% Limit inflight to 1 so the suspended client can hold at most one
    %% unacked message; the rest queue up in its session.
    emqx_config:put_zone_conf(default, [mqtt, max_inflight], 1),
    Config;
t_session_kicked({'end', Config}) when is_list(Config) ->
    %% restore the default (0 = unlimited) after the test case
    emqx_config:put_zone_conf(default, [mqtt, max_inflight], 0);
t_session_kicked(Config) when is_list(Config) ->
    ok = ensure_config(round_robin, _AckEnabled = false),
    Topic = <<"foo/bar/1">>,
    ClientId1 = <<"ClientId1">>,
    ClientId2 = <<"ClientId2">>,
    %% client1 does not auto-ack: it will send PUBREC but never PUBCOMP
    {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}, {auto_ack, false}]),
    {ok, ConnPid2} = emqtt:start_link([{clientid, ClientId2}, {auto_ack, true}]),
    {ok, _} = emqtt:connect(ConnPid1),
    {ok, _} = emqtt:connect(ConnPid2),
    emqtt:subscribe(ConnPid1, {<<"$share/group/foo/bar/#">>, 2}),
    emqtt:subscribe(ConnPid2, {<<"$share/group/foo/bar/#">>, 2}),
    Message1 = emqx_message:make(ClientId1, 2, Topic, <<"hello1">>),
    Message2 = emqx_message:make(ClientId1, 2, Topic, <<"hello2">>),
    Message3 = emqx_message:make(ClientId1, 2, Topic, <<"hello3">>),
    Message4 = emqx_message:make(ClientId1, 2, Topic, <<"hello4">>),
    ct:sleep(100),
    ok = sys:suspend(ConnPid1),
    %% One message is inflight
    ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message1)),
    ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message2)),
    ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message3)),
    ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message4)),
    %% assert client 2 receives two messages, they are either 1,3 or 2,4 depending
    %% on if it's picked as the first one for round_robin
    MsgRec1 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P1}}, P1),
    MsgRec2 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P2}}, P2),
    case MsgRec2 of
        <<"hello3">> ->
            ?assertEqual(<<"hello1">>, MsgRec1);
        <<"hello4">> ->
            ?assertEqual(<<"hello2">>, MsgRec1)
    end,
    sys:resume(ConnPid1),
    %% emqtt subscriber automatically sends PUBREC, but since auto_ack is set to false
    %% it will never send PUBCOMP, hence EMQX should not attempt to send
    %% the 4th message yet since max_inflight is 1.
    MsgRec3 = ?WAIT(2000, {publish, #{client_pid := ConnPid1, payload := P3}}, P3),
    case MsgRec2 of
        <<"hello3">> ->
            ?assertEqual(<<"hello2">>, MsgRec3);
        <<"hello4">> ->
            ?assertEqual(<<"hello1">>, MsgRec3)
    end,
    %% no message expected
    ?assertEqual([], collect_msgs(0)),
    %% now kick client 1
    kill_process(ConnPid1, fun(_Pid) -> emqx_cm:kick_session(ClientId1) end),
    %% client 2 should NOT receive the message
    ?assertEqual([], collect_msgs(1000)),
    emqtt:stop(ConnPid2),
    %% double-check nothing trickled in after client2 is gone
    ?assertEqual([], collect_msgs(0)),
    ok.
%%--------------------------------------------------------------------
%% help functions
%%--------------------------------------------------------------------
%% Brutally kill Pid and wait for it to terminate.
kill_process(Pid) ->
    kill_process(Pid, fun(P) -> erlang:exit(P, kill) end).

%% Apply WithFun (the terminating action) to Pid, then block until the
%% monitor reports the process down; fail after 10 seconds.
kill_process(Pid, WithFun) ->
    true = unlink(Pid),
    MRef = monitor(process, Pid),
    _ = WithFun(Pid),
    receive
        {'DOWN', MRef, process, Pid, _Reason} ->
            ok
    after 10_000 ->
        error(timeout)
    end.
%% Drain the caller's mailbox: gather every message that arrives within
%% Timeout milliseconds (per message) and return them in arrival order.
collect_msgs(Timeout) ->
    collect_msgs([], Timeout).

%% Accumulator clause: messages are prepended as they arrive and the
%% list is reversed once the mailbox stays quiet for Timeout ms.
collect_msgs(Collected, Timeout) ->
    receive
        Message -> collect_msgs([Message | Collected], Timeout)
    after Timeout ->
        lists:reverse(Collected)
    end.
%% Apply the given shared-subscription strategy with per-message
%% ACK enabled (the default).
ensure_config(Strategy) ->
    %% true = ack enabled
    ensure_config(Strategy, true).

View File

@ -33,6 +33,7 @@
all() -> emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
emqx_common_test_helpers:boot_modules(all),
emqx_channel_SUITE:set_test_listener_confs(),
?check_trace(
?wait_async_action(

View File

@ -57,7 +57,7 @@ emqx_authn_jwt {
endpoint {
desc {
en: """JWKS endpoint, it's a read-only endpoint that returns the server's public key set in the JWKS format."""
zh: """JWKS 端点, 它是一个以 JWKS 格式返回服务端的公钥集的只读端点。"""
zh: """JWKS 端点 它是一个以 JWKS 格式返回服务端的公钥集的只读端点。"""
}
label {
en: """JWKS Endpoint"""

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authn, [
{description, "EMQX Authentication"},
{vsn, "0.1.7"},
{vsn, "0.1.8"},
{modules, []},
{registered, [emqx_authn_sup, emqx_authn_registry]},
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},

View File

@ -70,7 +70,9 @@ do_check_config(#{<<"mechanism">> := Mec} = Config, Opts) ->
#{?CONF_NS_BINARY => Config},
Opts#{atom_key => true}
)
end.
end;
do_check_config(_Config, _Opts) ->
throw({invalid_config, "mechanism_field_required"}).
atom(Bin) ->
try

View File

@ -34,7 +34,7 @@
% Swagger
-define(API_TAGS_GLOBAL, [<<"Authentication">>]).
-define(API_TAGS_SINGLE, [<<"Listener authentication">>]).
-define(API_TAGS_SINGLE, [<<"Listener Authentication">>]).
-export([
api_spec/0,

View File

@ -37,8 +37,10 @@
start(_StartType, _StartArgs) ->
ok = mria_rlog:wait_for_shards([?AUTH_SHARD], infinity),
{ok, Sup} = emqx_authn_sup:start_link(),
ok = initialize(),
{ok, Sup}.
case initialize() of
ok -> {ok, Sup};
{error, Reason} -> {error, Reason}
end.
stop(_State) ->
ok = deinitialize(),
@ -49,6 +51,7 @@ stop(_State) ->
%%------------------------------------------------------------------------------
initialize() ->
try
ok = ?AUTHN:register_providers(emqx_authn:providers()),
lists:foreach(
@ -60,7 +63,14 @@ initialize() ->
)
end,
chain_configs()
).
)
of
ok -> ok
catch
throw:Reason ->
?SLOG(error, #{msg => "failed_to_initialize_authentication", reason => Reason}),
{error, {failed_to_initialize_authentication, Reason}}
end.
deinitialize() ->
ok = ?AUTHN:deregister_providers(provider_types()),

View File

@ -30,7 +30,7 @@
% Swagger
-define(API_TAGS_GLOBAL, [<<"Authentication">>]).
-define(API_TAGS_SINGLE, [<<"Listener authentication">>]).
-define(API_TAGS_SINGLE, [<<"Listener Authentication">>]).
-export([
api_spec/0,

View File

@ -2,7 +2,7 @@ emqx_authz_api_cache {
authorization_cache_delete {
desc {
en: """Clean all authorization cache in the cluster."""
zh: """清除集群中所有鉴权数据缓存"""
zh: """清除集群中所有授权数据缓存。"""
}
}
}

View File

@ -1,8 +1,8 @@
emqx_authz_api_schema {
enable {
desc {
en: """Set to <code>true</code> or <code>false</code> to disable this ACL provider"""
zh: """设为 <code>true</code> 或 <code>false</code> 以启用或禁用此访问控制数据源"""
en: """Set to <code>true</code> or <code>false</code> to disable this ACL provider."""
zh: """设为 <code>true</code> 或 <code>false</code> 以启用或禁用此访问控制数据源"""
}
label {
en: """enable"""
@ -13,7 +13,7 @@ emqx_authz_api_schema {
type {
desc {
en: """Backend type."""
zh: """数据后端类型"""
zh: """数据后端类型"""
}
label {
en: """type"""
@ -26,7 +26,7 @@ emqx_authz_api_schema {
rules {
desc {
en: """Authorization static file rules."""
zh: """静态鉴权文件规则"""
zh: """静态授权文件规则。"""
}
label {
en: """rules"""
@ -39,7 +39,7 @@ emqx_authz_api_schema {
method {
desc {
en: """HTTP method."""
zh: """HTTP 请求方法"""
zh: """HTTP 请求方法"""
}
label {
en: """method"""
@ -50,7 +50,7 @@ emqx_authz_api_schema {
url {
desc {
en: """URL of the auth server."""
zh: """认证服务器 URL"""
zh: """认证服务器 URL"""
}
label {
en: """url"""
@ -72,7 +72,7 @@ emqx_authz_api_schema {
headers_no_content_type {
desc {
en: """List of HTTP headers (without <code>content-type</code>)."""
zh: """HTTP Headers 列表(无 <code>content-type</code>"""
zh: """HTTP Headers 列表(无 <code>content-type</code>"""
}
label {
en: """headers_no_content_type"""
@ -83,7 +83,7 @@ emqx_authz_api_schema {
body {
desc {
en: """HTTP request body."""
zh: """HTTP 请求体"""
zh: """HTTP 请求体"""
}
label {
en: """body"""
@ -94,7 +94,7 @@ emqx_authz_api_schema {
request_timeout {
desc {
en: """Request timeout."""
zh: """请求超时时间"""
zh: """请求超时时间"""
}
label {
en: """request_timeout"""
@ -111,7 +111,7 @@ emqx_authz_api_schema {
collection {
desc {
en: """`MongoDB` collection containing the authorization data."""
zh: """`MongoDB` 鉴权数据集"""
zh: """`MongoDB` 授权数据集。"""
}
label {
en: """collection"""
@ -153,7 +153,7 @@ Filter supports the following placeholders:
cmd {
desc {
en: """Database query used to retrieve authorization data."""
zh: """访问控制数据查询命令"""
zh: """访问控制数据查询命令"""
}
label {
en: """cmd"""
@ -166,7 +166,7 @@ Filter supports the following placeholders:
query {
desc {
en: """Database query used to retrieve authorization data."""
zh: """访问控制数据查询语句"""
zh: """访问控制数据查询语句"""
}
label {
en: """query"""
@ -178,8 +178,8 @@ Filter supports the following placeholders:
position {
desc {
en: """Where to place the source"""
zh: """认证数据源位置"""
en: """Where to place the source."""
zh: """认证数据源位置"""
}
label {
en: """position"""

View File

@ -2,14 +2,14 @@ emqx_authz_api_settings {
authorization_settings_get {
desc {
en: """Get authorization settings"""
zh: """获取权配置"""
zh: """获取权配置"""
}
}
authorization_settings_put {
desc {
en: """Update authorization settings"""
zh: """更新权配置"""
zh: """更新权配置"""
}
}
}

View File

@ -2,56 +2,56 @@ emqx_authz_api_sources {
authorization_sources_get {
desc {
en: """List all authorization sources"""
zh: """列出所有权数据源"""
zh: """列出所有权数据源"""
}
}
authorization_sources_post {
desc {
en: """Add a new source"""
zh: """添加权数据源"""
zh: """添加权数据源"""
}
}
authorization_sources_type_get {
desc {
      en: """Get an authorization source"""
zh: """获取指定类型的权数据源"""
zh: """获取指定类型的权数据源"""
}
}
authorization_sources_type_put {
desc {
en: """Update source"""
zh: """更新指定类型的权数据源"""
zh: """更新指定类型的权数据源"""
}
}
authorization_sources_type_delete {
desc {
en: """Delete source"""
zh: """删除指定类型的权数据源"""
zh: """删除指定类型的权数据源"""
}
}
authorization_sources_type_status_get {
desc {
      en: """Get an authorization source"""
zh: """获取指定权数据源的状态"""
zh: """获取指定权数据源的状态"""
}
}
authorization_sources_type_move_post {
desc {
      en: """Change the execution order of sources"""
zh: """更新权数据源的优先执行顺序"""
zh: """更新权数据源的优先执行顺序"""
}
}
sources {
desc {
en: """Authorization source"""
zh: """权数据源列表"""
zh: """权数据源列表"""
}
label {
en: """sources"""
@ -62,7 +62,7 @@ emqx_authz_api_sources {
sources {
desc {
en: """Authorization sources"""
zh: """权数据源列表"""
zh: """权数据源列表"""
}
label {
en: """sources"""
@ -84,7 +84,7 @@ emqx_authz_api_sources {
source {
desc {
en: """Authorization source"""
zh: """权数据源"""
zh: """权数据源"""
}
label {
en: """source"""

View File

@ -2,41 +2,41 @@ emqx_authz_schema {
sources {
desc {
en: """
Authorization data sources.</br>
Authorization data sources.<br/>
An array of authorization (ACL) data providers.
It is designed as an array, not a hash-map, so the sources can be
ordered to form a chain of access controls.</br>
ordered to form a chain of access controls.<br/>
When authorizing a 'publish' or 'subscribe' action, the configured
sources are checked in order. When checking an ACL source,
in case the client (identified by username or client ID) is not found,
it moves on to the next source. And it stops immediately
once an 'allow' or 'deny' decision is returned.</br>
once an 'allow' or 'deny' decision is returned.<br/>
If the client is not found in any of the sources,
the default action configured in 'authorization.no_match' is applied.</br>
the default action configured in 'authorization.no_match' is applied.<br/>
NOTE:
The source elements are identified by their 'type'.
It is NOT allowed to configure two or more sources of the same type.
"""
zh: """
鉴权数据源.</br>
鉴权(ACL)数据源的列表.
它被设计为一个数组,而不是一个散列映射,
所以可以作为链式访问控制.</br>
授权数据源。<br/>
授权ACL数据源的列表。
它被设计为一个数组,而不是一个散列映射,
所以可以作为链式访问控制。<br/>
当授权一个 'publish' 或 'subscribe' 行为时,
当授权一个 'publish' 或 'subscribe' 行为时
该配置列表中的所有数据源将按顺序进行检查。
如果在某个客户端未找到时(使用 ClientID 或 Username),
将会移动到下一个数据源. 直至得到 'allow' 或 'deny' 的结果.</br>
如果在某个客户端未找到时(使用 ClientID 或 Username)
将会移动到下一个数据源。直至得到 'allow' 或 'deny' 的结果。<br/>
如果在任何数据源中都未找到对应的客户端信息,
配置的默认行为 ('authorization.no_match') 将生效.</br>
如果在任何数据源中都未找到对应的客户端信息
配置的默认行为 ('authorization.no_match') 将生效。<br/>
注意:
数据源使用 'type' 进行标识.
使用同一类型的数据源多于一次不被允许.
注意
数据源使用 'type' 进行标识
使用同一类型的数据源多于一次不被允许
"""
}
label {
@ -83,7 +83,7 @@ It is NOT allowed to configure two or more sources of the same type.
file {
desc {
en: """Authorization using a static file."""
zh: """使用静态文件权"""
zh: """使用静态文件权"""
}
label {
en: """file"""
@ -109,7 +109,7 @@ and the old file will not be used anymore.
那么可以将该文件置于任何 EMQX 可以访问到的位置。
如果从 EMQX Dashboard 或 HTTP API 创建或修改了规则集,
那么EMQX将会生成一个新的文件并将它存放在 `data_dir` 下的 `authz` 子目录中,
那么EMQX将会生成一个新的文件并将它存放在 `data_dir` 下的 `authz` 子目录中
并从此弃用旧的文件。"""
}
label {
@ -123,7 +123,7 @@ and the old file will not be used anymore.
http_get {
desc {
en: """Authorization using an external HTTP server (via GET requests)."""
zh: """使用外部 HTTP 服务器权(GET 请求)。"""
zh: """使用外部 HTTP 服务器权(GET 请求)。"""
}
label {
en: """http_get"""
@ -134,7 +134,7 @@ and the old file will not be used anymore.
http_post {
desc {
en: """Authorization using an external HTTP server (via POST requests)."""
zh: """使用外部 HTTP 服务器权(POST 请求)。"""
zh: """使用外部 HTTP 服务器权(POST 请求)。"""
}
label {
en: """http_post"""
@ -156,7 +156,7 @@ and the old file will not be used anymore.
url {
desc {
en: """URL of the auth server."""
zh: """权 HTTP 服务器地址。"""
zh: """权 HTTP 服务器地址。"""
}
label {
en: """URL"""
@ -213,7 +213,7 @@ and the old file will not be used anymore.
mnesia {
desc {
en: """Authorization using a built-in database (mnesia)."""
zh: """使用内部数据库鉴权 (mnesia)."""
zh: """使用内部数据库授权mnesia"""
}
label {
en: """mnesia"""
@ -226,7 +226,7 @@ and the old file will not be used anymore.
mongo_single {
desc {
en: """Authorization using a single MongoDB instance."""
zh: """使用 MongoDB 鉴权(单实例)"""
zh: """使用 MongoDB 授权(单实例)。"""
}
label {
en: """mongo_single"""
@ -237,7 +237,7 @@ and the old file will not be used anymore.
mongo_rs {
desc {
en: """Authorization using a MongoDB replica set."""
zh: """使用 MongoDB 鉴权(副本集模式)"""
zh: """使用 MongoDB 授权(副本集模式)"""
}
label {
en: """mongo_rs"""
@ -248,7 +248,7 @@ and the old file will not be used anymore.
mongo_sharded {
desc {
en: """Authorization using a sharded MongoDB cluster."""
zh: """使用 MongoDB 鉴权(分片集群模式)"""
zh: """使用 MongoDB 授权(分片集群模式)。"""
}
label {
en: """mongo_sharded"""
@ -259,7 +259,7 @@ and the old file will not be used anymore.
collection {
desc {
en: """`MongoDB` collection containing the authorization data."""
zh: """`MongoDB` 鉴权数据集"""
zh: """`MongoDB` 授权数据集。"""
}
label {
en: """collection"""
@ -278,8 +278,8 @@ Filter supports the following placeholders:
zh: """
在查询中定义过滤条件的条件表达式。
过滤器支持如下占位符:
- <code>${username}</code>: 将在运行时被替换为客户端连接时使用的用户名
- <code>${clientid}</code>: 将在运行时被替换为客户端连接时使用的客户端标识符
- <code>${username}</code>将在运行时被替换为客户端连接时使用的用户名
- <code>${clientid}</code>将在运行时被替换为客户端连接时使用的客户端标识符
"""
}
label {
@ -293,7 +293,7 @@ Filter supports the following placeholders:
mysql {
desc {
en: """Authorization using a MySQL database."""
zh: """使用 MySOL 数据库权"""
      zh: """使用 MySQL 数据库授权。"""
}
label {
en: """mysql"""
@ -306,7 +306,7 @@ Filter supports the following placeholders:
postgresql {
desc {
en: """Authorization using a PostgreSQL database."""
zh: """使用 PostgreSQL 数据库权"""
zh: """使用 PostgreSQL 数据库权"""
}
label {
en: """postgresql"""
@ -319,7 +319,7 @@ Filter supports the following placeholders:
redis_single {
desc {
en: """Authorization using a single Redis instance."""
zh: """使用 Redis 鉴权(单实例)"""
zh: """使用 Redis 授权(单实例)。"""
}
label {
en: """redis_single"""
@ -330,7 +330,7 @@ Filter supports the following placeholders:
redis_sentinel {
desc {
en: """Authorization using a Redis Sentinel."""
zh: """使用 Redis 鉴权(哨兵模式)"""
zh: """使用 Redis 授权(哨兵模式)。"""
}
label {
en: """redis_sentinel"""
@ -341,7 +341,7 @@ Filter supports the following placeholders:
redis_cluster {
desc {
en: """Authorization using a Redis cluster."""
zh: """使用 Redis 鉴权(集群模式)"""
zh: """使用 Redis 授权(集群模式)。"""
}
label {
en: """redis_cluster"""
@ -365,7 +365,7 @@ Filter supports the following placeholders:
query {
desc {
en: """Database query used to retrieve authorization data."""
zh: """访问控制数据查询语句/查询命令"""
zh: """访问控制数据查询语句/查询命令"""
}
label {
en: """query"""
@ -510,44 +510,44 @@ Filter supports the following placeholders:
metrics_total {
desc {
en: """The total number of times the authorization rule was triggered."""
zh: """权实例被触发的总次数。"""
zh: """权实例被触发的总次数。"""
}
label: {
en: """The Total Number of Times the Authorization Rule was Triggered"""
zh: """权实例被触发的总次数"""
zh: """权实例被触发的总次数"""
}
}
nomatch {
desc {
en: """The number of times that no authorization rules were matched."""
zh: """没有匹配到任何权规则的次数。"""
zh: """没有匹配到任何权规则的次数。"""
}
label: {
en: """The Number of Times that no Authorization Rules were Matched"""
zh: """没有匹配到任何权规则的次数"""
zh: """没有匹配到任何权规则的次数"""
}
}
allow {
desc {
en: """The number of times the authentication was successful."""
zh: """权成功的次数。"""
zh: """权成功的次数。"""
}
label: {
en: """The Number of Times the Authentication was Successful"""
zh: """权成功次数"""
zh: """权成功次数"""
}
}
deny {
desc {
en: """The number of authentication failures."""
zh: """权失败的次数。"""
zh: """权失败的次数。"""
}
label: {
en: """The Number of Authentication Failures"""
zh: """权失败次数"""
zh: """权失败次数"""
}
}
}

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authz, [
{description, "An OTP application"},
{vsn, "0.1.6"},
{vsn, "0.1.7"},
{registered, []},
{mod, {emqx_authz_app, []}},
{applications, [

View File

@ -40,7 +40,8 @@
-export([
api_spec/0,
paths/0,
schema/1
schema/1,
fields/1
]).
-export([
@ -63,6 +64,9 @@ paths() ->
"/authorization/sources/:type/move"
].
fields(sources) ->
[{sources, mk(array(hoconsc:union(authz_sources_type_refs())), #{desc => ?DESC(sources)})}].
%%--------------------------------------------------------------------
%% Schema for each URI
%%--------------------------------------------------------------------
@ -75,10 +79,7 @@ schema("/authorization/sources") ->
tags => ?TAGS,
responses =>
#{
200 => mk(
array(hoconsc:union(authz_sources_type_refs())),
#{desc => ?DESC(sources)}
)
200 => ref(?MODULE, sources)
}
},
post =>
@ -241,7 +242,7 @@ source(Method, #{bindings := #{type := Type} = Bindings} = Req) when
source(get, #{bindings := #{type := Type}}) ->
case get_raw_source(Type) of
[] ->
{404, #{message => <<"Not found ", Type/binary>>}};
{404, #{code => <<"NOT_FOUND">>, message => <<"Not found: ", Type/binary>>}};
[#{<<"type">> := <<"file">>, <<"enable">> := Enable, <<"path">> := Path}] ->
case file:read_file(Path) of
{ok, Rules} ->

View File

@ -181,6 +181,12 @@ t_api(_) ->
{ok, 200, Result1} = request(get, uri(["authorization", "sources"]), []),
?assertEqual([], get_sources(Result1)),
{ok, 404, ErrResult} = request(get, uri(["authorization", "sources", "http"]), []),
?assertMatch(
#{<<"code">> := <<"NOT_FOUND">>, <<"message">> := <<"Not found: http">>},
jsx:decode(ErrResult)
),
[
begin
{ok, 204, _} = request(post, uri(["authorization", "sources"]), Source)

View File

@ -2,7 +2,7 @@ emqx_auto_subscribe_schema {
auto_subscribe {
desc {
en: """After the device logs in successfully, the subscription is automatically completed for the device through the pre-defined subscription representation. Supports the use of placeholders."""
zh: """设备登成功之后,通过预设的订阅表示符,为设备自动完成订阅。支持使用占位符。"""
zh: """设备登成功之后,通过预设的订阅表示符,为设备自动完成订阅。支持使用占位符。"""
}
lable {
en: """Auto Subscribe"""

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auto_subscribe, [
{description, "An OTP application"},
{vsn, "0.1.1"},
{vsn, "0.1.2"},
{registered, []},
{mod, {emqx_auto_subscribe_app, []}},
{applications, [

View File

@ -44,14 +44,14 @@ schema("/mqtt/auto_subscribe") ->
'operationId' => auto_subscribe,
get => #{
description => ?DESC(list_auto_subscribe_api),
tags => [<<"Auto subscribe">>],
tags => [<<"Auto Subscribe">>],
responses => #{
200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe")
}
},
put => #{
description => ?DESC(update_auto_subscribe_api),
tags => [<<"Auto subscribe">>],
tags => [<<"Auto Subscribe">>],
'requestBody' => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"),
responses => #{
200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"),

View File

@ -14,16 +14,16 @@ emqx_bridge_webhook_schema {
config_url {
desc {
en: """
The URL of the HTTP Bridge.</br>
The URL of the HTTP Bridge.<br/>
Template with variables is allowed in the path, but variables cannot be used in the scheme, host,
or port part.</br>
or port part.<br/>
For example, <code> http://localhost:9901/${topic} </code> is allowed, but
<code> http://${host}:9901/message </code> or <code> http://localhost:${port}/message </code>
is not allowed.
"""
zh: """
HTTP Bridge 的 URL。</br>
路径中允许使用带变量的模板,但是 host port 不允许使用变量模板。</br>
HTTP Bridge 的 URL。<br/>
路径中允许使用带变量的模板,但是 host port 不允许使用变量模板。<br/>
例如,<code> http://localhost:9901/${topic} </code> 是允许的,
但是<code> http://${host}:9901/message </code>
或 <code> http://localhost:${port}/message </code>
@ -40,13 +40,13 @@ HTTP Bridge 的 URL。</br>
desc {
en: """
The MQTT topic filter to be forwarded to the HTTP server. All MQTT 'PUBLISH' messages with the topic
matching the local_topic will be forwarded.</br>
matching the local_topic will be forwarded.<br/>
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is
configured, then both the data got from the rule and the MQTT messages that match local_topic
will be forwarded.
"""
zh: """
发送到 'local_topic' 的消息都会转发到 HTTP 服务器。 </br>
发送到 'local_topic' 的消息都会转发到 HTTP 服务器。 <br/>
注意:如果这个 Bridge 被用作规则EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 HTTP 服务器。
"""
}
@ -59,12 +59,12 @@ will be forwarded.
config_method {
desc {
en: """
The method of the HTTP request. All the available methods are: post, put, get, delete.</br>
Template with variables is allowed.</br>
The method of the HTTP request. All the available methods are: post, put, get, delete.<br/>
Template with variables is allowed.<br/>
"""
zh: """
HTTP 请求的方法。 所有可用的方法包括post、put、get、delete。</br>
允许使用带有变量的模板。</br>"""
HTTP 请求的方法。 所有可用的方法包括post、put、get、delete。<br/>
允许使用带有变量的模板。<br/>"""
}
label: {
en: "HTTP Method"
@ -75,11 +75,11 @@ HTTP 请求的方法。 所有可用的方法包括post、put、get、delete
config_headers {
desc {
en: """
The headers of the HTTP request.</br>
The headers of the HTTP request.<br/>
Template with variables is allowed.
"""
zh: """
HTTP 请求的标头。</br>
HTTP 请求的标头。<br/>
允许使用带有变量的模板。
"""
}
@ -92,11 +92,11 @@ HTTP 请求的标头。</br>
config_body {
desc {
en: """
The body of the HTTP request.</br>
The body of the HTTP request.<br/>
Template with variables is allowed.
"""
zh: """
HTTP 请求的正文。</br>
HTTP 请求的正文。<br/>
允许使用带有变量的模板。"""
}
label: {

View File

@ -518,34 +518,16 @@ lookup_from_local_node(BridgeType, BridgeName) ->
invalid ->
{400, error_msg('BAD_REQUEST', <<"invalid operation">>)};
OperFunc ->
TargetNode = binary_to_atom(Node, utf8),
ConfMap = emqx:get_config([bridges, BridgeType, BridgeName]),
case maps:get(enable, ConfMap, false) of
false ->
{403,
error_msg(
'FORBIDDEN_REQUEST', <<"forbidden operation: bridge disabled">>
'FORBIDDEN_REQUEST',
<<"forbidden operation: bridge disabled">>
)};
true ->
case emqx_bridge_proto_v1:OperFunc(TargetNode, BridgeType, BridgeName) of
ok ->
{200};
{error, timeout} ->
{503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)};
{error, {start_pool_failed, Name, Reason}} ->
{503,
error_msg(
'SERVICE_UNAVAILABLE',
bin(
io_lib:format(
"failed to start ~p pool for reason ~p",
[Name, Reason]
)
)
)};
{error, Reason} ->
{500, error_msg('INTERNAL_ERROR', Reason)}
end
call_operation(Node, OperFunc, BridgeType, BridgeName)
end
end
).
@ -794,3 +776,33 @@ bin(S) when is_atom(S) ->
atom_to_binary(S, utf8);
bin(S) when is_binary(S) ->
S.
call_operation(Node, OperFunc, BridgeType, BridgeName) ->
case emqx_misc:safe_to_existing_atom(Node, utf8) of
{ok, TargetNode} ->
case
emqx_bridge_proto_v1:OperFunc(
TargetNode, BridgeType, BridgeName
)
of
ok ->
{200};
{error, timeout} ->
{503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)};
{error, {start_pool_failed, Name, Reason}} ->
{503,
error_msg(
'SERVICE_UNAVAILABLE',
bin(
io_lib:format(
"failed to start ~p pool for reason ~p",
[Name, Reason]
)
)
)};
{error, Reason} ->
{500, error_msg('INTERNAL_ERROR', Reason)}
end;
{error, _} ->
{400, error_msg('INVALID_NODE', <<"invalid node">>)}
end.

View File

@ -71,9 +71,10 @@ bridge_id(BridgeType, BridgeName) ->
Type = bin(BridgeType),
<<Type/binary, ":", Name/binary>>.
-spec parse_bridge_id(list() | binary() | atom()) -> {atom(), binary()}.
parse_bridge_id(BridgeId) ->
case string:split(bin(BridgeId), ":", all) of
[Type, Name] -> {binary_to_atom(Type, utf8), binary_to_atom(Name, utf8)};
[Type, Name] -> {binary_to_atom(Type, utf8), Name};
_ -> error({invalid_bridge_id, BridgeId})
end.

View File

@ -71,12 +71,12 @@ For more information, see: https://www.erlang.org/doc/man/erl.html
desc {
en: """Service discovery method for the cluster nodes."""
zh: """集群节点发现方式。可选值为:
- manual: 手动加入集群</br>
- static: 配置静态节点。配置几个固定的节点,新节点通过连接固定节点中的某一个来加入集群。</br>
- mcast: 使用 UDP 多播的方式发现节点。</br>
- dns: 使用 DNS A 记录的方式发现节点。</br>
- etcd: 使用 etcd 发现节点。</br>
- k8s: 使用 Kubernetes 发现节点。</br>
- manual: 手动加入集群<br/>
- static: 配置静态节点。配置几个固定的节点,新节点通过连接固定节点中的某一个来加入集群。<br/>
- mcast: 使用 UDP 多播的方式发现节点。<br/>
- dns: 使用 DNS A 记录的方式发现节点。<br/>
- etcd: 使用 etcd 发现节点。<br/>
- k8s: 使用 Kubernetes 发现节点。<br/>
"""
}
label {
@ -111,9 +111,9 @@ For more information, see: https://www.erlang.org/doc/man/erl.html
desc {
en: """The Erlang distribution protocol for the cluster."""
zh: """分布式 Erlang 集群协议类型。可选值为:
- inet_tcp: 使用 IPv4 </br>
- inet6_tcp 使用 IPv6 </br>
- inet_tls: 使用 TLS需要与 node.ssl_dist_optfile 配置一起使用。</br>
- inet_tcp: 使用 IPv4 <br/>
- inet6_tcp 使用 IPv6 <br/>
- inet_tls: 使用 TLS需要与 node.ssl_dist_optfile 配置一起使用。<br/>
"""
}
label {
@ -152,7 +152,7 @@ For more information, see: https://www.erlang.org/doc/man/erl.html
cluster_mcast_ports {
desc {
en: """List of UDP ports used for service discovery.</br>
en: """List of UDP ports used for service discovery.<br/>
Note: probe messages are broadcast to all the specified ports.
"""
zh: """指定多播端口。如有多个端口使用逗号 , 分隔。
@ -286,7 +286,7 @@ Applicable when <code>cluster.discovery_strategy = dns</code>
desc {
en: """Key prefix used for EMQX service discovery."""
zh: """指定 etcd 路径的前缀。每个节点在 etcd 中都会创建一个路径:
v2/keys/<prefix>/<cluster.name>/<node.name> </br>
v2/keys/<prefix>/<cluster.name>/<node.name> <br/>
当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。
"""
}
@ -357,7 +357,7 @@ Setting <code>cluster.k8s.address_type</code> to <code>ip</code> will
make EMQX to discover IP addresses of peer nodes from Kubernetes API.
"""
zh: """当使用 k8s 方式集群时address_type 用来从 Kubernetes 接口的应答里获取什么形式的 Host 列表。
指定 <code>cluster.k8s.address_type</code. 为 <code>ip</code>,则将从 Kubernetes 接口中获取集群中其他节点
指定 <code>cluster.k8s.address_type</code> 为 <code>ip</code>,则将从 Kubernetes 接口中获取集群中其他节点
的IP地址。
"""
}
@ -382,7 +382,7 @@ make EMQX to discover IP addresses of peer nodes from Kubernetes API.
cluster_k8s_suffix {
desc {
en: """Node name suffix.</br>
en: """Node name suffix.<br/>
Note: this parameter is only relevant when <code>address_type</code> is <code>dns</code>
or <code>hostname</code>."""
zh: """当使用 k8s 方式并且 cluster.k8s.address_type 指定为 dns 类型时,可设置 emqx 节点名的后缀。
@ -426,26 +426,26 @@ belong to different clusters from accidentally connecting to each other."""
node_data_dir {
desc {
en: """
Path to the persistent data directory.</br>
Possible auto-created subdirectories are:</br>
- `mnesia/<node_name>`: EMQX's built-in database directory.</br>
For example, `mnesia/emqx@127.0.0.1`.</br>
There should be only one such subdirectory.</br>
Meaning, in case the node is to be renamed (to e.g. `emqx@10.0.1.1`),</br>
the old dir should be deleted first.</br>
- `configs`: Generated configs at boot time, and cluster/local override configs.</br>
- `patches`: Hot-patch beam files are to be placed here.</br>
- `trace`: Trace log files.</br>
Path to the persistent data directory.<br/>
Possible auto-created subdirectories are:<br/>
- `mnesia/<node_name>`: EMQX's built-in database directory.<br/>
For example, `mnesia/emqx@127.0.0.1`.<br/>
There should be only one such subdirectory.<br/>
Meaning, in case the node is to be renamed (to e.g. `emqx@10.0.1.1`),<br/>
the old dir should be deleted first.<br/>
- `configs`: Generated configs at boot time, and cluster/local override configs.<br/>
- `patches`: Hot-patch beam files are to be placed here.<br/>
- `trace`: Trace log files.<br/>
**NOTE**: One data dir cannot be shared by two or more EMQX nodes.
"""
zh: """
节点数据存放目录,可能会自动创建的子目录如下:</br>
- `mnesia/<node_name>`。EMQX的内置数据库目录。例如`mnesia/emqx@127.0.0.1`。</br>
如果节点要被重新命名(例如,`emqx@10.0.1.1`)。旧目录应该首先被删除。</br>
- `configs`。在启动时生成的配置,以及集群/本地覆盖的配置。</br>
- `patches`: 热补丁文件将被放在这里。</br>
- `trace`: 日志跟踪文件。</br>
节点数据存放目录,可能会自动创建的子目录如下:<br/>
- `mnesia/<node_name>`。EMQX的内置数据库目录。例如`mnesia/emqx@127.0.0.1`。<br/>
如果节点要被重新命名(例如,`emqx@10.0.1.1`)。旧目录应该首先被删除。<br/>
- `configs`。在启动时生成的配置,以及集群/本地覆盖的配置。<br/>
- `patches`: 热补丁文件将被放在这里。<br/>
- `trace`: 日志跟踪文件。<br/>
**注意**: 一个数据dir不能被两个或更多的EMQX节点同时使用。
"""
@ -566,9 +566,9 @@ significant: later configuration files override the previous ones.
db_backend {
desc {
en: """
Select the backend for the embedded database.</br>
Select the backend for the embedded database.<br/>
<code>rlog</code> is the default backend,
that is suitable for very large clusters.</br>
that is suitable for very large clusters.<br/>
<code>mnesia</code> is a backend that offers decent performance in small clusters.
"""
zh: """ rlog是默认的数据库他适用于大规模的集群。
@ -584,20 +584,20 @@ mnesia是备选数据库在小集群中提供了很好的性能。
db_role {
desc {
en: """
Select a node role.</br>
Select a node role.<br/>
<code>core</code> nodes provide durability of the data, and take care of writes.
It is recommended to place core nodes in different racks or different availability zones.</br>
It is recommended to place core nodes in different racks or different availability zones.<br/>
<code>replicant</code> nodes are ephemeral worker nodes. Removing them from the cluster
doesn't affect database redundancy</br>
It is recommended to have more replicant nodes than core nodes.</br>
doesn't affect database redundancy<br/>
It is recommended to have more replicant nodes than core nodes.<br/>
Note: this parameter only takes effect when the <code>backend</code> is set
to <code>rlog</code>.
"""
zh: """
选择节点的角色。</br>
<code>core</code> 节点提供数据的持久性,并负责写入。建议将核心节点放置在不同的机架或不同的可用区。</br>
<code>repliant</code> 节点是临时工作节点。 从集群中删除它们,不影响数据库冗余</br>
建议复制节点多于核心节点。</br>
选择节点的角色。<br/>
<code>core</code> 节点提供数据的持久性,并负责写入。建议将核心节点放置在不同的机架或不同的可用区。<br/>
<code>repliant</code> 节点是临时工作节点。 从集群中删除它们,不影响数据库冗余<br/>
建议复制节点多于核心节点。<br/>
注意:该参数仅在设置<code>backend</code>时生效到 <code>rlog</code>。
"""
}
@ -610,17 +610,17 @@ to <code>rlog</code>.
db_core_nodes {
desc {
en: """
List of core nodes that the replicant will connect to.</br>
List of core nodes that the replicant will connect to.<br/>
Note: this parameter only takes effect when the <code>backend</code> is set
to <code>rlog</code> and the <code>role</code> is set to <code>replicant</code>.</br>
This value needs to be defined for manual or static cluster discovery mechanisms.</br>
to <code>rlog</code> and the <code>role</code> is set to <code>replicant</code>.<br/>
This value needs to be defined for manual or static cluster discovery mechanisms.<br/>
If an automatic cluster discovery mechanism is being used (such as <code>etcd</code>),
there is no need to set this value.
"""
zh: """当前节点连接的核心节点列表。</br>
zh: """当前节点连接的核心节点列表。<br/>
注意:该参数仅在设置<code>backend</code>时生效到 <code>rlog</code>
并且设置<code>role</code>为<code>replicant</code>时生效。</br>
该值需要在手动或静态集群发现机制下设置。</br>
并且设置<code>role</code>为<code>replicant</code>时生效。<br/>
该值需要在手动或静态集群发现机制下设置。<br/>
如果使用了自动集群发现机制(如<code>etcd</code>),则不需要设置该值。
"""
}
@ -657,15 +657,15 @@ transaction log entry.
db_default_shard_transport {
desc {
en: """Defines the default transport for pushing transaction logs.</br>
en: """Defines the default transport for pushing transaction logs.<br/>
This may be overridden on a per-shard basis in <code>db.shard_transports</code>.
<code>gen_rpc</code> uses the <code>gen_rpc</code> library,
<code>distr</code> uses the Erlang distribution.</br>"""
<code>distr</code> uses the Erlang distribution.<br/>"""
zh: """
定义用于推送事务日志的默认传输。</br>
定义用于推送事务日志的默认传输。<br/>
这可以在 <code>db.shard_transports</code> 中基于每个分片被覆盖。
<code>gen_rpc</code> 使用 <code>gen_rpc</code> 库,
<code>distr</code> 使用 Erlang 发行版。</br>
<code>distr</code> 使用 Erlang 发行版。<br/>
"""
}
label {
@ -676,13 +676,13 @@ This may be overridden on a per-shard basis in <code>db.shard_transports</code>.
db_shard_transports {
desc {
en: """Allows to tune the transport method used for transaction log replication, on a per-shard basis.</br>
en: """Allows to tune the transport method used for transaction log replication, on a per-shard basis.<br/>
<code>gen_rpc</code> uses the <code>gen_rpc</code> library,
<code>distr</code> uses the Erlang distribution.</br>If not specified,
<code>distr</code> uses the Erlang distribution.<br/>If not specified,
the default is to use the value set in <code>db.default_shard_transport</code>."""
zh: """允许为每个 shard 下的事务日志复制操作的传输方法进行调优。</br>
zh: """允许为每个 shard 下的事务日志复制操作的传输方法进行调优。<br/>
<code>gen_rpc</code> 使用 <code>gen_rpc</code> 库,
<code>distr</code> 使用 Erlang 自带的 rpc 库。</br>如果未指定,
<code>distr</code> 使用 Erlang 自带的 rpc 库。<br/>如果未指定,
默认是使用 <code>db.default_shard_transport</code> 中设置的值。
"""
}
@ -763,12 +763,12 @@ Ensure that the number of completed transactions is less than the <code>max_hist
rpc_port_discovery {
desc {
en: """<code>manual</code>: discover ports by <code>tcp_server_port</code>.</br>
en: """<code>manual</code>: discover ports by <code>tcp_server_port</code>.<br/>
<code>stateless</code>: discover ports in a stateless manner, using the following algorithm.
If node name is <code>emqxN@127.0.0.1</code>, where the N is an integer,
then the listening port will be 5370 + N."""
zh: """<code>manual</code>: 通过 <code>tcp_server_port</code> 来发现端口。
</br><code>stateless</code>: 使用无状态的方式来发现端口,使用如下算法。如果节点名称是 <code>
<br/><code>stateless</code>: 使用无状态的方式来发现端口,使用如下算法。如果节点名称是 <code>
emqxN@127.0.0.1</code>, N 是一个数字,那么监听端口就是 5370 + N。
"""
}
@ -780,9 +780,9 @@ emqxN@127.0.0.1</code>, N 是一个数字,那么监听端口就是 5370 + N。
rpc_tcp_server_port {
desc {
en: """Listening port used by RPC local service.</br>
en: """Listening port used by RPC local service.<br/>
Note that this config only takes effect when rpc.port_discovery is set to manual."""
zh: """RPC 本地服务使用的 TCP 端口。</br>
zh: """RPC 本地服务使用的 TCP 端口。<br/>
只有当 rpc.port_discovery 设置为 manual 时,此配置才会生效。
"""
}
@ -794,10 +794,10 @@ Note that this config only takes effect when rpc.port_discovery is set to manual
rpc_ssl_server_port {
desc {
en: """Listening port used by RPC local service.</br>
en: """Listening port used by RPC local service.<br/>
Note that this config only takes effect when rpc.port_discovery is set to manual
and <code>driver</code> is set to <code>ssl</code>."""
zh: """RPC 本地服务使用的监听SSL端口。</br>
zh: """RPC 本地服务使用的监听SSL端口。<br/>
只有当 rpc.port_discovery 设置为 manual 且 <code> dirver </code> 设置为 <code>ssl</code>
此配置才会生效。
"""
@ -847,9 +847,9 @@ Note that this config only takes effect when <code>rpc.driver</code> is set to <
rpc_keyfile {
desc {
en: """Path to the private key file for the <code>rpc.certfile</code>.</br>
en: """Path to the private key file for the <code>rpc.certfile</code>.<br/>
Note: contents of this file are secret, so it's necessary to set permissions to 600."""
zh: """<code>rpc.certfile</code> 的私钥文件的路径。</br>
zh: """<code>rpc.certfile</code> 的私钥文件的路径。<br/>
注意:此文件内容是私钥,所以需要设置权限为 600。
"""
}
@ -861,9 +861,9 @@ Note: contents of this file are secret, so it's necessary to set permissions to
rpc_cacertfile {
desc {
en: """Path to certification authority TLS certificate file used to validate <code>rpc.certfile</code>.</br>
en: """Path to certification authority TLS certificate file used to validate <code>rpc.certfile</code>.<br/>
Note: certificates of all nodes in the cluster must be signed by the same CA."""
zh: """验证 <code>rpc.certfile</code> 的 CA 证书文件的路径。</br>
zh: """验证 <code>rpc.certfile</code> 的 CA 证书文件的路径。<br/>
注意:集群中所有节点的证书必须使用同一个 CA 签发。
"""
}
@ -973,6 +973,17 @@ until the RPC connection is considered lost."""
}
}
rpc_insecure_fallback {
desc {
en: """Enable compatibility with old RPC authentication."""
zh: """兼容旧的无鉴权模式"""
}
label {
en: "RPC insecure fallback"
zh: "向后兼容旧的无鉴权模式"
}
}
log_file_handlers {
desc {
en: """File-based log handlers."""
@ -1190,7 +1201,7 @@ Supervisor 报告的类型。默认为 error 类型。
desc {
en: """Enable log rotation feature."""
zh: """启用日志轮换功能。启动后生成日志文件后缀会加上对应的索引数字比如log/emqx.log.1。
系统会默认生成<code>*.siz/*.idx<code>用于记录日志位置,请不要手动修改这两个文件。
系统会默认生成<code>*.siz/*.idx</code>用于记录日志位置,请不要手动修改这两个文件。
"""
}
label {
@ -1290,17 +1301,17 @@ Supervisor 报告的类型。默认为 error 类型。
authorization {
desc {
en: """
Authorization a.k.a. ACL.</br>
In EMQX, MQTT client access control is extremely flexible.</br>
Authorization a.k.a. ACL.<br/>
In EMQX, MQTT client access control is extremely flexible.<br/>
An out-of-the-box set of authorization data sources are supported.
For example,</br>
'file' source is to support concise and yet generic ACL rules in a file;</br>
For example,<br/>
'file' source is to support concise and yet generic ACL rules in a file;<br/>
'built_in_database' source can be used to store per-client customizable rule sets,
natively in the EMQX node;</br>
'http' source to make EMQX call an external HTTP API to make the decision;</br>
'PostgreSQL' etc. to look up clients or rules from external databases;</br>
natively in the EMQX node;<br/>
'http' source to make EMQX call an external HTTP API to make the decision;<br/>
'PostgreSQL' etc. to look up clients or rules from external databases;<br/>
"""
zh: """ 授权ACL。EMQX 支持完整的客户端访问控制ACL。</br> """
zh: """ 授权ACL。EMQX 支持完整的客户端访问控制ACL。<br/> """
}
label {
en: "Authorization"
@ -1310,9 +1321,9 @@ natively in the EMQX node;</br>
desc_cluster {
desc {
en: """EMQX nodes can form a cluster to scale up the total capacity.</br>
en: """EMQX nodes can form a cluster to scale up the total capacity.<br/>
Here holds the configs to instruct how individual nodes can discover each other."""
zh: """EMQX 节点可以组成一个集群,以提高总容量。</br> 这里指定了节点之间如何连接。"""
zh: """EMQX 节点可以组成一个集群,以提高总容量。<br/> 这里指定了节点之间如何连接。"""
}
label {
en: "Cluster"
@ -1411,11 +1422,11 @@ The new node joins the cluster by connecting to one of the bootstrap nodes."""
desc_rpc {
desc {
en: """EMQX uses a library called <code>gen_rpc</code> for inter-broker communication.</br>
en: """EMQX uses a library called <code>gen_rpc</code> for inter-broker communication.<br/>
Most of the time the default config should work,
but in case you need to do performance fine-tuning or experiment a bit,
this is where to look."""
zh: """EMQX 使用 <code>gen_rpc</code> 库来实现跨节点通信。</br>
zh: """EMQX 使用 <code>gen_rpc</code> 库来实现跨节点通信。<br/>
大多数情况下,默认的配置应该可以工作,但如果你需要做一些性能优化或者实验,可以尝试调整这些参数。"""
}
label {
@ -1461,11 +1472,11 @@ Each sink is represented by a _log handler_, which can be configured independent
desc_log_rotation {
desc {
en: """
By default, the logs are stored in `./log` directory (for installation from zip file) or in `/var/log/emqx` (for binary installation).</br>
By default, the logs are stored in `./log` directory (for installation from zip file) or in `/var/log/emqx` (for binary installation).<br/>
This section of the configuration controls the number of files kept for each log handler.
"""
zh: """
默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。</br>
默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。<br/>
这部分配置,控制每个日志处理进程保留的文件数量。
"""
}
@ -1478,11 +1489,11 @@ This section of the configuration controls the number of files kept for each log
desc_log_overload_kill {
desc {
en: """
Log overload kill features an overload protection that activates when the log handlers use too much memory or have too many buffered log messages.</br>
Log overload kill features an overload protection that activates when the log handlers use too much memory or have too many buffered log messages.<br/>
When the overload is detected, the log handler is terminated and restarted after a cooldown period.
"""
zh: """
日志过载终止,具有过载保护功能。当日志处理进程使用过多内存,或者缓存的日志消息过多时该功能被激活。</br>
日志过载终止,具有过载保护功能。当日志处理进程使用过多内存,或者缓存的日志消息过多时该功能被激活。<br/>
检测到过载时,日志处理进程将终止,并在冷却期后重新启动。
"""
}

View File

@ -1,6 +1,6 @@
{application, emqx_conf, [
{description, "EMQX configuration management"},
{vsn, "0.1.5"},
{vsn, "0.1.6"},
{registered, []},
{mod, {emqx_conf_app, []}},
{applications, [kernel, stdlib]},

View File

@ -813,6 +813,15 @@ fields("rpc") ->
default => "1MB",
desc => ?DESC(rpc_socket_buffer)
}
)},
{"insecure_fallback",
sc(
boolean(),
#{
mapping => "gen_rpc.insecure_auth_fallback_allowed",
default => true,
desc => ?DESC(rpc_insecure_fallback)
}
)}
];
fields("log") ->
@ -970,7 +979,7 @@ desc("authorization") ->
desc(_) ->
undefined.
translations() -> ["ekka", "kernel", "emqx", "gen_rpc"].
translations() -> ["ekka", "kernel", "emqx", "gen_rpc", "prometheus"].
translation("ekka") ->
[{"cluster_discovery", fun tr_cluster_discovery/1}];
@ -987,7 +996,37 @@ translation("emqx") ->
{"local_override_conf_file", fun tr_local_override_conf_file/1}
];
translation("gen_rpc") ->
[{"default_client_driver", fun tr_default_config_driver/1}].
[{"default_client_driver", fun tr_default_config_driver/1}];
translation("prometheus") ->
[
{"vm_dist_collector_metrics", fun tr_vm_dist_collector/1},
{"mnesia_collector_metrics", fun tr_mnesia_collector/1},
{"vm_statistics_collector_metrics", fun tr_vm_statistics_collector/1},
{"vm_system_info_collector_metrics", fun tr_vm_system_info_collector/1},
{"vm_memory_collector_metrics", fun tr_vm_memory_collector/1},
{"vm_msacc_collector_metrics", fun tr_vm_msacc_collector/1}
].
tr_vm_dist_collector(Conf) ->
metrics_enabled(conf_get("prometheus.vm_dist_collector", Conf, enabled)).
tr_mnesia_collector(Conf) ->
metrics_enabled(conf_get("prometheus.mnesia_collector", Conf, enabled)).
tr_vm_statistics_collector(Conf) ->
metrics_enabled(conf_get("prometheus.vm_statistics_collector", Conf, enabled)).
tr_vm_system_info_collector(Conf) ->
metrics_enabled(conf_get("prometheus.vm_system_info_collector", Conf, enabled)).
tr_vm_memory_collector(Conf) ->
metrics_enabled(conf_get("prometheus.vm_memory_collector", Conf, enabled)).
tr_vm_msacc_collector(Conf) ->
metrics_enabled(conf_get("prometheus.vm_msacc_collector", Conf, enabled)).
metrics_enabled(enabled) -> all;
metrics_enabled(disabled) -> [].
tr_default_config_driver(Conf) ->
conf_get("rpc.driver", Conf).

View File

@ -14,11 +14,11 @@ emqx_connector_api {
conn_test_post {
desc {
en: """
Test creating a new connector by given ID </br>
Test creating a new connector by given ID <br/>
The ID must be of format '{type}:{name}'
"""
zh: """
通过给定的 ID 测试创建一个新的连接器 </br>
通过给定的 ID 测试创建一个新的连接器 <br/>
ID 的格式必须为“{type}:{name}”
"""
}

View File

@ -2,14 +2,14 @@ emqx_connector_http {
base_url {
desc {
en: """
The base URL is the URL includes only the scheme, host and port.</br>
The base URL is the URL includes only the scheme, host and port.<br/>
When send an HTTP request, the real URL to be used is the concatenation of the base URL and the
path parameter (passed by the emqx_resource:query/2,3 or provided by the request parameter).</br>
path parameter (passed by the emqx_resource:query/2,3 or provided by the request parameter).<br/>
For example: `http://localhost:9901/`
"""
zh: """
base URL 只包含host和port。</br>
发送HTTP请求时真实的URL是由base URL 和 path parameter连接而成通过emqx_resource:query/2,3传递或者通过请求参数提供。</br>
base URL 只包含host和port。<br/>
发送HTTP请求时真实的URL是由base URL 和 path parameter连接而成通过emqx_resource:query/2,3传递或者通过请求参数提供。<br/>
示例:`http://localhost:9901/`
"""
}

View File

@ -47,13 +47,13 @@ emqx_connector_mongo {
server {
desc {
en: """
The IPv4 or IPv6 address or the hostname to connect to.</br>
A host entry has the following form: `Host[:Port]`.</br>
The IPv4 or IPv6 address or the hostname to connect to.<br/>
A host entry has the following form: `Host[:Port]`.<br/>
The MongoDB default port 27017 is used if `[:Port]` is not specified.
"""
zh: """
将要连接的 IPv4 或 IPv6 地址,或者主机名。</br>
主机名具有以下形式:`Host[:Port]`。</br>
将要连接的 IPv4 或 IPv6 地址,或者主机名。<br/>
主机名具有以下形式:`Host[:Port]`。<br/>
如果未指定 `[:Port]`,则使用 MongoDB 默认端口 27017。
"""
}

View File

@ -2,13 +2,13 @@ emqx_connector_mqtt_schema {
ingress_desc {
desc {
en: """The ingress config defines how this bridge receive messages from the remote MQTT broker, and then
send them to the local broker.</br>
Template with variables is allowed in 'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'.</br>
send them to the local broker.<br/>
Template with variables is allowed in 'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'.<br/>
NOTE: if this bridge is used as the input of a rule, and also 'local.topic' is
configured, then messages got from the remote broker will be sent to both the 'local.topic' and
the rule."""
zh: """入口配置定义了该桥接如何从远程 MQTT Broker 接收消息,然后将消息发送到本地 Broker。</br>
以下字段中允许使用带有变量的模板:'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'。</br>
zh: """入口配置定义了该桥接如何从远程 MQTT Broker 接收消息,然后将消息发送到本地 Broker。<br/>
以下字段中允许使用带有变量的模板:'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'。<br/>
注意:如果此桥接被用作规则的输入,并且配置了 'local.topic',则从远程代理获取的消息将同时被发送到 'local.topic' 和规则。
"""
}
@ -20,13 +20,13 @@ emqx_connector_mqtt_schema {
egress_desc {
desc {
en: """The egress config defines how this bridge forwards messages from the local broker to the remote broker.</br>
Template with variables is allowed in 'remote.topic', 'local.qos', 'local.retain', 'local.payload'.</br>
en: """The egress config defines how this bridge forwards messages from the local broker to the remote broker.<br/>
Template with variables is allowed in 'remote.topic', 'local.qos', 'local.retain', 'local.payload'.<br/>
NOTE: if this bridge is used as the action of a rule, and also 'local.topic'
is configured, then both the data got from the rule and the MQTT messages that matches
'local.topic' will be forwarded."""
zh: """出口配置定义了该桥接如何将消息从本地 Broker 转发到远程 Broker。
以下字段中允许使用带有变量的模板:'remote.topic', 'local.qos', 'local.retain', 'local.payload'。</br>
以下字段中允许使用带有变量的模板:'remote.topic', 'local.qos', 'local.retain', 'local.payload'。<br/>
注意:如果此桥接被用作规则的动作,并且配置了 'local.topic',则从规则输出的数据以及匹配到 'local.topic' 的 MQTT 消息都会被转发。
"""
}
@ -83,22 +83,22 @@ is configured, then both the data got from the rule and the MQTT messages that m
mode {
desc {
en: """
The mode of the MQTT Bridge.</br>
The mode of the MQTT Bridge.<br/>
- cluster_shareload: create an MQTT connection on each node in the emqx cluster.</br>
- cluster_shareload: create an MQTT connection on each node in the emqx cluster.<br/>
In 'cluster_shareload' mode, the incoming load from the remote broker is shared by
using shared subscription.</br>
using shared subscription.<br/>
Note that the 'clientid' is suffixed by the node name, this is to avoid
clientid conflicts between different nodes. And we can only use shared subscription
topic filters for 'remote.topic' of ingress connections.
topic filters for <code>remote.topic</code> of ingress connections.
"""
zh: """
MQTT 桥的模式。 </br>
MQTT 桥的模式。 <br/>
- cluster_shareload在 emqx 集群的每个节点上创建一个 MQTT 连接。</br>
在“cluster_shareload”模式下来自远程代理的传入负载通过共享订阅的方式接收。</br>
请注意,“clientid”以节点名称为后缀这是为了避免不同节点之间的clientid冲突。
而且对于入口连接的“remote.topic”,我们只能使用共享订阅主题过滤器。
- cluster_shareload在 emqx 集群的每个节点上创建一个 MQTT 连接。<br/>
在“cluster_shareload”模式下来自远程代理的传入负载通过共享订阅的方式接收。<br/>
请注意,<code>clientid</code> 以节点名称为后缀,这是为了避免不同节点之间的 <code> clientid</code> 冲突。
而且对于入口连接的 <code>remote.topic</code>,我们只能使用共享订阅主题过滤器。
"""
}
label: {
@ -216,11 +216,11 @@ broker MUST support this feature.
ingress_local_topic {
desc {
en: """
Send messages to which topic of the local broker.</br>
Send messages to which topic of the local broker.<br/>
Template with variables is allowed.
"""
zh: """
向本地broker的哪个topic发送消息。</br>
向本地broker的哪个topic发送消息。<br/>
允许使用带有变量的模板。
"""
}
@ -233,11 +233,11 @@ Template with variables is allowed.
ingress_local_qos {
desc {
en: """
The QoS of the MQTT message to be sent.</br>
The QoS of the MQTT message to be sent.<br/>
Template with variables is allowed.
"""
zh: """
待发送 MQTT 消息的 QoS。</br>
待发送 MQTT 消息的 QoS。<br/>
允许使用带有变量的模板。
"""
}
@ -261,11 +261,11 @@ Template with variables is allowed.
egress_remote_topic {
desc {
en: """
Forward to which topic of the remote broker.</br>
Forward to which topic of the remote broker.<br/>
Template with variables is allowed.
"""
zh: """
转发到远程broker的哪个topic。</br>
转发到远程broker的哪个topic。<br/>
允许使用带有变量的模板。
"""
}
@ -278,11 +278,11 @@ Template with variables is allowed.
egress_remote_qos {
desc {
en: """
The QoS of the MQTT message to be sent.</br>
The QoS of the MQTT message to be sent.<br/>
Template with variables is allowed.
"""
zh: """
待发送 MQTT 消息的 QoS。</br>
待发送 MQTT 消息的 QoS。<br/>
允许使用带有变量的模板。
"""
}
@ -295,11 +295,11 @@ Template with variables is allowed.
retain {
desc {
en: """
The 'retain' flag of the MQTT message to be sent.</br>
The 'retain' flag of the MQTT message to be sent.<br/>
Template with variables is allowed.
"""
zh: """
要发送的 MQTT 消息的“保留”标志。</br>
要发送的 MQTT 消息的“保留”标志。<br/>
允许使用带有变量的模板。
"""
}
@ -312,11 +312,11 @@ Template with variables is allowed.
payload {
desc {
en: """
The payload of the MQTT message to be sent.</br>
The payload of the MQTT message to be sent.<br/>
Template with variables is allowed.
"""
zh: """
要发送的 MQTT 消息的负载。</br>
要发送的 MQTT 消息的负载。<br/>
允许使用带有变量的模板。
"""
}

View File

@ -3,13 +3,13 @@ emqx_connector_mysql {
server {
desc {
en: """
The IPv4 or IPv6 address or the hostname to connect to.</br>
A host entry has the following form: `Host[:Port]`.</br>
The IPv4 or IPv6 address or the hostname to connect to.<br/>
A host entry has the following form: `Host[:Port]`.<br/>
The MySQL default port 3306 is used if `[:Port]` is not specified.
"""
zh: """
将要连接的 IPv4 或 IPv6 地址,或者主机名。</br>
主机名具有以下形式:`Host[:Port]`。</br>
将要连接的 IPv4 或 IPv6 地址,或者主机名。<br/>
主机名具有以下形式:`Host[:Port]`。<br/>
如果未指定 `[:Port]`,则使用 MySQL 默认端口 3306。
"""
}

View File

@ -3,13 +3,13 @@ emqx_connector_pgsql {
server {
desc {
en: """
The IPv4 or IPv6 address or the hostname to connect to.</br>
A host entry has the following form: `Host[:Port]`.</br>
The IPv4 or IPv6 address or the hostname to connect to.<br/>
A host entry has the following form: `Host[:Port]`.<br/>
The PostgreSQL default port 5432 is used if `[:Port]` is not specified.
"""
zh: """
将要连接的 IPv4 或 IPv6 地址,或者主机名。</br>
主机名具有以下形式:`Host[:Port]`。</br>
将要连接的 IPv4 或 IPv6 地址,或者主机名。<br/>
主机名具有以下形式:`Host[:Port]`。<br/>
如果未指定 `[:Port]`,则使用 PostgreSQL 默认端口 5432。
"""
}

View File

@ -47,13 +47,13 @@ emqx_connector_redis {
server {
desc {
en: """
The IPv4 or IPv6 address or the hostname to connect to.</br>
A host entry has the following form: `Host[:Port]`.</br>
The IPv4 or IPv6 address or the hostname to connect to.<br/>
A host entry has the following form: `Host[:Port]`.<br/>
The Redis default port 6379 is used if `[:Port]` is not specified.
"""
zh: """
将要连接的 IPv4 或 IPv6 地址,或者主机名。</br>
主机名具有以下形式:`Host[:Port]`。</br>
将要连接的 IPv4 或 IPv6 地址,或者主机名。<br/>
主机名具有以下形式:`Host[:Port]`。<br/>
如果未指定 `[:Port]`,则使用 MongoDB 默认端口 27017。
"""
}

View File

@ -25,14 +25,13 @@
-define(PGSQL_DEFAULT_PORT, 5432).
-define(SERVERS_DESC,
"A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`\n"
"A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`<br/>"
"For each Node should be: "
).
-define(SERVER_DESC(TYPE, DEFAULT_PORT),
"\n"
"The IPv4 or IPv6 address or the hostname to connect to.</br>\n"
"A host entry has the following form: `Host[:Port]`.</br>\n"
"The IPv4 or IPv6 address or the hostname to connect to.<br/>"
"A host entry has the following form: `Host[:Port]`.<br/>"
"The " ++ TYPE ++ " default port " ++ DEFAULT_PORT ++ " is used if `[:Port]` is not specified."
).

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_connector, [
{description, "An OTP application"},
{vsn, "0.1.6"},
{vsn, "0.1.8"},
{registered, []},
{mod, {emqx_connector_app, []}},
{applications, [

View File

@ -387,7 +387,7 @@ init_worker_options([], Acc) ->
%% ===================================================================
%% Schema funcs
server(type) -> emqx_schema:ip_port();
server(type) -> emqx_schema:host_port();
server(required) -> true;
server(validator) -> [?NOT_EMPTY("the value of the field 'server' cannot be empty")];
server(converter) -> fun to_server_raw/1;

View File

@ -70,7 +70,7 @@ fields(config) ->
emqx_connector_schema_lib:ssl_fields() ++
emqx_connector_schema_lib:prepare_statement_fields().
server(type) -> emqx_schema:ip_port();
server(type) -> emqx_schema:host_port();
server(required) -> true;
server(validator) -> [?NOT_EMPTY("the value of the field 'server' cannot be empty")];
server(converter) -> fun to_server/1;

View File

@ -59,7 +59,7 @@ fields(config) ->
emqx_connector_schema_lib:ssl_fields() ++
emqx_connector_schema_lib:prepare_statement_fields().
server(type) -> emqx_schema:ip_port();
server(type) -> emqx_schema:host_port();
server(required) -> true;
server(validator) -> [?NOT_EMPTY("the value of the field 'server' cannot be empty")];
server(converter) -> fun to_server/1;

View File

@ -98,7 +98,7 @@ fields(sentinel) ->
redis_fields() ++
emqx_connector_schema_lib:ssl_fields().
server(type) -> emqx_schema:ip_port();
server(type) -> emqx_schema:host_port();
server(required) -> true;
server(validator) -> [?NOT_EMPTY("the value of the field 'server' cannot be empty")];
server(converter) -> fun to_server_raw/1;

View File

@ -69,7 +69,7 @@ fields("server_configs") ->
)},
{server,
mk(
emqx_schema:ip_port(),
emqx_schema:host_port(),
#{
required => true,
desc => ?DESC("server")

View File

@ -2,7 +2,7 @@
{application, emqx_dashboard, [
{description, "EMQX Web Dashboard"},
% strict semver, bump manually!
{vsn, "5.0.6"},
{vsn, "5.0.8"},
{modules, []},
{registered, [emqx_dashboard_sup]},
{applications, [kernel, stdlib, mnesia, minirest, emqx]},

View File

@ -235,7 +235,7 @@ authorize(Req) ->
)
end;
{error, _} ->
return_unauthorized(<<"WORNG_USERNAME_OR_PWD">>, <<"Check username/password">>)
return_unauthorized(?WRONG_USERNAME_OR_PWD, <<"Check username/password">>)
end;
{bearer, Token} ->
case emqx_dashboard_admin:verify_token(Token) of

View File

@ -51,7 +51,7 @@ schema("/error_codes") ->
get => #{
security => [],
description => <<"API Error Codes">>,
tags => [<<"Error codes">>],
tags => [<<"Error Codes">>],
responses => #{
200 => hoconsc:array(hoconsc:ref(?MODULE, error_code))
}
@ -63,7 +63,7 @@ schema("/error_codes/:code") ->
get => #{
security => [],
description => <<"API Error Codes">>,
tags => [<<"Error codes">>],
tags => [<<"Error Codes">>],
parameters => [
{code,
hoconsc:mk(hoconsc:enum(emqx_dashboard_error_code:all()), #{

View File

@ -131,12 +131,20 @@ monitor(get, #{query_string := QS, bindings := Bindings}) ->
end.
monitor_current(get, #{bindings := Bindings}) ->
NodeOrCluster = binary_to_atom(maps:get(node, Bindings, <<"all">>), utf8),
RawNode = maps:get(node, Bindings, all),
case emqx_misc:safe_to_existing_atom(RawNode, utf8) of
{ok, NodeOrCluster} ->
case emqx_dashboard_monitor:current_rate(NodeOrCluster) of
{ok, CurrentRate} ->
{200, CurrentRate};
{badrpc, {Node, Reason}} ->
Message = list_to_binary(io_lib:format("Bad node ~p, rpc failed ~p", [Node, Reason])),
Message = list_to_binary(
io_lib:format("Bad node ~p, rpc failed ~p", [Node, Reason])
),
{400, 'BAD_RPC', Message}
end;
{error, _} ->
Message = list_to_binary(io_lib:format("Bad node ~p", [RawNode])),
{400, 'BAD_RPC', Message}
end.

View File

@ -367,11 +367,13 @@ parameters(Params, Module) ->
Required = hocon_schema:field_schema(Type, required),
Default = hocon_schema:field_schema(Type, default),
HoconType = hocon_schema:field_schema(Type, type),
SchemaExtras = hocon_extract_map([enum, default], Type),
Meta = init_meta(Default),
{ParamType, Refs} = hocon_schema_to_spec(HoconType, Module),
Schema = maps:merge(maps:merge(ParamType, Meta), SchemaExtras),
Spec0 = init_prop(
[required | ?DEFAULT_FIELDS],
#{schema => maps:merge(ParamType, Meta), name => Name, in => In},
#{schema => Schema, name => Name, in => In},
Type
),
Spec1 = trans_required(Spec0, Required, In),
@ -384,6 +386,18 @@ parameters(Params, Module) ->
),
{lists:reverse(SpecList), AllRefs}.
hocon_extract_map(Keys, Type) ->
lists:foldl(
fun(K, M) ->
case hocon_schema:field_schema(Type, K) of
undefined -> M;
V -> M#{K => V}
end
end,
#{},
Keys
).
init_meta(undefined) -> #{};
init_meta(Default) -> #{default => Default}.
@ -427,7 +441,7 @@ trans_description(Spec, Hocon) ->
undefined ->
Spec;
Desc ->
Desc1 = binary:replace(Desc, [<<"</br>\n">>, <<"\n">>], <<"</br>">>, [global]),
Desc1 = binary:replace(Desc, [<<"\n">>], <<"<br/>">>, [global]),
Spec#{description => Desc1}
end.
@ -656,6 +670,8 @@ typename_to_spec("file()", _Mod) ->
#{type => string, example => <<"/path/to/file">>};
typename_to_spec("ip_port()", _Mod) ->
#{type => string, example => <<"127.0.0.1:80">>};
typename_to_spec("host_port()", _Mod) ->
#{type => string, example => <<"example.host.domain:80">>};
typename_to_spec("write_syntax()", _Mod) ->
#{
type => string,
@ -663,8 +679,6 @@ typename_to_spec("write_syntax()", _Mod) ->
<<"${topic},clientid=${clientid}", " ", "payload=${payload},",
"${clientid}_int_value=${payload.int_key}i,", "bool=${payload.bool}">>
};
typename_to_spec("ip_ports()", _Mod) ->
#{type => string, example => <<"127.0.0.1:80, 127.0.0.2:80">>};
typename_to_spec("url()", _Mod) ->
#{type => string, example => <<"http://127.0.0.1">>};
typename_to_spec("connect_timeout()", Mod) ->

View File

@ -74,14 +74,6 @@ end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_management]),
mria:stop().
set_special_configs(emqx_management) ->
Listeners = #{http => #{port => 8081}},
Config = #{
listeners => Listeners,
applications => [#{id => "admin", secret => "public"}]
},
emqx_config:put([emqx_management], Config),
ok;
set_special_configs(emqx_dashboard) ->
emqx_dashboard_api_test_helpers:set_default_config(),
ok;

View File

@ -6,7 +6,7 @@
-export([paths/0, api_spec/0, schema/1, fields/1]).
-export([init_per_suite/1, end_per_suite/1]).
-export([t_in_path/1, t_in_query/1, t_in_mix/1, t_without_in/1, t_ref/1, t_public_ref/1]).
-export([t_require/1, t_nullable/1, t_method/1, t_api_spec/1]).
-export([t_require/1, t_query_enum/1, t_nullable/1, t_method/1, t_api_spec/1]).
-export([t_in_path_trans/1, t_in_query_trans/1, t_in_mix_trans/1, t_ref_trans/1]).
-export([t_in_path_trans_error/1, t_in_query_trans_error/1, t_in_mix_trans_error/1]).
-export([all/0, suite/0, groups/0]).
@ -30,6 +30,7 @@ groups() ->
t_in_mix,
t_without_in,
t_require,
t_query_enum,
t_nullable,
t_method,
t_public_ref
@ -226,6 +227,17 @@ t_require(_Config) ->
validate("/required/false", ExpectSpec),
ok.
t_query_enum(_Config) ->
ExpectSpec = [
#{
in => query,
name => userid,
schema => #{type => string, enum => [<<"a">>], default => <<"a">>}
}
],
validate("/query/enum", ExpectSpec),
ok.
t_nullable(_Config) ->
NullableFalse = [
#{
@ -528,6 +540,8 @@ schema("/test/without/in") ->
};
schema("/required/false") ->
to_schema([{'userid', mk(binary(), #{in => query, required => false})}]);
schema("/query/enum") ->
to_schema([{'userid', mk(binary(), #{in => query, enum => [<<"a">>], default => <<"a">>})}]);
schema("/nullable/false") ->
to_schema([{'userid', mk(binary(), #{in => query, required => true})}]);
schema("/nullable/true") ->

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_exhook, [
{description, "EMQX Extension for Hook"},
{vsn, "5.0.4"},
{vsn, "5.0.6"},
{modules, []},
{registered, []},
{mod, {emqx_exhook_app, []}},

Some files were not shown because too many files have changed in this diff Show More