diff --git a/.ci/docker-compose-file/docker-compose.yaml b/.ci/docker-compose-file/docker-compose.yaml index 8db53d562..4a5ef7070 100644 --- a/.ci/docker-compose-file/docker-compose.yaml +++ b/.ci/docker-compose-file/docker-compose.yaml @@ -23,6 +23,7 @@ services: - ./kerberos/krb5.conf:/etc/krb5.conf working_dir: /emqx tty: true + user: "${UID_GID}" networks: emqx_bridge: diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index e69de29bb..38c008b04 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,19 @@ + +Fixes + +**If your build fails** due to your commit message not passing the build checks, please review the guidelines here: https://github.com/emqx/emqx/blob/master/CONTRIBUTING.md. + +## PR Checklist +Please convert this PR to a draft if any of the following conditions are not met. Reviewers may skip the review until all the items are checked: + +- [ ] Added tests for the changes +- [ ] Changed lines are covered in the coverage report +- [ ] Change log has been added to the `changes/` dir +- [ ] For EMQX 4.x: `appup` files updated (execute `scripts/update-appup.sh emqx`) +- [ ] For internal contributors: there is a Jira ticket to track this change +- [ ] If documentation changes are needed, a PR to emqx-docs.git is sent, or a Jira ticket is created to follow up +- [ ] In case of non-backward-compatible changes, the reviewer should check this item as a write-off and add details in the **Backward Compatibility** section + +## Backward Compatibility + +## More information diff --git a/.github/actions/docker-meta/action.yaml b/.github/actions/docker-meta/action.yaml new file mode 100644 index 000000000..13ab21da6 --- /dev/null +++ b/.github/actions/docker-meta/action.yaml @@ -0,0 +1,81 @@ +name: 'Docker meta' +inputs: + profile: + required: true + type: string + registry: + required: true + type: string + arch: + required: true + type: string + otp: + required: true + type: string + elixir: + required: false + type: string + default: '' + builder_base: + required: true + type: string + owner: + required: true + type: string + docker_tags: + required: true + type: string + +outputs: + emqx_name: + description: "EMQX name" + value: ${{ steps.pre-meta.outputs.emqx_name }} + version: + description: "docker image version" + value: ${{ steps.meta.outputs.version }} + tags: + description: "docker image tags" + value: ${{ steps.meta.outputs.tags }} + labels: + description: "docker image labels" + value: ${{ steps.meta.outputs.labels }} + +runs: + using: composite + steps: + - name: prepare for docker/metadata-action + id: pre-meta + shell: bash + run: | + emqx_name=${{ inputs.profile }} + img_suffix=${{ inputs.arch }} + img_labels="org.opencontainers.image.otp.version=${{ inputs.otp }}" + if [ -n "${{ inputs.elixir }}" ]; then + emqx_name="emqx-elixir" + img_suffix="elixir-${{ inputs.arch }}" + img_labels="org.opencontainers.image.elixir.version=${{ inputs.elixir }}\n${img_labels}" + fi + if [ "${{ inputs.profile }}" = "emqx" ]; then + img_labels="org.opencontainers.image.edition=Opensource\n${img_labels}" + fi + if [ "${{ inputs.profile }}" = "emqx-enterprise" ]; then + img_labels="org.opencontainers.image.edition=Enterprise\n${img_labels}" + fi + if [[ "${{ inputs.builder_base }}" =~ "alpine" ]]; then + img_suffix="${img_suffix}-alpine" + fi + echo "emqx_name=${emqx_name}" >> $GITHUB_OUTPUT + echo "img_suffix=${img_suffix}" >> $GITHUB_OUTPUT + echo "img_labels=${img_labels}" >> $GITHUB_OUTPUT + echo "img_name=${{ inputs.registry }}/${{ inputs.owner 
}}/${{ inputs.profile }}" >> $GITHUB_OUTPUT + - uses: docker/metadata-action@v4 + id: meta + with: + images: + ${{ steps.pre-meta.outputs.img_name }} + flavor: | + suffix=-${{ steps.pre-meta.outputs.img_suffix }} + tags: | + type=raw,value=${{ inputs.docker_tags }} + labels: + ${{ steps.pre-meta.outputs.img_labels }} diff --git a/.github/actions/package-macos/action.yaml b/.github/actions/package-macos/action.yaml index 177fdf6b8..57c7910a8 100644 --- a/.github/actions/package-macos/action.yaml +++ b/.github/actions/package-macos/action.yaml @@ -33,7 +33,7 @@ runs: brew install curl zip unzip kerl coreutils openssl@1.1 echo "/usr/local/opt/bison/bin" >> $GITHUB_PATH echo "/usr/local/bin" >> $GITHUB_PATH - - uses: actions/cache@v2 + - uses: actions/cache@v3 id: cache with: path: ~/.kerl/${{ inputs.otp }} diff --git a/.github/workflows/apps_version_check.yaml b/.github/workflows/apps_version_check.yaml index 94d28dd87..13e26b204 100644 --- a/.github/workflows/apps_version_check.yaml +++ b/.github/workflows/apps_version_check.yaml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 0 - name: Check apps version diff --git a/.github/workflows/build_and_push_docker_images.yaml b/.github/workflows/build_and_push_docker_images.yaml index 5fdfd1416..565fe5147 100644 --- a/.github/workflows/build_and_push_docker_images.yaml +++ b/.github/workflows/build_and_push_docker_images.yaml @@ -20,7 +20,7 @@ jobs: prepare: runs-on: ubuntu-20.04 # prepare source with any OTP version, no need for a matrix - container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" outputs: BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }} @@ -29,7 +29,7 @@ jobs: DOCKER_TAG_VERSION: ${{ steps.get_profile.outputs.DOCKER_TAG_VERSION }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: ref: ${{ github.event.inputs.branch_or_tag }} # when input is not given, the event tag is used path: source @@ -46,7 +46,6 @@ jobs: else docker_latest=false fi - echo "::set-output name=IS_DOCKER_LATEST::${docker_latest}" if git describe --tags --match "[v|e]*" --exact; then echo "This is an exact git tag, will publish images" is_exact='true' @@ -54,7 +53,6 @@ jobs: echo "This is NOT an exact git tag, will not publish images" is_exact='false' fi - echo "::set-output name=IS_EXACT_TAG::${is_exact}" case $tag in refs/tags/v*) PROFILE='emqx' @@ -78,94 +76,69 @@ jobs: esac ;; esac - echo "::set-output name=BUILD_PROFILE::$PROFILE" VSN="$(./pkg-vsn.sh "$PROFILE")" echo "Building $PROFILE image with tag $VSN (latest=$docker_latest)" - echo "::set-output name=DOCKER_TAG_VERSION::$VSN" + echo "IS_DOCKER_LATEST=$docker_latest" >> $GITHUB_OUTPUT + echo "IS_EXACT_TAG=$is_exact" >> $GITHUB_OUTPUT + echo "BUILD_PROFILE=$PROFILE" >> $GITHUB_OUTPUT + echo "DOCKER_TAG_VERSION=$VSN" >> $GITHUB_OUTPUT - name: get_all_deps run: | make -C source deps-all zip -ryq source.zip source/* source/.[^.]* - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: source path: source.zip docker: - runs-on: ${{ matrix.build_machine }} + runs-on: ${{ matrix.arch[1] }} needs: prepare strategy: fail-fast: false matrix: arch: - - amd64 - - arm64 + - [amd64, ubuntu-20.04] + - [arm64, aws-arm64] profile: - ${{ needs.prepare.outputs.BUILD_PROFILE }} - build_elixir: - - no_elixir registry: - 'docker.io' - 'public.ecr.aws' os: - [alpine3.15.1, "alpine:3.15.1", 
"deploy/docker/Dockerfile.alpine"] - [debian11, "debian:11-slim", "deploy/docker/Dockerfile"] - # NOTE: for docker, only support latest otp and elixir - # versions, not a matrix + # NOTE: 'otp' and 'elixir' are to configure emqx-builder image + # only support latest otp and elixir, not a matrix otp: - - 24.2.1-1 # update to latest + - 24.3.4.2-1 # update to latest elixir: - 1.13.4 # update to latest - build_machine: - - aws-arm64 - - ubuntu-20.04 - exclude: + exclude: # TODO: publish enterprise to ecr too? - registry: 'public.ecr.aws' profile: emqx-enterprise - - arch: arm64 - build_machine: ubuntu-20.04 - - arch: amd64 - build_machine: aws-arm64 - include: - - arch: amd64 - profile: emqx - build_elixir: with_elixir - registry: 'docker.io' - os: [debian11, "debian:11-slim", "deploy/docker/Dockerfile"] - otp: 24.2.1-1 - elixir: 1.13.4 - build_machine: ubuntu-20.04 - - arch: arm64 - profile: emqx - build_elixir: with_elixir - registry: 'docker.io' - os: [debian11, "debian:11-slim", "deploy/docker/Dockerfile"] - otp: 24.2.1-1 - elixir: 1.13.4 - build_machine: aws-arm64 - steps: - uses: AutoModality/action-clean@v1 - if: matrix.build_machine == 'aws-arm64' - - uses: actions/download-artifact@v2 + if: matrix.arch[1] == 'aws-arm64' + - uses: actions/download-artifact@v3 with: name: source path: . - name: unzip source code run: unzip -q source.zip - - uses: docker/setup-buildx-action@v1 + - uses: docker/setup-buildx-action@v2 - name: Login for docker. - uses: docker/login-action@v1 + uses: docker/login-action@v2 if: matrix.registry == 'docker.io' with: username: ${{ secrets.DOCKER_HUB_USER }} password: ${{ secrets.DOCKER_HUB_TOKEN }} - name: Login for AWS ECR - uses: docker/login-action@v1 + uses: docker/login-action@v2 if: matrix.registry == 'public.ecr.aws' with: registry: public.ecr.aws @@ -173,113 +146,129 @@ jobs: password: ${{ secrets.AWS_SECRET_ACCESS_KEY }} ecr: true - - name: prepare for docker-action-parms - id: pre-meta - run: | - emqx_name=${{ matrix.profile }} - img_suffix=${{ matrix.arch }} - img_labels="org.opencontainers.image.otp.version=${{ matrix.otp }}" - - if [ ${{ matrix.build_elixir }} = "with_elixir" ]; then - emqx_name="emqx-elixir" - img_suffix="elixir-${{ matrix.arch }}" - img_labels="org.opencontainers.image.elixir.version=${{ matrix.elixir }}\n${img_labels}" - fi - - if [ ${{ matrix.profile }} = "emqx" ]; then - img_labels="org.opencontainers.image.edition=Opensource\n${img_labels}" - fi - - if [ ${{ matrix.profile }} = "emqx-enterprise" ]; then - img_labels="org.opencontainers.image.edition=Enterprise\n${img_labels}" - fi - - if [[ ${{ matrix.os[0] }} =~ "alpine" ]]; then - img_suffix="${img_suffix}-alpine" - fi - - echo "::set-output name=emqx_name::${emqx_name}" - echo "::set-output name=img_suffix::${img_suffix}" - echo "::set-output name=img_labels::${img_labels}" - - # NOTE, Pls make sure this is identical as the one in job 'docker-push-multi-arch-manifest' - - uses: docker/metadata-action@v3 + - uses: ./source/.github/actions/docker-meta id: meta with: - images: ${{ matrix.registry }}/${{ github.repository_owner }}/${{ matrix.profile }} - flavor: | - suffix=-${{ steps.pre-meta.outputs.img_suffix }} - tags: | - type=raw,value=${{ needs.prepare.outputs.DOCKER_TAG_VERSION }} - labels: - ${{ steps.pre-meta.outputs.img_labels }} + profile: ${{ matrix.profile }} + registry: ${{ matrix.registry }} + arch: ${{ matrix.arch[0] }} + otp: ${{ matrix.otp }} + builder_base: ${{ matrix.os[0] }} + owner: ${{ github.repository_owner }} + docker_tags: ${{ 
needs.prepare.outputs.DOCKER_TAG_VERSION }} - - uses: docker/build-push-action@v2 + - uses: docker/build-push-action@v3 with: push: ${{ needs.prepare.outputs.IS_EXACT_TAG }} pull: true no-cache: true - platforms: linux/${{ matrix.arch }} + platforms: linux/${{ matrix.arch[0] }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: | - BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }} + BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }} RUN_FROM=${{ matrix.os[1] }} - EMQX_NAME=${{ steps.pre-meta.outputs.emqx_name }} + EMQX_NAME=${{ steps.meta.outputs.emqx_name }} + file: source/${{ matrix.os[2] }} + context: source + + docker-elixir: + runs-on: ${{ matrix.arch[1] }} + needs: prepare + # do not build elixir images for ee for now + if: needs.prepare.outputs.BUILD_PROFILE == 'emqx' + + strategy: + fail-fast: false + matrix: + arch: + - [amd64, ubuntu-20.04] + - [arm64, aws-arm64] + profile: + - ${{ needs.prepare.outputs.BUILD_PROFILE }} + registry: + - 'docker.io' + os: + - [debian11, "debian:11-slim", "deploy/docker/Dockerfile"] + otp: + - 24.3.4.2-1 # update to latest + elixir: + - 1.13.4 # update to latest + + steps: + - uses: AutoModality/action-clean@v1 + if: matrix.arch[1] == 'aws-arm64' + - uses: actions/download-artifact@v3 + with: + name: source + path: . + - name: unzip source code + run: unzip -q source.zip + + - uses: docker/setup-buildx-action@v2 + + - name: Login for docker. + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USER }} + password: ${{ secrets.DOCKER_HUB_TOKEN }} + + - uses: ./source/.github/actions/docker-meta + id: meta + with: + profile: ${{ matrix.profile }} + registry: ${{ matrix.registry }} + arch: ${{ matrix.arch[0] }} + otp: ${{ matrix.otp }} + elixir: ${{ matrix.elixir }} + builder_base: ${{ matrix.os[0] }} + owner: ${{ github.repository_owner }} + docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }} + + - uses: docker/build-push-action@v3 + with: + push: ${{ needs.prepare.outputs.IS_EXACT_TAG }} + pull: true + no-cache: true + platforms: linux/${{ matrix.arch[0] }} + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + build-args: | + BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }} + RUN_FROM=${{ matrix.os[1] }} + EMQX_NAME=${{ steps.meta.outputs.emqx_name }} file: source/${{ matrix.os[2] }} context: source docker-push-multi-arch-manifest: # note, we only run on amd64 - if: needs.prepare.outputs.IS_EXACT_TAG == 'true' + if: needs.prepare.outputs.IS_EXACT_TAG needs: - prepare - docker - runs-on: ubuntu-latest + runs-on: ${{ matrix.arch[1] }} strategy: fail-fast: false matrix: + arch: + - [amd64, ubuntu-20.04] profile: - ${{ needs.prepare.outputs.BUILD_PROFILE }} - build_elixir: - - no_elixir os: - [alpine3.15.1, "alpine:3.15.1", "deploy/docker/Dockerfile.alpine"] - [debian11, "debian:11-slim", "deploy/docker/Dockerfile"] - # NOTE: for docker, only support latest otp version, not a matrix + # NOTE: only support latest otp version, not a matrix otp: - - 24.2.1-1 # update to latest - # - elixir: - - 1.13.4 # update to latest - arch: - - amd64 - #- arm64 - build_machine: - - aws-arm64 - - ubuntu-20.04 + - 24.3.4.2-1 # update to latest registry: - 'docker.io' - 'public.ecr.aws' exclude: - registry: 'public.ecr.aws' profile: emqx-enterprise - - arch: arm64 - build_machine: ubuntu-20.04 - - arch: amd64 - 
build_machine: aws-arm64 - include: - - arch: amd64 - profile: emqx - build_elixir: with_elixir - os: [debian11, "debian:11-slim", "deploy/docker/Dockerfile"] - otp: 24.2.1-1 - elixir: 1.13.4 - build_machine: ubuntu-20.04 - registry: docker.io steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . @@ -287,13 +276,13 @@ jobs: - name: unzip source code run: unzip -q source.zip - - uses: docker/login-action@v1 + - uses: docker/login-action@v2 if: matrix.registry == 'docker.io' with: username: ${{ secrets.DOCKER_HUB_USER }} password: ${{ secrets.DOCKER_HUB_TOKEN }} - - uses: docker/login-action@v1 + - uses: docker/login-action@v2 if: matrix.registry == 'public.ecr.aws' with: registry: public.ecr.aws @@ -301,54 +290,73 @@ jobs: password: ${{ secrets.AWS_SECRET_ACCESS_KEY }} ecr: true - - name: prepare for docker-action-parms - id: pre-meta - run: | - emqx_name=${{ matrix.profile }} - img_suffix=${{ matrix.arch }} - img_labels="org.opencontainers.image.otp.version=${{ matrix.otp }}" - - if [ ${{ matrix.build_elixir }} = 'with_elixir' ]; then - emqx_name="emqx-elixir" - img_suffix="elixir-${{ matrix.arch }}" - img_labels="org.opencontainers.image.elixir.version=${{ matrix.elixir }}\n$img_labels" - fi - - if [ ${{ matrix.profile }} = "emqx" ]; then - img_labels="org.opencontainers.image.edition=Opensource\n${img_labels}" - fi - - if [ ${{ matrix.profile }} = "emqx-enterprise" ]; then - img_labels="org.opencontainers.image.edition=Enterprise\n${img_labels}" - fi - - if [[ ${{ matrix.os[0] }} =~ "alpine" ]]; then - img_suffix="${img_suffix}-alpine" - fi - - echo "::set-output name=emqx_name::${emqx_name}" - echo "::set-output name=img_suffix::${img_suffix}" - echo "::set-output name=img_labels::${img_labels}" - - # NOTE, Pls make sure this is identical as the one in job 'docker' - - uses: docker/metadata-action@v3 + - uses: ./source/.github/actions/docker-meta id: meta with: - images: ${{ matrix.registry }}/${{ github.repository_owner }}/${{ matrix.profile }} - flavor: | - suffix=-${{ steps.pre-meta.outputs.img_suffix }} - tags: | - type=raw,value=${{ needs.prepare.outputs.DOCKER_TAG_VERSION }} - labels: - ${{ steps.pre-meta.outputs.img_labels }} + profile: ${{ matrix.profile }} + registry: ${{ matrix.registry }} + arch: ${{ matrix.arch[0] }} + otp: ${{ matrix.otp }} + builder_base: ${{ matrix.os[0] }} + owner: ${{ github.repository_owner }} + docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }} - name: update manifest for multiarch image - if: needs.prepare.outputs.IS_EXACT_TAG == 'true' working-directory: source run: | - if [ ${{ matrix.build_elixir }} = 'with_elixir' ]; then - is_latest=false - else - is_latest="${{ needs.prepare.outputs.IS_DOCKER_LATEST }}" - fi + is_latest="${{ needs.prepare.outputs.IS_DOCKER_LATEST }}" scripts/docker-create-push-manifests.sh "${{ steps.meta.outputs.tags }}" "$is_latest" + + docker-elixir-push-multi-arch-manifest: + # note, we only run on amd64 + # do not build enterprise elixir images for now + if: needs.prepare.outputs.IS_EXACT_TAG && needs.prepare.outputs.BUILD_PROFILE == 'emqx' + needs: + - prepare + - docker-elixir + runs-on: ${{ matrix.arch[1] }} + strategy: + fail-fast: false + matrix: + arch: + - [amd64, ubuntu-20.04] + profile: + - ${{ needs.prepare.outputs.BUILD_PROFILE }} + # NOTE: for docker, only support latest otp version, not a matrix + otp: + - 24.3.4.2-1 # update to latest + elixir: + - 1.13.4 # update to latest + registry: + - 'docker.io' + + steps: + - uses: 
actions/download-artifact@v3 + with: + name: source + path: . + + - name: unzip source code + run: unzip -q source.zip + + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_HUB_USER }} + password: ${{ secrets.DOCKER_HUB_TOKEN }} + + - uses: ./source/.github/actions/docker-meta + id: meta + with: + profile: ${{ matrix.profile }} + registry: ${{ matrix.registry }} + arch: ${{ matrix.arch[0] }} + otp: ${{ matrix.otp }} + elixir: ${{ matrix.elixir }} + builder_base: ${{ matrix.os[0] }} + owner: ${{ github.repository_owner }} + docker_tags: ${{ needs.prepare.outputs.DOCKER_TAG_VERSION }} + + - name: update manifest for multiarch image + working-directory: source + run: | + scripts/docker-create-push-manifests.sh "${{ steps.meta.outputs.tags }}" false diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 9c7fae257..94eb64caa 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -23,13 +23,13 @@ on: jobs: prepare: runs-on: ubuntu-20.04 - container: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04 + container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04 outputs: BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }} IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: ref: ${{ github.event.inputs.branch_or_tag }} # when input is not given, the event tag is used path: source @@ -79,7 +79,7 @@ jobs: run: | make -C source deps-all zip -ryq source.zip source/* source/.[^.]* - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: source path: source.zip @@ -95,13 +95,13 @@ jobs: otp: - 24.2.1 steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . - name: unzip source code run: Expand-Archive -Path source.zip -DestinationPath ./ - - uses: ilammy/msvc-dev-cmd@v1 + - uses: ilammy/msvc-dev-cmd@v1.12.0 - uses: erlef/setup-beam@v1 with: otp-version: ${{ matrix.otp }} @@ -127,10 +127,10 @@ jobs: echo "EMQX installed" ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall echo "EMQX uninstalled" - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 with: name: ${{ matrix.profile }}-windows - path: source/_packages/${{ matrix.profile }}/. + path: source/_packages/${{ matrix.profile }}/ mac: needs: prepare @@ -140,12 +140,12 @@ jobs: profile: - ${{ needs.prepare.outputs.BUILD_PROFILE }} otp: - - 24.2.1-1 + - 24.3.4.2-1 os: - macos-11 runs-on: ${{ matrix.os }} steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . @@ -166,16 +166,16 @@ jobs: apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 with: name: ${{ matrix.profile }}-${{ matrix.otp }} - path: _packages/${{ matrix.profile }}/. 
+ path: _packages/${{ matrix.profile }}/ linux: needs: prepare runs-on: ${{ matrix.build_machine }} container: - image: "ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" + image: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" strategy: fail-fast: false @@ -183,7 +183,7 @@ jobs: profile: - ${{ needs.prepare.outputs.BUILD_PROFILE }} otp: - - 24.2.1-1 # we test with OTP 23, but only build package on OTP 24 versions + - 24.3.4.2-1 # we test with OTP 23, but only build package on OTP 24 versions elixir: - 1.13.4 # used to split elixir packages into a separate job, since the @@ -232,14 +232,14 @@ jobs: profile: emqx-enterprise include: - profile: emqx - otp: 24.2.1-1 + otp: 24.3.4.2-1 elixir: 1.13.4 build_elixir: with_elixir arch: amd64 os: ubuntu20.04 build_machine: ubuntu-20.04 - profile: emqx - otp: 24.2.1-1 + otp: 24.3.4.2-1 elixir: 1.13.4 build_elixir: with_elixir arch: amd64 @@ -253,7 +253,7 @@ jobs: steps: - uses: AutoModality/action-clean@v1 if: matrix.build_machine == 'aws-arm64' - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . @@ -290,12 +290,12 @@ jobs: --pkgtype "${PKGTYPE}" \ --arch "${ARCH}" \ --elixir "${IsElixir}" \ - --builder "ghcr.io/emqx/emqx-builder/5.0-17:${ELIXIR}-${OTP}-${SYSTEM}" + --builder "ghcr.io/emqx/emqx-builder/5.0-18:${ELIXIR}-${OTP}-${SYSTEM}" done - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 with: name: ${{ matrix.profile }}-${{ matrix.otp }} - path: source/_packages/${{ matrix.profile }}/. + path: source/_packages/${{ matrix.profile }}/ publish_artifacts: runs-on: ubuntu-20.04 @@ -307,12 +307,12 @@ jobs: profile: - ${{ needs.prepare.outputs.BUILD_PROFILE }} otp: - - 24.2.1-1 + - 24.3.4.2-1 include: - profile: emqx otp: windows # otp version on windows is rather fixed steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: ${{ matrix.profile }}-${{ matrix.otp }} path: packages/${{ matrix.profile }} @@ -320,7 +320,7 @@ jobs: run: sudo apt-get update && sudo apt install -y dos2unix - name: get packages run: | - DEFAULT_BEAM_PLATFORM='otp24.2.1-1' + DEFAULT_BEAM_PLATFORM='otp24.3.4.2-1' set -e -u cd packages/${{ matrix.profile }} # Make a copy of the default OTP version package to a file without OTP version infix @@ -334,7 +334,7 @@ jobs: echo "$(cat $var.sha256) $var" | sha256sum -c || exit 1 done cd - - - uses: aws-actions/configure-aws-credentials@v1 + - uses: aws-actions/configure-aws-credentials@v1-node16 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index cf5df532c..0e3e3d036 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -32,18 +32,18 @@ jobs: - emqx - emqx-enterprise otp: - - 24.2.1-1 + - 24.3.4.2-1 elixir: - 1.13.4 os: - ubuntu20.04 - el8 - container: "ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" + container: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" steps: - uses: AutoModality/action-clean@v1 - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 0 - name: prepare @@ -73,11 +73,11 @@ jobs: run: | make ${EMQX_NAME}-elixir-pkg ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-pkg - - uses: 
actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: ${{ matrix.profile}}-${{ matrix.otp }}-${{ matrix.os }} path: _packages/${{ matrix.profile}}/* - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: "${{ matrix.profile }}_schema_dump" path: | @@ -94,8 +94,8 @@ jobs: otp: - 24.2.1 steps: - - uses: actions/checkout@v2 - - uses: ilammy/msvc-dev-cmd@v1 + - uses: actions/checkout@v3 + - uses: ilammy/msvc-dev-cmd@v1.12.0 - uses: erlef/setup-beam@v1 with: otp-version: ${{ matrix.otp }} @@ -119,7 +119,7 @@ jobs: echo "EMQX installed" ./_build/${{ matrix.profile }}/rel/emqx/bin/emqx uninstall echo "EMQX uninstalled" - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: windows path: _packages/${{ matrix.profile}}/* @@ -132,14 +132,14 @@ jobs: - emqx - emqx-enterprise otp: - - 24.2.1-1 + - 24.3.4.2-1 os: - macos-11 runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: prepare run: | echo "EMQX_NAME=${{ matrix.profile }}" >> $GITHUB_ENV @@ -153,7 +153,7 @@ jobs: apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: macos path: _packages/**/* @@ -167,7 +167,7 @@ jobs: - emqx-enterprise runs-on: aws-amd64 steps: - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 name: Download schema dump with: name: "${{ matrix.profile }}_schema_dump" diff --git a/.github/workflows/check_deps_integrity.yaml b/.github/workflows/check_deps_integrity.yaml index 4a6c31b5e..c5c509f0c 100644 --- a/.github/workflows/check_deps_integrity.yaml +++ b/.github/workflows/check_deps_integrity.yaml @@ -5,9 +5,9 @@ on: [pull_request, push] jobs: check_deps_integrity: runs-on: ubuntu-20.04 - container: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04 + container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Run check-deps-integrity.escript run: ./scripts/check-deps-integrity.escript diff --git a/.github/workflows/code_style_check.yaml b/.github/workflows/code_style_check.yaml index 5fbf91236..bc15d696f 100644 --- a/.github/workflows/code_style_check.yaml +++ b/.github/workflows/code_style_check.yaml @@ -5,9 +5,9 @@ on: [pull_request] jobs: code_style_check: runs-on: ubuntu-20.04 - container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 1000 - name: Work around https://github.com/actions/checkout/issues/766 diff --git a/.github/workflows/elixir_apps_check.yaml b/.github/workflows/elixir_apps_check.yaml index 440c91545..8dc9e54cd 100644 --- a/.github/workflows/elixir_apps_check.yaml +++ b/.github/workflows/elixir_apps_check.yaml @@ -8,7 +8,7 @@ jobs: elixir_apps_check: runs-on: ubuntu-latest # just use the latest builder - container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" strategy: fail-fast: false @@ -23,7 +23,7 @@ jobs: - name: fix_git_permission run: git config --global --add safe.directory '/__w/emqx/emqx' - name: Checkout - uses: 
actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 - name: ensure rebar diff --git a/.github/workflows/elixir_deps_check.yaml b/.github/workflows/elixir_deps_check.yaml index 312278caa..210eda570 100644 --- a/.github/workflows/elixir_deps_check.yaml +++ b/.github/workflows/elixir_deps_check.yaml @@ -7,11 +7,11 @@ on: [pull_request, push] jobs: elixir_deps_check: runs-on: ubuntu-20.04 - container: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04 + container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04 steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: ensure rebar run: ./scripts/ensure-rebar3.sh - name: Work around https://github.com/actions/checkout/issues/766 diff --git a/.github/workflows/elixir_release.yml b/.github/workflows/elixir_release.yml index b703f2fde..7301073a1 100644 --- a/.github/workflows/elixir_release.yml +++ b/.github/workflows/elixir_release.yml @@ -12,22 +12,11 @@ on: jobs: elixir_release_build: runs-on: ubuntu-latest - strategy: - matrix: - otp: - - 24.2.1-1 - elixir: - - 1.13.4 - os: - - ubuntu20.04 - profile: - - emqx - - emqx-enterprise - container: ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }} + container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04 steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 - name: install tools run: apt update && apt install netcat-openbsd - name: Work around https://github.com/actions/checkout/issues/766 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index a7abcb244..05dcb62e3 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -10,7 +10,7 @@ jobs: strategy: fail-fast: false steps: - - uses: aws-actions/configure-aws-credentials@v1 + - uses: aws-actions/configure-aws-credentials@v1-node16 with: aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} @@ -32,7 +32,7 @@ jobs: esac aws s3 cp --recursive s3://${{ secrets.AWS_S3_BUCKET }}/$s3dir/${{ github.ref_name }} packages cd packages - DEFAULT_BEAM_PLATFORM='otp24.2.1-1' + DEFAULT_BEAM_PLATFORM='otp24.3.4.2-1' # all packages including full-name and default-name are uploaded to s3 # but we only upload default-name packages (and elixir) as github artifacts # so we rename (overwrite) non-default packages before uploading @@ -41,7 +41,7 @@ jobs: echo "$fname -> $default_fname" mv -f "$fname" "$default_fname" done < <(find . 
-maxdepth 1 -type f | grep -E "emqx(-enterprise)?-5\.[0-9]+\.[0-9]+.*-${DEFAULT_BEAM_PLATFORM}" | grep -v elixir) - - uses: alexellis/upload-assets@0.2.2 + - uses: alexellis/upload-assets@0.4.0 env: GITHUB_TOKEN: ${{ github.token }} with: @@ -57,24 +57,6 @@ jobs: -X POST \ -d "{\"repo\":\"emqx/emqx\", \"tag\": \"${{ github.ref_name }}\" }" \ ${{ secrets.EMQX_IO_RELEASE_API }} - - uses: emqx/push-helm-action@v1 - if: github.event_name == 'release' && startsWith(github.ref_name, 'v') - with: - charts_dir: "${{ github.workspace }}/deploy/charts/emqx" - version: ${{ github.ref_name }} - aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws_region: "us-west-2" - aws_bucket_name: "repos-emqx-io" - - uses: emqx/push-helm-action@v1 - if: github.event_name == 'release' && startsWith(github.ref_name, 'e') - with: - charts_dir: "${{ github.workspace }}/deploy/charts/emqx-enterprise" - version: ${{ github.ref_name }} - aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} - aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - aws_region: "us-west-2" - aws_bucket_name: "repos-emqx-io" - name: update homebrew packages if: github.event_name == 'release' run: | @@ -96,3 +78,31 @@ jobs: -d "{\"ref\":\"v1.0.4\",\"inputs\":{\"version\": \"${{ github.ref_name }}\"}}" \ "https://api.github.com/repos/emqx/emqx-ci-helper/actions/workflows/update_emqx_homebrew.yaml/dispatches" fi + + upload-helm: + runs-on: ubuntu-20.04 + if: github.event_name == 'release' + strategy: + fail-fast: false + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ github.ref }} + - uses: emqx/push-helm-action@v1 + if: startsWith(github.ref_name, 'v') + with: + charts_dir: "${{ github.workspace }}/deploy/charts/emqx" + version: ${{ github.ref_name }} + aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws_region: "us-west-2" + aws_bucket_name: "repos-emqx-io" + - uses: emqx/push-helm-action@v1 + if: startsWith(github.ref_name, 'e') + with: + charts_dir: "${{ github.workspace }}/deploy/charts/emqx-enterprise" + version: ${{ github.ref_name }} + aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }} + aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + aws_region: "us-west-2" + aws_bucket_name: "repos-emqx-io" diff --git a/.github/workflows/run_emqx_app_tests.yaml b/.github/workflows/run_emqx_app_tests.yaml index b2f13e8be..5b68e3dda 100644 --- a/.github/workflows/run_emqx_app_tests.yaml +++ b/.github/workflows/run_emqx_app_tests.yaml @@ -12,7 +12,7 @@ jobs: strategy: matrix: otp: - - 24.2.1-1 + - 24.3.4.2-1 # no need to use more than 1 version of Elixir, since tests # run using only Erlang code. This is needed just to specify # the base image. 
@@ -24,14 +24,14 @@ jobs: - amd64 runs-on: aws-amd64 - container: "ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir}}-${{ matrix.otp }}-${{ matrix.os }}" + container: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir}}-${{ matrix.otp }}-${{ matrix.os }}" defaults: run: shell: bash steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: fetch-depth: 0 - name: run @@ -61,7 +61,7 @@ jobs: ./rebar3 eunit -v ./rebar3 ct -v ./rebar3 proper -d test/props - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 if: failure() with: name: logs diff --git a/.github/workflows/run_fvt_tests.yaml b/.github/workflows/run_fvt_tests.yaml index fbd18f1ce..0464b5e50 100644 --- a/.github/workflows/run_fvt_tests.yaml +++ b/.github/workflows/run_fvt_tests.yaml @@ -16,10 +16,10 @@ jobs: prepare: runs-on: ubuntu-20.04 # prepare source with any OTP version, no need for a matrix - container: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-alpine3.15.1 + container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-alpine3.15.1 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: path: source fetch-depth: 0 @@ -27,7 +27,7 @@ jobs: run: | make -C source deps-all zip -ryq source.zip source/* source/.[^.]* - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: source path: source.zip @@ -49,7 +49,7 @@ jobs: os: - ["alpine3.15.1", "alpine:3.15.1"] otp: - - 24.2.1-1 + - 24.3.4.2-1 elixir: - 1.13.4 arch: @@ -58,7 +58,7 @@ jobs: - uses: erlef/setup-beam@v1 with: otp-version: "24.2" - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . @@ -68,7 +68,7 @@ jobs: - name: make docker image working-directory: source env: - EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }} + EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }} EMQX_RUNNER: ${{ matrix.os[1] }} run: | make ${{ matrix.profile }}-docker @@ -120,7 +120,7 @@ jobs: os: - ["debian11", "debian:11-slim"] otp: - - 24.2.1-1 + - 24.3.4.2-1 elixir: - 1.13.4 arch: @@ -131,7 +131,7 @@ jobs: - uses: erlef/setup-beam@v1 with: otp-version: "24.2" - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . 
@@ -141,7 +141,7 @@ jobs: - name: make docker image working-directory: source env: - EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-17:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }} + EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }} EMQX_RUNNER: ${{ matrix.os[1] }} run: | make ${{ matrix.profile }}-docker @@ -207,7 +207,7 @@ jobs: echo "waiting ${{ matrix.profile }} cluster scale" sleep 1 done - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: emqx/paho.mqtt.testing ref: develop-4.0 diff --git a/.github/workflows/run_gitlint.yaml b/.github/workflows/run_gitlint.yaml index 01b35461f..9eb03c0b8 100644 --- a/.github/workflows/run_gitlint.yaml +++ b/.github/workflows/run_gitlint.yaml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout source code - uses: actions/checkout@master + uses: actions/checkout@v3 - name: Install gitlint run: | sudo apt-get update diff --git a/.github/workflows/run_jmeter_tests.yaml b/.github/workflows/run_jmeter_tests.yaml index c2b0442ca..c6d819ba2 100644 --- a/.github/workflows/run_jmeter_tests.yaml +++ b/.github/workflows/run_jmeter_tests.yaml @@ -5,7 +5,7 @@ on: tags: - "v5.*" pull_request: - branchs: + branches: - "master" jobs: @@ -23,11 +23,11 @@ jobs: JMETER_VERSION: 5.4.3 run: | wget --no-verbose --no-check-certificate -O /tmp/apache-jmeter.tgz https://downloads.apache.org/jmeter/binaries/apache-jmeter-$JMETER_VERSION.tgz - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: apache-jmeter.tgz path: /tmp/apache-jmeter.tgz - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: zip emqx docker image id: build_docker if: endsWith(github.repository, 'emqx') @@ -38,7 +38,7 @@ jobs: VSN="$(./pkg-vsn.sh $PROFILE)" echo "::set-output name=version::${VSN}" docker save -o emqx.tar emqx/emqx:${VSN} - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: emqx.tar path: ./emqx.tar @@ -60,8 +60,8 @@ jobs: - uses: erlef/setup-beam@v1 with: otp-version: "24.2" - - uses: actions/checkout@v2 - - uses: actions/download-artifact@v2 + - uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 with: name: emqx.tar path: /tmp @@ -89,17 +89,19 @@ jobs: done docker ps -a echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt ref: broker-autotest path: scripts - - uses: actions/setup-java@v1 + - uses: actions/setup-java@v3 with: java-version: '8.0.282' # The JDK version to make available on the path. 
java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk architecture: x64 # (x64 or x86) - defaults to x64 - - uses: actions/download-artifact@v2 + # https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md + distribution: 'zulu' + - uses: actions/download-artifact@v3 with: name: apache-jmeter.tgz path: /tmp @@ -127,7 +129,7 @@ jobs: echo "check logs filed" exit 1 fi - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 if: always() with: name: jmeter_logs @@ -154,8 +156,8 @@ jobs: - uses: erlef/setup-beam@v1 with: otp-version: "24.2" - - uses: actions/checkout@v2 - - uses: actions/download-artifact@v2 + - uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 with: name: emqx.tar path: /tmp @@ -186,17 +188,19 @@ jobs: docker ps -a echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV echo PGSQL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' pgsql-tls) >> $GITHUB_ENV - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt ref: broker-autotest path: scripts - - uses: actions/setup-java@v1 + - uses: actions/setup-java@v3 with: java-version: '8.0.282' # The JDK version to make available on the path. java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk architecture: x64 # (x64 or x86) - defaults to x64 - - uses: actions/download-artifact@v2 + # https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md + distribution: 'zulu' + - uses: actions/download-artifact@v3 with: name: apache-jmeter.tgz path: /tmp @@ -234,7 +238,7 @@ jobs: echo "check logs filed" exit 1 fi - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 if: always() with: name: jmeter_logs @@ -258,8 +262,8 @@ jobs: - uses: erlef/setup-beam@v1 with: otp-version: "24.2" - - uses: actions/checkout@v2 - - uses: actions/download-artifact@v2 + - uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 with: name: emqx.tar path: /tmp @@ -290,17 +294,19 @@ jobs: docker ps -a echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV echo MYSQL_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mysql-tls) >> $GITHUB_ENV - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt ref: broker-autotest path: scripts - - uses: actions/setup-java@v1 + - uses: actions/setup-java@v3 with: java-version: '8.0.282' # The JDK version to make available on the path. 
java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk architecture: x64 # (x64 or x86) - defaults to x64 - - uses: actions/download-artifact@v2 + # https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md + distribution: 'zulu' + - uses: actions/download-artifact@v3 with: name: apache-jmeter.tgz path: /tmp @@ -338,7 +344,7 @@ jobs: echo "check logs filed" exit 1 fi - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 if: always() with: name: jmeter_logs @@ -358,8 +364,8 @@ jobs: - uses: erlef/setup-beam@v1 with: otp-version: "24.2" - - uses: actions/checkout@v2 - - uses: actions/download-artifact@v2 + - uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 with: name: emqx.tar path: /tmp @@ -387,7 +393,7 @@ jobs: done docker ps -a echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt ref: broker-autotest @@ -400,12 +406,14 @@ jobs: cd target docker run --name jwks_server --network emqx_bridge --ip 172.100.239.88 -d -v $(pwd)/jwkserver-0.0.1.jar:/jwks_server/jwkserver-0.0.1.jar --workdir /jwks_server openjdk:8-jdk bash \ -c "java -jar jwkserver-0.0.1.jar" - - uses: actions/setup-java@v1 + - uses: actions/setup-java@v3 with: java-version: '8.0.282' # The JDK version to make available on the path. java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk architecture: x64 # (x64 or x86) - defaults to x64 - - uses: actions/download-artifact@v2 + # https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md + distribution: 'zulu' + - uses: actions/download-artifact@v3 with: name: apache-jmeter.tgz path: /tmp @@ -434,7 +442,7 @@ jobs: echo "check logs filed" exit 1 fi - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 if: always() with: name: jmeter_logs @@ -455,8 +463,8 @@ jobs: - uses: erlef/setup-beam@v1 with: otp-version: "24.2" - - uses: actions/checkout@v2 - - uses: actions/download-artifact@v2 + - uses: actions/checkout@v3 + - uses: actions/download-artifact@v3 with: name: emqx.tar path: /tmp @@ -485,17 +493,19 @@ jobs: done docker ps -a echo HAPROXY_IP=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' haproxy) >> $GITHUB_ENV - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: emqx/emqx-fvt ref: broker-autotest path: scripts - - uses: actions/setup-java@v1 + - uses: actions/setup-java@v3 with: java-version: '8.0.282' # The JDK version to make available on the path. 
java-package: jdk # (jre, jdk, or jdk+fx) - defaults to jdk architecture: x64 # (x64 or x86) - defaults to x64 - - uses: actions/download-artifact@v2 + # https://github.com/actions/setup-java/blob/main/docs/switching-to-v2.md + distribution: 'zulu' + - uses: actions/download-artifact@v3 with: name: apache-jmeter.tgz path: /tmp @@ -524,7 +534,7 @@ jobs: echo "check logs filed" exit 1 fi - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 if: always() with: name: jmeter_logs @@ -534,7 +544,7 @@ jobs: runs-on: ubuntu-latest needs: [advanced_feat,pgsql_authn_authz,JWT_authn,mysql_authn_authz,built_in_database_authn_authz] steps: - - uses: geekyeggo/delete-artifact@v1 + - uses: geekyeggo/delete-artifact@v2 with: name: emqx.tar diff --git a/.github/workflows/run_relup_tests.yaml b/.github/workflows/run_relup_tests.yaml index 7f33d0a31..ea0dff9e9 100644 --- a/.github/workflows/run_relup_tests.yaml +++ b/.github/workflows/run_relup_tests.yaml @@ -16,7 +16,7 @@ on: jobs: relup_test_plan: runs-on: ubuntu-20.04 - container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" outputs: CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }} OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }} @@ -24,7 +24,7 @@ jobs: run: shell: bash steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 name: Checkout with: path: emqx @@ -45,7 +45,7 @@ jobs: cd emqx make emqx-tgz make emqx-enterprise-tgz - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 name: Upload built emqx and test scenario with: name: emqx_built @@ -75,7 +75,7 @@ jobs: - uses: erlef/setup-beam@v1 with: otp-version: "24.2" - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: repository: hawk/lux ref: lux-2.8.1 @@ -88,7 +88,7 @@ jobs: ./configure make echo "$(pwd)/bin" >> $GITHUB_PATH - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 name: Download built emqx and test scenario with: name: emqx_built @@ -114,7 +114,7 @@ jobs: docker logs node2.emqx.io | tee lux_logs/emqx2.log exit 1 fi - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 name: Save debug data if: failure() with: diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index 540383ed6..03030908c 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -17,12 +17,12 @@ jobs: prepare: runs-on: ubuntu-20.04 # prepare source with any OTP version, no need for a matrix - container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" outputs: fast_ct_apps: ${{ steps.run_find_apps.outputs.fast_ct_apps }} docker_ct_apps: ${{ steps.run_find_apps.outputs.docker_ct_apps }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 with: path: source fetch-depth: 0 @@ -43,7 +43,7 @@ jobs: ./rebar3 as test compile cd .. 
zip -ryq source.zip source/* source/.[^.]* - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: source path: source.zip @@ -60,11 +60,11 @@ jobs: defaults: run: shell: bash - container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" steps: - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . @@ -86,7 +86,7 @@ jobs: working-directory: source run: make proper - - uses: actions/upload-artifact@v2 + - uses: actions/upload-artifact@v3 with: name: coverdata path: source/_build/test/cover @@ -107,7 +107,7 @@ jobs: steps: - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . @@ -121,12 +121,13 @@ jobs: PGSQL_TAG: 13 REDIS_TAG: 6 run: | + rm _build/default/lib/rocksdb/_build/cmake/CMakeCache.txt ./scripts/ct/run.sh --app ${{ matrix.app_name }} - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 with: name: coverdata path: source/_build/test/cover - - uses: actions/upload-artifact@v1 + - uses: actions/upload-artifact@v3 if: failure() with: name: logs_${{ matrix.otp_release }}-${{ matrix.profile }} @@ -143,14 +144,14 @@ jobs: - emqx-enterprise runs-on: aws-amd64 - container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" defaults: run: shell: bash steps: - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . @@ -200,17 +201,17 @@ jobs: - ct - ct_docker runs-on: ubuntu-20.04 - container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" steps: - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 with: name: source path: . 
- name: unzip source code run: unzip -q source.zip - - uses: actions/download-artifact@v2 + - uses: actions/download-artifact@v3 name: download coverdata with: name: coverdata diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml index 04a92585a..56a6645e1 100644 --- a/.github/workflows/shellcheck.yaml +++ b/.github/workflows/shellcheck.yaml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-20.04 steps: - name: Checkout source code - uses: actions/checkout@master + uses: actions/checkout@v3 - name: Install shellcheck run: | sudo apt-get update diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 378a695cf..32abe1721 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -17,7 +17,7 @@ jobs: steps: - name: Close Stale Issues - uses: actions/stale@v4.1.0 + uses: actions/stale@v6 with: days-before-stale: 7 days-before-close: 7 diff --git a/.gitignore b/.gitignore index d8b3806e3..d01c764d0 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,4 @@ apps/emqx/test/emqx_static_checks_data/master.bpapi # rendered configurations *.conf.rendered lux_logs/ +/.prepare diff --git a/.tool-versions b/.tool-versions index 0b6e665c1..0f7c9b32e 100644 --- a/.tool-versions +++ b/.tool-versions @@ -1,2 +1,2 @@ -erlang 24.2.1-1 +erlang 24.3.4.2-1 elixir 1.13.4-otp-24 diff --git a/CHANGES-5.0.md b/CHANGES-5.0.md deleted file mode 100644 index 20d972096..000000000 --- a/CHANGES-5.0.md +++ /dev/null @@ -1,199 +0,0 @@ -# 5.0.9 - -## Enhancements - -* Add `cert_common_name` and `cert_subject` placeholder support for authz_http and authz_mongo.[#8973](https://github.com/emqx/emqx/pull/8973) - -## Bug fixes - -* Check ACLs for last will testament topic before publishing the message. [#8930](https://github.com/emqx/emqx/pull/8930) -* Fix GET /listeners API crash When some nodes still in initial configuration. [#9002](https://github.com/emqx/emqx/pull/9002) -* Fix empty variable interpolation in authentication and authorization. Placeholders for undefined variables are rendered now as empty strings and do not cause errors anymore. [#8963](https://github.com/emqx/emqx/pull/8963) -* Fix the latency statistics error of the slow subscription module when `stats_type` is `internal` or `response`. [#8986](https://github.com/emqx/emqx/pull/8986) - -# 5.0.8 - -## Bug fixes - -* Fix exhook `client.authorize` never being execauted. [#8780](https://github.com/emqx/emqx/pull/8780) -* Fix JWT plugin don't support non-integer timestamp claims. [#8867](https://github.com/emqx/emqx/pull/8867) -* Avoid publishing will message when client fails to auhtenticate. [#8887](https://github.com/emqx/emqx/pull/8887) -* Speed up dispatching of shared subscription messages in a cluster [#8893](https://github.com/emqx/emqx/pull/8893) -* Fix the extra / prefix when CoAP gateway parsing client topics. [#8658](https://github.com/emqx/emqx/pull/8658) -* Speed up updating the configuration, When some nodes in the cluster are down. [#8857](https://github.com/emqx/emqx/pull/8857) -* Fix delayed publish inaccurate caused by os time change. [#8926](https://github.com/emqx/emqx/pull/8926) -* Fix that EMQX can't start when the retainer is disabled [#8911](https://github.com/emqx/emqx/pull/8911) -* Fix that redis authn will deny the unknown users [#8934](https://github.com/emqx/emqx/pull/8934) -* Fix ExProto UDP client keepalive checking error. 
- This causes the clients to not expire as long as a new UDP packet arrives [#8866](https://github.com/emqx/emqx/pull/8866) -* Fix that MQTT Bridge message payload could be empty string. [#8949](https://github.com/emqx/emqx/pull/8949) - -## Enhancements - -* Print a warning message when boot with the default (insecure) Erlang cookie. [#8905](https://github.com/emqx/emqx/pull/8905) -* Change the `/gateway` API path to plural form. [#8823](https://github.com/emqx/emqx/pull/8823) -* Don't allow updating config items when they already exist in `local-override.conf`. [#8851](https://github.com/emqx/emqx/pull/8851) -* Remove `node.etc_dir` from emqx.conf, because it is never used. - Also allow user to customize the logging directory [#8892](https://github.com/emqx/emqx/pull/8892) -* Added a new API `POST /listeners` for creating listener. [#8876](https://github.com/emqx/emqx/pull/8876) -* Close ExProto client process immediately if it's keepalive timeouted. [#8866](https://github.com/emqx/emqx/pull/8866) -* Upgrade grpc-erl driver to 0.6.7 to support batch operation in sending stream. [#8866](https://github.com/emqx/emqx/pull/8866) - -# 5.0.7 - -## Bug fixes - -* Remove `will_msg` (not used) field from the client API. [#8721](https://github.com/emqx/emqx/pull/8721) -* Fix `$queue` topic name error in management API return. [#8728](https://github.com/emqx/emqx/pull/8728) -* Fix race condition which may cause `client.connected` and `client.disconnected` out of order. [#8625](https://github.com/emqx/emqx/pull/8625) -* Fix quic listener default idle timeout's type. [#8826](https://github.com/emqx/emqx/pull/8826) - -## Enhancements - -* Do not auto-populate default SSL cipher suites, so that the configs are less bloated. [#8769](https://github.com/emqx/emqx/pull/8769) - -# 5.0.6 - -## Bug fixes - -* Upgrade Dashboard version to fix an issue where the node status was not displayed correctly. [#8771](https://github.com/emqx/emqx/pull/8771) - -# 5.0.5 - -## Bug fixes - -* Allow changing the license type from key to file (and vice-versa). [#8598](https://github.com/emqx/emqx/pull/8598) -* Add back http connector config keys `max_retries` `retry_interval` as deprecated fields [#8672](https://github.com/emqx/emqx/issues/8672) - This caused upgrade failure in 5.0.4, because it would fail to boot on configs created from older version. - -## Enhancements - -* Add `bootstrap_users_file` configuration to add default Dashboard username list, which is only added when EMQX is first started. -* The license is now copied to all nodes in the cluster when it's reloaded. [#8598](https://github.com/emqx/emqx/pull/8598) -* Added a HTTP API to manage licenses. [#8610](https://github.com/emqx/emqx/pull/8610) -* Updated `/nodes` API node_status from `Running/Stopped` to `running/stopped`. [#8642](https://github.com/emqx/emqx/pull/8642) -* Improve handling of placeholder interpolation errors [#8635](https://github.com/emqx/emqx/pull/8635) -* Better logging on unknown object IDs. [#8670](https://github.com/emqx/emqx/pull/8670) -* The bind option support `:1883` style. [#8758](https://github.com/emqx/emqx/pull/8758) - -# 5.0.4 - -## Bug fixes - -* The `data/configs/cluster-override.conf` is cleared to 0KB if `hocon_pp:do/2` failed [commits/71f64251](https://github.com/emqx/emqx/pull/8443/commits/71f642518a683cc91a32fd542aafaac6ef915720) -* Improve the health_check for webhooks. 
- Prior to this change, the webhook only checks the connectivity of the TCP port using `gen_tcp:connect/2`, so - if it's a HTTPs server, we didn't check if TLS handshake was successful. - [commits/6b45d2ea](https://github.com/emqx/emqx/commit/6b45d2ea9fde6d3b4a5b007f7a8c5a1c573d141e) -* The `created_at` field of rules is missing after emqx restarts. [commits/5fc09e6b](https://github.com/emqx/emqx/commit/5fc09e6b950c340243d7be627a0ce1700691221c) -* The rule engine's jq function now works even when the path to the EMQX install dir contains spaces [jq#35](https://github.com/emqx/jq/pull/35) [#8455](https://github.com/emqx/emqx/pull/8455) -* Avoid applying any ACL checks on superusers [#8452](https://github.com/emqx/emqx/pull/8452) -* Fix statistics related system topic name error -* Fix AuthN JWKS SSL schema. Using schema in `emqx_schema`. [#8458](https://github.com/emqx/emqx/pull/8458) -* `sentinel` field should be required when AuthN/AuthZ Redis using sentinel mode. [#8458](https://github.com/emqx/emqx/pull/8458) -* Fix bad swagger format. [#8517](https://github.com/emqx/emqx/pull/8517) -* Fix `chars_limit` is not working when `formatter` is `json`. [#8518](http://github.com/emqx/emqx/pull/8518) -* Ensuring that exhook dispatches the client events are sequential. [#8530](https://github.com/emqx/emqx/pull/8530) -* Avoid using RocksDB backend for persistent sessions when such backend is unavailable. [#8528](https://github.com/emqx/emqx/pull/8528) -* Fix AuthN `cert_subject` and `cert_common_name` placeholder rendering failure. [#8531](https://github.com/emqx/emqx/pull/8531) -* Support listen on an IPv6 address, e.g: [::1]:1883 or ::1:1883. [#8547](https://github.com/emqx/emqx/pull/8547) -* GET '/rules' support for pagination and fuzzy search. [#8472](https://github.com/emqx/emqx/pull/8472) - **‼️ Note** : The previous API only returns array: `[RuleObj1,RuleObj2]`, after updating, it will become - `{"data": [RuleObj1,RuleObj2], "meta":{"count":2, "limit":100, "page":1}`, - which will carry the paging meta information. -* Fix the issue that webhook leaks TCP connections. [ehttpc#34](https://github.com/emqx/ehttpc/pull/34), [#8580](https://github.com/emqx/emqx/pull/8580) - -## Enhancements - -* Improve the dashboard listener startup log, the listener name is no longer spliced with port information, - and the colon(:) is no longer displayed when IP is not specified. [#8480](https://github.com/emqx/emqx/pull/8480) -* Remove `/configs/listeners` API, use `/listeners/` instead. [#8485](https://github.com/emqx/emqx/pull/8485) -* Optimize performance of builtin database operations in processes with long message queue [#8439](https://github.com/emqx/emqx/pull/8439) -* Improve authentication tracing. [#8554](https://github.com/emqx/emqx/pull/8554) -* Standardize the '/listeners' and `/gateway//listeners` API fields. - It will introduce some incompatible updates, see [#8571](https://github.com/emqx/emqx/pull/8571) -* Add option to perform GC on connection process after TLS/SSL handshake is performed. [#8637](https://github.com/emqx/emqx/pull/8637) - -# 5.0.3 - -## Bug fixes - -* Websocket listener failed to read headers `X-Forwarded-For` and `X-Forwarded-Port` [#8415](https://github.com/emqx/emqx/pull/8415) -* Deleted `cluster_singleton` from MQTT bridge config document. 
This config is no longer applicable in 5.0 [#8407](https://github.com/emqx/emqx/pull/8407) -* Fix `emqx/emqx:latest` docker image publish to use the Erlang flavor, but not Elixir flavor [#8414](https://github.com/emqx/emqx/pull/8414) -* Changed the `exp` field in JWT auth to be optional rather than required to fix backwards compatability with 4.X releases. [#8425](https://github.com/emqx/emqx/pull/8425) - -## Enhancements - -* Improve the speed of dashboard's HTTP API routing rule generation, which sometimes causes timeout [#8438](https://github.com/emqx/emqx/pull/8438) - -# 5.0.2 - -Announcement: EMQX team has decided to stop supporting relup for opensource edition. -Going forward, it will be an enterprise-only feature. - -Main reason: relup requires carefully crafted upgrade instructions from ALL previous versions. - -For example, 4.3 is now at 4.3.16, we have `4.3.0->4.3.16`, `4.3.1->4.3.16`, ... 16 such upgrade paths in total to maintain. -This had been the biggest obstacle for EMQX team to act agile enough in delivering enhancements and fixes. - -## Enhancements - -## Bug fixes - -* Fixed a typo in `bin/emqx` which affects MacOs release when trying to enable Erlang distribution over TLS [#8398](https://github.com/emqx/emqx/pull/8398) -* Restricted shell was accidentally disabled in 5.0.1, it has been added back. [#8396](https://github.com/emqx/emqx/pull/8396) - -# 5.0.1 - -5.0.1 is built on [Erlang/OTP 24.2.1-1](https://github.com/emqx/otp/tree/OTP-24.2.1-1). Same as 5.0.0. - -5.0.0 (like 4.4.x) had Erlang/OTP version number in the package name. -This is because we wanted to release different flavor packages (on different Elixir/Erlang/OTP platforms). - -However the long package names also causes confusion, as users may not know which to choose if there were more than -one presented at the same time. - -Going forward, (starting from 5.0.1), packages will be released in both default (short) and flavored (long) package names. - -For example: `emqx-5.0.1-otp24.2.1-1-ubuntu20.04-amd64.tar.gz`, -but only the default one is presented to the users: `emqx-5.0.1-ubuntu20.04-amd64.tar.gz`. - -In case anyone wants to try a different flavor package, it can be downlowded from the public s3 bucket, -for example: -https://s3.us-west-2.amazonaws.com/packages.emqx/emqx-ce/v5.0.1/emqx-5.0.1-otp24.2.1-1-ubuntu20.04-arm64.tar.gz - -Exceptions: - -* Windows package is always presented with short name (currently on Erlang/OTP 24.2.1). -* Elixir package name is flavored with both Elixir and Erlang/OTP version numbers, - for example: `emqx-5.0.1-elixir1.13.4-otp24.2.1-1-ubuntu20.04-amd64.tar.gz` - -## Enhancements - -* Removed management API auth for prometheus scraping endpoint /api/v5/prometheus/stats [#8299](https://github.com/emqx/emqx/pull/8299) -* Added more TCP options for exhook (gRPC) connections. [#8317](https://github.com/emqx/emqx/pull/8317) -* HTTP Servers used for authentication and authorization will now indicate the result via the response body. 
[#8374](https://github.com/emqx/emqx/pull/8374) [#8377](https://github.com/emqx/emqx/pull/8377) -* Bulk subscribe/unsubscribe APIs [#8356](https://github.com/emqx/emqx/pull/8356) -* Added exclusive subscription [#8315](https://github.com/emqx/emqx/pull/8315) -* Provide authentication counter metrics [#8352](https://github.com/emqx/emqx/pull/8352) [#8375](https://github.com/emqx/emqx/pull/8375) -* Do not allow admin user self-deletion [#8286](https://github.com/emqx/emqx/pull/8286) -* After restart, ensure to copy `cluster-override.conf` from the clustered node which has the greatest `tnxid`. [#8333](https://github.com/emqx/emqx/pull/8333) - -## Bug fixes - -* A bug fix ported from 4.x: allow deleting subscriptions from `client.subscribe` hookpoint callback result. [#8304](https://github.com/emqx/emqx/pull/8304) [#8347](https://github.com/emqx/emqx/pull/8377) -* Fixed Erlang distribution over TLS [#8309](https://github.com/emqx/emqx/pull/8309) -* Made possible to override authentication configs from environment variables [#8323](https://github.com/emqx/emqx/pull/8309) -* Made authentication passwords in Mnesia database backward compatible to 4.x, so we can support data migration better. [#8351](https://github.com/emqx/emqx/pull/8351) -* Fix plugins upload for rpm/deb installations [#8379](https://github.com/emqx/emqx/pull/8379) -* Sync data/authz/acl.conf and data/certs from clustered nodes after a new node joins the cluster [#8369](https://github.com/emqx/emqx/pull/8369) -* Ensure auto-retry of failed resources [#8371](https://github.com/emqx/emqx/pull/8371) -* Fix the issue that the count of `packets.connack.auth_error` is inaccurate when the client uses a protocol version below MQTT v5.0 to access [#8178](https://github.com/emqx/emqx/pull/8178) - -## Others - -* Rate limiter interface is hidden so far, it's subject to a UX redesign. -* QUIC library upgraded to 0.0.14. -* Now the default packages will be released withot otp version number in the package name. -* Renamed config exmpale file name in `etc` dir. 
diff --git a/Dockerfile.ubuntu20.04.runner b/Dockerfile.ubuntu20.04.runner index 124021c89..1bb44a6e9 100644 --- a/Dockerfile.ubuntu20.04.runner +++ b/Dockerfile.ubuntu20.04.runner @@ -27,15 +27,14 @@ VOLUME ["/opt/emqx/log", "/opt/emqx/data"] # emqx will occupy these port: # - 1883 port for MQTT -# - 8081 for mgmt API # - 8083 for WebSocket/HTTP # - 8084 for WSS/HTTPS # - 8883 port for MQTT(SSL) # - 11883 port for internal MQTT/TCP -# - 18083 for dashboard +# - 18083 for dashboard and API # - 4370 default Erlang distrbution port # - 5369 for backplain gen_rpc -EXPOSE 1883 8081 8083 8084 8883 11883 18083 4370 5369 +EXPOSE 1883 8083 8084 8883 11883 18083 4370 5369 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"] diff --git a/Makefile b/Makefile index 3dd11aec3..63970bbad 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d export EMQX_DEFAULT_RUNNER = debian:11-slim export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh) export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh) -export EMQX_DASHBOARD_VERSION ?= v1.0.9 +export EMQX_DASHBOARD_VERSION ?= v1.1.1 export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.5 export EMQX_REL_FORM ?= tgz export QUICER_DOWNLOAD_FROM_RELEASE = 1 @@ -30,12 +30,10 @@ export REBAR_GIT_CLONE_OPTIONS += --depth=1 .PHONY: default default: $(REBAR) $(PROFILE) -.PHONY: prepare -prepare: FORCE +.prepare: @$(SCRIPTS)/git-hooks-init.sh # this is no longer needed since 5.0 but we keep it anyway @$(SCRIPTS)/prepare-build-deps.sh - -FORCE: + @touch .prepare .PHONY: all all: $(REBAR) $(PROFILES) @@ -44,7 +42,23 @@ all: $(REBAR) $(PROFILES) ensure-rebar3: @$(SCRIPTS)/ensure-rebar3.sh -$(REBAR): prepare ensure-rebar3 +$(REBAR): .prepare ensure-rebar3 + +.PHONY: ensure-hex +ensure-hex: + @mix local.hex --if-missing --force + +.PHONY: ensure-mix-rebar3 +ensure-mix-rebar3: $(REBAR) + @mix local.rebar rebar3 $(CURDIR)/rebar3 --if-missing --force + +.PHONY: ensure-mix-rebar +ensure-mix-rebar: $(REBAR) + @mix local.rebar --if-missing --force + +.PHONY: mix-deps-get +mix-deps-get: $(ELIXIR_COMMON_DEPS) + @mix deps.get .PHONY: eunit eunit: $(REBAR) conf-segs diff --git a/README-CN.md b/README-CN.md index 250d3e1c6..ea584c66a 100644 --- a/README-CN.md +++ b/README-CN.md @@ -32,12 +32,6 @@ EMQX 自 2013 年在 GitHub 发布开源版本以来,获得了来自 50 多个 docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx:latest ``` -或直接试用 EMQX 企业版(已内置 10 个并发连接的永不过期 License) - -``` -docker run -d --name emqx-ee -p 1883:1883 -p 8081:8081 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx-ee:latest -``` - 接下来请参考 [入门指南](https://www.emqx.io/docs/zh/v5.0/getting-started/getting-started.html#启动-emqx) 开启您的 EMQX 之旅。 #### 在 Kubernetes 上运行 EMQX 集群 diff --git a/README-RU.md b/README-RU.md index cd8943795..76b1f0ae0 100644 --- a/README-RU.md +++ b/README-RU.md @@ -28,13 +28,7 @@ #### Установка EMQX с помощью Docker ``` -docker run -d --name emqx -p 1883:1883 -p 8081:8081 -p 8083:8083 -p 8883:8883 -p 8084:8084 -p 18083:18083 emqx/emqx -``` - -Или запустите EMQX Enterprise со встроенной бессрочной лицензией на 10 соединений. 
- -``` -docker run -d --name emqx-ee -p 1883:1883 -p 8081:8081 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx-ee:latest +docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8883:8883 -p 8084:8084 -p 18083:18083 emqx/emqx ``` Чтобы ознакомиться с функциональностью EMQX, пожалуйста, следуйте [руководству по началу работы](https://www.emqx.io/docs/en/v5.0/getting-started/getting-started.html#start-emqx). diff --git a/README.md b/README.md index 4461f70c6..9543cf0de 100644 --- a/README.md +++ b/README.md @@ -33,12 +33,6 @@ The simplest way to set up EMQX is to create a managed deployment with EMQX Clou docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx:latest ``` -Or install EMQX Enterprise with a built-in license for ten connections that never expire. - -``` -docker run -d --name emqx-ee -p 1883:1883 -p 8081:8081 -p 8083:8083 -p 8084:8084 -p 8883:8883 -p 18083:18083 emqx/emqx-ee:latest -``` - Next, please follow the [getting started guide](https://www.emqx.io/docs/en/v5.0/getting-started/getting-started.html#start-emqx) to tour the EMQX features. #### Run EMQX cluster on kubernetes diff --git a/apps/emqx/i18n/emqx_limiter_i18n.conf b/apps/emqx/i18n/emqx_limiter_i18n.conf index 99ecc9e1e..3657df694 100644 --- a/apps/emqx/i18n/emqx_limiter_i18n.conf +++ b/apps/emqx/i18n/emqx_limiter_i18n.conf @@ -113,7 +113,7 @@ the check/consume will succeed, but it will be forced to wait for a short period burst { desc { - en: """The burst, This value is based on rate.
+ en: """The burst. This value is based on rate.
This value + rate = the maximum limit that can be achieved when limiter burst.""" zh: """突发速率。 突发速率允许短时间内速率超过设置的速率值,突发速率 + 速率 = 当前桶能达到的最大速率值""" @@ -171,7 +171,7 @@ Once the limit is reached, the restricted client will be slow down even be hung en: """The bytes_in limiter. This is used to limit the inbound bytes rate for this EMQX node. Once the limit is reached, the restricted client will be slow down even be hung for a while.""" - zh: """流入字节率控制器. + zh: """流入字节率控制器。 这个是用来控制当前节点上的数据流入的字节率,每条消息将会消耗和其二进制大小等量的令牌,当达到最大速率后,会话将会被限速甚至被强制挂起一小段时间""" } label: { diff --git a/apps/emqx/i18n/emqx_schema_i18n.conf b/apps/emqx/i18n/emqx_schema_i18n.conf index 17e59a4dd..714a08704 100644 --- a/apps/emqx/i18n/emqx_schema_i18n.conf +++ b/apps/emqx/i18n/emqx_schema_i18n.conf @@ -3,10 +3,10 @@ emqx_schema { force_shutdown_enable { desc { en: "Enable `force_shutdown` feature." - zh: "启用 `force_shutdown` 功能" + zh: "启用 `force_shutdown` 功能。" } label { - en: "Enable `force_shutdown` feature." + en: "Enable `force_shutdown` feature" zh: "启用 `force_shutdown` 功能" } } @@ -14,7 +14,7 @@ emqx_schema { force_shutdown_max_message_queue_len { desc { en: "Maximum message queue length." - zh: "消息队列的最大长度" + zh: "消息队列的最大长度。" } label { en: "Maximum mailbox queue length of process." @@ -25,7 +25,7 @@ emqx_schema { force_shutdown_max_heap_size { desc { en: "Total heap size" - zh: "Heap 的总大小" + zh: "Heap 的总大小。" } label { en: "Total heap size" @@ -35,8 +35,8 @@ emqx_schema { overload_protection_enable { desc { - en: "React on system overload or not" - zh: "是否对系统过载做出反应" + en: "React on system overload or not." + zh: "是否对系统过载做出反应。" } label { en: "React on system overload or not" @@ -46,9 +46,8 @@ emqx_schema { overload_protection_backoff_delay { desc { - en: "Some unimportant tasks could be delayed " - "for execution, here set the delays in ms" - zh: "一些不重要的任务可能会延迟执行,以毫秒为单位设置延迟" + en: "When at high load, some unimportant tasks could be delayed for execution, here set the duration in milliseconds precision." + zh: "高负载时,一些不重要的任务可能会延迟执行,在这里设置允许延迟的时间。单位为毫秒。" } label { en: "Delay Time" @@ -58,8 +57,8 @@ emqx_schema { overload_protection_backoff_gc { desc { - en: "Skip forceful GC if necessary" - zh: "如有必要,跳过强制GC" + en: "When at high load, skip forceful GC." + zh: "高负载时,跳过强制 GC。" } label { en: "Skip GC" @@ -69,8 +68,8 @@ emqx_schema { overload_protection_backoff_hibernation { desc { - en: "Skip process hibernation if necessary" - zh: "如有必要,跳过进程休眠" + en: "When at high load, skip process hibernation." + zh: "高负载时,跳过进程休眠。" } label { en: "Skip hibernation" @@ -80,8 +79,8 @@ emqx_schema { overload_protection_backoff_new_conn { desc { - en: "Close new incoming connections if necessary" - zh: "如有必要,关闭新进来的连接" + en: "When at high load, close new incoming connections." + zh: "高负载时,拒绝新进来的客户端连接。" } label { en: "Close new connections" @@ -92,7 +91,7 @@ emqx_schema { conn_congestion_enable_alarm { desc { en: "Enable or disable connection congestion alarm." - zh: "启用或者禁用连接阻塞告警功能" + zh: "启用或者禁用连接阻塞告警功能。" } label { en: "Enable/disable congestion alarm" @@ -102,14 +101,14 @@ emqx_schema { conn_congestion_min_alarm_sustain_duration { desc { - en: "Minimal time before clearing the alarm.\n\n" - "The alarm is cleared only when there's no pending data in\n" - "the queue, and at least `min_alarm_sustain_duration`\n" - "milliseconds passed since the last time we considered the connection \"congested\".\n\n" + en: "Minimal time before clearing the alarm.
" + "The alarm is cleared only when there's no pending data in
" + "the queue, and at least min_alarm_sustain_duration" + "milliseconds passed since the last time we considered the connection 'congested'.
" "This is to avoid clearing and raising the alarm again too often." - zh: "清除警报前的最短时间。\n\n" - "只有当队列中没有挂起的数据,并且连接至少被堵塞了 \"min_alarm_sustain_duration\" 毫秒时,\n" - "报警才会被清除。这是为了避免太频繁地清除和再次发出警报." + zh: "清除警报前的最短时间。
" + "只有当队列中没有挂起的数据,并且连接至少被堵塞了 min_alarm_sustain_duration 毫秒时,
" + "报警才会被清除。这是为了避免太频繁地清除和再次发出警报。" } label { en: "Sustain duration" @@ -120,10 +119,10 @@ emqx_schema { force_gc_enable { desc { en: "Enable forced garbage collection." - zh: "启用强制垃圾回收" + zh: "启用强制垃圾回收。" } label { - en: "Enable forced garbage collection." + en: "Enable forced garbage collection" zh: "启用强制垃圾回收" } } @@ -131,7 +130,7 @@ emqx_schema { force_gc_count { desc { en: "GC the process after this many received messages." - zh: "在进程收到多少消息之后,对此进程执行垃圾回收" + zh: "在进程收到多少消息之后,对此进程执行垃圾回收。" } label { en: "Process GC messages num" @@ -142,7 +141,7 @@ emqx_schema { force_gc_bytes { desc { en: "GC the process after specified number of bytes have passed through." - zh: "在进程处理过多少个字节之后,对此进程执行垃圾回收" + zh: "在进程处理过多少个字节之后,对此进程执行垃圾回收。" } label { en: "Process GC bytes" @@ -190,7 +189,7 @@ emqx_schema { sysmon_vm_long_gc { desc { en: "Enable Long GC monitoring." - zh: "启用长垃圾回收监控" + zh: "启用长垃圾回收监控。" } label { en: "Enable Long GC monitoring." @@ -201,7 +200,7 @@ emqx_schema { sysmon_vm_long_schedule { desc { en: "Enable Long Schedule monitoring." - zh: "启用长调度监控" + zh: "启用长调度监控。" } label { en: "Enable Long Schedule monitoring." @@ -212,7 +211,7 @@ emqx_schema { sysmon_vm_large_heap { desc { en: "Enable Large Heap monitoring." - zh: "启用大 heap 监控" + zh: "启用大 heap 监控。" } label { en: "Enable Large Heap monitoring." @@ -223,7 +222,7 @@ emqx_schema { sysmon_vm_busy_dist_port { desc { en: "Enable Busy Distribution Port monitoring." - zh: "启用分布式端口过忙监控" + zh: "启用分布式端口过忙监控。" } label { en: "Enable Busy Distribution Port monitoring." @@ -234,7 +233,7 @@ emqx_schema { sysmon_vm_busy_port { desc { en: "Enable Busy Port monitoring." - zh: "启用端口过忙监控" + zh: "启用端口过忙监控。" } label { en: "Enable Busy Port monitoring." @@ -316,7 +315,7 @@ emqx_schema { sysmon_top_num_items { desc { en: "The number of top processes per monitoring group" - zh: "每个监视组的顶级进程数" + zh: "每个监视组的顶级进程数。" } label { en: "Top num items" @@ -327,7 +326,7 @@ emqx_schema { sysmon_top_sample_interval { desc { en: "Specifies how often process top should be collected" - zh: "指定应收集进程顶部的频率" + zh: "指定应收集进程顶部的频率。" } label { en: "Top sample interval" @@ -339,7 +338,7 @@ emqx_schema { desc { en: "Stop collecting data when the number of processes\n" "in the VM exceeds this value" - zh: "当VM中的进程数超过此值时,停止收集数据" + zh: "当 VM 中的进程数超过此值时,停止收集数据。" } label { en: "Max procs" @@ -350,7 +349,7 @@ emqx_schema { sysmon_top_db_hostname { desc { en: "Hostname of the PostgreSQL database that collects the data points" - zh: "收集数据点的 PostgreSQL 数据库的主机名" + zh: "收集数据点的 PostgreSQL 数据库的主机名。" } label { en: "DB Hostname" @@ -360,8 +359,8 @@ emqx_schema { sysmon_top_db_port { desc { - en: "Port of the PostgreSQL database that collects the data points" - zh: "收集数据点的 PostgreSQL 数据库的端口" + en: "Port of the PostgreSQL database that collects the data points." + zh: "收集数据点的 PostgreSQL 数据库的端口。" } label { en: "DB Port" @@ -404,7 +403,7 @@ emqx_schema { alarm_actions { desc { - en: "The actions triggered when the alarm is activated.
\n" + en: "The actions triggered when the alarm is activated.
" "Currently, the following actions are supported: log and " "publish.\n" "log is to write the alarm to log (console or file).\n" @@ -412,7 +411,7 @@ emqx_schema { "the system topics:\n" "$SYS/brokers/emqx@xx.xx.xx.x/alarms/activate and\n" "$SYS/brokers/emqx@xx.xx.xx.x/alarms/deactivate" - zh: "警报激活时触发的动作。
\n" + zh: "警报激活时触发的动作。
" "目前,支持以下操作:log 和 " "publish.\n" "log 将告警写入日志 (控制台或者文件).\n" @@ -428,7 +427,7 @@ emqx_schema { alarm_size_limit { desc { - en: "The maximum total number of deactivated alarms to keep as history.
\n" + en: "The maximum total number of deactivated alarms to keep as history.
" "When this limit is exceeded, the oldest deactivated alarms are " "deleted to cap the total number.\n" zh: "要保留为历史记录的已停用报警的最大总数。当超过此限制时,将删除最旧的停用报警,以限制总数。" @@ -454,7 +453,7 @@ emqx_schema { flapping_detect_enable { desc { en: "Enable flapping connection detection feature." - zh: "启用抖动检测功能" + zh: "启用抖动检测功能。" } label: { en: "Enable flapping detection" @@ -465,7 +464,7 @@ emqx_schema { flapping_detect_max_count { desc { en: "The maximum number of disconnects allowed for a MQTT Client in `window_time`" - zh: "MQTT 客户端在\"窗口\"时间内允许的最大断开次数" + zh: "MQTT 客户端在“窗口”时间内允许的最大断开次数。" } label: { en: "Max count" @@ -487,11 +486,11 @@ emqx_schema { flapping_detect_ban_time { desc { en: "How long the flapping clientid will be banned." - zh: "抖动的客户端将会被禁止登陆多长时间" + zh: "抖动的客户端将会被禁止登录多长时间。" } label: { en: "Ban time" - zh: "禁止登陆时长" + zh: "禁止登录时长" } } @@ -592,7 +591,7 @@ emqx_schema { persistent_session_builtin_session_table { desc { en: "Performance tuning options for built-in session table." - zh: "用于内建会话表的性能调优参数" + zh: "用于内建会话表的性能调优参数。" } label: { en: "Persistent session" @@ -614,7 +613,7 @@ emqx_schema { persistent_session_builtin_messages_table { desc { en: "Performance tuning options for built-in messages table." - zh: "用于内建消息表的性能调优参数" + zh: "用于内建消息表的性能调优参数。" } label: { en: "Persistent messages" @@ -625,7 +624,7 @@ emqx_schema { stats_enable { desc { en: "Enable/disable statistic data collection." - zh: "启用/禁用统计数据收集功能" + zh: "启用/禁用统计数据收集功能。" } label: { en: "Enable/disable statistic data collection." @@ -767,8 +766,8 @@ mqtt 下所有的配置作为全局的默认值存在,它可以被 zone< mqtt_ignore_loop_deliver { desc { - en: """Ignore loop delivery of messages for MQTT v3.1.1/v3.1.0, similar to No Local subscription option in MQTT 5.0""" - zh: """是否为 MQTT v3.1.1/v3.1.0 客户端忽略投递自己发布的消息,类似于 MQTT 5.0 中的 No Local 订阅选项""" + en: """Ignore loop delivery of messages for MQTT v3.1.1/v3.1.0, similar to No Local subscription option in MQTT 5.0.""" + zh: """是否为 MQTT v3.1.1/v3.1.0 客户端忽略投递自己发布的消息,类似于 MQTT 5.0 中的 No Local 订阅选项。""" } label: { en: """Ignore Loop Deliver""" @@ -869,23 +868,23 @@ When set to true, invalid utf8 strings in for example client ID, topic name, etc mqtt_max_awaiting_rel { desc { - en: """Maximum QoS 2 packets (Client -> Broker) awaiting PUBREL.""" - zh: """PUBREL (Client -> Broker) 最大等待队列长度。""" + en: """For each publisher session, the maximum number of outstanding QoS 2 messages pending on the client to send PUBREL. After reaching this limit, new QoS 2 PUBLISH requests will be rejected with `147(0x93)` until either PUBREL is received or timed out.""" + zh: """每个发布者的会话中,都存在一个队列来处理客户端发送的 QoS 2 消息。该队列会存储 QoS 2 消息的报文 ID 直到收到客户端的 PUBREL 或超时,达到队列长度的限制后,新的 QoS 2 消息发布会被拒绝,并返回 `147(0x93)` 错误。""" } label: { en: """Max Awaiting PUBREL""" - zh: """Max Awaiting PUBREL""" + zh: """PUBREL 等待队列长度""" } } mqtt_await_rel_timeout { desc { - en: """The QoS 2 messages (Client -> Broker) will be dropped if awaiting PUBREL timeout.""" - zh: """PUBREL (Client -> Broker) 最大等待时间,超时则会被丢弃。""" + en: """For client to broker QoS 2 message, the time limit for the broker to wait before the `PUBREL` message is received. The wait is aborted after timed out, meaning the packet ID is freed for new `PUBLISH` requests. Receiving a stale `PUBREL` causes a warning level log. 
Note, the message is delivered to subscribers before entering the wait for PUBREL.""" + zh: """客户端发布 QoS 2 消息时,服务器等待 `PUBREL` 的最长时延。超过该时长后服务器会放弃等待,该PACKET ID 会被释放,从而允许后续新的 PUBLISH 消息使用。如果超时后收到 PUBREL,服务器将会产生一条告警日志。注意,向订阅客户端转发消息的动作发生在进入等待之前。""" } label: { en: """Max Awaiting PUBREL TIMEOUT""" - zh: """Max Awaiting PUBREL TIMEOUT""" + zh: """PUBREL 最大等待时间""" } } @@ -987,7 +986,7 @@ Supported configurations are the following: - pem: Convert DER certificate content to PEM format as Username - md5: Take the MD5 value of the content of the DER or PEM certificate as Username """ - zh: """使用对端证书中的 CN, DN 字段或整个证书内容来作为用户名。仅适用于 TLS 连接。 + zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为用户名。仅适用于 TLS 连接。 目前支持配置为以下内容: - cn: 取证书的 CN 字段作为 Username - dn: 取证书的 DN 字段作为 Username @@ -1012,7 +1011,7 @@ Supported configurations are the following: - pem: Convert DER certificate content to PEM format as Client ID - md5: Take the MD5 value of the content of the DER or PEM certificate as Client ID """ - zh: """使用对端证书中的 CN, DN 字段或整个证书内容来作为客户端 ID。仅适用于 TLS 连接。 + zh: """使用对端证书中的 CN、DN 字段或整个证书内容来作为客户端 ID。仅适用于 TLS 连接。 目前支持配置为以下内容: - cn: 取证书的 CN 字段作为 Client ID - dn: 取证书的 DN 字段作为 Client ID @@ -1051,10 +1050,10 @@ Supported configurations are the following: """ zh: """Session 在集群中的锁策略。 - - `loca`: 仅锁本节点的 Session - - `one`: 任选一个其它节点加锁 - - `quorum`: 选择集群中半数以上的节点加锁 - - `all`: 选择所有节点加锁 + - `loca`:仅锁本节点的 Session; + - `one`:任选一个其它节点加锁; + - `quorum`:选择集群中半数以上的节点加锁; + - `all`:选择所有节点加锁。 """ } } @@ -1069,10 +1068,10 @@ Supported configurations are the following: """ zh: """共享订阅消息派发策略。 - - `random`: 随机挑选一个共享订阅者派发 - - `round_robin`: 使用 round-robin 策略派发 - - `sticky`: 总是使用上次选中的订阅者派发,直到它断开连接 - - `hash`: 使用发送者的 Client ID 进行 Hash 来选择订阅者 + - `random`:随机挑选一个共享订阅者派发; + - `round_robin`:使用 round-robin 策略派发; + - `sticky`:总是使用上次选中的订阅者派发,直到它断开连接; + - `hash`:使用发送者的 Client ID 进行 Hash 来选择订阅者。 """ } } @@ -1123,13 +1122,12 @@ until the subscriber disconnects. subscriber was not found, send to a random subscriber cluster-wide """ cn: """共享订阅的分发策略名称。 -- `random`: 随机选择一个组内成员; -- `round_robin`: 循环选择下一个成员; -- `round_robin_per_group`: 在共享组内循环选择下一个成员; -- `sticky`: 使用上一次选中的成员; -- `hash`: 根据 ClientID 哈希映射到一个成员; -- `local`: 随机分发到节点本地成成员,如果本地成员不存在,则随机分发 -到任意一个成员。 +- `random`:随机选择一个组内成员; +- `round_robin`:循环选择下一个成员; +- `round_robin_per_group`:在共享组内循环选择下一个成员; +- `sticky`:使用上一次选中的成员; +- `hash`:根据 ClientID 哈希映射到一个成员; +- `local`:随机分发到节点本地成成员,如果本地成员不存在,则随机分发到任意一个成员。 """ } @@ -1149,9 +1147,9 @@ NOTE: when changing from/to `global` lock, it requires all nodes in the cluster 建议仅当通配符主题较多时才更改此参数。 注:当从/更改为 `global` 锁时,它要求集群中的所有节点在更改之前停止。 - - `key`: 为 Mnesia 事务涉及到的每个 key 上锁,建议单节点时使用。 - - `tab`: 为 Mnesia 事务涉及到的表上锁,建议在集群中使用。 - - `global`: 所以更新操作都被全局的锁保护,仅建议在超大规模集群中使用。 + - `key`:为 Mnesia 事务涉及到的每个 key 上锁,建议单节点时使用。 + - `tab`:为 Mnesia 事务涉及到的表上锁,建议在集群中使用。 + - `global`:所以更新操作都被全局的锁保护,仅建议在超大规模集群中使用。 """ } } @@ -1202,8 +1200,8 @@ NOTE: This is a cluster-wide configuration. It requires all nodes to be stopped sys_event_messages { desc { - en: """Client events messages""" - zh: """客户端事件消息""" + en: """Client events messages.""" + zh: """客户端事件消息。""" } } @@ -1258,12 +1256,8 @@ Find more details in 'authorization.sources' config. fields_authorization_deny_action { desc { - en: """ -The action when the authorization check rejects an operation. -""" - zh: """ -授权检查拒绝操作时的操作。 -""" + en: """The action when the authorization check rejects an operation.""" + zh: """授权检查拒绝操作时的操作。""" } label: { en: "Authorization deny action" @@ -1273,12 +1267,8 @@ The action when the authorization check rejects an operation. 
fields_cache_enable { desc { - en: """ -Enable or disable the authorization cache. -""" - zh: """ -启用或禁用授权缓存。 -""" + en: """Enable or disable the authorization cache.""" + zh: """启用或禁用授权缓存。""" } label: { en: "Enable or disable the authorization cache." @@ -1288,12 +1278,8 @@ Enable or disable the authorization cache. fields_cache_max_size { desc { - en: """ -Maximum number of cached items. -""" - zh: """ -缓存项的最大数量。 -""" + en: """Maximum number of cached items.""" + zh: """缓存项的最大数量。""" } label: { en: "Maximum number of cached items." @@ -1303,12 +1289,8 @@ Maximum number of cached items. fields_cache_ttl { desc { - en: """ -Time to live for the cached data. -""" - zh: """ -缓存数据的生存时间。 -""" + en: """Time to live for the cached data. """ + zh: """缓存数据的生存时间。""" } label: { en: "Time to live for the cached data." @@ -1318,9 +1300,7 @@ Time to live for the cached data. fields_deflate_opts_level { desc { - en: """ -Compression level. -""" + en: """Compression level. """ zh: """压缩级别""" } label: { @@ -1332,11 +1312,11 @@ Compression level. fields_deflate_opts_mem_level { desc { en: """ -Specifies the size of the compression state.
+Specifies the size of the compression state.
Lower values decrease memory usage per connection. """ zh: """ -指定压缩状态的大小
+指定压缩状态的大小
较低的值会减少每个连接的内存使用。 """ } @@ -1348,12 +1328,8 @@ Lower values decrease memory usage per connection. fields_deflate_opts_strategy { desc { - en: """ -Specifies the compression strategy. -""" - zh: """ -指定压缩策略。 -""" + en: """Specifies the compression strategy.""" + zh: """指定压缩策略。""" } label: { en: "compression strategy" @@ -1363,9 +1339,7 @@ Specifies the compression strategy. fields_deflate_opts_server_context_takeover { desc { - en: """ -Takeover means the compression state is retained between server messages. -""" + en: """Takeover means the compression state is retained between server messages. """ zh: """接管意味着在服务器消息之间保留压缩状态。""" } label: { @@ -1376,12 +1350,8 @@ Takeover means the compression state is retained between server messages. fields_deflate_opts_client_context_takeover { desc { - en: """ -Takeover means the compression state is retained between client messages. -""" - zh: """ -接管意味着在客户端消息之间保留压缩状态。 -""" + en: """Takeover means the compression state is retained between client messages. """ + zh: """接管意味着在客户端消息之间保留压缩状态。""" } label: { en: "Client context takeover" @@ -1391,12 +1361,8 @@ Takeover means the compression state is retained between client messages. fields_deflate_opts_server_max_window_bits { desc { - en: """ -Specifies the size of the compression context for the server. -""" - zh: """ -指定服务器压缩上下文的大小。 -""" + en: """Specifies the size of the compression context for the server.""" + zh: """指定服务器压缩上下文的大小。""" } label: { en: "Server compression max window size" @@ -1406,12 +1372,8 @@ Specifies the size of the compression context for the server. fields_deflate_opts_client_max_window_bits { desc { - en: """ -Specifies the size of the compression context for the client. -""" - zh: """ -指定客户端压缩上下文的大小。 -""" + en: """Specifies the size of the compression context for the client.""" + zh: """指定客户端压缩上下文的大小。""" } label: { en: "Client compression max window size" @@ -1421,10 +1383,8 @@ Specifies the size of the compression context for the client. client_ssl_opts_schema_enable { desc { - en: """ -Enable TLS. -""" - zh: """启用 TLS""" + en: """Enable TLS. """ + zh: """启用 TLS。""" } label: { en: "Enable TLS." @@ -1435,19 +1395,19 @@ Enable TLS. common_ssl_opts_schema_cacertfile { desc { en: """ -Trusted PEM format CA certificates bundle file.
+Trusted PEM format CA certificates bundle file.
The certificates in this file are used to verify the TLS peer's certificates. Append new certificates to the file if new CAs are to be trusted. There is no need to restart EMQX to have the updated file loaded, because -the system regularly checks if file has been updated (and reload).
+the system regularly checks if file has been updated (and reload).
NOTE: invalidating (deleting) a certificate from the file will not affect already established connections. """ zh: """ -受信任的PEM格式CA证书捆绑文件
+受信任的PEM格式 CA 证书捆绑文件
此文件中的证书用于验证TLS对等方的证书。 -如果要信任新CA,请将新证书附加到文件中。 -无需重启EMQX即可加载更新的文件,因为系统会定期检查文件是否已更新(并重新加载)
+如果要信任新 CA,请将新证书附加到文件中。 +无需重启EMQX即可加载更新的文件,因为系统会定期检查文件是否已更新(并重新加载)
注意:从文件中失效(删除)证书不会影响已建立的连接。 """ } @@ -1460,7 +1420,7 @@ already established connections. common_ssl_opts_schema_certfile { desc { en: """ -PEM format certificates chain file.
+PEM format certificates chain file.
The certificates in this file should be in reversed order of the certificate issue chain. That is, the host's certificate should be placed in the beginning of the file, followed by the immediate issuer certificate and so on. @@ -1468,10 +1428,10 @@ Although the root CA certificate is optional, it should be placed at the end of the file if it is to be added. """ zh: """ -PEM格式证书链文件
-此文件中的证书应与证书颁发链的顺序相反。也就是说,主机的证书应该放在文件的开头,然后是直接颁发者证书,依此类推。 -虽然根CA证书是可选的,但它应该放在 -如果要添加文件,请将其删除。 +PEM格式证书链文件
+此文件中的证书应与证书颁发链的顺序相反。也就是说,主机的证书应该放在文件的开头, +然后是直接颁发者 CA 证书,依此类推,一直到根 CA 证书。 +根 CA 证书是可选的,如果想要添加,应加到文件到最末端。 """ } label: { @@ -1482,12 +1442,8 @@ PEM格式证书链文件
common_ssl_opts_schema_keyfile { desc { - en: """ -PEM format private key file. -""" - zh: """ -PEM格式的私钥文件。 -""" + en: """PEM format private key file. """ + zh: """PEM格式的私钥文件。""" } label: { en: "Keyfile" @@ -1497,12 +1453,8 @@ PEM格式的私钥文件。 common_ssl_opts_schema_verify { desc { - en: """ -Enable or disable peer verification. -""" - zh: """ -启用或禁用对等验证。 -""" + en: """Enable or disable peer verification. """ + zh: """启用或禁用对等验证。""" } label: { en: "Verify peer" @@ -1512,12 +1464,8 @@ Enable or disable peer verification. common_ssl_opts_schema_reuse_sessions { desc { - en: """ -Enable TLS session reuse. -""" - zh: """ -启用 TLS 会话重用。 -""" + en: """Enable TLS session reuse. """ + zh: """启用 TLS 会话重用。""" } label: { en: "TLS session reuse" @@ -1528,10 +1476,16 @@ Enable TLS session reuse. common_ssl_opts_schema_depth { desc { en: """ -Maximum number of non-self-issued intermediate certificates that can follow the peer certificate in a valid certification path. So, if depth is 0 the PEER must be signed by the trusted ROOT-CA directly; if 1 the path can be PEER, CA, ROOT-CA; if 2 the path can be PEER, CA, CA, ROOT-CA, and so on. The default value is 10. +Maximum number of non-self-issued intermediate certificates that can follow the peer certificate in a valid certification path. +So, if depth is 0 the PEER must be signed by the trusted ROOT-CA directly;
+if 1 the path can be PEER, Intermediate-CA, ROOT-CA;
+if 2 the path can be PEER, Intermediate-CA1, Intermediate-CA2, ROOT-CA.
""" zh: """ -在有效的证书路径中,可以跟随对等证书的非自颁发中间证书的最大数量。因此,如果深度为0,则对等方必须由受信任的根CA直接签名;如果1,路径可以是PEER、CA、ROOT-CA;如果是2,则路径可以是PEER、CA、CA、ROOT-CA等等。默认值为10。 +在有效的证书路径中,可以跟随对等证书的非自颁发中间证书的最大数量。 +因此,如果深度为0,则对等方必须由受信任的根 CA 直接签名;
+如果是1,路径可以是 PEER、中间 CA、ROOT-CA;
+如果是2,则路径可以是PEER、中间 CA1、中间 CA2、ROOT-CA。 """ } label: { @@ -1560,13 +1514,13 @@ Only used if the private key file is password-protected. common_ssl_opts_schema_versions { desc { en: """ -All TLS/DTLS versions to be supported.
-NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config.
-In case PSK cipher suites are intended, make sure to configured +All TLS/DTLS versions to be supported.
+NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config.
+In case PSK cipher suites are intended, make sure to configure ['tlsv1.2', 'tlsv1.1'] here. """ zh: """ -支持所有TLS/DTLS版本
+支持所有TLS/DTLS版本
注:PSK 的 Ciphers 无法在 tlsv1.3 中使用,如果打算使用 PSK 密码套件,请确保这里配置为 ["tlsv1.2","tlsv1.1"]。 """ @@ -1584,7 +1538,7 @@ This config holds TLS cipher suite names separated by comma, or as an array of strings. e.g. "TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256" or ["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]. -
+
Ciphers (and their ordering) define the way in which the client and server encrypts information over the network connection. Selecting a good cipher suite is critical for the @@ -1592,47 +1546,47 @@ application's data security, confidentiality and performance. The names should be in OpenSSL string format (not RFC format). All default values and examples provided by EMQX config -documentation are all in OpenSSL format.
+documentation are all in OpenSSL format.
NOTE: Certain cipher suites are only compatible with specific TLS versions ('tlsv1.1', 'tlsv1.2' or 'tlsv1.3') incompatible cipher suites will be silently dropped. For instance, if only 'tlsv1.3' is given in the versions, configuring cipher suites for other versions will have no effect. -
+
-NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
-If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
+NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
+If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
PSK cipher suites: "RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
+RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"

""" zh: """ 此配置保存由逗号分隔的 TLS 密码套件名称,或作为字符串数组。例如 "TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256"["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]。 -
+
密码(及其顺序)定义了客户端和服务器通过网络连接加密信息的方式。 选择一个好的密码套件对于应用程序的数据安全性、机密性和性能至关重要。 名称应为 OpenSSL 字符串格式(而不是 RFC 格式)。 -EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
+EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
注意:某些密码套件仅与特定的 TLS 版本兼容('tlsv1.1'、'tlsv1.2'或'tlsv1.3')。 不兼容的密码套件将被自动删除。 例如,如果只有 versions 仅配置为 tlsv1.3。为其他版本配置密码套件将无效。 -
-注:PSK 的 Ciphers 不支持 tlsv1.3
+
+注:PSK 的 Ciphers 不支持 tlsv1.3
如果打算使用PSK密码套件 tlsv1.3。应在ssl.versions中禁用。 -
+
PSK 密码套件: "RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
+RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
""" } label: { @@ -1648,7 +1602,7 @@ This config holds TLS cipher suite names separated by comma, or as an array of strings. e.g. "TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256" or ["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]. -
+
Ciphers (and their ordering) define the way in which the client and server encrypts information over the network connection. Selecting a good cipher suite is critical for the @@ -1656,49 +1610,49 @@ application's data security, confidentiality and performance. The names should be in OpenSSL string format (not RFC format). All default values and examples provided by EMQX config -documentation are all in OpenSSL format.
+documentation are all in OpenSSL format.
NOTE: Certain cipher suites are only compatible with specific TLS versions ('tlsv1.1', 'tlsv1.2' or 'tlsv1.3') incompatible cipher suites will be silently dropped. For instance, if only 'tlsv1.3' is given in the versions, configuring cipher suites for other versions will have no effect. -
+
-NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
-If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
+NOTE: PSK ciphers are suppressed by 'tlsv1.3' version config
+If PSK cipher suites are intended, 'tlsv1.3' should be disabled from versions.
PSK cipher suites: "RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
+RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
-NOTE: QUIC listener supports only 'tlsv1.3' ciphers
+NOTE: QUIC listener supports only 'tlsv1.3' ciphers
""" zh: """ 此配置保存由逗号分隔的 TLS 密码套件名称,或作为字符串数组。例如 "TLS_AES_256_GCM_SHA384,TLS_AES_128_GCM_SHA256"["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256"]。 -
+
密码(及其顺序)定义了客户端和服务器通过网络连接加密信息的方式。 选择一个好的密码套件对于应用程序的数据安全性、机密性和性能至关重要。 名称应为 OpenSSL 字符串格式(而不是 RFC 格式)。 -EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
+EMQX 配置文档提供的所有默认值和示例都是 OpenSSL 格式
注意:某些密码套件仅与特定的 TLS 版本兼容('tlsv1.1'、'tlsv1.2'或'tlsv1.3')。 不兼容的密码套件将被自动删除。 例如,如果只有 versions 仅配置为 tlsv1.3。为其他版本配置密码套件将无效。 -
-注:PSK 的 Ciphers 不支持 tlsv1.3
+
+注:PSK 的 Ciphers 不支持 tlsv1.3
如果打算使用PSK密码套件,tlsv1.3。应在ssl.versions中禁用。 -
+
PSK 密码套件: "RSA-PSK-AES256-GCM-SHA384,RSA-PSK-AES256-CBC-SHA384, RSA-PSK-AES128-GCM-SHA256,RSA-PSK-AES128-CBC-SHA256, RSA-PSK-AES256-CBC-SHA,RSA-PSK-AES128-CBC-SHA, -RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
+RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
注:QUIC 监听器不支持 tlsv1.3 的 ciphers """ @@ -1711,12 +1665,8 @@ RSA-PSK-DES-CBC3-SHA,RSA-PSK-RC4-SHA"
common_ssl_opts_schema_user_lookup_fun { desc { - en: """ -EMQX-internal callback that is used to lookup pre-shared key (PSK) identity. -""" - zh: """ -用于查找预共享密钥(PSK)标识的 EMQX 内部回调。 -""" + en: """EMQX-internal callback that is used to lookup pre-shared key (PSK) identity. """ + zh: """用于查找预共享密钥(PSK)标识的 EMQX 内部回调。""" } label: { en: "SSL PSK user lookup fun" @@ -1749,11 +1699,11 @@ server_ssl_opts_schema_dhfile { Path to a file containing PEM-encoded Diffie-Hellman parameters to be used by the server if a cipher suite using Diffie-Hellman key exchange is negotiated. If not specified, default parameters -are used.
+are used.
NOTE: The dhfile option is not supported by TLS 1.3. """ zh: """ -如果协商使用Diffie-Hellman密钥交换的密码套件,则服务器将使用包含PEM编码的Diffie-Hellman参数的文件的路径。如果未指定,则使用默认参数
+如果协商使用Diffie-Hellman密钥交换的密码套件,则服务器将使用包含PEM编码的Diffie-Hellman参数的文件的路径。如果未指定,则使用默认参数。
注意:TLS 1.3不支持dhfile选项。 """ @@ -1862,10 +1812,8 @@ TLS/SSL握手建立后立即进行GC。 fields_listeners_tcp { desc { - en: """ -TCP listeners -""" - zh: """TCP 监听器""" + en: """TCP listeners.""" + zh: """TCP 监听器。""" } label: { en: "TCP listeners" @@ -1875,10 +1823,8 @@ TCP listeners fields_listeners_ssl { desc { - en: """ -SSL listeners -""" - zh: """SSL 监听器""" + en: """SSL listeners.""" + zh: """SSL 监听器。""" } label: { en: "SSL listeners" @@ -1888,10 +1834,8 @@ SSL listeners fields_listeners_ws { desc { - en: """ -HTTP websocket listeners -""" - zh: """HTTP websocket 监听器""" + en: """HTTP websocket listeners.""" + zh: """HTTP websocket 监听器。""" } label: { en: "HTTP websocket listeners" @@ -1901,10 +1845,8 @@ HTTP websocket listeners fields_listeners_wss { desc { - en: """ -HTTPS websocket listeners -""" - zh: """HTTPS websocket 监听器""" + en: """HTTPS websocket listeners.""" + zh: """HTTPS websocket 监听器。""" } label: { en: "HTTPS websocket listeners" @@ -1914,10 +1856,8 @@ HTTPS websocket listeners fields_listeners_quic { desc { - en: """ -QUIC listeners -""" - zh: """QUIC 监听器""" + en: """QUIC listeners.""" + zh: """QUIC 监听器。""" } label: { en: "QUIC listeners" @@ -1927,10 +1867,8 @@ QUIC listeners fields_listener_enabled { desc { - en: """ -Enable listener. -""" - zh: """启停监听器""" + en: """Enable listener. """ + zh: """启停监听器。""" } label: { en: "Enable listener" @@ -1940,10 +1878,8 @@ Enable listener. fields_mqtt_quic_listener_certfile { desc { - en: """ -Path to the certificate file. -""" - zh: """证书文件""" + en: """Path to the certificate file.""" + zh: """证书文件。""" } label: { en: "Certificate file" @@ -1953,10 +1889,8 @@ Path to the certificate file. fields_mqtt_quic_listener_keyfile { desc { - en: """ -Path to the secret key file. -""" - zh: """私钥文件""" + en: """Path to the secret key file. """ + zh: """私钥文件。""" } label: { en: "Key file" @@ -1966,31 +1900,23 @@ Path to the secret key file. fields_mqtt_quic_listener_idle_timeout { desc { - en: """ -How long a connection can go idle before it is gracefully shut down. 0 to disable -""" - zh: """ -一个连接在被关闭之前可以空闲多长时间。0表示禁用 -""" + en: """How long a connection can go idle before it is gracefully shut down. 0 to disable""" + zh: """一个连接在被关闭之前可以空闲多长时间。0表示禁用。""" } label: { en: "Idle Timeout" - zh: "发呆超时时间" + zh: "空闲超时时间" } } fields_mqtt_quic_listener_handshake_idle_timeout { desc { - en: """ -How long a handshake can idle before it is discarded. -""" - zh: """ -一个握手在被丢弃之前可以空闲多长时间。 -""" + en: """How long a handshake can idle before it is discarded. """ + zh: """一个握手在被丢弃之前可以空闲多长时间。""" } label: { en: "Handshake Idle Timeout" - zh: "握手发呆超时时间" + zh: "握手空闲超时时间" } } @@ -2000,7 +1926,7 @@ fields_mqtt_quic_listener_keep_alive_interval { How often to send PING frames to keep a connection alive. 0 means disabled. """ zh: """ -发送 PING 帧的频率,以保活连接. 设为0,禁用 +发送 PING 帧的频率,以保活连接. 设为 0 表示禁用。 """ } label: { @@ -2026,9 +1952,7 @@ IP address and port for the listening socket. base_listener_acceptors { desc { - en: """ -The size of the listener's receiving pool. -""" + en: """The size of the listener's receiving pool.""" zh: """监听器接收池的大小。""" } label: { @@ -2039,12 +1963,8 @@ The size of the listener's receiving pool. base_listener_max_connections { desc { - en: """ -The maximum number of concurrent connections allowed by the listener. -""" - zh: """ -监听器允许的最大并发连接数。 -""" + en: """The maximum number of concurrent connections allowed by the listener. 
""" + zh: """监听器允许的最大并发连接数。""" } label: { en: "Max connections" @@ -2064,8 +1984,8 @@ set to `some_tenant`, then the client actually subscribes to the topic `some_tenant/t`. Similarly, if another client B (connected to the same listener as the client A) sends a message to topic `t`, the message is routed to all the clients subscribed `some_tenant/t`, so client A will receive the -message, with topic name `t`.
-Set to `""` to disable the feature.
+message, with topic name `t`.
+Set to `""` to disable the feature.
Variables in mountpoint string: - ${clientid}: clientid @@ -2076,10 +1996,10 @@ Variables in mountpoint string: 将消息传递给订阅者时,将从主题名称中删除带前缀的字符串。挂载点是一种用户可以用来实现不同侦听器之间消息路由隔离的方法。 -例如,如果客户机 A 使用 listeners.tcp.\.mountpoint 设置为'some_tenant',那么客户端实际上订阅了主题'some_tenant/t'。
-类似地,如果另一个客户端B(与客户端A连接到同一个侦听器)向主题 't' 发送消息,该消息将路由到所有订阅了'some_租户/t'的客户端,因此客户端 A 将接收主题名为't'的消息
+例如,如果客户机 A 使用 listeners.tcp.\.mountpoint 设置为'some_tenant',那么客户端实际上订阅了主题'some_tenant/t'。
+类似地,如果另一个客户端B(与客户端A连接到同一个侦听器)向主题 't' 发送消息,该消息将路由到所有订阅了'some_租户/t'的客户端,因此客户端 A 将接收主题名为't'的消息
-设置为"" 以禁用该功能
+设置为"" 以禁用该功能
mountpoint 字符串中的变量: - ${clientid}: clientid @@ -2142,7 +2062,7 @@ When set to false clients will be allowed to connect without authen mqtt_listener_access_rules { desc { en: """ -The access control rules for this listener.
See: https://github.com/emqtt/esockd#allowdeny +The access control rules for this listener.
See: https://github.com/emqtt/esockd#allowdeny """ zh: """此监听器的访问控制规则。""" } @@ -2155,11 +2075,11 @@ The access control rules for this listener.
See: https://github.com/emqtt/es mqtt_listener_proxy_protocol { desc { en: """ -Enable the Proxy Protocol V1/2 if the EMQX cluster is deployed behind HAProxy or Nginx.
+Enable the Proxy Protocol V1/2 if the EMQX cluster is deployed behind HAProxy or Nginx.
See: https://www.haproxy.com/blog/haproxy/proxy-protocol/ """ zh: """ -如果EMQX集群部署在 HAProxy 或 Nginx 之后,请启用代理协议 V1/2
+如果EMQX集群部署在 HAProxy 或 Nginx 之后,请启用代理协议 V1/2
详情见: https://www.haproxy.com/blog/haproxy/proxy-protocol/ """ } @@ -2205,7 +2125,7 @@ If there is no decision after a full chain exhaustion, the login is rejected. 该配置可以被配置为: @@ -2222,14 +2142,14 @@ listener_authentication { en: """ Per-listener authentication override. Authentication can be one single authenticator instance or a chain of authenticators as an array. -When authenticating a login (username, client ID, etc.) the authenticators are checked in the configured order.
+When authenticating a login (username, client ID, etc.) the authenticators are checked in the configured order.
""" zh: """ 监听器认证重载。 认证配置可以是单个认证器实例,也可以是一个认证器数组组成的认证链。 -执行登录验证时(用户名、客户端 ID 等),将按配置的顺序执行
+执行登录验证时(用户名、客户端 ID 等),将按配置的顺序执行。 """ } label: { @@ -2240,12 +2160,8 @@ When authenticating a login (username, client ID, etc.) the authenticators are c fields_rate_limit_max_conn_rate { desc { - en: """ -Maximum connections per second. -""" - zh: """ -每秒最大连接数。 -""" + en: """Maximum connections per second.""" + zh: """每秒最大连接数。""" } label: { en: "Max connection rate" @@ -2255,12 +2171,8 @@ Maximum connections per second. fields_rate_limit_conn_messages_in { desc { - en: """ -Message limit for the external MQTT connections. -""" - zh: """ -外部 MQTT 连接的消息限制。 -""" + en: """Message limit for the external MQTT connections.""" + zh: """外部 MQTT 连接的消息限制。""" } label: { en: "connecting messages in" @@ -2288,26 +2200,26 @@ The rate is counted by bytes of packets per second. client_ssl_opts_schema_server_name_indication { desc { en: """ -Specify the host name to be used in TLS Server Name Indication extension.
+Specify the host name to be used in TLS Server Name Indication extension.
For instance, when connecting to "server.example.net", the genuine server which accepts the connection and performs TLS handshake may differ from the host the TLS client initially connects to, e.g. when connecting to an IP address -or when the host has multiple resolvable DNS records
+or when the host has multiple resolvable DNS records
If not specified, it will default to the host name string which is used -to establish the connection, unless it is IP addressed used.
+to establish the connection, unless it is IP addressed used.
The host name is then also used in the host name verification of the peer -certificate.
The special value 'disable' prevents the Server Name +certificate.
The special value 'disable' prevents the Server Name Indication extension from being sent and disables the hostname verification check. """ zh: """ -指定要在 TLS 服务器名称指示扩展中使用的主机名
+指定要在 TLS 服务器名称指示扩展中使用的主机名。
例如,当连接到 "server.example.net" 时,接受连接并执行 TLS 握手的真正服务器可能与 TLS 客户端最初连接到的主机不同, -例如,当连接到 IP 地址时,或者当主机具有多个可解析的 DNS 记录时
+例如,当连接到 IP 地址时,或者当主机具有多个可解析的 DNS 记录时
如果未指定,它将默认为使用的主机名字符串 -建立连接,除非使用 IP 地址
-然后,主机名也用于对等机的主机名验证 -证书
特殊值'disable'阻止发送服务器名称指示扩展,并禁用主机名验证检查。 +建立连接,除非使用 IP 地址
+然后,主机名也用于对等机的主机名验证证书
+特殊值 disable 阻止发送服务器名称指示扩展,并禁用主机名验证检查。 """ } label: { @@ -2319,11 +2231,11 @@ verification check. fields_tcp_opts_active_n { desc { en: """ -Specify the {active, N} option for this Socket.
+Specify the {active, N} option for this Socket.
See: https://erlang.org/doc/man/inet.html#setopts-2 """ zh: """ -为此套接字指定{active,N}选项
+为此套接字指定{active,N}选项
See: https://erlang.org/doc/man/inet.html#setopts-2 """ } @@ -2351,12 +2263,8 @@ TCP backlog 定义了挂起连接队列可以增长到的最大长度。 fields_tcp_opts_send_timeout { desc { - en: """ -The TCP send timeout for the connections. -""" - zh: """ -连接的TCP发送超时。 -""" + en: """The TCP send timeout for the connections. """ + zh: """连接的 TCP 发送超时。""" } label: { en: "TCP send timeout" @@ -2385,7 +2293,7 @@ fields_tcp_opts_recbuf { The TCP receive buffer (OS kernel) for the connections. """ zh: """ -连接的 TCP 接收缓冲区(OS内核)。 +连接的 TCP 接收缓冲区(OS 内核)。 """ } label: { @@ -2400,7 +2308,7 @@ fields_tcp_opts_sndbuf { The TCP send buffer (OS kernel) for the connections. """ zh: """ -连接的 TCP 发送缓冲区(OS内核)。 +连接的 TCP 发送缓冲区(OS 内核)。 """ } label: { @@ -2473,18 +2381,18 @@ The SO_REUSEADDR flag for the connections. fields_trace_payload_encode { desc { en: """ -Determine the format of the payload format in the trace file.
+Determine the format of the payload format in the trace file.
`text`: Text-based protocol or plain text protocol. - It is recommended when payload is JSON encoded.
-`hex`: Binary hexadecimal encode. It is recommended when payload is a custom binary protocol.
+ It is recommended when payload is JSON encoded.
+`hex`: Binary hexadecimal encode. It is recommended when payload is a custom binary protocol.
`hidden`: payload is obfuscated as `******` """ zh: """ -确定跟踪文件中有效负载格式的格式
+确定跟踪文件中有效负载格式的格式。
`text`:基于文本的协议或纯文本协议。 -建议在有效负载为JSON编码时使用
-`hex`:二进制十六进制编码。当有效负载是自定义二进制协议时,建议使用此选项
+建议在有效负载为JSON编码时使用
+`hex`:二进制十六进制编码。当有效负载是自定义二进制协议时,建议使用此选项
`hidden`:有效负载被模糊化为 `******` """ } @@ -2529,11 +2437,11 @@ WebSocket消息是否允许包含多个 MQTT 数据包。 fields_ws_opts_compress { desc { en: """ -If true, compress WebSocket messages using zlib.
+If true, compress WebSocket messages using zlib.
The configuration items under deflate_opts belong to the compression-related parameter configuration. """ zh: """ -如果 true,则使用zlib 压缩 WebSocket 消息
+如果 true,则使用zlib 压缩 WebSocket 消息
deflate_opts 下的配置项属于压缩相关参数配置。 """ } @@ -2555,7 +2463,7 @@ message within this interval. } label: { en: "WS idle timeout" - zh: "WS 发呆时间" + zh: "WS 空闲时间" } } @@ -2579,11 +2487,11 @@ fields_ws_opts_fail_if_no_subprotocol { en: """ If true, the server will return an error when the client does not carry the Sec-WebSocket-Protocol field. -
Note: WeChat applet needs to disable this verification. +
Note: WeChat applet needs to disable this verification. """ zh: """ 如果true,当客户端未携带Sec WebSocket Protocol字段时,服务器将返回一个错误。 -
注意:微信小程序需要禁用此验证。 +
注意:微信小程序需要禁用此验证。 """ } label: { @@ -2644,7 +2552,7 @@ If false and check_origin_enable is true, fields_ws_opts_check_origins { desc { en: """ -List of allowed origins.
See check_origin_enable. +List of allowed origins.
See check_origin_enable. """ zh: """ 允许的 origins 列表 diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index 75cae1638..898b9551f 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -32,7 +32,7 @@ %% `apps/emqx/src/bpapi/README.md' %% Community edition --define(EMQX_RELEASE_CE, "5.0.8"). +-define(EMQX_RELEASE_CE, "5.0.10"). %% Enterprise edition -define(EMQX_RELEASE_EE, "5.0.0-beta.4"). diff --git a/apps/emqx/priv/bpapi.versions b/apps/emqx/priv/bpapi.versions index e224e6425..9997055dc 100644 --- a/apps/emqx/priv/bpapi.versions +++ b/apps/emqx/priv/bpapi.versions @@ -17,9 +17,11 @@ {emqx_license,2}. {emqx_management,1}. {emqx_management,2}. +{emqx_management,3}. {emqx_mgmt_api_plugins,1}. {emqx_mgmt_cluster,1}. {emqx_mgmt_trace,1}. +{emqx_mgmt_trace,2}. {emqx_persistent_session,1}. {emqx_plugin_libs,1}. {emqx_prometheus,1}. diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index ed2304402..e50264693 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -27,7 +27,7 @@ {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.5"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.6"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.30.0"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index cc1e7eaea..5d2d8eb2c 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -3,7 +3,7 @@ {id, "emqx"}, {description, "EMQX Core"}, % strict semver, bump manually! - {vsn, "5.0.9"}, + {vsn, "5.0.10"}, {modules, []}, {registered, []}, {applications, [ diff --git a/apps/emqx/src/emqx_access_control.erl b/apps/emqx/src/emqx_access_control.erl index 1345c78e0..66d45b29a 100644 --- a/apps/emqx/src/emqx_access_control.erl +++ b/apps/emqx/src/emqx_access_control.erl @@ -24,6 +24,11 @@ authorize/3 ]). +-ifdef(TEST). +-compile(export_all). +-compile(nowarn_export_all). +-endif. + %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- @@ -45,6 +50,19 @@ authenticate(Credential) -> %% @doc Check Authorization -spec authorize(emqx_types:clientinfo(), emqx_types:pubsub(), emqx_types:topic()) -> allow | deny. 
+authorize(ClientInfo, PubSub, <<"$delayed/", Data/binary>> = RawTopic) -> + case binary:split(Data, <<"/">>) of + [_, Topic] -> + authorize(ClientInfo, PubSub, Topic); + _ -> + ?SLOG(warning, #{ + msg => "invalid_dealyed_topic_format", + expected_example => "$delayed/1/t/foo", + got => RawTopic + }), + inc_authz_metrics(deny), + deny + end; authorize(ClientInfo, PubSub, Topic) -> Result = case emqx_authz_cache:is_enabled() of diff --git a/apps/emqx/src/emqx_authentication_config.erl b/apps/emqx/src/emqx_authentication_config.erl index 681ed1394..b867800ae 100644 --- a/apps/emqx/src/emqx_authentication_config.erl +++ b/apps/emqx/src/emqx_authentication_config.erl @@ -64,7 +64,7 @@ pre_config_update(_, UpdateReq, OldConfig) -> try do_pre_config_update(UpdateReq, to_list(OldConfig)) of {error, Reason} -> {error, Reason}; - {ok, NewConfig} -> {ok, return_map(NewConfig)} + {ok, NewConfig} -> {ok, NewConfig} catch throw:Reason -> {error, Reason} @@ -225,9 +225,6 @@ do_check_config(Type, Config, Module) -> throw({bad_authenticator_config, #{type => Type, reason => E}}) end. -return_map([L]) -> L; -return_map(L) -> L. - to_list(undefined) -> []; to_list(M) when M =:= #{} -> []; to_list(M) when is_map(M) -> [M]; diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index 8335a2a5d..ea35abfba 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -345,7 +345,8 @@ handle_in(?CONNECT_PACKET(ConnPkt) = Packet, Channel) -> fun check_connect/2, fun enrich_client/2, fun set_log_meta/2, - fun check_banned/2 + fun check_banned/2, + fun count_flapping_event/2 ], ConnPkt, Channel#channel{conn_state = connecting} @@ -997,8 +998,13 @@ maybe_nack(Delivers) -> lists:filter(fun not_nacked/1, Delivers). not_nacked({deliver, _Topic, Msg}) -> - not (emqx_shared_sub:is_ack_required(Msg) andalso - (ok == emqx_shared_sub:nack_no_connection(Msg))). + case emqx_shared_sub:is_ack_required(Msg) of + true -> + ok = emqx_shared_sub:nack_no_connection(Msg), + false; + false -> + true + end. maybe_mark_as_delivered(Session, Delivers) -> case emqx_session:info(is_persistent, Session) of @@ -1222,6 +1228,8 @@ handle_call( ChanInfo1 = info(NChannel), emqx_cm:set_chan_info(ClientId, ChanInfo1#{sockinfo => SockInfo}), reply(ok, reset_timer(alive_timer, NChannel)); +handle_call(get_mqueue, Channel) -> + reply({ok, get_mqueue(Channel)}, Channel); handle_call(Req, Channel) -> ?SLOG(error, #{msg => "unexpected_call", call => Req}), reply(ignored, Channel). @@ -1253,14 +1261,11 @@ handle_info( {sock_closed, Reason}, Channel = #channel{ - conn_state = ConnState, - clientinfo = ClientInfo = #{zone := Zone} + conn_state = ConnState } ) when ConnState =:= connected orelse ConnState =:= reauthenticating -> - emqx_config:get_zone_conf(Zone, [flapping_detect, enable]) andalso - emqx_flapping:detect(ClientInfo), Channel1 = ensure_disconnected(Reason, maybe_publish_will_msg(Channel)), case maybe_shutdown(Reason, Channel1) of {ok, Channel2} -> {ok, {event, disconnected}, Channel2}; @@ -1629,6 +1634,14 @@ check_banned(_ConnPkt, #channel{clientinfo = ClientInfo}) -> false -> ok end. +%%-------------------------------------------------------------------- +%% Flapping + +count_flapping_event(_ConnPkt, Channel = #channel{clientinfo = ClientInfo = #{zone := Zone}}) -> + emqx_config:get_zone_conf(Zone, [flapping_detect, enable]) andalso + emqx_flapping:detect(ClientInfo), + {ok, Channel}. 
+ %%-------------------------------------------------------------------- %% Authenticate @@ -2085,7 +2098,7 @@ parse_topic_filters(TopicFilters) -> lists:map(fun emqx_topic:parse/1, TopicFilters). %%-------------------------------------------------------------------- -%% Ensure disconnected +%% Maybe & Ensure disconnected ensure_disconnected( Reason, @@ -2130,11 +2143,7 @@ publish_will_msg(ClientInfo, Msg = #message{topic = Topic}) -> ?tp( warning, last_will_testament_publish_denied, - #{ - client_info => ClientInfo, - topic => Topic, - message => Msg - } + #{topic => Topic} ), ok end. @@ -2196,6 +2205,7 @@ shutdown(success, Reply, Packet, Channel) -> shutdown(Reason, Reply, Packet, Channel) -> {shutdown, Reason, Reply, Packet, Channel}. +%% mqtt v5 connected sessions disconnect_and_shutdown( Reason, Reply, @@ -2205,9 +2215,12 @@ disconnect_and_shutdown( ) when ConnState =:= connected orelse ConnState =:= reauthenticating -> - shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), Channel); + NChannel = ensure_disconnected(Reason, Channel), + shutdown(Reason, Reply, ?DISCONNECT_PACKET(reason_code(Reason)), NChannel); +%% mqtt v3/v4 sessions, mqtt v5 other conn_state sessions disconnect_and_shutdown(Reason, Reply, Channel) -> - shutdown(Reason, Reply, Channel). + NChannel = ensure_disconnected(Reason, Channel), + shutdown(Reason, Reply, NChannel). sp(true) -> 1; sp(false) -> 0. @@ -2228,3 +2241,6 @@ get_mqtt_conf(Zone, Key, Default) -> set_field(Name, Value, Channel) -> Pos = emqx_misc:index_of(Name, record_info(fields, channel)), setelement(Pos + 1, Channel, Value). + +get_mqueue(#channel{session = Session}) -> + emqx_session:get_mqueue(Session). diff --git a/apps/emqx/src/emqx_config.erl b/apps/emqx/src/emqx_config.erl index 4ab7caf77..fa1c63868 100644 --- a/apps/emqx/src/emqx_config.erl +++ b/apps/emqx/src/emqx_config.erl @@ -414,9 +414,9 @@ check_config(SchemaMod, RawConf) -> check_config(SchemaMod, RawConf, Opts0) -> Opts1 = #{ return_plain => true, - %% TODO: evil, remove, required should be declared in schema - required => false, - format => map + format => map, + %% Don't check lazy types, such as authenticate + check_lazy => false }, Opts = maps:merge(Opts0, Opts1), {AppEnvs, CheckedConf} = diff --git a/apps/emqx/src/emqx_listeners.erl b/apps/emqx/src/emqx_listeners.erl index 67f452e1d..a0f2b1e7d 100644 --- a/apps/emqx/src/emqx_listeners.erl +++ b/apps/emqx/src/emqx_listeners.erl @@ -49,7 +49,8 @@ -export([ listener_id/2, parse_listener_id/1, - ensure_override_limiter_conf/2 + ensure_override_limiter_conf/2, + esockd_access_rules/1 ]). -export([pre_config_update/3, post_config_update/5]). @@ -497,17 +498,28 @@ ip_port({Addr, Port}) -> [{ip, Addr}, {port, Port}]. esockd_access_rules(StrRules) -> - Access = fun(S) -> + Access = fun(S, Acc) -> [A, CIDR] = string:tokens(S, " "), - { - list_to_atom(A), - case CIDR of - "all" -> all; - _ -> CIDR - end - } + %% esockd rules only use words 'allow' and 'deny', both are existing + %% comparison of strings may be better, but there is a loss of backward compatibility + case emqx_misc:safe_to_existing_atom(A) of + {ok, Action} -> + [ + { + Action, + case CIDR of + "all" -> all; + _ -> CIDR + end + } + | Acc + ]; + _ -> + ?SLOG(warning, #{msg => "invalid esockd access rule", rule => S}), + Acc + end end, - [Access(R) || R <- StrRules]. + lists:foldr(Access, [], StrRules). 
merge_default(Options) -> case lists:keytake(tcp_options, 1, Options) of @@ -521,12 +533,16 @@ merge_default(Options) -> integer() | {tuple(), integer()} | string() | binary() ) -> io_lib:chars(). format_bind(Port) when is_integer(Port) -> + %% **Note**: + %% 'For TCP, UDP and IP networks, if the host is empty or a literal + %% unspecified IP address, as in ":80", "0.0.0.0:80" or "[::]:80" for + %% TCP and UDP, "", "0.0.0.0" or "::" for IP, the local system is + %% assumed.' + %% + %% Quoted from: https://pkg.go.dev/net + %% Decided to use this format to display the bind for all interfaces and + %% IPv4/IPv6 support io_lib:format(":~w", [Port]); -%% Print only the port number when bound on all interfaces -format_bind({{0, 0, 0, 0}, Port}) -> - format_bind(Port); -format_bind({{0, 0, 0, 0, 0, 0, 0, 0}, Port}) -> - format_bind(Port); format_bind({Addr, Port}) when is_list(Addr) -> io_lib:format("~ts:~w", [Addr, Port]); format_bind({Addr, Port}) when is_tuple(Addr), tuple_size(Addr) == 4 -> @@ -538,6 +554,8 @@ format_bind(Str) when is_list(Str) -> case emqx_schema:to_ip_port(Str) of {ok, {Ip, Port}} -> format_bind({Ip, Port}); + {ok, Port} -> + format_bind(Port); {error, _} -> format_bind(list_to_integer(Str)) end; diff --git a/apps/emqx/src/emqx_message.erl b/apps/emqx/src/emqx_message.erl index ae74a614b..03f7ca6a2 100644 --- a/apps/emqx/src/emqx_message.erl +++ b/apps/emqx/src/emqx_message.erl @@ -74,7 +74,8 @@ to_map/1, to_log_map/1, to_list/1, - from_map/1 + from_map/1, + estimate_size/1 ]). -export_type([message_map/0]). @@ -175,6 +176,18 @@ make(MsgId, From, QoS, Topic, Payload, Flags, Headers) when timestamp = Now }. +%% optimistic esitmation of a message size after serialization +%% not including MQTT v5 message headers/user properties etc. +-spec estimate_size(emqx_types:message()) -> non_neg_integer(). +estimate_size(#message{topic = Topic, payload = Payload}) -> + FixedHeaderSize = 1, + VarLenSize = 4, + TopicSize = iolist_size(Topic), + PayloadSize = iolist_size(Payload), + PacketIdSize = 2, + TopicLengthSize = 2, + FixedHeaderSize + VarLenSize + TopicLengthSize + TopicSize + PacketIdSize + PayloadSize. + -spec id(emqx_types:message()) -> maybe(binary()). id(#message{id = Id}) -> Id. diff --git a/apps/emqx/src/emqx_misc.erl b/apps/emqx/src/emqx_misc.erl index 6833cbabb..674465e6a 100644 --- a/apps/emqx/src/emqx_misc.erl +++ b/apps/emqx/src/emqx_misc.erl @@ -52,7 +52,9 @@ explain_posix/1, pmap/2, pmap/3, - readable_error_msg/1 + readable_error_msg/1, + safe_to_existing_atom/1, + safe_to_existing_atom/2 ]). -export([ @@ -463,6 +465,18 @@ nolink_apply(Fun, Timeout) when is_function(Fun, 0) -> exit(timeout) end. +safe_to_existing_atom(In) -> + safe_to_existing_atom(In, utf8). + +safe_to_existing_atom(Bin, Encoding) when is_binary(Bin) -> + try_to_existing_atom(fun erlang:binary_to_existing_atom/2, Bin, Encoding); +safe_to_existing_atom(List, Encoding) when is_list(List) -> + try_to_existing_atom(fun(In, _) -> erlang:list_to_existing_atom(In) end, List, Encoding); +safe_to_existing_atom(Atom, _Encoding) when is_atom(Atom) -> + {ok, Atom}; +safe_to_existing_atom(_Any, _Encoding) -> + {error, invalid_type}. + %%------------------------------------------------------------------------------ %% Internal Functions %%------------------------------------------------------------------------------ @@ -533,6 +547,14 @@ readable_error_msg(Error) -> end end. 
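
As a worked example of estimate_size/1 above: fixed header (1) + the assumed 4-byte remaining-length field + topic length prefix (2) + topic bytes + packet id (2) + payload bytes.

%% topic <<"t/1">> is 3 bytes and payload <<"hello">> is 5 bytes:
%% 1 + 4 + 2 + 3 + 2 + 5 = 17
Msg = emqx_message:make(<<"client1">>, 1, <<"t/1">>, <<"hello">>),
17 = emqx_message:estimate_size(Msg).
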
+try_to_existing_atom(Convert, Data, Encoding) -> + try Convert(Data, Encoding) of + Atom -> + {ok, Atom} + catch + _:Reason -> {error, Reason} + end. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). diff --git a/apps/emqx/src/emqx_mqueue.erl b/apps/emqx/src/emqx_mqueue.erl index f8556bc39..ae55d4b6e 100644 --- a/apps/emqx/src/emqx_mqueue.erl +++ b/apps/emqx/src/emqx_mqueue.erl @@ -66,7 +66,8 @@ in/2, out/1, stats/1, - dropped/1 + dropped/1, + to_list/1 ]). -define(NO_PRIORITY_TABLE, disabled). @@ -109,7 +110,7 @@ dropped = 0 :: count(), p_table = ?NO_PRIORITY_TABLE :: p_table(), default_p = ?LOWEST_PRIORITY :: priority(), - q = ?PQUEUE:new() :: pq(), + q = emqx_pqueue:new() :: pq(), shift_opts :: #shift_opts{}, last_prio :: non_neg_integer() | undefined, p_credit :: non_neg_integer() | undefined @@ -118,7 +119,7 @@ -type mqueue() :: #mqueue{}. -spec init(options()) -> mqueue(). -init(Opts = #{max_len := MaxLen0, store_qos0 := QoS_0}) -> +init(Opts = #{max_len := MaxLen0, store_qos0 := Qos0}) -> MaxLen = case (is_integer(MaxLen0) andalso MaxLen0 > ?MAX_LEN_INFINITY) of true -> MaxLen0; @@ -126,7 +127,7 @@ init(Opts = #{max_len := MaxLen0, store_qos0 := QoS_0}) -> end, #mqueue{ max_len = MaxLen, - store_qos0 = QoS_0, + store_qos0 = Qos0, p_table = get_opt(priorities, Opts, ?NO_PRIORITY_TABLE), default_p = get_priority_opt(Opts), shift_opts = get_shift_opt(Opts) @@ -152,6 +153,19 @@ len(#mqueue{len = Len}) -> Len. max_len(#mqueue{max_len = MaxLen}) -> MaxLen. +%% @doc Return all queued items in a list. +-spec to_list(mqueue()) -> list(). +to_list(MQ) -> + to_list(MQ, []). + +to_list(MQ, Acc) -> + case out(MQ) of + {empty, _MQ} -> + lists:reverse(Acc); + {{value, Msg}, Q1} -> + to_list(Q1, [Msg | Acc]) + end. + %% @doc Return number of dropped messages. -spec dropped(mqueue()) -> count(). dropped(#mqueue{dropped = Dropped}) -> Dropped. diff --git a/apps/emqx/src/emqx_rpc.erl b/apps/emqx/src/emqx_rpc.erl index 939b5395d..961bfd5d6 100644 --- a/apps/emqx/src/emqx_rpc.erl +++ b/apps/emqx/src/emqx_rpc.erl @@ -124,7 +124,10 @@ filter_result(Delivery) -> max_client_num() -> emqx:get_config([rpc, tcp_client_num], ?DefaultClientNum). --spec unwrap_erpc(emqx_rpc:erpc(A)) -> A | {error, _Err}. +-spec unwrap_erpc(emqx_rpc:erpc(A) | [emqx_rpc:erpc(A)]) -> A | {error, _Err} | list(). + +unwrap_erpc(Res) when is_list(Res) -> + [unwrap_erpc(R) || R <- Res]; unwrap_erpc({ok, A}) -> A; unwrap_erpc({throw, A}) -> diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 36687b444..7e30b4fe4 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -39,7 +39,8 @@ -type comma_separated_binary() :: [binary()]. -type comma_separated_atoms() :: [atom()]. -type bar_separated_list() :: list(). --type ip_port() :: tuple(). +-type ip_port() :: tuple() | integer(). +-type host_port() :: tuple(). -type cipher() :: map(). -typerefl_from_string({duration/0, emqx_schema, to_duration}). @@ -52,6 +53,7 @@ -typerefl_from_string({comma_separated_binary/0, emqx_schema, to_comma_separated_binary}). -typerefl_from_string({bar_separated_list/0, emqx_schema, to_bar_separated_list}). -typerefl_from_string({ip_port/0, emqx_schema, to_ip_port}). +-typerefl_from_string({host_port/0, emqx_schema, to_host_port}). -typerefl_from_string({cipher/0, emqx_schema, to_erl_cipher_suite}). -typerefl_from_string({comma_separated_atoms/0, emqx_schema, to_comma_separated_atoms}). 
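
The new list clause of unwrap_erpc/1 lets a whole erpc multicall result be unwrapped in one pass; a minimal sketch using only the {ok, _} clause shown above:

%% e.g. the per-node results of an erpc:multicall/4 over three nodes:
[1, 2, 3] = emqx_rpc:unwrap_erpc([{ok, 1}, {ok, 2}, {ok, 3}]).
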
@@ -78,6 +80,7 @@ to_comma_separated_binary/1, to_bar_separated_list/1, to_ip_port/1, + to_host_port/1, to_erl_cipher_suite/1, to_comma_separated_atoms/1 ]). @@ -96,6 +99,7 @@ comma_separated_binary/0, bar_separated_list/0, ip_port/0, + host_port/0, cipher/0, comma_separated_atoms/0 ]). @@ -1686,7 +1690,7 @@ desc("stats") -> desc("authorization") -> "Settings for client authorization."; desc("mqtt") -> - "Global MQTT configuration.
\n" + "Global MQTT configuration.
" "The configs here work as default values which can be overridden\n" "in zone configs"; desc("cache") -> @@ -2104,11 +2108,11 @@ ref(Module, StructName) -> hoconsc:ref(Module, StructName). mk_duration(Desc, OverrideMeta) -> DefaultMeta = #{ desc => Desc ++ - " Time interval is a string that contains a number followed by time unit:
\n" + " Time interval is a string that contains a number followed by time unit:
" "- `ms` for milliseconds,\n" "- `s` for seconds,\n" "- `m` for minutes,\n" - "- `h` for hours;\n
" + "- `h` for hours;\n
" "or combination of whereof: `1h5m0s`" }, hoconsc:mk(typerefl:alias("string", duration()), maps:merge(DefaultMeta, OverrideMeta)). @@ -2168,33 +2172,60 @@ to_bar_separated_list(Str) -> %% - :1883 %% - :::1883 to_ip_port(Str) -> - case split_ip_port(Str) of - {"", Port} -> - {ok, {{0, 0, 0, 0}, list_to_integer(Port)}}; - {Ip, Port} -> + to_host_port(Str, ip_addr). + +%% @doc support the following format: +%% - 127.0.0.1:1883 +%% - ::1:1883 +%% - [::1]:1883 +%% - :1883 +%% - :::1883 +%% - example.com:80 +to_host_port(Str) -> + to_host_port(Str, hostname). + +%% - example.com:80 +to_host_port(Str, IpOrHost) -> + case split_host_port(Str) of + {"", Port} when IpOrHost =:= ip_addr -> + %% this is a local address + {ok, list_to_integer(Port)}; + {"", _Port} -> + %% must specify host part when it's a remote endpoint + {error, bad_host_port}; + {MaybeIp, Port} -> PortVal = list_to_integer(Port), - case inet:parse_address(Ip) of - {ok, R} -> - {ok, {R, PortVal}}; - _ -> + case inet:parse_address(MaybeIp) of + {ok, IpTuple} -> + {ok, {IpTuple, PortVal}}; + _ when IpOrHost =:= hostname -> %% check is a rfc1035's hostname - case inet_parse:domain(Ip) of + case inet_parse:domain(MaybeIp) of true -> - {ok, {Ip, PortVal}}; + {ok, {MaybeIp, PortVal}}; _ -> - {error, Str} - end + {error, bad_hostname} + end; + _ -> + {error, bad_ip_port} end; _ -> - {error, Str} + {error, bad_ip_port} end. -split_ip_port(Str0) -> +split_host_port(Str0) -> Str = re:replace(Str0, " ", "", [{return, list}, global]), case lists:split(string:rchr(Str, $:), Str) of - %% no port + %% no colon {[], Str} -> - error; + try + %% if it's just a port number, then return as-is + _ = list_to_integer(Str), + {"", Str} + catch + _:_ -> + error + end; {IpPlusColon, PortString} -> IpStr0 = lists:droplast(IpPlusColon), case IpStr0 of @@ -2246,6 +2277,7 @@ validate_alarm_actions(Actions) -> Error -> {error, Error} end. +parse_user_lookup_fun({Fun, _} = Lookup) when is_function(Fun, 3) -> Lookup; parse_user_lookup_fun(StrConf) -> [ModStr, FunStr] = string:tokens(str(StrConf), ": "), Mod = list_to_atom(ModStr), diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index 8ce8a1802..5e98ebbeb 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -47,6 +47,7 @@ -include("emqx_mqtt.hrl"). -include("logger.hrl"). -include("types.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -ifdef(TEST). -compile(export_all). @@ -60,7 +61,8 @@ info/2, is_session/1, stats/1, - obtain_next_pkt_id/1 + obtain_next_pkt_id/1, + get_mqueue/1 ]). -export([ @@ -801,7 +803,7 @@ replay(ClientInfo, Session = #session{inflight = Inflight}) -> -spec terminate(emqx_types:clientinfo(), Reason :: term(), session()) -> ok. terminate(ClientInfo, Reason, Session) -> run_terminate_hooks(ClientInfo, Reason, Session), - redispatch_shared_messages(Session), + maybe_redispatch_shared_messages(Reason, Session), ok. run_terminate_hooks(ClientInfo, discarded, Session) -> @@ -811,29 +813,27 @@ run_terminate_hooks(ClientInfo, takenover, Session) -> run_terminate_hooks(ClientInfo, Reason, Session) -> run_hook('session.terminated', [ClientInfo, Reason, info(Session)]). 
-redispatch_shared_messages(#session{inflight = Inflight}) -> - InflightList = emqx_inflight:to_list(Inflight), - lists:foreach( - fun - %% Only QoS1 messages get redispatched, because QoS2 messages - %% must be sent to the same client, once they're in flight - ({_, #inflight_data{message = #message{qos = ?QOS_2} = Msg}}) -> - ?SLOG(warning, #{msg => qos2_lost_no_redispatch}, #{message => Msg}); - ({_, #inflight_data{message = #message{topic = Topic, qos = ?QOS_1} = Msg}}) -> - case emqx_shared_sub:get_group(Msg) of - {ok, Group} -> - %% Note that dispatch is called with self() in failed subs - %% This is done to avoid dispatching back to caller - Delivery = #delivery{sender = self(), message = Msg}, - emqx_shared_sub:dispatch(Group, Topic, Delivery, [self()]); - _ -> - false - end; - (_) -> - ok - end, - InflightList - ). +maybe_redispatch_shared_messages(takenover, _Session) -> + ok; +maybe_redispatch_shared_messages(kicked, _Session) -> + ok; +maybe_redispatch_shared_messages(_Reason, Session) -> + redispatch_shared_messages(Session). + +redispatch_shared_messages(#session{inflight = Inflight, mqueue = Q}) -> + AllInflights = emqx_inflight:to_list(fun sort_fun/2, Inflight), + F = fun + ({_PacketId, #inflight_data{message = #message{qos = ?QOS_1} = Msg}}) -> + %% For QoS 2, here is what the spec says: + %% If the Client's Session terminates before the Client reconnects, + %% the Server MUST NOT send the Application Message to any other + %% subscribed Client [MQTT-4.8.2-5]. + {true, Msg}; + ({_PacketId, #inflight_data{}}) -> + false + end, + InflightList = lists:filtermap(F, AllInflights), + emqx_shared_sub:redispatch(InflightList ++ emqx_mqueue:to_list(Q)). -compile({inline, [run_hook/2]}). run_hook(Name, Args) -> @@ -925,3 +925,6 @@ age(Now, Ts) -> Now - Ts. set_field(Name, Value, Session) -> Pos = emqx_misc:index_of(Name, record_info(fields, session)), setelement(Pos + 1, Session, Value). + +get_mqueue(#session{mqueue = Q}) -> + emqx_mqueue:to_list(Q). diff --git a/apps/emqx/src/emqx_shared_sub.erl b/apps/emqx/src/emqx_shared_sub.erl index 0527cbfe7..975b403b9 100644 --- a/apps/emqx/src/emqx_shared_sub.erl +++ b/apps/emqx/src/emqx_shared_sub.erl @@ -39,15 +39,15 @@ -export([ dispatch/3, dispatch/4, - do_dispatch_with_ack/4 + do_dispatch_with_ack/4, + redispatch/1 ]). -export([ maybe_ack/1, maybe_nack_dropped/1, nack_no_connection/1, - is_ack_required/1, - get_group/1 + is_ack_required/1 ]). %% for testing @@ -96,6 +96,9 @@ -define(ACK, shared_sub_ack). -define(NACK(Reason), {shared_sub_nack, Reason}). -define(NO_ACK, no_ack). +-define(REDISPATCH_TO(GROUP, TOPIC), {GROUP, TOPIC}). + +-type redispatch_to() :: ?REDISPATCH_TO(emqx_topic:group(), emqx_topic:topic()). -record(state, {pmon}). @@ -144,7 +147,8 @@ dispatch(Group, Topic, Delivery = #delivery{message = Msg}, FailedSubs) -> false -> {error, no_subscribers}; {Type, SubPid} -> - case do_dispatch(SubPid, Group, Topic, Msg, Type) of + Msg1 = with_redispatch_to(Msg, Group, Topic), + case do_dispatch(SubPid, Group, Topic, Msg1, Type) of ok -> {ok, 1}; {error, _Reason} -> @@ -223,16 +227,53 @@ without_group_ack(Msg) -> get_group_ack(Msg) -> emqx_message:get_header(shared_dispatch_ack, Msg, ?NO_ACK). +with_redispatch_to(#message{qos = ?QOS_0} = Msg, _Group, _Topic) -> + Msg; +with_redispatch_to(Msg, Group, Topic) -> + emqx_message:set_headers(#{redispatch_to => ?REDISPATCH_TO(Group, Topic)}, Msg). + +%% @hidden Redispatch is neede only for the messages with redispatch_to header added. 
+is_redispatch_needed(#message{} = Msg) -> + case get_redispatch_to(Msg) of + ?REDISPATCH_TO(_, _) -> + true; + _ -> + false + end. + +%% @doc Redispatch shared deliveries to other members in the group. +redispatch(Messages0) -> + Messages = lists:filter(fun is_redispatch_needed/1, Messages0), + case length(Messages) of + L when L > 0 -> + ?SLOG(info, #{ + msg => "redispatching_shared_subscription_message", + count => L + }), + lists:foreach(fun redispatch_shared_message/1, Messages); + _ -> + ok + end. + +redispatch_shared_message(#message{} = Msg) -> + %% As long as it's still a #message{} record in inflight, + %% we should try to re-dispatch + ?REDISPATCH_TO(Group, Topic) = get_redispatch_to(Msg), + %% Note that dispatch is called with self() in failed subs + %% This is done to avoid dispatching back to caller + Delivery = #delivery{sender = self(), message = Msg}, + dispatch(Group, Topic, Delivery, [self()]). + +%% @hidden Return the `redispatch_to` group-topic in the message header. +%% `false` is returned if the message is not a shared dispatch. +%% or when it's a QoS 0 message. +-spec get_redispatch_to(emqx_types:message()) -> redispatch_to() | false. +get_redispatch_to(Msg) -> + emqx_message:get_header(redispatch_to, Msg, false). + -spec is_ack_required(emqx_types:message()) -> boolean(). is_ack_required(Msg) -> ?NO_ACK =/= get_group_ack(Msg). --spec get_group(emqx_types:message()) -> {ok, any()} | error. -get_group(Msg) -> - case get_group_ack(Msg) of - ?NO_ACK -> error; - {Group, _Sender, _Ref} -> {ok, Group} - end. - %% @doc Negative ack dropped message due to inflight window or message queue being full. -spec maybe_nack_dropped(emqx_types:message()) -> boolean(). maybe_nack_dropped(Msg) -> diff --git a/apps/emqx/src/emqx_trace/emqx_trace.erl b/apps/emqx/src/emqx_trace/emqx_trace.erl index e9866fb16..65756fc2f 100644 --- a/apps/emqx/src/emqx_trace/emqx_trace.erl +++ b/apps/emqx/src/emqx_trace/emqx_trace.erl @@ -19,6 +19,7 @@ -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/logger.hrl"). +-include_lib("kernel/include/file.hrl"). -include_lib("snabbkaffe/include/trace.hrl"). -export([ @@ -46,6 +47,7 @@ filename/2, trace_dir/0, trace_file/1, + trace_file_detail/1, delete_files_after_send/2 ]). @@ -193,6 +195,16 @@ trace_file(File) -> {error, Reason} -> {error, Node, Reason} end. +trace_file_detail(File) -> + FileName = filename:join(trace_dir(), File), + Node = atom_to_binary(node()), + case file:read_file_info(FileName, [{'time', 'posix'}]) of + {ok, #file_info{size = Size, mtime = Mtime}} -> + {ok, #{size => Size, mtime => Mtime, node => Node}}; + {error, Reason} -> + {error, #{reason => Reason, node => Node, file => File}} + end. + delete_files_after_send(TraceLog, Zips) -> gen_server:cast(?MODULE, {delete_tag, self(), [TraceLog | Zips]}). diff --git a/apps/emqx/test/emqx_access_control_SUITE.erl b/apps/emqx/test/emqx_access_control_SUITE.erl index 23c43fa65..ee594ec0a 100644 --- a/apps/emqx/test/emqx_access_control_SUITE.erl +++ b/apps/emqx/test/emqx_access_control_SUITE.erl @@ -32,6 +32,12 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([]). +end_per_testcase(t_delayed_authorize, Config) -> + meck:unload(emqx_access_control), + Config; +end_per_testcase(_, Config) -> + Config. + t_authenticate(_) -> ?assertMatch({ok, _}, emqx_access_control:authenticate(clientinfo())). 
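
The redispatch_to header above is what session-terminate redispatch keys on. A small round-trip sketch with the message header helpers already used in this patch (QoS 0 deliveries never get the header, so redispatch/1 filters them out):

Msg0 = emqx_message:make(<<"pub">>, 1, <<"t">>, <<"payload">>),
Msg1 = emqx_message:set_headers(#{redispatch_to => {<<"grp">>, <<"t">>}}, Msg0),
{<<"grp">>, <<"t">>} = emqx_message:get_header(redispatch_to, Msg1, false),
%% untagged messages report the default and are skipped:
false = emqx_message:get_header(redispatch_to, Msg0, false).
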
@@ -39,6 +45,28 @@ t_authorize(_) -> Publish = ?PUBLISH_PACKET(?QOS_0, <<"t">>, 1, <<"payload">>), ?assertEqual(allow, emqx_access_control:authorize(clientinfo(), Publish, <<"t">>)). +t_delayed_authorize(_) -> + RawTopic = "$dealyed/1/foo/2", + InvalidTopic = "$dealyed/1/foo/3", + Topic = "foo/2", + + ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]), + ok = meck:expect( + emqx_access_control, + do_authorize, + fun + (_, _, Topic) -> allow; + (_, _, _) -> deny + end + ), + + Publish1 = ?PUBLISH_PACKET(?QOS_0, RawTopic, 1, <<"payload">>), + ?assertEqual(allow, emqx_access_control:authorize(clientinfo(), Publish1, RawTopic)), + + Publish2 = ?PUBLISH_PACKET(?QOS_0, InvalidTopic, 1, <<"payload">>), + ?assertEqual(allow, emqx_access_control:authorize(clientinfo(), Publish2, InvalidTopic)), + ok. + %%-------------------------------------------------------------------- %% Helper functions %%-------------------------------------------------------------------- diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index df1720772..a3fa3e5bc 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -207,14 +207,6 @@ init_per_suite(Config) -> ok = meck:new(emqx_cm, [passthrough, no_history, no_link]), ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end), - %% Access Control Meck - ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]), - ok = meck:expect( - emqx_access_control, - authenticate, - fun(_) -> {ok, #{is_superuser => false}} end - ), - ok = meck:expect(emqx_access_control, authorize, fun(_, _, _) -> allow end), %% Broker Meck ok = meck:new(emqx_broker, [passthrough, no_history, no_link]), %% Hooks Meck @@ -234,7 +226,6 @@ init_per_suite(Config) -> end_per_suite(_Config) -> meck:unload([ - emqx_access_control, emqx_metrics, emqx_session, emqx_broker, @@ -244,11 +235,21 @@ end_per_suite(_Config) -> ]). init_per_testcase(_TestCase, Config) -> + %% Access Control Meck + ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]), + ok = meck:expect( + emqx_access_control, + authenticate, + fun(_) -> {ok, #{is_superuser => false}} end + ), + ok = meck:expect(emqx_access_control, authorize, fun(_, _, _) -> allow end), + %% Set confs OldConf = set_test_listener_confs(), emqx_common_test_helpers:start_apps([]), [{config, OldConf} | Config]. end_per_testcase(_TestCase, Config) -> + meck:unload([emqx_access_control]), emqx_config:put(?config(config, Config)), emqx_common_test_helpers:stop_apps([]), Config. @@ -1115,6 +1116,32 @@ t_ws_cookie_init(_) -> ), ?assertMatch(#{ws_cookie := WsCookie}, emqx_channel:info(clientinfo, Channel)). +%%-------------------------------------------------------------------- +%% Test cases for other mechnisms +%%-------------------------------------------------------------------- + +t_flapping_detect(_) -> + emqx_config:put_zone_conf(default, [flapping_detect, enable], true), + Parent = self(), + ok = meck:expect( + emqx_cm, + open_session, + fun(true, _ClientInfo, _ConnInfo) -> + {ok, #{session => session(), present => false}} + end + ), + ok = meck:expect(emqx_access_control, authenticate, fun(_) -> {error, not_authorized} end), + ok = meck:expect(emqx_flapping, detect, fun(_) -> Parent ! 
flapping_detect end), + IdleChannel = channel(#{conn_state => idle}), + {shutdown, not_authorized, _ConnAck, _Channel} = + emqx_channel:handle_in(?CONNECT_PACKET(connpkt()), IdleChannel), + receive + flapping_detect -> ok + after 2000 -> + ?assert(false, "Flapping detect should be exected in connecting progress") + end, + meck:unload([emqx_flapping]). + %%-------------------------------------------------------------------- %% Helper functions %%-------------------------------------------------------------------- diff --git a/apps/emqx/test/emqx_listeners_SUITE.erl b/apps/emqx/test/emqx_listeners_SUITE.erl index 6ea4d043d..5eb216be5 100644 --- a/apps/emqx/test/emqx_listeners_SUITE.erl +++ b/apps/emqx/test/emqx_listeners_SUITE.erl @@ -148,6 +148,32 @@ t_wss_conn(_) -> {ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000), ok = ssl:close(Socket). +t_format_bind(_) -> + ?assertEqual( + ":1883", + lists:flatten(emqx_listeners:format_bind(1883)) + ), + ?assertEqual( + "0.0.0.0:1883", + lists:flatten(emqx_listeners:format_bind({{0, 0, 0, 0}, 1883})) + ), + ?assertEqual( + "[::]:1883", + lists:flatten(emqx_listeners:format_bind({{0, 0, 0, 0, 0, 0, 0, 0}, 1883})) + ), + ?assertEqual( + "127.0.0.1:1883", + lists:flatten(emqx_listeners:format_bind({{127, 0, 0, 1}, 1883})) + ), + ?assertEqual( + ":1883", + lists:flatten(emqx_listeners:format_bind("1883")) + ), + ?assertEqual( + ":1883", + lists:flatten(emqx_listeners:format_bind(":1883")) + ). + render_config_file() -> Path = local_path(["etc", "emqx.conf"]), {ok, Temp} = file:read_file(Path), diff --git a/apps/emqx/test/emqx_schema_tests.erl b/apps/emqx/test/emqx_schema_tests.erl index 9118ac226..fdda9ef44 100644 --- a/apps/emqx/test/emqx_schema_tests.erl +++ b/apps/emqx/test/emqx_schema_tests.erl @@ -175,3 +175,30 @@ ssl_opts_gc_after_handshake_test_not_rancher_listener_test() -> Checked ), ok. + +to_ip_port_test_() -> + Ip = fun emqx_schema:to_ip_port/1, + Host = fun(Str) -> + case Ip(Str) of + {ok, {_, _} = Res} -> + %% assert + {ok, Res} = emqx_schema:to_host_port(Str); + _ -> + emqx_schema:to_host_port(Str) + end + end, + [ + ?_assertEqual({ok, 80}, Ip("80")), + ?_assertEqual({error, bad_host_port}, Host("80")), + ?_assertEqual({ok, 80}, Ip(":80")), + ?_assertEqual({error, bad_host_port}, Host(":80")), + ?_assertEqual({error, bad_ip_port}, Ip("localhost:80")), + ?_assertEqual({ok, {"localhost", 80}}, Host("localhost:80")), + ?_assertEqual({ok, {"example.com", 80}}, Host("example.com:80")), + ?_assertEqual({ok, {{127, 0, 0, 1}, 80}}, Ip("127.0.0.1:80")), + ?_assertEqual({error, bad_ip_port}, Ip("$:1900")), + ?_assertEqual({error, bad_hostname}, Host("$:1900")), + ?_assertMatch({ok, {_, 1883}}, Ip("[::1]:1883")), + ?_assertMatch({ok, {_, 1883}}, Ip("::1:1883")), + ?_assertMatch({ok, {_, 1883}}, Ip(":::1883")) + ]. diff --git a/apps/emqx/test/emqx_shared_sub_SUITE.erl b/apps/emqx/test/emqx_shared_sub_SUITE.erl index f53e8f374..7c38fdf17 100644 --- a/apps/emqx/test/emqx_shared_sub_SUITE.erl +++ b/apps/emqx/test/emqx_shared_sub_SUITE.erl @@ -22,13 +22,24 @@ -include_lib("emqx/include/emqx.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -define(SUITE, ?MODULE). 
--define(wait(For, Timeout), - emqx_common_test_helpers:wait_for( - ?FUNCTION_NAME, ?LINE, fun() -> For end, Timeout - ) +-define(WAIT(TIMEOUT, PATTERN, Res), + (fun() -> + receive + PATTERN -> + Res; + Other -> + ct:fail(#{ + expected => ??PATTERN, + got => Other + }) + after TIMEOUT -> + ct:fail({timeout, ??PATTERN}) + end + end)() ). -define(ack, shared_sub_ack). @@ -45,10 +56,26 @@ init_per_suite(Config) -> end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([]). -t_is_ack_required(_) -> +init_per_testcase(Case, Config) -> + try + ?MODULE:Case({'init', Config}) + catch + error:function_clause -> + Config + end. + +end_per_testcase(Case, Config) -> + try + ?MODULE:Case({'end', Config}) + catch + error:function_clause -> + ok + end. + +t_is_ack_required(Config) when is_list(Config) -> ?assertEqual(false, emqx_shared_sub:is_ack_required(#message{headers = #{}})). -t_maybe_nack_dropped(_) -> +t_maybe_nack_dropped(Config) when is_list(Config) -> ?assertEqual(false, emqx_shared_sub:maybe_nack_dropped(#message{headers = #{}})), Msg = #message{headers = #{shared_dispatch_ack => {<<"group">>, self(), for_test}}}, ?assertEqual(true, emqx_shared_sub:maybe_nack_dropped(Msg)), @@ -60,7 +87,7 @@ t_maybe_nack_dropped(_) -> end ). -t_nack_no_connection(_) -> +t_nack_no_connection(Config) when is_list(Config) -> Msg = #message{headers = #{shared_dispatch_ack => {<<"group">>, self(), for_test}}}, ?assertEqual(ok, emqx_shared_sub:nack_no_connection(Msg)), ?assertEqual( @@ -71,7 +98,7 @@ t_nack_no_connection(_) -> end ). -t_maybe_ack(_) -> +t_maybe_ack(Config) when is_list(Config) -> ?assertEqual(#message{headers = #{}}, emqx_shared_sub:maybe_ack(#message{headers = #{}})), Msg = #message{headers = #{shared_dispatch_ack => {<<"group">>, self(), for_test}}}, ?assertEqual( @@ -86,10 +113,7 @@ t_maybe_ack(_) -> end ). -% t_subscribers(_) -> -% error('TODO'). - -t_random_basic(_) -> +t_random_basic(Config) when is_list(Config) -> ok = ensure_config(random), ClientId = <<"ClientId">>, Topic = <<"foo">>, @@ -121,7 +145,7 @@ t_random_basic(_) -> %% After the connection for the 2nd session is also closed, %% i.e. when all clients are offline, the following message(s) %% should be delivered randomly. -t_no_connection_nack(_) -> +t_no_connection_nack(Config) when is_list(Config) -> ok = ensure_config(sticky), Publisher = <<"publisher">>, Subscriber1 = <<"Subscriber1">>, @@ -153,54 +177,22 @@ t_no_connection_nack(_) -> %% This is the connection which was picked by broker to dispatch (sticky) for 1st message ?assertMatch([#{packet_id := 1}], recv_msgs(1)), - %% Now kill the connection, expect all following messages to be delivered to the other - %% subscriber. 
- %emqx_mock_client:stop(ConnPid), - %% sleep then make synced calls to session processes to ensure that - %% the connection pid's 'EXIT' message is propagated to the session process - %% also to be sure sessions are still alive - % timer:sleep(2), - % _ = emqx_session:info(SPid1), - % _ = emqx_session:info(SPid2), - % %% Now we know what is the other still alive connection - % [TheOtherConnPid] = [SubConnPid1, SubConnPid2] -- [ConnPid], - % %% Send some more messages - % PacketIdList = lists:seq(2, 10), - % lists:foreach(fun(Id) -> - % SendF(Id), - % ?wait(Received(Id, TheOtherConnPid), 1000) - % end, PacketIdList), - % %% Now close the 2nd (last connection) - % emqx_mock_client:stop(TheOtherConnPid), - % timer:sleep(2), - % %% both sessions should have conn_pid = undefined - % ?assertEqual({conn_pid, undefined}, lists:keyfind(conn_pid, 1, emqx_session:info(SPid1))), - % ?assertEqual({conn_pid, undefined}, lists:keyfind(conn_pid, 1, emqx_session:info(SPid2))), - % %% send more messages, but all should be queued in session state - % lists:foreach(fun(Id) -> SendF(Id) end, PacketIdList), - % {_, L1} = lists:keyfind(mqueue_len, 1, emqx_session:info(SPid1)), - % {_, L2} = lists:keyfind(mqueue_len, 1, emqx_session:info(SPid2)), - % ?assertEqual(length(PacketIdList), L1 + L2), - % %% clean up - % emqx_mock_client:close_session(PubConnPid), - % emqx_sm:close_session(SPid1), - % emqx_sm:close_session(SPid2), ok. -t_random(_) -> +t_random(Config) when is_list(Config) -> ok = ensure_config(random, true), test_two_messages(random). -t_round_robin(_) -> +t_round_robin(Config) when is_list(Config) -> ok = ensure_config(round_robin, true), test_two_messages(round_robin). -t_round_robin_per_group(_) -> +t_round_robin_per_group(Config) when is_list(Config) -> ok = ensure_config(round_robin_per_group, true), test_two_messages(round_robin_per_group). %% this would fail if executed with the standard round_robin strategy -t_round_robin_per_group_even_distribution_one_group(_) -> +t_round_robin_per_group_even_distribution_one_group(Config) when is_list(Config) -> ok = ensure_config(round_robin_per_group, true), Topic = <<"foo/bar">>, Group = <<"group1">>, @@ -264,7 +256,7 @@ t_round_robin_per_group_even_distribution_one_group(_) -> ), ok. -t_round_robin_per_group_even_distribution_two_groups(_) -> +t_round_robin_per_group_even_distribution_two_groups(Config) when is_list(Config) -> ok = ensure_config(round_robin_per_group, true), Topic = <<"foo/bar">>, {ok, ConnPid1} = emqtt:start_link([{clientid, <<"C0">>}]), @@ -350,19 +342,19 @@ t_round_robin_per_group_even_distribution_two_groups(_) -> ), ok. -t_sticky(_) -> +t_sticky(Config) when is_list(Config) -> ok = ensure_config(sticky, true), test_two_messages(sticky). -t_hash(_) -> +t_hash(Config) when is_list(Config) -> ok = ensure_config(hash, false), test_two_messages(hash). -t_hash_clinetid(_) -> +t_hash_clinetid(Config) when is_list(Config) -> ok = ensure_config(hash_clientid, false), test_two_messages(hash_clientid). -t_hash_topic(_) -> +t_hash_topic(Config) when is_list(Config) -> ok = ensure_config(hash_topic, false), ClientId1 = <<"ClientId1">>, ClientId2 = <<"ClientId2">>, @@ -407,7 +399,7 @@ t_hash_topic(_) -> ok. 
%% if the original subscriber dies, change to another one alive -t_not_so_sticky(_) -> +t_not_so_sticky(Config) when is_list(Config) -> ok = ensure_config(sticky), ClientId1 = <<"ClientId1">>, ClientId2 = <<"ClientId2">>, @@ -474,14 +466,14 @@ last_message(ExpectedPayload, Pids) -> last_message(ExpectedPayload, Pids, Timeout) -> receive {publish, #{client_pid := Pid, payload := ExpectedPayload}} -> - ct:pal("~p ====== ~p", [Pids, Pid]), + ?assert(lists:member(Pid, Pids)), {true, Pid} after Timeout -> ct:pal("not yet"), <<"not yet?">> end. -t_dispatch(_) -> +t_dispatch(Config) when is_list(Config) -> ok = ensure_config(random), Topic = <<"foo">>, ?assertEqual( @@ -494,13 +486,13 @@ t_dispatch(_) -> emqx_shared_sub:dispatch(<<"group1">>, Topic, #delivery{message = #message{}}) ). -t_uncovered_func(_) -> +t_uncovered_func(Config) when is_list(Config) -> ignored = gen_server:call(emqx_shared_sub, ignored), ok = gen_server:cast(emqx_shared_sub, ignored), ignored = emqx_shared_sub ! ignored, {mnesia_table_event, []} = emqx_shared_sub ! {mnesia_table_event, []}. -t_per_group_config(_) -> +t_per_group_config(Config) when is_list(Config) -> ok = ensure_group_config(#{ <<"local_group">> => local, <<"round_robin_group">> => round_robin, @@ -521,7 +513,7 @@ t_per_group_config(_) -> test_two_messages(round_robin_per_group, <<"round_robin_per_group_group">>), test_two_messages(round_robin_per_group, <<"round_robin_per_group_group">>). -t_local(_) -> +t_local(Config) when is_list(Config) -> GroupConfig = #{ <<"local_group">> => local, <<"round_robin_group">> => round_robin, @@ -567,7 +559,7 @@ t_local(_) -> ?assertNotEqual(UsedSubPid1, UsedSubPid2), ok. -t_remote(_) -> +t_remote(Config) when is_list(Config) -> %% This testcase verifies dispatching of shared messages to the remote nodes via backplane API. %% %% In this testcase we start two EMQX nodes: local and remote. @@ -594,7 +586,7 @@ t_remote(_) -> try {ok, ClientPidLocal} = emqtt:connect(ConnPidLocal), - {ok, ClientPidRemote} = emqtt:connect(ConnPidRemote), + {ok, _ClientPidRemote} = emqtt:connect(ConnPidRemote), emqtt:subscribe(ConnPidRemote, {<<"$share/remote_group/", Topic/binary>>, 0}), @@ -620,7 +612,7 @@ t_remote(_) -> stop_slave(Node) end. -t_local_fallback(_) -> +t_local_fallback(Config) when is_list(Config) -> ok = ensure_group_config(#{ <<"local_group">> => local, <<"round_robin_group">> => round_robin, @@ -653,9 +645,14 @@ t_local_fallback(_) -> %% This one tests that broker tries to select another shared subscriber %% If the first one doesn't return an ACK -t_redispatch(_) -> - ok = ensure_config(sticky, true), +t_redispatch_qos1_with_ack(Config) when is_list(Config) -> + test_redispatch_qos1(Config, true). +t_redispatch_qos1_no_ack(Config) when is_list(Config) -> + test_redispatch_qos1(Config, false). + +test_redispatch_qos1(_Config, AckEnabled) -> + ok = ensure_config(sticky, AckEnabled), Group = <<"group1">>, Topic = <<"foo/bar">>, ClientId1 = <<"ClientId1">>, @@ -682,10 +679,292 @@ t_redispatch(_) -> emqtt:stop(UsedSubPid2), ok. 
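
The tests below rely on the init_per_testcase/end_per_testcase hooks added earlier in this suite, which dispatch to optional {init, Config} / {'end', Config} clauses of the testcase itself and fall back on the function_clause catch. Sketched with a hypothetical t_example:

t_example({init, Config}) when is_list(Config) ->
    [{prepared, true} | Config];
t_example({'end', Config}) when is_list(Config) ->
    ok;
t_example(Config) when is_list(Config) ->
    ?assert(proplists:get_value(prepared, Config, false)).
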
+t_qos1_random_dispatch_if_all_members_are_down(Config) when is_list(Config) -> + ok = ensure_config(sticky, true), + Group = <<"group1">>, + Topic = <<"foo/bar">>, + ClientId1 = <<"ClientId1">>, + ClientId2 = <<"ClientId2">>, + SubOpts = [{clean_start, false}], + {ok, ConnPub} = emqtt:start_link([{clientid, <<"pub">>}]), + {ok, _} = emqtt:connect(ConnPub), + + {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1} | SubOpts]), + {ok, ConnPid2} = emqtt:start_link([{clientid, ClientId2} | SubOpts]), + {ok, _} = emqtt:connect(ConnPid1), + {ok, _} = emqtt:connect(ConnPid2), + + emqtt:subscribe(ConnPid1, {<<"$share/", Group/binary, "/foo/bar">>, 1}), + emqtt:subscribe(ConnPid2, {<<"$share/", Group/binary, "/foo/bar">>, 1}), + + ok = emqtt:stop(ConnPid1), + ok = emqtt:stop(ConnPid2), + + [Pid1, Pid2] = emqx_shared_sub:subscribers(Group, Topic), + ?assert(is_process_alive(Pid1)), + ?assert(is_process_alive(Pid2)), + + {ok, _} = emqtt:publish(ConnPub, Topic, <<"hello11">>, 1), + ct:sleep(100), + {ok, Msgs1} = gen_server:call(Pid1, get_mqueue), + {ok, Msgs2} = gen_server:call(Pid2, get_mqueue), + %% assert the message is in mqueue (because socket is closed) + ?assertMatch([#message{payload = <<"hello11">>}], Msgs1 ++ Msgs2), + emqtt:stop(ConnPub), + ok. + +%% No ack, QoS 2 subscriptions, +%% client1 receives one message, send pubrec, then suspend +%% client2 acts normal (auto_ack=true) +%% Expected behaviour: +%% the messages sent to client1's inflight and mq are re-dispatched after client1 is down +t_dispatch_qos2({init, Config}) when is_list(Config) -> + emqx_config:put_zone_conf(default, [mqtt, max_inflight], 1), + Config; +t_dispatch_qos2({'end', Config}) when is_list(Config) -> + emqx_config:put_zone_conf(default, [mqtt, max_inflight], 0); +t_dispatch_qos2(Config) when is_list(Config) -> + ok = ensure_config(round_robin, _AckEnabled = false), + Topic = <<"foo/bar/1">>, + ClientId1 = <<"ClientId1">>, + ClientId2 = <<"ClientId2">>, + + {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}, {auto_ack, false}]), + {ok, ConnPid2} = emqtt:start_link([{clientid, ClientId2}, {auto_ack, true}]), + {ok, _} = emqtt:connect(ConnPid1), + {ok, _} = emqtt:connect(ConnPid2), + + emqtt:subscribe(ConnPid1, {<<"$share/group/foo/bar/#">>, 2}), + emqtt:subscribe(ConnPid2, {<<"$share/group/foo/bar/#">>, 2}), + + Message1 = emqx_message:make(ClientId1, 2, Topic, <<"hello1">>), + Message2 = emqx_message:make(ClientId1, 2, Topic, <<"hello2">>), + Message3 = emqx_message:make(ClientId1, 2, Topic, <<"hello3">>), + Message4 = emqx_message:make(ClientId1, 2, Topic, <<"hello4">>), + ct:sleep(100), + + ok = sys:suspend(ConnPid1), + + %% One message is inflight + ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message1)), + ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message2)), + ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message3)), + ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message4)), + + %% assert client 2 receives two messages, they are eiter 1,3 or 2,4 depending + %% on if it's picked as the first one for round_robin + MsgRec1 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P1}}, P1), + MsgRec2 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P2}}, P2), + case MsgRec2 of + <<"hello3">> -> + ?assertEqual(<<"hello1">>, MsgRec1); + <<"hello4">> -> + ?assertEqual(<<"hello2">>, MsgRec1) + end, + sys:resume(ConnPid1), + %% emqtt subscriber automatically sends PUBREC, but since auto_ack is set to false + %% so it will never send PUBCOMP, hence EMQX should not attempt to send + %% the 4th 
message yet since max_inflight is 1. + MsgRec3 = ?WAIT(2000, {publish, #{client_pid := ConnPid1, payload := P3}}, P3), + ct:sleep(100), + %% no message expected + ?assertEqual([], collect_msgs(0)), + %% now kill client 1 + kill_process(ConnPid1), + %% client 2 should receive the message + MsgRec4 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P4}}, P4), + case MsgRec2 of + <<"hello3">> -> + ?assertEqual(<<"hello2">>, MsgRec3), + ?assertEqual(<<"hello4">>, MsgRec4); + <<"hello4">> -> + ?assertEqual(<<"hello1">>, MsgRec3), + ?assertEqual(<<"hello3">>, MsgRec4) + end, + emqtt:stop(ConnPid2), + ok. + +t_dispatch_qos0({init, Config}) when is_list(Config) -> + Config; +t_dispatch_qos0({'end', Config}) when is_list(Config) -> + ok; +t_dispatch_qos0(Config) when is_list(Config) -> + ok = ensure_config(round_robin, _AckEnabled = false), + Topic = <<"foo/bar/1">>, + ClientId1 = <<"ClientId1">>, + ClientId2 = <<"ClientId2">>, + + {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}, {auto_ack, false}]), + {ok, ConnPid2} = emqtt:start_link([{clientid, ClientId2}, {auto_ack, true}]), + {ok, _} = emqtt:connect(ConnPid1), + {ok, _} = emqtt:connect(ConnPid2), + + %% subscribe with QoS 0 + emqtt:subscribe(ConnPid1, {<<"$share/group/foo/bar/#">>, 0}), + emqtt:subscribe(ConnPid2, {<<"$share/group/foo/bar/#">>, 0}), + + %% publish with QoS 2, but should be downgraded to 0 as the subscribers + %% subscribe with QoS 0 + Message1 = emqx_message:make(ClientId1, 2, Topic, <<"hello1">>), + Message2 = emqx_message:make(ClientId1, 2, Topic, <<"hello2">>), + Message3 = emqx_message:make(ClientId1, 2, Topic, <<"hello3">>), + Message4 = emqx_message:make(ClientId1, 2, Topic, <<"hello4">>), + ct:sleep(100), + + ok = sys:suspend(ConnPid1), + + ?assertMatch([_], emqx:publish(Message1)), + ?assertMatch([_], emqx:publish(Message2)), + ?assertMatch([_], emqx:publish(Message3)), + ?assertMatch([_], emqx:publish(Message4)), + + MsgRec1 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P1}}, P1), + MsgRec2 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P2}}, P2), + %% assert hello2 > hello1 or hello4 > hello3 + ?assert(MsgRec2 > MsgRec1), + + kill_process(ConnPid1), + %% expect no redispatch + ?assertEqual([], collect_msgs(timer:seconds(2))), + emqtt:stop(ConnPid2), + ok. 
+ +t_session_takeover({init, Config}) when is_list(Config) -> + Config; +t_session_takeover({'end', Config}) when is_list(Config) -> + ok; +t_session_takeover(Config) when is_list(Config) -> + Topic = <<"t1/a">>, + ClientId = iolist_to_binary("c" ++ integer_to_list(erlang:system_time())), + Opts = [ + {clientid, ClientId}, + {auto_ack, true}, + {proto_ver, v5}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 60}} + ], + {ok, ConnPid1} = emqtt:start_link(Opts), + %% with the same client ID, start another client + {ok, ConnPid2} = emqtt:start_link(Opts), + {ok, _} = emqtt:connect(ConnPid1), + emqtt:subscribe(ConnPid1, {<<"$share/t1/", Topic/binary>>, _QoS = 1}), + Message1 = emqx_message:make(<<"dummypub">>, 2, Topic, <<"hello1">>), + Message2 = emqx_message:make(<<"dummypub">>, 2, Topic, <<"hello2">>), + Message3 = emqx_message:make(<<"dummypub">>, 2, Topic, <<"hello3">>), + Message4 = emqx_message:make(<<"dummypub">>, 2, Topic, <<"hello4">>), + %% Make sure client1 is functioning + ?assertMatch([_], emqx:publish(Message1)), + {true, _} = last_message(<<"hello1">>, [ConnPid1]), + %% Kill client1 + emqtt:stop(ConnPid1), + %% publish another message (should end up in client1's session) + ?assertMatch([_], emqx:publish(Message2)), + %% connect client2 (with the same clientid) + + %% should trigger session take over + {ok, _} = emqtt:connect(ConnPid2), + ?assertMatch([_], emqx:publish(Message3)), + ?assertMatch([_], emqx:publish(Message4)), + {true, _} = last_message(<<"hello2">>, [ConnPid2]), + {true, _} = last_message(<<"hello3">>, [ConnPid2]), + {true, _} = last_message(<<"hello4">>, [ConnPid2]), + ?assertEqual([], collect_msgs(timer:seconds(2))), + emqtt:stop(ConnPid2), + ok. + +t_session_kicked({init, Config}) when is_list(Config) -> + emqx_config:put_zone_conf(default, [mqtt, max_inflight], 1), + Config; +t_session_kicked({'end', Config}) when is_list(Config) -> + emqx_config:put_zone_conf(default, [mqtt, max_inflight], 0); +t_session_kicked(Config) when is_list(Config) -> + ok = ensure_config(round_robin, _AckEnabled = false), + Topic = <<"foo/bar/1">>, + ClientId1 = <<"ClientId1">>, + ClientId2 = <<"ClientId2">>, + + {ok, ConnPid1} = emqtt:start_link([{clientid, ClientId1}, {auto_ack, false}]), + {ok, ConnPid2} = emqtt:start_link([{clientid, ClientId2}, {auto_ack, true}]), + {ok, _} = emqtt:connect(ConnPid1), + {ok, _} = emqtt:connect(ConnPid2), + + emqtt:subscribe(ConnPid1, {<<"$share/group/foo/bar/#">>, 2}), + emqtt:subscribe(ConnPid2, {<<"$share/group/foo/bar/#">>, 2}), + + Message1 = emqx_message:make(ClientId1, 2, Topic, <<"hello1">>), + Message2 = emqx_message:make(ClientId1, 2, Topic, <<"hello2">>), + Message3 = emqx_message:make(ClientId1, 2, Topic, <<"hello3">>), + Message4 = emqx_message:make(ClientId1, 2, Topic, <<"hello4">>), + ct:sleep(100), + + ok = sys:suspend(ConnPid1), + + %% One message is inflight + ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message1)), + ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message2)), + ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message3)), + ?assertMatch([{_, _, {ok, 1}}], emqx:publish(Message4)), + + %% assert client 2 receives two messages, they are eiter 1,3 or 2,4 depending + %% on if it's picked as the first one for round_robin + MsgRec1 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P1}}, P1), + MsgRec2 = ?WAIT(2000, {publish, #{client_pid := ConnPid2, payload := P2}}, P2), + case MsgRec2 of + <<"hello3">> -> + ?assertEqual(<<"hello1">>, MsgRec1); + <<"hello4">> -> + ?assertEqual(<<"hello2">>, 
MsgRec1) + end, + sys:resume(ConnPid1), + %% emqtt subscriber automatically sends PUBREC, but since auto_ack is set to false + %% so it will never send PUBCOMP, hence EMQX should not attempt to send + %% the 4th message yet since max_inflight is 1. + MsgRec3 = ?WAIT(2000, {publish, #{client_pid := ConnPid1, payload := P3}}, P3), + case MsgRec2 of + <<"hello3">> -> + ?assertEqual(<<"hello2">>, MsgRec3); + <<"hello4">> -> + ?assertEqual(<<"hello1">>, MsgRec3) + end, + %% no message expected + ?assertEqual([], collect_msgs(0)), + %% now kick client 1 + kill_process(ConnPid1, fun(_Pid) -> emqx_cm:kick_session(ClientId1) end), + %% client 2 should NOT receive the message + ?assertEqual([], collect_msgs(1000)), + emqtt:stop(ConnPid2), + ?assertEqual([], collect_msgs(0)), + ok. + %%-------------------------------------------------------------------- %% help functions %%-------------------------------------------------------------------- +kill_process(Pid) -> + kill_process(Pid, fun(_) -> erlang:exit(Pid, kill) end). + +kill_process(Pid, WithFun) -> + _ = unlink(Pid), + _ = monitor(process, Pid), + _ = WithFun(Pid), + receive + {'DOWN', _, process, Pid, _} -> + ok + after 10_000 -> + error(timeout) + end. + +collect_msgs(Timeout) -> + collect_msgs([], Timeout). + +collect_msgs(Acc, Timeout) -> + receive + Msg -> + collect_msgs([Msg | Acc], Timeout) + after Timeout -> + lists:reverse(Acc) + end. + ensure_config(Strategy) -> ensure_config(Strategy, _AckEnabled = true). diff --git a/apps/emqx/test/emqx_takeover_SUITE.erl b/apps/emqx/test/emqx_takeover_SUITE.erl index beb7817af..df17e434a 100644 --- a/apps/emqx/test/emqx_takeover_SUITE.erl +++ b/apps/emqx/test/emqx_takeover_SUITE.erl @@ -33,6 +33,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> + emqx_common_test_helpers:boot_modules(all), emqx_channel_SUITE:set_test_listener_confs(), ?check_trace( ?wait_async_action( diff --git a/apps/emqx_authn/i18n/emqx_authn_jwt_i18n.conf b/apps/emqx_authn/i18n/emqx_authn_jwt_i18n.conf index 80d389de4..a420dd7d9 100644 --- a/apps/emqx_authn/i18n/emqx_authn_jwt_i18n.conf +++ b/apps/emqx_authn/i18n/emqx_authn_jwt_i18n.conf @@ -57,7 +57,7 @@ emqx_authn_jwt { endpoint { desc { en: """JWKS endpoint, it's a read-only endpoint that returns the server's public key set in the JWKS format.""" - zh: """JWKS 端点, 它是一个以 JWKS 格式返回服务端的公钥集的只读端点。""" + zh: """JWKS 端点, 它是一个以 JWKS 格式返回服务端的公钥集的只读端点。""" } label { en: """JWKS Endpoint""" diff --git a/apps/emqx_authn/src/emqx_authn.app.src b/apps/emqx_authn/src/emqx_authn.app.src index acd65ee38..fe4a2888d 100644 --- a/apps/emqx_authn/src/emqx_authn.app.src +++ b/apps/emqx_authn/src/emqx_authn.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authn, [ {description, "EMQX Authentication"}, - {vsn, "0.1.7"}, + {vsn, "0.1.8"}, {modules, []}, {registered, [emqx_authn_sup, emqx_authn_registry]}, {applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]}, diff --git a/apps/emqx_authn/src/emqx_authn.erl b/apps/emqx_authn/src/emqx_authn.erl index 79d269a55..1f986e016 100644 --- a/apps/emqx_authn/src/emqx_authn.erl +++ b/apps/emqx_authn/src/emqx_authn.erl @@ -70,7 +70,9 @@ do_check_config(#{<<"mechanism">> := Mec} = Config, Opts) -> #{?CONF_NS_BINARY => Config}, Opts#{atom_key => true} ) - end. + end; +do_check_config(_Config, _Opts) -> + throw({invalid_config, "mechanism_field_required"}). 
atom(Bin) -> try diff --git a/apps/emqx_authn/src/emqx_authn_api.erl b/apps/emqx_authn/src/emqx_authn_api.erl index c7f8eb824..6b5a8c5cd 100644 --- a/apps/emqx_authn/src/emqx_authn_api.erl +++ b/apps/emqx_authn/src/emqx_authn_api.erl @@ -34,7 +34,7 @@ % Swagger -define(API_TAGS_GLOBAL, [<<"Authentication">>]). --define(API_TAGS_SINGLE, [<<"Listener authentication">>]). +-define(API_TAGS_SINGLE, [<<"Listener Authentication">>]). -export([ api_spec/0, diff --git a/apps/emqx_authn/src/emqx_authn_app.erl b/apps/emqx_authn/src/emqx_authn_app.erl index f761bfe33..5adf067b4 100644 --- a/apps/emqx_authn/src/emqx_authn_app.erl +++ b/apps/emqx_authn/src/emqx_authn_app.erl @@ -37,8 +37,10 @@ start(_StartType, _StartArgs) -> ok = mria_rlog:wait_for_shards([?AUTH_SHARD], infinity), {ok, Sup} = emqx_authn_sup:start_link(), - ok = initialize(), - {ok, Sup}. + case initialize() of + ok -> {ok, Sup}; + {error, Reason} -> {error, Reason} + end. stop(_State) -> ok = deinitialize(), @@ -49,18 +51,26 @@ stop(_State) -> %%------------------------------------------------------------------------------ initialize() -> - ok = ?AUTHN:register_providers(emqx_authn:providers()), + try + ok = ?AUTHN:register_providers(emqx_authn:providers()), - lists:foreach( - fun({ChainName, RawAuthConfigs}) -> - AuthConfig = emqx_authn:check_configs(RawAuthConfigs), - ?AUTHN:initialize_authentication( - ChainName, - AuthConfig - ) - end, - chain_configs() - ). + lists:foreach( + fun({ChainName, RawAuthConfigs}) -> + AuthConfig = emqx_authn:check_configs(RawAuthConfigs), + ?AUTHN:initialize_authentication( + ChainName, + AuthConfig + ) + end, + chain_configs() + ) + of + ok -> ok + catch + throw:Reason -> + ?SLOG(error, #{msg => "failed_to_initialize_authentication", reason => Reason}), + {error, {failed_to_initialize_authentication, Reason}} + end. deinitialize() -> ok = ?AUTHN:deregister_providers(provider_types()), diff --git a/apps/emqx_authn/src/emqx_authn_user_import_api.erl b/apps/emqx_authn/src/emqx_authn_user_import_api.erl index 47d4de81c..dbd4c3656 100644 --- a/apps/emqx_authn/src/emqx_authn_user_import_api.erl +++ b/apps/emqx_authn/src/emqx_authn_user_import_api.erl @@ -30,7 +30,7 @@ % Swagger -define(API_TAGS_GLOBAL, [<<"Authentication">>]). --define(API_TAGS_SINGLE, [<<"Listener authentication">>]). +-define(API_TAGS_SINGLE, [<<"Listener Authentication">>]). 
-export([ api_spec/0, diff --git a/apps/emqx_authz/i18n/emqx_authz_api_cache_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_api_cache_i18n.conf index 31ad90fa1..9c620a22d 100644 --- a/apps/emqx_authz/i18n/emqx_authz_api_cache_i18n.conf +++ b/apps/emqx_authz/i18n/emqx_authz_api_cache_i18n.conf @@ -2,7 +2,7 @@ emqx_authz_api_cache { authorization_cache_delete { desc { en: """Clean all authorization cache in the cluster.""" - zh: """清除集群中所有鉴权数据缓存""" + zh: """清除集群中所有授权数据缓存。""" } } } diff --git a/apps/emqx_authz/i18n/emqx_authz_api_schema_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_api_schema_i18n.conf index 6d2c66b79..afec5c109 100644 --- a/apps/emqx_authz/i18n/emqx_authz_api_schema_i18n.conf +++ b/apps/emqx_authz/i18n/emqx_authz_api_schema_i18n.conf @@ -1,8 +1,8 @@ emqx_authz_api_schema { enable { desc { - en: """Set to true or false to disable this ACL provider""" - zh: """设为 truefalse 以启用或禁用此访问控制数据源""" + en: """Set to true or false to disable this ACL provider.""" + zh: """设为 truefalse 以启用或禁用此访问控制数据源。""" } label { en: """enable""" @@ -13,7 +13,7 @@ emqx_authz_api_schema { type { desc { en: """Backend type.""" - zh: """数据后端类型""" + zh: """数据后端类型。""" } label { en: """type""" @@ -26,7 +26,7 @@ emqx_authz_api_schema { rules { desc { en: """Authorization static file rules.""" - zh: """静态鉴权文件规则""" + zh: """静态授权文件规则。""" } label { en: """rules""" @@ -39,7 +39,7 @@ emqx_authz_api_schema { method { desc { en: """HTTP method.""" - zh: """HTTP 请求方法""" + zh: """HTTP 请求方法。""" } label { en: """method""" @@ -50,7 +50,7 @@ emqx_authz_api_schema { url { desc { en: """URL of the auth server.""" - zh: """认证服务器 URL""" + zh: """认证服务器 URL。""" } label { en: """url""" @@ -72,7 +72,7 @@ emqx_authz_api_schema { headers_no_content_type { desc { en: """List of HTTP headers (without content-type).""" - zh: """HTTP Headers 列表(无 content-type)""" + zh: """HTTP Headers 列表(无 content-type)。""" } label { en: """headers_no_content_type""" @@ -83,7 +83,7 @@ emqx_authz_api_schema { body { desc { en: """HTTP request body.""" - zh: """HTTP 请求体""" + zh: """HTTP 请求体。""" } label { en: """body""" @@ -94,7 +94,7 @@ emqx_authz_api_schema { request_timeout { desc { en: """Request timeout.""" - zh: """请求超时时间""" + zh: """请求超时时间。""" } label { en: """request_timeout""" @@ -111,7 +111,7 @@ emqx_authz_api_schema { collection { desc { en: """`MongoDB` collection containing the authorization data.""" - zh: """`MongoDB` 鉴权数据集""" + zh: """`MongoDB` 授权数据集。""" } label { en: """collection""" @@ -153,7 +153,7 @@ Filter supports the following placeholders: cmd { desc { en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询命令""" + zh: """访问控制数据查询命令。""" } label { en: """cmd""" @@ -166,7 +166,7 @@ Filter supports the following placeholders: query { desc { en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询语句""" + zh: """访问控制数据查询语句。""" } label { en: """query""" @@ -178,8 +178,8 @@ Filter supports the following placeholders: position { desc { - en: """Where to place the source""" - zh: """认证数据源位置""" + en: """Where to place the source.""" + zh: """认证数据源位置。""" } label { en: """position""" diff --git a/apps/emqx_authz/i18n/emqx_authz_api_settings_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_api_settings_i18n.conf index c739bedbb..b44580b34 100644 --- a/apps/emqx_authz/i18n/emqx_authz_api_settings_i18n.conf +++ b/apps/emqx_authz/i18n/emqx_authz_api_settings_i18n.conf @@ -2,14 +2,14 @@ emqx_authz_api_settings { authorization_settings_get { desc { en: """Get authorization settings""" - zh: """获取鉴权配置""" + zh: 
"""获取授权配置""" } } authorization_settings_put { desc { en: """Update authorization settings""" - zh: """更新鉴权配置""" + zh: """更新授权配置""" } } } diff --git a/apps/emqx_authz/i18n/emqx_authz_api_sources_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_api_sources_i18n.conf index 6c3f59d93..c5f0eaad4 100644 --- a/apps/emqx_authz/i18n/emqx_authz_api_sources_i18n.conf +++ b/apps/emqx_authz/i18n/emqx_authz_api_sources_i18n.conf @@ -2,56 +2,56 @@ emqx_authz_api_sources { authorization_sources_get { desc { en: """List all authorization sources""" - zh: """列出所有鉴权数据源""" + zh: """列出所有授权数据源""" } } authorization_sources_post { desc { en: """Add a new source""" - zh: """添加鉴权数据源""" + zh: """添加授权数据源""" } } authorization_sources_type_get { desc { en: """Get a authorization source""" - zh: """获取指定类型的鉴权数据源""" + zh: """获取指定类型的授权数据源""" } } authorization_sources_type_put { desc { en: """Update source""" - zh: """更新指定类型的鉴权数据源""" + zh: """更新指定类型的授权数据源""" } } authorization_sources_type_delete { desc { en: """Delete source""" - zh: """删除指定类型的鉴权数据源""" + zh: """删除指定类型的授权数据源""" } } authorization_sources_type_status_get { desc { en: """Get a authorization source""" - zh: """获取指定鉴权数据源的状态""" + zh: """获取指定授权数据源的状态""" } } authorization_sources_type_move_post { desc { en: """Change the exection order of sources""" - zh: """更新鉴权数据源的优先执行顺序""" + zh: """更新授权数据源的优先执行顺序""" } } sources { desc { en: """Authorization source""" - zh: """鉴权数据源列表""" + zh: """授权数据源列表""" } label { en: """sources""" @@ -62,7 +62,7 @@ emqx_authz_api_sources { sources { desc { en: """Authorization sources""" - zh: """鉴权数据源列表""" + zh: """授权数据源列表""" } label { en: """sources""" @@ -84,7 +84,7 @@ emqx_authz_api_sources { source { desc { en: """Authorization source""" - zh: """鉴权数据源""" + zh: """授权数据源""" } label { en: """source""" diff --git a/apps/emqx_authz/i18n/emqx_authz_schema_i18n.conf b/apps/emqx_authz/i18n/emqx_authz_schema_i18n.conf index e83169978..a10128592 100644 --- a/apps/emqx_authz/i18n/emqx_authz_schema_i18n.conf +++ b/apps/emqx_authz/i18n/emqx_authz_schema_i18n.conf @@ -2,41 +2,41 @@ emqx_authz_schema { sources { desc { en: """ -Authorization data sources.
+Authorization data sources.
An array of authorization (ACL) data providers. It is designed as an array, not a hash-map, so the sources can be -ordered to form a chain of access controls.
+ordered to form a chain of access controls.
When authorizing a 'publish' or 'subscribe' action, the configured sources are checked in order. When checking an ACL source, in case the client (identified by username or client ID) is not found, it moves on to the next source. And it stops immediately -once an 'allow' or 'deny' decision is returned.
+once an 'allow' or 'deny' decision is returned.
If the client is not found in any of the sources, -the default action configured in 'authorization.no_match' is applied.
+the default action configured in 'authorization.no_match' is applied.
NOTE: The source elements are identified by their 'type'. It is NOT allowed to configure two or more sources of the same type. """ zh: """ -鉴权数据源.
-鉴权(ACL)数据源的列表. -它被设计为一个数组,而不是一个散列映射, -所以可以作为链式访问控制.
+授权数据源。
+授权(ACL)数据源的列表。 +它被设计为一个数组,而不是一个散列映射, +所以可以作为链式访问控制。
-当授权一个 'publish' 或 'subscribe' 行为时, +当授权一个 'publish' 或 'subscribe' 行为时, 该配置列表中的所有数据源将按顺序进行检查。 -如果在某个客户端未找到时(使用 ClientID 或 Username), -将会移动到下一个数据源. 直至得到 'allow' 或 'deny' 的结果.
+如果在某个客户端未找到时(使用 ClientID 或 Username)。 +将会移动到下一个数据源。直至得到 'allow' 或 'deny' 的结果。
-如果在任何数据源中都未找到对应的客户端信息, -配置的默认行为 ('authorization.no_match') 将生效.
+如果在任何数据源中都未找到对应的客户端信息。 +配置的默认行为 ('authorization.no_match') 将生效。
-注意: -数据源使用 'type' 进行标识. -使用同一类型的数据源多于一次不被允许. +注意: +数据源使用 'type' 进行标识。 +使用同一类型的数据源多于一次不被允许。 """ } label { @@ -83,7 +83,7 @@ It is NOT allowed to configure two or more sources of the same type. file { desc { en: """Authorization using a static file.""" - zh: """使用静态文件鉴权""" + zh: """使用静态文件授权""" } label { en: """file""" @@ -109,7 +109,7 @@ and the old file will not be used anymore. 那么可以将该文件置于任何 EMQX 可以访问到的位置。 如果从 EMQX Dashboard 或 HTTP API 创建或修改了规则集, -那么EMQX将会生成一个新的文件并将它存放在 `data_dir` 下的 `authz` 子目录中, +那么EMQX将会生成一个新的文件并将它存放在 `data_dir` 下的 `authz` 子目录中, 并从此弃用旧的文件。""" } label { @@ -123,7 +123,7 @@ and the old file will not be used anymore. http_get { desc { en: """Authorization using an external HTTP server (via GET requests).""" - zh: """使用外部 HTTP 服务器鉴权(GET 请求)。""" + zh: """使用外部 HTTP 服务器授权(GET 请求)。""" } label { en: """http_get""" @@ -134,7 +134,7 @@ and the old file will not be used anymore. http_post { desc { en: """Authorization using an external HTTP server (via POST requests).""" - zh: """使用外部 HTTP 服务器鉴权(POST 请求)。""" + zh: """使用外部 HTTP 服务器授权(POST 请求)。""" } label { en: """http_post""" @@ -156,7 +156,7 @@ and the old file will not be used anymore. url { desc { en: """URL of the auth server.""" - zh: """鉴权 HTTP 服务器地址。""" + zh: """授权 HTTP 服务器地址。""" } label { en: """URL""" @@ -213,7 +213,7 @@ and the old file will not be used anymore. mnesia { desc { en: """Authorization using a built-in database (mnesia).""" - zh: """使用内部数据库鉴权 (mnesia).""" + zh: """使用内部数据库授权(mnesia)。""" } label { en: """mnesia""" @@ -226,7 +226,7 @@ and the old file will not be used anymore. mongo_single { desc { en: """Authorization using a single MongoDB instance.""" - zh: """使用 MongoDB 鉴权(单实例)""" + zh: """使用 MongoDB 授权(单实例)。""" } label { en: """mongo_single""" @@ -237,7 +237,7 @@ and the old file will not be used anymore. mongo_rs { desc { en: """Authorization using a MongoDB replica set.""" - zh: """使用 MongoDB 鉴权(副本集模式)""" + zh: """使用 MongoDB 授权(副本集模式)""" } label { en: """mongo_rs""" @@ -248,7 +248,7 @@ and the old file will not be used anymore. mongo_sharded { desc { en: """Authorization using a sharded MongoDB cluster.""" - zh: """使用 MongoDB 鉴权(分片集群模式)""" + zh: """使用 MongoDB 授权(分片集群模式)。""" } label { en: """mongo_sharded""" @@ -259,7 +259,7 @@ and the old file will not be used anymore. 
collection { desc { en: """`MongoDB` collection containing the authorization data.""" - zh: """`MongoDB` 鉴权数据集""" + zh: """`MongoDB` 授权数据集。""" } label { en: """collection""" @@ -278,8 +278,8 @@ Filter supports the following placeholders: zh: """ 在查询中定义过滤条件的条件表达式。 过滤器支持如下占位符: -- ${username}: 将在运行时被替换为客户端连接时使用的用户名 -- ${clientid}: 将在运行时被替换为客户端连接时使用的客户端标识符 +- ${username}:将在运行时被替换为客户端连接时使用的用户名 +- ${clientid}:将在运行时被替换为客户端连接时使用的客户端标识符 """ } label { @@ -293,7 +293,7 @@ Filter supports the following placeholders: mysql { desc { en: """Authorization using a MySQL database.""" - zh: """使用 MySOL 数据库鉴权""" + zh: """使用 MySOL 数据库授权""" } label { en: """mysql""" @@ -306,7 +306,7 @@ Filter supports the following placeholders: postgresql { desc { en: """Authorization using a PostgreSQL database.""" - zh: """使用 PostgreSQL 数据库鉴权""" + zh: """使用 PostgreSQL 数据库授权""" } label { en: """postgresql""" @@ -319,7 +319,7 @@ Filter supports the following placeholders: redis_single { desc { en: """Authorization using a single Redis instance.""" - zh: """使用 Redis 鉴权(单实例)""" + zh: """使用 Redis 授权(单实例)。""" } label { en: """redis_single""" @@ -330,7 +330,7 @@ Filter supports the following placeholders: redis_sentinel { desc { en: """Authorization using a Redis Sentinel.""" - zh: """使用 Redis 鉴权(哨兵模式)""" + zh: """使用 Redis 授权(哨兵模式)。""" } label { en: """redis_sentinel""" @@ -341,7 +341,7 @@ Filter supports the following placeholders: redis_cluster { desc { en: """Authorization using a Redis cluster.""" - zh: """使用 Redis 鉴权(集群模式)""" + zh: """使用 Redis 授权(集群模式)。""" } label { en: """redis_cluster""" @@ -365,7 +365,7 @@ Filter supports the following placeholders: query { desc { en: """Database query used to retrieve authorization data.""" - zh: """访问控制数据查询语句/查询命令""" + zh: """访问控制数据查询语句/查询命令。""" } label { en: """query""" @@ -510,44 +510,44 @@ Filter supports the following placeholders: metrics_total { desc { en: """The total number of times the authorization rule was triggered.""" - zh: """鉴权实例被触发的总次数。""" + zh: """授权实例被触发的总次数。""" } label: { en: """The Total Number of Times the Authorization Rule was Triggered""" - zh: """鉴权实例被触发的总次数""" + zh: """授权实例被触发的总次数""" } } nomatch { desc { en: """The number of times that no authorization rules were matched.""" - zh: """没有匹配到任何鉴权规则的次数。""" + zh: """没有匹配到任何授权规则的次数。""" } label: { en: """The Number of Times that no Authorization Rules were Matched""" - zh: """没有匹配到任何鉴权规则的次数""" + zh: """没有匹配到任何授权规则的次数""" } } allow { desc { en: """The number of times the authentication was successful.""" - zh: """鉴权成功的次数。""" + zh: """授权成功的次数。""" } label: { en: """The Number of Times the Authentication was Successful""" - zh: """鉴权成功次数""" + zh: """授权成功次数""" } } deny { desc { en: """The number of authentication failures.""" - zh: """鉴权失败的次数。""" + zh: """授权失败的次数。""" } label: { en: """The Number of Authentication Failures""" - zh: """鉴权失败次数""" + zh: """授权失败次数""" } } } diff --git a/apps/emqx_authz/src/emqx_authz.app.src b/apps/emqx_authz/src/emqx_authz.app.src index 3f63859c2..33d9a7e41 100644 --- a/apps/emqx_authz/src/emqx_authz.app.src +++ b/apps/emqx_authz/src/emqx_authz.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authz, [ {description, "An OTP application"}, - {vsn, "0.1.6"}, + {vsn, "0.1.7"}, {registered, []}, {mod, {emqx_authz_app, []}}, {applications, [ diff --git a/apps/emqx_authz/src/emqx_authz_api_sources.erl b/apps/emqx_authz/src/emqx_authz_api_sources.erl index 9ff65f8a5..3af2988db 100644 --- a/apps/emqx_authz/src/emqx_authz_api_sources.erl +++ b/apps/emqx_authz/src/emqx_authz_api_sources.erl 
@@ -40,7 +40,8 @@ -export([ api_spec/0, paths/0, - schema/1 + schema/1, + fields/1 ]). -export([ @@ -63,6 +64,9 @@ paths() -> "/authorization/sources/:type/move" ]. +fields(sources) -> + [{sources, mk(array(hoconsc:union(authz_sources_type_refs())), #{desc => ?DESC(sources)})}]. + %%-------------------------------------------------------------------- %% Schema for each URI %%-------------------------------------------------------------------- @@ -75,10 +79,7 @@ schema("/authorization/sources") -> tags => ?TAGS, responses => #{ - 200 => mk( - array(hoconsc:union(authz_sources_type_refs())), - #{desc => ?DESC(sources)} - ) + 200 => ref(?MODULE, sources) } }, post => @@ -241,7 +242,7 @@ source(Method, #{bindings := #{type := Type} = Bindings} = Req) when source(get, #{bindings := #{type := Type}}) -> case get_raw_source(Type) of [] -> - {404, #{message => <<"Not found ", Type/binary>>}}; + {404, #{code => <<"NOT_FOUND">>, message => <<"Not found: ", Type/binary>>}}; [#{<<"type">> := <<"file">>, <<"enable">> := Enable, <<"path">> := Path}] -> case file:read_file(Path) of {ok, Rules} -> diff --git a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl index 04713090e..e26ad9839 100644 --- a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl @@ -181,6 +181,12 @@ t_api(_) -> {ok, 200, Result1} = request(get, uri(["authorization", "sources"]), []), ?assertEqual([], get_sources(Result1)), + {ok, 404, ErrResult} = request(get, uri(["authorization", "sources", "http"]), []), + ?assertMatch( + #{<<"code">> := <<"NOT_FOUND">>, <<"message">> := <<"Not found: http">>}, + jsx:decode(ErrResult) + ), + [ begin {ok, 204, _} = request(post, uri(["authorization", "sources"]), Source) diff --git a/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_i18n.conf b/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_i18n.conf index b93a186b1..57f744e8e 100644 --- a/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_i18n.conf +++ b/apps/emqx_auto_subscribe/i18n/emqx_auto_subscribe_i18n.conf @@ -2,7 +2,7 @@ emqx_auto_subscribe_schema { auto_subscribe { desc { en: """After the device logs in successfully, the subscription is automatically completed for the device through the pre-defined subscription representation. 
Supports the use of placeholders.""" - zh: """设备登陆成功之后,通过预设的订阅表示符,为设备自动完成订阅。支持使用占位符。""" + zh: """设备登录成功之后,通过预设的订阅表示符,为设备自动完成订阅。支持使用占位符。""" } lable { en: """Auto Subscribe""" diff --git a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.app.src b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.app.src index 937f201bb..1c5627a1f 100644 --- a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.app.src +++ b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_auto_subscribe, [ {description, "An OTP application"}, - {vsn, "0.1.1"}, + {vsn, "0.1.2"}, {registered, []}, {mod, {emqx_auto_subscribe_app, []}}, {applications, [ diff --git a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_api.erl b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_api.erl index 4edb709b9..4d48bfced 100644 --- a/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_api.erl +++ b/apps/emqx_auto_subscribe/src/emqx_auto_subscribe_api.erl @@ -44,14 +44,14 @@ schema("/mqtt/auto_subscribe") -> 'operationId' => auto_subscribe, get => #{ description => ?DESC(list_auto_subscribe_api), - tags => [<<"Auto subscribe">>], + tags => [<<"Auto Subscribe">>], responses => #{ 200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe") } }, put => #{ description => ?DESC(update_auto_subscribe_api), - tags => [<<"Auto subscribe">>], + tags => [<<"Auto Subscribe">>], 'requestBody' => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"), responses => #{ 200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"), diff --git a/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf b/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf index 9e89b5f0c..d9d2d0c40 100644 --- a/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf +++ b/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf @@ -14,16 +14,16 @@ emqx_bridge_webhook_schema { config_url { desc { en: """ -The URL of the HTTP Bridge.
+The URL of the HTTP Bridge.
Template with variables is allowed in the path, but variables cannot be used in the scheme, host, -or port part.
+or port part.
For example, http://localhost:9901/${topic} is allowed, but http://${host}:9901/message or http://localhost:${port}/message is not allowed. """ zh: """ -HTTP Bridge 的 URL。
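A hedged sketch of how such a templated URL might appear in a webhook bridge entry (the bridge name and the field values are illustrative only; 'method' and 'body' are the fields documented further below):

    bridges.webhook.my_webhook {
      # variables may appear in the path, never in the scheme, host or port
      url    = "http://localhost:9901/${topic}"
      method = post
      body   = "${payload}"
    }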
-路径中允许使用带变量的模板,但是 host, port 不允许使用变量模板。
+HTTP Bridge 的 URL。
+路径中允许使用带变量的模板,但是 host, port 不允许使用变量模板。
例如, http://localhost:9901/${topic} 是允许的, 但是 http://${host}:9901/message http://localhost:${port}/message @@ -40,13 +40,13 @@ HTTP Bridge 的 URL。
desc { en: """ The MQTT topic filter to be forwarded to the HTTP server. All MQTT 'PUBLISH' messages with the topic -matching the local_topic will be forwarded.
+matching the local_topic will be forwarded.
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that match local_topic will be forwarded. """ zh: """ -发送到 'local_topic' 的消息都会转发到 HTTP 服务器。
+发送到 'local_topic' 的消息都会转发到 HTTP 服务器。
注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 HTTP 服务器。 """ } @@ -59,12 +59,12 @@ will be forwarded. config_method { desc { en: """ -The method of the HTTP request. All the available methods are: post, put, get, delete.
-Template with variables is allowed.
+The method of the HTTP request. All the available methods are: post, put, get, delete.
+Template with variables is allowed.
""" zh: """ -HTTP 请求的方法。 所有可用的方法包括:post、put、get、delete。
-允许使用带有变量的模板。
""" +HTTP 请求的方法。 所有可用的方法包括:post、put、get、delete。
+允许使用带有变量的模板。
""" } label: { en: "HTTP Method" @@ -75,11 +75,11 @@ HTTP 请求的方法。 所有可用的方法包括:post、put、get、delete config_headers { desc { en: """ -The headers of the HTTP request.
+The headers of the HTTP request.
Template with variables is allowed. """ zh: """ -HTTP 请求的标头。
+HTTP 请求的标头。
允许使用带有变量的模板。 """ } @@ -92,11 +92,11 @@ HTTP 请求的标头。
config_body { desc { en: """ -The body of the HTTP request.
+The body of the HTTP request.
Template with variables is allowed. """ zh: """ -HTTP 请求的正文。
+HTTP 请求的正文。
允许使用带有变量的模板。""" } label: { diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index 822c9bcdf..d513fd38d 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -518,34 +518,16 @@ lookup_from_local_node(BridgeType, BridgeName) -> invalid -> {400, error_msg('BAD_REQUEST', <<"invalid operation">>)}; OperFunc -> - TargetNode = binary_to_atom(Node, utf8), ConfMap = emqx:get_config([bridges, BridgeType, BridgeName]), case maps:get(enable, ConfMap, false) of false -> {403, error_msg( - 'FORBIDDEN_REQUEST', <<"forbidden operation: bridge disabled">> + 'FORBIDDEN_REQUEST', + <<"forbidden operation: bridge disabled">> )}; true -> - case emqx_bridge_proto_v1:OperFunc(TargetNode, BridgeType, BridgeName) of - ok -> - {200}; - {error, timeout} -> - {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; - {error, {start_pool_failed, Name, Reason}} -> - {503, - error_msg( - 'SERVICE_UNAVAILABLE', - bin( - io_lib:format( - "failed to start ~p pool for reason ~p", - [Name, Reason] - ) - ) - )}; - {error, Reason} -> - {500, error_msg('INTERNAL_ERROR', Reason)} - end + call_operation(Node, OperFunc, BridgeType, BridgeName) end end ). @@ -794,3 +776,33 @@ bin(S) when is_atom(S) -> atom_to_binary(S, utf8); bin(S) when is_binary(S) -> S. + +call_operation(Node, OperFunc, BridgeType, BridgeName) -> + case emqx_misc:safe_to_existing_atom(Node, utf8) of + {ok, TargetNode} -> + case + emqx_bridge_proto_v1:OperFunc( + TargetNode, BridgeType, BridgeName + ) + of + ok -> + {200}; + {error, timeout} -> + {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; + {error, {start_pool_failed, Name, Reason}} -> + {503, + error_msg( + 'SERVICE_UNAVAILABLE', + bin( + io_lib:format( + "failed to start ~p pool for reason ~p", + [Name, Reason] + ) + ) + )}; + {error, Reason} -> + {500, error_msg('INTERNAL_ERROR', Reason)} + end; + {error, _} -> + {400, error_msg('INVALID_NODE', <<"invalid node">>)} + end. diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index 2894ec461..bb5f5cc54 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -71,9 +71,10 @@ bridge_id(BridgeType, BridgeName) -> Type = bin(BridgeType), <>. +-spec parse_bridge_id(list() | binary() | atom()) -> {atom(), binary()}. parse_bridge_id(BridgeId) -> case string:split(bin(BridgeId), ":", all) of - [Type, Name] -> {binary_to_atom(Type, utf8), binary_to_atom(Name, utf8)}; + [Type, Name] -> {binary_to_atom(Type, utf8), Name}; _ -> error({invalid_bridge_id, BridgeId}) end. diff --git a/apps/emqx_conf/i18n/emqx_conf_schema.conf b/apps/emqx_conf/i18n/emqx_conf_schema.conf index fb986785b..fc33589ec 100644 --- a/apps/emqx_conf/i18n/emqx_conf_schema.conf +++ b/apps/emqx_conf/i18n/emqx_conf_schema.conf @@ -71,12 +71,12 @@ For more information, see: https://www.erlang.org/doc/man/erl.html desc { en: """Service discovery method for the cluster nodes.""" zh: """集群节点发现方式。可选值为: -- manual: 手动加入集群
-- static: 配置静态节点。配置几个固定的节点,新节点通过连接固定节点中的某一个来加入集群。
-- mcast: 使用 UDP 多播的方式发现节点。
-- dns: 使用 DNS A 记录的方式发现节点。
-- etcd: 使用 etcd 发现节点。
-- k8s: 使用 Kubernetes 发现节点。
+- manual: 手动加入集群
+- static: 配置静态节点。配置几个固定的节点,新节点通过连接固定节点中的某一个来加入集群。
+- mcast: 使用 UDP 多播的方式发现节点。
+- dns: 使用 DNS A 记录的方式发现节点。
+- etcd: 使用 etcd 发现节点。
+- k8s: 使用 Kubernetes 发现节点。
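For reference, a minimal sketch of static discovery with these options (node names are placeholders):

    cluster {
      discovery_strategy = static
      static {
        # a new node joins by connecting to one of these fixed seed nodes
        seeds = ["emqx@10.0.0.1", "emqx@10.0.0.2"]
      }
    }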
""" } label { @@ -111,9 +111,9 @@ For more information, see: https://www.erlang.org/doc/man/erl.html desc { en: """The Erlang distribution protocol for the cluster.""" zh: """分布式 Erlang 集群协议类型。可选值为: -- inet_tcp: 使用 IPv4
-- inet6_tcp 使用 IPv6
-- inet_tls: 使用 TLS,需要与 node.ssl_dist_optfile 配置一起使用。
+- inet_tcp: 使用 IPv4
+- inet6_tcp: 使用 IPv6<br/>
+- inet_tls: 使用 TLS,需要与 node.ssl_dist_optfile 配置一起使用。
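A small sketch of switching the distribution protocol to TLS, assuming the node.ssl_dist_optfile option referenced above (the file path is illustrative):

    cluster {
      proto_dist = inet_tls
    }
    node {
      # Erlang ssl_dist options file; required when proto_dist = inet_tls
      ssl_dist_optfile = "etc/ssl_dist.conf"
    }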
""" } label { @@ -152,7 +152,7 @@ For more information, see: https://www.erlang.org/doc/man/erl.html cluster_mcast_ports { desc { - en: """List of UDP ports used for service discovery.
+ en: """List of UDP ports used for service discovery.
Note: probe messages are broadcast to all the specified ports. """ zh: """指定多播端口。如有多个端口使用逗号 , 分隔。 @@ -286,7 +286,7 @@ Applicable when cluster.discovery_strategy = dns desc { en: """Key prefix used for EMQX service discovery.""" zh: """指定 etcd 路径的前缀。每个节点在 etcd 中都会创建一个路径: -v2/keys///
+v2/keys///
当 cluster.discovery_strategy 为 etcd 时,此配置项才有效。 """ } @@ -357,7 +357,7 @@ Setting cluster.k8s.address_type to ip will make EMQX to discover IP addresses of peer nodes from Kubernetes API. """ zh: """当使用 k8s 方式集群时,address_type 用来从 Kubernetes 接口的应答里获取什么形式的 Host 列表。 -指定 cluster.k8s.address_typeip,则将从 Kubernetes 接口中获取集群中其他节点 +指定 cluster.k8s.address_typeip,则将从 Kubernetes 接口中获取集群中其他节点 的IP地址。 """ } @@ -382,7 +382,7 @@ make EMQX to discover IP addresses of peer nodes from Kubernetes API. cluster_k8s_suffix { desc { - en: """Node name suffix.
+ en: """Node name suffix.
Note: this parameter is only relevant when address_type is dns or hostname.""" zh: """当使用 k8s 方式并且 cluster.k8s.address_type 指定为 dns 类型时,可设置 emqx 节点名的后缀。 @@ -426,26 +426,26 @@ belong to different clusters from accidentally connecting to each other.""" node_data_dir { desc { en: """ -Path to the persistent data directory.
-Possible auto-created subdirectories are:
-- `mnesia/`: EMQX's built-in database directory.
-For example, `mnesia/emqx@127.0.0.1`.
-There should be only one such subdirectory.
-Meaning, in case the node is to be renamed (to e.g. `emqx@10.0.1.1`),
-the old dir should be deleted first.
-- `configs`: Generated configs at boot time, and cluster/local override configs.
-- `patches`: Hot-patch beam files are to be placed here.
-- `trace`: Trace log files.
+Path to the persistent data directory.
+Possible auto-created subdirectories are:
+- `mnesia/`: EMQX's built-in database directory.
+For example, `mnesia/emqx@127.0.0.1`.
+There should be only one such subdirectory.
+Meaning, in case the node is to be renamed (to e.g. `emqx@10.0.1.1`),
+the old dir should be deleted first.
+- `configs`: Generated configs at boot time, and cluster/local override configs.
+- `patches`: Hot-patch beam files are to be placed here.
+- `trace`: Trace log files.
**NOTE**: One data dir cannot be shared by two or more EMQX nodes. """ zh: """ -节点数据存放目录,可能会自动创建的子目录如下:
-- `mnesia/`。EMQX的内置数据库目录。例如,`mnesia/emqx@127.0.0.1`。
-如果节点要被重新命名(例如,`emqx@10.0.1.1`)。旧目录应该首先被删除。
-- `configs`。在启动时生成的配置,以及集群/本地覆盖的配置。
-- `patches`: 热补丁文件将被放在这里。
-- `trace`: 日志跟踪文件。
+节点数据存放目录,可能会自动创建的子目录如下:
+- `mnesia/`。EMQX的内置数据库目录。例如,`mnesia/emqx@127.0.0.1`。
+如果节点要被重新命名(例如,`emqx@10.0.1.1`)。旧目录应该首先被删除。
+- `configs`。在启动时生成的配置,以及集群/本地覆盖的配置。
+- `patches`: 热补丁文件将被放在这里。
+- `trace`: 日志跟踪文件。
**注意**: 一个数据dir不能被两个或更多的EMQX节点同时使用。 """ @@ -566,9 +566,9 @@ significant: later configuration files override the previous ones. db_backend { desc { en: """ -Select the backend for the embedded database.
+Select the backend for the embedded database.
rlog is the default backend, -that is suitable for very large clusters.
+that is suitable for very large clusters.
mnesia is a backend that offers decent performance in small clusters. """ zh: """ rlog是默认的数据库,他适用于大规模的集群。 @@ -584,20 +584,20 @@ mnesia是备选数据库,在小集群中提供了很好的性能。 db_role { desc { en: """ -Select a node role.
+Select a node role.
core nodes provide durability of the data, and take care of writes. -It is recommended to place core nodes in different racks or different availability zones.
+It is recommended to place core nodes in different racks or different availability zones.
replicant nodes are ephemeral worker nodes. Removing them from the cluster -doesn't affect database redundancy
-It is recommended to have more replicant nodes than core nodes.
+doesn't affect database redundancy
+It is recommended to have more replicant nodes than core nodes.
Note: this parameter only takes effect when the backend is set to rlog. """ zh: """ -选择节点的角色。
-core 节点提供数据的持久性,并负责写入。建议将核心节点放置在不同的机架或不同的可用区。
-repliant 节点是临时工作节点。 从集群中删除它们,不影响数据库冗余
-建议复制节点多于核心节点。
+选择节点的角色。
+core 节点提供数据的持久性,并负责写入。建议将核心节点放置在不同的机架或不同的可用区。
+replicant 节点是临时工作节点。从集群中删除它们,不影响数据库冗余<br/>
+建议复制节点多于核心节点。
注意:该参数仅在设置backend时生效到 rlog。 """ } @@ -610,17 +610,17 @@ to rlog. db_core_nodes { desc { en: """ -List of core nodes that the replicant will connect to.
+List of core nodes that the replicant will connect to.
Note: this parameter only takes effect when the backend is set -to rlog and the role is set to replicant.
-This value needs to be defined for manual or static cluster discovery mechanisms.
+to rlog and the role is set to replicant.
+This value needs to be defined for manual or static cluster discovery mechanisms.
If an automatic cluster discovery mechanism is being used (such as etcd), there is no need to set this value. """ - zh: """当前节点连接的核心节点列表。
+ zh: """当前节点连接的核心节点列表。
注意:该参数仅在设置backend时生效到 rlog -并且设置rolereplicant时生效。
-该值需要在手动或静态集群发现机制下设置。
+并且设置rolereplicant时生效。
+该值需要在手动或静态集群发现机制下设置。
如果使用了自动集群发现机制(如etcd),则不需要设置该值。 """ } @@ -657,15 +657,15 @@ transaction log entry. db_default_shard_transport { desc { - en: """Defines the default transport for pushing transaction logs.
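Putting db_backend, db_role and db_core_nodes together, a sketch of a replicant node's settings might look as follows (the key paths under 'node' and the node names are assumptions for illustration; check the generated schema for the exact names):

    node {
      db_backend = rlog
      db_role    = replicant
      # needed only with manual/static discovery; omit when etcd/k8s discovery is used
      core_nodes = ["emqx@core1.local", "emqx@core2.local"]
    }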
+ en: """Defines the default transport for pushing transaction logs.
This may be overridden on a per-shard basis in db.shard_transports. gen_rpc uses the gen_rpc library, -distr uses the Erlang distribution.
""" +distr uses the Erlang distribution.
""" zh: """ -定义用于推送事务日志的默认传输。
+定义用于推送事务日志的默认传输。
这可以在 db.shard_transports 中基于每个分片被覆盖。 gen_rpc 使用 gen_rpc 库, -distr 使用 Erlang 发行版。
+distr 使用 Erlang 发行版。
""" } label { @@ -676,13 +676,13 @@ This may be overridden on a per-shard basis in db.shard_transports. db_shard_transports { desc { - en: """Allows to tune the transport method used for transaction log replication, on a per-shard basis.
+ en: """Allows to tune the transport method used for transaction log replication, on a per-shard basis.
gen_rpc uses the gen_rpc library, -distr uses the Erlang distribution.
If not specified, +distr uses the Erlang distribution.
If not specified, the default is to use the value set in db.default_shard_transport.""" - zh: """允许为每个 shard 下的事务日志复制操作的传输方法进行调优。
+ zh: """允许为每个 shard 下的事务日志复制操作的传输方法进行调优。
gen_rpc 使用 gen_rpc 库, -distr 使用 Erlang 自带的 rpc 库。
如果未指定, +distr 使用 Erlang 自带的 rpc 库。
如果未指定, 默认是使用 db.default_shard_transport 中设置的值。 """ } @@ -763,12 +763,12 @@ Ensure that the number of completed transactions is less than the max_hist rpc_port_discovery { desc { - en: """manual: discover ports by tcp_server_port.
+ en: """manual: discover ports by tcp_server_port.
stateless: discover ports in a stateless manner, using the following algorithm. If node name is emqxN@127.0.0.1, where the N is an integer, then the listening port will be 5370 + N.""" zh: """manual: 通过 tcp_server_port 来发现端口。 -
stateless: 使用无状态的方式来发现端口,使用如下算法。如果节点名称是 +
stateless: 使用无状态的方式来发现端口,使用如下算法。如果节点名称是 emqxN@127.0.0.1, N 是一个数字,那么监听端口就是 5370 + N。 """ } @@ -780,9 +780,9 @@ emqxN@127.0.0.1
, N 是一个数字,那么监听端口就是 5370 + N。 rpc_tcp_server_port { desc { - en: """Listening port used by RPC local service.
+ en: """Listening port used by RPC local service.
Note that this config only takes effect when rpc.port_discovery is set to manual.""" - zh: """RPC 本地服务使用的 TCP 端口。
+ zh: """RPC 本地服务使用的 TCP 端口。
只有当 rpc.port_discovery 设置为 manual 时,此配置才会生效。 """ } @@ -794,10 +794,10 @@ Note that this config only takes effect when rpc.port_discovery is set to manual rpc_ssl_server_port { desc { - en: """Listening port used by RPC local service.
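A small sketch combining the two options just described (the port number is arbitrary):

    rpc {
      port_discovery  = manual
      # honoured only because port_discovery is manual
      tcp_server_port = 5369
    }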
+ en: """Listening port used by RPC local service.
Note that this config only takes effect when rpc.port_discovery is set to manual and driver is set to ssl.""" - zh: """RPC 本地服务使用的监听SSL端口。
+ zh: """RPC 本地服务使用的监听SSL端口。
只有当 rpc.port_discovery 设置为 manual 且 dirver 设置为 ssl, 此配置才会生效。 """ @@ -847,9 +847,9 @@ Note that this config only takes effect when rpc.driver is set to < rpc_keyfile { desc { - en: """Path to the private key file for the rpc.certfile.
+ en: """Path to the private key file for the rpc.certfile.
Note: contents of this file are secret, so it's necessary to set permissions to 600.""" - zh: """rpc.certfile 的私钥文件的路径。
+ zh: """rpc.certfile 的私钥文件的路径。
注意:此文件内容是私钥,所以需要设置权限为 600。 """ } @@ -861,9 +861,9 @@ Note: contents of this file are secret, so it's necessary to set permissions to rpc_cacertfile { desc { - en: """Path to certification authority TLS certificate file used to validate rpc.certfile.
+ en: """Path to certification authority TLS certificate file used to validate rpc.certfile.
Note: certificates of all nodes in the cluster must be signed by the same CA.""" - zh: """验证 rpc.certfile 的 CA 证书文件的路径。
+ zh: """验证 rpc.certfile 的 CA 证书文件的路径。
注意:集群中所有节点的证书必须使用同一个 CA 签发。 """ } @@ -973,6 +973,17 @@ until the RPC connection is considered lost.""" } } + rpc_insecure_fallback { + desc { + en: """Enable compatibility with old RPC authentication.""" + zh: """兼容旧的无鉴权模式""" + } + label { + en: "RPC insecure fallback" + zh: "向后兼容旧的无鉴权模式" + } + } + log_file_handlers { desc { en: """File-based log handlers.""" @@ -1190,7 +1201,7 @@ Supervisor 报告的类型。默认为 error 类型。 desc { en: """Enable log rotation feature.""" zh: """启用日志轮换功能。启动后生成日志文件后缀会加上对应的索引数字,比如:log/emqx.log.1。 -系统会默认生成*.siz/*.idx用于记录日志位置,请不要手动修改这两个文件。 +系统会默认生成*.siz/*.idx用于记录日志位置,请不要手动修改这两个文件。 """ } label { @@ -1290,17 +1301,17 @@ Supervisor 报告的类型。默认为 error 类型。 authorization { desc { en: """ -Authorization a.k.a. ACL.
-In EMQX, MQTT client access control is extremely flexible.
+Authorization a.k.a. ACL.
+In EMQX, MQTT client access control is extremely flexible.
An out-of-the-box set of authorization data sources are supported. -For example,
-'file' source is to support concise and yet generic ACL rules in a file;
+For example,
+'file' source is to support concise and yet generic ACL rules in a file;
'built_in_database' source can be used to store per-client customizable rule sets, -natively in the EMQX node;
-'http' source to make EMQX call an external HTTP API to make the decision;
-'PostgreSQL' etc. to look up clients or rules from external databases;
+natively in the EMQX node;
+'http' source to make EMQX call an external HTTP API to make the decision;
+'PostgreSQL' etc. to look up clients or rules from external databases;
""" - zh: """ 授权(ACL)。EMQX 支持完整的客户端访问控制(ACL)。
""" + zh: """ 授权(ACL)。EMQX 支持完整的客户端访问控制(ACL)。
""" } label { en: "Authorization" @@ -1310,9 +1321,9 @@ natively in the EMQX node;
desc_cluster { desc { - en: """EMQX nodes can form a cluster to scale up the total capacity.
+ en: """EMQX nodes can form a cluster to scale up the total capacity.
Here holds the configs to instruct how individual nodes can discover each other.""" - zh: """EMQX 节点可以组成一个集群,以提高总容量。
这里指定了节点之间如何连接。""" + zh: """EMQX 节点可以组成一个集群,以提高总容量。
这里指定了节点之间如何连接。""" } label { en: "Cluster" @@ -1411,11 +1422,11 @@ The new node joins the cluster by connecting to one of the bootstrap nodes.""" desc_rpc { desc { - en: """EMQX uses a library called gen_rpc for inter-broker communication.
+ en: """EMQX uses a library called gen_rpc for inter-broker communication.
Most of the time the default config should work, but in case you need to do performance fine-tuning or experiment a bit, this is where to look.""" - zh: """EMQX 使用 gen_rpc 库来实现跨节点通信。
+ zh: """EMQX 使用 gen_rpc 库来实现跨节点通信。
大多数情况下,默认的配置应该可以工作,但如果你需要做一些性能优化或者实验,可以尝试调整这些参数。""" } label { @@ -1461,11 +1472,11 @@ Each sink is represented by a _log handler_, which can be configured independent desc_log_rotation { desc { en: """ -By default, the logs are stored in `./log` directory (for installation from zip file) or in `/var/log/emqx` (for binary installation).
+By default, the logs are stored in `./log` directory (for installation from zip file) or in `/var/log/emqx` (for binary installation).
This section of the configuration controls the number of files kept for each log handler. """ zh: """ -默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。
+默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。
这部分配置,控制每个日志处理进程保留的文件数量。 """ } @@ -1478,11 +1489,11 @@ This section of the configuration controls the number of files kept for each log desc_log_overload_kill { desc { en: """ -Log overload kill features an overload protection that activates when the log handlers use too much memory or have too many buffered log messages.
+Log overload kill features an overload protection that activates when the log handlers use too much memory or have too many buffered log messages.
When the overload is detected, the log handler is terminated and restarted after a cooldown period. """ zh: """ -日志过载终止,具有过载保护功能。当日志处理进程使用过多内存,或者缓存的日志消息过多时该功能被激活。
+日志过载终止,具有过载保护功能。当日志处理进程使用过多内存,或者缓存的日志消息过多时该功能被激活。
检测到过载时,日志处理进程将终止,并在冷却期后重新启动。 """ } diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index c0d5e1f44..2b6efe639 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.5"}, + {vsn, "0.1.6"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib]}, diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index fc75af242..0507c6d8e 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -813,6 +813,15 @@ fields("rpc") -> default => "1MB", desc => ?DESC(rpc_socket_buffer) } + )}, + {"insecure_fallback", + sc( + boolean(), + #{ + mapping => "gen_rpc.insecure_auth_fallback_allowed", + default => true, + desc => ?DESC(rpc_insecure_fallback) + } )} ]; fields("log") -> @@ -970,7 +979,7 @@ desc("authorization") -> desc(_) -> undefined. -translations() -> ["ekka", "kernel", "emqx", "gen_rpc"]. +translations() -> ["ekka", "kernel", "emqx", "gen_rpc", "prometheus"]. translation("ekka") -> [{"cluster_discovery", fun tr_cluster_discovery/1}]; @@ -987,7 +996,37 @@ translation("emqx") -> {"local_override_conf_file", fun tr_local_override_conf_file/1} ]; translation("gen_rpc") -> - [{"default_client_driver", fun tr_default_config_driver/1}]. + [{"default_client_driver", fun tr_default_config_driver/1}]; +translation("prometheus") -> + [ + {"vm_dist_collector_metrics", fun tr_vm_dist_collector/1}, + {"mnesia_collector_metrics", fun tr_mnesia_collector/1}, + {"vm_statistics_collector_metrics", fun tr_vm_statistics_collector/1}, + {"vm_system_info_collector_metrics", fun tr_vm_system_info_collector/1}, + {"vm_memory_collector_metrics", fun tr_vm_memory_collector/1}, + {"vm_msacc_collector_metrics", fun tr_vm_msacc_collector/1} + ]. + +tr_vm_dist_collector(Conf) -> + metrics_enabled(conf_get("prometheus.vm_dist_collector", Conf, enabled)). + +tr_mnesia_collector(Conf) -> + metrics_enabled(conf_get("prometheus.mnesia_collector", Conf, enabled)). + +tr_vm_statistics_collector(Conf) -> + metrics_enabled(conf_get("prometheus.vm_statistics_collector", Conf, enabled)). + +tr_vm_system_info_collector(Conf) -> + metrics_enabled(conf_get("prometheus.vm_system_info_collector", Conf, enabled)). + +tr_vm_memory_collector(Conf) -> + metrics_enabled(conf_get("prometheus.vm_memory_collector", Conf, enabled)). + +tr_vm_msacc_collector(Conf) -> + metrics_enabled(conf_get("prometheus.vm_msacc_collector", Conf, enabled)). + +metrics_enabled(enabled) -> all; +metrics_enabled(disabled) -> []. tr_default_config_driver(Conf) -> conf_get("rpc.driver", Conf). diff --git a/apps/emqx_connector/i18n/emqx_connector_api.conf b/apps/emqx_connector/i18n/emqx_connector_api.conf index b79be15fc..2f468fff0 100644 --- a/apps/emqx_connector/i18n/emqx_connector_api.conf +++ b/apps/emqx_connector/i18n/emqx_connector_api.conf @@ -14,11 +14,11 @@ emqx_connector_api { conn_test_post { desc { en: """ -Test creating a new connector by given ID
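For the new insecure_fallback field added to the rpc schema above, a sketch of how it would be set (the mapping and default come from the schema change itself):

    rpc {
      # maps to gen_rpc.insecure_auth_fallback_allowed; true keeps compatibility with old, unauthenticated RPC peers
      insecure_fallback = true
    }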
+Test creating a new connector by given ID
The ID must be of format '{type}:{name}' """ zh: """ -通过给定的 ID 测试创建一个新的连接器
+通过给定的 ID 测试创建一个新的连接器
ID 的格式必须为“{type}:{name}” """ } diff --git a/apps/emqx_connector/i18n/emqx_connector_http.conf b/apps/emqx_connector/i18n/emqx_connector_http.conf index 8664d324f..7583a38ed 100644 --- a/apps/emqx_connector/i18n/emqx_connector_http.conf +++ b/apps/emqx_connector/i18n/emqx_connector_http.conf @@ -2,14 +2,14 @@ emqx_connector_http { base_url { desc { en: """ -The base URL is the URL includes only the scheme, host and port.
+The base URL is the URL that includes only the scheme, host and port.<br/>
When sending an HTTP request, the real URL to be used is the concatenation of the base URL and the -path parameter (passed by the emqx_resource:query/2,3 or provided by the request parameter).<br/>
+path parameter (passed by the emqx_resource:query/2,3 or provided by the request parameter).
For example: `http://localhost:9901/` """ zh: """ -base URL 只包含host和port。
-发送HTTP请求时,真实的URL是由base URL 和 path parameter连接而成(通过emqx_resource:query/2,3传递,或者通过请求参数提供)。
+base URL 只包含host和port。
+发送HTTP请求时,真实的URL是由base URL 和 path parameter连接而成(通过emqx_resource:query/2,3传递,或者通过请求参数提供)。
示例:`http://localhost:9901/` """ } diff --git a/apps/emqx_connector/i18n/emqx_connector_mongo.conf b/apps/emqx_connector/i18n/emqx_connector_mongo.conf index e290a8107..e43f7bc33 100644 --- a/apps/emqx_connector/i18n/emqx_connector_mongo.conf +++ b/apps/emqx_connector/i18n/emqx_connector_mongo.conf @@ -47,13 +47,13 @@ emqx_connector_mongo { server { desc { en: """ -The IPv4 or IPv6 address or the hostname to connect to.
-A host entry has the following form: `Host[:Port]`.
+The IPv4 or IPv6 address or the hostname to connect to.
+A host entry has the following form: `Host[:Port]`.
The MongoDB default port 27017 is used if `[:Port]` is not specified. """ zh: """ -将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
+将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
如果未指定 `[:Port]`,则使用 MongoDB 默认端口 27017。 """ } diff --git a/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf b/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf index f92446fe4..d7e6cc033 100644 --- a/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf +++ b/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf @@ -2,13 +2,13 @@ emqx_connector_mqtt_schema { ingress_desc { desc { en: """The ingress config defines how this bridge receive messages from the remote MQTT broker, and then - send them to the local broker.
- Template with variables is allowed in 'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'.
+ send them to the local broker.
+ Template with variables is allowed in 'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'.
NOTE: if this bridge is used as the input of a rule, and also 'local.topic' is configured, then messages got from the remote broker will be sent to both the 'local.topic' and the rule.""" - zh: """入口配置定义了该桥接如何从远程 MQTT Broker 接收消息,然后将消息发送到本地 Broker。
- 以下字段中允许使用带有变量的模板:'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'。
+ zh: """入口配置定义了该桥接如何从远程 MQTT Broker 接收消息,然后将消息发送到本地 Broker。
+ 以下字段中允许使用带有变量的模板:'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'。
注意:如果此桥接被用作规则的输入,并且配置了 'local.topic',则从远程代理获取的消息将同时被发送到 'local.topic' 和规则。 """ } @@ -20,13 +20,13 @@ emqx_connector_mqtt_schema { egress_desc { desc { - en: """The egress config defines how this bridge forwards messages from the local broker to the remote broker.
-Template with variables is allowed in 'remote.topic', 'local.qos', 'local.retain', 'local.payload'.
+ en: """The egress config defines how this bridge forwards messages from the local broker to the remote broker.
+Template with variables is allowed in 'remote.topic', 'local.qos', 'local.retain', 'local.payload'.
NOTE: if this bridge is used as the action of a rule, and also 'local.topic' is configured, then both the data got from the rule and the MQTT messages that matches 'local.topic' will be forwarded.""" zh: """出口配置定义了该桥接如何将消息从本地 Broker 转发到远程 Broker。 -以下字段中允许使用带有变量的模板:'remote.topic', 'local.qos', 'local.retain', 'local.payload'。
+以下字段中允许使用带有变量的模板:'remote.topic', 'local.qos', 'local.retain', 'local.payload'。
注意:如果此桥接被用作规则的动作,并且配置了 'local.topic',则从规则输出的数据以及匹配到 'local.topic' 的 MQTT 消息都会被转发。 """ } @@ -83,22 +83,22 @@ is configured, then both the data got from the rule and the MQTT messages that m mode { desc { en: """ -The mode of the MQTT Bridge.
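A sketch of an egress block using such templates (topic names are placeholders and the exact nesting is assumed from the fields documented below):

    egress {
      local {
        # MQTT messages matching this local topic are forwarded
        topic = "local/out/#"
      }
      remote {
        # template: republish on the remote broker under a prefixed topic
        topic = "from-emqx/${topic}"
        qos   = 1
      }
    }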
+The mode of the MQTT Bridge.
-- cluster_shareload: create an MQTT connection on each node in the emqx cluster.
+- cluster_shareload: create an MQTT connection on each node in the emqx cluster.
In 'cluster_shareload' mode, the incoming load from the remote broker is shared by -using shared subscription.
+using shared subscription.
Note that the 'clientid' is suffixed by the node name, this is to avoid clientid conflicts between different nodes. And we can only use shared subscription -topic filters for 'remote.topic' of ingress connections. +topic filters for remote.topic of ingress connections. """ zh: """ -MQTT 桥的模式。
+MQTT 桥的模式。
-- cluster_shareload:在 emqx 集群的每个节点上创建一个 MQTT 连接。
-在“cluster_shareload”模式下,来自远程代理的传入负载通过共享订阅的方式接收。
-请注意,“clientid”以节点名称为后缀,这是为了避免不同节点之间的clientid冲突。 -而且对于入口连接的“remote.topic”,我们只能使用共享订阅主题过滤器。 +- cluster_shareload:在 emqx 集群的每个节点上创建一个 MQTT 连接。
+在“cluster_shareload”模式下,来自远程代理的传入负载通过共享订阅的方式接收。
+请注意,clientid 以节点名称为后缀,这是为了避免不同节点之间的 clientid 冲突。 +而且对于入口连接的 remote.topic,我们只能使用共享订阅主题过滤器。 """ } label: { @@ -216,11 +216,11 @@ broker MUST support this feature. ingress_local_topic { desc { en: """ -Send messages to which topic of the local broker.
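A hedged sketch of an MQTT connector using this mode (the connector name and server address are illustrative, and the exact nesting may differ):

    connectors.mqtt.remote_broker {
      # one connection per cluster node; ingress load is shared via shared subscriptions
      mode   = cluster_shareload
      server = "remote.broker.example:1883"
    }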
+Send messages to which topic of the local broker.
Template with variables is allowed. """ zh: """ -向本地broker的哪个topic发送消息。
+向本地broker的哪个topic发送消息。
允许使用带有变量的模板。 """ } @@ -233,11 +233,11 @@ Template with variables is allowed. ingress_local_qos { desc { en: """ -The QoS of the MQTT message to be sent.
+The QoS of the MQTT message to be sent.
Template with variables is allowed. """ zh: """ -待发送 MQTT 消息的 QoS。
+待发送 MQTT 消息的 QoS。
允许使用带有变量的模板。 """ } @@ -261,11 +261,11 @@ Template with variables is allowed. egress_remote_topic { desc { en: """ -Forward to which topic of the remote broker.
+Forward to which topic of the remote broker.
Template with variables is allowed. """ zh: """ -转发到远程broker的哪个topic。
+转发到远程broker的哪个topic。
允许使用带有变量的模板。 """ } @@ -278,11 +278,11 @@ Template with variables is allowed. egress_remote_qos { desc { en: """ -The QoS of the MQTT message to be sent.
+The QoS of the MQTT message to be sent.
Template with variables is allowed. """ zh: """ -待发送 MQTT 消息的 QoS。
+待发送 MQTT 消息的 QoS。
允许使用带有变量的模板。 """ } @@ -295,11 +295,11 @@ Template with variables is allowed. retain { desc { en: """ -The 'retain' flag of the MQTT message to be sent.
+The 'retain' flag of the MQTT message to be sent.
Template with variables is allowed. """ zh: """ -要发送的 MQTT 消息的“保留”标志。
+要发送的 MQTT 消息的“保留”标志。
允许使用带有变量的模板。 """ } @@ -312,11 +312,11 @@ Template with variables is allowed. payload { desc { en: """ -The payload of the MQTT message to be sent.
+The payload of the MQTT message to be sent.
Template with variables is allowed. """ zh: """ -要发送的 MQTT 消息的负载。
+要发送的 MQTT 消息的负载。
允许使用带有变量的模板。 """ } diff --git a/apps/emqx_connector/i18n/emqx_connector_mysql.conf b/apps/emqx_connector/i18n/emqx_connector_mysql.conf index 4a5d99254..499caae12 100644 --- a/apps/emqx_connector/i18n/emqx_connector_mysql.conf +++ b/apps/emqx_connector/i18n/emqx_connector_mysql.conf @@ -3,13 +3,13 @@ emqx_connector_mysql { server { desc { en: """ -The IPv4 or IPv6 address or the hostname to connect to.
-A host entry has the following form: `Host[:Port]`.
+The IPv4 or IPv6 address or the hostname to connect to.
+A host entry has the following form: `Host[:Port]`.
The MySQL default port 3306 is used if `[:Port]` is not specified. """ zh: """ -将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
+将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
如果未指定 `[:Port]`,则使用 MySQL 默认端口 3306。 """ } diff --git a/apps/emqx_connector/i18n/emqx_connector_pgsql.conf b/apps/emqx_connector/i18n/emqx_connector_pgsql.conf index 9731b3e18..6aa792070 100644 --- a/apps/emqx_connector/i18n/emqx_connector_pgsql.conf +++ b/apps/emqx_connector/i18n/emqx_connector_pgsql.conf @@ -3,13 +3,13 @@ emqx_connector_pgsql { server { desc { en: """ -The IPv4 or IPv6 address or the hostname to connect to.
-A host entry has the following form: `Host[:Port]`.
+The IPv4 or IPv6 address or the hostname to connect to.
+A host entry has the following form: `Host[:Port]`.
The PostgreSQL default port 5432 is used if `[:Port]` is not specified. """ zh: """ -将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
+将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
如果未指定 `[:Port]`,则使用 PostgreSQL 默认端口 5432。 """ } diff --git a/apps/emqx_connector/i18n/emqx_connector_redis.conf b/apps/emqx_connector/i18n/emqx_connector_redis.conf index 0e27ee9c6..228d0805a 100644 --- a/apps/emqx_connector/i18n/emqx_connector_redis.conf +++ b/apps/emqx_connector/i18n/emqx_connector_redis.conf @@ -47,13 +47,13 @@ emqx_connector_redis { server { desc { en: """ -The IPv4 or IPv6 address or the hostname to connect to.
-A host entry has the following form: `Host[:Port]`.
+The IPv4 or IPv6 address or the hostname to connect to.
+A host entry has the following form: `Host[:Port]`.
The Redis default port 6379 is used if `[:Port]` is not specified. """ zh: """ -将要连接的 IPv4 或 IPv6 地址,或者主机名。
-主机名具有以下形式:`Host[:Port]`。
+将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
如果未指定 `[:Port]`,则使用 Redis 默认端口 6379。 """ } diff --git a/apps/emqx_connector/include/emqx_connector.hrl b/apps/emqx_connector/include/emqx_connector.hrl index 8945bd14a..52c9929a5 100644 --- a/apps/emqx_connector/include/emqx_connector.hrl +++ b/apps/emqx_connector/include/emqx_connector.hrl @@ -25,14 +25,13 @@ -define(PGSQL_DEFAULT_PORT, 5432). -define(SERVERS_DESC, - "A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`\n" + "A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`<br/>
" "For each Node should be: " ). -define(SERVER_DESC(TYPE, DEFAULT_PORT), - "\n" - "The IPv4 or IPv6 address or the hostname to connect to.
\n" - "A host entry has the following form: `Host[:Port]`.
\n" + "The IPv4 or IPv6 address or the hostname to connect to.
" + "A host entry has the following form: `Host[:Port]`.
" "The " ++ TYPE ++ " default port " ++ DEFAULT_PORT ++ " is used if `[:Port]` is not specified." ). diff --git a/apps/emqx_connector/src/emqx_connector.app.src b/apps/emqx_connector/src/emqx_connector.app.src index 06da66398..547a37b8e 100644 --- a/apps/emqx_connector/src/emqx_connector.app.src +++ b/apps/emqx_connector/src/emqx_connector.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_connector, [ {description, "An OTP application"}, - {vsn, "0.1.6"}, + {vsn, "0.1.8"}, {registered, []}, {mod, {emqx_connector_app, []}}, {applications, [ diff --git a/apps/emqx_connector/src/emqx_connector_mongo.erl b/apps/emqx_connector/src/emqx_connector_mongo.erl index 778abf8c2..678a4f847 100644 --- a/apps/emqx_connector/src/emqx_connector_mongo.erl +++ b/apps/emqx_connector/src/emqx_connector_mongo.erl @@ -387,7 +387,7 @@ init_worker_options([], Acc) -> %% =================================================================== %% Schema funcs -server(type) -> emqx_schema:ip_port(); +server(type) -> emqx_schema:host_port(); server(required) -> true; server(validator) -> [?NOT_EMPTY("the value of the field 'server' cannot be empty")]; server(converter) -> fun to_server_raw/1; diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl index 8b44846cf..b35f0b018 100644 --- a/apps/emqx_connector/src/emqx_connector_mysql.erl +++ b/apps/emqx_connector/src/emqx_connector_mysql.erl @@ -70,7 +70,7 @@ fields(config) -> emqx_connector_schema_lib:ssl_fields() ++ emqx_connector_schema_lib:prepare_statement_fields(). -server(type) -> emqx_schema:ip_port(); +server(type) -> emqx_schema:host_port(); server(required) -> true; server(validator) -> [?NOT_EMPTY("the value of the field 'server' cannot be empty")]; server(converter) -> fun to_server/1; diff --git a/apps/emqx_connector/src/emqx_connector_pgsql.erl b/apps/emqx_connector/src/emqx_connector_pgsql.erl index 9b0125227..71dd2bbeb 100644 --- a/apps/emqx_connector/src/emqx_connector_pgsql.erl +++ b/apps/emqx_connector/src/emqx_connector_pgsql.erl @@ -59,7 +59,7 @@ fields(config) -> emqx_connector_schema_lib:ssl_fields() ++ emqx_connector_schema_lib:prepare_statement_fields(). -server(type) -> emqx_schema:ip_port(); +server(type) -> emqx_schema:host_port(); server(required) -> true; server(validator) -> [?NOT_EMPTY("the value of the field 'server' cannot be empty")]; server(converter) -> fun to_server/1; diff --git a/apps/emqx_connector/src/emqx_connector_redis.erl b/apps/emqx_connector/src/emqx_connector_redis.erl index fae628d9e..a1e864f1d 100644 --- a/apps/emqx_connector/src/emqx_connector_redis.erl +++ b/apps/emqx_connector/src/emqx_connector_redis.erl @@ -98,7 +98,7 @@ fields(sentinel) -> redis_fields() ++ emqx_connector_schema_lib:ssl_fields(). 
-server(type) -> emqx_schema:ip_port(); +server(type) -> emqx_schema:host_port(); server(required) -> true; server(validator) -> [?NOT_EMPTY("the value of the field 'server' cannot be empty")]; server(converter) -> fun to_server_raw/1; diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl index b31c12394..d77859dd7 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl @@ -69,7 +69,7 @@ fields("server_configs") -> )}, {server, mk( - emqx_schema:ip_port(), + emqx_schema:host_port(), #{ required => true, desc => ?DESC("server") diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index 9d5f85c7b..9e639bcf8 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,7 +2,7 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! - {vsn, "5.0.6"}, + {vsn, "5.0.8"}, {modules, []}, {registered, [emqx_dashboard_sup]}, {applications, [kernel, stdlib, mnesia, minirest, emqx]}, diff --git a/apps/emqx_dashboard/src/emqx_dashboard.erl b/apps/emqx_dashboard/src/emqx_dashboard.erl index 6c2a02e47..e032eb28b 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard.erl @@ -235,7 +235,7 @@ authorize(Req) -> ) end; {error, _} -> - return_unauthorized(<<"WORNG_USERNAME_OR_PWD">>, <<"Check username/password">>) + return_unauthorized(?WRONG_USERNAME_OR_PWD, <<"Check username/password">>) end; {bearer, Token} -> case emqx_dashboard_admin:verify_token(Token) of diff --git a/apps/emqx_dashboard/src/emqx_dashboard_error_code_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_error_code_api.erl index 139567828..2605ad91e 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_error_code_api.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_error_code_api.erl @@ -51,7 +51,7 @@ schema("/error_codes") -> get => #{ security => [], description => <<"API Error Codes">>, - tags => [<<"Error codes">>], + tags => [<<"Error Codes">>], responses => #{ 200 => hoconsc:array(hoconsc:ref(?MODULE, error_code)) } @@ -63,7 +63,7 @@ schema("/error_codes/:code") -> get => #{ security => [], description => <<"API Error Codes">>, - tags => [<<"Error codes">>], + tags => [<<"Error Codes">>], parameters => [ {code, hoconsc:mk(hoconsc:enum(emqx_dashboard_error_code:all()), #{ diff --git a/apps/emqx_dashboard/src/emqx_dashboard_monitor_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_monitor_api.erl index e3ea870af..50349bc40 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_monitor_api.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_monitor_api.erl @@ -131,12 +131,20 @@ monitor(get, #{query_string := QS, bindings := Bindings}) -> end. 
monitor_current(get, #{bindings := Bindings}) -> - NodeOrCluster = binary_to_atom(maps:get(node, Bindings, <<"all">>), utf8), - case emqx_dashboard_monitor:current_rate(NodeOrCluster) of - {ok, CurrentRate} -> - {200, CurrentRate}; - {badrpc, {Node, Reason}} -> - Message = list_to_binary(io_lib:format("Bad node ~p, rpc failed ~p", [Node, Reason])), + RawNode = maps:get(node, Bindings, all), + case emqx_misc:safe_to_existing_atom(RawNode, utf8) of + {ok, NodeOrCluster} -> + case emqx_dashboard_monitor:current_rate(NodeOrCluster) of + {ok, CurrentRate} -> + {200, CurrentRate}; + {badrpc, {Node, Reason}} -> + Message = list_to_binary( + io_lib:format("Bad node ~p, rpc failed ~p", [Node, Reason]) + ), + {400, 'BAD_RPC', Message} + end; + {error, _} -> + Message = list_to_binary(io_lib:format("Bad node ~p", [RawNode])), {400, 'BAD_RPC', Message} end. diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index 27f259eab..009fd1d64 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -367,11 +367,13 @@ parameters(Params, Module) -> Required = hocon_schema:field_schema(Type, required), Default = hocon_schema:field_schema(Type, default), HoconType = hocon_schema:field_schema(Type, type), + SchemaExtras = hocon_extract_map([enum, default], Type), Meta = init_meta(Default), {ParamType, Refs} = hocon_schema_to_spec(HoconType, Module), + Schema = maps:merge(maps:merge(ParamType, Meta), SchemaExtras), Spec0 = init_prop( [required | ?DEFAULT_FIELDS], - #{schema => maps:merge(ParamType, Meta), name => Name, in => In}, + #{schema => Schema, name => Name, in => In}, Type ), Spec1 = trans_required(Spec0, Required, In), @@ -384,6 +386,18 @@ parameters(Params, Module) -> ), {lists:reverse(SpecList), AllRefs}. +hocon_extract_map(Keys, Type) -> + lists:foldl( + fun(K, M) -> + case hocon_schema:field_schema(Type, K) of + undefined -> M; + V -> M#{K => V} + end + end, + #{}, + Keys + ). + init_meta(undefined) -> #{}; init_meta(Default) -> #{default => Default}. @@ -427,7 +441,7 @@ trans_description(Spec, Hocon) -> undefined -> Spec; Desc -> - Desc1 = binary:replace(Desc, [<<"
\n">>, <<"\n">>], <<"
">>, [global]), + Desc1 = binary:replace(Desc, [<<"\n">>], <<"
">>, [global]), Spec#{description => Desc1} end. @@ -656,6 +670,8 @@ typename_to_spec("file()", _Mod) -> #{type => string, example => <<"/path/to/file">>}; typename_to_spec("ip_port()", _Mod) -> #{type => string, example => <<"127.0.0.1:80">>}; +typename_to_spec("host_port()", _Mod) -> + #{type => string, example => <<"example.host.domain:80">>}; typename_to_spec("write_syntax()", _Mod) -> #{ type => string, @@ -663,8 +679,6 @@ typename_to_spec("write_syntax()", _Mod) -> <<"${topic},clientid=${clientid}", " ", "payload=${payload},", "${clientid}_int_value=${payload.int_key}i,", "bool=${payload.bool}">> }; -typename_to_spec("ip_ports()", _Mod) -> - #{type => string, example => <<"127.0.0.1:80, 127.0.0.2:80">>}; typename_to_spec("url()", _Mod) -> #{type => string, example => <<"http://127.0.0.1">>}; typename_to_spec("connect_timeout()", Mod) -> diff --git a/apps/emqx_dashboard/test/emqx_dashboard_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_SUITE.erl index b3735109f..cb9c77657 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_SUITE.erl @@ -74,14 +74,6 @@ end_per_suite(_Config) -> emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_management]), mria:stop(). -set_special_configs(emqx_management) -> - Listeners = #{http => #{port => 8081}}, - Config = #{ - listeners => Listeners, - applications => [#{id => "admin", secret => "public"}] - }, - emqx_config:put([emqx_management], Config), - ok; set_special_configs(emqx_dashboard) -> emqx_dashboard_api_test_helpers:set_default_config(), ok; diff --git a/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl index 912459b9d..5dd76acf6 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl @@ -6,7 +6,7 @@ -export([paths/0, api_spec/0, schema/1, fields/1]). -export([init_per_suite/1, end_per_suite/1]). -export([t_in_path/1, t_in_query/1, t_in_mix/1, t_without_in/1, t_ref/1, t_public_ref/1]). --export([t_require/1, t_nullable/1, t_method/1, t_api_spec/1]). +-export([t_require/1, t_query_enum/1, t_nullable/1, t_method/1, t_api_spec/1]). -export([t_in_path_trans/1, t_in_query_trans/1, t_in_mix_trans/1, t_ref_trans/1]). -export([t_in_path_trans_error/1, t_in_query_trans_error/1, t_in_mix_trans_error/1]). -export([all/0, suite/0, groups/0]). @@ -30,6 +30,7 @@ groups() -> t_in_mix, t_without_in, t_require, + t_query_enum, t_nullable, t_method, t_public_ref @@ -226,6 +227,17 @@ t_require(_Config) -> validate("/required/false", ExpectSpec), ok. +t_query_enum(_Config) -> + ExpectSpec = [ + #{ + in => query, + name => userid, + schema => #{type => string, enum => [<<"a">>], default => <<"a">>} + } + ], + validate("/query/enum", ExpectSpec), + ok. 
+ t_nullable(_Config) -> NullableFalse = [ #{ @@ -528,6 +540,8 @@ schema("/test/without/in") -> }; schema("/required/false") -> to_schema([{'userid', mk(binary(), #{in => query, required => false})}]); +schema("/query/enum") -> + to_schema([{'userid', mk(binary(), #{in => query, enum => [<<"a">>], default => <<"a">>})}]); schema("/nullable/false") -> to_schema([{'userid', mk(binary(), #{in => query, required => true})}]); schema("/nullable/true") -> diff --git a/apps/emqx_exhook/src/emqx_exhook.app.src b/apps/emqx_exhook/src/emqx_exhook.app.src index c4a43d846..4e8abef81 100644 --- a/apps/emqx_exhook/src/emqx_exhook.app.src +++ b/apps/emqx_exhook/src/emqx_exhook.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_exhook, [ {description, "EMQX Extension for Hook"}, - {vsn, "5.0.4"}, + {vsn, "5.0.6"}, {modules, []}, {registered, []}, {mod, {emqx_exhook_app, []}}, diff --git a/apps/emqx_exhook/src/emqx_exhook_api.erl b/apps/emqx_exhook/src/emqx_exhook_api.erl index 62f616903..6676c4503 100644 --- a/apps/emqx_exhook/src/emqx_exhook_api.erl +++ b/apps/emqx_exhook/src/emqx_exhook_api.erl @@ -41,7 +41,7 @@ -import(hoconsc, [mk/1, mk/2, ref/1, enum/1, array/1, map/2]). -import(emqx_dashboard_swagger, [schema_with_example/2, error_codes/2]). --define(TAGS, [<<"exhooks">>]). +-define(TAGS, [<<"ExHook">>]). -define(NOT_FOURD, 'NOT_FOUND'). -define(BAD_REQUEST, 'BAD_REQUEST'). -define(BAD_RPC, 'BAD_RPC'). @@ -219,9 +219,9 @@ params_server_name_in_path() -> server_conf_schema() -> SSL = #{ enable => false, - cacertfile => emqx:cert_file(<<"cacert.pem">>), - certfile => emqx:cert_file(<<"cert.pem">>), - keyfile => emqx:cert_file(<<"key.pem">>) + cacertfile => <<"/etc/emqx/certs/cacert.pem">>, + certfile => <<"/etc/emqx/certs/cert.pem">>, + keyfile => <<"/etc/emqx/certs/key.pem">> }, schema_with_example( ref(server_config), diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_authn_i18n.conf b/apps/emqx_gateway/i18n/emqx_gateway_api_authn_i18n.conf index 0f04b3938..a9ae33f0c 100644 --- a/apps/emqx_gateway/i18n/emqx_gateway_api_authn_i18n.conf +++ b/apps/emqx_gateway/i18n/emqx_gateway_api_authn_i18n.conf @@ -2,70 +2,76 @@ emqx_gateway_api_authn { get_authn { desc { - en: """Get the gateway authentication""" - zh: """获取指定网关认证器""" + en: """Gets the configuration of the specified gateway authenticator.
+Returns 404 when gateway or authentication is not enabled.""" + zh: """获取指定网关认证器的配置 +当网关或认证未启用时,返回 404。""" } } update_authn { desc { - en: """Update authentication for the gateway""" - zh: """更新网关认证器""" + en: """Update the configuration of the specified gateway authenticator, or disable the authenticator.""" + zh: """更新指定网关认证器的配置,或停用认证器。""" } } add_authn { desc { - en: """Add authentication for the gateway""" - zh: """为指定网关新增认证器""" + en: """Enables the authenticator for client authentication for the specified gateway.
+When the authenticator is not configured or turned off, all client connections are assumed to be allowed.
+Note: Only one authenticator is allowed to be enabled at a time in the gateway, rather than allowing multiple authenticators to be configured to form an authentication chain as in MQTT.""" + zh: """为指定网关开启认证器实现客户端认证的功能。
+当未配置认证器或关闭认证器时,则认为允许所有客户端的连接。
+注:在网关中仅支持添加一个认证器,而不是像 MQTT 一样允许配置多个认证器构成认证链。""" } } delete_authn { desc { - en: """Remove the gateway authentication""" - zh: """删除指定网关的认证器""" + en: """Delete the authenticator of the specified gateway.""" + zh: """删除指定网关的认证器。""" } } list_users { desc { - en: """Get the users for the authentication""" + en: """Get the users for the authenticator (only supported by built_in_database).""" zh: """获取用户列表(仅支持 built_in_database 类型的认证器)""" } } add_user { desc { - en: """Add user for the authentication""" + en: """Add user for the authenticator (only supports built_in_database).""" zh: """添加用户(仅支持 built_in_database 类型的认证器)""" } } get_user { desc { - en: """Get user info from the gateway authentication""" + en: """Get user info from the gateway authenticator (only supports built_in_database)""" zh: """获取用户信息(仅支持 built_in_database 类型的认证器)""" } } update_user { desc { - en: """Update the user info for the gateway authentication""" + en: """Update the user info for the gateway authenticator (only supports built_in_database)""" zh: """更新用户信息(仅支持 built_in_database 类型的认证器)""" } } delete_user { desc { - en: """Delete the user for the gateway authentication""" + en: """Delete the user for the gateway authenticator (only supports built_in_database)""" zh: """删除用户(仅支持 built_in_database 类型的认证器)""" } } import_users { desc { - en: """Import users into the gateway authentication""" + en: """Import users into the gateway authenticator (only supports built_in_database)""" zh: """导入用户(仅支持 built_in_database 类型的认证器)""" } } @@ -79,8 +85,8 @@ emqx_gateway_api_authn { like_user_id { desc { - en: """Fuzzy search by user_id (username or clientid)""" - zh: """用户 ID (username 或 clientid)模糊搜索""" + en: """Fuzzy search using user ID (username or clientid), only supports search by substring.""" + zh: """使用用户 ID (username 或 clientid)模糊搜索,仅支持按子串的方式进行搜索。""" } } @@ -90,5 +96,4 @@ emqx_gateway_api_authn { zh: """是否是超级用户""" } } - } diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_i18n.conf b/apps/emqx_gateway/i18n/emqx_gateway_api_i18n.conf index 5ab9277b2..34e9b8567 100644 --- a/apps/emqx_gateway/i18n/emqx_gateway_api_i18n.conf +++ b/apps/emqx_gateway/i18n/emqx_gateway_api_i18n.conf @@ -2,15 +2,17 @@ emqx_gateway_api { list_gateway { desc { - en: """Get gateway list""" - zh: """获取网关列表""" + en: """This API returns an overview info for the specified or all gateways. +including current running status, number of connections, listener status, etc.""" + zh: """该接口会返回指定或所有网关的概览状态, +包括当前状态、连接数、监听器状态等。""" } } enable_gateway { desc { - en: """Enable a gateway""" - zh: """启用某网关""" + en: """Enable a gateway by confs.""" + zh: """使用配置启动某一网关。""" } } @@ -23,15 +25,17 @@ emqx_gateway_api { delete_gateway { desc { - en: """Delete/Unload the gateway""" - zh: """删除/禁用某网关""" + en: """Unload the specified gateway""" + zh: """停用指定网关""" } } update_gateway { desc { - en: """Update the gateway configurations/status""" - zh: """更新网关配置或启用状态""" + en: """Update the gateway basic configurations and running status.
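For illustration, enabling the single allowed authenticator for a gateway through configuration (rather than this API) might look like the following sketch (the gateway and backend are examples):

    gateway.stomp {
      # at most one authenticator per gateway
      authentication {
        mechanism = password_based
        backend   = built_in_database
      }
    }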
+Note: Authentication and listener configurations must be updated through their dedicated APIs.""" + zh: """更新指定网关的基础配置和启用状态。
+注:认证和监听器的配置更新需使用对应的 API 接口。""" } } @@ -42,13 +46,33 @@ emqx_gateway_api { } } + gateway_name_in_qs { + desc { + en: """Gateway Name.
+Possible values are `stomp`, `mqttsn`, `coap`, `lwm2m`, `exproto`. +""" + zh: """网关名称。
+可取值为 `stomp`、`mqttsn`、`coap`、`lwm2m`、`exproto` +""" + } + } + gateway_status { desc { - en: """Gateway Status""" + en: """Gateway status""" zh: """网关启用状态""" } } + gateway_status_in_qs { + desc { + en: """Filter gateways by status.
+Possible values are `running`, `stopped`, `unloaded`.""" + zh: """通过网关状态筛选
+可选值为 `running`、`stopped`、`unloaded`""" + } + } + gateway_created_at { desc { en: """The Gateway created datetime""" diff --git a/apps/emqx_gateway/i18n/emqx_gateway_api_listeners_i18n.conf b/apps/emqx_gateway/i18n/emqx_gateway_api_listeners_i18n.conf index 9c5de67c3..dc14a7e01 100644 --- a/apps/emqx_gateway/i18n/emqx_gateway_api_listeners_i18n.conf +++ b/apps/emqx_gateway/i18n/emqx_gateway_api_listeners_i18n.conf @@ -2,105 +2,109 @@ emqx_gateway_api_listeners { list_listeners { desc { - en: """Get the gateway listeners""" - zh: """获取网关监听器列表""" + en: """Gets a list of gateway listeners. This interface returns all the configs of the listener (including the authenticator on that listener), as well as the status of that listener running in the cluster.""" + zh: """获取网关监听器列表。该接口会返回监听器所有的配置(包括该监听器上的认证器),同时也会返回该监听器在集群中运行的状态。""" } } add_listener { desc { - en: """Create the gateway listener""" - zh: """为指定网关添加监听器""" + en: """Create the gateway listener.
+Note: For listener types not supported by a gateway, this API returns `400: BAD_REQUEST`.""" + zh: """为指定网关添加监听器。
+注:对于某网关不支持的监听器类型,该接口会返回 `400: BAD_REQUEST`。""" } } get_listener { desc { - en: """Get the gateway listener configurations""" - zh: """获取指定监听器信息""" + en: """Get the gateway listener configs""" + zh: """获取指定网关监听器的配置。""" } } delete_listener { desc { - en: """Delete the gateway listener""" - zh: """删除监听器""" + en: """Delete the gateway listener. All connected clients under the deleted listener will be disconnected.""" + zh: """删除指定监听器。被删除的监听器下所有已连接的客户端都会离线。""" } } update_listener { desc { - en: """Update the gateway listener""" - zh: """更新监听器""" + en: """Update the gateway listener. The listener being updated performs a restart and all clients connected to that listener will be disconnected.""" + zh: """更新某网关监听器的配置。被更新的监听器会执行重启,所有已连接到该监听器上的客户端都会被断开。""" } } get_listener_authn { desc { - en: """Get the listener's authentication info""" - zh: """获取监听器的认证器信息""" + en: """Get the listener's authenticator configs.""" + zh: """获取监听器的认证器配置。""" } } add_listener_authn { desc { - en: """Add authentication for the listener""" - zh: """为指定监听器添加认证器""" + en: """Enable authenticator for specified listener for client authentication.
+When an authenticator is enabled for a listener, all clients connecting to that listener will use it for authentication.""" + zh: """为指定监听器开启认证器以实现客户端认证的能力。
+当某一监听器开启认证后,所有连接到该监听器的客户端会使用该认证器进行认证。""" } } update_listener_authn { desc { - en: """Update authentication for the listener""" - zh: """更新指定监听上的认证器配置""" + en: """Update authenticator configs for the listener, or disable/enable it.""" + zh: """更新指定监听器的认证器配置,或停用/启用该认证器。""" } } delete_listener_authn { desc { - en: """Remove authentication for the listener""" - zh: """为指定监听器移除认证器""" + en: """Remove authenticator for the listener.""" + zh: """移除指定监听器的认证器。""" } } list_users { desc { - en: """Get the users for the authentication""" + en: """Get the users for the authenticator (only supported by built_in_database)""" zh: """获取用户列表(仅支持 built_in_database 类型的认证器)""" } } add_user { desc { - en: """Add user for the authentication""" + en: """Add user for the authenticator (only supports built_in_database)""" zh: """添加用户(仅支持 built_in_database 类型的认证器)""" } } get_user { desc { - en: """Get user info from the gateway authentication""" + en: """Get user info from the gateway authenticator (only supports built_in_database)""" zh: """获取用户信息(仅支持 built_in_database 类型的认证器)""" } } update_user { desc { - en: """Update the user info for the gateway authentication""" + en: """Update the user info for the gateway authenticator (only supports built_in_database)""" zh: """更新用户信息(仅支持 built_in_database 类型的认证器)""" } } delete_user { desc { - en: """Delete the user for the gateway authentication""" + en: """Delete the user for the gateway authenticator (only supports built_in_database)""" zh: """删除用户(仅支持 built_in_database 类型的认证器)""" } } import_users { desc { - en: """Import users into the gateway authentication""" + en: """Import users into the gateway authenticator (only supports built_in_database)""" zh: """导入用户(仅支持 built_in_database 类型的认证器)""" } } @@ -139,5 +143,4 @@ emqx_gateway_api_listeners { zh: """当前连接数""" } } - } diff --git a/apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf b/apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf index 32d8cfe48..a05fec5c4 100644 --- a/apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf +++ b/apps/emqx_gateway/i18n/emqx_gateway_schema_i18n.conf @@ -146,7 +146,7 @@ This option specifies the QoS level for the CoAP Client when establishing a subs """ zh: """客户端订阅请求的默认 QoS 等级。 当 CoAP 客户端发起订阅请求时,如果未携带 `qos` 参数则会使用该默认值。默认值可设置为: - - qos0, qos1, qos2: 设置为固定的 QoS 等级 + - qos0、 qos1、qos2: 设置为固定的 QoS 等级 - coap: 依据订阅操作的 CoAP 报文类型来动态决定 * 当订阅请求为 `non-confirmable` 类型时,取值为 qos0 * 当订阅请求为 `confirmable` 类型时,取值为 qos1 @@ -165,7 +165,7 @@ This option specifies the QoS level for the CoAP Client when publishing a messag zh: """客户端发布请求的默认 QoS 等级。 当 CoAP 客户端发起发布请求时,如果未携带 `qos` 参数则会使用该默认值。默认值可设置为: - - qos0, qos1, qos2: 设置为固定的 QoS 等级 + - qos0、qos1、qos2: 设置为固定的 QoS 等级 - coap: 依据发布操作的 CoAP 报文类型来动态决定 * 当发布请求为 `non-confirmable` 类型时,取值为 qos0 * 当发布请求为 `confirmable` 类型时,取值为 qos1 @@ -175,29 +175,29 @@ This option specifies the QoS level for the CoAP Client when publishing a messag lwm2m { desc { - en: """The LwM2M Gateway configuration. This gateway only supports the v1.0.1 protocol""" - zh: """LwM2M 网关配置。仅支持 v1.0.1 协议""" + en: """The LwM2M Gateway configuration. 
This gateway only supports the v1.0.1 protocol.""" + zh: """LwM2M 网关配置。仅支持 v1.0.1 协议。""" } } lwm2m_xml_dir { desc { - en: """The Directory for LwM2M Resource definition""" - zh: """LwM2M Resource 定义的 XML 文件目录路径""" + en: """The Directory for LwM2M Resource definition.""" + zh: """LwM2M Resource 定义的 XML 文件目录路径。""" } } lwm2m_lifetime_min { desc { - en: """Minimum value of lifetime allowed to be set by the LwM2M client""" - zh: """允许 LwM2M 客户端允许设置的心跳最小值""" + en: """Minimum value of lifetime allowed to be set by the LwM2M client.""" + zh: """允许 LwM2M 客户端允许设置的心跳最小值。""" } } lwm2m_lifetime_max { desc { - en: """Maximum value of lifetime allowed to be set by the LwM2M client""" - zh: """允许 LwM2M 客户端允许设置的心跳最大值""" + en: """Maximum value of lifetime allowed to be set by the LwM2M client.""" + zh: """允许 LwM2M 客户端允许设置的心跳最大值。""" } } @@ -207,14 +207,14 @@ This option specifies the QoS level for the CoAP Client when publishing a messag For example, after receiving an update message from a client, any messages within this time window are sent directly to the LwM2M client, and all messages beyond this time window are temporarily stored in memory.""" zh: """在QMode模式下,LwM2M网关认为网络链接有效的时间窗口的值。 -例如,在收到客户端的更新信息后,在这个时间窗口内的任何信息都会直接发送到LwM2M客户端,而超过这个时间窗口的所有信息都会暂时储存在内存中""" +例如,在收到客户端的更新信息后,在这个时间窗口内的任何信息都会直接发送到LwM2M客户端,而超过这个时间窗口的所有信息都会暂时储存在内存中。""" } } lwm2m_auto_observe { desc { - en: """Automatically observe the object list of REGISTER packet""" - zh: """自动 Observe REGISTER 数据包的 Object 列表""" + en: """Automatically observe the object list of REGISTER packet.""" + zh: """自动 Observe REGISTER 数据包的 Object 列表。""" } } @@ -226,15 +226,15 @@ For example, after receiving an update message from a client, any messages withi """ zh: """发布UPDATE事件消息的策略。 - always: 只要收到 UPDATE 请求,就发送更新事件。 - - contains_object_list: 仅当 UPDATE 请求携带 Object 列表时才发送更新事件 + - contains_object_list: 仅当 UPDATE 请求携带 Object 列表时才发送更新事件。 """ } } lwm2m_translators { desc { - en: """Topic configuration for LwM2M's gateway publishing and subscription""" - zh: """LwM2M 网关订阅/发布消息的主题映射配置""" + en: """Topic configuration for LwM2M's gateway publishing and subscription.""" + zh: """LwM2M 网关订阅/发布消息的主题映射配置。""" } } @@ -244,14 +244,14 @@ For example, after receiving an update message from a client, any messages withi For each new LwM2M client that succeeds in going online, the gateway creates a subscription relationship to receive downstream commands and send it to the LwM2M client""" zh: """下行命令主题。 -对于每个成功上线的新 LwM2M 客户端,网关会创建一个订阅关系来接收下行消息并将其发送给客户端""" +对于每个成功上线的新 LwM2M 客户端,网关会创建一个订阅关系来接收下行消息并将其发送给客户端。""" } } lwm2m_translators_response { desc { en: """The topic for gateway to publish the acknowledge events from LwM2M client""" - zh: """用于网关发布来自 LwM2M 客户端的确认事件的主题""" + zh: """用于网关发布来自 LwM2M 客户端的确认事件的主题。""" } } @@ -261,28 +261,28 @@ For each new LwM2M client that succeeds in going online, the gateway creates a s After succeed observe a resource of LwM2M client, Gateway will send the notify events via this topic, if the client reports any resource changes""" zh: """用于发布来自 LwM2M 客户端的通知事件的主题。 -在成功 Observe 到 LwM2M 客户端的资源后,如果客户端报告任何资源状态的变化,网关将通过该主题发送通知事件""" +在成功 Observe 到 LwM2M 客户端的资源后,如果客户端报告任何资源状态的变化,网关将通过该主题发送通知事件。""" } } lwm2m_translators_register { desc { en: """The topic for gateway to publish the register events from LwM2M client.""" - zh: """用于发布来自 LwM2M 客户端的注册事件的主题""" + zh: """用于发布来自 LwM2M 客户端的注册事件的主题。""" } } lwm2m_translators_update { desc { en: """The topic for gateway to publish the update events from LwM2M client""" - zh: """用于发布来自LwM2M客户端的更新事件的主题""" + zh: 
"""用于发布来自LwM2M客户端的更新事件的主题。""" } } translator { desc { en: """MQTT topic that corresponds to a particular type of event.""" - zh: """配置某网关客户端对于发布消息或订阅的主题和 QoS 等级""" + zh: """配置某网关客户端对于发布消息或订阅的主题和 QoS 等级。""" } } @@ -412,28 +412,28 @@ After succeed observe a resource of LwM2M client, Gateway will send the notify e gateway_common_authentication { desc { en: """Default authentication configs for all the gateway listeners. For per-listener overrides see authentication\n in listener configs""" - zh: """网关的认证器配置,对该网关下所以的监听器生效。如果每个监听器需要配置不同的认证器,需要配置监听器下的 authentication 字段""" + zh: """网关的认证器配置,对该网关下所以的监听器生效。如果每个监听器需要配置不同的认证器,需要配置监听器下的 authentication 字段。""" } } tcp_udp_listeners { desc { en: """Settings for the listeners.""" - zh: """监听器配置""" + zh: """监听器配置。""" } } tcp_listeners { desc { en: """Settings for the TCP listeners.""" - zh: """配置 TCP 类型的监听器""" + zh: """配置 TCP 类型的监听器。""" } } udp_listeners { desc { en: """Settings for the UDP listeners.""" - zh: """配置 UDP 类型的监听器""" + zh: """配置 UDP 类型的监听器。""" } } @@ -454,7 +454,7 @@ After succeed observe a resource of LwM2M client, Gateway will send the notify e tcp_listener_tcp_opts{ desc { en: """Setting the TCP socket options.""" - zh: """TCP Socket 配置""" + zh: """TCP Socket 配置。""" } } @@ -484,7 +484,7 @@ EMQX will close the TCP connection if proxy protocol packet is not received with ssl_listener_options { desc { en: """SSL Socket options.""" - zh: """SSL Socket 配置""" + zh: """SSL Socket 配置。""" } } @@ -498,7 +498,7 @@ EMQX will close the TCP connection if proxy protocol packet is not received with udp_listener_udp_opts { desc { en: """Settings for the UDP sockets.""" - zh: """UDP Socket 配置 """ + zh: """UDP Socket 配置。""" } } @@ -535,7 +535,7 @@ See: https://erlang.org/doc/man/inet.html#setopts-2""" udp_listener_reuseaddr { desc { en: """Allow local reuse of port numbers.""" - zh: """允许重用本地处于 TIME_WAIT 的端口号""" + zh: """允许重用本地处于 TIME_WAIT 的端口号。""" } } diff --git a/apps/emqx_gateway/src/coap/emqx_coap_api.erl b/apps/emqx_gateway/src/coap/emqx_coap_api.erl index 5e94934da..f06071040 100644 --- a/apps/emqx_gateway/src/coap/emqx_coap_api.erl +++ b/apps/emqx_gateway/src/coap/emqx_coap_api.erl @@ -29,6 +29,7 @@ -export([request/2]). -define(PREFIX, "/gateways/coap/clients/:clientid"). +-define(TAGS, [<<"CoAP Gateways">>]). -import(hoconsc, [mk/2, enum/1]). -import(emqx_dashboard_swagger, [error_codes/2]). 
@@ -48,8 +49,9 @@ schema(?PREFIX ++ "/request") -> #{ operationId => request, post => #{ - tags => [<<"CoAP">>], + tags => ?TAGS, desc => ?DESC(send_coap_request), + summary => <<"Send a Request to a Client">>, parameters => request_parameters(), requestBody => request_body(), responses => #{ diff --git a/apps/emqx_gateway/src/coap/handler/emqx_coap_pubsub_handler.erl b/apps/emqx_gateway/src/coap/handler/emqx_coap_pubsub_handler.erl index 2e962a0bc..8587dc1dc 100644 --- a/apps/emqx_gateway/src/coap/handler/emqx_coap_pubsub_handler.erl +++ b/apps/emqx_gateway/src/coap/handler/emqx_coap_pubsub_handler.erl @@ -121,7 +121,7 @@ apply_publish_opts(Msg, MQTTMsg) -> maps:fold( fun (<<"retain">>, V, Acc) -> - Val = erlang:binary_to_atom(V), + Val = V =:= <<"true">>, emqx_message:set_flag(retain, Val, Acc); (<<"expiry">>, V, Acc) -> Val = erlang:binary_to_integer(V), diff --git a/apps/emqx_gateway/src/emqx_gateway.app.src b/apps/emqx_gateway/src/emqx_gateway.app.src index 47245c0a2..491d0242a 100644 --- a/apps/emqx_gateway/src/emqx_gateway.app.src +++ b/apps/emqx_gateway/src/emqx_gateway.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_gateway, [ {description, "The Gateway management application"}, - {vsn, "0.1.5"}, + {vsn, "0.1.7"}, {registered, []}, {mod, {emqx_gateway_app, []}}, {applications, [kernel, stdlib, grpc, emqx, emqx_authn]}, diff --git a/apps/emqx_gateway/src/emqx_gateway_api.erl b/apps/emqx_gateway/src/emqx_gateway_api.erl index 5ae8fe1e7..dc34c03a8 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api.erl @@ -53,6 +53,9 @@ gateway_insta/2 ]). +-define(KNOWN_GATEWAY_STATUSES, [<<"running">>, <<"stopped">>, <<"unloaded">>]). +-define(TAGS, [<<"Gateways">>]). + %%-------------------------------------------------------------------- %% minirest behaviour callbacks %%-------------------------------------------------------------------- @@ -71,12 +74,22 @@ paths() -> gateway(get, Request) -> Params = maps:get(query_string, Request, #{}), - Status = - case maps:get(<<"status">>, Params, undefined) of - undefined -> all; - S0 -> binary_to_existing_atom(S0, utf8) - end, - {200, emqx_gateway_http:gateways(Status)}; + Status = maps:get(<<"status">>, Params, <<"all">>), + case lists:member(Status, [<<"all">> | ?KNOWN_GATEWAY_STATUSES]) of + true -> + {200, emqx_gateway_http:gateways(binary_to_existing_atom(Status, utf8))}; + false -> + return_http_error( + 400, + [ + "Unknown gateway status in query: ", + Status, + "\n", + "Values allowed: ", + lists:join(", ", ?KNOWN_GATEWAY_STATUSES) + ] + ) + end; gateway(post, Request) -> Body = maps:get(body, Request, #{}), try @@ -138,7 +151,7 @@ gateway_insta(get, #{bindings := #{name := Name0}}) -> end catch error:badarg -> - return_http_error(400, "Bad gateway name") + return_http_error(404, "Bad gateway name") end; gateway_insta(put, #{ body := GwConf0, @@ -164,7 +177,9 @@ schema("/gateways") -> 'operationId' => gateway, get => #{ + tags => ?TAGS, desc => ?DESC(list_gateway), + summary => <<"List All Gateways">>, parameters => params_gateway_status_in_qs(), responses => ?STANDARD_RESP( @@ -178,7 +193,9 @@ schema("/gateways") -> }, post => #{ + tags => ?TAGS, desc => ?DESC(enable_gateway), + summary => <<"Enable a Gateway">>, %% TODO: distinguish create & response swagger schema 'requestBody' => schema_gateways_conf(), responses => @@ -190,21 +207,27 @@ schema("/gateways/:name") -> 'operationId' => gateway_insta, get => #{ + tags => ?TAGS, desc => ?DESC(get_gateway), + summary => <<"Get the 
Gateway">>, parameters => params_gateway_name_in_path(), responses => ?STANDARD_RESP(#{200 => schema_gateways_conf()}) }, delete => #{ + tags => ?TAGS, desc => ?DESC(delete_gateway), + summary => <<"Unload the gateway">>, parameters => params_gateway_name_in_path(), responses => ?STANDARD_RESP(#{204 => <<"Deleted">>}) }, put => #{ + tags => ?TAGS, desc => ?DESC(update_gateway), + summary => <<"Update the gateway confs">>, parameters => params_gateway_name_in_path(), 'requestBody' => schema_update_gateways_conf(), responses => @@ -224,23 +247,23 @@ params_gateway_name_in_path() -> binary(), #{ in => path, - desc => ?DESC(gateway_name), - example => <<"">> + desc => ?DESC(gateway_name_in_qs), + example => <<"stomp">> } )} ]. params_gateway_status_in_qs() -> - %% FIXME: enum in swagger ?? [ {status, mk( binary(), #{ in => query, + enum => ?KNOWN_GATEWAY_STATUSES, required => false, - desc => ?DESC(gateway_status), - example => <<"">> + desc => ?DESC(gateway_status_in_qs), + example => <<"running">> } )} ]. @@ -663,7 +686,7 @@ examples_gateway_confs() -> enable_stats => true, idle_timeout => <<"30s">>, mountpoint => <<"lwm2m/">>, - xml_dir => emqx:etc_file(<<"lwm2m_xml">>), + xml_dir => <<"/etc/emqx/lwm2m_xml">>, lifetime_min => <<"1s">>, lifetime_max => <<"86400s">>, qmode_time_window => <<"22s">>, @@ -782,7 +805,7 @@ examples_update_gateway_confs() -> enable_stats => true, idle_timeout => <<"30s">>, mountpoint => <<"lwm2m2/">>, - xml_dir => emqx:etc_file(<<"lwm2m_xml">>), + xml_dir => <<"/etc/emqx/lwm2m_xml">>, lifetime_min => <<"1s">>, lifetime_max => <<"86400s">>, qmode_time_window => <<"22s">>, diff --git a/apps/emqx_gateway/src/emqx_gateway_api_authn.erl b/apps/emqx_gateway/src/emqx_gateway_api_authn.erl index 6fd073a3b..f337563ee 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_authn.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_authn.erl @@ -52,6 +52,8 @@ %% internal export for emqx_gateway_api_listeners module -export([schema_authn/0]). +-define(TAGS, [<<"Gateway Authentication">>]). + %%-------------------------------------------------------------------- %% minirest behaviour callbacks %%-------------------------------------------------------------------- @@ -75,6 +77,7 @@ authn(get, #{bindings := #{name := Name0}}) -> Authn -> {200, Authn} catch error:{config_not_found, _} -> + %% FIXME: should return 404? 
{204} end end); @@ -181,19 +184,23 @@ schema("/gateways/:name/authentication") -> 'operationId' => authn, get => #{ + tags => ?TAGS, desc => ?DESC(get_authn), + summary => <<"Get Authenticator Configuration">>, parameters => params_gateway_name_in_path(), responses => ?STANDARD_RESP( #{ 200 => schema_authn(), - 204 => <<"Authentication does not initiated">> + 204 => <<"Authenticator doesn't initiated">> } ) }, put => #{ + tags => ?TAGS, desc => ?DESC(update_authn), + summary => <<"Update Authenticator Configuration">>, parameters => params_gateway_name_in_path(), 'requestBody' => schema_authn(), responses => @@ -201,7 +208,9 @@ schema("/gateways/:name/authentication") -> }, post => #{ + tags => ?TAGS, desc => ?DESC(add_authn), + summary => <<"Create an Authenticator for a Gateway">>, parameters => params_gateway_name_in_path(), 'requestBody' => schema_authn(), responses => @@ -209,7 +218,9 @@ schema("/gateways/:name/authentication") -> }, delete => #{ + tags => ?TAGS, desc => ?DESC(delete_authn), + summary => <<"Delete the Gateway Authenticator">>, parameters => params_gateway_name_in_path(), responses => ?STANDARD_RESP(#{204 => <<"Deleted">>}) @@ -220,7 +231,9 @@ schema("/gateways/:name/authentication/users") -> 'operationId' => users, get => #{ + tags => ?TAGS, desc => ?DESC(list_users), + summary => <<"List users for a Gateway Authenticator">>, parameters => params_gateway_name_in_path() ++ params_paging_in_qs() ++ params_fuzzy_in_qs(), @@ -236,7 +249,9 @@ schema("/gateways/:name/authentication/users") -> }, post => #{ + tags => ?TAGS, desc => ?DESC(add_user), + summary => <<"Add User for a Gateway Authenticator">>, parameters => params_gateway_name_in_path(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( ref(emqx_authn_api, request_user_create), @@ -258,7 +273,9 @@ schema("/gateways/:name/authentication/users/:uid") -> 'operationId' => users_insta, get => #{ + tags => ?TAGS, desc => ?DESC(get_user), + summary => <<"Get User Info for a Gateway Authenticator">>, parameters => params_gateway_name_in_path() ++ params_userid_in_path(), responses => @@ -273,7 +290,9 @@ schema("/gateways/:name/authentication/users/:uid") -> }, put => #{ + tags => ?TAGS, desc => ?DESC(update_user), + summary => <<"Update User Info for a Gateway Authenticator">>, parameters => params_gateway_name_in_path() ++ params_userid_in_path(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( @@ -292,7 +311,9 @@ schema("/gateways/:name/authentication/users/:uid") -> }, delete => #{ + tags => ?TAGS, desc => ?DESC(delete_user), + summary => <<"Delete User for a Gateway Authenticator">>, parameters => params_gateway_name_in_path() ++ params_userid_in_path(), responses => @@ -311,8 +332,8 @@ params_gateway_name_in_path() -> binary(), #{ in => path, - desc => ?DESC(emqx_gateway_api, gateway_name), - example => <<"">> + desc => ?DESC(emqx_gateway_api, gateway_name_in_qs), + example => <<"stomp">> } )} ]. @@ -325,7 +346,7 @@ params_userid_in_path() -> #{ in => path, desc => ?DESC(user_id), - example => <<"">> + example => <<"test_username">> } )} ]. 
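As an illustrative sketch of the user-management flow documented above (the request/2,3 helper is the one used by emqx_gateway_api_SUITE further down; the user ID, password and response codes here are assumptions, not part of this change):
%% Add a user to the stomp gateway's built_in_database authenticator,
%% then read it back via the :uid path parameter.
{_, _} = request(post, "/gateways/stomp/authentication/users", #{
    user_id => <<"test_username">>,
    password => <<"secret">>,
    is_superuser => false
}),
%% A successful lookup is expected to answer 200 with the stored user info.
{200, _User} = request(get, "/gateways/stomp/authentication/users/test_username").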
@@ -343,7 +364,7 @@ params_fuzzy_in_qs() -> in => query, required => false, desc => ?DESC(like_user_id), - example => <<"username">> + example => <<"test_">> } )}, {is_superuser, diff --git a/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl b/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl index 38036f7c7..b26e77e83 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_authn_user_import.erl @@ -45,6 +45,8 @@ import_listener_users/2 ]). +-define(TAGS, [<<"Gateway Authentication">>]). + %%-------------------------------------------------------------------- %% minirest behaviour callbacks %%-------------------------------------------------------------------- @@ -122,7 +124,9 @@ schema("/gateways/:name/authentication/import_users") -> 'operationId' => import_users, post => #{ + tags => ?TAGS, desc => ?DESC(emqx_gateway_api_authn, import_users), + summary => <<"Import Users">>, parameters => params_gateway_name_in_path(), 'requestBody' => emqx_dashboard_swagger:file_schema(filename), responses => @@ -134,7 +138,9 @@ schema("/gateways/:name/listeners/:id/authentication/import_users") -> 'operationId' => import_listener_users, post => #{ + tags => ?TAGS, desc => ?DESC(emqx_gateway_api_listeners, import_users), + summary => <<"Import Users">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => emqx_dashboard_swagger:file_schema(filename), @@ -144,6 +150,7 @@ schema("/gateways/:name/listeners/:id/authentication/import_users") -> }; schema(Path) -> emqx_gateway_utils:make_compatible_schema(Path, fun schema/1). + %%-------------------------------------------------------------------- %% params defines %%-------------------------------------------------------------------- @@ -155,7 +162,7 @@ params_gateway_name_in_path() -> binary(), #{ in => path, - desc => ?DESC(emqx_gateway_api, gateway_name), + desc => ?DESC(emqx_gateway_api, gateway_name_in_qs), example => <<"stomp">> } )} diff --git a/apps/emqx_gateway/src/emqx_gateway_api_clients.erl b/apps/emqx_gateway/src/emqx_gateway_api_clients.erl index b7cf9fc64..5f6cc25b6 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_clients.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_clients.erl @@ -59,6 +59,8 @@ format_channel_info/1 ]). +-define(TAGS, [<<"Gateway Clients">>]). 
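A companion sketch for the substring lookup described by like_user_id above (same assumed request helper; the prefix is arbitrary):
%% List users whose ID contains the substring "test_" (built_in_database only).
{200, _Page} = request(get, "/gateways/stomp/authentication/users?like_user_id=test_").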
+ %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- @@ -113,15 +115,19 @@ clients(get, #{ ?QUERY_FUN ); Node0 -> - Node1 = binary_to_atom(Node0, utf8), - QStringWithoutNode = maps:without([<<"node">>], QString), - emqx_mgmt_api:node_query( - Node1, - QStringWithoutNode, - TabName, - ?CLIENT_QSCHEMA, - ?QUERY_FUN - ) + case emqx_misc:safe_to_existing_atom(Node0) of + {ok, Node1} -> + QStringWithoutNode = maps:without([<<"node">>], QString), + emqx_mgmt_api:node_query( + Node1, + QStringWithoutNode, + TabName, + ?CLIENT_QSCHEMA, + ?QUERY_FUN + ); + {error, _} -> + {error, Node0, {badrpc, <<"invalid node">>}} + end end, case Result of {error, page_limit_invalid} -> @@ -467,7 +473,9 @@ schema("/gateways/:name/clients") -> 'operationId' => clients, get => #{ + tags => ?TAGS, desc => ?DESC(list_clients), + summary => <<"List Gateway's Clients">>, parameters => params_client_query(), responses => ?STANDARD_RESP(#{200 => schema_client_list()}) @@ -478,14 +486,18 @@ schema("/gateways/:name/clients/:clientid") -> 'operationId' => clients_insta, get => #{ + tags => ?TAGS, desc => ?DESC(get_client), + summary => <<"Get Client Info">>, parameters => params_client_insta(), responses => ?STANDARD_RESP(#{200 => schema_client()}) }, delete => #{ + tags => ?TAGS, desc => ?DESC(kick_client), + summary => <<"Kick out Client">>, parameters => params_client_insta(), responses => ?STANDARD_RESP(#{204 => <<"Kicked">>}) @@ -496,7 +508,9 @@ schema("/gateways/:name/clients/:clientid/subscriptions") -> 'operationId' => subscriptions, get => #{ + tags => ?TAGS, desc => ?DESC(list_subscriptions), + summary => <<"List Client's Subscription">>, parameters => params_client_insta(), responses => ?STANDARD_RESP( @@ -510,7 +524,9 @@ schema("/gateways/:name/clients/:clientid/subscriptions") -> }, post => #{ + tags => ?TAGS, desc => ?DESC(add_subscription), + summary => <<"Add Subscription for Client">>, parameters => params_client_insta(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( ref(subscription), @@ -532,7 +548,9 @@ schema("/gateways/:name/clients/:clientid/subscriptions/:topic") -> 'operationId' => subscriptions, delete => #{ + tags => ?TAGS, desc => ?DESC(delete_subscription), + summary => <<"Delete Client's Subscription">>, parameters => params_topic_name_in_path() ++ params_client_insta(), responses => ?STANDARD_RESP(#{204 => <<"Unsubscribed">>}) diff --git a/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl b/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl index 92903ec35..08bf37a47 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl @@ -60,6 +60,8 @@ %% RPC -export([do_listeners_cluster_status/1]). +-define(TAGS, [<<"Gateway Listeners">>]). 
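The node-scoped client query above now validates the `node` query parameter with emqx_misc:safe_to_existing_atom/1 instead of binary_to_atom/2, so a crafted node name can no longer allocate new atoms. A minimal sketch of the guard, runnable from an EMQX remote console (the node names are placeholders):
%% A name that already exists as an atom (e.g. the local node) converts fine:
{ok, _Node} = emqx_misc:safe_to_existing_atom(atom_to_binary(node(), utf8)),
%% An unknown name is rejected instead of creating a fresh atom:
{error, _} = emqx_misc:safe_to_existing_atom(<<"nosuch@node">>).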
+ %%-------------------------------------------------------------------- %% minirest behaviour callbacks %%-------------------------------------------------------------------- @@ -358,7 +360,9 @@ schema("/gateways/:name/listeners") -> 'operationId' => listeners, get => #{ + tags => ?TAGS, desc => ?DESC(list_listeners), + summary => <<"List All Listeners">>, parameters => params_gateway_name_in_path(), responses => ?STANDARD_RESP( @@ -372,7 +376,9 @@ schema("/gateways/:name/listeners") -> }, post => #{ + tags => ?TAGS, desc => ?DESC(add_listener), + summary => <<"Add a Listener">>, parameters => params_gateway_name_in_path(), %% XXX: How to distinguish the different listener supported by %% different types of gateways? @@ -396,7 +402,9 @@ schema("/gateways/:name/listeners/:id") -> 'operationId' => listeners_insta, get => #{ + tags => ?TAGS, desc => ?DESC(get_listener), + summary => <<"Get the Listener Configs">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), responses => @@ -411,7 +419,9 @@ schema("/gateways/:name/listeners/:id") -> }, delete => #{ + tags => ?TAGS, desc => ?DESC(delete_listener), + summary => <<"Delete the Listener">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), responses => @@ -419,7 +429,9 @@ schema("/gateways/:name/listeners/:id") -> }, put => #{ + tags => ?TAGS, desc => ?DESC(update_listener), + summary => <<"Update the Listener Configs">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( @@ -442,7 +454,9 @@ schema("/gateways/:name/listeners/:id/authentication") -> 'operationId' => listeners_insta_authn, get => #{ + tags => ?TAGS, desc => ?DESC(get_listener_authn), + summary => <<"Get the Listener's Authenticator">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), responses => @@ -455,7 +469,9 @@ schema("/gateways/:name/listeners/:id/authentication") -> }, post => #{ + tags => ?TAGS, desc => ?DESC(add_listener_authn), + summary => <<"Create an Authenticator for a Listener">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => schema_authn(), @@ -464,7 +480,9 @@ schema("/gateways/:name/listeners/:id/authentication") -> }, put => #{ + tags => ?TAGS, desc => ?DESC(update_listener_authn), + summary => <<"Update the Listener Authenticator configs">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => schema_authn(), @@ -473,7 +491,9 @@ schema("/gateways/:name/listeners/:id/authentication") -> }, delete => #{ + tags => ?TAGS, desc => ?DESC(delete_listener_authn), + summary => <<"Delete the Listener's Authenticator">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), responses => @@ -485,7 +505,9 @@ schema("/gateways/:name/listeners/:id/authentication/users") -> 'operationId' => users, get => #{ + tags => ?TAGS, desc => ?DESC(list_users), + summary => <<"List Authenticator's Users">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path() ++ params_paging_in_qs(), @@ -501,7 +523,9 @@ schema("/gateways/:name/listeners/:id/authentication/users") -> }, post => #{ + tags => ?TAGS, desc => ?DESC(add_user), + summary => <<"Add User for an Authenticator">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path(), 'requestBody' => emqx_dashboard_swagger:schema_with_examples( @@ -524,7 +548,9 @@ 
schema("/gateways/:name/listeners/:id/authentication/users/:uid") -> 'operationId' => users_insta, get => #{ + tags => ?TAGS, desc => ?DESC(get_user), + summary => <<"Get User Info">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path() ++ params_userid_in_path(), @@ -540,7 +566,9 @@ schema("/gateways/:name/listeners/:id/authentication/users/:uid") -> }, put => #{ + tags => ?TAGS, desc => ?DESC(update_user), + summary => <<"Update User Info">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path() ++ params_userid_in_path(), @@ -560,7 +588,9 @@ schema("/gateways/:name/listeners/:id/authentication/users/:uid") -> }, delete => #{ + tags => ?TAGS, desc => ?DESC(delete_user), + summary => <<"Delete User">>, parameters => params_gateway_name_in_path() ++ params_listener_id_in_path() ++ params_userid_in_path(), @@ -570,6 +600,7 @@ schema("/gateways/:name/listeners/:id/authentication/users/:uid") -> }; schema(Path) -> emqx_gateway_utils:make_compatible_schema(Path, fun schema/1). + %%-------------------------------------------------------------------- %% params defines @@ -580,8 +611,8 @@ params_gateway_name_in_path() -> binary(), #{ in => path, - desc => ?DESC(emqx_gateway_api, gateway_name), - example => <<"">> + desc => ?DESC(emqx_gateway_api, gateway_name_in_qs), + example => <<"stomp">> } )} ]. @@ -725,9 +756,9 @@ examples_listener() -> <<"tlsv1.1">>, <<"tlsv1">> ], - cacertfile => emqx:cert_file(<<"cacert.pem">>), - certfile => emqx:cert_file(<<"cert.pem">>), - keyfile => emqx:cert_file(<<"key.pem">>), + cacertfile => <<"/etc/emqx/certs/cacert.pem">>, + certfile => <<"/etc/emqx/certs/cert.pem">>, + keyfile => <<"/etc/emqx/certs/key.pem">>, verify => <<"verify_none">>, fail_if_no_peer_cert => false }, @@ -771,9 +802,9 @@ examples_listener() -> dtls_options => #{ versions => [<<"dtlsv1.2">>, <<"dtlsv1">>], - cacertfile => emqx:cert_file(<<"cacert.pem">>), - certfile => emqx:cert_file(<<"cert.pem">>), - keyfile => emqx:cert_file(<<"key.pem">>), + cacertfile => <<"/etc/emqx/certs/cacert.pem">>, + certfile => <<"/etc/emqx/certs/cert.pem">>, + keyfile => <<"/etc/emqx/certs/key.pem">>, verify => <<"verify_none">>, fail_if_no_peer_cert => false }, @@ -798,9 +829,9 @@ examples_listener() -> dtls_options => #{ versions => [<<"dtlsv1.2">>, <<"dtlsv1">>], - cacertfile => emqx:cert_file(<<"cacert.pem">>), - certfile => emqx:cert_file(<<"cert.pem">>), - keyfile => emqx:cert_file(<<"key.pem">>), + cacertfile => <<"/etc/emqx/certs/cacert.pem">>, + certfile => <<"/etc/emqx/certs/cert.pem">>, + keyfile => <<"/etc/emqx/certs/key.pem">>, verify => <<"verify_none">>, user_lookup_fun => <<"emqx_tls_psk:lookup">>, ciphers => diff --git a/apps/emqx_gateway/src/emqx_gateway_schema.erl b/apps/emqx_gateway/src/emqx_gateway_schema.erl index 8850fa462..c58d2f74c 100644 --- a/apps/emqx_gateway/src/emqx_gateway_schema.erl +++ b/apps/emqx_gateway/src/emqx_gateway_schema.erl @@ -28,7 +28,7 @@ -include_lib("hocon/include/hoconsc.hrl"). -include_lib("typerefl/include/types.hrl"). --type ip_port() :: tuple(). +-type ip_port() :: tuple() | integer(). -type duration() :: non_neg_integer(). -type duration_s() :: non_neg_integer(). -type bytesize() :: pos_integer(). @@ -250,7 +250,12 @@ fields(lwm2m) -> sc( binary(), #{ - default => emqx:etc_file("lwm2m_xml"), + %% since this is not packaged with emqx, nor + %% present in the packages, we must let the user + %% specify it rather than creating a dynamic + %% default (especially difficult to handle when + %% generating docs). 
+ example => <<"/etc/emqx/lwm2m_xml">>, required => true, desc => ?DESC(lwm2m_xml_dir) } diff --git a/apps/emqx_gateway/src/emqx_gateway_utils.erl b/apps/emqx_gateway/src/emqx_gateway_utils.erl index 8df7d84c0..68fce7589 100644 --- a/apps/emqx_gateway/src/emqx_gateway_utils.erl +++ b/apps/emqx_gateway/src/emqx_gateway_utils.erl @@ -70,6 +70,8 @@ default_subopts/0 ]). +-import(emqx_listeners, [esockd_access_rules/1]). + -define(ACTIVE_N, 100). -define(DEFAULT_IDLE_TIMEOUT, 30000). -define(DEFAULT_GC_OPTS, #{count => 1000, bytes => 1024 * 1024}). @@ -443,19 +445,6 @@ esockd_opts(Type, Opts0) -> end ). -esockd_access_rules(StrRules) -> - Access = fun(S) -> - [A, CIDR] = string:tokens(S, " "), - { - list_to_atom(A), - case CIDR of - "all" -> all; - _ -> CIDR - end - } - end, - [Access(R) || R <- StrRules]. - ssl_opts(Name, Opts) -> Type = case Name of diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl b/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl index 1aa0bac93..9a6468455 100644 --- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl +++ b/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl @@ -27,6 +27,7 @@ -define(PATH(Suffix), "/gateways/lwm2m/clients/:clientid" Suffix). -define(DATA_TYPE, ['Integer', 'Float', 'Time', 'String', 'Boolean', 'Opaque', 'Objlnk']). +-define(TAGS, [<<"LwM2M Gateways">>]). -import(hoconsc, [mk/2, ref/1, ref/2]). -import(emqx_dashboard_swagger, [error_codes/2]). @@ -45,8 +46,9 @@ schema(?PATH("/lookup")) -> #{ 'operationId' => lookup, get => #{ - tags => [<<"LwM2M">>], + tags => ?TAGS, desc => ?DESC(lookup_resource), + summary => <<"List Client's Resources">>, parameters => [ {clientid, mk(binary(), #{in => path, example => "urn:oma:lwm2m:oma:2"})}, {path, mk(binary(), #{in => query, required => true, example => "/3/0/7"})}, @@ -69,8 +71,9 @@ schema(?PATH("/observe")) -> #{ 'operationId' => observe, post => #{ - tags => [<<"LwM2M">>], + tags => ?TAGS, desc => ?DESC(observe_resource), + summary => <<"Observe a Resource">>, parameters => [ {clientid, mk(binary(), #{in => path, example => "urn:oma:lwm2m:oma:2"})}, {path, mk(binary(), #{in => query, required => true, example => "/3/0/7"})}, @@ -87,8 +90,9 @@ schema(?PATH("/read")) -> #{ 'operationId' => read, post => #{ - tags => [<<"LwM2M">>], + tags => ?TAGS, desc => ?DESC(read_resource), + summary => <<"Read Value from a Resource Path">>, parameters => [ {clientid, mk(binary(), #{in => path, example => "urn:oma:lwm2m:oma:2"})}, {path, mk(binary(), #{in => query, required => true, example => "/3/0/7"})} @@ -103,8 +107,9 @@ schema(?PATH("/write")) -> #{ 'operationId' => write, post => #{ + tags => ?TAGS, desc => ?DESC(write_resource), - tags => [<<"LwM2M">>], + summary => <<"Write a Value to Resource Path">>, parameters => [ {clientid, mk(binary(), #{in => path, example => "urn:oma:lwm2m:oma:2"})}, {path, mk(binary(), #{in => query, required => true, example => "/3/0/7"})}, diff --git a/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl index 8532a3a74..c4a6758a3 100644 --- a/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_api_SUITE.erl @@ -62,8 +62,16 @@ end_per_suite(Conf) -> t_gateway(_) -> {200, Gateways} = request(get, "/gateways"), lists:foreach(fun assert_gw_unloaded/1, Gateways), - {400, BadReq} = request(get, "/gateways/uname_gateway"), - assert_bad_request(BadReq), + {200, UnloadedGateways} = request(get, "/gateways?status=unloaded"), + lists:foreach(fun assert_gw_unloaded/1, UnloadedGateways), + {200, 
NoRunningGateways} = request(get, "/gateways?status=running"), + ?assertEqual([], NoRunningGateways), + {404, GwNotFoundReq} = request(get, "/gateways/unknown_gateway"), + assert_not_found(GwNotFoundReq), + {400, BadReqInvalidStatus} = request(get, "/gateways?status=invalid_status"), + assert_bad_request(BadReqInvalidStatus), + {400, BadReqUCStatus} = request(get, "/gateways?status=UNLOADED"), + assert_bad_request(BadReqUCStatus), {201, _} = request(post, "/gateways", #{name => <<"stomp">>}), {200, StompGw1} = request(get, "/gateways/stomp"), assert_feilds_apperence( @@ -78,8 +86,8 @@ t_gateway(_) -> t_deprecated_gateway(_) -> {200, Gateways} = request(get, "/gateway"), lists:foreach(fun assert_gw_unloaded/1, Gateways), - {400, BadReq} = request(get, "/gateway/uname_gateway"), - assert_bad_request(BadReq), + {404, NotFoundReq} = request(get, "/gateway/uname_gateway"), + assert_not_found(NotFoundReq), {201, _} = request(post, "/gateway", #{name => <<"stomp">>}), {200, StompGw1} = request(get, "/gateway/stomp"), assert_feilds_apperence( @@ -563,3 +571,6 @@ assert_gw_unloaded(Gateway) -> assert_bad_request(BadReq) -> ?assertEqual(<<"BAD_REQUEST">>, maps:get(code, BadReq)). + +assert_not_found(NotFoundReq) -> + ?assertEqual(<<"RESOURCE_NOT_FOUND">>, maps:get(code, NotFoundReq)). diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_alarms_i18n.conf b/apps/emqx_management/i18n/emqx_mgmt_api_alarms_i18n.conf index c6f518c86..0ab09520e 100644 --- a/apps/emqx_management/i18n/emqx_mgmt_api_alarms_i18n.conf +++ b/apps/emqx_management/i18n/emqx_mgmt_api_alarms_i18n.conf @@ -2,78 +2,82 @@ emqx_mgmt_api_alarms { list_alarms_api { desc { - en: """List alarms""" - zh: """列出告警,获取告警列表""" + en: """List currently activated alarms or historical alarms, determined by query parameters.""" + zh: """列出当前激活的告警或历史告警,由查询参数决定。""" } } delete_alarms_api { desc { - en: """Remove all deactivated alarms""" - zh: """删除所有历史告警(非活跃告警)""" + en: """Remove all historical alarms.""" + zh: """删除所有历史告警。""" } } delete_alarms_api_response204 { desc { - en: """Remove all deactivated alarms ok""" - zh: """删除所有历史告警(非活跃告警)成功""" + en: """Historical alarms have been cleared successfully.""" + zh: """历史告警已成功清除。""" } } get_alarms_qs_activated { desc { - en: """Activate alarms, or deactivate alarms. Default is false""" - zh: """活跃中的告警,或历史告警(非活跃告警),默认为 false""" + en: """It is used to specify the alarm type of the query. +When true, it returns the currently activated alarm, +and when it is false, it returns the historical alarm. 
+The default is false.""" + zh: """用于指定查询的告警类型, +为 true 时返回当前激活的告警,为 false 时返回历史告警,默认为 false。""" } } node { desc { - en: """Alarm in node""" - zh: """告警节点名称""" + en: """The name of the node that triggered this alarm.""" + zh: """触发此告警的节点名称。""" } } name { desc { - en: """Alarm name""" - zh: """告警名称""" + en: """Alarm name, used to distinguish different alarms.""" + zh: """告警名称,用于区分不同的告警。""" } } message { desc { - en: """Alarm readable information""" - zh: """告警信息""" + en: """Alarm message, which describes the alarm content in a human-readable format.""" + zh: """告警消息,以人类可读的方式描述告警内容。""" } } details { desc { - en: """Alarm details information""" - zh: """告警详细信息""" + en: """Alarm details, provides more alarm information, mainly for program processing.""" + zh: """告警详情,提供了更多的告警信息,主要提供给程序处理。""" } } duration { desc { - en: """Alarms duration time; UNIX time stamp, millisecond""" - zh: """告警持续时间,单位:毫秒""" + en: """Indicates how long the alarm has lasted, in milliseconds.""" + zh: """表明告警已经持续了多久,单位:毫秒。""" } } activate_at { desc { - en: """Alarms activate time, RFC 3339""" - zh: """告警开始时间,使用 rfc3339 标准时间格式""" + en: """Alarm start time, using rfc3339 standard time format.""" + zh: """告警开始时间,使用 rfc3339 标准时间格式。""" } } deactivate_at { desc { - en: """Alarms deactivate time, RFC 3339""" - zh: """告警结束时间,使用 rfc3339 标准时间格式""" + en: """Alarm end time, using rfc3339 standard time format.""" + zh: """告警结束时间,使用 rfc3339 标准时间格式。""" } } diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_banned_i18n.conf b/apps/emqx_management/i18n/emqx_mgmt_api_banned_i18n.conf index 686293d0b..3045cb293 100644 --- a/apps/emqx_management/i18n/emqx_mgmt_api_banned_i18n.conf +++ b/apps/emqx_management/i18n/emqx_mgmt_api_banned_i18n.conf @@ -2,112 +2,97 @@ emqx_mgmt_api_banned { list_banned_api { desc { - en: """List banned.""" - zh: """列出黑名单""" - } - label { - en: """List Banned""" - zh: """列出黑名单""" + en: """List all currently banned client IDs, usernames and IP addresses.""" + zh: """列出目前所有被封禁的客户端 ID、用户名和 IP 地址。""" } } create_banned_api { desc { - en: """Create banned.""" - zh: """创建黑名单""" + en: """Add a client ID, username or IP address to the blacklist.""" + zh: """添加一个客户端 ID、用户名或者 IP 地址到黑名单。""" } } create_banned_api_response400 { desc { - en: """Banned already existed, or bad args.""" - zh: """黑名单已存在,或参数格式有错误""" + en: """Bad request, possibly due to wrong parameters or the existence of a banned object.""" + zh: """错误的请求,可能是参数错误或封禁对象已存在等原因。""" } } delete_banned_api { desc { - en: """Delete banned""" - zh: """删除黑名单""" + en: """Remove a client ID, username or IP address from the blacklist.""" + zh: """将一个客户端 ID、用户名或者 IP 地址从黑名单中删除。""" } } delete_banned_api_response404 { desc { - en: """Banned not found. 
May be the banned time has been exceeded""" - zh: """黑名单未找到,可能为已经超期失效""" - } - } - - create_banned { - desc { - en: """List banned.""" - zh: """列出黑名单""" - } - label { - en: """List Banned""" - zh: """列出黑名单""" + en: """The banned object was not found in the blacklist.""" + zh: """未在黑名单中找到该封禁对象。""" } } as { desc { - en: """Banned type clientid, username, peerhost""" - zh: """黑名单类型,可选 clientid、username、peerhost""" + en: """Ban method, which can be client ID, username or IP address.""" + zh: """封禁方式,可以通过客户端 ID、用户名或者 IP 地址等方式进行封禁。""" } label { - en: """Banned Type""" - zh: """黑名单类型""" + en: """Ban Method""" + zh: """封禁方式""" } } who { desc { - en: """Client info as banned type""" - zh: """设备信息""" + en: """Ban object, specific client ID, username or IP address.""" + zh: """封禁对象,具体的客户端 ID、用户名或者 IP 地址。""" } label { - en: """Banned Info""" - zh: """黑名单信息""" + en: """Ban Object""" + zh: """封禁对象""" } } by { desc { - en: """Commander""" - zh: """黑名单创建者""" + en: """Initiator of the ban.""" + zh: """封禁的发起者。""" } label { - en: """Commander""" - zh: """黑名单创建者""" + en: """Ban Initiator""" + zh: """封禁发起者""" } } reason { desc { - en: """Banned reason""" - zh: """黑名单创建原因""" + en: """Ban reason, record the reason why the current object was banned.""" + zh: """封禁原因,记录当前对象被封禁的原因。""" } label { - en: """Reason""" - zh: """原因""" + en: """Ban Reason""" + zh: """封禁原因""" } } at { desc { - en: """Create banned time, rfc3339, now if not specified""" - zh: """黑名单创建时间,默认为当前""" + en: """The start time of the ban, the format is rfc3339, the default is the time when the operation was initiated.""" + zh: """封禁的起始时间,格式为 rfc3339,默认为发起操作的时间。""" } label { - en: """Create banned time""" - zh: """黑名单创建时间""" + en: """Ban Time""" + zh: """封禁时间""" } } until { desc { - en: """Cancel banned time, rfc3339, now + 5 minute if not specified""" - zh: """黑名单结束时间,默认为创建时间 + 5 分钟""" + en: """The end time of the ban, the format is rfc3339, the default is the time when the operation was initiated + 5 minutes.""" + zh: """封禁的结束时间,式为 rfc3339,默认为发起操作的时间 + 5 分钟。""" } label { - en: """Cancel banned time""" - zh: """黑名单结束时间""" + en: """Ban End Time""" + zh: """封禁结束时间""" } } } diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_publish_i18n.conf b/apps/emqx_management/i18n/emqx_mgmt_api_publish_i18n.conf new file mode 100644 index 000000000..2a7c9def8 --- /dev/null +++ b/apps/emqx_management/i18n/emqx_mgmt_api_publish_i18n.conf @@ -0,0 +1,127 @@ + +emqx_mgmt_api_publish { + publish_api { + desc { + en: """ +Publish one message.
+Possible HTTP response status codes are:
+200: The message is delivered to at least one subscriber;
+202: No matched subscribers;
+400: Message is invalid, for example a bad topic name, or QoS out of range;
+503: Failed to deliver the message to subscriber(s);
+""" + zh: """ +发布一个消息。
+可能的 HTTP 状态码如下:
+200: 消息被成功发送到至少一个订阅。
+202: 没有匹配到任何订阅。
+400: 消息编码错误,如非法主题,或 QoS 超出范围等。
+503: 服务重启等过程中导致转发失败。

+""" + } + } + publish_bulk_api { + desc { + en: """ +Publish a batch of messages.
+Possible HTTP response status codes are:
+200: All messages are delivered to at least one subscriber;
+202: At least one message was not delivered to any subscriber;
+400: At least one message is invalid, for example a bad topic name, or QoS out of range;
+503: Failed to deliver at least one of the messages;
+ +In case there is at least one invalid message in the batch, the HTTP response body +is the same as for the /publish API.
+Otherwise the HTTP response body is an array of JSON objects indicating the publish +result of each individual message in the batch. +""" + zh: """ +批量发布一组消息。
+可能的 HTTP 状态码如下:
+200: 所有的消息都被成功发送到至少一个订阅。
+202: 至少有一个消息没有匹配到任何订阅。
+400: 至少有一个消息编码错误,如非法主题,或 QoS 超出范围等。
+503: 至少有一个消息因为服务重启的原因导致转发失败。
+ +请求的 Body 或者 Body 中包含的某个消息无法通过 API 规范的类型检查时,HTTP 响应的消息与发布单个消息的 API + /publish 是一样的。 +如果所有的消息都是合法的,那么 HTTP 返回的内容是一个 JSON 数组,每个元素代表了该消息转发的状态。 + +""" + } + } + + topic_name { + desc { + en: "Topic Name" + zh: "主题名称" + } + } + qos { + desc { + en: "MQTT message QoS" + zh: "MQTT 消息的 QoS" + } + } + clientid { + desc { + en: "Each message can be published as if it is done on behalf of an MQTT client whos ID can be specified in this field." + zh: "每个消息都可以带上一个 MQTT 客户端 ID,用于模拟 MQTT 客户端的发布行为。" + } + } + payload { + desc { + en: "The MQTT message payload." + zh: "MQTT 消息体。" + } + } + retain { + desc { + en: "A boolean field to indicate if this message should be retained." + zh: "布尔型字段,用于表示该消息是否保留消息。" + } + } + payload_encoding { + desc { + en: "MQTT Payload Encoding, base64 or plain. When set to base64, the message is decoded before it is published." + zh: "MQTT 消息体的编码方式,可以是 base64plain。当设置为 base64 时,消息在发布前会先被解码。" + } + } + message_id { + desc { + en: "A globally unique message ID for correlation/tracing." + zh: "全局唯一的一个消息 ID,方便用于关联和追踪。" + } + } + reason_code { + desc { + en: """ +The MQTT reason code, as the same ones used in PUBACK packet.
+Currently supported codes are:
+ +16(0x10): No matching subscribers;
+131(0x81): An error happened when dispatching the message, e.g. during an EMQX restart;
+144(0x90): Topic name invalid;
+151(0x97): Publish rate limited, or message size exceeded limit. The global size limit can be configured with mqtt.max_packet_size
+NOTE: The message size is estimated with the received topic and payload size, meaning the actual size of serialized bytes (when sent to MQTT subscriber) +might be slightly over the limit. +""" + zh: """ +MQTT 消息发布的错误码,这些错误码也是 MQTT 规范中 PUBACK 消息可能携带的错误码。
+当前支持如下错误码:
+ +16(0x10):没能匹配到任何订阅;
+131(0x81):消息转发时发生错误,例如 EMQX 服务重启;
+144(0x90):主题名称非法;
+151(0x97):受到了速率限制,或者消息尺寸过大。全局消息大小限制可以通过配置项 mqtt.max_packet_size 来进行修改。
+注意:消息尺寸的是通过主题和消息体的字节数进行估算的。具体发布时所占用的字节数可能会稍大于这个估算的值。 +""" + } + } + error_message { + desc { + en: "Describes the failure reason in detail." + zh: "失败的详细原因。" + } + } +} diff --git a/apps/emqx_management/i18n/emqx_mgmt_api_status_i18n.conf b/apps/emqx_management/i18n/emqx_mgmt_api_status_i18n.conf new file mode 100644 index 000000000..fae17b35d --- /dev/null +++ b/apps/emqx_management/i18n/emqx_mgmt_api_status_i18n.conf @@ -0,0 +1,44 @@ +emqx_mgmt_api_status { + get_status_api { + desc { + en: "Serves as a health check for the node. Returns a plain text response" + " describing the status of the node. This endpoint requires no" + " authentication.\n" + "\n" + "Returns status code 200 if the EMQX application is up and running, " + "503 otherwise." + "\n" + "This API was introduced in v5.0.10." + "\n" + "The GET `/status` endpoint (without the `/api/...` prefix) is also an alias" + " to this endpoint and works in the same way. This alias has been available since" + " v5.0.0." + zh: "作为节点的健康检查。 返回一个纯文本的响应,描述节点的状态。\n" + "\n" + "如果 EMQX 应用程序已经启动并运行,返回状态代码 200,否则返回 503。\n" + "\n" + "这个API是在v5.0.10中引入的。" + "\n" + "GET `/status`端点(没有`/api/...`前缀)也是这个端点的一个别名,工作方式相同。" + " 这个别名从v5.0.0开始就有了。" + } + } + + get_status_response200 { + desc { + en: "Node emqx@127.0.0.1 is started\n" + "emqx is running" + zh: "Node emqx@127.0.0.1 is started\n" + "emqx is running" + } + } + + get_status_response503 { + desc { + en: "Node emqx@127.0.0.1 is stopped\n" + "emqx is not_running" + zh: "Node emqx@127.0.0.1 is stopped\n" + "emqx is not_running" + } + } +} diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src index 5f8b30bf4..ab726cbb2 100644 --- a/apps/emqx_management/src/emqx_management.app.src +++ b/apps/emqx_management/src/emqx_management.app.src @@ -2,7 +2,7 @@ {application, emqx_management, [ {description, "EMQX Management API and CLI"}, % strict semver, bump manually! - {vsn, "5.0.6"}, + {vsn, "5.0.8"}, {modules, []}, {registered, [emqx_management_sup]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx]}, diff --git a/apps/emqx_management/src/emqx_mgmt.erl b/apps/emqx_management/src/emqx_mgmt.erl index d1232c122..38b26444c 100644 --- a/apps/emqx_management/src/emqx_mgmt.erl +++ b/apps/emqx_management/src/emqx_mgmt.erl @@ -118,9 +118,11 @@ list_nodes() -> Running = mria_mnesia:cluster_nodes(running), Stopped = mria_mnesia:cluster_nodes(stopped), DownNodes = lists:map(fun stopped_node_info/1, Stopped), - [{Node, node_info(Node)} || Node <- Running] ++ DownNodes. + [{Node, Info} || #{node := Node} = Info <- node_info(Running)] ++ DownNodes. -lookup_node(Node) -> node_info(Node). +lookup_node(Node) -> + [Info] = node_info([Node]), + Info. node_info() -> {UsedRatio, Total} = get_sys_memory(), @@ -153,8 +155,8 @@ get_sys_memory() -> {0, 0} end. -node_info(Node) -> - wrap_rpc(emqx_management_proto_v2:node_info(Node)). +node_info(Nodes) -> + emqx_rpc:unwrap_erpc(emqx_management_proto_v3:node_info(Nodes)). stopped_node_info(Node) -> #{name => Node, node_status => 'stopped'}. @@ -164,17 +166,19 @@ stopped_node_info(Node) -> %%-------------------------------------------------------------------- list_brokers() -> - [{Node, broker_info(Node)} || Node <- mria_mnesia:running_nodes()]. + Running = mria_mnesia:running_nodes(), + [{Node, Broker} || #{node := Node} = Broker <- broker_info(Running)]. lookup_broker(Node) -> - broker_info(Node). + [Broker] = broker_info([Node]), + Broker. 
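Note that the map pattern in the comprehensions above doubles as a filter: any element of node_info/1 or broker_info/1 results that is not a map (for example an error tuple that emqx_rpc:unwrap_erpc/1 may yield for an unreachable node) simply fails the #{node := Node} match and is skipped. A condensed, hypothetical illustration:
%% Only well-formed node-info maps survive the generator pattern:
Results = [#{node => 'emqx@host1', uptime => 1}, {error, noconnection}],
%% => [{'emqx@host1', #{node => 'emqx@host1', uptime => 1}}]
[{Node, Info} || #{node := Node} = Info <- Results].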
broker_info() -> Info = maps:from_list([{K, iolist_to_binary(V)} || {K, V} <- emqx_sys:info()]), Info#{node => node(), otp_release => otp_rel(), node_status => 'Running'}. -broker_info(Node) -> - wrap_rpc(emqx_management_proto_v2:broker_info(Node)). +broker_info(Nodes) -> + emqx_rpc:unwrap_erpc(emqx_management_proto_v3:broker_info(Nodes)). %%-------------------------------------------------------------------- %% Metrics and Stats @@ -184,7 +188,7 @@ get_metrics() -> nodes_info_count([get_metrics(Node) || Node <- mria_mnesia:running_nodes()]). get_metrics(Node) -> - wrap_rpc(emqx_proto_v1:get_metrics(Node)). + unwrap_rpc(emqx_proto_v1:get_metrics(Node)). get_stats() -> GlobalStatsKeys = @@ -212,7 +216,7 @@ delete_keys(List, [Key | Keys]) -> delete_keys(proplists:delete(Key, List), Keys). get_stats(Node) -> - wrap_rpc(emqx_proto_v1:get_stats(Node)). + unwrap_rpc(emqx_proto_v1:get_stats(Node)). nodes_info_count(PropList) -> NodeCount = @@ -242,7 +246,7 @@ lookup_client({username, Username}, FormatFun) -> ]). lookup_client(Node, Key, {M, F}) -> - case wrap_rpc(emqx_cm_proto_v1:lookup_client(Node, Key)) of + case unwrap_rpc(emqx_cm_proto_v1:lookup_client(Node, Key)) of {error, Err} -> {error, Err}; L -> @@ -265,7 +269,7 @@ kickout_client({ClientID, FormatFun}) -> end. kickout_client(Node, ClientId) -> - wrap_rpc(emqx_cm_proto_v1:kickout_client(Node, ClientId)). + unwrap_rpc(emqx_cm_proto_v1:kickout_client(Node, ClientId)). list_authz_cache(ClientId) -> call_client(ClientId, list_authz_cache). @@ -285,14 +289,14 @@ list_client_subscriptions(ClientId) -> end. client_subscriptions(Node, ClientId) -> - {Node, wrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId))}. + {Node, unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId))}. clean_authz_cache(ClientId) -> Results = [clean_authz_cache(Node, ClientId) || Node <- mria_mnesia:running_nodes()], check_results(Results). clean_authz_cache(Node, ClientId) -> - wrap_rpc(emqx_proto_v1:clean_authz_cache(Node, ClientId)). + unwrap_rpc(emqx_proto_v1:clean_authz_cache(Node, ClientId)). clean_authz_cache_all() -> Results = [{Node, clean_authz_cache_all(Node)} || Node <- mria_mnesia:running_nodes()], @@ -309,10 +313,10 @@ wrap_results(Results) -> end. clean_authz_cache_all(Node) -> - wrap_rpc(emqx_proto_v1:clean_authz_cache(Node)). + unwrap_rpc(emqx_proto_v1:clean_authz_cache(Node)). clean_pem_cache_all(Node) -> - wrap_rpc(emqx_proto_v1:clean_pem_cache(Node)). + unwrap_rpc(emqx_proto_v1:clean_pem_cache(Node)). set_ratelimit_policy(ClientId, Policy) -> call_client(ClientId, {ratelimit, Policy}). @@ -358,7 +362,7 @@ do_call_client(ClientId, Req) -> %% @private call_client(Node, ClientId, Req) -> - wrap_rpc(emqx_management_proto_v2:call_client(Node, ClientId, Req)). + unwrap_rpc(emqx_management_proto_v3:call_client(Node, ClientId, Req)). %%-------------------------------------------------------------------- %% Subscriptions @@ -377,7 +381,7 @@ do_list_subscriptions() -> end. list_subscriptions(Node) -> - wrap_rpc(emqx_management_proto_v2:list_subscriptions(Node)). + unwrap_rpc(emqx_management_proto_v3:list_subscriptions(Node)). list_subscriptions_via_topic(Topic, FormatFun) -> lists:append([ @@ -386,7 +390,7 @@ list_subscriptions_via_topic(Topic, FormatFun) -> ]). 
list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) -> - case wrap_rpc(emqx_broker_proto_v1:list_subscriptions_via_topic(Node, Topic)) of + case unwrap_rpc(emqx_broker_proto_v1:list_subscriptions_via_topic(Node, Topic)) of {error, Reason} -> {error, Reason}; Result -> M:F(Result) end. @@ -395,7 +399,7 @@ lookup_subscriptions(ClientId) -> lists:append([lookup_subscriptions(Node, ClientId) || Node <- mria_mnesia:running_nodes()]). lookup_subscriptions(Node, ClientId) -> - wrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId)). + unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId)). %%-------------------------------------------------------------------- %% PubSub @@ -405,7 +409,7 @@ subscribe(ClientId, TopicTables) -> subscribe(mria_mnesia:running_nodes(), ClientId, TopicTables). subscribe([Node | Nodes], ClientId, TopicTables) -> - case wrap_rpc(emqx_management_proto_v2:subscribe(Node, ClientId, TopicTables)) of + case unwrap_rpc(emqx_management_proto_v3:subscribe(Node, ClientId, TopicTables)) of {error, _} -> subscribe(Nodes, ClientId, TopicTables); {subscribe, Res} -> {subscribe, Res, Node} end; @@ -432,7 +436,7 @@ unsubscribe(ClientId, Topic) -> -spec unsubscribe([node()], emqx_types:clientid(), emqx_types:topic()) -> {unsubscribe, _} | {error, channel_not_found}. unsubscribe([Node | Nodes], ClientId, Topic) -> - case wrap_rpc(emqx_management_proto_v2:unsubscribe(Node, ClientId, Topic)) of + case unwrap_rpc(emqx_management_proto_v3:unsubscribe(Node, ClientId, Topic)) of {error, _} -> unsubscribe(Nodes, ClientId, Topic); Re -> Re end; @@ -455,7 +459,7 @@ unsubscribe_batch(ClientId, Topics) -> -spec unsubscribe_batch([node()], emqx_types:clientid(), [emqx_types:topic()]) -> {unsubscribe_batch, _} | {error, channel_not_found}. unsubscribe_batch([Node | Nodes], ClientId, Topics) -> - case wrap_rpc(emqx_management_proto_v2:unsubscribe_batch(Node, ClientId, Topics)) of + case unwrap_rpc(emqx_management_proto_v3:unsubscribe_batch(Node, ClientId, Topics)) of {error, _} -> unsubscribe_batch(Nodes, ClientId, Topics); Re -> Re end; @@ -478,16 +482,16 @@ get_alarms(Type) -> [{Node, get_alarms(Node, Type)} || Node <- mria_mnesia:running_nodes()]. get_alarms(Node, Type) -> - add_duration_field(wrap_rpc(emqx_proto_v1:get_alarms(Node, Type))). + add_duration_field(unwrap_rpc(emqx_proto_v1:get_alarms(Node, Type))). deactivate(Node, Name) -> - wrap_rpc(emqx_proto_v1:deactivate_alarm(Node, Name)). + unwrap_rpc(emqx_proto_v1:deactivate_alarm(Node, Name)). delete_all_deactivated_alarms() -> [delete_all_deactivated_alarms(Node) || Node <- mria_mnesia:running_nodes()]. delete_all_deactivated_alarms(Node) -> - wrap_rpc(emqx_proto_v1:delete_all_deactivated_alarms(Node)). + unwrap_rpc(emqx_proto_v1:delete_all_deactivated_alarms(Node)). add_duration_field(Alarms) -> Now = erlang:system_time(microsecond), @@ -524,10 +528,9 @@ delete_banned(Who) -> %%-------------------------------------------------------------------- %% Internal Functions. %%-------------------------------------------------------------------- - -wrap_rpc({badrpc, Reason}) -> +unwrap_rpc({badrpc, Reason}) -> {error, Reason}; -wrap_rpc(Res) -> +unwrap_rpc(Res) -> Res. otp_rel() -> @@ -547,7 +550,7 @@ check_row_limit([Tab | Tables], Limit) -> check_results(Results) -> case lists:any(fun(Item) -> Item =:= ok end, Results) of true -> ok; - false -> wrap_rpc(lists:last(Results)) + false -> unwrap_rpc(lists:last(Results)) end. 
max_row_limit() -> diff --git a/apps/emqx_management/src/emqx_mgmt_api_app.erl b/apps/emqx_management/src/emqx_mgmt_api_app.erl index 89311a8d1..7050ea1af 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_app.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_app.erl @@ -22,7 +22,7 @@ -export([api_spec/0, fields/1, paths/0, schema/1, namespace/0]). -export([api_key/2, api_key_by_name/2]). -export([validate_name/1]). --define(TAGS, [<<"API keys">>]). +-define(TAGS, [<<"API Keys">>]). namespace() -> "api_key". diff --git a/apps/emqx_management/src/emqx_mgmt_api_clients.erl b/apps/emqx_management/src/emqx_mgmt_api_clients.erl index 19bf63f66..beff0d53e 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_clients.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_clients.erl @@ -648,15 +648,19 @@ list_clients(QString) -> ?QUERY_FUN ); Node0 -> - Node1 = binary_to_atom(Node0, utf8), - QStringWithoutNode = maps:without([<<"node">>], QString), - emqx_mgmt_api:node_query( - Node1, - QStringWithoutNode, - ?CLIENT_QTAB, - ?CLIENT_QSCHEMA, - ?QUERY_FUN - ) + case emqx_misc:safe_to_existing_atom(Node0) of + {ok, Node1} -> + QStringWithoutNode = maps:without([<<"node">>], QString), + emqx_mgmt_api:node_query( + Node1, + QStringWithoutNode, + ?CLIENT_QTAB, + ?CLIENT_QSCHEMA, + ?QUERY_FUN + ); + {error, _} -> + {error, Node0, {badrpc, <<"invalid node">>}} + end end, case Result of {error, page_limit_invalid} -> diff --git a/apps/emqx_management/src/emqx_mgmt_api_configs.erl b/apps/emqx_management/src/emqx_mgmt_api_configs.erl index 8eb801952..db582c612 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_configs.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_configs.erl @@ -103,7 +103,9 @@ schema("/configs") -> )} ], responses => #{ - 200 => lists:map(fun({_, Schema}) -> Schema end, config_list()) + 200 => lists:map(fun({_, Schema}) -> Schema end, config_list()), + 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND']), + 500 => emqx_dashboard_swagger:error_codes(['BAD_NODE']) } } }; @@ -115,7 +117,7 @@ schema("/configs_reset/:rootname") -> tags => ?TAGS, description => << - "Reset the config entry specified by the query string parameter `conf_path`.
\n" + "Reset the config entry specified by the query string parameter `conf_path`.
" "- For a config entry that has default value, this resets it to the default value;\n" "- For a config entry that has no default value, an error 400 will be returned" >>, @@ -311,14 +313,15 @@ config_reset(post, _Params, Req) -> end. configs(get, Params, _Req) -> - Node = maps:get(node, Params, node()), + QS = maps:get(query_string, Params, #{}), + Node = maps:get(<<"node">>, QS, node()), case lists:member(Node, mria_mnesia:running_nodes()) andalso emqx_management_proto_v2:get_full_config(Node) of false -> Message = list_to_binary(io_lib:format("Bad node ~p, reason not found", [Node])), - {500, #{code => 'BAD_NODE', message => Message}}; + {404, #{code => 'NOT_FOUND', message => Message}}; {badrpc, R} -> Message = list_to_binary(io_lib:format("Bad node ~p, reason ~p", [Node, R])), {500, #{code => 'BAD_NODE', message => Message}}; diff --git a/apps/emqx_management/src/emqx_mgmt_api_plugins.erl b/apps/emqx_management/src/emqx_mgmt_api_plugins.erl index 88dd21518..ac9c8644d 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_plugins.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_plugins.erl @@ -70,8 +70,8 @@ schema("/plugins") -> 'operationId' => list_plugins, get => #{ description => - "List all install plugins.
" - "Plugins are launched in top-down order.
" + "List all install plugins.
" + "Plugins are launched in top-down order.
" "Using `POST /plugins/{name}/move` to change the boot order.", tags => ?TAGS, responses => #{ @@ -136,9 +136,9 @@ schema("/plugins/:name/:action") -> 'operationId' => update_plugin, put => #{ description => - "start/stop a installed plugin.
" - "- **start**: start the plugin.
" - "- **stop**: stop the plugin.
", + "start/stop a installed plugin.
" + "- **start**: start the plugin.
" + "- **stop**: stop the plugin.
", tags => ?TAGS, parameters => [ hoconsc:ref(name), @@ -272,9 +272,9 @@ fields(running_status) -> {status, hoconsc:mk(hoconsc:enum([running, stopped]), #{ desc => - "Install plugin status at runtime
" - "1. running: plugin is running.
" - "2. stopped: plugin is stopped.
" + "Install plugin status at runtime
" + "1. running: plugin is running.
" + "2. stopped: plugin is stopped.
" })} ]. diff --git a/apps/emqx_management/src/emqx_mgmt_api_publish.erl b/apps/emqx_management/src/emqx_mgmt_api_publish.erl index bd214a87c..1678c56e0 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_publish.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_publish.erl @@ -16,7 +16,15 @@ -module(emqx_mgmt_api_publish). -include_lib("emqx/include/emqx.hrl"). +-include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-define(ALL_IS_WELL, 200). +-define(PARTIALLY_OK, 202). +-define(BAD_REQUEST, 400). +-define(DISPATCH_ERROR, 503). -behaviour(minirest_api). @@ -42,11 +50,14 @@ schema("/publish") -> #{ 'operationId' => publish, post => #{ - description => <<"Publish Message">>, + description => ?DESC(publish_api), tags => [<<"Publish">>], 'requestBody' => hoconsc:mk(hoconsc:ref(?MODULE, publish_message)), responses => #{ - 200 => hoconsc:mk(hoconsc:ref(?MODULE, publish_message_info)) + ?ALL_IS_WELL => hoconsc:mk(hoconsc:ref(?MODULE, publish_ok)), + ?PARTIALLY_OK => hoconsc:mk(hoconsc:ref(?MODULE, publish_error)), + ?BAD_REQUEST => bad_request_schema(), + ?DISPATCH_ERROR => hoconsc:mk(hoconsc:ref(?MODULE, publish_error)) } } }; @@ -54,44 +65,58 @@ schema("/publish/bulk") -> #{ 'operationId' => publish_batch, post => #{ - description => <<"Publish Messages">>, + description => ?DESC(publish_bulk_api), tags => [<<"Publish">>], 'requestBody' => hoconsc:mk(hoconsc:array(hoconsc:ref(?MODULE, publish_message)), #{}), responses => #{ - 200 => hoconsc:mk(hoconsc:array(hoconsc:ref(?MODULE, publish_message_info)), #{}) + ?ALL_IS_WELL => hoconsc:mk(hoconsc:array(hoconsc:ref(?MODULE, publish_ok)), #{}), + ?PARTIALLY_OK => hoconsc:mk( + hoconsc:array(hoconsc:ref(?MODULE, publish_error)), #{} + ), + ?BAD_REQUEST => bad_request_schema(), + ?DISPATCH_ERROR => hoconsc:mk( + hoconsc:array(hoconsc:ref(?MODULE, publish_error)), #{} + ) } } }. +bad_request_schema() -> + Union = hoconsc:union([ + hoconsc:ref(?MODULE, bad_request), + hoconsc:array(hoconsc:ref(?MODULE, publish_error)) + ]), + hoconsc:mk(Union, #{}). + fields(message) -> [ {topic, hoconsc:mk(binary(), #{ - desc => <<"Topic Name">>, + desc => ?DESC(topic_name), required => true, example => <<"api/example/topic">> })}, {qos, hoconsc:mk(emqx_schema:qos(), #{ - desc => <<"MQTT QoS">>, + desc => ?DESC(qos), required => false, default => 0 })}, {clientid, hoconsc:mk(binary(), #{ - desc => <<"From client ID">>, + desc => ?DESC(clientid), required => false, example => <<"api_example_client">> })}, {payload, hoconsc:mk(binary(), #{ - desc => <<"MQTT Payload">>, + desc => ?DESC(payload), required => true, example => <<"hello emqx api">> })}, {retain, hoconsc:mk(boolean(), #{ - desc => <<"MQTT Retain Message">>, + desc => ?DESC(retain), required => false, default => false })} @@ -100,53 +125,196 @@ fields(publish_message) -> [ {payload_encoding, hoconsc:mk(hoconsc:enum([plain, base64]), #{ - desc => <<"MQTT Payload Encoding, base64 or plain">>, + desc => ?DESC(payload_encoding), required => false, default => plain })} ] ++ fields(message); -fields(publish_message_info) -> +fields(publish_ok) -> [ {id, hoconsc:mk(binary(), #{ - desc => <<"Internal Message ID">> + desc => ?DESC(message_id) })} - ] ++ fields(message). 
+ ]; +fields(publish_error) -> + [ + {reason_code, + hoconsc:mk(integer(), #{ + desc => ?DESC(reason_code), + example => 16 + })}, + {message, + hoconsc:mk(binary(), #{ + desc => ?DESC(error_message), + example => <<"no_matching_subscribers">> + })} + ]; +fields(bad_request) -> + [ + {code, + hoconsc:mk(string(), #{ + desc => <<"BAD_REQUEST">> + })}, + {message, + hoconsc:mk(binary(), #{ + desc => ?DESC(error_message) + })} + ]. publish(post, #{body := Body}) -> case message(Body) of {ok, Message} -> - _ = emqx_mgmt:publish(Message), - {200, format_message(Message)}; - {error, R} -> - {400, 'BAD_REQUEST', to_binary(R)} + Res = emqx_mgmt:publish(Message), + publish_result_to_http_reply(Message, Res); + {error, Reason} -> + {?BAD_REQUEST, make_bad_req_reply(Reason)} end. publish_batch(post, #{body := Body}) -> case messages(Body) of {ok, Messages} -> - _ = [emqx_mgmt:publish(Message) || Message <- Messages], - {200, format_message(Messages)}; - {error, R} -> - {400, 'BAD_REQUEST', to_binary(R)} + ResList = lists:map( + fun(Message) -> + Res = emqx_mgmt:publish(Message), + publish_result_to_http_reply(Message, Res) + end, + Messages + ), + publish_results_to_http_reply(ResList); + {error, Reason} -> + {?BAD_REQUEST, make_bad_req_reply(Reason)} end. +make_bad_req_reply(invalid_topic_name) -> + make_publish_error_response(?RC_TOPIC_NAME_INVALID); +make_bad_req_reply(packet_too_large) -> + %% 0x95 RC_PACKET_TOO_LARGE is not a PUBACK reason code + %% This is why we use RC_QUOTA_EXCEEDED instead + make_publish_error_response(?RC_QUOTA_EXCEEDED, packet_too_large); +make_bad_req_reply(Reason) -> + make_publish_error_response(?RC_IMPLEMENTATION_SPECIFIC_ERROR, to_binary(Reason)). + +-spec is_ok_deliver({_NodeOrShare, _MatchedTopic, emqx_types:deliver_result()}) -> boolean(). +is_ok_deliver({_NodeOrShare, _MatchedTopic, ok}) -> true; +is_ok_deliver({_NodeOrShare, _MatchedTopic, {ok, _}}) -> true; +is_ok_deliver({_NodeOrShare, _MatchedTopic, {error, _}}) -> false. + +%% @hidden Map MQTT publish result reason code to HTTP status code. +%% MQTT reason code | Description | HTTP status code +%% 0 Success 200 +%% 16 No matching subscribers 202 +%% 128 Unspecified error 406 +%% 131 Implementation specific error 406 +%% 144 Topic Name invalid 400 +%% 151 Quota exceeded 400 +%% +%% %%%%%% Below error codes are not implemented so far %%%% +%% +%% If HTTP request passes HTTP authentication, it is considered trusted. +%% In the future, we may choose to check ACL for the provided MQTT Client ID +%% 135 Not authorized 401 +%% +%% %%%%%% Below error codes are not applicable %%%%%%% +%% +%% No user specified packet ID, so there should be no packet ID error +%% 145 Packet identifier is in use 400 +%% +%% No preceding payload format indicator to compare against. +%% Content-Type check should be done at HTTP layer but not here. +%% 153 Payload format invalid 400 +publish_result_to_http_reply(_Message, []) -> + %% matched no subscriber + {?PARTIALLY_OK, make_publish_error_response(?RC_NO_MATCHING_SUBSCRIBERS)}; +publish_result_to_http_reply(Message, PublishResult) -> + case lists:any(fun is_ok_deliver/1, PublishResult) of + true -> + %% delivered to at least one subscriber + OkBody = make_publish_response(Message), + {?ALL_IS_WELL, OkBody}; + false -> + %% this is quite unusual, matched, but failed to deliver + %% if this happens, the publish result log can be helpful + %% to idnetify the reason why publish failed + %% e.g. 
during emqx restart + ReasonString = <<"failed_to_dispatch">>, + ErrorBody = make_publish_error_response( + ?RC_IMPLEMENTATION_SPECIFIC_ERROR, ReasonString + ), + ?SLOG(warning, #{ + msg => ReasonString, + message_id => emqx_message:id(Message), + results => PublishResult + }), + {?DISPATCH_ERROR, ErrorBody} + end. + +%% @hidden Reply batch publish result. +%% 200 if all published OK. +%% 202 if at least one message matched no subscribers. +%% 503 for temp errors duing EMQX restart +publish_results_to_http_reply([_ | _] = ResList) -> + {Codes0, BodyL} = lists:unzip(ResList), + Codes = lists:usort(Codes0), + HasFailure = lists:member(?DISPATCH_ERROR, Codes), + All200 = (Codes =:= [?ALL_IS_WELL]), + Code = + case All200 of + true -> + %% All OK + ?ALL_IS_WELL; + false when not HasFailure -> + %% Partially OK + ?PARTIALLY_OK; + false -> + %% At least one failed + ?DISPATCH_ERROR + end, + {Code, BodyL}. + message(Map) -> + try + make_message(Map) + catch + throw:Reason -> + {error, Reason} + end. + +make_message(Map) -> Encoding = maps:get(<<"payload_encoding">>, Map, plain), - case encode_payload(Encoding, maps:get(<<"payload">>, Map)) of + case decode_payload(Encoding, maps:get(<<"payload">>, Map)) of {ok, Payload} -> From = maps:get(<<"clientid">>, Map, http_api), QoS = maps:get(<<"qos">>, Map, 0), Topic = maps:get(<<"topic">>, Map), Retain = maps:get(<<"retain">>, Map, false), - {ok, emqx_message:make(From, QoS, Topic, Payload, #{retain => Retain}, #{})}; + try + _ = emqx_topic:validate(name, Topic) + catch + error:_Reason -> + throw(invalid_topic_name) + end, + Message = emqx_message:make(From, QoS, Topic, Payload, #{retain => Retain}, #{}), + Size = emqx_message:estimate_size(Message), + (Size > size_limit()) andalso throw(packet_too_large), + {ok, Message}; {error, R} -> {error, R} end. -encode_payload(plain, Payload) -> +%% get the global packet size limit since HTTP API does not belong to any zone. +size_limit() -> + try + emqx_config:get([mqtt, max_packet_size]) + catch + _:_ -> + %% leave 1000 bytes for topic name etc. + ?MAX_PACKET_SIZE + end. + +decode_payload(plain, Payload) -> {ok, Payload}; -encode_payload(base64, Payload) -> +decode_payload(base64, Payload) -> try {ok, base64:decode(Payload)} catch @@ -154,6 +322,8 @@ encode_payload(base64, Payload) -> {error, {decode_base64_payload_failed, Payload}} end. +messages([]) -> + {errror, <<"empty_batch">>}; messages(List) -> messages(List, []). @@ -167,21 +337,23 @@ messages([MessageMap | List], Res) -> {error, R} end. -format_message(Messages) when is_list(Messages) -> - [format_message(Message) || Message <- Messages]; -format_message(#message{ - id = ID, qos = Qos, from = From, topic = Topic, payload = Payload, flags = Flags -}) -> +make_publish_response(#message{id = ID}) -> #{ - id => emqx_guid:to_hexstr(ID), - qos => Qos, - topic => Topic, - payload => Payload, - retain => maps:get(retain, Flags, false), - clientid => to_binary(From) + id => emqx_guid:to_hexstr(ID) }. -to_binary(Data) when is_binary(Data) -> - Data; -to_binary(Data) -> - list_to_binary(io_lib:format("~p", [Data])). +make_publish_error_response(ReasonCode) -> + make_publish_error_response(ReasonCode, emqx_reason_codes:name(ReasonCode)). + +make_publish_error_response(ReasonCode, Msg) -> + #{ + reason_code => ReasonCode, + message => to_binary(Msg) + }. + +to_binary(Atom) when is_atom(Atom) -> + atom_to_binary(Atom); +to_binary(Msg) when is_binary(Msg) -> + Msg; +to_binary(Term) -> + list_to_binary(io_lib:format("~0p", [Term])). 
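Note: the rewritten /publish handler above maps MQTT reason codes onto HTTP statuses: 200 when at least one subscriber takes delivery, 202 with reason code 16 when no subscriber matches, 400 for an invalid topic name or an oversized payload, and 503 when a matched message cannot be dispatched. A minimal client-side sketch of interpreting those replies follows; the module name, the dashboard port 18083, and the bearer-token handling are illustrative assumptions, not part of this changeset.

%% publish_example.erl -- hypothetical helper, not part of the patch
-module(publish_example).
-export([publish/3]).

%% Token is an already-issued API bearer token (string); Topic and Payload are
%% plain strings that need no JSON escaping (kept simple to stay dependency-free).
publish(Token, Topic, Payload) ->
    {ok, _} = application:ensure_all_started(inets),
    URL = "http://127.0.0.1:18083/api/v5/publish",
    Headers = [{"authorization", "Bearer " ++ Token}],
    Body = io_lib:format("{\"topic\":\"~s\",\"payload\":\"~s\",\"qos\":1}", [Topic, Payload]),
    {ok, {{_Vsn, Code, _Phrase}, _RespHeaders, RespBody}} =
        httpc:request(post, {URL, Headers, "application/json", lists:flatten(Body)}, [], []),
    case Code of
        200 -> {delivered, RespBody};               %% at least one subscriber got the message
        202 -> {no_matching_subscribers, RespBody}; %% reason_code 16 in the response body
        400 -> {bad_request, RespBody};             %% invalid topic name or packet too large
        503 -> {failed_to_dispatch, RespBody}       %% matched but could not be delivered
    end.

For example, publish_example:publish(Token, "t/1", "hello") returns {no_matching_subscribers, _} on a broker with no subscriptions on t/1.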
diff --git a/apps/emqx_management/src/emqx_mgmt_api_status.erl b/apps/emqx_management/src/emqx_mgmt_api_status.erl index e38ccfd69..ea91f1c03 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_status.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_status.erl @@ -15,11 +15,57 @@ %%-------------------------------------------------------------------- -module(emqx_mgmt_api_status). +-behaviour(minirest_api). + +-include_lib("hocon/include/hoconsc.hrl"). + +%% minirest API +-export([api_spec/0, paths/0, schema/1]). + +-export([get_status/2]). + -export([ init/2, path/0 ]). +-define(TAGS, [<<"Status">>]). + +%%-------------------------------------------------------------------- +%% minirest API and schema +%%-------------------------------------------------------------------- + +api_spec() -> + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). + +paths() -> + ["/status"]. + +schema("/status") -> + #{ + 'operationId' => get_status, + get => #{ + description => ?DESC(get_status_api), + tags => ?TAGS, + security => [], + responses => #{ + 200 => ?DESC(get_status_response200), + 503 => ?DESC(get_status_response503) + } + } + }. + +%%-------------------------------------------------------------------- +%% non-minirest (cowboy) API +%%-------------------------------------------------------------------- + +%% Note: Because swagger now requires an HTTP prefix (e.g. /api/v5), +%% but the `/status` does not require this fixed prefix. +%% +%% Changing the swagger framework was too big, so we implemented the `/status` +%% in a simple way first +%% +%% XXX: So the HTTP API docs generated by swagger can not find this API now path() -> "/status". @@ -32,13 +78,25 @@ init(Req0, State) -> %% API Handler funcs %%-------------------------------------------------------------------- +get_status(get, _Params) -> + running_status(). + running_status() -> case emqx_dashboard_listener:is_ready(timer:seconds(20)) of true -> BrokerStatus = broker_status(), AppStatus = application_status(), Body = io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), BrokerStatus, AppStatus]), - {200, #{<<"content-type">> => <<"text/plain">>}, list_to_binary(Body)}; + StatusCode = + case AppStatus of + running -> 200; + not_running -> 503 + end, + Headers = #{ + <<"content-type">> => <<"text/plain">>, + <<"retry-after">> => <<"15">> + }, + {StatusCode, Headers, list_to_binary(Body)}; false -> {503, #{<<"retry-after">> => <<"15">>}, <<>>} end. diff --git a/apps/emqx_management/src/emqx_mgmt_api_subscriptions.erl b/apps/emqx_management/src/emqx_mgmt_api_subscriptions.erl index 470242cfd..03b833e84 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_subscriptions.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_subscriptions.erl @@ -145,13 +145,18 @@ subscriptions(get, #{query_string := QString}) -> ?QUERY_FUN ); Node0 -> - emqx_mgmt_api:node_query( - binary_to_atom(Node0, utf8), - QString, - ?SUBS_QTABLE, - ?SUBS_QSCHEMA, - ?QUERY_FUN - ) + case emqx_misc:safe_to_existing_atom(Node0) of + {ok, Node1} -> + emqx_mgmt_api:node_query( + Node1, + QString, + ?SUBS_QTABLE, + ?SUBS_QSCHEMA, + ?QUERY_FUN + ); + {error, _} -> + {error, Node0, {badrpc, <<"invalid node">>}} + end end, case Response of {error, page_limit_invalid} -> diff --git a/apps/emqx_management/src/emqx_mgmt_api_sys.erl b/apps/emqx_management/src/emqx_mgmt_api_sys.erl index 43fd9ee14..c7aeb1d95 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_sys.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_sys.erl @@ -31,7 +31,7 @@ -export([sys/2]). 
--define(TAGS, [<<"System topics">>]). +-define(TAGS, [<<"System Topics">>]). namespace() -> "sys". diff --git a/apps/emqx_management/src/emqx_mgmt_api_trace.erl b/apps/emqx_management/src/emqx_mgmt_api_trace.erl index 7a9ae5710..587257688 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_trace.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_trace.erl @@ -34,7 +34,8 @@ delete_trace/2, update_trace/2, download_trace_log/2, - stream_log_file/2 + stream_log_file/2, + log_file_detail/2 ]). -export([validate_name/1]). @@ -55,7 +56,14 @@ api_spec() -> emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true, translate_body => true}). paths() -> - ["/trace", "/trace/:name/stop", "/trace/:name/download", "/trace/:name/log", "/trace/:name"]. + [ + "/trace", + "/trace/:name/stop", + "/trace/:name/download", + "/trace/:name/log", + "/trace/:name/log_detail", + "/trace/:name" + ]. schema("/trace") -> #{ @@ -95,7 +103,7 @@ schema("/trace/:name") -> #{ 'operationId' => delete_trace, delete => #{ - description => "Delete trace by name", + description => "Delete specified trace", tags => ?TAGS, parameters => [hoconsc:ref(name)], responses => #{ @@ -136,6 +144,19 @@ schema("/trace/:name/download") -> } } }; +schema("/trace/:name/log_detail") -> + #{ + 'operationId' => log_file_detail, + get => #{ + description => "get trace log file's metadata, such as size, last update time", + tags => ?TAGS, + parameters => [hoconsc:ref(name)], + responses => #{ + 200 => hoconsc:array(hoconsc:ref(log_file_detail)), + 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Trace Name Not Found">>) + } + } + }; schema("/trace/:name/log") -> #{ 'operationId' => stream_log_file, @@ -158,6 +179,13 @@ schema("/trace/:name/log") -> } }. +fields(log_file_detail) -> + fields(node) ++ + [ + {size, hoconsc:mk(integer(), #{desc => "file size"})}, + {mtime, + hoconsc:mk(integer(), #{desc => "the modification and last access times of a file"})} + ]; fields(trace) -> [ {name, @@ -265,7 +293,8 @@ fields(node) -> #{ desc => "Node name", in => query, - required => false + required => false, + example => "emqx@127.0.0.1" } )} ]; @@ -323,7 +352,7 @@ trace(get, _Params) -> emqx_trace:format(List0) ), Nodes = mria_mnesia:running_nodes(), - TraceSize = wrap_rpc(emqx_mgmt_trace_proto_v1:get_trace_size(Nodes)), + TraceSize = wrap_rpc(emqx_mgmt_trace_proto_v2:get_trace_size(Nodes)), AllFileSize = lists:foldl(fun(F, Acc) -> maps:merge(Acc, F) end, #{}, TraceSize), Now = erlang:system_time(second), Traces = @@ -471,19 +500,43 @@ group_trace_file(ZipDir, TraceLog, TraceFiles) -> ). collect_trace_file(Nodes, TraceLog) -> - wrap_rpc(emqx_mgmt_trace_proto_v1:trace_file(Nodes, TraceLog)). + wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog)). + +collect_trace_file_detail(TraceLog) -> + Nodes = mria_mnesia:running_nodes(), + wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file_detail(Nodes, TraceLog)). wrap_rpc({GoodRes, BadNodes}) -> BadNodes =/= [] andalso ?SLOG(error, #{msg => "rpc_call_failed", bad_nodes => BadNodes}), GoodRes. +log_file_detail(get, #{bindings := #{name := Name}}) -> + case emqx_trace:get_trace_filename(Name) of + {ok, TraceLog} -> + TraceFiles = collect_trace_file_detail(TraceLog), + {200, group_trace_file_detail(TraceFiles)}; + {error, not_found} -> + ?NOT_FOUND(Name) + end. + +group_trace_file_detail(TraceLogDetail) -> + GroupFun = + fun + ({ok, Info}, Acc) -> + [Info | Acc]; + ({error, Error}, Acc) -> + ?SLOG(error, Error#{msg => "read_trace_file_failed"}), + Acc + end, + lists:foldl(GroupFun, [], TraceLogDetail). 
+ stream_log_file(get, #{bindings := #{name := Name}, query_string := Query}) -> Position = maps:get(<<"position">>, Query, 0), Bytes = maps:get(<<"bytes">>, Query, 1000), case parse_node(Query, node()) of {ok, Node} -> - case emqx_mgmt_trace_proto_v1:read_trace_file(Node, Name, Position, Bytes) of + case emqx_mgmt_trace_proto_v2:read_trace_file(Node, Name, Position, Bytes) of {ok, Bin} -> Meta = #{<<"position">> => Position + byte_size(Bin), <<"bytes">> => Bytes}, {200, #{meta => Meta, items => Bin}}; diff --git a/apps/emqx_management/src/emqx_mgmt_cli.erl b/apps/emqx_management/src/emqx_mgmt_cli.erl index 60b2f3b15..6a0f8e8be 100644 --- a/apps/emqx_management/src/emqx_mgmt_cli.erl +++ b/apps/emqx_management/src/emqx_mgmt_cli.erl @@ -356,15 +356,26 @@ mnesia(_) -> %% @doc Logger Command log(["set-level", Level]) -> - case emqx_logger:set_log_level(list_to_atom(Level)) of - ok -> emqx_ctl:print("~ts~n", [Level]); - Error -> emqx_ctl:print("[error] set overall log level failed: ~p~n", [Error]) + case emqx_misc:safe_to_existing_atom(Level) of + {ok, Level1} -> + case emqx_logger:set_log_level(Level1) of + ok -> emqx_ctl:print("~ts~n", [Level]); + Error -> emqx_ctl:print("[error] set overall log level failed: ~p~n", [Error]) + end; + _ -> + emqx_ctl:print("[error] invalid level: ~p~n", [Level]) end; log(["primary-level"]) -> Level = emqx_logger:get_primary_log_level(), emqx_ctl:print("~ts~n", [Level]); log(["primary-level", Level]) -> - _ = emqx_logger:set_primary_log_level(list_to_atom(Level)), + case emqx_misc:safe_to_existing_atom(Level) of + {ok, Level1} -> + _ = emqx_logger:set_primary_log_level(Level1), + ok; + _ -> + emqx_ctl:print("[error] invalid level: ~p~n", [Level]) + end, emqx_ctl:print("~ts~n", [emqx_logger:get_primary_log_level()]); log(["handlers", "list"]) -> _ = [ @@ -381,26 +392,50 @@ log(["handlers", "list"]) -> ], ok; log(["handlers", "start", HandlerId]) -> - case emqx_logger:start_log_handler(list_to_atom(HandlerId)) of - ok -> - emqx_ctl:print("log handler ~ts started~n", [HandlerId]); - {error, Reason} -> - emqx_ctl:print("[error] failed to start log handler ~ts: ~p~n", [HandlerId, Reason]) + case emqx_misc:safe_to_existing_atom(HandlerId) of + {ok, HandlerId1} -> + case emqx_logger:start_log_handler(HandlerId1) of + ok -> + emqx_ctl:print("log handler ~ts started~n", [HandlerId]); + {error, Reason} -> + emqx_ctl:print("[error] failed to start log handler ~ts: ~p~n", [ + HandlerId, Reason + ]) + end; + _ -> + emqx_ctl:print("[error] invalid handler:~ts~n", [HandlerId]) end; log(["handlers", "stop", HandlerId]) -> - case emqx_logger:stop_log_handler(list_to_atom(HandlerId)) of - ok -> - emqx_ctl:print("log handler ~ts stopped~n", [HandlerId]); - {error, Reason} -> - emqx_ctl:print("[error] failed to stop log handler ~ts: ~p~n", [HandlerId, Reason]) + case emqx_misc:safe_to_existing_atom(HandlerId) of + {ok, HandlerId1} -> + case emqx_logger:stop_log_handler(HandlerId1) of + ok -> + emqx_ctl:print("log handler ~ts stopped~n", [HandlerId1]); + {error, Reason} -> + emqx_ctl:print("[error] failed to stop log handler ~ts: ~p~n", [ + HandlerId1, Reason + ]) + end; + _ -> + emqx_ctl:print("[error] invalid handler:~ts~n", [HandlerId]) end; log(["handlers", "set-level", HandlerId, Level]) -> - case emqx_logger:set_log_handler_level(list_to_atom(HandlerId), list_to_atom(Level)) of - ok -> - #{level := NewLevel} = emqx_logger:get_log_handler(list_to_atom(HandlerId)), - emqx_ctl:print("~ts~n", [NewLevel]); - {error, Error} -> - emqx_ctl:print("[error] ~p~n", [Error]) + case 
emqx_misc:safe_to_existing_atom(HandlerId) of + {ok, HandlerId1} -> + case emqx_misc:safe_to_existing_atom(Level) of + {ok, Level1} -> + case emqx_logger:set_log_handler_level(HandlerId1, Level1) of + ok -> + #{level := NewLevel} = emqx_logger:get_log_handler(HandlerId1), + emqx_ctl:print("~ts~n", [NewLevel]); + {error, Error} -> + emqx_ctl:print("[error] ~p~n", [Error]) + end; + _ -> + emqx_ctl:print("[error] invalid level:~p~n", [Level]) + end; + _ -> + emqx_ctl:print("[error] invalid handler:~ts~n", [HandlerId]) end; log(_) -> emqx_ctl:usage( @@ -593,25 +628,40 @@ listeners([]) -> emqx_listeners:list() ); listeners(["stop", ListenerId]) -> - case emqx_listeners:stop_listener(list_to_atom(ListenerId)) of - ok -> - emqx_ctl:print("Stop ~ts listener successfully.~n", [ListenerId]); - {error, Error} -> - emqx_ctl:print("Failed to stop ~ts listener: ~0p~n", [ListenerId, Error]) + case emqx_misc:safe_to_existing_atom(ListenerId) of + {ok, ListenerId1} -> + case emqx_listeners:stop_listener(ListenerId1) of + ok -> + emqx_ctl:print("Stop ~ts listener successfully.~n", [ListenerId]); + {error, Error} -> + emqx_ctl:print("Failed to stop ~ts listener: ~0p~n", [ListenerId, Error]) + end; + _ -> + emqx_ctl:print("Invalid listener: ~0p~n", [ListenerId]) end; listeners(["start", ListenerId]) -> - case emqx_listeners:start_listener(list_to_atom(ListenerId)) of - ok -> - emqx_ctl:print("Started ~ts listener successfully.~n", [ListenerId]); - {error, Error} -> - emqx_ctl:print("Failed to start ~ts listener: ~0p~n", [ListenerId, Error]) + case emqx_misc:safe_to_existing_atom(ListenerId) of + {ok, ListenerId1} -> + case emqx_listeners:start_listener(ListenerId1) of + ok -> + emqx_ctl:print("Started ~ts listener successfully.~n", [ListenerId]); + {error, Error} -> + emqx_ctl:print("Failed to start ~ts listener: ~0p~n", [ListenerId, Error]) + end; + _ -> + emqx_ctl:print("Invalid listener: ~0p~n", [ListenerId]) end; listeners(["restart", ListenerId]) -> - case emqx_listeners:restart_listener(list_to_atom(ListenerId)) of - ok -> - emqx_ctl:print("Restarted ~ts listener successfully.~n", [ListenerId]); - {error, Error} -> - emqx_ctl:print("Failed to restart ~ts listener: ~0p~n", [ListenerId, Error]) + case emqx_misc:safe_to_existing_atom(ListenerId) of + {ok, ListenerId1} -> + case emqx_listeners:restart_listener(ListenerId1) of + ok -> + emqx_ctl:print("Restarted ~ts listener successfully.~n", [ListenerId]); + {error, Error} -> + emqx_ctl:print("Failed to restart ~ts listener: ~0p~n", [ListenerId, Error]) + end; + _ -> + emqx_ctl:print("Invalid listener: ~0p~n", [ListenerId]) end; listeners(_) -> emqx_ctl:usage([ diff --git a/apps/emqx_management/src/proto/emqx_management_proto_v3.erl b/apps/emqx_management/src/proto/emqx_management_proto_v3.erl new file mode 100644 index 000000000..937a948e5 --- /dev/null +++ b/apps/emqx_management/src/proto/emqx_management_proto_v3.erl @@ -0,0 +1,80 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_management_proto_v3). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + + node_info/1, + broker_info/1, + list_subscriptions/1, + + list_listeners/1, + subscribe/3, + unsubscribe/3, + unsubscribe_batch/3, + + call_client/3, + + get_full_config/1 +]). + +-include_lib("emqx/include/bpapi.hrl"). + +introduced_in() -> + "5.0.9". + +-spec unsubscribe_batch(node(), emqx_types:clientid(), [emqx_types:topic()]) -> + {unsubscribe, _} | {error, _} | {badrpc, _}. +unsubscribe_batch(Node, ClientId, Topics) -> + rpc:call(Node, emqx_mgmt, do_unsubscribe_batch, [ClientId, Topics]). + +-spec node_info([node()]) -> emqx_rpc:erpc_multicall(map()). +node_info(Nodes) -> + erpc:multicall(Nodes, emqx_mgmt, node_info, [], 30000). + +-spec broker_info([node()]) -> emqx_rpc:erpc_multicall(map()). +broker_info(Nodes) -> + erpc:multicall(Nodes, emqx_mgmt, broker_info, [], 30000). + +-spec list_subscriptions(node()) -> [map()] | {badrpc, _}. +list_subscriptions(Node) -> + rpc:call(Node, emqx_mgmt, do_list_subscriptions, []). + +-spec list_listeners(node()) -> map() | {badrpc, _}. +list_listeners(Node) -> + rpc:call(Node, emqx_mgmt_api_listeners, do_list_listeners, []). + +-spec subscribe(node(), emqx_types:clientid(), emqx_types:topic_filters()) -> + {subscribe, _} | {error, atom()} | {badrpc, _}. +subscribe(Node, ClientId, TopicTables) -> + rpc:call(Node, emqx_mgmt, do_subscribe, [ClientId, TopicTables]). + +-spec unsubscribe(node(), emqx_types:clientid(), emqx_types:topic()) -> + {unsubscribe, _} | {error, _} | {badrpc, _}. +unsubscribe(Node, ClientId, Topic) -> + rpc:call(Node, emqx_mgmt, do_unsubscribe, [ClientId, Topic]). + +-spec call_client(node(), emqx_types:clientid(), term()) -> term(). +call_client(Node, ClientId, Req) -> + rpc:call(Node, emqx_mgmt, do_call_client, [ClientId, Req]). + +-spec get_full_config(node()) -> map() | list() | {badrpc, _}. +get_full_config(Node) -> + rpc:call(Node, emqx_mgmt_api_configs, get_full_config, []). diff --git a/apps/emqx_management/src/proto/emqx_mgmt_trace_proto_v2.erl b/apps/emqx_management/src/proto/emqx_mgmt_trace_proto_v2.erl new file mode 100644 index 000000000..8502b81d9 --- /dev/null +++ b/apps/emqx_management/src/proto/emqx_mgmt_trace_proto_v2.erl @@ -0,0 +1,66 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_mgmt_trace_proto_v2). + +-behaviour(emqx_bpapi). + +-export([ + introduced_in/0, + + trace_file/2, + trace_file_detail/2, + get_trace_size/1, + read_trace_file/4 +]). + +-include_lib("emqx/include/bpapi.hrl"). + +introduced_in() -> + "5.0.10". 
+ +-spec get_trace_size([node()]) -> + emqx_rpc:multicall_result(#{{node(), file:name_all()} => non_neg_integer()}). +get_trace_size(Nodes) -> + rpc:multicall(Nodes, emqx_mgmt_api_trace, get_trace_size, [], 30000). + +-spec trace_file([node()], file:name_all()) -> + emqx_rpc:multicall_result( + {ok, Node :: list(), Binary :: binary()} + | {error, Node :: list(), Reason :: term()} + ). +trace_file(Nodes, File) -> + rpc:multicall(Nodes, emqx_trace, trace_file, [File], 60000). + +-spec trace_file_detail([node()], file:name_all()) -> + emqx_rpc:multicall_result( + {ok, #{ + size => non_neg_integer(), + mtime => file:date_time() | non_neg_integer() | 'undefined', + node => atom() + }} + | {error, #{reason => term(), node => atom(), file => file:name_all()}} + ). +trace_file_detail(Nodes, File) -> + rpc:multicall(Nodes, emqx_trace, trace_file_detail, [File], 25000). + +-spec read_trace_file(node(), binary(), non_neg_integer(), non_neg_integer()) -> + {ok, binary()} + | {error, _} + | {eof, non_neg_integer()} + | {badrpc, _}. +read_trace_file(Node, Name, Position, Limit) -> + rpc:call(Node, emqx_mgmt_api_trace, read_trace_file, [Name, Position, Limit], 15000). diff --git a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl index 97939bbaf..83f68c5fe 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl @@ -30,6 +30,16 @@ init_per_suite(Config) -> end_per_suite(_) -> emqx_mgmt_api_test_util:end_suite([emqx_conf]). +init_per_testcase(TestCase = t_configs_node, Config) -> + ?MODULE:TestCase({'init', Config}); +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(TestCase = t_configs_node, Config) -> + ?MODULE:TestCase({'end', Config}); +end_per_testcase(_TestCase, Config) -> + Config. + t_get(_Config) -> {ok, Configs} = get_configs(), maps:map( @@ -188,6 +198,37 @@ t_dashboard(_Config) -> timer:sleep(1000), ok. +t_configs_node({'init', Config}) -> + Node = node(), + meck:expect(mria_mnesia, running_nodes, fun() -> [Node, bad_node, other_node] end), + meck:expect( + emqx_management_proto_v2, + get_full_config, + fun + (Node0) when Node0 =:= Node -> <<"\"self\"">>; + (other_node) -> <<"\"other\"">>; + (bad_node) -> {badrpc, bad} + end + ), + Config; +t_configs_node({'end', _}) -> + meck:unload([mria_mnesia, emqx_management_proto_v2]); +t_configs_node(_) -> + Node = atom_to_list(node()), + + ?assertEqual({ok, <<"self">>}, get_configs(Node, #{return_body => true})), + ?assertEqual({ok, <<"other">>}, get_configs("other_node", #{return_body => true})), + + {ExpType, ExpRes} = get_configs("unknown_node", #{return_body => true}), + ?assertEqual(error, ExpType), + ?assertMatch({{_, 404, _}, _, _}, ExpRes), + {_, _, Body} = ExpRes, + ?assertMatch(#{<<"code">> := <<"NOT_FOUND">>}, emqx_json:decode(Body, [return_maps])), + + ?assertMatch({error, {_, 500, _}}, get_configs("bad_node")). + +%% Helpers + get_config(Name) -> Path = emqx_mgmt_api_test_util:api_path(["configs", Name]), case emqx_mgmt_api_test_util:request_api(get, Path) of @@ -198,8 +239,19 @@ get_config(Name) -> end. get_configs() -> - Path = emqx_mgmt_api_test_util:api_path(["configs"]), - case emqx_mgmt_api_test_util:request_api(get, Path) of + get_configs([], #{}). + +get_configs(Node) -> + get_configs(Node, #{}). 
+ +get_configs(Node, Opts) -> + Path = + case Node of + [] -> ["configs"]; + _ -> ["configs?node=" ++ Node] + end, + URI = emqx_mgmt_api_test_util:api_path(Path), + case emqx_mgmt_api_test_util:request_api(get, URI, [], [], [], Opts) of {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; Error -> Error end. diff --git a/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl index c2330dc48..73b796afe 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl @@ -115,3 +115,51 @@ t_node_metrics_api(_) -> {error, {_, 400, _}}, emqx_mgmt_api_test_util:request_api(get, BadNodePath) ). + +t_multiple_nodes_api(_) -> + net_kernel:start(['node_api@127.0.0.1', longnames]), + ct:timetrap({seconds, 120}), + snabbkaffe:fix_ct_logging(), + Seq1 = list_to_atom(atom_to_list(?MODULE) ++ "1"), + Seq2 = list_to_atom(atom_to_list(?MODULE) ++ "2"), + Cluster = [{Name, Opts}, {Name1, Opts1}] = cluster([{core, Seq1}, {core, Seq2}]), + ct:pal("Starting ~p", [Cluster]), + Node1 = emqx_common_test_helpers:start_slave(Name, Opts), + Node2 = emqx_common_test_helpers:start_slave(Name1, Opts1), + try + {200, NodesList} = rpc:call(Node1, emqx_mgmt_api_nodes, nodes, [get, #{}]), + All = [Node1, Node2], + lists:map( + fun(N) -> + N1 = maps:get(node, N), + ?assertEqual(true, lists:member(N1, All)) + end, + NodesList + ), + ?assertEqual(2, length(NodesList)), + + {200, Node11} = rpc:call(Node1, emqx_mgmt_api_nodes, node, [ + get, #{bindings => #{node => Node1}} + ]), + ?assertMatch(#{node := Node1}, Node11) + after + emqx_common_test_helpers:stop_slave(Node1), + emqx_common_test_helpers:stop_slave(Node2) + end, + ok. + +cluster(Specs) -> + Env = [{emqx, boot_modules, []}], + emqx_common_test_helpers:emqx_cluster(Specs, [ + {env, Env}, + {apps, [emqx_conf]}, + {load_schema, false}, + {join_to, true}, + {env_handler, fun + (emqx) -> + application:set_env(emqx, boot_modules, []), + ok; + (_) -> + ok + end} + ]). diff --git a/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl index 4d17451c9..0ebaf7195 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl @@ -19,6 +19,7 @@ -compile(nowarn_export_all). -include_lib("eunit/include/eunit.hrl"). +-include_lib("emqx/include/emqx_mqtt.hrl"). -define(CLIENTID, <<"api_clientid">>). -define(USERNAME, <<"api_username">>). @@ -36,6 +37,16 @@ init_per_suite(Config) -> end_per_suite(_) -> emqx_mgmt_api_test_util:end_suite(). +init_per_testcase(Case, Config) -> + ?MODULE:Case({init, Config}). + +end_per_testcase(Case, Config) -> + ?MODULE:Case({'end', Config}). + +t_publish_api({init, Config}) -> + Config; +t_publish_api({'end', _Config}) -> + ok; t_publish_api(_) -> {ok, Client} = emqtt:start_link(#{ username => <<"api_username">>, clientid => <<"api_clientid">> @@ -47,10 +58,114 @@ t_publish_api(_) -> Path = emqx_mgmt_api_test_util:api_path(["publish"]), Auth = emqx_mgmt_api_test_util:auth_header_(), Body = #{topic => ?TOPIC1, payload => Payload}, - {ok, _} = emqx_mgmt_api_test_util:request_api(post, Path, "", Auth, Body), - ?assertEqual(receive_assert(?TOPIC1, 0, Payload), ok), - emqtt:disconnect(Client). 
+ {ok, Response} = emqx_mgmt_api_test_util:request_api(post, Path, "", Auth, Body), + ResponseMap = decode_json(Response), + ?assertEqual([<<"id">>], lists:sort(maps:keys(ResponseMap))), + ?assertEqual(ok, receive_assert(?TOPIC1, 0, Payload)), + emqtt:stop(Client). +t_publish_no_subscriber({init, Config}) -> + Config; +t_publish_no_subscriber({'end', _Config}) -> + ok; +t_publish_no_subscriber(_) -> + Payload = <<"hello">>, + Path = emqx_mgmt_api_test_util:api_path(["publish"]), + Auth = emqx_mgmt_api_test_util:auth_header_(), + Body = #{topic => ?TOPIC1, payload => Payload}, + {ok, Response} = emqx_mgmt_api_test_util:request_api(post, Path, "", Auth, Body), + ResponseMap = decode_json(Response), + ?assertEqual([<<"message">>, <<"reason_code">>], lists:sort(maps:keys(ResponseMap))), + ?assertMatch(#{<<"reason_code">> := ?RC_NO_MATCHING_SUBSCRIBERS}, ResponseMap), + ok. + +t_publish_bad_topic({init, Config}) -> + Config; +t_publish_bad_topic({'end', _Config}) -> + ok; +t_publish_bad_topic(_) -> + Payload = <<"hello">>, + Path = emqx_mgmt_api_test_util:api_path(["publish"]), + Auth = emqx_mgmt_api_test_util:auth_header_(), + Body = #{topic => <<"not/a+/valid/topic">>, payload => Payload}, + ?assertMatch( + {error, {_, 400, _}}, emqx_mgmt_api_test_util:request_api(post, Path, "", Auth, Body) + ). + +t_publish_bad_base64({init, Config}) -> + Config; +t_publish_bad_base64({'end', _Config}) -> + ok; +t_publish_bad_base64(_) -> + %% not a valid base64 + Payload = <<"hello">>, + Path = emqx_mgmt_api_test_util:api_path(["publish"]), + Auth = emqx_mgmt_api_test_util:auth_header_(), + Body = #{ + topic => <<"not/a+/valid/topic">>, payload => Payload, payload_encoding => <<"base64">> + }, + ?assertMatch( + {error, {_, 400, _}}, emqx_mgmt_api_test_util:request_api(post, Path, "", Auth, Body) + ). + +t_publish_too_large({init, Config}) -> + MaxPacketSize = 100, + meck:new(emqx_config, [no_link, passthrough, no_history]), + meck:expect(emqx_config, get, fun + ([mqtt, max_packet_size]) -> + MaxPacketSize; + (Other) -> + meck:passthrough(Other) + end), + [{max_packet_size, MaxPacketSize} | Config]; +t_publish_too_large({'end', _Config}) -> + meck:unload(emqx_config), + ok; +t_publish_too_large(Config) -> + MaxPacketSize = proplists:get_value(max_packet_size, Config), + Payload = lists:duplicate(MaxPacketSize, $0), + Path = emqx_mgmt_api_test_util:api_path(["publish"]), + Auth = emqx_mgmt_api_test_util:auth_header_(), + Body = #{topic => <<"random/topic">>, payload => Payload}, + {error, {Summary, _Headers, ResponseBody}} = + emqx_mgmt_api_test_util:request_api( + post, + Path, + "", + Auth, + Body, + #{return_body => true} + ), + ?assertMatch({_, 400, _}, Summary), + ?assertMatch( + #{ + <<"reason_code">> := ?RC_QUOTA_EXCEEDED, + <<"message">> := <<"packet_too_large">> + }, + decode_json(ResponseBody) + ), + ok. + +t_publish_bad_topic_bulk({init, Config}) -> + Config; +t_publish_bad_topic_bulk({'end', _Config}) -> + ok; +t_publish_bad_topic_bulk(_Config) -> + Payload = <<"hello">>, + Path = emqx_mgmt_api_test_util:api_path(["publish", "bulk"]), + Auth = emqx_mgmt_api_test_util:auth_header_(), + Body = [ + #{topic => <<"not/a+/valid/topic">>, payload => Payload}, + #{topic => <<"good/topic">>, payload => Payload} + ], + ?assertMatch( + {error, {_, 400, _}}, emqx_mgmt_api_test_util:request_api(post, Path, "", Auth, Body) + ). 
+ +t_publish_bulk_api({init, Config}) -> + Config; +t_publish_bulk_api({'end', _Config}) -> + ok; t_publish_bulk_api(_) -> {ok, Client} = emqtt:start_link(#{ username => <<"api_username">>, clientid => <<"api_clientid">> @@ -61,13 +176,135 @@ t_publish_bulk_api(_) -> Payload = <<"hello">>, Path = emqx_mgmt_api_test_util:api_path(["publish", "bulk"]), Auth = emqx_mgmt_api_test_util:auth_header_(), - Body = [#{topic => ?TOPIC1, payload => Payload}, #{topic => ?TOPIC2, payload => Payload}], + Body = [ + #{ + topic => ?TOPIC1, + payload => Payload, + payload_encoding => plain + }, + #{ + topic => ?TOPIC2, + payload => base64:encode(Payload), + payload_encoding => base64 + } + ], {ok, Response} = emqx_mgmt_api_test_util:request_api(post, Path, "", Auth, Body), - ResponseMap = emqx_json:decode(Response, [return_maps]), - ?assertEqual(2, erlang:length(ResponseMap)), - ?assertEqual(receive_assert(?TOPIC1, 0, Payload), ok), - ?assertEqual(receive_assert(?TOPIC2, 0, Payload), ok), - emqtt:disconnect(Client). + ResponseList = decode_json(Response), + ?assertEqual(2, erlang:length(ResponseList)), + lists:foreach( + fun(ResponseMap) -> + ?assertMatch( + [<<"id">>], lists:sort(maps:keys(ResponseMap)) + ) + end, + ResponseList + ), + ?assertEqual(ok, receive_assert(?TOPIC1, 0, Payload)), + ?assertEqual(ok, receive_assert(?TOPIC2, 0, Payload)), + emqtt:stop(Client). + +t_publish_no_subscriber_bulk({init, Config}) -> + Config; +t_publish_no_subscriber_bulk({'end', _Config}) -> + ok; +t_publish_no_subscriber_bulk(_) -> + {ok, Client} = emqtt:start_link(#{ + username => <<"api_username">>, clientid => <<"api_clientid">> + }), + {ok, _} = emqtt:connect(Client), + {ok, _, [0]} = emqtt:subscribe(Client, ?TOPIC1), + {ok, _, [0]} = emqtt:subscribe(Client, ?TOPIC2), + Payload = <<"hello">>, + Path = emqx_mgmt_api_test_util:api_path(["publish", "bulk"]), + Auth = emqx_mgmt_api_test_util:auth_header_(), + Body = [ + #{topic => ?TOPIC1, payload => Payload}, + #{topic => ?TOPIC2, payload => Payload}, + #{topic => <<"no/subscrbier/topic">>, payload => Payload} + ], + {ok, Response} = emqx_mgmt_api_test_util:request_api(post, Path, "", Auth, Body), + ResponseList = decode_json(Response), + ?assertMatch( + [ + #{<<"id">> := _}, + #{<<"id">> := _}, + #{<<"message">> := <<"no_matching_subscribers">>} + ], + ResponseList + ), + ?assertEqual(ok, receive_assert(?TOPIC1, 0, Payload)), + ?assertEqual(ok, receive_assert(?TOPIC2, 0, Payload)), + emqtt:stop(Client). + +t_publish_bulk_dispatch_one_message_invalid_topic({init, Config}) -> + Config; +t_publish_bulk_dispatch_one_message_invalid_topic({'end', _Config}) -> + ok; +t_publish_bulk_dispatch_one_message_invalid_topic(Config) when is_list(Config) -> + Payload = <<"hello">>, + Path = emqx_mgmt_api_test_util:api_path(["publish", "bulk"]), + Auth = emqx_mgmt_api_test_util:auth_header_(), + Body = [ + #{topic => ?TOPIC1, payload => Payload}, + #{topic => ?TOPIC2, payload => Payload}, + #{topic => <<"bad/#/topic">>, payload => Payload} + ], + {error, {Summary, _Headers, ResponseBody}} = + emqx_mgmt_api_test_util:request_api( + post, + Path, + "", + Auth, + Body, + #{return_body => true} + ), + ?assertMatch({_, 400, _}, Summary), + ?assertMatch( + #{<<"reason_code">> := ?RC_TOPIC_NAME_INVALID}, + decode_json(ResponseBody) + ). 
+ +t_publish_bulk_dispatch_failure({init, Config}) -> + meck:new(emqx, [no_link, passthrough, no_history]), + meck:expect(emqx, is_running, fun() -> false end), + Config; +t_publish_bulk_dispatch_failure({'end', _Config}) -> + meck:unload(emqx), + ok; +t_publish_bulk_dispatch_failure(Config) when is_list(Config) -> + {ok, Client} = emqtt:start_link(#{ + username => <<"api_username">>, clientid => <<"api_clientid">> + }), + {ok, _} = emqtt:connect(Client), + {ok, _, [0]} = emqtt:subscribe(Client, ?TOPIC1), + {ok, _, [0]} = emqtt:subscribe(Client, ?TOPIC2), + Payload = <<"hello">>, + Path = emqx_mgmt_api_test_util:api_path(["publish", "bulk"]), + Auth = emqx_mgmt_api_test_util:auth_header_(), + Body = [ + #{topic => ?TOPIC1, payload => Payload}, + #{topic => ?TOPIC2, payload => Payload}, + #{topic => <<"no/subscrbier/topic">>, payload => Payload} + ], + {error, {Summary, _Headers, ResponseBody}} = + emqx_mgmt_api_test_util:request_api( + post, + Path, + "", + Auth, + Body, + #{return_body => true} + ), + ?assertMatch({_, 503, _}, Summary), + ?assertMatch( + [ + #{<<"reason_code">> := ?RC_IMPLEMENTATION_SPECIFIC_ERROR}, + #{<<"reason_code">> := ?RC_IMPLEMENTATION_SPECIFIC_ERROR}, + #{<<"reason_code">> := ?RC_NO_MATCHING_SUBSCRIBERS} + ], + decode_json(ResponseBody) + ), + emqtt:stop(Client). receive_assert(Topic, Qos, Payload) -> receive @@ -82,3 +319,6 @@ receive_assert(Topic, Qos, Payload) -> after 5000 -> timeout end. + +decode_json(In) -> + emqx_json:decode(In, [return_maps]). diff --git a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl index b725e37b2..a768d2bfe 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl @@ -19,9 +19,33 @@ -compile(nowarn_export_all). -include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-define(HOST, "http://127.0.0.1:18083/"). + +%%--------------------------------------------------------------------------------------- +%% CT boilerplate +%%--------------------------------------------------------------------------------------- all() -> - emqx_common_test_helpers:all(?MODULE). + OtherTCs = emqx_common_test_helpers:all(?MODULE) -- get_status_tests(), + [ + {group, api_status_endpoint}, + {group, non_api_status_endpoint} + | OtherTCs + ]. + +get_status_tests() -> + [ + t_status_ok, + t_status_not_ok + ]. + +groups() -> + [ + {api_status_endpoint, [], get_status_tests()}, + {non_api_status_endpoint, [], get_status_tests()} + ]. init_per_suite(Config) -> emqx_mgmt_api_test_util:init_suite(), @@ -30,8 +54,114 @@ init_per_suite(Config) -> end_per_suite(_) -> emqx_mgmt_api_test_util:end_suite(). -t_status(_Config) -> - Path = emqx_mgmt_api_test_util:api_path_without_base_path(["/status"]), - Status = io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), started, running]), - {ok, Status} = emqx_mgmt_api_test_util:request_api(get, Path), +init_per_group(api_status_endpoint, Config) -> + [{get_status_path, ["api", "v5", "status"]} | Config]; +init_per_group(non_api_status_endpoint, Config) -> + [{get_status_path, ["status"]} | Config]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(t_status_not_ok, Config) -> + ok = application:stop(emqx), + Config; +init_per_testcase(_TestCase, Config) -> + Config. 
+ +end_per_testcase(t_status_not_ok, _Config) -> + {ok, _} = application:ensure_all_started(emqx), + ok; +end_per_testcase(_TestCase, _Config) -> + ok. + +%%--------------------------------------------------------------------------------------- +%% Helper fns +%%--------------------------------------------------------------------------------------- + +do_request(Opts) -> + #{ + path := Path0, + method := Method, + headers := Headers, + body := Body0 + } = Opts, + URL = ?HOST ++ filename:join(Path0), + {ok, #{host := Host, port := Port, path := Path}} = emqx_http_lib:uri_parse(URL), + %% we must not use `httpc' here, because it keeps retrying when it + %% receives a 503 with `retry-after' header, and there's no option + %% to stop that behavior... + {ok, Gun} = gun:open(Host, Port, #{retry => 0}), + {ok, http} = gun:await_up(Gun), + Request = + fun() -> + case Body0 of + no_body -> gun:Method(Gun, Path, Headers); + {_Encoding, Body} -> gun:Method(Gun, Path, Headers, Body) + end + end, + Ref = Request(), + receive + {gun_response, Gun, Ref, nofin, StatusCode, Headers1} -> + Data = data_loop(Gun, Ref, _Acc = <<>>), + #{status_code => StatusCode, headers => maps:from_list(Headers1), body => Data} + after 5_000 -> + error({timeout, Opts, process_info(self(), messages)}) + end. + +data_loop(Gun, Ref, Acc) -> + receive + {gun_data, Gun, Ref, nofin, Data} -> + data_loop(Gun, Ref, <>); + {gun_data, Gun, Ref, fin, Data} -> + gun:shutdown(Gun), + <> + after 5000 -> + error(timeout) + end. + +%%--------------------------------------------------------------------------------------- +%% Test cases +%%--------------------------------------------------------------------------------------- + +t_status_ok(Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + {match, _}, + re:run(Resp, <<"emqx is running$">>) + ), + ok. + +t_status_not_ok(Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + headers := Headers, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + headers => [], + body => no_body + }), + ?assertEqual(503, StatusCode), + ?assertMatch( + {match, _}, + re:run(Resp, <<"emqx is not_running$">>) + ), + ?assertMatch( + #{<<"retry-after">> := <<"15">>}, + Headers + ), ok. diff --git a/apps/emqx_management/test/emqx_mgmt_api_test_util.erl b/apps/emqx_management/test/emqx_mgmt_api_test_util.erl index a8b04dc80..aed28930b 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_test_util.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_test_util.erl @@ -44,15 +44,20 @@ set_special_configs(_App) -> ok. request_api(Method, Url) -> - request_api(Method, Url, [], auth_header_(), []). + request_api(Method, Url, [], [], [], #{}). request_api(Method, Url, AuthOrHeaders) -> - request_api(Method, Url, [], AuthOrHeaders, []). + request_api(Method, Url, [], AuthOrHeaders, [], #{}). request_api(Method, Url, QueryParams, AuthOrHeaders) -> - request_api(Method, Url, QueryParams, AuthOrHeaders, []). + request_api(Method, Url, QueryParams, AuthOrHeaders, [], #{}). -request_api(Method, Url, QueryParams, AuthOrHeaders, []) when +request_api(Method, Url, QueryParams, AuthOrHeaders, Body) -> + request_api(Method, Url, QueryParams, AuthOrHeaders, Body, #{}). 
+ +request_api(Method, Url, QueryParams, [], Body, Opts) -> + request_api(Method, Url, QueryParams, auth_header_(), Body, Opts); +request_api(Method, Url, QueryParams, AuthOrHeaders, [], Opts) when (Method =:= options) orelse (Method =:= get) orelse (Method =:= put) orelse @@ -65,8 +70,8 @@ request_api(Method, Url, QueryParams, AuthOrHeaders, []) when "" -> Url; _ -> Url ++ "?" ++ QueryParams end, - do_request_api(Method, {NewUrl, build_http_header(AuthOrHeaders)}); -request_api(Method, Url, QueryParams, AuthOrHeaders, Body) when + do_request_api(Method, {NewUrl, build_http_header(AuthOrHeaders)}, Opts); +request_api(Method, Url, QueryParams, AuthOrHeaders, Body, Opts) when (Method =:= post) orelse (Method =:= patch) orelse (Method =:= put) orelse @@ -79,10 +84,12 @@ request_api(Method, Url, QueryParams, AuthOrHeaders, Body) when end, do_request_api( Method, - {NewUrl, build_http_header(AuthOrHeaders), "application/json", emqx_json:encode(Body)} + {NewUrl, build_http_header(AuthOrHeaders), "application/json", emqx_json:encode(Body)}, + Opts ). -do_request_api(Method, Request) -> +do_request_api(Method, Request, Opts) -> + ReturnBody = maps:get(return_body, Opts, false), ct:pal("Method: ~p, Request: ~p", [Method, Request]), case httpc:request(Method, Request, [], []) of {error, socket_closed_remotely} -> @@ -91,8 +98,9 @@ do_request_api(Method, Request) -> Code >= 200 andalso Code =< 299 -> {ok, Return}; - {ok, {Reason, _, _} = Error} -> - ct:pal("error: ~p~n", [Error]), + {ok, {Reason, Headers, Body}} when ReturnBody -> + {error, {Reason, Headers, Body}}; + {ok, {Reason, _Headers, _Body}} -> {error, Reason} end. diff --git a/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl index 5130913c1..72737ba60 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl @@ -175,7 +175,7 @@ t_create_failed(_Config) -> emqx_trace:clear(), ok. -t_download_log(_Config) -> +t_log_file(_Config) -> ClientId = <<"client-test-download">>, Now = erlang:system_time(second), Name = <<"test_client_id">>, @@ -191,6 +191,12 @@ t_download_log(_Config) -> ], ok = emqx_trace_handler_SUITE:filesync(Name, clientid), Header = auth_header_(), + ?assertMatch( + {error, {"HTTP/1.1", 404, "Not Found"}, _}, + request_api(get, api_path("trace/test_client_not_found/log_detail"), Header) + ), + {ok, Detail} = request_api(get, api_path("trace/test_client_id/log_detail"), Header), + ?assertMatch([#{<<"mtime">> := _, <<"size">> := _, <<"node">> := _}], json(Detail)), {ok, Binary} = request_api(get, api_path("trace/test_client_id/download"), Header), {ok, [ _Comment, diff --git a/apps/emqx_modules/src/emqx_delayed.erl b/apps/emqx_modules/src/emqx_delayed.erl index ac7f75158..f511d74d9 100644 --- a/apps/emqx_modules/src/emqx_delayed.erl +++ b/apps/emqx_modules/src/emqx_delayed.erl @@ -91,6 +91,7 @@ -define(SERVER, ?MODULE). -define(MAX_INTERVAL, 4294967). -define(FORMAT_FUN, {?MODULE, format_delayed}). +-define(NOW, erlang:system_time(milli_seconds)). %%-------------------------------------------------------------------- %% Mnesia bootstrap @@ -118,12 +119,13 @@ on_message_publish( {PubAt, Delayed} = case binary_to_integer(Delay) of Interval when Interval < ?MAX_INTERVAL -> - {Interval + erlang:round(Ts / 1000), Interval}; + {Interval * 1000 + Ts, Interval}; Timestamp -> %% Check malicious timestamp? 
- case (Timestamp - erlang:round(Ts / 1000)) > ?MAX_INTERVAL of + Internal = Timestamp - erlang:round(Ts / 1000), + case Internal > ?MAX_INTERVAL of true -> error(invalid_delayed_timestamp); - false -> {Timestamp, Timestamp - erlang:round(Ts / 1000)} + false -> {Timestamp * 1000, Internal} end end, PubMsg = Msg#message{topic = Topic1}, @@ -189,14 +191,14 @@ format_delayed( WithPayload ) -> PublishTime = to_rfc3339(PublishTimeStamp div 1000), - ExpectTime = to_rfc3339(ExpectTimeStamp), - RemainingTime = ExpectTimeStamp - erlang:system_time(second), + ExpectTime = to_rfc3339(ExpectTimeStamp div 1000), + RemainingTime = ExpectTimeStamp - ?NOW, Result = #{ msgid => emqx_guid:to_hexstr(Id), node => node(), publish_at => PublishTime, delayed_interval => Delayed, - delayed_remaining => RemainingTime, + delayed_remaining => RemainingTime div 1000, expected_at => ExpectTime, topic => Topic, qos => Qos, @@ -296,7 +298,7 @@ handle_cast(Msg, State) -> %% Do Publish... handle_info({timeout, TRef, do_publish}, State = #{publish_timer := TRef}) -> - DeletedKeys = do_publish(mnesia:dirty_first(?TAB), erlang:system_time(seconds)), + DeletedKeys = do_publish(mnesia:dirty_first(?TAB), ?NOW), lists:foreach(fun(Key) -> mria:dirty_delete(?TAB, Key) end, DeletedKeys), {noreply, ensure_publish_timer(State#{publish_timer := undefined, publish_at := 0})}; handle_info(stats, State = #{stats_fun := StatsFun}) -> @@ -347,18 +349,18 @@ ensure_publish_timer(State) -> ensure_publish_timer('$end_of_table', State) -> State#{publish_timer := undefined, publish_at := 0}; ensure_publish_timer({Ts, _Id}, State = #{publish_timer := undefined}) -> - ensure_publish_timer(Ts, erlang:system_time(seconds), State); + ensure_publish_timer(Ts, ?NOW, State); ensure_publish_timer({Ts, _Id}, State = #{publish_timer := TRef, publish_at := PubAt}) when Ts < PubAt -> ok = emqx_misc:cancel_timer(TRef), - ensure_publish_timer(Ts, erlang:system_time(seconds), State); + ensure_publish_timer(Ts, ?NOW, State); ensure_publish_timer(_Key, State) -> State. ensure_publish_timer(Ts, Now, State) -> Interval = max(1, Ts - Now), - TRef = emqx_misc:start_timer(timer:seconds(Interval), do_publish), + TRef = emqx_misc:start_timer(Interval, do_publish), State#{publish_timer := TRef, publish_at := Now + Interval}. do_publish(Key, Now) -> @@ -371,8 +373,23 @@ do_publish({Ts, _Id}, Now, Acc) when Ts > Now -> Acc; do_publish(Key = {Ts, _Id}, Now, Acc) when Ts =< Now -> case mnesia:dirty_read(?TAB, Key) of - [] -> ok; - [#delayed_message{msg = Msg}] -> emqx_pool:async_submit(fun emqx:publish/1, [Msg]) + [] -> + ok; + [#delayed_message{msg = Msg}] -> + case emqx_banned:look_up({clientid, Msg#message.from}) of + [] -> + emqx_pool:async_submit(fun emqx:publish/1, [Msg]); + _ -> + ?tp( + notice, + ignore_delayed_message_publish, + #{ + reason => "client is banned", + clienid => Msg#message.from + } + ), + ok + end end, do_publish(mnesia:dirty_next(?TAB, Key), Now, [Key | Acc]). 
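Note: the emqx_delayed changes above move scheduling from second to millisecond precision (the ?NOW macro, Interval * 1000 + Ts, Timestamp * 1000) and skip delayed messages whose publisher is banned. A small sketch of the publish-time arithmetic under those rules follows; the module and function names are illustrative only, not part of the patch.

%% delayed_time_example.erl -- hypothetical sketch of the millisecond-based math
-module(delayed_time_example).
-export([publish_at/2]).

-define(MAX_INTERVAL, 4294967).

%% Delay is the integer parsed from a $delayed/{Delay}/... topic,
%% NowMs is erlang:system_time(millisecond).
publish_at(Delay, NowMs) when Delay < ?MAX_INTERVAL ->
    %% small values are relative delays in seconds, scheduled in milliseconds
    Delay * 1000 + NowMs;
publish_at(Timestamp, NowMs) ->
    %% larger values are treated as absolute unix timestamps in seconds
    case Timestamp - NowMs div 1000 > ?MAX_INTERVAL of
        true -> error(invalid_delayed_timestamp);
        false -> Timestamp * 1000
    end.

For instance, publish_at(5, erlang:system_time(millisecond)) yields a publish time five seconds ahead, mirroring the patched on_message_publish/1.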
diff --git a/apps/emqx_modules/src/emqx_modules.app.src b/apps/emqx_modules/src/emqx_modules.app.src index 2fa38dae3..e2a142a99 100644 --- a/apps/emqx_modules/src/emqx_modules.app.src +++ b/apps/emqx_modules/src/emqx_modules.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_modules, [ {description, "EMQX Modules"}, - {vsn, "5.0.4"}, + {vsn, "5.0.6"}, {modules, []}, {applications, [kernel, stdlib, emqx]}, {mod, {emqx_modules_app, []}}, diff --git a/apps/emqx_modules/test/emqx_delayed_SUITE.erl b/apps/emqx_modules/test/emqx_delayed_SUITE.erl index d1af9a064..5864646ad 100644 --- a/apps/emqx_modules/test/emqx_delayed_SUITE.erl +++ b/apps/emqx_modules/test/emqx_delayed_SUITE.erl @@ -26,6 +26,7 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("emqx/include/emqx.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). %%-------------------------------------------------------------------- %% Setups @@ -36,7 +37,8 @@ }). all() -> - emqx_common_test_helpers:all(?MODULE). + [t_banned_delayed]. +%% emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ @@ -202,3 +204,69 @@ t_get_basic_usage_info(_Config) -> ), ?assertEqual(#{delayed_message_count => 4}, emqx_delayed:get_basic_usage_info()), ok. + +t_delayed_precision(_) -> + MaxSpan = 1250, + FutureDiff = subscribe_proc(), + DelayedMsg0 = emqx_message:make( + ?MODULE, 1, <<"$delayed/1/delayed/test">>, <<"delayed/test">> + ), + _ = on_message_publish(DelayedMsg0), + ?assert(FutureDiff() =< MaxSpan). + +t_banned_delayed(_) -> + emqx:update_config([delayed, max_delayed_messages], 10000), + ClientId1 = <<"bc1">>, + ClientId2 = <<"bc2">>, + + Now = erlang:system_time(second), + Who = {clientid, ClientId2}, + emqx_banned:create(#{ + who => Who, + by => <<"test">>, + reason => <<"test">>, + at => Now, + until => Now + 120 + }), + + snabbkaffe:start_trace(), + lists:foreach( + fun(ClientId) -> + Msg = emqx_message:make(ClientId, <<"$delayed/1/bc">>, <<"payload">>), + emqx_delayed:on_message_publish(Msg) + end, + [ClientId1, ClientId1, ClientId1, ClientId2, ClientId2] + ), + + timer:sleep(2000), + Trace = snabbkaffe:collect_trace(), + snabbkaffe:stop(), + emqx_banned:delete(Who), + mnesia:clear_table(emqx_delayed), + + ?assertEqual(2, length(?of_kind(ignore_delayed_message_publish, Trace))). + +subscribe_proc() -> + Self = self(), + Ref = erlang:make_ref(), + erlang:spawn(fun() -> + Topic = <<"delayed/+">>, + emqx_broker:subscribe(Topic), + Self ! + {Ref, + receive + {deliver, Topic, Msg} -> + erlang:system_time(milli_seconds) - Msg#message.timestamp + after 2000 -> + 2000 + end}, + emqx_broker:unsubscribe(Topic) + end), + fun() -> + receive + {Ref, Diff} -> + Diff + after 2000 -> + 2000 + end + end. diff --git a/apps/emqx_plugins/i18n/emqx_plugins_schema.conf b/apps/emqx_plugins/i18n/emqx_plugins_schema.conf index 4c2a2fbac..454d36f6f 100644 --- a/apps/emqx_plugins/i18n/emqx_plugins_schema.conf +++ b/apps/emqx_plugins/i18n/emqx_plugins_schema.conf @@ -2,14 +2,14 @@ emqx_plugins_schema { plugins { desc { en: """ -Manage EMQX plugins.
+Manage EMQX plugins.
Plugins can be pre-built as a part of EMQX package, or installed as a standalone package in a location specified by -install_dir config key
+install_dir config key
The standalone-installed plugins are referred to as 'external' plugins. """ - zh: """管理EMQX插件。
-插件可以是EMQX安装包中的一部分,也可以是一个独立的安装包。
+ zh: """管理EMQX插件。
+插件可以是EMQX安装包中的一部分,也可以是一个独立的安装包。
独立安装的插件称为“外部插件”。 """ } @@ -30,11 +30,11 @@ The standalone-installed plugins are referred to as 'external' plugins. } name_vsn { desc { - en: """The {name}-{version} of the plugin.
-It should match the plugin application name-version as the for the plugin release package name
+ en: """The {name}-{version} of the plugin.
+It should match the plugin application name-version, which is also used as the plugin release package name
For example: my_plugin-0.1.0. """ - zh: """插件的名称{name}-{version}。
+ zh: """插件的名称{name}-{version}。
它应该与插件的发布包名称一致,如my_plugin-0.1.0。""" } label { @@ -54,7 +54,7 @@ For example: my_plugin-0.1.0. } states { desc { - en: """An array of plugins in the desired states.
+ en: """An array of plugins in the desired states.
The plugins are started in the defined order""" zh: """一组插件的状态。插件将按照定义的顺序启动""" } @@ -69,11 +69,11 @@ The plugins are started in the defined order""" The installation directory for the external plugins. The plugin beam files and configuration files should reside in the subdirectory named as emqx_foo_bar-0.1.0. -
+
NOTE: For security reasons, this directory should **NOT** be writable by anyone except emqx (or any user which runs EMQX). """ - zh: "插件安装包的目录, 不要自己创建, 只能由emqx用户创建与修改" + zh: "插件安装包的目录,出于安全考虑,该目录应该只允许 emqx,或用于运行 EMQX 服务的用户拥有写入权限。" } label { en: "Install Directory" @@ -82,10 +82,10 @@ by anyone except emqx (or any user which runs EMQX). } check_interval { desc { - en: """Check interval: check if the status of the plugins in the cluster is consistent,
+ en: """Check interval: check if the status of the plugins in the cluster is consistent,
if the results of 3 consecutive checks are not consistent, then alarm. """ - zh: """检查间隔:检查集群中插件的状态是否一致,
+ zh: """检查间隔:检查集群中插件的状态是否一致,
如果连续3次检查结果不一致,则报警。 """ } diff --git a/apps/emqx_prometheus/i18n/emqx_prometheus_schema_i18n.conf b/apps/emqx_prometheus/i18n/emqx_prometheus_schema_i18n.conf index 2c7736938..7f251ff4b 100644 --- a/apps/emqx_prometheus/i18n/emqx_prometheus_schema_i18n.conf +++ b/apps/emqx_prometheus/i18n/emqx_prometheus_schema_i18n.conf @@ -30,4 +30,41 @@ emqx_prometheus_schema { zh: """开启或关闭 Prometheus 数据推送""" } } + vm_dist_collector { + desc { + en: """Enable or disable VM distribution collector, collects information about the sockets and processes involved in the Erlang distribution mechanism.""" + zh: """开启或关闭 VM 分布采集器,收集 Erlang 分布机制中涉及的套接字和进程的信息。""" + } + } + mnesia_collector { + desc { + en: """Enable or disable Mnesia collector, collects Mnesia metrics mainly using mnesia:system_info/1 .""" + zh: """开启或关闭 Mnesia 采集器, 使用 mnesia:system_info/1 收集 Mnesia 相关指标""" + } + } + vm_statistics_collector { + desc { + en: """Enable or disable VM statistics collector, collects Erlang VM metrics using erlang:statistics/1 .""" + zh: """开启或关闭 VM 统计采集器, 使用 erlang:statistics/1 收集 Erlang VM 相关指标""" + } + } + + vm_system_info_collector { + desc { + en: """Enable or disable VM system info collector, collects Erlang VM metrics using erlang:system_info/1 .""" + zh: """开启或关闭 VM 系统信息采集器, 使用 erlang:system_info/1 收集 Erlang VM 相关指标""" + } + } + vm_memory_collector { + desc { + en: """Enable or disable VM memory collector, collects information about memory dynamically allocated by the Erlang emulator using erlang:memory/0 , also provides basic (D)ETS statistics .""" + zh: """开启或关闭 VM 内存采集器, 使用 erlang:memory/0 收集 Erlang 虚拟机动态分配的内存信息,同时提供基本的 (D)ETS 统计信息""" + } + } + vm_msacc_collector { + desc { + en: """Enable or disable VM msacc collector, collects microstate accounting metrics using erlang:statistics(microstate_accounting) .""" + zh: """开启或关闭 VM msacc 采集器, 使用 erlang:statistics(microstate_accounting) 收集微状态计数指标""" + } + } } diff --git a/apps/emqx_prometheus/include/emqx_prometheus.hrl b/apps/emqx_prometheus/include/emqx_prometheus.hrl index 589bbd024..36066a55d 100644 --- a/apps/emqx_prometheus/include/emqx_prometheus.hrl +++ b/apps/emqx_prometheus/include/emqx_prometheus.hrl @@ -1 +1,2 @@ -define(APP, emqx_prometheus). +-define(PROMETHEUS, [prometheus]). diff --git a/apps/emqx_prometheus/src/emqx_prometheus.app.src b/apps/emqx_prometheus/src/emqx_prometheus.app.src index e446a572a..d95c89c3b 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.app.src +++ b/apps/emqx_prometheus/src/emqx_prometheus.app.src @@ -2,7 +2,7 @@ {application, emqx_prometheus, [ {description, "Prometheus for EMQX"}, % strict semver, bump manually! - {vsn, "5.0.2"}, + {vsn, "5.0.3"}, {modules, []}, {registered, [emqx_prometheus_sup]}, {applications, [kernel, stdlib, prometheus, emqx]}, diff --git a/apps/emqx_prometheus/src/emqx_prometheus.erl b/apps/emqx_prometheus/src/emqx_prometheus.erl index 4bbfbe524..de9349b97 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus.erl @@ -37,18 +37,8 @@ ] ). --export([ - update/1, - start/0, - stop/0, - restart/0, - % for rpc - do_start/0, - do_stop/0 -]). - %% APIs --export([start_link/1]). +-export([start_link/1, info/0]). %% gen_server callbacks -export([ @@ -69,87 +59,69 @@ -export([collect/1]). +-export([ + %% For bpapi, deprecated_since 5.0.10, remove this when 5.1.x + do_start/0, + do_stop/0 +]). + -define(C(K, L), proplists:get_value(K, L, 0)). -define(TIMER_MSG, '#interval'). --record(state, {push_gateway, timer, interval}). 
- -%%-------------------------------------------------------------------- -%% update new config -update(Config) -> - case - emqx_conf:update( - [prometheus], - Config, - #{rawconf_with_defaults => true, override_to => cluster} - ) - of - {ok, #{raw_config := NewConfigRows}} -> - case maps:get(<<"enable">>, Config, true) of - true -> - ok = restart(); - false -> - ok = stop() - end, - {ok, NewConfigRows}; - {error, Reason} -> - {error, Reason} - end. - -start() -> - {_, []} = emqx_prometheus_proto_v1:start(mria_mnesia:running_nodes()), - ok. - -stop() -> - {_, []} = emqx_prometheus_proto_v1:stop(mria_mnesia:running_nodes()), - ok. - -restart() -> - ok = stop(), - ok = start(). - -do_start() -> - emqx_prometheus_sup:start_child(?APP, emqx_conf:get([prometheus])). - -do_stop() -> - case emqx_prometheus_sup:stop_child(?APP) of - ok -> - ok; - {error, not_found} -> - ok - end. +-define(HTTP_OPTIONS, [{autoredirect, true}, {timeout, 60000}]). %%-------------------------------------------------------------------- %% APIs %%-------------------------------------------------------------------- -start_link(Opts) -> - gen_server:start_link({local, ?MODULE}, ?MODULE, [Opts], []). +start_link([]) -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +info() -> + gen_server:call(?MODULE, info). %%-------------------------------------------------------------------- %% gen_server callbacks %%-------------------------------------------------------------------- -init([Opts]) -> - Interval = maps:get(interval, Opts), - PushGateway = maps:get(push_gateway_server, Opts), - {ok, ensure_timer(#state{push_gateway = PushGateway, interval = Interval})}. +init([]) -> + #{interval := Interval} = opts(), + {ok, #{timer => ensure_timer(Interval), ok => 0, failed => 0}}. +handle_call(info, _From, State = #{timer := Timer}) -> + {reply, State#{opts => opts(), next_push_ms => erlang:read_timer(Timer)}, State}; handle_call(_Msg, _From, State) -> - {noreply, State}. + {reply, ok, State}. handle_cast(_Msg, State) -> {noreply, State}. -handle_info({timeout, R, ?TIMER_MSG}, State = #state{timer = R, push_gateway = Uri}) -> +handle_info({timeout, Timer, ?TIMER_MSG}, State = #{timer := Timer}) -> + #{interval := Interval, push_gateway_server := Server} = opts(), + PushRes = push_to_push_gateway(Server), + NewTimer = ensure_timer(Interval), + NewState = maps:update_with(PushRes, fun(C) -> C + 1 end, 1, State#{timer => NewTimer}), + %% Data is too big, hibernate for saving memory and stop system monitor warning. + {noreply, NewState, hibernate}; +handle_info(_Msg, State) -> + {noreply, State}. + +push_to_push_gateway(Uri) -> [Name, Ip] = string:tokens(atom_to_list(node()), "@"), Url = lists:concat([Uri, "/metrics/job/", Name, "/instance/", Name, "~", Ip]), Data = prometheus_text_format:format(), - httpc:request(post, {Url, [], "text/plain", Data}, [{autoredirect, true}], []), - {noreply, ensure_timer(State)}; -handle_info(_Msg, State) -> - {noreply, State}. + case httpc:request(post, {Url, [], "text/plain", Data}, ?HTTP_OPTIONS, []) of + {ok, {{"HTTP/1.1", 200, "OK"}, _Headers, _Body}} -> + ok; + Error -> + ?SLOG(error, #{ + msg => "post_to_push_gateway_failed", + error => Error, + url => Url + }), + failed + end. code_change(_OldVsn, State, _Extra) -> {ok, State}. @@ -157,11 +129,14 @@ code_change(_OldVsn, State, _Extra) -> terminate(_Reason, _State) -> ok. -ensure_timer(State = #state{interval = Interval}) -> - State#state{timer = emqx_misc:start_timer(Interval, ?TIMER_MSG)}. 
+ensure_timer(Interval) -> + emqx_misc:start_timer(Interval, ?TIMER_MSG). + %%-------------------------------------------------------------------- %% prometheus callbacks %%-------------------------------------------------------------------- +opts() -> + emqx_conf:get(?PROMETHEUS). deregister_cleanup(_Registry) -> ok. @@ -622,3 +597,11 @@ emqx_cluster_data() -> {nodes_running, length(Running)}, {nodes_stopped, length(Stopped)} ]. + +%% deprecated_since 5.0.10, remove this when 5.1.x +do_start() -> + emqx_prometheus_sup:start_child(?APP). + +%% deprecated_since 5.0.10, remove this when 5.1.x +do_stop() -> + emqx_prometheus_sup:stop_child(?APP). diff --git a/apps/emqx_prometheus/src/emqx_prometheus_api.erl b/apps/emqx_prometheus/src/emqx_prometheus_api.erl index 9a81f3ea3..125eed560 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus_api.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus_api.erl @@ -84,7 +84,7 @@ schema("/prometheus/stats") -> prometheus(get, _Params) -> {200, emqx:get_raw_config([<<"prometheus">>], #{})}; prometheus(put, #{body := Body}) -> - case emqx_prometheus:update(Body) of + case emqx_prometheus_config:update(Body) of {ok, NewConfig} -> {200, NewConfig}; {error, Reason} -> @@ -120,7 +120,13 @@ prometheus_config_example() -> #{ enable => true, interval => "15s", - push_gateway_server => <<"http://127.0.0.1:9091">> + push_gateway_server => <<"http://127.0.0.1:9091">>, + vm_dist_collector => enabled, + mnesia_collector => enabled, + vm_statistics_collector => enabled, + vm_system_info_collector => enabled, + vm_memory_collector => enabled, + vm_msacc_collector => enabled }. prometheus_data_schema() -> diff --git a/apps/emqx_prometheus/src/emqx_prometheus_app.erl b/apps/emqx_prometheus/src/emqx_prometheus_app.erl index b9dd9c466..bdee12d0e 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus_app.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus_app.erl @@ -27,17 +27,10 @@ ]). start(_StartType, _StartArgs) -> - {ok, Sup} = emqx_prometheus_sup:start_link(), - maybe_enable_prometheus(), - {ok, Sup}. + Res = emqx_prometheus_sup:start_link(), + emqx_prometheus_config:add_handler(), + Res. stop(_State) -> + emqx_prometheus_config:remove_handler(), ok. - -maybe_enable_prometheus() -> - case emqx_conf:get([prometheus, enable], false) of - true -> - emqx_prometheus_sup:start_child(?APP, emqx_conf:get([prometheus], #{})); - false -> - ok - end. diff --git a/apps/emqx_prometheus/src/emqx_prometheus_config.erl b/apps/emqx_prometheus/src/emqx_prometheus_config.erl new file mode 100644 index 000000000..b4914f216 --- /dev/null +++ b/apps/emqx_prometheus/src/emqx_prometheus_config.erl @@ -0,0 +1,57 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_prometheus_config). + +-behaviour(emqx_config_handler). + +-include("emqx_prometheus.hrl"). 
+ +-export([add_handler/0, remove_handler/0]). +-export([post_config_update/5]). +-export([update/1]). + +update(Config) -> + case + emqx_conf:update( + [prometheus], + Config, + #{rawconf_with_defaults => true, override_to => cluster} + ) + of + {ok, #{raw_config := NewConfigRows}} -> + {ok, NewConfigRows}; + {error, Reason} -> + {error, Reason} + end. + +add_handler() -> + ok = emqx_config_handler:add_handler(?PROMETHEUS, ?MODULE), + ok. + +remove_handler() -> + ok = emqx_config_handler:remove_handler(?PROMETHEUS), + ok. + +post_config_update(?PROMETHEUS, _Req, New, _Old, AppEnvs) -> + application:set_env(AppEnvs), + update_prometheus(New); +post_config_update(_ConfPath, _Req, _NewConf, _OldConf, _AppEnvs) -> + ok. + +update_prometheus(#{enable := true}) -> + emqx_prometheus_sup:start_child(?APP); +update_prometheus(#{enable := false}) -> + emqx_prometheus_sup:stop_child(?APP). diff --git a/apps/emqx_prometheus/src/emqx_prometheus_schema.erl b/apps/emqx_prometheus/src/emqx_prometheus_schema.erl index efd453135..09908167c 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus_schema.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus_schema.erl @@ -24,12 +24,13 @@ namespace/0, roots/0, fields/1, - desc/1 + desc/1, + translation/1 ]). namespace() -> "prometheus". -roots() -> ["prometheus"]. +roots() -> [{"prometheus", ?HOCON(?R_REF("prometheus"), #{translate_to => ["prometheus"]})}]. fields("prometheus") -> [ @@ -59,8 +60,72 @@ fields("prometheus") -> required => true, desc => ?DESC(enable) } + )}, + {vm_dist_collector, + ?HOCON( + hoconsc:enum([enabled, disabled]), + #{ + default => enabled, + required => true, + hidden => true, + desc => ?DESC(vm_dist_collector) + } + )}, + {mnesia_collector, + ?HOCON( + hoconsc:enum([enabled, disabled]), + #{ + default => enabled, + required => true, + hidden => true, + desc => ?DESC(mnesia_collector) + } + )}, + {vm_statistics_collector, + ?HOCON( + hoconsc:enum([enabled, disabled]), + #{ + default => enabled, + required => true, + hidden => true, + desc => ?DESC(vm_statistics_collector) + } + )}, + {vm_system_info_collector, + ?HOCON( + hoconsc:enum([enabled, disabled]), + #{ + default => enabled, + required => true, + hidden => true, + desc => ?DESC(vm_system_info_collector) + } + )}, + {vm_memory_collector, + ?HOCON( + hoconsc:enum([enabled, disabled]), + #{ + default => enabled, + required => true, + hidden => true, + desc => ?DESC(vm_memory_collector) + } + )}, + {vm_msacc_collector, + ?HOCON( + hoconsc:enum([enabled, disabled]), + #{ + default => enabled, + required => true, + hidden => true, + desc => ?DESC(vm_msacc_collector) + } )} ]. desc("prometheus") -> ?DESC(prometheus); desc(_) -> undefined. + +%% for CI test, CI don't load the whole emqx_conf_schema. +translation(Name) -> + emqx_conf_schema:translation(Name). diff --git a/apps/emqx_prometheus/src/emqx_prometheus_sup.erl b/apps/emqx_prometheus/src/emqx_prometheus_sup.erl index 65023da14..a70fda322 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus_sup.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus_sup.erl @@ -21,7 +21,6 @@ -export([ start_link/0, start_child/1, - start_child/2, stop_child/1 ]). @@ -40,23 +39,27 @@ start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). --spec start_child(supervisor:child_spec()) -> ok. +-spec start_child(supervisor:child_spec() | atom()) -> ok. start_child(ChildSpec) when is_map(ChildSpec) -> - assert_started(supervisor:start_child(?MODULE, ChildSpec)). - --spec start_child(atom(), map()) -> ok. 
-start_child(Mod, Opts) when is_atom(Mod) andalso is_map(Opts) -> - assert_started(supervisor:start_child(?MODULE, ?CHILD(Mod, Opts))). + assert_started(supervisor:start_child(?MODULE, ChildSpec)); +start_child(Mod) when is_atom(Mod) -> + assert_started(supervisor:start_child(?MODULE, ?CHILD(Mod, []))). -spec stop_child(any()) -> ok | {error, term()}. stop_child(ChildId) -> case supervisor:terminate_child(?MODULE, ChildId) of ok -> supervisor:delete_child(?MODULE, ChildId); + {error, not_found} -> ok; Error -> Error end. init([]) -> - {ok, {{one_for_one, 10, 3600}, []}}. + Children = + case emqx_conf:get([prometheus, enable], false) of + false -> []; + true -> [?CHILD(emqx_prometheus, [])] + end, + {ok, {{one_for_one, 10, 3600}, Children}}. %%-------------------------------------------------------------------- %% Internal functions @@ -64,5 +67,5 @@ init([]) -> assert_started({ok, _Pid}) -> ok; assert_started({ok, _Pid, _Info}) -> ok; -assert_started({error, {already_tarted, _Pid}}) -> ok; -assert_started({error, Reason}) -> erlang:error(Reason). +assert_started({error, {already_started, _Pid}}) -> ok; +assert_started({error, Reason}) -> {error, Reason}. diff --git a/apps/emqx_prometheus/src/proto/emqx_prometheus_proto_v1.erl b/apps/emqx_prometheus/src/proto/emqx_prometheus_proto_v1.erl index c0529cabd..e11f8e3ad 100644 --- a/apps/emqx_prometheus/src/proto/emqx_prometheus_proto_v1.erl +++ b/apps/emqx_prometheus/src/proto/emqx_prometheus_proto_v1.erl @@ -20,13 +20,15 @@ -export([ introduced_in/0, - + deprecated_since/0, start/1, stop/1 ]). -include_lib("emqx/include/bpapi.hrl"). +deprecated_since() -> "5.0.10". + introduced_in() -> "5.0.0". diff --git a/apps/emqx_prometheus/test/emqx_prometheus_SUITE.erl b/apps/emqx_prometheus/test/emqx_prometheus_SUITE.erl index 16590b114..e26bcfeb4 100644 --- a/apps/emqx_prometheus/test/emqx_prometheus_SUITE.erl +++ b/apps/emqx_prometheus/test/emqx_prometheus_SUITE.erl @@ -28,6 +28,12 @@ " push_gateway_server = \"http://127.0.0.1:9091\"\n" " interval = \"1s\"\n" " enable = true\n" + " vm_dist_collector = enabled\n" + " mnesia_collector = enabled\n" + " vm_statistics_collector = disabled\n" + " vm_system_info_collector = disabled\n" + " vm_memory_collector = enabled\n" + " vm_msacc_collector = enabled\n" "}\n" >>). @@ -65,13 +71,18 @@ load_config() -> %%-------------------------------------------------------------------- t_start_stop(_) -> - ?assertMatch(ok, emqx_prometheus:start()), - ?assertMatch(ok, emqx_prometheus:stop()), - ?assertMatch(ok, emqx_prometheus:restart()), - %% wait the interval timer tigger + App = emqx_prometheus, + ?assertMatch(ok, emqx_prometheus_sup:start_child(App)), + %% start twice return ok. + ?assertMatch(ok, emqx_prometheus_sup:start_child(App)), + ?assertMatch(ok, emqx_prometheus_sup:stop_child(App)), + %% stop twice return ok. + ?assertMatch(ok, emqx_prometheus_sup:stop_child(App)), + %% wait the interval timer trigger timer:sleep(2000). -t_test(_) -> +t_collector_no_crash_test(_) -> + prometheus_text_format:format(), ok. 
t_only_for_coverage(_) -> diff --git a/apps/emqx_prometheus/test/emqx_prometheus_api_SUITE.erl b/apps/emqx_prometheus/test/emqx_prometheus_api_SUITE.erl index e72d7865a..59b3b9a17 100644 --- a/apps/emqx_prometheus/test/emqx_prometheus_api_SUITE.erl +++ b/apps/emqx_prometheus/test/emqx_prometheus_api_SUITE.erl @@ -71,16 +71,27 @@ t_prometheus_api(_) -> #{ <<"push_gateway_server">> := _, <<"interval">> := _, - <<"enable">> := _ + <<"enable">> := _, + <<"vm_statistics_collector">> := _, + <<"vm_system_info_collector">> := _, + <<"vm_memory_collector">> := _, + <<"vm_msacc_collector">> := _ }, Conf ), - - NewConf = Conf#{<<"interval">> := <<"2s">>}, + #{<<"enable">> := Enable} = Conf, + ?assertEqual(Enable, undefined =/= erlang:whereis(emqx_prometheus)), + NewConf = Conf#{<<"interval">> => <<"2s">>, <<"vm_statistics_collector">> => <<"disabled">>}, {ok, Response2} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, NewConf), Conf2 = emqx_json:decode(Response2, [return_maps]), ?assertMatch(NewConf, Conf2), + ?assertEqual({ok, []}, application:get_env(prometheus, vm_statistics_collector_metrics)), + ?assertEqual({ok, all}, application:get_env(prometheus, vm_memory_collector_metrics)), + + NewConf1 = Conf#{<<"enable">> => (not Enable)}, + {ok, _Response3} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, NewConf1), + ?assertEqual((not Enable), undefined =/= erlang:whereis(emqx_prometheus)), ok. t_stats_api(_) -> diff --git a/apps/emqx_psk/i18n/emqx_psk_i18n.conf b/apps/emqx_psk/i18n/emqx_psk_i18n.conf index 3a4e27ca3..6bba9c6d5 100644 --- a/apps/emqx_psk/i18n/emqx_psk_i18n.conf +++ b/apps/emqx_psk/i18n/emqx_psk_i18n.conf @@ -16,7 +16,7 @@ The IDs and secrets can be provided from a file which is configurable by the retainer_indices(desc) -> "Retainer index specifications: list of arrays of positive ascending integers. " "Each array specifies an index. Numbers in an index specification are 1-based " - "word positions in topics. Words from specified positions will be used for indexing.
" + "word positions in topics. Words from specified positions will be used for indexing.
" "For example, it is good to have [2, 4] index to optimize " "+/X/+/Y/... topic wildcard subscriptions."; retainer_indices(example) -> diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src index 28f90fdb9..6bb9ad010 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src +++ b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src @@ -2,7 +2,7 @@ {application, emqx_rule_engine, [ {description, "EMQX Rule Engine"}, % strict semver, bump manually! - {vsn, "5.0.2"}, + {vsn, "5.0.3"}, {modules, []}, {registered, [emqx_rule_engine_sup, emqx_rule_engine]}, {applications, [kernel, stdlib, rulesql, getopt]}, diff --git a/apps/emqx_rule_engine/src/emqx_rule_events.erl b/apps/emqx_rule_engine/src/emqx_rule_events.erl index 0ffa2e684..2935aeeb9 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_events.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_events.erl @@ -671,8 +671,8 @@ event_info_client_connack() -> event_info_client_check_authz_complete() -> event_info_common( 'client.check_authz_complete', - {<<"client check authz complete">>, <<"鉴权结果"/utf8>>}, - {<<"client check authz complete">>, <<"鉴权结果"/utf8>>}, + {<<"client check authz complete">>, <<"授权结果"/utf8>>}, + {<<"client check authz complete">>, <<"授权结果"/utf8>>}, <<"SELECT * FROM \"$events/client_check_authz_complete\"">> ). event_info_session_subscribed() -> @@ -1054,8 +1054,11 @@ printable_maps(Headers) -> || {Key, Value} <- V0 ] }; - (K, V0, AccIn) -> - AccIn#{K => V0} + (_K, V, AccIn) when is_tuple(V) -> + %% internal headers + AccIn; + (K, V, AccIn) -> + AccIn#{K => V} end, #{}, Headers diff --git a/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl index ba3a2cc30..e773b3b4e 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl @@ -93,7 +93,13 @@ groups() -> t_sqlparse_new_map, t_sqlparse_invalid_json ]}, - {events, [], [t_events]}, + {events, [], [ + t_events, + t_event_client_disconnected_normal, + t_event_client_disconnected_kicked, + t_event_client_disconnected_discarded, + t_event_client_disconnected_takenover + ]}, {telemetry, [], [ t_get_basic_usage_info_0, t_get_basic_usage_info_1 @@ -575,6 +581,165 @@ t_events(_Config) -> client_connack_failed(), ok. +t_event_client_disconnected_normal(_Config) -> + SQL = + "select * " + "from \"$events/client_disconnected\" ", + RepubT = <<"repub/to/disconnected/normal">>, + + {ok, TopicRule} = emqx_rule_engine:create_rule( + #{ + sql => SQL, + id => ?TMP_RULEID, + actions => [republish_action(RepubT, <<>>)] + } + ), + + {ok, Client} = emqtt:start_link([{clientid, <<"get_repub_client">>}, {username, <<"emqx0">>}]), + {ok, _} = emqtt:connect(Client), + {ok, _, _} = emqtt:subscribe(Client, RepubT, 0), + ct:sleep(200), + {ok, Client1} = emqtt:start_link([{clientid, <<"emqx">>}, {username, <<"emqx">>}]), + {ok, _} = emqtt:connect(Client1), + emqtt:disconnect(Client1), + + receive + {publish, #{topic := T, payload := Payload}} -> + ?assertEqual(RepubT, T), + ?assertMatch(#{<<"reason">> := <<"normal">>}, emqx_json:decode(Payload, [return_maps])) + after 1000 -> + ct:fail(wait_for_repub_disconnected_normal) + end, + emqtt:stop(Client), + + delete_rule(TopicRule). 
+ +t_event_client_disconnected_kicked(_Config) -> + SQL = + "select * " + "from \"$events/client_disconnected\" ", + RepubT = <<"repub/to/disconnected/kicked">>, + + {ok, TopicRule} = emqx_rule_engine:create_rule( + #{ + sql => SQL, + id => ?TMP_RULEID, + actions => [republish_action(RepubT, <<>>)] + } + ), + + {ok, Client} = emqtt:start_link([{clientid, <<"get_repub_client">>}, {username, <<"emqx0">>}]), + {ok, _} = emqtt:connect(Client), + {ok, _, _} = emqtt:subscribe(Client, RepubT, 0), + ct:sleep(200), + + {ok, Client1} = emqtt:start_link([{clientid, <<"emqx">>}, {username, <<"emqx">>}]), + {ok, _} = emqtt:connect(Client1), + %% the process will receive {'EXIT',{shutdown,tcp_closed}} + unlink(Client1), + + emqx_cm:kick_session(<<"emqx">>), + + receive + {publish, #{topic := T, payload := Payload}} -> + ?assertEqual(RepubT, T), + ?assertMatch(#{<<"reason">> := <<"kicked">>}, emqx_json:decode(Payload, [return_maps])) + after 1000 -> + ct:fail(wait_for_repub_disconnected_kicked) + end, + + emqtt:stop(Client), + delete_rule(TopicRule). + +t_event_client_disconnected_discarded(_Config) -> + SQL = + "select * " + "from \"$events/client_disconnected\" ", + RepubT = <<"repub/to/disconnected/discarded">>, + + {ok, TopicRule} = emqx_rule_engine:create_rule( + #{ + sql => SQL, + id => ?TMP_RULEID, + actions => [republish_action(RepubT, <<>>)] + } + ), + + {ok, Client} = emqtt:start_link([{clientid, <<"get_repub_client">>}, {username, <<"emqx0">>}]), + {ok, _} = emqtt:connect(Client), + {ok, _, _} = emqtt:subscribe(Client, RepubT, 0), + ct:sleep(200), + + {ok, Client1} = emqtt:start_link([{clientid, <<"emqx">>}, {username, <<"emqx">>}]), + {ok, _} = emqtt:connect(Client1), + %% the process will receive {'EXIT',{shutdown,tcp_closed}} + unlink(Client1), + + {ok, Client2} = emqtt:start_link([ + {clientid, <<"emqx">>}, {username, <<"emqx">>}, {clean_start, true} + ]), + {ok, _} = emqtt:connect(Client2), + + receive + {publish, #{topic := T, payload := Payload}} -> + ?assertEqual(RepubT, T), + ?assertMatch( + #{<<"reason">> := <<"discarded">>}, emqx_json:decode(Payload, [return_maps]) + ) + after 1000 -> + ct:fail(wait_for_repub_disconnected_discarded) + end, + emqtt:stop(Client), + emqtt:stop(Client2), + + delete_rule(TopicRule). + +t_event_client_disconnected_takenover(_Config) -> + SQL = + "select * " + "from \"$events/client_disconnected\" ", + RepubT = <<"repub/to/disconnected/takenover">>, + + {ok, TopicRule} = emqx_rule_engine:create_rule( + #{ + sql => SQL, + id => ?TMP_RULEID, + actions => [republish_action(RepubT, <<>>)] + } + ), + + {ok, ClientRecv} = emqtt:start_link([ + {clientid, <<"get_repub_client">>}, {username, <<"emqx0">>} + ]), + {ok, _} = emqtt:connect(ClientRecv), + {ok, _, _} = emqtt:subscribe(ClientRecv, RepubT, 0), + ct:sleep(200), + + {ok, Client1} = emqtt:start_link([{clientid, <<"emqx">>}, {username, <<"emqx">>}]), + {ok, _} = emqtt:connect(Client1), + %% the process will receive {'EXIT',{shutdown,tcp_closed}} + unlink(Client1), + + {ok, Client2} = emqtt:start_link([ + {clientid, <<"emqx">>}, {username, <<"emqx">>}, {clean_start, false} + ]), + {ok, _} = emqtt:connect(Client2), + + receive + {publish, #{topic := T, payload := Payload}} -> + ?assertEqual(RepubT, T), + ?assertMatch( + #{<<"reason">> := <<"takenover">>}, emqx_json:decode(Payload, [return_maps]) + ) + after 1000 -> + ct:fail(wait_for_repub_disconnected_discarded) + end, + + emqtt:stop(ClientRecv), + emqtt:stop(Client2), + + delete_rule(TopicRule). 
+ client_connack_failed() -> {ok, Client} = emqtt:start_link( [ diff --git a/apps/emqx_rule_engine/test/emqx_rule_events_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_events_SUITE.erl index ad8d28159..c9774b93d 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_events_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_events_SUITE.erl @@ -26,13 +26,19 @@ t_printable_maps(_) -> Headers = #{ peerhost => {127, 0, 0, 1}, peername => {{127, 0, 0, 1}, 9980}, - sockname => {{127, 0, 0, 1}, 1883} + sockname => {{127, 0, 0, 1}, 1883}, + redispatch_to => {<<"group">>, <<"sub/topic/+">>}, + shared_dispatch_ack => {self(), ref} }, + Converted = emqx_rule_events:printable_maps(Headers), ?assertMatch( #{ peerhost := <<"127.0.0.1">>, peername := <<"127.0.0.1:9980">>, sockname := <<"127.0.0.1:1883">> }, - emqx_rule_events:printable_maps(Headers) - ). + Converted + ), + ?assertNot(maps:is_key(redispatch_to, Converted)), + ?assertNot(maps:is_key(shared_dispatch_ack, Converted)), + ok. diff --git a/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl b/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl index cd2f76f11..c24d043e6 100644 --- a/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl +++ b/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl @@ -25,7 +25,7 @@ -export([api_spec/0, paths/0, schema/1, fields/1, namespace/0]). -export([slow_subs/2, get_history/0, settings/2]). --define(TAGS, [<<"Slow subscriptions">>]). +-define(TAGS, [<<"Slow Subscriptions">>]). -import(hoconsc, [mk/2, ref/1, ref/2]). -import(emqx_mgmt_util, [bad_request/0]). diff --git a/apps/emqx_statsd/i18n/emqx_statsd_api_i18n.conf b/apps/emqx_statsd/i18n/emqx_statsd_api_i18n.conf new file mode 100644 index 000000000..2721188bd --- /dev/null +++ b/apps/emqx_statsd/i18n/emqx_statsd_api_i18n.conf @@ -0,0 +1,16 @@ +emqx_statsd_api { + + get_statsd_config_api { + desc { + en: """List the configuration of StatsD metrics collection and push service.""" + zh: """列出 StatsD 指标采集和推送服务的的配置。""" + } + } + + update_statsd_config_api { + desc { + en: """Update the configuration of StatsD metrics collection and push service.""" + zh: """更新 StatsD 指标采集和推送服务的配置。""" + } + } +} diff --git a/apps/emqx_statsd/i18n/emqx_statsd_schema_i18n.conf b/apps/emqx_statsd/i18n/emqx_statsd_schema_i18n.conf index 4ccad1682..9c6eb5afb 100644 --- a/apps/emqx_statsd/i18n/emqx_statsd_schema_i18n.conf +++ b/apps/emqx_statsd/i18n/emqx_statsd_schema_i18n.conf @@ -1,9 +1,23 @@ emqx_statsd_schema { + get_statsd_config_api { + desc { + en: """List the configuration of StatsD metrics collection and push service.""" + zh: """列出 StatsD 指标采集和推送服务的的配置。""" + } + } + + update_statsd_config_api { + desc { + en: """Update the configuration of StatsD metrics collection and push service.""" + zh: """更新 StatsD 指标采集和推送服务的配置。""" + } + } + statsd { desc { - en: """Settings for reporting metrics to StatsD""" - zh: """StatsD 监控数据推送""" + en: """StatsD metrics collection and push configuration.""" + zh: """StatsD 指标采集与推送配置。""" } label { en: """StatsD""" @@ -13,29 +27,29 @@ emqx_statsd_schema { server { desc { - en: """URL of StatsD server""" - zh: """StatsD 服务器地址""" + en: """StatsD server address.""" + zh: """StatsD 服务器地址。""" } } sample_interval { desc { - en: """Data collection interval.""" - zh: """数据收集间隔""" + en: """The sampling interval for metrics.""" + zh: """指标的采样间隔。""" } } flush_interval { desc { - en: """Data reporting interval.""" - zh: """数据推送间隔""" + en: """The push interval for metrics.""" + zh: """指标的推送间隔。""" } } enable { desc { - en: """Turn StatsD data pushing on or off""" - zh: """开启或关闭 StatsD 
数据推送""" + en: """Enable or disable StatsD metrics collection and push service.""" + zh: """启用或禁用 StatsD 指标采集和推送服务。""" } } } diff --git a/apps/emqx_statsd/src/emqx_statsd.app.src b/apps/emqx_statsd/src/emqx_statsd.app.src index 21a972266..76b04204b 100644 --- a/apps/emqx_statsd/src/emqx_statsd.app.src +++ b/apps/emqx_statsd/src/emqx_statsd.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_statsd, [ {description, "An OTP application"}, - {vsn, "5.0.1"}, + {vsn, "5.0.2"}, {registered, []}, {mod, {emqx_statsd_app, []}}, {applications, [ diff --git a/apps/emqx_statsd/src/emqx_statsd_api.erl b/apps/emqx_statsd/src/emqx_statsd_api.erl index ee6007d7d..2f2e42303 100644 --- a/apps/emqx_statsd/src/emqx_statsd_api.erl +++ b/apps/emqx_statsd/src/emqx_statsd_api.erl @@ -20,6 +20,7 @@ -include("emqx_statsd.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). -include_lib("typerefl/include/types.hrl"). -import(hoconsc, [mk/2, ref/2]). @@ -48,14 +49,14 @@ schema("/statsd") -> 'operationId' => statsd, get => #{ - description => <<"Get statsd config">>, + description => ?DESC(get_statsd_config_api), tags => ?API_TAG_STATSD, responses => #{200 => statsd_config_schema()} }, put => #{ - description => <<"Set statsd config">>, + description => ?DESC(update_statsd_config_api), tags => ?API_TAG_STATSD, 'requestBody' => statsd_config_schema(), responses => diff --git a/apps/emqx_statsd/src/emqx_statsd_schema.erl b/apps/emqx_statsd/src/emqx_statsd_schema.erl index e8cc32f99..9efde5afc 100644 --- a/apps/emqx_statsd/src/emqx_statsd_schema.erl +++ b/apps/emqx_statsd/src/emqx_statsd_schema.erl @@ -21,8 +21,6 @@ -behaviour(hocon_schema). --export([to_ip_port/1]). - -export([ namespace/0, roots/0, @@ -30,8 +28,6 @@ desc/1 ]). --typerefl_from_string({ip_port/0, emqx_statsd_schema, to_ip_port}). - namespace() -> "statsd". roots() -> ["statsd"]. @@ -55,7 +51,7 @@ fields("statsd") -> desc("statsd") -> ?DESC(statsd); desc(_) -> undefined. -server(type) -> emqx_schema:ip_port(); +server(type) -> emqx_schema:host_port(); server(required) -> true; server(default) -> "127.0.0.1:8125"; server(desc) -> ?DESC(?FUNCTION_NAME); @@ -72,14 +68,3 @@ flush_interval(required) -> true; flush_interval(default) -> "10s"; flush_interval(desc) -> ?DESC(?FUNCTION_NAME); flush_interval(_) -> undefined. - -to_ip_port(Str) -> - case string:tokens(Str, ":") of - [Ip, Port] -> - case inet:parse_address(Ip) of - {ok, R} -> {ok, {R, list_to_integer(Port)}}; - _ -> {error, Str} - end; - _ -> - {error, Str} - end. diff --git a/bin/emqx b/bin/emqx index 36762f1b1..811bb2981 100755 --- a/bin/emqx +++ b/bin/emqx @@ -7,7 +7,11 @@ set -euo pipefail DEBUG="${DEBUG:-0}" [ "$DEBUG" -eq 1 ] && set -x -RUNNER_ROOT_DIR="$(cd "$(dirname "$(readlink "$0" || echo "$0")")"/..; pwd -P)" +if [ "$(uname -s)" == 'Darwin' ]; then + RUNNER_ROOT_DIR="$(cd "$(dirname "$(stat -f%R "$0" || echo "$0")")"/..; pwd -P)" +else + RUNNER_ROOT_DIR="$(cd "$(dirname "$(realpath "$0" || echo "$0")")"/..; pwd -P)" +fi # shellcheck disable=SC1090,SC1091 . "$RUNNER_ROOT_DIR"/releases/emqx_vars diff --git a/build b/build index f0356d228..7e5f65b59 100755 --- a/build +++ b/build @@ -370,6 +370,7 @@ export_release_vars() { # compiling the project, so that `emqx_release.erl' picks up # `emqx_vsn' as if it was compiled by rebar3. 
erl_opts+=( "{compile_info,[{emqx_vsn,\"${PKG_VSN}\"}]}" ) + erl_opts+=( "{d,snk_kind,msg}" ) ERL_COMPILER_OPTIONS="[$(join , "${erl_opts[@]}")]" export ERL_COMPILER_OPTIONS diff --git a/changes/v5.0.1-en.md b/changes/v5.0.1-en.md new file mode 100644 index 000000000..f3c301637 --- /dev/null +++ b/changes/v5.0.1-en.md @@ -0,0 +1,53 @@ +# v5.0.1 + +5.0.1 is built on [Erlang/OTP 24.2.1-1](https://github.com/emqx/otp/tree/OTP-24.2.1-1). Same as 5.0.0. + +5.0.0 (like 4.4.x) had Erlang/OTP version number in the package name. +This is because we wanted to release different flavor packages (on different Elixir/Erlang/OTP platforms). + +However the long package names also causes confusion, as users may not know which to choose if there were more than +one presented at the same time. + +Going forward, (starting from 5.0.1), packages will be released in both default (short) and flavored (long) package names. + +For example: `emqx-5.0.1-otp24.2.1-1-ubuntu20.04-amd64.tar.gz`, +but only the default one is presented to the users: `emqx-5.0.1-ubuntu20.04-amd64.tar.gz`. + +In case anyone wants to try a different flavor package, it can be downlowded from the public s3 bucket, +for example: +https://s3.us-west-2.amazonaws.com/packages.emqx/emqx-ce/v5.0.1/emqx-5.0.1-otp24.2.1-1-ubuntu20.04-arm64.tar.gz + +Exceptions: + +* Windows package is always presented with short name (currently on Erlang/OTP 24.2.1). +* Elixir package name is flavored with both Elixir and Erlang/OTP version numbers, + for example: `emqx-5.0.1-elixir1.13.4-otp24.2.1-1-ubuntu20.04-amd64.tar.gz` + +## Enhancements + +* Removed management API auth for prometheus scraping endpoint /api/v5/prometheus/stats [#8299](https://github.com/emqx/emqx/pull/8299) +* Added more TCP options for exhook (gRPC) connections. [#8317](https://github.com/emqx/emqx/pull/8317) +* HTTP Servers used for authentication and authorization will now indicate the result via the response body. [#8374](https://github.com/emqx/emqx/pull/8374) [#8377](https://github.com/emqx/emqx/pull/8377) +* Bulk subscribe/unsubscribe APIs [#8356](https://github.com/emqx/emqx/pull/8356) +* Added exclusive subscription [#8315](https://github.com/emqx/emqx/pull/8315) +* Provide authentication counter metrics [#8352](https://github.com/emqx/emqx/pull/8352) [#8375](https://github.com/emqx/emqx/pull/8375) +* Do not allow admin user self-deletion [#8286](https://github.com/emqx/emqx/pull/8286) +* After restart, ensure to copy `cluster-override.conf` from the clustered node which has the greatest `tnxid`. [#8333](https://github.com/emqx/emqx/pull/8333) + +## Bug fixes + +* A bug fix ported from 4.x: allow deleting subscriptions from `client.subscribe` hookpoint callback result. [#8304](https://github.com/emqx/emqx/pull/8304) [#8347](https://github.com/emqx/emqx/pull/8377) +* Fixed Erlang distribution over TLS [#8309](https://github.com/emqx/emqx/pull/8309) +* Made possible to override authentication configs from environment variables [#8323](https://github.com/emqx/emqx/pull/8309) +* Made authentication passwords in Mnesia database backward compatible to 4.x, so we can support data migration better. 
[#8351](https://github.com/emqx/emqx/pull/8351) +* Fix plugins upload for rpm/deb installations [#8379](https://github.com/emqx/emqx/pull/8379) +* Sync data/authz/acl.conf and data/certs from clustered nodes after a new node joins the cluster [#8369](https://github.com/emqx/emqx/pull/8369) +* Ensure auto-retry of failed resources [#8371](https://github.com/emqx/emqx/pull/8371) +* Fix the issue that the count of `packets.connack.auth_error` is inaccurate when the client uses a protocol version below MQTT v5.0 to access [#8178](https://github.com/emqx/emqx/pull/8178) + +## Others + +* Rate limiter interface is hidden so far, it's subject to a UX redesign. +* QUIC library upgraded to 0.0.14. +* Now the default packages will be released withot otp version number in the package name. +* Renamed config exmpale file name in `etc` dir. diff --git a/changes/v5.0.10-en.md b/changes/v5.0.10-en.md new file mode 100644 index 000000000..74037c407 --- /dev/null +++ b/changes/v5.0.10-en.md @@ -0,0 +1,53 @@ +# v5.0.10 + +## Enhancements + +- Improve `/nodes` API responsiveness [#9221](https://github.com/emqx/emqx/pull/9221). + +- Improve the integration of the `banned` and the `delayed` feature [#9326](https://github.com/emqx/emqx/pull/9326). + Now when publishing a delayed message will check first if its source client is banned, if true, this publish will be ignored. + +- Update `gen_rpc` library to version 3.0 [#9187](https://github.com/emqx/emqx/pull/9187). + +- Improve memory usage on core nodes when bootstrapping a replicant [#9236](https://github.com/emqx/emqx/pull/9236). + +- Improve stability of Prometheus Push Gateway and log errors when POST fails [#9235](http://github.com/emqx/emqx/pull/9235). + +- Now it is possible to opt out VM internal metrics in prometheus stats [#9222](https://github.com/emqx/emqx/pull/9222). + When system load is high, reporting too much metrics data may cause the prometheus stats API timeout. + +- Improve security when converting types such as `binary` `lists` to `atom` types [#9279](https://github.com/emqx/emqx/pull/9279), [#9286](https://github.com/emqx/emqx/pull/9286). + +- Add `/trace/:name/log_detail` HTTP API to return trace file's size and mtime [#9152](https://github.com/emqx/emqx/pull/9152). + +- Add `/status` HTTP API endpoint to api documentation [#9230](https://github.com/emqx/emqx/pull/9230). + +- Binary packages for all platforms are now built on Erlang/OTP version 24.3.4.2 [#9293](https://github.com/emqx/emqx/pull/9293). + +## Bug fixes + +- Fix error log message when `mechanism` is missing in authentication config [#8924](https://github.com/emqx/emqx/pull/8924). + +- Fix HTTP 500 issue when unknown `status` parameter is used in `/gateway` API call [#9225](https://github.com/emqx/emqx/pull/9225). + +- Fixed the HTTP response status code for the `/status` endpoint [#9211](https://github.com/emqx/emqx/pull/9211). + Before the fix, it always returned `200` even if the EMQX application was not running. Now it returns `503` in that case. + +- Fix message delivery related event encoding [#9228](https://github.com/emqx/emqx/pull/9228). + This bug was introduced in v5.0.9. For Rule-Engine's input events like `$events/message_delivered` + and `$events/message_dropped`, if the message was delivered to a shared-subscription, + the encoding (to JSON) of the event will fail. + +- Fix bad HTTP response status code for `/gateways` API, when Gateway name is unknown, it should return `404` instead of `400` [#9268](https://github.com/emqx/emqx/pull/9268). 
+ +- Fix incorrect topic authorize checking of delayed messages [#9290](https://github.com/emqx/emqx/pull/9290). + Now will determine the actual topic of the delayed messages, e.g. `$delayed/1/t/foo` will be treated as `t/foo` in authorize checks. + +- Add property `code` to error response for `/authentication/sources/:type` [9299](https://github.com/emqx/emqx/pull/9299). + +- Align documentation for `/authentication/sources` with what we actually send [9299](https://github.com/emqx/emqx/pull/9299). + +- Fix query string parameter 'node' to `/configs` resource being ignored, return 404 if node does not exist [#9310](https://github.com/emqx/emqx/pull/9310/). + +- Avoid re-dispatching shared-subscription session messages when a session is kicked or taken-over (to a new session) [#9123](https://github.com/emqx/emqx/pull/9123). + diff --git a/changes/v5.0.10-zh.md b/changes/v5.0.10-zh.md new file mode 100644 index 000000000..bbaa758b3 --- /dev/null +++ b/changes/v5.0.10-zh.md @@ -0,0 +1,51 @@ +# v5.0.10 + +## 增强 + +- 提升 `/nodes` API 响应速度 [#9221](https://github.com/emqx/emqx/pull/9221)。 + +- 增强 `封禁` 和 `延迟消息` 这两个功能的集成性 [#9326](https://github.com/emqx/emqx/pull/9326)。 + 现在发送延迟消息前,会先检查消息的来源客户端是否被封禁了,如果是,这条延迟消息将会被忽略。 + +- 升级 `gen_rpc` 库到 3.0 [#9187](https://github.com/emqx/emqx/pull/9187)。 + +- 在引导 `replicant` 节点时,改善 `core` 节点的内存使用量 [#9236](https://github.com/emqx/emqx/pull/9236)。 + +- 增加 Prometheus Push Gateway 的稳定性, 并在 POST 失败时打印错误日志 [#9235](http://github.com/emqx/emqx/pull/9235)。 + +- 可通过配置关闭 prometheus 中的部分内部指标,如果遇到机器负载过高 prometheus 接口返回超时可考虑关闭部分不关心指标,以提高响应速度 [#9222](https://github.com/emqx/emqx/pull/9222)。 + +- 提升 `binary` 、`list` 等类型转换为 `atom` 类型时的安全性 [#9279](https://github.com/emqx/emqx/pull/9279),[#9286](https://github.com/emqx/emqx/pull/9286)。 + +- 增加了 `/trace/:name/log_detail` HTTP API 用于返回 trace 文件的大小和修改日期等信息 [#9152](https://github.com/emqx/emqx/pull/9152)。 + +- HTTP API 文档中增加 `/status` 端点的描述 [#9230](https://github.com/emqx/emqx/pull/9230)。 + +- 为所有平台的二进制包升级了 Erlang/OTP 到 24.3.4.2 [#9293](https://github.com/emqx/emqx/pull/9293)。 + +## Bug fixes + +- 优化认认证配置中 `mechanism` 字段缺失情况下的错误日志 [#8924](https://github.com/emqx/emqx/pull/8924)。 + +- 修复未知 `status` 参数导致 `/gateway` API 发生 HTTP 500 错误的问题 [#9225](https://github.com/emqx/emqx/pull/9225)。 + +- 修正了 `/status` 端点的响应状态代码 [#9211](https://github.com/emqx/emqx/pull/9211)。 + 在此修复前,它总是返回 HTTP 状态码 `200`,即使 EMQX 没有完成启动或正在重启。 现在它在这些情况下会返回状态码 `503`。 + +- 修复规则引擎的消息事件编码失败 [#9228](https://github.com/emqx/emqx/pull/9228)。 + 该问题在 v5.0.9 中引入:带消息的规则引擎事件,例如 `$events/message_delivered` 和 + `$events/message_dropped`, 如果消息事件是共享订阅产生的,在编码(到 JSON 格式)过程中会失败。 + +- 修复 HTTP API `/gateways` 的返回状态码,未知 Gateway 名字应返回 `404` 而不是 `400` [#9268](https://github.com/emqx/emqx/pull/9268)。 + +- 修复延迟消息的主题授权判断不正确的问题 [#9290](https://github.com/emqx/emqx/pull/9290)。 + 现在将会对延迟消息中的真实主题进行授权判断,比如,`$delayed/1/t/foo` 会被当作 `t/foo` 进行判断。 + + +- 为 API `/authentication/sources/:type` 的返回值增加 `code` 字段 [9299](https://github.com/emqx/emqx/pull/9299)。 + +- 对齐文档,`/authentication/sources` 接口的文档仅列出已经支持的资源 [9299](https://github.com/emqx/emqx/pull/9299)。 + +- 修复 `/configs` API 的 'node' 参数的问题,如果节点不存在,则返回 HTTP 状态码 404 [#9310](https://github.com/emqx/emqx/pull/9310/)。 + +- 共享订阅的消息在会话被踢出或者迁移时,不向其他订阅组成员进行转发 [#9123](https://github.com/emqx/emqx/pull/9123)。 diff --git a/changes/v5.0.11-en.md b/changes/v5.0.11-en.md new file mode 100644 index 000000000..9fbc2225f --- /dev/null +++ b/changes/v5.0.11-en.md @@ -0,0 +1,6 @@ +# v5.0.11 + +## Enhancements + +## Bug fixes + diff --git a/changes/v5.0.11-zh.md 
b/changes/v5.0.11-zh.md new file mode 100644 index 000000000..cea0f10fb --- /dev/null +++ b/changes/v5.0.11-zh.md @@ -0,0 +1,5 @@ +# v5.0.11 + +## 增强 + +## 修复 diff --git a/changes/v5.0.2-en.md b/changes/v5.0.2-en.md new file mode 100644 index 000000000..92e15fe4f --- /dev/null +++ b/changes/v5.0.2-en.md @@ -0,0 +1,18 @@ +# v5.0.2 + +Announcement: EMQX team has decided to stop supporting relup for opensource edition. +Going forward, it will be an enterprise-only feature. + +Main reason: relup requires carefully crafted upgrade instructions from ALL previous versions. + +For example, 4.3 is now at 4.3.16, we have `4.3.0->4.3.16`, `4.3.1->4.3.16`, ... 16 such upgrade paths in total to maintain. +This had been the biggest obstacle for EMQX team to act agile enough in delivering enhancements and fixes. + +## Enhancements + +## Bug fixes + +* Fixed a typo in `bin/emqx` which affects MacOs release when trying to enable Erlang distribution over TLS [#8398](https://github.com/emqx/emqx/pull/8398) +* Restricted shell was accidentally disabled in 5.0.1, it has been added back. [#8396](https://github.com/emqx/emqx/pull/8396) + + diff --git a/changes/v5.0.3-en.md b/changes/v5.0.3-en.md new file mode 100644 index 000000000..df09e1db5 --- /dev/null +++ b/changes/v5.0.3-en.md @@ -0,0 +1,12 @@ +# v5.0.3 + +## Bug fixes + +* Websocket listener failed to read headers `X-Forwarded-For` and `X-Forwarded-Port` [#8415](https://github.com/emqx/emqx/pull/8415) +* Deleted `cluster_singleton` from MQTT bridge config document. This config is no longer applicable in 5.0 [#8407](https://github.com/emqx/emqx/pull/8407) +* Fix `emqx/emqx:latest` docker image publish to use the Erlang flavor, but not Elixir flavor [#8414](https://github.com/emqx/emqx/pull/8414) +* Changed the `exp` field in JWT auth to be optional rather than required to fix backwards compatability with 4.X releases. [#8425](https://github.com/emqx/emqx/pull/8425) + +## Enhancements + +* Improve the speed of dashboard's HTTP API routing rule generation, which sometimes causes timeout [#8438](https://github.com/emqx/emqx/pull/8438) diff --git a/changes/v5.0.4-en.md b/changes/v5.0.4-en.md new file mode 100644 index 000000000..a2531acd1 --- /dev/null +++ b/changes/v5.0.4-en.md @@ -0,0 +1,37 @@ +# v5.0.4 + +## Enhancements + +* Improve the dashboard listener startup log, the listener name is no longer spliced with port information, + and the colon(:) is no longer displayed when IP is not specified. [#8480](https://github.com/emqx/emqx/pull/8480) +* Remove `/configs/listeners` API, use `/listeners/` instead. [#8485](https://github.com/emqx/emqx/pull/8485) +* Optimize performance of builtin database operations in processes with long message queue [#8439](https://github.com/emqx/emqx/pull/8439) +* Improve authentication tracing. [#8554](https://github.com/emqx/emqx/pull/8554) +* Standardize the '/listeners' and `/gateway//listeners` API fields. + It will introduce some incompatible updates, see [#8571](https://github.com/emqx/emqx/pull/8571) +* Add option to perform GC on connection process after TLS/SSL handshake is performed. [#8637](https://github.com/emqx/emqx/pull/8637) + +## Bug fixes + +* The `data/configs/cluster-override.conf` is cleared to 0KB if `hocon_pp:do/2` failed [commits/71f64251](https://github.com/emqx/emqx/pull/8443/commits/71f642518a683cc91a32fd542aafaac6ef915720) +* Improve the health_check for webhooks. 
+ Prior to this change, the webhook only checks the connectivity of the TCP port using `gen_tcp:connect/2`, so + if it's a HTTPs server, we didn't check if TLS handshake was successful. + [commits/6b45d2ea](https://github.com/emqx/emqx/commit/6b45d2ea9fde6d3b4a5b007f7a8c5a1c573d141e) +* The `created_at` field of rules is missing after emqx restarts. [commits/5fc09e6b](https://github.com/emqx/emqx/commit/5fc09e6b950c340243d7be627a0ce1700691221c) +* The rule engine's jq function now works even when the path to the EMQX install dir contains spaces [jq#35](https://github.com/emqx/jq/pull/35) [#8455](https://github.com/emqx/emqx/pull/8455) +* Avoid applying any ACL checks on superusers [#8452](https://github.com/emqx/emqx/pull/8452) +* Fix statistics related system topic name error +* Fix AuthN JWKS SSL schema. Using schema in `emqx_schema`. [#8458](https://github.com/emqx/emqx/pull/8458) +* `sentinel` field should be required when AuthN/AuthZ Redis using sentinel mode. [#8458](https://github.com/emqx/emqx/pull/8458) +* Fix bad swagger format. [#8517](https://github.com/emqx/emqx/pull/8517) +* Fix `chars_limit` is not working when `formatter` is `json`. [#8518](http://github.com/emqx/emqx/pull/8518) +* Ensuring that exhook dispatches the client events are sequential. [#8530](https://github.com/emqx/emqx/pull/8530) +* Avoid using RocksDB backend for persistent sessions when such backend is unavailable. [#8528](https://github.com/emqx/emqx/pull/8528) +* Fix AuthN `cert_subject` and `cert_common_name` placeholder rendering failure. [#8531](https://github.com/emqx/emqx/pull/8531) +* Support listen on an IPv6 address, e.g: [::1]:1883 or ::1:1883. [#8547](https://github.com/emqx/emqx/pull/8547) +* GET '/rules' support for pagination and fuzzy search. [#8472](https://github.com/emqx/emqx/pull/8472) + **‼️ Note** : The previous API only returns array: `[RuleObj1,RuleObj2]`, after updating, it will become + `{"data": [RuleObj1,RuleObj2], "meta":{"count":2, "limit":100, "page":1}`, + which will carry the paging meta information. +* Fix the issue that webhook leaks TCP connections. [ehttpc#34](https://github.com/emqx/ehttpc/pull/34), [#8580](https://github.com/emqx/emqx/pull/8580) diff --git a/changes/v5.0.5-en.md b/changes/v5.0.5-en.md new file mode 100644 index 000000000..9c462545d --- /dev/null +++ b/changes/v5.0.5-en.md @@ -0,0 +1,17 @@ +# v5.0.5 + +## Enhancements + +* Add `bootstrap_users_file` configuration to add default Dashboard username list, which is only added when EMQX is first started. +* The license is now copied to all nodes in the cluster when it's reloaded. [#8598](https://github.com/emqx/emqx/pull/8598) +* Added a HTTP API to manage licenses. [#8610](https://github.com/emqx/emqx/pull/8610) +* Updated `/nodes` API node_status from `Running/Stopped` to `running/stopped`. [#8642](https://github.com/emqx/emqx/pull/8642) +* Improve handling of placeholder interpolation errors [#8635](https://github.com/emqx/emqx/pull/8635) +* Better logging on unknown object IDs. [#8670](https://github.com/emqx/emqx/pull/8670) +* The bind option support `:1883` style. [#8758](https://github.com/emqx/emqx/pull/8758) + +## Bug fixes + +* Allow changing the license type from key to file (and vice-versa). [#8598](https://github.com/emqx/emqx/pull/8598) +* Add back http connector config keys `max_retries` `retry_interval` as deprecated fields [#8672](https://github.com/emqx/emqx/issues/8672) + This caused upgrade failure in 5.0.4, because it would fail to boot on configs created from older version. 
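As a side note to the webhook health_check item in the v5.0.4 changelog above: the difference is between a plain TCP probe and a probe that completes the TLS handshake. A hedged sketch of the two checks (hypothetical host, port and SSL options; not the actual connector code):

```erlang
%% Plain TCP probe: succeeds even if the peer is an HTTPS server whose TLS
%% handshake would fail (wrong certificates, protocol mismatch, ...).
tcp_alive(Host, Port) ->
    case gen_tcp:connect(Host, Port, [], 5000) of
        {ok, Sock} -> gen_tcp:close(Sock), true;
        {error, _} -> false
    end.

%% TLS-aware probe: only succeeds when the full handshake completes.
tls_alive(Host, Port, SslOpts) ->
    case ssl:connect(Host, Port, SslOpts, 5000) of
        {ok, Sock} -> ssl:close(Sock), true;
        {error, _} -> false
    end.
```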
diff --git a/changes/v5.0.6-en.md b/changes/v5.0.6-en.md new file mode 100644 index 000000000..342cb67e2 --- /dev/null +++ b/changes/v5.0.6-en.md @@ -0,0 +1,5 @@ +# v5.0.6 + +## Bug fixes + +* Upgrade Dashboard version to fix an issue where the node status was not displayed correctly. [#8771](https://github.com/emqx/emqx/pull/8771) diff --git a/changes/v5.0.7-en.md b/changes/v5.0.7-en.md new file mode 100644 index 000000000..cfda78fb1 --- /dev/null +++ b/changes/v5.0.7-en.md @@ -0,0 +1,12 @@ +# v5.0.7 + +## Enhancements + +* Do not auto-populate default SSL cipher suites, so that the configs are less bloated. [#8769](https://github.com/emqx/emqx/pull/8769) + +## Bug fixes + +* Remove `will_msg` (not used) field from the client API. [#8721](https://github.com/emqx/emqx/pull/8721) +* Fix `$queue` topic name error in management API return. [#8728](https://github.com/emqx/emqx/pull/8728) +* Fix race condition which may cause `client.connected` and `client.disconnected` out of order. [#8625](https://github.com/emqx/emqx/pull/8625) +* Fix quic listener default idle timeout's type. [#8826](https://github.com/emqx/emqx/pull/8826) diff --git a/changes/v5.0.8-en.md b/changes/v5.0.8-en.md new file mode 100644 index 000000000..e05c3b89d --- /dev/null +++ b/changes/v5.0.8-en.md @@ -0,0 +1,28 @@ +# v5.0.8 + +## Enhancements + +* Print a warning message when boot with the default (insecure) Erlang cookie. [#8905](https://github.com/emqx/emqx/pull/8905) +* Change the `/gateway` API path to plural form. [#8823](https://github.com/emqx/emqx/pull/8823) +* Don't allow updating config items when they already exist in `local-override.conf`. [#8851](https://github.com/emqx/emqx/pull/8851) +* Remove `node.etc_dir` from emqx.conf, because it is never used. + Also allow user to customize the logging directory [#8892](https://github.com/emqx/emqx/pull/8892) +* Added a new API `POST /listeners` for creating listener. [#8876](https://github.com/emqx/emqx/pull/8876) +* Close ExProto client process immediately if it's keepalive timeouted. [#8866](https://github.com/emqx/emqx/pull/8866) +* Upgrade grpc-erl driver to 0.6.7 to support batch operation in sending stream. [#8866](https://github.com/emqx/emqx/pull/8866) + + +## Bug fixes + +* Fix exhook `client.authorize` never being execauted. [#8780](https://github.com/emqx/emqx/pull/8780) +* Fix JWT plugin don't support non-integer timestamp claims. [#8867](https://github.com/emqx/emqx/pull/8867) +* Avoid publishing will message when client fails to auhtenticate. [#8887](https://github.com/emqx/emqx/pull/8887) +* Speed up dispatching of shared subscription messages in a cluster [#8893](https://github.com/emqx/emqx/pull/8893) +* Fix the extra / prefix when CoAP gateway parsing client topics. [#8658](https://github.com/emqx/emqx/pull/8658) +* Speed up updating the configuration, When some nodes in the cluster are down. [#8857](https://github.com/emqx/emqx/pull/8857) +* Fix delayed publish inaccurate caused by os time change. [#8926](https://github.com/emqx/emqx/pull/8926) +* Fix that EMQX can't start when the retainer is disabled [#8911](https://github.com/emqx/emqx/pull/8911) +* Fix that redis authn will deny the unknown users [#8934](https://github.com/emqx/emqx/pull/8934) +* Fix ExProto UDP client keepalive checking error. + This causes the clients to not expire as long as a new UDP packet arrives [#8866](https://github.com/emqx/emqx/pull/8866) +* Fix that MQTT Bridge message payload could be empty string. 
[#8949](https://github.com/emqx/emqx/pull/8949) diff --git a/changes/v5.0.9-en.md b/changes/v5.0.9-en.md new file mode 100644 index 000000000..c6d0b5192 --- /dev/null +++ b/changes/v5.0.9-en.md @@ -0,0 +1,34 @@ +# v5.0.9 + +## Enhancements + +- Add `cert_common_name` and `cert_subject` placeholder support for authz_http and authz_mongo [#8973](https://github.com/emqx/emqx/pull/8973). + +- Use milliseconds internally in emqx_delayed to store the publish time, improving precision [#9060](https://github.com/emqx/emqx/pull/9060). + +- More rigorous checking of flapping to improve stability of the system [#9136](https://github.com/emqx/emqx/pull/9136). + +- No message echo for the message publish APIs [#9155](https://github.com/emqx/emqx/pull/9155). + Prior to this fix, the message publish APIs (`api/v5/publish` and `api/v5/publish/bulk`) echoed the message back to the client in the HTTP body. + After this change, only the message ID is sent back. + +## Bug fixes + +- Check ACLs for the last will testament topic before publishing the message [#8930](https://github.com/emqx/emqx/pull/8930). + +- Fix a GET /listeners API crash when some nodes in a cluster are still loading their configs [#9002](https://github.com/emqx/emqx/pull/9002). + +- Fix empty variable interpolation in authentication and authorization [#8963](https://github.com/emqx/emqx/pull/8963). + Placeholders for undefined variables are now rendered as empty strings and no longer cause errors. + +- Fix the latency statistics error of the slow subscription stats [#8986](https://github.com/emqx/emqx/pull/8986). + Prior to this change, when `stats_type` was `internal` or `response`, the begin timestamp was taken at the wrong precision. + +- Fix shared subscription message re-dispatches [#9104](https://github.com/emqx/emqx/pull/9104). + - When discarding QoS 2 inflight messages, there were excessive logs. + - For wildcard deliveries, the re-dispatch used the wrong topic (the publishing topic + instead of the subscribing topic), which caused messages to be lost when dispatching. + +- Upgrade the HTTP client `gun` from 1.3.7 to [1.3.9](https://github.com/emqx/gun/tree/1.3.9). + Prior to this fix, long-lived HTTPS connections for HTTP auth or webhook integrations + could stall indefinitely, causing massive timeouts for HTTP requests.
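To make the "no message echo" change above concrete, here is a hedged sketch of publishing over the HTTP API; the credentials, host, and exact response fields are assumptions — the changelog only states that the message ID, not the full message, is returned since v5.0.9.

```
#!/usr/bin/env bash
# Publish one message via the HTTP API (api/v5/publish).
# Since v5.0.9 the response body no longer echoes the published message back.
set -euo pipefail

curl -s -u 'my-api-key:my-api-secret' \
  -H 'Content-Type: application/json' \
  -X POST 'http://127.0.0.1:18083/api/v5/publish' \
  -d '{"topic": "t/1", "qos": 1, "payload": "hello"}'
```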
diff --git a/changes/v5.0.9-zh.md b/changes/v5.0.9-zh.md new file mode 100644 index 000000000..460d79941 --- /dev/null +++ b/changes/v5.0.9-zh.md @@ -0,0 +1,28 @@ +# v5.0.9 + +## 增强 + +- 为 `authz_http` 和 `authz_mongo` 增加了 `cert_common_name` 和 `cert_subject` 两个占位符 [#8973](https://github.com/emqx/emqx/pull/8973)。 + +- 统一使用 Erlang 虚拟机的时间,而不是系统时间,可以避免系统时间修改后导致的延迟发布不准确问题 [#9060](https://github.com/emqx/emqx/pull/9060)。 + +- 更严格的 flapping 检测,认证失败等也会进行计数 [#9136](https://github.com/emqx/emqx/pull/9136)。 + +## 修复 + +- 遗嘱消息发布前进行 ACL 检查 [#8930](https://github.com/emqx/emqx/pull/8930)。 + +- 在集群环境下,当有节点还没有完全初始化好配置时,`GET /listeners` 可能会返回 HTTP 500 的错误 [#9002](https://github.com/emqx/emqx/pull/9002)。 + +- 认证和鉴权的占位符替换中,如果没有找到匹配的值,使用空字符串代替,而不是抛出一异常 [#8963](https://github.com/emqx/emqx/pull/8963)。 + +- 慢订阅统计中时间单位用错的问题 [#8986](https://github.com/emqx/emqx/pull/8986)。 + 当统计类型(`stats_type`)是 `internal` 或者 `response` 时,起始时间戳的精度使用错误。 + +- 共享订阅消息重新派发 [#9104](https://github.com/emqx/emqx/pull/9104)。 + - 当 QoS 2 的 inflight 消息被丢弃时,产生了大量的 warning 日志,修复后不再打印。 + - 通配符订阅的共享订阅消息在重新派发时,使用了消息发布时的主题,而不是订阅的通配符主题选择 + 订阅组中的其他成员,导致转发失败。 + +- HTTP 客户端 (`gun`) 从 1.3.7 升级到 [1.3.9](https://github.com/emqx/gun/tree/1.3.9)。 + 此次修复前,HTTP 认证和 webhook 等使用 HTTPS 客户端长连接的后端可能会进入一个无限等待状态,导致大量超时发生。 diff --git a/deploy/charts/emqx-enterprise/README.md b/deploy/charts/emqx-enterprise/README.md index 9c3762fdd..a579af70d 100644 --- a/deploy/charts/emqx-enterprise/README.md +++ b/deploy/charts/emqx-enterprise/README.md @@ -60,10 +60,9 @@ The following table lists the configurable parameters of the emqx chart and thei | `service.type` | Kubernetes Service type. | ClusterIP | | `service.mqtt` | Port for MQTT. | 1883 | | `service.mqttssl` | Port for MQTT(SSL). | 8883 | -| `service.mgmt` | Port for mgmt API. | 8081 | | `service.ws` | Port for WebSocket/HTTP. | 8083 | | `service.wss` | Port for WSS/HTTPS. | 8084 | -| `service.dashboard` | Port for dashboard. | 18083 | +| `service.dashboard` | Port for dashboard and API. | 18083 | | `service.nodePorts.mqtt` | Kubernetes node port for MQTT. | nil | | `service.nodePorts.mqttssl` | Kubernetes node port for MQTT(SSL). | nil | | `service.nodePorts.mgmt` | Kubernetes node port for mgmt API. | nil | diff --git a/deploy/charts/emqx-enterprise/templates/certificate.yaml b/deploy/charts/emqx-enterprise/templates/certificate.yaml index 36b7f6521..9a2ed969a 100644 --- a/deploy/charts/emqx-enterprise/templates/certificate.yaml +++ b/deploy/charts/emqx-enterprise/templates/certificate.yaml @@ -1,4 +1,4 @@ -{{- if and (.Values.ssl.enable) (not .Values.ssl.useExisting) -}} +{{- if and (.Values.ssl.enabled) (not .Values.ssl.useExisting) -}} --- apiVersion: cert-manager.io/v1 kind: Certificate diff --git a/deploy/charts/emqx-enterprise/values.yaml b/deploy/charts/emqx-enterprise/values.yaml index aa61a62ea..7827d6afb 100644 --- a/deploy/charts/emqx-enterprise/values.yaml +++ b/deploy/charts/emqx-enterprise/values.yaml @@ -123,19 +123,16 @@ service: ## Port for MQTT(SSL) ## mqttssl: 8883 - ## Port for mgmt API - ## - mgmt: 8081 ## Port for WebSocket/HTTP ## ws: 8083 ## Port for WSS/HTTPS ## wss: 8084 - ## Port for dashboard + ## Port for dashboard and API ## dashboard: 18083 - ## Port for dashboard HTTPS + ## Port for dashboard and API over HTTPS ## # dashboardtls: 18084 ## Specify the nodePort(s) value for the LoadBalancer and NodePort service types. 
diff --git a/deploy/charts/emqx/Chart.yaml b/deploy/charts/emqx/Chart.yaml index fa265e663..b42ca84d0 100644 --- a/deploy/charts/emqx/Chart.yaml +++ b/deploy/charts/emqx/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 5.0.8 +version: 5.0.10 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 5.0.8 +appVersion: 5.0.10 diff --git a/deploy/charts/emqx/README.md b/deploy/charts/emqx/README.md index 9c3762fdd..a579af70d 100644 --- a/deploy/charts/emqx/README.md +++ b/deploy/charts/emqx/README.md @@ -60,10 +60,9 @@ The following table lists the configurable parameters of the emqx chart and thei | `service.type` | Kubernetes Service type. | ClusterIP | | `service.mqtt` | Port for MQTT. | 1883 | | `service.mqttssl` | Port for MQTT(SSL). | 8883 | -| `service.mgmt` | Port for mgmt API. | 8081 | | `service.ws` | Port for WebSocket/HTTP. | 8083 | | `service.wss` | Port for WSS/HTTPS. | 8084 | -| `service.dashboard` | Port for dashboard. | 18083 | +| `service.dashboard` | Port for dashboard and API. | 18083 | | `service.nodePorts.mqtt` | Kubernetes node port for MQTT. | nil | | `service.nodePorts.mqttssl` | Kubernetes node port for MQTT(SSL). | nil | | `service.nodePorts.mgmt` | Kubernetes node port for mgmt API. | nil | diff --git a/deploy/charts/emqx/templates/certificate.yaml b/deploy/charts/emqx/templates/certificate.yaml index 36b7f6521..9a2ed969a 100644 --- a/deploy/charts/emqx/templates/certificate.yaml +++ b/deploy/charts/emqx/templates/certificate.yaml @@ -1,4 +1,4 @@ -{{- if and (.Values.ssl.enable) (not .Values.ssl.useExisting) -}} +{{- if and (.Values.ssl.enabled) (not .Values.ssl.useExisting) -}} --- apiVersion: cert-manager.io/v1 kind: Certificate diff --git a/deploy/charts/emqx/values.yaml b/deploy/charts/emqx/values.yaml index 5bf7377f4..b648f070f 100644 --- a/deploy/charts/emqx/values.yaml +++ b/deploy/charts/emqx/values.yaml @@ -125,19 +125,16 @@ service: ## Port for MQTT(SSL) ## mqttssl: 8883 - ## Port for mgmt API - ## - mgmt: 8081 ## Port for WebSocket/HTTP ## ws: 8083 ## Port for WSS/HTTPS ## wss: 8084 - ## Port for dashboard + ## Port for dashboard and API ## dashboard: 18083 - ## Port for dashboard HTTPS + ## Port for dashboard and API over HTTPS ## # dashboardtls: 18084 ## Specify the nodePort(s) value for the LoadBalancer and NodePort service types. 
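The chart changes above drop the separate `service.mgmt` port (8081); the management API is now served together with the dashboard on `service.dashboard` (18083). Below is a hedged install sketch; the chart repository URL and release name are assumptions, only the `service.*` value names come from the chart itself.

```
#!/usr/bin/env bash
# Install the emqx chart with the combined dashboard/API port reachable via NodePort.
set -euo pipefail

helm repo add emqx https://repos.emqx.io/charts   # assumed chart repository
helm repo update

# service.dashboard now serves both the dashboard UI and the management API;
# the former separate service.mgmt (8081) port no longer exists.
helm install my-emqx emqx/emqx \
  --set service.type=NodePort \
  --set service.dashboard=18083
```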
diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile index 6c5baa391..51b4dbd0c 100644 --- a/deploy/docker/Dockerfile +++ b/deploy/docker/Dockerfile @@ -16,6 +16,7 @@ RUN export PROFILE=${EMQX_NAME%%-elixir} \ && cd /emqx \ && rm -rf $EMQX_LIB_PATH \ && make $EMQX_NAME1 \ + && rm -f $EMQX_REL_PATH/*.tar.gz \ && mkdir -p /emqx-rel \ && mv $EMQX_REL_PATH /emqx-rel @@ -48,15 +49,14 @@ VOLUME ["/opt/emqx/log", "/opt/emqx/data"] # emqx will occupy these port: # - 1883 port for MQTT -# - 8081 for mgmt API # - 8083 for WebSocket/HTTP # - 8084 for WSS/HTTPS # - 8883 port for MQTT(SSL) # - 11883 port for internal MQTT/TCP -# - 18083 for dashboard +# - 18083 for dashboard and API # - 4370 default Erlang distribution port # - 5369 for backplain gen_rpc -EXPOSE 1883 8081 8083 8084 8883 11883 18083 4370 5369 +EXPOSE 1883 8083 8084 8883 11883 18083 4370 5369 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"] diff --git a/deploy/docker/Dockerfile.alpine b/deploy/docker/Dockerfile.alpine index 6368a0b51..dc16afc54 100644 --- a/deploy/docker/Dockerfile.alpine +++ b/deploy/docker/Dockerfile.alpine @@ -62,15 +62,14 @@ VOLUME ["/opt/emqx/log", "/opt/emqx/data"] # emqx will occupy these port: # - 1883 port for MQTT -# - 8081 for mgmt API # - 8083 for WebSocket/HTTP # - 8084 for WSS/HTTPS # - 8883 port for MQTT(SSL) # - 11883 port for internal MQTT/TCP -# - 18083 for dashboard +# - 18083 for dashboard and API # - 4370 default Erlang distrbution port # - 5369 for backplain gen_rpc -EXPOSE 1883 8081 8083 8084 8883 11883 18083 4370 5369 +EXPOSE 1883 8083 8084 8883 11883 18083 4370 5369 ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"] diff --git a/mix.exs b/mix.exs index e9f861ce5..39a12769a 100644 --- a/mix.exs +++ b/mix.exs @@ -52,7 +52,7 @@ defmodule EMQXUmbrella.MixProject do {:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true}, {:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.4", override: true}, - {:ekka, github: "emqx/ekka", tag: "0.13.5", override: true}, + {:ekka, github: "emqx/ekka", tag: "0.13.6", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.7", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.7", override: true}, @@ -72,7 +72,7 @@ defmodule EMQXUmbrella.MixProject do {:esasl, github: "emqx/esasl", tag: "0.2.0"}, {:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"}, # in conflict by ehttpc and emqtt - {:gun, github: "emqx/gun", tag: "1.3.7", override: true}, + {:gun, github: "emqx/gun", tag: "1.3.9", override: true}, # in conflict by emqx_connectior and system_monitor {:epgsql, github: "emqx/epgsql", tag: "4.7-emqx.2", override: true}, # in conflict by mongodb and eredis_cluster @@ -639,7 +639,7 @@ defmodule EMQXUmbrella.MixProject do defp jq_dep() do if enable_jq?(), - do: [{:jq, github: "emqx/jq", tag: "v0.3.6", override: true}], + do: [{:jq, github: "emqx/jq", tag: "v0.3.8", override: true}], else: [] end diff --git a/pkg-vsn.sh b/pkg-vsn.sh index 7f50f03e2..281160de8 100755 --- a/pkg-vsn.sh +++ b/pkg-vsn.sh @@ -73,6 +73,12 @@ while [ "$#" -gt 0 ]; do esac done +# return immediately if version is already set +if [[ "${PKG_VSN:-novalue}" != novalue && "${LONG_VERSION:-novalue}" != 'yes' ]]; then + echo "$PKG_VSN" + exit 0 +fi + case "${PROFILE}" in *enterprise*) RELEASE_EDITION="EMQX_RELEASE_EE" diff --git a/rebar.config b/rebar.config index 505c475cc..c2e62d3aa 100644 --- a/rebar.config +++ b/rebar.config @@ -48,13 +48,13 @@ , {redbug, 
"2.0.7"} , {gpb, "4.19.5"} %% gpb only used to build, but not for release, pin it here to avoid fetching a wrong version due to rebar plugins scattered in all the deps , {typerefl, {git, "https://github.com/ieQu1/typerefl", {tag, "0.9.1"}}} - , {gun, {git, "https://github.com/emqx/gun", {tag, "1.3.7"}}} + , {gun, {git, "https://github.com/emqx/gun", {tag, "1.3.9"}}} , {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.4.0"}}} , {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}} , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}} - , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.5"}}} + , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.6"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.7"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.7"}}} diff --git a/rebar.config.erl b/rebar.config.erl index f745b5cca..dd340fa69 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -42,7 +42,7 @@ quicer() -> {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.16"}}}. jq() -> - {jq, {git, "https://github.com/emqx/jq", {tag, "v0.3.6"}}}. + {jq, {git, "https://github.com/emqx/jq", {tag, "v0.3.8"}}}. deps(Config) -> {deps, OldDeps} = lists:keyfind(deps, 1, Config), diff --git a/rel/emqx_conf.template.en.md b/rel/emqx_conf.template.en.md index 76d25680b..46ff2b36b 100644 --- a/rel/emqx_conf.template.en.md +++ b/rel/emqx_conf.template.en.md @@ -1,4 +1,4 @@ -EMQX configuration file is in [HOCON](https://github.com/emqx/hocon) format. +EMQX configuration files are in [HOCON](https://github.com/emqx/hocon) format. HOCON, or Human-Optimized Config Object Notation is a format for human-readable data, and a superset of JSON. @@ -7,7 +7,7 @@ and a superset of JSON. EMQX configuration consists of 3 layers. From bottom up: -1. Immutable base: `emqx.conf` + `EMQX_` prefixed environment variables.
+1. Immutable base: `emqx.conf` + `EMQX_` prefixed environment variables.
Changes in this layer require a full node restart to take effect. 1. Cluster overrides: `$EMQX_NODE__DATA_DIR/configs/cluster-override.conf` 1. Local node overrides: `$EMQX_NODE__DATA_DIR/configs/local-override.conf` @@ -94,14 +94,14 @@ Complex types define data 'boxes' which may contain other complex data or primitive values. There are quite some different primitive types, to name a few: -* `atom()` -* `boolean()` -* `string()` -* `integer()` -* `float()` -* `number()` -* `binary()` # another format of string() -* `emqx_schema:duration()` # time duration, another format of integer() +* `atom()`. +* `boolean()`. +* `string()`. +* `integer()`. +* `float()`. +* `number()`. +* `binary()`, another format of string(). +* `emqx_schema:duration()`, time duration, another format of integer() * ... ::: tip Tip @@ -143,7 +143,19 @@ to even set complex values from environment variables. For example, this environment variable sets an array value. ``` -export EMQX_LISTENERS__SSL__L1__AUTHENTICATION__SSL__CIPHERS="[\"TLS_AES_256_GCM_SHA384\"]" +export EMQX_LISTENERS__SSL__L1__AUTHENTICATION__SSL__CIPHERS='["TLS_AES_256_GCM_SHA384"]' +``` + +However this also means a string value should be quoted if it happens to contain special +characters such as `=` and `:`. + +For example, a string value `"localhost:1883"` would be +parsed into object (struct): `{"localhost": 1883}`. + +To keep it as a string, one should quote the value like below: + +``` +EMQX_BRIDGES__MQTT__MYBRIDGE__CONNECTOR_SERVER='"localhost:1883"' ``` ::: tip Tip @@ -236,9 +248,9 @@ authentication=[{enable=true}] #### TLS/SSL ciphers -Starting from v5.0.6, EMQX no longer pre-populate the ciphers list with a default +Starting from v5.0.6, EMQX no longer pre-populates the ciphers list with a default set of cipher suite names. -Instead, the default ciphers are applyed at runtime when starting the listener +Instead, the default ciphers are applied at runtime when starting the listener for servers, or when establishing a TLS connection as a client. Below are the default ciphers selected by EMQX. diff --git a/rel/emqx_conf.template.zh.md b/rel/emqx_conf.template.zh.md index ac4c5ce39..9b430da36 100644 --- a/rel/emqx_conf.template.zh.md +++ b/rel/emqx_conf.template.zh.md @@ -1,28 +1,28 @@ -EMQX的配置文件格式是 [HOCON](https://github.com/emqx/hocon) . +EMQX的配置文件格式是 [HOCON](https://github.com/emqx/hocon) 。 HOCON(Human-Optimized Config Object Notation)是一个JSON的超集,非常适用于易于人类读写的配置数据存储。 ## 分层结构 EMQX的配置文件可分为三层,自底向上依次是: -1. 不可变的基础层 `emqx.conf` 加上 `EMQX_` 前缀的环境变量.
+1. 不可变的基础层 `emqx.conf` 加上 `EMQX_` 前缀的环境变量。
修改这一层的配置之后,需要重启节点来使之生效。 1. 集群范围重载层:`$EMQX_NODE__DATA_DIR/configs/cluster-override.conf` 1. 节点本地重载层:`$EMQX_NODE__DATA_DIR/configs/local-override.conf` -如果环境变量 `$EMQX_NODE__DATA_DIR` 没有设置,那么该目录会从 emqx.conf 的 `node.data_dir`配置中读取。 +如果环境变量 `$EMQX_NODE__DATA_DIR` 没有设置,那么该目录会从 `emqx.conf` 的 `node.data_dir` 配置中读取。 配置文件 `cluster-override.conf` 的内容会在运行时被EMQX重写。 这些重写发生在 dashboard UI,管理HTTP API,或者CLI对集群配置进行修改时。 当EMQX运行在集群中时,一个EMQX节点重启之后,会从集群中其他节点复制该文件内容到本地。 :::tip Tip -有些配置项是不能被重载的(例如 `node.name`). +有些配置项是不能被重载的(例如 `node.name`)。 配置项如果有 `mapping: path.to.boot.config.key` 这个属性, -则不能被添加到重载文件中 `*-override.conf` 中。 +则不能被添加到重载文件 `*-override.conf` 中。 ::: -更多的重载规则,请参考下文 [配置重载规则](#配置重载规则). +更多的重载规则,请参考下文 [配置重载规则](#配置重载规则)。 ## 配置文件语法 @@ -70,7 +70,7 @@ EMQX的配置文件中,有4中复杂数据结构类型,它们分别是: 1. Struct:结构体都是有类型名称的,结构体中可以有任意多个字段。 结构体和字段的名称由不带特殊字符的全小些字母组成,名称中可以带数字,但不得以数字开头,多个单词可用下划线分隔。 -1. Map: Map与Struct(结构体)类似,但是内部的字段不是预先定义好的. +1. Map: Map 与 Struct(结构体)类似,但是内部的字段不是预先定义好的。 1. Union: 联合 `MemberType1 | MemberType2 | ...`,可以理解为:“不是这个,就是那个” 1. Array: 数组 `[ElementType]` @@ -89,19 +89,19 @@ myarray.2 = 75 复杂类型定义了数据 "盒子",其中可能包含其他复杂数据或原始值。 有很多不同的原始类型,仅举几个例子。 -* 原子 `atom()` -* 布尔 `boolean()`. -* 字符串 `string()'。 -* 整形 `integer()'。 -* 浮点数 `float()'. -* 数值 `number()'。 -* 二进制编码的字符串 `binary()` # 是 `string()` 的另一种格式 -* 时间间隔 `emqx_schema:duration()` # 时间间隔,是 `integer()` 的另一种格式 +* 原子 `atom()`。 +* 布尔 `boolean()`。 +* 字符串 `string()`。 +* 整形 `integer()`。 +* 浮点数 `float()`。 +* 数值 `number()`。 +* 二进制编码的字符串 `binary()` 是 `string()` 的另一种格式。 +* 时间间隔 `emqx_schema:duration()` 是 `integer()` 的另一种格式。 * ... ::: tip Tip 原始类型的名称大多是自我描述的,所以不需要过多的注释。 -但是有一些不是那么直观的数据类型,则需要配合字段的描述文档进行理解 +但是有一些不是那么直观的数据类型,则需要配合字段的描述文档进行理解。 ::: @@ -110,7 +110,7 @@ myarray.2 = 75 如果我们把EMQX的配置值理解成一个类似目录树的结构,那么类似于文件系统中使用斜杠或反斜杠进行层级分割, EMQX使用的配置路径的层级分割符是 `'.'` -被`'.'`号分割的每一段,则是Struct(结构体)的字段,或Map的key. +被 `'.'` 号分割的每一段,则是 Struct(结构体)的字段,或 Map 的 key。 下面有几个例子: @@ -122,19 +122,29 @@ authentication.1.enable = true ### 环境变量重载 -因为`'.'` 分隔符不能使用于环境变量,所以我们需要使用另一个分割符。EMQX选用的是双下划线`__`。 +因为 `'.'` 分隔符不能使用于环境变量,所以我们需要使用另一个分割符。EMQX选用的是双下划线 `__`。 为了与其他的环境变量有所区分,EMQX还增加了一个前缀 `EMQX_` 来用作环境变量命名空间。 例如 `node.name` 的重载变量名是 `EMQX_NODE__NAME`。 -环境变量的值,是解析成HOCON值的。所以这也使得环境变量可以用来传递复杂数据类型的值。 +环境变量的值,是按 HOCON 值解析的,这也使得环境变量可以用来传递复杂数据类型的值。 例如,下面这个环境变量传入一个数组类型的值。 ``` -export EMQX_LISTENERS__SSL__L1__AUTHENTICATION__SSL__CIPHERS="[\"TLS_AES_256_GCM_SHA384\"]" +export EMQX_LISTENERS__SSL__L1__AUTHENTICATION__SSL__CIPHERS='["TLS_AES_256_GCM_SHA384"]' ``` +这也意味着有些带特殊字符(例如`:` 和 `=`),则需要用双引号对这个值包起来。 + +例如`localhost:1883` 会被解析成一个结构体 `{"localhost": 1883}`。 +想要把它当字符串使用时,就必需使用引号,如下: + +``` +EMQX_BRIDGES__MQTT__MYBRIDGE__CONNECTOR_SERVER='"localhost:1883"' +``` + + ::: tip Tip 未定义的根路径会被EMQX忽略,例如 `EMQX_UNKNOWN_ROOT__FOOBAR` 这个环境变量会被EMQX忽略, 因为 `UNKNOWN_ROOT` 不是预先定义好的根路径。 @@ -144,7 +154,7 @@ export EMQX_LISTENERS__SSL__L1__AUTHENTICATION__SSL__CIPHERS="[\"TLS_AES_256_GCM [warning] unknown_env_vars: ["EMQX_AUTHENTICATION__ENABLED"] ``` -这是因为正确的字段名称是 `enable`,而不是 `enabled`. +这是因为正确的字段名称是 `enable`,而不是 `enabled`。 ::: ### 配置重载规则 @@ -158,8 +168,7 @@ HOCON的值是分层覆盖的,普遍规则如下: #### 结构体 -合并覆盖规则。在如下配置中,最后一行的 `debug` 值会覆盖覆盖原先`level`字段的 `error` 值 -但是`enable` 字段保持不变。 +合并覆盖规则。在如下配置中,最后一行的 `debug` 值会覆盖覆盖原先`level`字段的 `error` 值,但是 `enable` 字段保持不变。 ``` log { console_handler{ @@ -168,7 +177,7 @@ log { } } -## 控制台日志打印先定义为`error`级,后被覆写成`debug`级 +## 控制台日志打印先定义为 `error` 级,后被覆写成 `debug` 级 log.console_handler.level=debug ``` @@ -176,7 +185,7 @@ log.console_handler.level=debug #### Map Map与结构体类似,也是合并覆盖规则。 -如下例子中,`zone1` 的 `max_packet_size` 可以在文件后面覆写. 
+如下例子中,`zone1` 的 `max_packet_size` 可以在文件后面覆写。 ``` zone { diff --git a/scripts/check-nl-at-eof.sh b/scripts/check-nl-at-eof.sh index 32b774b3b..88f8f9c2e 100755 --- a/scripts/check-nl-at-eof.sh +++ b/scripts/check-nl-at-eof.sh @@ -13,6 +13,9 @@ nl_at_eof() { *.png|*rebar3) return ;; + scripts/erlfmt) + return + ;; esac local lastbyte lastbyte="$(tail -c 1 "$file" 2>&1)" diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index 45d32767c..78b211844 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -130,8 +130,13 @@ for file in "${FILES[@]}"; do F_OPTIONS="$F_OPTIONS -f $file" done +# Passing $UID to docker-compose to be used in erlang container +# as owner of the main process to avoid git repo permissions issue. +# Permissions issue happens because we are mounting local filesystem +# where files are owned by $UID to docker container where it's using +# root (UID=0) by default, and git is not happy about it. # shellcheck disable=2086 # no quotes for F_OPTIONS -docker-compose $F_OPTIONS up -d --build +UID_GID="$UID:$UID" docker-compose $F_OPTIONS up -d --build # /emqx is where the source dir is mounted to the Erlang container # in .ci/docker-compose-file/docker-compose.yaml @@ -139,7 +144,11 @@ TTY='' if [[ -t 1 ]]; then TTY='-t' fi -docker exec -i $TTY "$ERLANG_CONTAINER" bash -c 'git config --global --add safe.directory /emqx' + +# rebar and hex cache directory need to be writable by $UID +docker exec -i $TTY -u root:root "$ERLANG_CONTAINER" bash -c "mkdir /.cache && chown $UID:$UID /.cache" +# need to initialize .erlang.cookie manually here because / is not writable by $UID +docker exec -i $TTY -u root:root "$ERLANG_CONTAINER" bash -c "openssl rand -base64 16 > /.erlang.cookie && chown $UID:$UID /.erlang.cookie && chmod 0400 /.erlang.cookie" if [ "$ONLY_UP" = 'yes' ]; then exit 0 @@ -157,7 +166,7 @@ else exit $RESULT else # shellcheck disable=2086 # no quotes for F_OPTIONS - docker-compose $F_OPTIONS down + UID_GID="$UID:$UID" docker-compose $F_OPTIONS down exit $RESULT fi fi diff --git a/scripts/docker-create-push-manifests.sh b/scripts/docker-create-push-manifests.sh index 7c67ae788..db9c01bfb 100755 --- a/scripts/docker-create-push-manifests.sh +++ b/scripts/docker-create-push-manifests.sh @@ -2,23 +2,23 @@ set -exuo pipefail img_amd64=$1 -IsPushLatest=$2 +push_latest=${2:-false} img_arm64=$(echo ${img_amd64} | sed 's/-amd64$/-arm64/g') -img_march=${img_amd64%-amd64} +img_name=${img_amd64%-amd64} docker pull "$img_amd64" docker pull --platform linux/arm64 "$img_arm64" img_amd64_digest=$(docker inspect --format='{{index .RepoDigests 0}}' "$img_amd64") img_arm64_digest=$(docker inspect --format='{{index .RepoDigests 0}}' "$img_arm64") echo "sha256 of amd64 is $img_amd64_digest" echo "sha256 of arm64 is $img_arm64_digest" -docker manifest create "${img_march}" \ +docker manifest create "${img_name}" \ --amend "$img_amd64_digest" \ --amend "$img_arm64_digest" -docker manifest push "${img_march}" +docker manifest push "${img_name}" # PUSH latest if it is a release build -if [ "$IsPushLatest" = "true" ]; then +if [ "$push_latest" = "true" ]; then img_latest=$(echo "$img_arm64" | cut -d: -f 1):latest docker manifest create "${img_latest}" \ --amend "$img_amd64_digest" \ diff --git a/scripts/macos-sign-binaries.sh b/scripts/macos-sign-binaries.sh index a69b9a49b..11b6b734d 100755 --- a/scripts/macos-sign-binaries.sh +++ b/scripts/macos-sign-binaries.sh @@ -52,4 +52,4 @@ codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=r codesign -s 
"${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime "${REL_DIR}"/lib/os_mon-*/priv/bin/{cpu_sup,memsup} codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime "${REL_DIR}"/lib/rocksdb-*/priv/liberocksdb.so codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime "${REL_DIR}"/lib/runtime_tools-*/priv/lib/{dyntrace.so,trace_ip_drv.so,trace_file_drv.so} -codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime "${REL_DIR}"/lib/quicer-*/priv/libquicer_nif.so +find "${REL_DIR}/lib/" -name libquicer_nif.so -exec codesign -s "${APPLE_DEVELOPER_IDENTITY}" -f --verbose=4 --timestamp --options=runtime {} \; diff --git a/scripts/test-node-discovery-dns.sh b/scripts/test-node-discovery-dns.sh index 968269042..e502dbf59 100755 --- a/scripts/test-node-discovery-dns.sh +++ b/scripts/test-node-discovery-dns.sh @@ -1,12 +1,18 @@ #!/usr/bin/env bash -## Test two nodes-cluster discover each other using DNS A records lookup result. +## Test two-nodes cluster discover each other using DNS A records lookup result. set -euo pipefail cd -P -- "$(dirname -- "$0")/.." -IMAGE="${1}" +IMAGE="${1:-}" + +if [ -z "$IMAGE" ]; then + echo "Usage: $0 " + echo "e.g. $0 docker.io/emqx/emqx:5.0.8" + exit 1 +fi NET='test_node_discovery_dns' NODE1='emqx1' @@ -56,7 +62,9 @@ docker run -d -t --name dnsmasq \ --cap-add=NET_ADMIN \ storytel/dnsmasq dnsmasq --no-daemon --log-queries -start_emqx() { +# Node names (the part before '@') should be all the same in the cluster +# e.g. emqx@${IP} +start_emqx_v5() { NAME="$1" IP="$2" DASHBOARD_PORT="$3" @@ -66,6 +74,7 @@ start_emqx() { --ip "$IP" \ --dns "$IP0" \ -p "$DASHBOARD_PORT:18083" \ + -e EMQX_NODE_NAME="emqx@${IP}" \ -e EMQX_LOG__CONSOLE_HANDLER__LEVEL=debug \ -e EMQX_NODE_COOKIE="$COOKIE" \ -e EMQX_cluster__discovery_strategy='dns' \ @@ -74,5 +83,48 @@ start_emqx() { "$IMAGE" } -start_emqx "$NODE1" "$IP1" 18083 -start_emqx "$NODE2" "$IP2" 18084 +## EMQX v4 has different configuration schema: +# EMQX_NODE_NAME="emqx@${IP}": +# This is necessary because 4.x docker entrypoint +# by default uses docker container ID as node name +# (the part before @ of e.g. emqx@172.18.0.101) +# EMQX_cluster__dns__app +# This must be the same as node name in 4.x +# EMQX_cluster__discovery +# This in 5.0 is EMQX_cluster__discovery_strategy +# EMQX_cluster__dns__name +# The DNS domain to lookup for peer nodes +# EMQX_cluster__dns__record_type +# The DNS record type. 
(only 'a' type is tested) +start_emqx_v4() { + NAME="$1" + IP="$2" + APP_NAME="emqx" + DASHBOARD_PORT="$3" + docker run -d -t \ + --name "$NAME" \ + --net "$NET" \ + --ip "$IP" \ + --dns "$IP0" \ + -p "$DASHBOARD_PORT:18083" \ + -e EMQX_NODE_NAME="${APP_NAME}@${IP}" \ + -e EMQX_LOG__LEVEL=debug \ + -e EMQX_NODE_COOKIE="$COOKIE" \ + -e EMQX_cluster__discovery='dns' \ + -e EMQX_cluster__dns__name="$DOMAIN" \ + -e EMQX_cluster__dns__app="${APP_NAME}" \ + -e EMQX_cluster__dns__record_type="a" \ + "$IMAGE" +} + +case "${IMAGE}" in + *emqx:4.*|*emqx-ee:4.*) + start_emqx_v4 "$NODE1" "$IP1" 18083 + start_emqx_v4 "$NODE2" "$IP2" 18084 + ;; + *) + start_emqx_v5 "$NODE1" "$IP1" 18083 + start_emqx_v5 "$NODE2" "$IP2" 18084 + ;; + +esac diff --git a/scripts/update-appup.sh b/scripts/update-appup.sh index 1f2b23435..b962c4a9a 100755 --- a/scripts/update-appup.sh +++ b/scripts/update-appup.sh @@ -99,7 +99,7 @@ else pushd "${PREV_DIR_BASE}/${PREV_TAG}" if [ "$NEW_COPY" = 'no' ]; then REMOTE="$(git remote -v | grep "${GIT_REPO}" | head -1 | awk '{print $1}')" - git fetch "$REMOTE" + git fetch "$REMOTE" --tags --force fi git reset --hard git clean -ffdx
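For reference, a hedged sketch of driving the DNS discovery test script shown above; the image tags are only examples, and the script selects the 4.x or 5.x start function by matching the image name.

```
#!/usr/bin/env bash
set -euo pipefail

# 5.x images go through start_emqx_v5 (EMQX_cluster__discovery_strategy='dns').
./scripts/test-node-discovery-dns.sh docker.io/emqx/emqx:5.0.8

# Images matching *emqx:4.* go through start_emqx_v4 (EMQX_cluster__discovery='dns').
./scripts/test-node-discovery-dns.sh docker.io/emqx/emqx:4.4.10
```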