diff --git a/.ci/docker-compose-file/Makefile.local b/.ci/docker-compose-file/Makefile.local
index 026cc7a1d..d11ab64a6 100644
--- a/.ci/docker-compose-file/Makefile.local
+++ b/.ci/docker-compose-file/Makefile.local
@@ -16,7 +16,7 @@ up:
 	REDIS_TAG=6 \
 	MONGO_TAG=5 \
 	PGSQL_TAG=13 \
-	docker-compose \
+	docker compose \
 		-f .ci/docker-compose-file/docker-compose.yaml \
 		-f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
 		-f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
@@ -28,10 +28,13 @@ up:
 		-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
 		-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
 		-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
-		up -d --build
+		-f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \
+		-f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \
+		-f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
+		up -d --build --remove-orphans

 down:
-	docker-compose \
+	docker compose \
 		-f .ci/docker-compose-file/docker-compose.yaml \
 		-f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
 		-f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
@@ -43,7 +46,10 @@ down:
 		-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
 		-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
 		-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
-		down
+		-f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \
+		-f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \
+		-f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
+		down --remove-orphans

 ct:
 	docker exec -i "$(CONTAINER)" bash -c "rebar3 ct --name 'test@127.0.0.1' -v --suite $(SUITE)"
diff --git a/.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml b/.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml
index 997388aa5..9c03fc65e 100644
--- a/.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml
+++ b/.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml
@@ -1,9 +1,9 @@
 version: '3.9'

 services:
-  redis_server:
+  redis_cluster:
     image: redis:${REDIS_TAG}
-    container_name: redis
+    container_name: redis-cluster
     volumes:
       - ./redis/:/data/conf
     command: bash -c "/bin/bash /data/conf/redis.sh --node cluster && tail -f /var/log/redis-server.log"
diff --git a/.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml b/.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml
index c5cefd9e6..bfbf1a4a3 100644
--- a/.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml
+++ b/.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml
@@ -1,8 +1,8 @@
 version: '3.9'

 services:
-  redis_server:
-    container_name: redis
+  redis_cluster_tls:
+    container_name: redis-cluster-tls
     image: redis:${REDIS_TAG}
     volumes:
       - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt
diff --git a/.ci/docker-compose-file/redis/.gitignore b/.ci/docker-compose-file/redis/.gitignore
index b5947692d..23ffe8469 100644
--- a/.ci/docker-compose-file/redis/.gitignore
+++ b/.ci/docker-compose-file/redis/.gitignore
@@ -1,3 +1,3 @@
-r7000i.log
-r7001i.log
-r7002i.log
+r700?i.log
+nodes.700?.conf
+*.rdb
diff --git a/.ci/docker-compose-file/redis/redis-tls.conf b/.ci/docker-compose-file/redis/redis-tls.conf
index e304c814f..c503dc2e8 100644
--- a/.ci/docker-compose-file/redis/redis-tls.conf
+++ b/.ci/docker-compose-file/redis/redis-tls.conf
@@ -1,11 +1,12 @@
 daemonize yes
 bind 0.0.0.0 ::
 logfile /var/log/redis-server.log
+protected-mode no
+requirepass public
+masterauth public
+
 tls-cert-file /etc/certs/redis.crt
 tls-key-file /etc/certs/redis.key
 tls-ca-cert-file /etc/certs/ca.crt
 tls-replication yes
 tls-cluster yes
-protected-mode no
-requirepass public
-masterauth public
diff --git a/.ci/docker-compose-file/redis/redis.conf b/.ci/docker-compose-file/redis/redis.conf
index 6181925db..484d9abf9 100644
--- a/.ci/docker-compose-file/redis/redis.conf
+++ b/.ci/docker-compose-file/redis/redis.conf
@@ -1,5 +1,6 @@
 daemonize yes
 bind 0.0.0.0 ::
 logfile /var/log/redis-server.log
+protected-mode no
 requirepass public
 masterauth public
diff --git a/.ci/docker-compose-file/redis/redis.sh b/.ci/docker-compose-file/redis/redis.sh
index b7cf62a60..be6462249 100755
--- a/.ci/docker-compose-file/redis/redis.sh
+++ b/.ci/docker-compose-file/redis/redis.sh
@@ -16,13 +16,8 @@ case $key in
     shift # past argument
     shift # past value
     ;;
-    -t)
-    tls="$2"
-    shift # past argument
-    shift # past value
-    ;;
     --tls-enabled)
-    tls=1
+    tls=true
     shift # past argument
     ;;
     *)
@@ -37,69 +32,71 @@ rm -f \
   /data/conf/r7002i.log \
   /data/conf/nodes.7000.conf \
   /data/conf/nodes.7001.conf \
-  /data/conf/nodes.7002.conf ;
+  /data/conf/nodes.7002.conf

-if [ "${node}" = "cluster" ] ; then
-    if $tls ; then
+if [ "$node" = "cluster" ]; then
+    if $tls; then
         redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-        --tls-port 8000 --cluster-enabled yes ;
+        --tls-port 8000 --cluster-enabled yes
         redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-        --tls-port 8001 --cluster-enabled yes;
+        --tls-port 8001 --cluster-enabled yes
         redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-        --tls-port 8002 --cluster-enabled yes;
+        --tls-port 8002 --cluster-enabled yes
     else
-        redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf --cluster-enabled yes;
-        redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf --cluster-enabled yes;
-        redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf --cluster-enabled yes;
+        redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
+        --cluster-enabled yes
+        redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
+        --cluster-enabled yes
+        redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
+        --cluster-enabled yes
     fi
-elif [ "${node}" = "sentinel" ] ; then
-    if $tls ; then
+elif [ "$node" = "sentinel" ]; then
+    if $tls; then
         redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-        --tls-port 8000 --cluster-enabled no;
+        --tls-port 8000 --cluster-enabled no
         redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-        --tls-port 8001 --cluster-enabled no --slaveof "$LOCAL_IP" 8000;
+        --tls-port 8001 --cluster-enabled no --slaveof "$LOCAL_IP" 8000
         redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-        --tls-port 8002 --cluster-enabled no --slaveof "$LOCAL_IP" 8000;
+        --tls-port 8002 --cluster-enabled no --slaveof "$LOCAL_IP" 8000
     else
         redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-        --cluster-enabled no;
+        --cluster-enabled no
         redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-        --cluster-enabled no --slaveof "$LOCAL_IP" 7000;
+        --cluster-enabled no --slaveof "$LOCAL_IP" 7000
         redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-        --cluster-enabled no --slaveof "$LOCAL_IP" 7000;
+        --cluster-enabled no --slaveof "$LOCAL_IP" 7000
     fi
 fi
-REDIS_LOAD_FLG=true;
+
+REDIS_LOAD_FLG=true
 while $REDIS_LOAD_FLG; do
-    sleep 1;
-    redis-cli --pass public --no-auth-warning -p 7000 info 1> /data/conf/r7000i.log 2> /dev/null;
-    if [ -s /data/conf/r7000i.log ]; then
-        :
-    else
-        continue;
+    sleep 1
+    redis-cli --pass public --no-auth-warning -p 7000 info 1> /data/conf/r7000i.log 2> /dev/null
+    if ! [ -s /data/conf/r7000i.log ]; then
+        continue
     fi
-    redis-cli --pass public --no-auth-warning -p 7001 info 1> /data/conf/r7001i.log 2> /dev/null;
-    if [ -s /data/conf/r7001i.log ]; then
-        :
-    else
-        continue;
+    redis-cli --pass public --no-auth-warning -p 7001 info 1> /data/conf/r7001i.log 2> /dev/null
+    if ! [ -s /data/conf/r7001i.log ]; then
+        continue
     fi
     redis-cli --pass public --no-auth-warning -p 7002 info 1> /data/conf/r7002i.log 2> /dev/null;
-    if [ -s /data/conf/r7002i.log ]; then
-        :
-    else
-        continue;
+    if ! [ -s /data/conf/r7002i.log ]; then
+        continue
     fi
-    if [ "${node}" = "cluster" ] ; then
-        if $tls ; then
-            yes "yes" | redis-cli --cluster create "$LOCAL_IP:8000" "$LOCAL_IP:8001" "$LOCAL_IP:8002" --pass public --no-auth-warning --tls true --cacert /etc/certs/ca.crt --cert /etc/certs/redis.crt --key /etc/certs/redis.key;
+    if [ "$node" = "cluster" ] ; then
+        if $tls; then
+            yes "yes" | redis-cli --cluster create "$LOCAL_IP:8000" "$LOCAL_IP:8001" "$LOCAL_IP:8002" \
+                --pass public --no-auth-warning \
+                --tls true --cacert /etc/certs/ca.crt \
+                --cert /etc/certs/redis.crt --key /etc/certs/redis.key
         else
-            yes "yes" | redis-cli --cluster create "$LOCAL_IP:7000" "$LOCAL_IP:7001" "$LOCAL_IP:7002" --pass public --no-auth-warning;
+            yes "yes" | redis-cli --cluster create "$LOCAL_IP:7000" "$LOCAL_IP:7001" "$LOCAL_IP:7002" \
+                --pass public --no-auth-warning
         fi
-    elif [ "${node}" = "sentinel" ] ; then
+    elif [ "$node" = "sentinel" ]; then
         tee /_sentinel.conf>/dev/null << EOF
 port 26379
 bind 0.0.0.0 ::
@@ -107,7 +104,7 @@ daemonize yes
 logfile /var/log/redis-server.log
 dir /tmp
 EOF
-        if $tls ; then
+        if $tls; then
            cat >>/_sentinel.conf<> $GITHUB_ENV
+          echo "EMQX_NAME=${{ matrix.profile[0] }}" >> $GITHUB_ENV
           echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
       - name: Work around https://github.com/actions/checkout/issues/766
         run: |
@@ -75,14 +72,14 @@ jobs:
           ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-pkg
       - uses: actions/upload-artifact@v3
         with:
-          name: ${{ matrix.profile}}-${{ matrix.otp }}-${{ matrix.os }}
-          path: _packages/${{ matrix.profile}}/*
+          name: "${{ matrix.profile[0] }}-${{ matrix.otp }}-${{ matrix.profile[1] }}"
+          path: _packages/${{ matrix.profile[0] }}/*
      - uses: actions/upload-artifact@v3
        with:
-          name: "${{ matrix.profile }}_schema_dump"
+          name: "${{ matrix.profile[0] }}_schema_dump"
           path: |
             scripts/spellcheck
-            _build/${{ matrix.profile }}/lib/emqx_dashboard/priv/www/static/schema.json
+            _build/${{ matrix.profile[0] }}/lib/emqx_dashboard/priv/www/static/schema.json

   windows:
     runs-on: windows-2019
diff --git a/.github/workflows/check_deps_integrity.yaml b/.github/workflows/check_deps_integrity.yaml
index c5c509f0c..2578e078f 100644
--- a/.github/workflows/check_deps_integrity.yaml
+++ b/.github/workflows/check_deps_integrity.yaml
@@ -5,7 +5,7 @@ on: [pull_request, push]

 jobs:
   check_deps_integrity:
     runs-on: ubuntu-20.04
-    container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04

     steps:
       - uses: actions/checkout@v3
diff --git a/.github/workflows/code_style_check.yaml b/.github/workflows/code_style_check.yaml
index bc15d696f..910bb8835 100644
--- a/.github/workflows/code_style_check.yaml
+++ b/.github/workflows/code_style_check.yaml
@@ -5,7 +5,7 @@ on: [pull_request]
 jobs:
   code_style_check:
     runs-on: ubuntu-20.04
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
     steps:
       - uses: actions/checkout@v3
         with:
diff --git a/.github/workflows/elixir_apps_check.yaml b/.github/workflows/elixir_apps_check.yaml
index 8dc9e54cd..e4b93cf80 100644
--- a/.github/workflows/elixir_apps_check.yaml
+++ b/.github/workflows/elixir_apps_check.yaml
@@ -8,7 +8,7 @@ jobs:
   elixir_apps_check:
     runs-on: ubuntu-latest
     # just use the latest builder
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"

     strategy:
       fail-fast: false
diff --git a/.github/workflows/elixir_deps_check.yaml b/.github/workflows/elixir_deps_check.yaml
index 210eda570..9f1d6f3e4 100644
--- a/.github/workflows/elixir_deps_check.yaml
+++ b/.github/workflows/elixir_deps_check.yaml
@@ -7,7 +7,7 @@ on: [pull_request, push]
 jobs:
   elixir_deps_check:
     runs-on: ubuntu-20.04
-    container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04
     steps:
       - name: Checkout
diff --git a/.github/workflows/elixir_release.yml b/.github/workflows/elixir_release.yml
index b93e6a675..0ac1e9155 100644
--- a/.github/workflows/elixir_release.yml
+++ b/.github/workflows/elixir_release.yml
@@ -17,7 +17,8 @@ jobs:
         profile:
           - emqx
           - emqx-enterprise
-    container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04
+
     steps:
       - name: Checkout
         uses: actions/checkout@v3
diff --git a/.github/workflows/run_emqx_app_tests.yaml b/.github/workflows/run_emqx_app_tests.yaml
index 78f35bc67..71b016761 100644
--- a/.github/workflows/run_emqx_app_tests.yaml
+++ b/.github/workflows/run_emqx_app_tests.yaml
@@ -34,7 +34,7 @@ jobs:
             use-self-hosted: false

     runs-on: ${{ matrix.runs-on }}
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir}}-${{ matrix.otp }}-${{ matrix.os }}"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir}}-${{ matrix.otp }}-${{ matrix.os }}"

     defaults:
       run:
diff --git a/.github/workflows/run_fvt_tests.yaml b/.github/workflows/run_fvt_tests.yaml
index 0464b5e50..8870b8257 100644
--- a/.github/workflows/run_fvt_tests.yaml
+++ b/.github/workflows/run_fvt_tests.yaml
@@ -16,7 +16,7 @@ jobs:
   prepare:
     runs-on: ubuntu-20.04
     # prepare source with any OTP version, no need for a matrix
-    container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-alpine3.15.1
+    container: ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-alpine3.15.1

     steps:
       - uses: actions/checkout@v3
@@ -68,7 +68,7 @@ jobs:
       - name: make docker image
         working-directory: source
         env:
-          EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
+          EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
           EMQX_RUNNER: ${{ matrix.os[1] }}
         run: |
           make ${{ matrix.profile }}-docker
@@ -141,7 +141,7 @@ jobs:
       - name: make docker image
         working-directory: source
         env:
-          EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
+          EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
           EMQX_RUNNER: ${{ matrix.os[1] }}
         run: |
           make ${{ matrix.profile }}-docker
diff --git a/.github/workflows/run_relup_tests.yaml b/.github/workflows/run_relup_tests.yaml
index ea0dff9e9..ac2da6f85 100644
--- a/.github/workflows/run_relup_tests.yaml
+++ b/.github/workflows/run_relup_tests.yaml
@@ -16,7 +16,7 @@ on:
 jobs:
   relup_test_plan:
     runs-on: ubuntu-20.04
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
    outputs:
       CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
       OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }}
diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml
index c30de16b1..a21f130d4 100644
--- a/.github/workflows/run_test_cases.yaml
+++ b/.github/workflows/run_test_cases.yaml
@@ -17,7 +17,7 @@ jobs:
   prepare:
     runs-on: aws-amd64
     # prepare source with any OTP version, no need for a matrix
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
     outputs:
       fast_ct_apps: ${{ steps.find_ct_apps.outputs.fast_ct_apps }}
       docker_ct_apps: ${{ steps.find_ct_apps.outputs.docker_ct_apps }}
@@ -104,7 +104,7 @@ jobs:
     defaults:
       run:
         shell: bash
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"

     steps:
       - uses: AutoModality/action-clean@v1
@@ -198,9 +198,6 @@ jobs:
       fail-fast: false
       matrix:
         app: ${{ fromJson(needs.prepare.outputs.fast_ct_apps) }}
-        profile:
-          - emqx
-          - emqx-enterprise
        runs-on:
           - aws-amd64
           - ubuntu-20.04
@@ -213,7 +210,7 @@ jobs:
             use-self-hosted: false

     runs-on: ${{ matrix.runs-on }}
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
     defaults:
       run:
         shell: bash
@@ -252,7 +249,7 @@ jobs:
       - ct
       - ct_docker
     runs-on: ubuntu-20.04
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
     steps:
       - uses: AutoModality/action-clean@v1
       - uses: actions/download-artifact@v3
diff --git a/Makefile b/Makefile
index f9310e636..d6d913a11 100644
--- a/Makefile
+++ b/Makefile
@@ -6,8 +6,8 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d
 export EMQX_DEFAULT_RUNNER = debian:11-slim
 export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
 export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
-export EMQX_DASHBOARD_VERSION ?= v1.1.3-sync-code
-export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.5
+export EMQX_DASHBOARD_VERSION ?= v1.1.3
+export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.7
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
 ifeq ($(OS),Windows_NT)
diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl
index f17a8d3f2..9f03ee307 100644
--- a/apps/emqx/include/emqx_release.hrl
+++ b/apps/emqx/include/emqx_release.hrl
@@ -32,10 +32,10 @@
 %% `apps/emqx/src/bpapi/README.md'

 %% Community edition
--define(EMQX_RELEASE_CE, "5.0.11").
+-define(EMQX_RELEASE_CE, "5.0.12").

 %% Enterprise edition
--define(EMQX_RELEASE_EE, "5.0.0-beta.5").
+-define(EMQX_RELEASE_EE, "5.0.0-beta.6").

 %% the HTTP API version
 -define(EMQX_API_VERSION, "5.0").
diff --git a/apps/emqx/priv/bpapi.versions b/apps/emqx/priv/bpapi.versions
index 9997055dc..1a1bac140 100644
--- a/apps/emqx/priv/bpapi.versions
+++ b/apps/emqx/priv/bpapi.versions
@@ -27,6 +27,7 @@
 {emqx_prometheus,1}.
 {emqx_resource,1}.
 {emqx_retainer,1}.
+{emqx_retainer,2}.
 {emqx_rule_engine,1}.
 {emqx_shared_sub,1}.
 {emqx_slow_subs,1}.
diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config
index d13fda30a..ff27566ec 100644
--- a/apps/emqx/rebar.config
+++ b/apps/emqx/rebar.config
@@ -27,9 +27,9 @@
     {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
     {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}},
-    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.6"}}},
+    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.7"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
-    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.31.2"}}},
+    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.32.0"}}},
     {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
     {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
     {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}
diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src
index e687e9905..c281b11cc 100644
--- a/apps/emqx/src/emqx.app.src
+++ b/apps/emqx/src/emqx.app.src
@@ -3,7 +3,7 @@
     {id, "emqx"},
     {description, "EMQX Core"},
     % strict semver, bump manually!
-    {vsn, "5.0.12"},
+    {vsn, "5.0.13"},
     {modules, []},
     {registered, []},
     {applications, [
diff --git a/apps/emqx/src/emqx_access_control.erl b/apps/emqx/src/emqx_access_control.erl
index 30d56f257..3fa781e6d 100644
--- a/apps/emqx/src/emqx_access_control.erl
+++ b/apps/emqx/src/emqx_access_control.erl
@@ -46,16 +46,32 @@ authenticate(Credential) ->
     NotSuperUser = #{is_superuser => false},
     case emqx_authentication:pre_hook_authenticate(Credential) of
         ok ->
+            inc_authn_metrics(anonymous),
             {ok, NotSuperUser};
         continue ->
-            case run_hooks('client.authenticate', [Credential], {ok, #{is_superuser => false}}) of
-                ok ->
+            case run_hooks('client.authenticate', [Credential], ignore) of
+                ignore ->
+                    inc_authn_metrics(anonymous),
                     {ok, NotSuperUser};
+                ok ->
+                    inc_authn_metrics(ok),
+                    {ok, NotSuperUser};
+                {ok, _AuthResult} = OkResult ->
+                    inc_authn_metrics(ok),
+                    OkResult;
+                {ok, _AuthResult, _AuthData} = OkResult ->
+                    inc_authn_metrics(ok),
+                    OkResult;
+                {error, _Reason} = Error ->
+                    inc_authn_metrics(error),
+                    Error;
+                %% {continue, AuthCache} | {continue, AuthData, AuthCache}
                 Other ->
                     Other
             end;
-        Other ->
-            Other
+        {error, _Reason} = Error ->
+            inc_authn_metrics(error),
+            Error
     end.

 %% @doc Check Authorization
@@ -134,3 +150,11 @@ inc_authz_metrics(deny) ->
     emqx_metrics:inc('authorization.deny');
 inc_authz_metrics(cache_hit) ->
     emqx_metrics:inc('authorization.cache_hit').
+
+inc_authn_metrics(error) ->
+    emqx_metrics:inc('authentication.failure');
+inc_authn_metrics(ok) ->
+    emqx_metrics:inc('authentication.success');
+inc_authn_metrics(anonymous) ->
+    emqx_metrics:inc('authentication.success.anonymous'),
+    emqx_metrics:inc('authentication.success').
diff --git a/apps/emqx/src/emqx_authentication.erl b/apps/emqx/src/emqx_authentication.erl
index 749f5bfd7..ffce81787 100644
--- a/apps/emqx/src/emqx_authentication.erl
+++ b/apps/emqx/src/emqx_authentication.erl
@@ -228,7 +228,6 @@ when
 -spec pre_hook_authenticate(emqx_types:clientinfo()) ->
     ok | continue | {error, not_authorized}.
 pre_hook_authenticate(#{enable_authn := false}) ->
-    inc_authenticate_metric('authentication.success.anonymous'),
     ?TRACE_RESULT("authentication_result", ok, enable_authn_false);
 pre_hook_authenticate(#{enable_authn := quick_deny_anonymous} = Credential) ->
     case maps:get(username, Credential, undefined) of
@@ -242,29 +241,18 @@ pre_hook_authenticate(#{enable_authn := quick_deny_anonymous} = Credential) ->
 pre_hook_authenticate(_) ->
     continue.

-authenticate(#{listener := Listener, protocol := Protocol} = Credential, _AuthResult) ->
+authenticate(#{listener := Listener, protocol := Protocol} = Credential, AuthResult) ->
     case get_authenticators(Listener, global_chain(Protocol)) of
         {ok, ChainName, Authenticators} ->
             case get_enabled(Authenticators) of
                 [] ->
-                    inc_authenticate_metric('authentication.success.anonymous'),
-                    ?TRACE_RESULT("authentication_result", ignore, empty_chain);
+                    ?TRACE_RESULT("authentication_result", AuthResult, empty_chain);
                 NAuthenticators ->
                     Result = do_authenticate(ChainName, NAuthenticators, Credential),
-
-                    case Result of
-                        {stop, {ok, _}} ->
-                            inc_authenticate_metric('authentication.success');
-                        {stop, {error, _}} ->
-                            inc_authenticate_metric('authentication.failure');
-                        _ ->
-                            ok
-                    end,
                     ?TRACE_RESULT("authentication_result", Result, chain_result)
             end;
         none ->
-            inc_authenticate_metric('authentication.success.anonymous'),
-            ?TRACE_RESULT("authentication_result", ignore, no_chain)
+            ?TRACE_RESULT("authentication_result", AuthResult, no_chain)
     end.

 get_authenticators(Listener, Global) ->
@@ -649,7 +637,7 @@ handle_create_authenticator(Chain, Config, Providers) ->
     end.

 do_authenticate(_ChainName, [], _) ->
-    {stop, {error, not_authorized}};
+    {ok, {error, not_authorized}};
 do_authenticate(
     ChainName, [#authenticator{id = ID} = Authenticator | More], Credential
 ) ->
@@ -673,7 +661,7 @@ do_authenticate(
                 _ ->
                     ok
             end,
-            {stop, Result}
+            {ok, Result}
     catch
         Class:Reason:Stacktrace ->
             ?TRACE_AUTHN(warning, "authenticator_error", #{
@@ -947,9 +935,3 @@ to_list(M) when is_map(M) -> [M];
 to_list(L) when is_list(L) -> L.

 call(Call) -> gen_server:call(?MODULE, Call, infinity).
-
-inc_authenticate_metric('authentication.success.anonymous' = Metric) ->
-    emqx_metrics:inc(Metric),
-    emqx_metrics:inc('authentication.success');
-inc_authenticate_metric(Metric) ->
-    emqx_metrics:inc(Metric).
diff --git a/apps/emqx/src/emqx_config.erl b/apps/emqx/src/emqx_config.erl
index fa1c63868..16d9f31a7 100644
--- a/apps/emqx/src/emqx_config.erl
+++ b/apps/emqx/src/emqx_config.erl
@@ -402,6 +402,7 @@ merge_envs(SchemaMod, RawConf) ->
         required => false,
         format => map,
         apply_override_envs => true,
+        remove_env_meta => true,
         check_lazy => true
     },
     hocon_tconf:merge_env_overrides(SchemaMod, RawConf, all, Opts).
@@ -575,10 +576,10 @@ load_hocon_file(FileName, LoadType) ->
     end.

 do_get_raw(Path) ->
-    hocon_tconf:remove_env_meta(do_get(?RAW_CONF, Path)).
+    do_get(?RAW_CONF, Path).

 do_get_raw(Path, Default) ->
-    hocon_tconf:remove_env_meta(do_get(?RAW_CONF, Path, Default)).
+    do_get(?RAW_CONF, Path, Default).
 do_get(Type, KeyPath) ->
     Ref = make_ref(),
diff --git a/apps/emqx/src/emqx_listeners.erl b/apps/emqx/src/emqx_listeners.erl
index a0f2b1e7d..4a6ea2046 100644
--- a/apps/emqx/src/emqx_listeners.erl
+++ b/apps/emqx/src/emqx_listeners.erl
@@ -199,6 +199,7 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
             Reason =:= listener_disabled;
             Reason =:= quic_app_missing
         ->
+            ?tp(listener_not_started, #{type => Type, bind => Bind, status => {skipped, Reason}}),
             console_print(
                 "Listener ~ts is NOT started due to: ~p.~n",
                 [listener_id(Type, ListenerName), Reason]
@@ -212,8 +213,12 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
             ),
             ok;
         {error, {already_started, Pid}} ->
+            ?tp(listener_not_started, #{
+                type => Type, bind => Bind, status => {already_started, Pid}
+            }),
             {error, {already_started, Pid}};
         {error, Reason} ->
+            ?tp(listener_not_started, #{type => Type, bind => Bind, status => {error, Reason}}),
             ListenerId = listener_id(Type, ListenerName),
             BindStr = format_bind(Bind),
             ?ELOG(
diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl
index f2358aa32..171b6dc42 100644
--- a/apps/emqx/src/emqx_schema.erl
+++ b/apps/emqx/src/emqx_schema.erl
@@ -1942,7 +1942,6 @@ common_ssl_opts_schema(Defaults) ->
     ].

 %% @doc Make schema for SSL listener options.
-%% When it's for ranch listener, an extra field `handshake_timeout' is added.
 -spec server_ssl_opts_schema(map(), boolean()) -> hocon_schema:field_schema().
 server_ssl_opts_schema(Defaults, IsRanchListener) ->
     D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
@@ -1981,26 +1980,23 @@ server_ssl_opts_schema(Defaults, IsRanchListener) ->
                     default => Df("client_renegotiation", true),
                     desc => ?DESC(server_ssl_opts_schema_client_renegotiation)
                 }
+            )},
+        {"handshake_timeout",
+            sc(
+                duration(),
+                #{
+                    default => Df("handshake_timeout", "15s"),
+                    desc => ?DESC(server_ssl_opts_schema_handshake_timeout)
+                }
             )}
-        | [
-            {"handshake_timeout",
-                sc(
-                    duration(),
-                    #{
-                        default => Df("handshake_timeout", "15s"),
-                        desc => ?DESC(server_ssl_opts_schema_handshake_timeout)
-                    }
-                )}
-            || IsRanchListener
-        ] ++
-            [
-                {"gc_after_handshake",
-                    sc(boolean(), #{
-                        default => false,
-                        desc => ?DESC(server_ssl_opts_schema_gc_after_handshake)
-                    })}
-                || not IsRanchListener
-            ]
+    ] ++
+        [
+            {"gc_after_handshake",
+                sc(boolean(), #{
+                    default => false,
+                    desc => ?DESC(server_ssl_opts_schema_gc_after_handshake)
+                })}
+            || not IsRanchListener
     ].

 %% @doc Make schema for SSL client.
diff --git a/apps/emqx/src/emqx_tls_lib.erl b/apps/emqx/src/emqx_tls_lib.erl
index 4bc18f1e0..85c4396ab 100644
--- a/apps/emqx/src/emqx_tls_lib.erl
+++ b/apps/emqx/src/emqx_tls_lib.erl
@@ -166,7 +166,20 @@ all_ciphers(['tlsv1.3']) ->
 all_ciphers(Versions) ->
     %% assert non-empty
     List = lists:append([ssl:cipher_suites(all, V, openssl) || V <- Versions]),
-    [_ | _] = dedup(List).
+
+    %% Some PSK ciphers are supported by both OpenSSL and Erlang, but they need to be added manually here.
+    %% Found with this command:
+    %% openssl ciphers -v|grep ^PSK| awk '{print $1}'| sed "s/^/\"/;s/$/\"/" | tr "\n" ","
+    %% Then remove the ciphers that aren't supported by Erlang
+    PSK = [
+        "PSK-AES256-GCM-SHA384",
+        "PSK-AES128-GCM-SHA256",
+        "PSK-AES256-CBC-SHA384",
+        "PSK-AES256-CBC-SHA",
+        "PSK-AES128-CBC-SHA256",
+        "PSK-AES128-CBC-SHA"
+    ],
+    [_ | _] = dedup(List ++ PSK).

 %% @doc All Pre-selected TLS ciphers.
 default_ciphers() ->
diff --git a/apps/emqx/test/emqx_authentication_SUITE.erl b/apps/emqx/test/emqx_authentication_SUITE.erl
index 61b4b2775..7016a8a00 100644
--- a/apps/emqx/test/emqx_authentication_SUITE.erl
+++ b/apps/emqx/test/emqx_authentication_SUITE.erl
@@ -22,6 +22,8 @@
 -compile(export_all).
 -compile(nowarn_export_all).

+-include_lib("emqx/include/emqx_hooks.hrl").
+
 -include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("typerefl/include/types.hrl").
@@ -35,6 +37,20 @@
     end)()
 ).
 -define(CONF_ROOT, ?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM).
+-define(NOT_SUPERUSER, #{is_superuser => false}).
+
+-define(assertAuthSuccessForUser(User),
+    ?assertMatch(
+        {ok, _},
+        emqx_access_control:authenticate(ClientInfo#{username => atom_to_binary(User)})
+    )
+).
+-define(assertAuthFailureForUser(User),
+    ?assertMatch(
+        {error, _},
+        emqx_access_control:authenticate(ClientInfo#{username => atom_to_binary(User)})
+    )
+).

 %%------------------------------------------------------------------------------
 %% Hocon Schema
 %%------------------------------------------------------------------------------
@@ -88,9 +104,22 @@ update(_Config, _State) ->
 authenticate(#{username := <<"good">>}, _State) ->
     {ok, #{is_superuser => true}};
+authenticate(#{username := <<"ignore">>}, _State) ->
+    ignore;
 authenticate(#{username := _}, _State) ->
     {error, bad_username_or_password}.

+hook_authenticate(#{username := <<"hook_user_good">>}, _AuthResult) ->
+    {ok, {ok, ?NOT_SUPERUSER}};
+hook_authenticate(#{username := <<"hook_user_bad">>}, _AuthResult) ->
+    {ok, {error, invalid_username}};
+hook_authenticate(#{username := <<"hook_user_finally_good">>}, _AuthResult) ->
+    {stop, {ok, ?NOT_SUPERUSER}};
+hook_authenticate(#{username := <<"hook_user_finally_bad">>}, _AuthResult) ->
+    {stop, {error, invalid_username}};
+hook_authenticate(_ClientId, AuthResult) ->
+    {ok, AuthResult}.
+
 destroy(_State) ->
     ok.
@@ -113,6 +142,10 @@ end_per_testcase(Case, Config) ->
     _ = ?MODULE:Case({'end', Config}),
     ok.

+%%=================================================================================
+%% Testcases
+%%=================================================================================
+
 t_chain({'init', Config}) ->
     Config;
 t_chain(Config) when is_list(Config) ->
@@ -500,6 +533,92 @@ t_convert_certs(Config) when is_list(Config) ->
     clear_certs(CertsDir, #{<<"ssl">> => NCerts3}),
     ?assertEqual(false, filelib:is_regular(maps:get(<<"keyfile">>, NCerts3))).
+t_combine_authn_and_callback({init, Config}) ->
+    [
+        {listener_id, 'tcp:default'},
+        {authn_type, {password_based, built_in_database}}
+        | Config
+    ];
+t_combine_authn_and_callback(Config) when is_list(Config) ->
+    ListenerID = ?config(listener_id),
+    ClientInfo = #{
+        zone => default,
+        listener => ListenerID,
+        protocol => mqtt,
+        password => <<"any">>
+    },
+
+    %% no emqx_authentication authenticators, anonymous is allowed
+    ?assertAuthSuccessForUser(bad),
+
+    AuthNType = ?config(authn_type),
+    register_provider(AuthNType, ?MODULE),
+
+    AuthenticatorConfig = #{
+        mechanism => password_based,
+        backend => built_in_database,
+        enable => true
+    },
+    {ok, _} = ?AUTHN:create_authenticator(ListenerID, AuthenticatorConfig),
+
+    %% emqx_authentication alone
+    ?assertAuthSuccessForUser(good),
+    ?assertAuthFailureForUser(ignore),
+    ?assertAuthFailureForUser(bad),
+
+    %% add hook with higher priority
+    ok = hook(?HP_AUTHN + 1),
+
+    %% for hook-unrelated users everything is the same
+    ?assertAuthSuccessForUser(good),
+    ?assertAuthFailureForUser(ignore),
+    ?assertAuthFailureForUser(bad),
+
+    %% higher-priority hook can permit access with {ok,...},
+    %% then emqx_authentication overrides the result
+    ?assertAuthFailureForUser(hook_user_good),
+    ?assertAuthFailureForUser(hook_user_bad),
+
+    %% higher-priority hook can permit and return {stop,...},
+    %% then emqx_authentication cannot override the result
+    ?assertAuthSuccessForUser(hook_user_finally_good),
+    ?assertAuthFailureForUser(hook_user_finally_bad),
+
+    ok = unhook(),
+
+    %% add hook with lower priority
+    ok = hook(?HP_AUTHN - 1),
+
+    %% for hook-unrelated users
+    ?assertAuthSuccessForUser(good),
+    ?assertAuthFailureForUser(bad),
+    ?assertAuthFailureForUser(ignore),
+
+    %% lower-priority hook can override the auth result,
+    %% because emqx_authentication permits/denies with {ok, ...}
+    ?assertAuthSuccessForUser(hook_user_good),
+    ?assertAuthFailureForUser(hook_user_bad),
+    ?assertAuthSuccessForUser(hook_user_finally_good),
+    ?assertAuthFailureForUser(hook_user_finally_bad),
+
+    ok = unhook();
+t_combine_authn_and_callback({'end', Config}) ->
+    ?AUTHN:delete_chain(?config(listener_id)),
+    ?AUTHN:deregister_provider(?config(authn_type)),
+    ok.
+
+%%=================================================================================
+%% Helper fns
+%%=================================================================================
+
+hook(Priority) ->
+    ok = emqx_hooks:put(
+        'client.authenticate', {?MODULE, hook_authenticate, []}, Priority
+    ).
+
+unhook() ->
+    ok = emqx_hooks:del('client.authenticate', {?MODULE, hook_authenticate}).
+
 update_config(Path, ConfigRequest) ->
     emqx:update_config(Path, ConfigRequest, #{rawconf_with_defaults => true}).
diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl
index d640293f0..1d0d793bd 100644
--- a/apps/emqx/test/emqx_common_test_helpers.erl
+++ b/apps/emqx/test/emqx_common_test_helpers.erl
@@ -453,7 +453,10 @@ is_all_tcp_servers_available(Servers) ->
         fun({Host, Port}) ->
             is_tcp_server_available(Host, Port)
         end,
-    lists:all(Fun, Servers).
+    case lists:partition(Fun, Servers) of
+        {_, []} -> true;
+        {_, Unavail} -> ct:print("Unavailable servers: ~p", [Unavail])
+    end.
 -spec is_tcp_server_available(
     Host :: inet:socket_address() | inet:hostname(),
diff --git a/apps/emqx/test/emqx_schema_tests.erl b/apps/emqx/test/emqx_schema_tests.erl
index fdda9ef44..fba70e303 100644
--- a/apps/emqx/test/emqx_schema_tests.erl
+++ b/apps/emqx/test/emqx_schema_tests.erl
@@ -37,11 +37,11 @@ ssl_opts_dtls_test() ->
 ssl_opts_tls_1_3_test() ->
     Sc = emqx_schema:server_ssl_opts_schema(#{}, false),
     Checked = validate(Sc, #{<<"versions">> => [<<"tlsv1.3">>]}),
-    ?assertNot(maps:is_key(handshake_timeout, Checked)),
     ?assertMatch(
         #{
             versions := ['tlsv1.3'],
-            ciphers := []
+            ciphers := [],
+            handshake_timeout := _
         },
         Checked
     ).
diff --git a/apps/emqx/test/emqx_trace_SUITE.erl b/apps/emqx/test/emqx_trace_SUITE.erl
index 0c55687d0..38459c16a 100644
--- a/apps/emqx/test/emqx_trace_SUITE.erl
+++ b/apps/emqx/test/emqx_trace_SUITE.erl
@@ -43,6 +43,9 @@ init_per_suite(Config) ->
             timer:seconds(100)
         ),
         fun(Trace) ->
+            ct:pal("listener start statuses: ~p", [
+                ?of_kind([listener_started, listener_not_started], Trace)
+            ]),
             %% more than one listener
             ?assertMatch([_ | _], ?of_kind(listener_started, Trace))
         end
diff --git a/apps/emqx_authn/src/emqx_authn.erl b/apps/emqx_authn/src/emqx_authn.erl
index 1f986e016..c7f9d6fdf 100644
--- a/apps/emqx_authn/src/emqx_authn.erl
+++ b/apps/emqx_authn/src/emqx_authn.erl
@@ -39,12 +39,15 @@ providers() ->
         {{scram, built_in_database}, emqx_enhanced_authn_scram_mnesia}
     ].

-check_configs(C) when is_map(C) ->
-    check_configs([C]);
-check_configs([]) ->
+check_configs(CM) when is_map(CM) ->
+    check_configs([CM]);
+check_configs(CL) ->
+    check_configs(CL, 1).
+
+check_configs([], _Nth) ->
     [];
-check_configs([Config | Configs]) ->
-    [check_config(Config) | check_configs(Configs)].
+check_configs([Config | Configs], Nth) ->
+    [check_config(Config, #{id_for_log => Nth}) | check_configs(Configs, Nth + 1)].

 check_config(Config) ->
     check_config(Config, #{}).
@@ -55,15 +58,16 @@ check_config(Config, Opts) ->
         #{?CONF_NS_BINARY := WithDefaults} -> WithDefaults
     end.

-do_check_config(#{<<"mechanism">> := Mec} = Config, Opts) ->
+do_check_config(#{<<"mechanism">> := Mec0} = Config, Opts) ->
+    Mec = atom(Mec0, #{error => unknown_mechanism}),
     Key =
         case maps:get(<<"backend">>, Config, false) of
-            false -> atom(Mec);
-            Backend -> {atom(Mec), atom(Backend)}
+            false -> Mec;
+            Backend -> {Mec, atom(Backend, #{error => unknown_backend})}
         end,
     case lists:keyfind(Key, 1, providers()) of
        false ->
-            throw({unknown_handler, Key});
+            throw(#{error => unknown_authn_provider, which => Key});
         {_, ProviderModule} ->
             hocon_tconf:check_plain(
                 ProviderModule,
@@ -71,15 +75,22 @@ do_check_config(#{<<"mechanism">> := Mec} = Config, Opts) ->
                 Opts#{atom_key => true}
             )
     end;
-do_check_config(_Config, _Opts) ->
-    throw({invalid_config, "mechanism_field_required"}).
+do_check_config(Config, Opts) when is_map(Config) ->
+    throw(#{
+        error => invalid_config,
+        which => maps:get(id_for_log, Opts, unknown),
+        reason => "mechanism_field_required"
+    }).

-atom(Bin) ->
+%% The atoms have to be loaded already,
+%% which might be an issue for plugins which are loaded after node boot
+%% but they should really manage their own configs in that case.
+atom(Bin, ErrorContext) ->
     try
         binary_to_existing_atom(Bin, utf8)
     catch
         _:_ ->
-            throw({unknown_auth_provider, Bin})
+            throw(ErrorContext#{value => Bin})
     end.
 -spec get_enabled_authns() ->
diff --git a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl
index e26ad9839..a8ae38444 100644
--- a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl
+++ b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl
@@ -80,7 +80,6 @@
     <<"servers">> => <>,
     <<"redis_type">> => <<"cluster">>,
     <<"pool_size">> => 1,
-    <<"database">> => 0,
     <<"password">> => <<"ee">>,
     <<"auto_reconnect">> => true,
     <<"ssl">> => #{<<"enable">> => false},
diff --git a/apps/emqx_bridge/include/emqx_bridge.hrl b/apps/emqx_bridge/include/emqx_bridge.hrl
index 6bc80f9cc..ab0e895fa 100644
--- a/apps/emqx_bridge/include/emqx_bridge.hrl
+++ b/apps/emqx_bridge/include/emqx_bridge.hrl
@@ -1,3 +1,19 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
 -define(EMPTY_METRICS,
     ?METRICS(
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src
index 3cc858665..86ab01a97 100644
--- a/apps/emqx_bridge/src/emqx_bridge.app.src
+++ b/apps/emqx_bridge/src/emqx_bridge.app.src
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_bridge, [
     {description, "EMQX bridges"},
-    {vsn, "0.1.6"},
+    {vsn, "0.1.7"},
     {registered, []},
     {mod, {emqx_bridge_app, []}},
     {applications, [
diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl
index 321f8a2ae..30387eb8a 100644
--- a/apps/emqx_bridge/src/emqx_bridge.erl
+++ b/apps/emqx_bridge/src/emqx_bridge.erl
@@ -53,7 +53,10 @@
     T == mysql;
     T == gcp_pubsub;
     T == influxdb_api_v1;
-    T == influxdb_api_v2
+    T == influxdb_api_v2;
+    T == redis_single;
+    T == redis_sentinel;
+    T == redis_cluster
 ).

 load() ->
@@ -135,6 +138,7 @@ on_message_publish(Message = #message{topic = Topic, flags = Flags}) ->
     {ok, Message}.

 send_to_matched_egress_bridges(Topic, Msg) ->
+    MatchedBridgeIds = get_matched_egress_bridges(Topic),
     lists:foreach(
         fun(Id) ->
             try send_message(Id, Msg) of
@@ -157,7 +161,7 @@ send_to_matched_egress_bridges(Topic, Msg) ->
                     })
             end
         end,
-        get_matched_bridges(Topic)
+        MatchedBridgeIds
     ).

 send_message(BridgeId, Message) ->
@@ -242,6 +246,12 @@ disable_enable(Action, BridgeType, BridgeName) when
     ).

 create(BridgeType, BridgeName, RawConf) ->
+    ?SLOG(debug, #{
+        brige_action => create,
+        bridge_type => BridgeType,
+        bridge_name => BridgeName,
+        bridge_raw_config => RawConf
+    }),
     emqx_conf:update(
         emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
         RawConf,
@@ -249,6 +259,11 @@
     ).

 remove(BridgeType, BridgeName) ->
+    ?SLOG(debug, #{
+        brige_action => remove,
+        bridge_type => BridgeType,
+        bridge_name => BridgeName
+    }),
     emqx_conf:remove(
         emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
         #{override_to => cluster}
@@ -324,13 +339,19 @@ flatten_confs(Conf0) ->
 do_flatten_confs(Type, Conf0) ->
     [{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].

-get_matched_bridges(Topic) ->
+get_matched_egress_bridges(Topic) ->
     Bridges = emqx:get_config([bridges], #{}),
     maps:fold(
         fun(BType, Conf, Acc0) ->
             maps:fold(
-                fun(BName, BConf, Acc1) ->
-                    get_matched_bridge_id(BType, BConf, Topic, BName, Acc1)
+                fun
+                    (BName, #{egress := _} = BConf, Acc1) when BType =:= mqtt ->
+                        get_matched_bridge_id(BType, BConf, Topic, BName, Acc1);
+                    (_BName, #{ingress := _}, Acc1) when BType =:= mqtt ->
+                        %% ignore ingress only bridge
+                        Acc1;
+                    (BName, BConf, Acc1) ->
+                        get_matched_bridge_id(BType, BConf, Topic, BName, Acc1)
                 end,
                 Acc0,
                 Conf
diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl
index ad35485ed..ef5dd6093 100644
--- a/apps/emqx_bridge/src/emqx_bridge_resource.erl
+++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl
@@ -298,8 +298,8 @@ parse_confs(Type, Name, Conf) when ?IS_BI_DIR_BRIDGE(Type) ->
     %% For some drivers that can be used as data-sources, we need to provide a
     %% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it
     %% receives a message from the external database.
-    BName = bridge_id(Type, Name),
-    Conf#{hookpoint => <<"$bridges/", BName/binary>>, bridge_name => Name};
+    BId = bridge_id(Type, Name),
+    Conf#{hookpoint => <<"$bridges/", BId/binary>>, bridge_name => Name};
 parse_confs(_Type, _Name, Conf) ->
     Conf.
diff --git a/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl
index c907205f1..ec3caff7d 100644
--- a/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl
+++ b/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl
@@ -34,6 +34,13 @@
 -define(NAME_MQTT, <<"my_mqtt_bridge">>).
 -define(BRIDGE_NAME_INGRESS, <<"ingress_mqtt_bridge">>).
 -define(BRIDGE_NAME_EGRESS, <<"egress_mqtt_bridge">>).
+
+%% Use ingress/egress prefixes in topic names to avoid a dead loop while bridging
+-define(INGRESS_REMOTE_TOPIC, "ingress_remote_topic").
+-define(INGRESS_LOCAL_TOPIC, "ingress_local_topic").
+-define(EGRESS_REMOTE_TOPIC, "egress_remote_topic").
+-define(EGRESS_LOCAL_TOPIC, "egress_local_topic").
+
 -define(SERVER_CONF(Username), #{
     <<"server">> => <<"127.0.0.1:1883">>,
     <<"username">> => Username,
@@ -44,11 +51,11 @@
 -define(INGRESS_CONF, #{
     <<"remote">> => #{
-        <<"topic">> => <<"remote_topic/#">>,
+        <<"topic">> => <>,
         <<"qos">> => 2
     },
     <<"local">> => #{
-        <<"topic">> => <<"local_topic/${topic}">>,
+        <<"topic">> => <>,
         <<"qos">> => <<"${qos}">>,
         <<"payload">> => <<"${payload}">>,
         <<"retain">> => <<"${retain}">>
@@ -57,10 +64,10 @@
 -define(EGRESS_CONF, #{
     <<"local">> => #{
-        <<"topic">> => <<"local_topic/#">>
+        <<"topic">> => <>
     },
     <<"remote">> => #{
-        <<"topic">> => <<"remote_topic/${topic}">>,
+        <<"topic">> => <>,
         <<"payload">> => <<"${payload}">>,
         <<"qos">> => <<"${qos}">>,
         <<"retain">> => <<"${retain}">>
@@ -155,8 +162,8 @@ t_mqtt_conn_bridge_ingress(_) ->
     BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),

     %% we now test if the bridge works as expected
-    RemoteTopic = <<"remote_topic/1">>,
-    LocalTopic = <<"local_topic/", RemoteTopic/binary>>,
+    RemoteTopic = <>,
+    LocalTopic = <>,
     Payload = <<"hello">>,
     emqx:subscribe(LocalTopic),
     timer:sleep(100),
@@ -219,8 +226,8 @@ t_mqtt_conn_bridge_egress(_) ->
     } = jsx:decode(Bridge),
     BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
     %% we now test if the bridge works as expected
-    LocalTopic = <<"local_topic/1">>,
-    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
+    LocalTopic = <>,
+    RemoteTopic = <>,
     Payload = <<"hello">>,
     emqx:subscribe(RemoteTopic),
     timer:sleep(100),
@@ -264,6 +271,113 @@ t_mqtt_conn_bridge_egress(_) ->
     {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
     ok.

+t_mqtt_conn_bridge_ingress_and_egress(_) ->
+    User1 = <<"user1">>,
+    %% create an MQTT bridge, using POST
+    {ok, 201, Bridge} = request(
+        post,
+        uri(["bridges"]),
+        ?SERVER_CONF(User1)#{
+            <<"type">> => ?TYPE_MQTT,
+            <<"name">> => ?BRIDGE_NAME_INGRESS,
+            <<"ingress">> => ?INGRESS_CONF
+        }
+    ),
+
+    #{
+        <<"type">> := ?TYPE_MQTT,
+        <<"name">> := ?BRIDGE_NAME_INGRESS
+    } = jsx:decode(Bridge),
+    BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
+    {ok, 201, Bridge2} = request(
+        post,
+        uri(["bridges"]),
+        ?SERVER_CONF(User1)#{
+            <<"type">> => ?TYPE_MQTT,
+            <<"name">> => ?BRIDGE_NAME_EGRESS,
+            <<"egress">> => ?EGRESS_CONF
+        }
+    ),
+    #{
+        <<"type">> := ?TYPE_MQTT,
+        <<"name">> := ?BRIDGE_NAME_EGRESS
+    } = jsx:decode(Bridge2),
+
+    BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
+    %% we now test if the bridge works as expected
+    LocalTopic = <>,
+    RemoteTopic = <>,
+    Payload = <<"hello">>,
+    emqx:subscribe(RemoteTopic),
+
+    {ok, 200, BridgeStr1} = request(get, uri(["bridges", BridgeIDEgress]), []),
+    #{
+        <<"metrics">> := #{
+            <<"matched">> := CntMatched1, <<"success">> := CntSuccess1, <<"failed">> := 0
+        },
+        <<"node_metrics">> :=
+            [
+                #{
+                    <<"node">> := _,
+                    <<"metrics">> :=
+                        #{
+                            <<"matched">> := NodeCntMatched1,
+                            <<"success">> := NodeCntSuccess1,
+                            <<"failed">> := 0
+                        }
+                }
+            ]
+    } = jsx:decode(BridgeStr1),
+    timer:sleep(100),
+    %% PUBLISH a message to the 'local' broker, as we have only one broker,
+    %% the remote broker is also the local one.
+    emqx:publish(emqx_message:make(LocalTopic, Payload)),
+
+    %% we should receive a message on the "remote" broker, with specified topic
+    ?assert(
+        receive
+            {deliver, RemoteTopic, #message{payload = Payload}} ->
+                ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
+                true;
+            Msg ->
+                ct:pal("Msg: ~p", [Msg]),
+                false
+        after 100 ->
+            false
+        end
+    ),
+
+    %% verify the metrics of the bridge
+    timer:sleep(1000),
+    {ok, 200, BridgeStr2} = request(get, uri(["bridges", BridgeIDEgress]), []),
+    #{
+        <<"metrics">> := #{
+            <<"matched">> := CntMatched2, <<"success">> := CntSuccess2, <<"failed">> := 0
+        },
+        <<"node_metrics">> :=
+            [
+                #{
+                    <<"node">> := _,
+                    <<"metrics">> :=
+                        #{
+                            <<"matched">> := NodeCntMatched2,
+                            <<"success">> := NodeCntSuccess2,
+                            <<"failed">> := 0
+                        }
+                }
+            ]
+    } = jsx:decode(BridgeStr2),
+    ?assertEqual(CntMatched2, CntMatched1 + 1),
+    ?assertEqual(CntSuccess2, CntSuccess1 + 1),
+    ?assertEqual(NodeCntMatched2, NodeCntMatched1 + 1),
+    ?assertEqual(NodeCntSuccess2, NodeCntSuccess1 + 1),
+
+    %% delete the bridge
+    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
+    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []),
+    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
+    ok.
+
 t_ingress_mqtt_bridge_with_rules(_) ->
     {ok, 201, _} = request(
         post,
@@ -290,8 +404,8 @@ t_ingress_mqtt_bridge_with_rules(_) ->

     %% we now test if the bridge works as expected

-    RemoteTopic = <<"remote_topic/1">>,
-    LocalTopic = <<"local_topic/", RemoteTopic/binary>>,
+    RemoteTopic = <>,
+    LocalTopic = <>,
     Payload = <<"hello">>,
     emqx:subscribe(LocalTopic),
     timer:sleep(100),
@@ -400,8 +514,8 @@ t_egress_mqtt_bridge_with_rules(_) ->
     #{<<"id">> := RuleId} = jsx:decode(Rule),

     %% we now test if the bridge works as expected
-    LocalTopic = <<"local_topic/1">>,
-    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
+    LocalTopic = <>,
+    RemoteTopic = <>,
     Payload = <<"hello">>,
     emqx:subscribe(RemoteTopic),
     timer:sleep(100),
@@ -426,7 +540,7 @@ t_egress_mqtt_bridge_with_rules(_) ->

     %% PUBLISH a message to the rule.
     Payload2 = <<"hi">>,
     RuleTopic = <<"t/1">>,
-    RemoteTopic2 = <<"remote_topic/", RuleTopic/binary>>,
+    RemoteTopic2 = <>,
     emqx:subscribe(RemoteTopic2),
     timer:sleep(100),
     emqx:publish(emqx_message:make(RuleTopic, Payload2)),
@@ -517,8 +631,8 @@ t_mqtt_conn_bridge_egress_reconnect(_) ->
     } = jsx:decode(Bridge),
     BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
     %% we now test if the bridge works as expected
-    LocalTopic = <<"local_topic/1">>,
-    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
+    LocalTopic = <>,
+    RemoteTopic = <>,
     Payload0 = <<"hello">>,
     emqx:subscribe(RemoteTopic),
     timer:sleep(100),
diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src
index c57664ca7..6d7e9ef9c 100644
--- a/apps/emqx_conf/src/emqx_conf.app.src
+++ b/apps/emqx_conf/src/emqx_conf.app.src
@@ -1,6 +1,6 @@
 {application, emqx_conf, [
     {description, "EMQX configuration management"},
-    {vsn, "0.1.7"},
+    {vsn, "0.1.8"},
     {registered, []},
     {mod, {emqx_conf_app, []}},
     {applications, [kernel, stdlib]},
diff --git a/apps/emqx_connector/src/emqx_connector.app.src b/apps/emqx_connector/src/emqx_connector.app.src
index 8061203e7..e73b43751 100644
--- a/apps/emqx_connector/src/emqx_connector.app.src
+++ b/apps/emqx_connector/src/emqx_connector.app.src
@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, emqx_connector, [
     {description, "An OTP application"},
-    {vsn, "0.1.9"},
+    {vsn, "0.1.10"},
     {registered, []},
     {mod, {emqx_connector_app, []}},
     {applications, [
diff --git a/apps/emqx_connector/src/emqx_connector_redis.erl b/apps/emqx_connector/src/emqx_connector_redis.erl
index a1e864f1d..5a77ba6ab 100644
--- a/apps/emqx_connector/src/emqx_connector_redis.erl
+++ b/apps/emqx_connector/src/emqx_connector_redis.erl
@@ -37,7 +37,7 @@

 -export([connect/1]).

--export([cmd/3]).
+-export([do_cmd/3]).
 %% redis host don't need parse
 -define(REDIS_HOST_OPTIONS, #{
@@ -63,7 +63,8 @@ fields(single) ->
     [
         {server, fun server/1},
        {redis_type, #{
-            type => hoconsc:enum([single]),
+            type => single,
+            default => single,
             required => true,
             desc => ?DESC("single")
         }}
@@ -74,18 +75,20 @@ fields(cluster) ->
     [
         {servers, fun servers/1},
         {redis_type, #{
-            type => hoconsc:enum([cluster]),
+            type => cluster,
+            default => cluster,
             required => true,
             desc => ?DESC("cluster")
         }}
     ] ++
-        redis_fields() ++
+        lists:keydelete(database, 1, redis_fields()) ++
         emqx_connector_schema_lib:ssl_fields();
 fields(sentinel) ->
     [
         {servers, fun servers/1},
         {redis_type, #{
-            type => hoconsc:enum([sentinel]),
+            type => sentinel,
+            default => sentinel,
             required => true,
             desc => ?DESC("sentinel")
         }},
@@ -119,7 +122,6 @@ on_start(
     InstId,
     #{
         redis_type := Type,
-        database := Database,
         pool_size := PoolSize,
         auto_reconnect := AutoReconn,
         ssl := SSL
@@ -135,13 +137,17 @@ on_start(
             single -> [{servers, [maps:get(server, Config)]}];
             _ -> [{servers, maps:get(servers, Config)}]
         end,
+    Database =
+        case Type of
+            cluster -> [];
+            _ -> [{database, maps:get(database, Config)}]
+        end,
     Opts =
         [
             {pool_size, PoolSize},
-            {database, Database},
             {password, maps:get(password, Config, "")},
             {auto_reconnect, reconn_interval(AutoReconn)}
-        ] ++ Servers,
+        ] ++ Database ++ Servers,
     Options =
         case maps:get(enable, SSL) of
             true ->
@@ -157,9 +163,12 @@ on_start(
     case Type of
         cluster ->
             case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of
-                {ok, _} -> {ok, State};
-                {ok, _, _} -> {ok, State};
-                {error, Reason} -> {error, Reason}
+                {ok, _} ->
+                    {ok, State};
+                {ok, _, _} ->
+                    {ok, State};
+                {error, Reason} ->
+                    {error, Reason}
             end;
         _ ->
            case
@@ -180,23 +189,28 @@ on_stop(InstId, #{poolname := PoolName, type := Type}) ->
         _ -> emqx_plugin_libs_pool:stop_pool(PoolName)
     end.

-on_query(InstId, {cmd, Command}, #{poolname := PoolName, type := Type} = State) ->
+on_query(InstId, {cmd, _} = Query, State) ->
+    do_query(InstId, Query, State);
+on_query(InstId, {cmds, _} = Query, State) ->
+    do_query(InstId, Query, State).
+
+do_query(InstId, Query, #{poolname := PoolName, type := Type} = State) ->
     ?TRACE(
         "QUERY",
         "redis_connector_received",
-        #{connector => InstId, sql => Command, state => State}
+        #{connector => InstId, query => Query, state => State}
     ),
     Result =
         case Type of
-            cluster -> eredis_cluster:q(PoolName, Command);
-            _ -> ecpool:pick_and_do(PoolName, {?MODULE, cmd, [Type, Command]}, no_handover)
+            cluster -> do_cmd(PoolName, cluster, Query);
+            _ -> ecpool:pick_and_do(PoolName, {?MODULE, do_cmd, [Type, Query]}, no_handover)
         end,
     case Result of
         {error, Reason} ->
             ?SLOG(error, #{
-                msg => "redis_connector_do_cmd_query_failed",
+                msg => "redis_connector_do_query_failed",
                 connector => InstId,
-                sql => Command,
+                query => Query,
                 reason => Reason
             });
         _ ->
@@ -226,7 +240,7 @@ on_get_status(_InstId, #{type := cluster, poolname := PoolName, auto_reconnect :
             Health = eredis_cluster_workers_exist_and_are_connected(Workers),
             status_result(Health, AutoReconn);
         false ->
-            disconnect
+            disconnected
     end;
 on_get_status(_InstId, #{poolname := Pool, auto_reconnect := AutoReconn}) ->
     Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1),
@@ -245,10 +259,29 @@ status_result(_Status = false, _AutoReconn = false) -> disconnected.
 reconn_interval(true) -> 15;
 reconn_interval(false) -> false.

-cmd(Conn, cluster, Command) ->
-    eredis_cluster:q(Conn, Command);
-cmd(Conn, _Type, Command) ->
-    eredis:q(Conn, Command).
+do_cmd(PoolName, cluster, {cmd, Command}) ->
+    eredis_cluster:q(PoolName, Command);
+do_cmd(Conn, _Type, {cmd, Command}) ->
+    eredis:q(Conn, Command);
+do_cmd(PoolName, cluster, {cmds, Commands}) ->
+    wrap_qp_result(eredis_cluster:qp(PoolName, Commands));
+do_cmd(Conn, _Type, {cmds, Commands}) ->
+    wrap_qp_result(eredis:qp(Conn, Commands)).
+
+wrap_qp_result({error, _} = Error) ->
+    Error;
+wrap_qp_result(Results) when is_list(Results) ->
+    AreAllOK = lists:all(
+        fun
+            ({ok, _}) -> true;
+            ({error, _}) -> false
+        end,
+        Results
+    ),
+    case AreAllOK of
+        true -> {ok, Results};
+        false -> {error, Results}
+    end.

 %% ===================================================================
 connect(Opts) ->
diff --git a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl
index d9199d2d6..e67dced2f 100644
--- a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl
+++ b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl
@@ -111,6 +111,14 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
     ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
     % Perform query as further check that the resource is working as expected
     ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
+    ?assertEqual(
+        {ok, [{ok, <<"PONG">>}, {ok, <<"PONG">>}]},
+        emqx_resource:query(PoolName, {cmds, [RedisCommand, RedisCommand]})
+    ),
+    ?assertMatch(
+        {error, [{ok, <<"PONG">>}, {error, _}]},
+        emqx_resource:query(PoolName, {cmds, [RedisCommand, [<<"INVALID_COMMAND">>]]})
+    ),
     ?assertEqual(ok, emqx_resource:stop(PoolName)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
@@ -152,14 +160,14 @@ redis_config_cluster() ->
 redis_config_sentinel() ->
     redis_config_base("sentinel", "servers").

--define(REDIS_CONFIG_BASE(MaybeSentinel),
+-define(REDIS_CONFIG_BASE(MaybeSentinel, MaybeDatabase),
     "" ++
         "\n" ++
         "    auto_reconnect = true\n" ++
-        "    database = 1\n" ++
         "    pool_size = 8\n" ++
         "    redis_type = ~s\n" ++
         MaybeSentinel ++
+        MaybeDatabase ++
         "    password = public\n" ++
         "    ~s = \"~s:~b\"\n" ++
         "    " ++
@@ -171,15 +179,22 @@ redis_config_base(Type, ServerKey) ->
         "sentinel" ->
             Host = ?REDIS_SENTINEL_HOST,
             Port = ?REDIS_SENTINEL_PORT,
-            MaybeSentinel = "    sentinel = mymaster\n";
-        _ ->
+            MaybeSentinel = "    sentinel = mymaster\n",
+            MaybeDatabase = "    database = 1\n";
+        "single" ->
             Host = ?REDIS_SINGLE_HOST,
             Port = ?REDIS_SINGLE_PORT,
-            MaybeSentinel = ""
+            MaybeSentinel = "",
+            MaybeDatabase = "    database = 1\n";
+        "cluster" ->
+            Host = ?REDIS_SINGLE_HOST,
+            Port = ?REDIS_SINGLE_PORT,
+            MaybeSentinel = "",
+            MaybeDatabase = ""
     end,
     RawConfig = list_to_binary(
         io_lib:format(
-            ?REDIS_CONFIG_BASE(MaybeSentinel),
+            ?REDIS_CONFIG_BASE(MaybeSentinel, MaybeDatabase),
             [Type, ServerKey, Host, Port]
         )
     ),
diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src
index 16c51342f..56bd64c74 100644
--- a/apps/emqx_dashboard/src/emqx_dashboard.app.src
+++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src
@@ -2,7 +2,7 @@
 {application, emqx_dashboard, [
     {description, "EMQX Web Dashboard"},
     % strict semver, bump manually!
-    {vsn, "5.0.9"},
+    {vsn, "5.0.10"},
     {modules, []},
     {registered, [emqx_dashboard_sup]},
     {applications, [kernel, stdlib, mnesia, minirest, emqx]},
diff --git a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl b/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl
index 9a6468455..9bf1d77ea 100644
--- a/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl
+++ b/apps/emqx_gateway/src/lwm2m/emqx_lwm2m_api.erl
@@ -81,7 +81,7 @@ schema(?PATH("/observe")) ->
             ],
             'requestBody' => [],
             responses => #{
-                200 => <<"No Content">>,
+                204 => <<"No Content">>,
                 404 => error_codes(['CLIENT_NOT_FOUND'], <<"Clientid not found">>)
             }
         }
@@ -98,7 +98,7 @@ schema(?PATH("/read")) ->
                 {path, mk(binary(), #{in => query, required => true, example => "/3/0/7"})}
             ],
             responses => #{
-                200 => <<"No Content">>,
+                204 => <<"No Content">>,
                 404 => error_codes(['CLIENT_NOT_FOUND'], <<"clientid not found">>)
             }
         }
@@ -121,7 +121,7 @@ schema(?PATH("/write")) ->
                 {value, mk(binary(), #{in => query, required => true, example => 123})}
             ],
             responses => #{
-                200 => <<"No Content">>,
+                204 => <<"No Content">>,
                 404 => error_codes(['CLIENT_NOT_FOUND'], <<"Clientid not found">>)
             }
         }
@@ -275,7 +275,7 @@ send_cmd(ClientId, Cmd) ->
     case emqx_gateway_cm_registry:lookup_channels(lwm2m, ClientId) of
         [Channel | _] ->
             ok = emqx_lwm2m_channel:send_cmd(Channel, Cmd),
-            {200};
+            {204};
         _ ->
             {404, #{code => 'CLIENT_NOT_FOUND'}}
     end.
diff --git a/apps/emqx_gateway/test/emqx_lwm2m_api_SUITE.erl b/apps/emqx_gateway/test/emqx_lwm2m_api_SUITE.erl
index 6128b9b62..5b206dffb 100644
--- a/apps/emqx_gateway/test/emqx_lwm2m_api_SUITE.erl
+++ b/apps/emqx_gateway/test/emqx_lwm2m_api_SUITE.erl
@@ -253,7 +253,7 @@ t_read(Config) ->
     test_recv_mqtt_response(RespTopic),

     %% step2, call Read API
-    call_send_api(Epn, "read", "path=/3/0/0"),
+    ?assertMatch({204, []}, call_send_api(Epn, "read", "path=/3/0/0")),
     timer:sleep(100),
     #coap_message{type = Type, method = Method, options = Opts} = test_recv_coap_request(UdpSock),
     ?assertEqual(con, Type),
@@ -289,7 +289,7 @@ t_write(Config) ->
     test_recv_mqtt_response(RespTopic),

     %% step2, call write API
-    call_send_api(Epn, "write", "path=/3/0/13&type=Integer&value=123"),
+    ?assertMatch({204, []}, call_send_api(Epn, "write", "path=/3/0/13&type=Integer&value=123")),
     timer:sleep(100),
     #coap_message{type = Type, method = Method, options = Opts} = test_recv_coap_request(UdpSock),
     ?assertEqual(con, Type),
@@ -326,7 +326,7 @@ t_observe(Config) ->
     test_recv_mqtt_response(RespTopic),

     %% step2, call observe API
-    call_deprecated_send_api(Epn, "observe", "path=/3/0/1&enable=false"),
+    ?assertMatch({204, []}, call_deprecated_send_api(Epn, "observe", "path=/3/0/1&enable=false")),
     timer:sleep(100),
     #coap_message{type = Type, method = Method, options = Opts} = test_recv_coap_request(UdpSock),
     ?assertEqual(con, Type),
@@ -354,9 +354,12 @@ call_deprecated_send_api(ClientId, Cmd, Query) ->
 call_send_api(ClientId, Cmd, Query, API) ->
     ApiPath = emqx_mgmt_api_test_util:api_path([API, ClientId, Cmd]),
     Auth = emqx_mgmt_api_test_util:auth_header_(),
-    {ok, Response} = emqx_mgmt_api_test_util:request_api(post, ApiPath, Query, Auth),
+    Opts = #{return_all => true},
+    {ok, {{"HTTP/1.1", StatusCode, _}, _Headers, Response}} = emqx_mgmt_api_test_util:request_api(
+        post, ApiPath, Query, Auth, [], Opts
+    ),
     ?LOGT("rest api response:~ts~n", [Response]),
-    Response.
+    {StatusCode, Response}.

no_received_request(ClientId, Path, Action) -> Response = call_lookup_api(ClientId, Path, Action), diff --git a/apps/emqx_machine/src/emqx_restricted_shell.erl b/apps/emqx_machine/src/emqx_restricted_shell.erl index 31ee16986..f5a52809f 100644 --- a/apps/emqx_machine/src/emqx_restricted_shell.erl +++ b/apps/emqx_machine/src/emqx_restricted_shell.erl @@ -45,10 +45,14 @@ set_prompt_func() -> prompt_func(PropList) -> Line = proplists:get_value(history, PropList, 1), Version = emqx_release:version(), - Edition = emqx_release:edition(), + Prefix = + case emqx_release:edition() of + ce -> "v"; + ee -> "e" + end, case is_alive() of - true -> io_lib:format(<<"~ts-~ts(~s)~w> ">>, [Edition, Version, node(), Line]); - false -> io_lib:format(<<"~ts-~ts ~w> ">>, [Edition, Version, Line]) + true -> io_lib:format(<<"~ts~ts(~s)~w> ">>, [Prefix, Version, node(), Line]); + false -> io_lib:format(<<"~ts~ts ~w> ">>, [Prefix, Version, Line]) end. local_allowed(MF, Args, State) -> diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src index a74d411f9..5df8fe4df 100644 --- a/apps/emqx_management/src/emqx_management.app.src +++ b/apps/emqx_management/src/emqx_management.app.src @@ -2,7 +2,7 @@ {application, emqx_management, [ {description, "EMQX Management API and CLI"}, % strict semver, bump manually! - {vsn, "5.0.9"}, + {vsn, "5.0.10"}, {modules, []}, {registered, [emqx_management_sup]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx]}, diff --git a/apps/emqx_management/src/emqx_mgmt_api_trace.erl b/apps/emqx_management/src/emqx_mgmt_api_trace.erl index 93d647753..17895786b 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_trace.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_trace.erl @@ -48,6 +48,7 @@ -define(TO_BIN(_B_), iolist_to_binary(_B_)). -define(NOT_FOUND(N), {404, #{code => 'NOT_FOUND', message => ?TO_BIN([N, " NOT FOUND"])}}). +-define(BAD_REQUEST(C, M), {400, #{code => C, message => ?TO_BIN(M)}}). -define(TAGS, [<<"Trace">>]). namespace() -> "trace". @@ -83,11 +84,16 @@ schema("/trace") -> 200 => hoconsc:ref(trace), 400 => emqx_dashboard_swagger:error_codes( [ - 'ALREADY_EXISTS', - 'DUPLICATE_CONDITION', 'INVALID_PARAMS' ], - <<"trace name already exists">> + <<"invalid trace params">> + ), + 409 => emqx_dashboard_swagger:error_codes( + [ + 'ALREADY_EXISTS', + 'DUPLICATE_CONDITION' + ], + <<"trace already exists">> ) } }, @@ -141,6 +147,7 @@ schema("/trace/:name/download") -> #{schema => #{type => "string", format => "binary"}} } }, + 400 => emqx_dashboard_swagger:error_codes(['NODE_ERROR'], <<"Node Not Found">>), 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Trace Name Not Found">>) } } @@ -176,9 +183,8 @@ schema("/trace/:name/log") -> {items, hoconsc:mk(binary(), #{example => "TEXT-LOG-ITEMS"})}, {meta, fields(bytes) ++ fields(position)} ], - 400 => emqx_dashboard_swagger:error_codes( - ['READ_FILE_ERROR', 'RPC_ERROR', 'NODE_ERROR'], <<"Trace Log Failed">> - ) + 400 => emqx_dashboard_swagger:error_codes(['NODE_ERROR'], <<"Trace Log Failed">>), + 404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Trace Name Not Found">>) } } }. 
@@ -391,12 +397,12 @@ trace(post, #{body := Param}) -> {ok, Trace0} -> {200, format_trace(Trace0)}; {error, {already_existed, Name}} -> - {400, #{ + {409, #{ code => 'ALREADY_EXISTS', message => ?TO_BIN([Name, " Already Exists"]) }}; {error, {duplicate_condition, Name}} -> - {400, #{ + {409, #{ code => 'DUPLICATE_CONDITION', message => ?TO_BIN([Name, " Duplication Condition"]) }}; @@ -450,30 +456,31 @@ update_trace(put, #{bindings := #{name := Name}}) -> %% if HTTP request headers include accept-encoding: gzip and file size > 300 bytes. %% cowboy_compress_h will auto encode gzip format. download_trace_log(get, #{bindings := #{name := Name}, query_string := Query}) -> - Nodes = - case parse_node(Query, undefined) of - {ok, undefined} -> mria_mnesia:running_nodes(); - {ok, Node0} -> [Node0]; - {error, not_found} -> mria_mnesia:running_nodes() - end, case emqx_trace:get_trace_filename(Name) of {ok, TraceLog} -> - TraceFiles = collect_trace_file(Nodes, TraceLog), - ZipDir = emqx_trace:zip_dir(), - Zips = group_trace_file(ZipDir, TraceLog, TraceFiles), - FileName = binary_to_list(Name) ++ ".zip", - ZipFileName = filename:join([ZipDir, FileName]), - {ok, ZipFile} = zip:zip(ZipFileName, Zips, [{cwd, ZipDir}]), - %% emqx_trace:delete_files_after_send(ZipFileName, Zips), - %% TODO use file replace file_binary.(delete file after send is not ready now). - {ok, Binary} = file:read_file(ZipFile), - ZipName = filename:basename(ZipFile), - _ = file:delete(ZipFile), - Headers = #{ - <<"content-type">> => <<"application/x-zip">>, - <<"content-disposition">> => iolist_to_binary("attachment; filename=" ++ ZipName) - }, - {200, Headers, {file_binary, ZipName, Binary}}; + case parse_node(Query, undefined) of + {ok, Node} -> + TraceFiles = collect_trace_file(Node, TraceLog), + ZipDir = emqx_trace:zip_dir(), + Zips = group_trace_file(ZipDir, TraceLog, TraceFiles), + FileName = binary_to_list(Name) ++ ".zip", + ZipFileName = filename:join([ZipDir, FileName]), + {ok, ZipFile} = zip:zip(ZipFileName, Zips, [{cwd, ZipDir}]), + %% emqx_trace:delete_files_after_send(ZipFileName, Zips), + %% TODO use file replace file_binary.(delete file after send is not ready now). + {ok, Binary} = file:read_file(ZipFile), + ZipName = filename:basename(ZipFile), + _ = file:delete(ZipFile), + Headers = #{ + <<"content-type">> => <<"application/x-zip">>, + <<"content-disposition">> => iolist_to_binary( + "attachment; filename=" ++ ZipName + ) + }, + {200, Headers, {file_binary, ZipName, Binary}}; + {error, not_found} -> + ?BAD_REQUEST('NODE_ERROR', <<"Node not found">>) + end; {error, not_found} -> ?NOT_FOUND(Name) end. @@ -503,8 +510,11 @@ group_trace_file(ZipDir, TraceLog, TraceFiles) -> TraceFiles ). -collect_trace_file(Nodes, TraceLog) -> - wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog)). +collect_trace_file(undefined, TraceLog) -> + Nodes = mria_mnesia:running_nodes(), + wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog)); +collect_trace_file(Node, TraceLog) -> + wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file([Node], TraceLog)). 
collect_trace_file_detail(TraceLog) -> Nodes = mria_mnesia:running_nodes(), @@ -551,21 +561,13 @@ stream_log_file(get, #{bindings := #{name := Name}, query_string := Query}) -> {error, enoent} -> Meta = #{<<"position">> => Position, <<"bytes">> => Bytes}, {200, #{meta => Meta, items => <<"">>}}; - {error, Reason} -> - ?SLOG(error, #{ - msg => "read_file_failed", - node => Node, - name => Name, - reason => Reason, - position => Position, - bytes => Bytes - }), - {400, #{code => 'READ_FILE_ERROR', message => Reason}}; + {error, not_found} -> + ?NOT_FOUND(Name); {badrpc, nodedown} -> - {400, #{code => 'RPC_ERROR', message => "BadRpc node down"}} + ?BAD_REQUEST('NODE_ERROR', <<"Node not found">>) end; {error, not_found} -> - {400, #{code => 'NODE_ERROR', message => <<"Node not found">>}} + ?BAD_REQUEST('NODE_ERROR', <<"Node not found">>) end. -spec get_trace_size() -> #{{node(), file:name_all()} => non_neg_integer()}. @@ -633,8 +635,12 @@ read_file(Path, Offset, Bytes) -> parse_node(Query, Default) -> try case maps:find(<<"node">>, Query) of - error -> {ok, Default}; - {ok, Node} -> {ok, binary_to_existing_atom(Node)} + error -> + {ok, Default}; + {ok, NodeBin} -> + Node = binary_to_existing_atom(NodeBin), + true = lists:member(Node, mria_mnesia:running_nodes()), + {ok, Node} end catch _:_ -> diff --git a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl index adea70af6..067fe312f 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl @@ -228,10 +228,10 @@ t_configs_node({'end', _}) -> t_configs_node(_) -> Node = atom_to_list(node()), - ?assertEqual({ok, <<"self">>}, get_configs(Node, #{return_body => true})), - ?assertEqual({ok, <<"other">>}, get_configs("other_node", #{return_body => true})), + ?assertEqual({ok, <<"self">>}, get_configs(Node, #{return_all => true})), + ?assertEqual({ok, <<"other">>}, get_configs("other_node", #{return_all => true})), - {ExpType, ExpRes} = get_configs("unknown_node", #{return_body => true}), + {ExpType, ExpRes} = get_configs("unknown_node", #{return_all => true}), ?assertEqual(error, ExpType), ?assertMatch({{_, 404, _}, _, _}, ExpRes), {_, _, Body} = ExpRes, @@ -264,6 +264,7 @@ get_configs(Node, Opts) -> end, URI = emqx_mgmt_api_test_util:api_path(Path), case emqx_mgmt_api_test_util:request_api(get, URI, [], [], [], Opts) of + {ok, {_, _, Res}} -> {ok, emqx_json:decode(Res, [return_maps])}; {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; Error -> Error end. 
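Side note (illustrative only, not part of the patch): the hunk above switches the configs suite to the reworked `return_all` option of the shared test helper, and later hunks do the same for the publish and trace suites. A minimal sketch of the expected result shapes, where `URI`, `MissingURI` and `AuthHeader` are assumed placeholder bindings:

    %% Hedged usage sketch of emqx_mgmt_api_test_util:request_api/6 with return_all.
    Opts = #{return_all => true},
    %% A 2xx response now carries the status line and headers along with the body:
    {ok, {{"HTTP/1.1", 200, _}, _Headers, _Body}} =
        emqx_mgmt_api_test_util:request_api(get, URI, [], AuthHeader, [], Opts),
    %% A non-2xx response is returned as an error tuple of the same shape:
    {error, {{"HTTP/1.1", 404, _}, _RespHeaders, _ErrBody}} =
        emqx_mgmt_api_test_util:request_api(get, MissingURI, [], AuthHeader, [], Opts).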
diff --git a/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl index 7622b0d17..783a90185 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_publish_SUITE.erl @@ -163,7 +163,7 @@ t_publish_too_large(Config) -> "", Auth, Body, - #{return_body => true} + #{return_all => true} ), ?assertMatch({_, 400, _}, Summary), ?assertMatch( @@ -286,7 +286,7 @@ t_publish_bulk_dispatch_one_message_invalid_topic(Config) when is_list(Config) - "", Auth, Body, - #{return_body => true} + #{return_all => true} ), ?assertMatch({_, 400, _}, Summary), ?assertMatch( @@ -325,7 +325,7 @@ t_publish_bulk_dispatch_failure(Config) when is_list(Config) -> "", Auth, Body, - #{return_body => true} + #{return_all => true} ), ?assertMatch({_, 503, _}, Summary), ?assertMatch( diff --git a/apps/emqx_management/test/emqx_mgmt_api_test_util.erl b/apps/emqx_management/test/emqx_mgmt_api_test_util.erl index aed28930b..ec40e3cc2 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_test_util.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_test_util.erl @@ -89,16 +89,20 @@ request_api(Method, Url, QueryParams, AuthOrHeaders, Body, Opts) when ). do_request_api(Method, Request, Opts) -> - ReturnBody = maps:get(return_body, Opts, false), + ReturnAll = maps:get(return_all, Opts, false), ct:pal("Method: ~p, Request: ~p", [Method, Request]), case httpc:request(Method, Request, [], []) of {error, socket_closed_remotely} -> {error, socket_closed_remotely}; - {ok, {{"HTTP/1.1", Code, _}, _, Return}} when + {ok, {{"HTTP/1.1", Code, _} = Reason, Headers, Body}} when + Code >= 200 andalso Code =< 299 andalso ReturnAll + -> + {ok, {Reason, Headers, Body}}; + {ok, {{"HTTP/1.1", Code, _}, _, Body}} when Code >= 200 andalso Code =< 299 -> - {ok, Return}; - {ok, {Reason, Headers, Body}} when ReturnBody -> + {ok, Body}; + {ok, {Reason, Headers, Body}} when ReturnAll -> {error, {Reason, Headers, Body}}; {ok, {Reason, _Headers, _Body}} -> {error, Reason} diff --git a/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl index 72737ba60..d73cb79fd 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_trace_SUITE.erl @@ -149,7 +149,7 @@ t_create_failed(_Config) -> {ok, Create} = request_api(post, api_path("trace"), Header, [GoodName | Trace]), ?assertMatch(#{<<"name">> := <<"test-name-0">>}, json(Create)), ?assertMatch( - {error, {"HTTP/1.1", 400, _}, _}, + {error, {"HTTP/1.1", 409, _}, _}, request_api(post, api_path("trace"), Header, [GoodName | Trace]) ), @@ -171,6 +171,16 @@ t_create_failed(_Config) -> {error, {"HTTP/1.1", 400, _}, _}, request_api(post, api_path("trace"), Header, [GoodName1 | Trace]) ), + %% clear + ?assertMatch({ok, _}, request_api(delete, api_path("trace"), Header, [])), + {ok, Create} = request_api(post, api_path("trace"), Header, [GoodName | Trace]), + %% new name but same trace + GoodName2 = {<<"name">>, <<"test-name-1">>}, + ?assertMatch( + {error, {"HTTP/1.1", 409, _}, _}, + request_api(post, api_path("trace"), Header, [GoodName2 | Trace]) + ), + unload(), emqx_trace:clear(), ok. 
@@ -213,6 +223,27 @@ t_log_file(_Config) -> Path = api_path("trace/test_client_id/download?node=" ++ atom_to_list(node())), {ok, Binary2} = request_api(get, Path, Header), ?assertEqual(ZipTab, zip:table(Binary2)), + {error, {_, 400, _}, _} = + request_api( + get, + api_path("trace/test_client_id/download?node=unknonwn_node"), + Header + ), + {error, {_, 400, _}, _} = + request_api( + get, + % known atom but unknown node + api_path("trace/test_client_id/download?node=undefined"), + Header + ), + ?assertMatch( + {error, {"HTTP/1.1", 404, "Not Found"}, _}, + request_api( + get, + api_path("trace/test_client_not_found/download?node=" ++ atom_to_list(node())), + Header + ) + ), ok = emqtt:disconnect(Client), ok. @@ -267,6 +298,25 @@ t_stream_log(_Config) -> #{<<"meta">> := Meta1, <<"items">> := Bin1} = json(Binary1), ?assertEqual(#{<<"position">> => 30, <<"bytes">> => 10}, Meta1), ?assertEqual(10, byte_size(Bin1)), + {error, {_, 400, _}, _} = + request_api( + get, + api_path("trace/test_stream_log/log?node=unknonwn_node"), + Header + ), + {error, {_, 400, _}, _} = + request_api( + get, + % known atom but not a node + api_path("trace/test_stream_log/log?node=undefined"), + Header + ), + {error, {_, 404, _}, _} = + request_api( + get, + api_path("trace/test_stream_log_not_found/log"), + Header + ), unload(), ok. diff --git a/apps/emqx_psk/test/emqx_psk_SUITE.erl b/apps/emqx_psk/test/emqx_psk_SUITE.erl index 816562a26..af19cae38 100644 --- a/apps/emqx_psk/test/emqx_psk_SUITE.erl +++ b/apps/emqx_psk/test/emqx_psk_SUITE.erl @@ -24,8 +24,13 @@ -define(CR, 13). -define(LF, 10). -all() -> - emqx_common_test_helpers:all(?MODULE). +all() -> [{group, normal}, {group, ciphers}]. + +groups() -> + [ + {normal, [], emqx_common_test_helpers:all(?MODULE)}, + {ciphers, [], [ciphers_test]} + ]. init_per_suite(Config) -> meck:new(emqx_config, [non_strict, passthrough, no_history, no_link]), @@ -128,3 +133,47 @@ t_trim_crlf(_) -> ?assertEqual(Bin, emqx_psk:trim_crlf(Bin)), ?assertEqual(Bin, emqx_psk:trim_crlf(<>)), ?assertEqual(Bin, emqx_psk:trim_crlf(<>)). + +ciphers_test(Config) -> + Ciphers = [ + "PSK-AES256-GCM-SHA384", + "PSK-AES128-GCM-SHA256", + "PSK-AES256-CBC-SHA384", + "PSK-AES256-CBC-SHA", + "PSK-AES128-CBC-SHA256", + "PSK-AES128-CBC-SHA" + ], + lists:foreach(fun(Cipher) -> cipher_test(Cipher, Config) end, Ciphers). + +cipher_test(Cipher, _) -> + ct:pal("Test PSK with Cipher:~p~n", [Cipher]), + PSKIdentity1 = "myclient1", + SharedSecret1 = <<"8c701116e9127c57a99d5563709af3deaca75563e2c4dd0865701ae839fb6d79">>, + + ClientLookup = fun + (psk, undefined, _) -> {ok, SharedSecret1}; + (psk, _, _) -> error + end, + + ClientTLSOpts = #{ + versions => ['tlsv1.2'], + ciphers => [Cipher], + psk_identity => PSKIdentity1, + verify => verify_none, + user_lookup_fun => {ClientLookup, undefined} + }, + + ServerTLSOpts = #{ + versions => ['tlsv1.2'], + ciphers => [Cipher], + verify => verify_none, + reuseaddr => true, + user_lookup_fun => {fun emqx_tls_psk:lookup/3, undefined} + }, + emqx_config:put([listeners, ssl, default, ssl_options], ServerTLSOpts), + emqx_listeners:restart_listener('ssl:default'), + + {ok, Socket} = ssl:connect("127.0.0.1", 8883, maps:to_list(ClientTLSOpts)), + ssl:close(Socket), + + ok. 
diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 38dac5449..78f5d8342 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.2"}, + {vsn, "0.1.3"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ diff --git a/apps/emqx_retainer/src/emqx_retainer.app.src b/apps/emqx_retainer/src/emqx_retainer.app.src index 844277ba6..f61468d9b 100644 --- a/apps/emqx_retainer/src/emqx_retainer.app.src +++ b/apps/emqx_retainer/src/emqx_retainer.app.src @@ -2,7 +2,7 @@ {application, emqx_retainer, [ {description, "EMQX Retainer"}, % strict semver, bump manually! - {vsn, "5.0.7"}, + {vsn, "5.0.8"}, {modules, []}, {registered, [emqx_retainer_sup]}, {applications, [kernel, stdlib, emqx]}, diff --git a/apps/emqx_retainer/src/emqx_retainer_mnesia.erl b/apps/emqx_retainer/src/emqx_retainer_mnesia.erl index c236b9c28..d147877e8 100644 --- a/apps/emqx_retainer/src/emqx_retainer_mnesia.erl +++ b/apps/emqx_retainer/src/emqx_retainer_mnesia.erl @@ -38,11 +38,9 @@ %% Internal exports (RPC) -export([ - do_store_retained/1, - do_clear_expired/0, - do_delete_message/1, do_populate_index_meta/1, - do_reindex_batch/2 + do_reindex_batch/2, + active_indices/0 ]). %% Management API: @@ -66,6 +64,8 @@ -define(CLEAR_BATCH_SIZE, 1000). -define(REINDEX_BATCH_SIZE, 1000). -define(REINDEX_DISPATCH_WAIT, 30000). +-define(REINDEX_RPC_RETRY_INTERVAL, 1000). +-define(REINDEX_INDEX_UPDATE_WAIT, 30000). %%-------------------------------------------------------------------- %% Management API @@ -136,64 +136,41 @@ create_table(Table, RecordName, Attributes, Type, StorageType) -> end. store_retained(_, Msg = #message{topic = Topic}) -> - case mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_store_retained/1, [Msg]) of - {atomic, ok} -> - ?tp(debug, message_retained, #{topic => Topic}), - ok; - {aborted, Reason} -> + ExpiryTime = emqx_retainer:get_expiry_time(Msg), + Tokens = topic_to_tokens(Topic), + case is_table_full() andalso is_new_topic(Tokens) of + true -> ?SLOG(error, #{ msg => "failed_to_retain_message", topic => Topic, - reason => Reason - }) - end. - -do_store_retained(#message{topic = Topic} = Msg) -> - ExpiryTime = emqx_retainer:get_expiry_time(Msg), - Tokens = topic_to_tokens(Topic), - case is_table_full() of + reason => table_is_full + }); false -> - store_retained(db_indices(write), Msg, Tokens, ExpiryTime); - _ -> - case mnesia:read(?TAB_MESSAGE, Tokens, write) of - [_] -> - store_retained(db_indices(write), Msg, Tokens, ExpiryTime); - [] -> - mnesia:abort(table_is_full) - end + do_store_retained(Msg, Tokens, ExpiryTime) end. clear_expired(_) -> - {atomic, _} = mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_clear_expired/0), - ok. - -do_clear_expired() -> NowMs = erlang:system_time(millisecond), QH = qlc:q([ - TopicTokens + RetainedMsg || #retained_message{ - topic = TopicTokens, expiry_time = ExpiryTime - } <- mnesia:table(?TAB_MESSAGE, [{lock, write}]), + } = RetainedMsg <- ets:table(?TAB_MESSAGE), (ExpiryTime =/= 0) and (ExpiryTime < NowMs) ]), QC = qlc:cursor(QH), - clear_batch(db_indices(write), QC). + clear_batch(dirty_indices(write), QC). delete_message(_, Topic) -> - {atomic, _} = mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_delete_message/1, [Topic]), - ok. 
- -do_delete_message(Topic) -> Tokens = topic_to_tokens(Topic), case emqx_topic:wildcard(Topic) of false -> - ok = delete_message_by_topic(Tokens, db_indices(write)); + ok = delete_message_by_topic(Tokens, dirty_indices(write)); true -> - QH = topic_search_table(Tokens), + QH = search_table(Tokens, 0), qlc:fold( - fun(TopicTokens, _) -> - ok = delete_message_by_topic(TopicTokens, db_indices(write)) + fun(RetainedMsg, _) -> + ok = delete_message_with_indices(RetainedMsg, dirty_indices(write)) end, undefined, QH @@ -206,7 +183,7 @@ read_message(_, Topic) -> match_messages(_, Topic, undefined) -> Tokens = topic_to_tokens(Topic), Now = erlang:system_time(millisecond), - QH = search_table(Tokens, Now), + QH = msg_table(search_table(Tokens, Now)), case batch_read_number() of all_remaining -> {ok, qlc:eval(QH), undefined}; @@ -227,10 +204,10 @@ page_read(_, Topic, Page, Limit) -> QH = case Topic of undefined -> - search_table(undefined, ['#'], Now); + msg_table(search_table(undefined, ['#'], Now)); _ -> Tokens = topic_to_tokens(Topic), - search_table(Tokens, Now) + msg_table(search_table(Tokens, Now)) end, OrderedQH = qlc:sort(QH, {order, fun compare_message/2}), Cursor = qlc:cursor(OrderedQH), @@ -281,49 +258,49 @@ reindex_status() -> %% Internal functions %%-------------------------------------------------------------------- -store_retained(Indices, Msg, Tokens, ExpiryTime) -> - ok = store_retained_message(Msg, Tokens, ExpiryTime), - ok = emqx_retainer_index:foreach_index_key( - fun(Key) -> store_retained_index(Key, ExpiryTime) end, - Indices, - Tokens - ). +do_store_retained(Msg, TopicTokens, ExpiryTime) -> + %% Retained message is stored synchronously on all core nodes + ok = do_store_retained_message(Msg, TopicTokens, ExpiryTime), + %% Since retained message was stored synchronously on all core nodes, + %% now we are sure that + %% * either we will write correct indices + %% * or if we are a replicant with outdated write indices due to reindexing, + %% the correct indices will be added by reindexing + ok = do_store_retained_indices(TopicTokens, ExpiryTime). -store_retained_message(Msg, Tokens, ExpiryTime) -> +do_store_retained_message(Msg, TopicTokens, ExpiryTime) -> RetainedMessage = #retained_message{ - topic = Tokens, + topic = TopicTokens, msg = Msg, expiry_time = ExpiryTime }, - mnesia:write(?TAB_MESSAGE, RetainedMessage, write). + ok = mria:dirty_write_sync(?TAB_MESSAGE, RetainedMessage). -store_retained_index(Key, ExpiryTime) -> +do_store_retained_indices(TopicTokens, ExpiryTime) -> + Indices = dirty_indices(write), + ok = emqx_retainer_index:foreach_index_key( + fun(Key) -> do_store_retained_index(Key, ExpiryTime) end, + Indices, + TopicTokens + ). + +do_store_retained_index(Key, ExpiryTime) -> RetainedIndex = #retained_index{ key = Key, expiry_time = ExpiryTime }, - mnesia:write(?TAB_INDEX, RetainedIndex, write). + mria:dirty_write(?TAB_INDEX, RetainedIndex). -topic_search_table(Tokens) -> - Index = emqx_retainer_index:select_index(Tokens, db_indices(read)), - topic_search_table(Index, Tokens).
- -topic_search_table(undefined, Tokens) -> - Cond = emqx_retainer_index:condition(Tokens), - Ms = [{#retained_message{topic = Cond, msg = '_', expiry_time = '_'}, [], ['$_']}], - MsgQH = mnesia:table(?TAB_MESSAGE, [{traverse, {select, Ms}}]), - qlc:q([Topic || #retained_message{topic = Topic} <- MsgQH]); -topic_search_table(Index, Tokens) -> - Cond = emqx_retainer_index:condition(Index, Tokens), - Ms = [{#retained_index{key = Cond, expiry_time = '_'}, [], ['$_']}], - IndexQH = mnesia:table(?TAB_INDEX, [{traverse, {select, Ms}}]), +msg_table(SearchTable) -> qlc:q([ - emqx_retainer_index:restore_topic(Key) - || #retained_index{key = Key} <- IndexQH + Msg + || #retained_message{ + msg = Msg + } <- SearchTable ]). search_table(Tokens, Now) -> - Indices = dirty_read_indices(), + Indices = dirty_indices(read), Index = emqx_retainer_index:select_index(Tokens, Indices), search_table(Index, Tokens, Now). @@ -341,26 +318,21 @@ search_table(Index, Tokens, Now) -> || TopicTokens <- Topics ]), qlc:q([ - Msg + RetainedMsg || [ #retained_message{ - msg = Msg, expiry_time = ExpiryTime - } + } = RetainedMsg ] <- RetainedMsgQH, (ExpiryTime == 0) or (ExpiryTime > Now) ]). -dirty_read_indices() -> - case ets:lookup(?TAB_INDEX_META, ?META_KEY) of - [#retained_index_meta{read_indices = ReadIndices}] -> ReadIndices; - [] -> [] - end. - clear_batch(Indices, QC) -> {Result, Rows} = qlc_next_answers(QC, ?CLEAR_BATCH_SIZE), lists:foreach( - fun(TopicTokens) -> delete_message_by_topic(TopicTokens, Indices) end, + fun(RetainedMsg) -> + delete_message_with_indices(RetainedMsg, Indices) + end, Rows ), case Result of @@ -369,14 +341,23 @@ clear_batch(Indices, QC) -> end. delete_message_by_topic(TopicTokens, Indices) -> + case mnesia:dirty_read(?TAB_MESSAGE, TopicTokens) of + [] -> ok; + [RetainedMsg] -> delete_message_with_indices(RetainedMsg, Indices) + end. + +delete_message_with_indices(RetainedMsg, Indices) -> + #retained_message{topic = TopicTokens, expiry_time = ExpiryTime} = RetainedMsg, ok = emqx_retainer_index:foreach_index_key( fun(Key) -> - mnesia:delete({?TAB_INDEX, Key}) + mria:dirty_delete_object(?TAB_INDEX, #retained_index{ + key = Key, expiry_time = ExpiryTime + }) end, Indices, TopicTokens ), - ok = mnesia:delete({?TAB_MESSAGE, TopicTokens}). + ok = mria:dirty_delete_object(?TAB_MESSAGE, RetainedMsg). compare_message(M1, M2) -> M1#message.timestamp =< M2#message.timestamp. @@ -415,20 +396,26 @@ qlc_next_answers(QC, N) -> make_message_match_spec(Tokens, NowMs) -> Cond = emqx_retainer_index:condition(Tokens), - MsHd = #retained_message{topic = Cond, msg = '$2', expiry_time = '$3'}, - [{MsHd, [{'orelse', {'=:=', '$3', 0}, {'>', '$3', NowMs}}], ['$2']}]. + MsHd = #retained_message{topic = Cond, msg = '_', expiry_time = '$3'}, + [{MsHd, [{'orelse', {'=:=', '$3', 0}, {'>', '$3', NowMs}}], ['$_']}]. make_index_match_spec(Index, Tokens, NowMs) -> Cond = emqx_retainer_index:condition(Index, Tokens), MsHd = #retained_index{key = Cond, expiry_time = '$3'}, [{MsHd, [{'orelse', {'=:=', '$3', 0}, {'>', '$3', NowMs}}], ['$_']}]. --spec is_table_full() -> boolean(). is_table_full() -> Limit = emqx:get_config([retainer, backend, max_retained_messages]), Limit > 0 andalso (table_size() >= Limit). --spec table_size() -> non_neg_integer(). +is_new_topic(Tokens) -> + case mnesia:dirty_read(?TAB_MESSAGE, Tokens) of + [_] -> + false; + [] -> + true + end. + table_size() -> mnesia:table_info(?TAB_MESSAGE, size). @@ -486,8 +473,14 @@ do_populate_index_meta(ConfigIndices) -> ) end. 
+dirty_indices(Type) -> + indices(ets:lookup(?TAB_INDEX_META, ?META_KEY), Type). + db_indices(Type) -> - case mnesia:read(?TAB_INDEX_META, ?META_KEY) of + indices(mnesia:read(?TAB_INDEX_META, ?META_KEY), Type). + +indices(IndexRecords, Type) -> + case IndexRecords of [#retained_index_meta{read_indices = ReadIndices, write_indices = WriteIndices}] -> case Type of read -> ReadIndices; @@ -506,10 +499,15 @@ batch_read_number() -> reindex(NewIndices, Force, StatusFun) when is_boolean(Force) andalso is_function(StatusFun, 1) -> + %% Do not run on replicants + core = mria_rlog:role(), %% Disable read indices and update write indices so that new records are written %% with correct indices. Also block parallel reindexing. case try_start_reindex(NewIndices, Force) of {atomic, ok} -> + %% Wait for all nodes to have new indices, including rlog nodes + true = wait_indices_updated({[], NewIndices}, ?REINDEX_INDEX_UPDATE_WAIT), + %% Wait for all dispatch operations to be completed to avoid %% inconsistent results. true = wait_dispatch_complete(?REINDEX_DISPATCH_WAIT), @@ -592,7 +590,7 @@ reindex_topic(Indices, Topic) -> case mnesia:read(?TAB_MESSAGE, Topic, read) of [#retained_message{expiry_time = ExpiryTime}] -> ok = emqx_retainer_index:foreach_index_key( - fun(Key) -> store_retained_index(Key, ExpiryTime) end, + fun(Key) -> do_store_retained_index(Key, ExpiryTime) end, Indices, Topic ); @@ -627,8 +625,35 @@ do_reindex_batch(QC, Done) -> wait_dispatch_complete(Timeout) -> Nodes = mria_mnesia:running_nodes(), - {Results, []} = emqx_retainer_proto_v1:wait_dispatch_complete(Nodes, Timeout), + {Results, []} = emqx_retainer_proto_v2:wait_dispatch_complete(Nodes, Timeout), lists:all( fun(Result) -> Result =:= ok end, Results ). + +wait_indices_updated(_Indices, TimeLeft) when TimeLeft < 0 -> false; +wait_indices_updated(Indices, TimeLeft) -> + case timer:tc(fun() -> are_indices_updated(Indices) end) of + {_, true} -> + true; + {TimePassed, false} -> + timer:sleep(?REINDEX_RPC_RETRY_INTERVAL), + wait_indices_updated( + Indices, TimeLeft - ?REINDEX_RPC_RETRY_INTERVAL - TimePassed / 1000 + ) + end. + +active_indices() -> + {dirty_indices(read), dirty_indices(write)}. + +are_indices_updated(Indices) -> + Nodes = mria_mnesia:running_nodes(), + case emqx_retainer_proto_v2:active_mnesia_indices(Nodes) of + {Results, []} -> + lists:all( + fun(NodeIndices) -> NodeIndices =:= Indices end, + Results + ); + _ -> + false + end. 
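Side note (illustrative only, not part of the patch): the reindex coordinator above waits until every running node reports the same pair of read/write indices through the new `active_indices/0` RPC before it proceeds. A sketch of that agreement check follows; the index values in the comment are assumed examples, not defaults:

    %% Each node answers {ReadIndices, WriteIndices}.
    Nodes = mria_mnesia:running_nodes(),
    {PerNode, []} = emqx_retainer_proto_v2:active_mnesia_indices(Nodes),
    %% e.g. PerNode = [{[], [[1, 3]]}, {[], [[1, 3]]}] on a two-node cluster where
    %% read indices are disabled and writes already target the new index set.
    true = lists:all(fun(Indices) -> Indices =:= hd(PerNode) end, PerNode).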
diff --git a/apps/emqx_retainer/src/emqx_retainer_mnesia_cli.erl b/apps/emqx_retainer/src/emqx_retainer_mnesia_cli.erl index a576b953d..402c8003f 100644 --- a/apps/emqx_retainer/src/emqx_retainer_mnesia_cli.erl +++ b/apps/emqx_retainer/src/emqx_retainer_mnesia_cli.erl @@ -50,11 +50,39 @@ retainer(["reindex", "status"]) -> retainer(["reindex", "start"]) -> retainer(["reindex", "start", "false"]); retainer(["reindex", "start", ForceParam]) -> - Force = - case ForceParam of - "true" -> true; - _ -> false - end, + case mria_rlog:role() of + core -> + Force = + case ForceParam of + "true" -> true; + _ -> false + end, + do_reindex(Force); + replicant -> + ?PRINT_MSG("Can't run reindex on a replicant node") + end; +retainer(_) -> + emqx_ctl:usage( + [ + {"retainer info", "Show the count of retained messages"}, + {"retainer topics", "Show all topics of retained messages"}, + {"retainer clean", "Clean all retained messages"}, + {"retainer clean <Topic>", "Clean retained messages by the specified topic filter"}, + {"retainer reindex status", "Show reindex status"}, + {"retainer reindex start [force]", + "Generate new retainer topic indices from config settings.\n" + "Pass true as <Force> to ignore previously started reindexing"} + ] + ). + +unload() -> + ok = emqx_ctl:unregister_command(retainer). + +%%------------------------------------------------------------------------------ +%% Private +%%------------------------------------------------------------------------------ + +do_reindex(Force) -> ?PRINT_MSG("Starting reindexing~n"), emqx_retainer_mnesia:reindex( Force, @@ -69,20 +97,4 @@ retainer(["reindex", "start", ForceParam]) -> ?PRINT("Reindexed ~p messages~n", [Done]) end ), - ?PRINT_MSG("Reindexing finished~n"); -retainer(_) -> - emqx_ctl:usage( - [ - {"retainer info", "Show the count of retained messages"}, - {"retainer topics", "Show all topics of retained messages"}, - {"retainer clean", "Clean all retained messages"}, - {"retainer clean <Topic>", "Clean retained messages by the specified topic filter"}, - {"retainer reindex status", "Show reindex status"}, - {"retainer reindex start [force]", - "Generate new retainer topic indices config settings.\n" - "Pass true as <Force> to ignore previously started reindexing"} - ] - ). - -unload() -> - ok = emqx_ctl:unregister_command(retainer). + ?PRINT_MSG("Reindexing finished~n"). diff --git a/apps/emqx_retainer/src/proto/emqx_retainer_proto_v2.erl b/apps/emqx_retainer/src/proto/emqx_retainer_proto_v2.erl new file mode 100644 index 000000000..4b98f945f --- /dev/null +++ b/apps/emqx_retainer/src/proto/emqx_retainer_proto_v2.erl @@ -0,0 +1,41 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_retainer_proto_v2). + +-behaviour(emqx_bpapi). + +-include_lib("emqx/include/bpapi.hrl").
+ +-export([ + introduced_in/0, + wait_dispatch_complete/2, + active_mnesia_indices/1 +]). + +-define(TIMEOUT, 5000). + +introduced_in() -> + "5.0.13". + +-spec wait_dispatch_complete(list(node()), timeout()) -> emqx_rpc:multicall_result(ok). +wait_dispatch_complete(Nodes, Timeout) -> + rpc:multicall(Nodes, emqx_retainer_dispatcher, wait_dispatch_complete, [Timeout]). + +-spec active_mnesia_indices(list(node())) -> + emqx_rpc:multicall_result({list(emqx_retainer_index:index()), list(emqx_retainer_index:index())}). +active_mnesia_indices(Nodes) -> + rpc:multicall(Nodes, emqx_retainer_mnesia, active_indices, [], ?TIMEOUT). diff --git a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl index f3e46aed9..e6f4a404e 100644 --- a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl +++ b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl @@ -318,6 +318,25 @@ t_message_expiry_2(_) -> end, with_conf(ConfMod, Case). +t_table_full(_) -> + ConfMod = fun(Conf) -> + Conf#{<<"backend">> => #{<<"max_retained_messages">> => <<"1">>}} + end, + Case = fun() -> + {ok, C1} = emqtt:start_link([{clean_start, true}, {proto_ver, v5}]), + {ok, _} = emqtt:connect(C1), + emqtt:publish(C1, <<"retained/t/1">>, <<"a">>, [{qos, 0}, {retain, true}]), + emqtt:publish(C1, <<"retained/t/2">>, <<"b">>, [{qos, 0}, {retain, true}]), + + {ok, #{}, [0]} = emqtt:subscribe(C1, <<"retained/t/1">>, [{qos, 0}, {rh, 0}]), + ?assertEqual(1, length(receive_messages(1))), + {ok, #{}, [0]} = emqtt:subscribe(C1, <<"retained/t/2">>, [{qos, 0}, {rh, 0}]), + ?assertEqual(0, length(receive_messages(1))), + + ok = emqtt:disconnect(C1) + end, + with_conf(ConfMod, Case). + t_clean(_) -> {ok, C1} = emqtt:start_link([{clean_start, true}, {proto_ver, v5}]), {ok, _} = emqtt:connect(C1), diff --git a/bin/emqx b/bin/emqx index 199898ea4..f4c52d1e4 100755 --- a/bin/emqx +++ b/bin/emqx @@ -115,14 +115,14 @@ usage() { echo "Print path to Erlang runtime bin dir" ;; rpc) - echo "Usge $REL_NAME rpc MODULE FUNCTION [ARGS, ...]" + echo "Usage: $REL_NAME rpc MODULE FUNCTION [ARGS, ...]" echo "Connect to the EMQX node and make an Erlang RPC" echo "This command blocks for at most 60 seconds." echo "It exits with non-zero code in case of any RPC failure" echo "including connection error and runtime exception" ;; rpcterms) - echo "Usge $REL_NAME rpcterms MODULE FUNCTION [ARGS, ...]" + echo "Usage: $REL_NAME rpcterms MODULE FUNCTION [ARGS, ...]" echo "Connect to the EMQX node and make an Erlang RPC" echo "The result of the RPC call is pretty-printed as an " echo "Erlang term" diff --git a/changes/v5.0.12-en.md b/changes/v5.0.12-en.md index 7388e1e02..c99b1bb95 100644 --- a/changes/v5.0.12-en.md +++ b/changes/v5.0.12-en.md @@ -1,5 +1,11 @@ # v5.0.12 +This version includes a refactoring of the MQTT bridge config. +A config file created with v5.0.11 or earlier will be converted automatically +according to the new schema. + +Please note that the request body of the `/bridges` API used to configure MQTT bridges has changed in an incompatible way. + ## Enhancements - Disable global garbage collection by `node.global_gc_interval = disabled` [#9418](https://github.com/emqx/emqx/pull/9418). @@ -16,6 +22,28 @@ - Redesign `/rules` API to make `metrics` a dedicated resource rather than being included with every response [#9461](https://github.com/emqx/emqx/pull/9461). +- Add support for more PSK ciphers [#9505](https://github.com/emqx/emqx/pull/9505).
+ +- Improve `emqx_retainer` write performance: get rid of transactions on write [#9372](https://github.com/emqx/emqx/pull/9372). + +- HTTP client library `ehttpc` upgraded from `0.4.0` to `0.4.2` [#9520](https://github.com/emqx/emqx/pull/9520). + +- Add `handshake_timeout` option to MQTT SSL listener [#9502](https://github.com/emqx/emqx/pull/9502). + +- Upgrade dashboard to [v1.1.3](https://github.com/emqx/emqx-dashboard-web-new/releases/tag/v1.1.3). + +- Users can define the `externalTrafficPolicy` of the service in the EMQX Helm Chart [#9527](https://github.com/emqx/emqx/pull/9527). + +- Return `204` instead of `200` for `POST /gateway/lwm2m/clients/{clientid}/{read,write,observe}` [#9480](https://github.com/emqx/emqx/pull/9480). + +- Make it possible to create an authentication entirely from environment variables [#9547](https://github.com/emqx/emqx/pull/9547). + As an example, one can now enable MySQL auth with: + `env EMQX_AUTHENTICATION__1='{mechanism="password_based",backend="mysql",server="localhost:3306",database="emqx",username="emqx",password="******",query="SELECT password_hash,salt FROM mqtt_user WHERE username=${username} LIMIT 1",enable=true}'`. + Prior to this change, overrides only worked on top of an existing authentication; for example, if MySQL auth was already configured in `emqx.conf` + and we wanted to disable it, we could do so with `env EMQX_AUTHENTICATION__1__ENABLE=false`. + +- Start building packages for Amazon Linux 2 [#9537](https://github.com/emqx/emqx/pull/9537). + ## Bug fixes - Fix that the obsolete SSL files aren't deleted after the ExHook config update [#9432](https://github.com/emqx/emqx/pull/9432). @@ -25,3 +53,18 @@ - Return `404` for `/telemetry/data` in case it's disabled [#9464](https://github.com/emqx/emqx/pull/9464). - Fix some potential MQTT packet parse errors [#9477](https://github.com/emqx/emqx/pull/9477). + +- Fixed EMQX Helm Chart deployment errors [#9509](https://github.com/emqx/emqx/pull/9509). + - Fixed the `Discovery error: no such service` error that occurred during Helm Chart deployment and caused abnormal discovery of cluster nodes. + - Fixed an issue that caused the EMQX Helm Chart to fail when modifying some of EMQX's configuration items via environment variables. + +- Fix `'client.authenticate'` callbacks being shadowed by `emqx_authenticator`. Now `emqx_authenticator` + passes execution on to further callbacks if none of its authenticators matches [#9496](https://github.com/emqx/emqx/pull/9496). + +- Return `400` if query param `node` is not a known node in `/trace/:id/download?node={node}` [#9478](https://github.com/emqx/emqx/pull/9478). + +- `POST /traces` now returns `409` in case of a duplicate trace [#9494](https://github.com/emqx/emqx/pull/9494). + +- Fix the bridging function: when both ingress and egress bridges were configured, the egress bridge did not work [#9523](https://github.com/emqx/emqx/pull/9523). + +- Fix EMQX Helm Chart using incorrect secret values when custom credentials are provided [#9536](https://github.com/emqx/emqx/pull/9536).
diff --git a/changes/v5.0.12-zh.md b/changes/v5.0.12-zh.md index b1e487131..6d8ed4643 100644 --- a/changes/v5.0.12-zh.md +++ b/changes/v5.0.12-zh.md @@ -1,5 +1,10 @@ # v5.0.12 +该版本包含了 MQTT 桥接的一个重构。 +v5.0.11 或更早版本创建的配置文件,在新版本中会被自动转换。 + +需要注意的是,用于配置 MQTT 桥接的 API `/bridges` 请求的结构发生了不兼容的变更。 + ## 增强 - 通过 `node.global_gc_interval = disabled` 来禁用全局垃圾回收 [#9418](https://github.com/emqx/emqx/pull/9418)。 @@ -16,6 +21,28 @@ - 重新设计了 `/rules` API,将 `metrics` 改为专用资源,而不再是包含在每个响应中 [#9461](https://github.com/emqx/emqx/pull/9461)。 +- 支持更多的 PSK 密码套件[#9505](https://github.com/emqx/emqx/pull/9505)。 + +- 提高 `emqx_retainer` 写入性能:摆脱写入时的事务 [#9372](https://github.com/emqx/emqx/pull/9372)。 + +- HTTP 客户端库 `ehttpc` 从 `0.4.0` 升级到 `0.4.2` [#9520](https://github.com/emqx/emqx/pull/9520)。 + +- 为 MQTT SSL 监听器增加配置 `handshake_timeout` [#9502](https://github.com/emqx/emqx/pull/9502)。 + +- Dashboard 更新到 [v1.1.3](https://github.com/emqx/emqx-dashboard-web-new/releases/tag/v1.1.3)。 + +- 用户可以在 EMQX Helm Chart 中自定义 service 资源的 `externalTrafficPolicy` [#9527](https://github.com/emqx/emqx/pull/9527)。 + +- 现在调用 `POST /gateway/lwm2m/clients/{clientid}/{read,write,observe}` 时,将会返回 204,而不再是 200 [#9480](https://github.com/emqx/emqx/pull/9480)。 + +- 允许使用环境变量来创建一个认证配置 [#9547](https://github.com/emqx/emqx/pull/9547)。 + 例如,现在可以用如下环境变量来创建一个 MySQL 认证: + `env EMQX_AUTHENTICATION__1='{mechanism="password_based",backend="mysql",server="localhost:3306",database="emqx",username="emqx",password="******",query="SELECT password_hash,salt FROM mqtt_user WHERE username=${username} LIMIT 1",enable=true}'`。 + 在此之前,环境变量的重载仅作用于已经存在的配置之上,例如,当 `emqx.conf` 中已经配置了一个 MySQL 认证,那么可以使用如下方法来将它禁用: + `env EMQX_AUTHENTICATION__1__ENABLE=false`。 + +- 为 Amazon Linux 2 平台发布安装包 [#9537](https://github.com/emqx/emqx/pull/9537)。 + ## 修复 - 修复 ExHook 更新 SSL 相关配置后,过时的 SSL 文件没有被删除的问题 [#9432](https://github.com/emqx/emqx/pull/9432)。 @@ -25,3 +52,17 @@ - 在遥测功能未开启时,通过 /telemetry/data 请求其数据,将会返回 404 [#9464](https://github.com/emqx/emqx/pull/9464)。 - 修复了一些 MQTT 协议包的潜在解析错误 [#9477](https://github.com/emqx/emqx/pull/9477)。 + +- 修复了 EMQX Helm Chart 部署的一些问题 [#9509](https://github.com/emqx/emqx/pull/9509)。 + - 修复了 EMQX Helm Chart 部署时出现 `Discovery error: no such service` 错误,导致集群节点发现异常。 + - 修复了 EMQX Helm Chart 通过环境变量修改部分 EMQX 的配置项时的错误。 + +- 通过 `emqx_authenticator` 修复隐藏 `'client.authenticate'` 回调。 现在 `emqx_authenticator` 如果没有任何验证器匹配,则将执行传递给进一步的回调 [#9496](https://github.com/emqx/emqx/pull/9496)。 + +- 如果在调用 `/trace/:id/download?node={node}` 时,`node` 不存在,则会返回 `400` [#9478](https://github.com/emqx/emqx/pull/9478)。 + +- 当重复调用 `POST /traces` 时,将会返回 `409` ,而不再是 `400` [#9494](https://github.com/emqx/emqx/pull/9494)。 + +- 桥接功能修复,当同时配置了2个桥,方向为入桥和出桥时,出桥不工作的问题。[#9523](https://github.com/emqx/emqx/pull/9523). + +- 修复了 EMQX Helm Chart 中当用户使用自定义的用户名和密码时,创建的 Secret 资源不正确问题 [#9536](https://github.com/emqx/emqx/pull/9536)。 diff --git a/deploy/charts/emqx-enterprise/README.md b/deploy/charts/emqx-enterprise/README.md index a579af70d..33a3fa22f 100644 --- a/deploy/charts/emqx-enterprise/README.md +++ b/deploy/charts/emqx-enterprise/README.md @@ -37,63 +37,64 @@ $ helm del my-emqx The following table lists the configurable parameters of the emqx chart and their default values. 
-| Parameter | Description | Default Value | +| Parameter | Description | Default Value | |--------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `replicaCount` | It is recommended to have odd number of nodes in a cluster, otherwise the emqx cluster cannot be automatically healed in case of net-split. | 3 | -| `image.repository` | EMQX Image name | emqx/emqx | -| `image.pullPolicy` | The image pull policy | IfNotPresent | -| `image.pullSecrets ` | The image pull secrets | `[]` (does not add image pull secrets to deployed pods) | -| `envFromSecret` | The name pull a secret in the same kubernetes namespace which contains values that will be added to the environment | nil | -| `recreatePods` | Forces the recreation of pods during upgrades, which can be useful to always apply the most recent configuration. | false | -| `podAnnotations ` | Annotations for pod | `{}` | -| `podManagementPolicy` | To redeploy a chart with existing PVC(s), the value must be set to Parallel to avoid deadlock | `Parallel` | -| `persistence.enabled` | Enable EMQX persistence using PVC | false | -| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | -| `persistence.existingClaim` | EMQX data Persistent Volume existing claim name, evaluated as a template | "" | -| `persistence.accessMode` | PVC Access Mode for EMQX volume | ReadWriteOnce | -| `persistence.size` | PVC Storage Request for EMQX volume | 20Mi | -| `initContainers` | Containers that run before the creation of EMQX containers. They can contain utilities or setup scripts. | `{}` | -| `resources` | CPU/Memory resource requests/limits | {} | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Toleration labels for pod assignment | `[]` | -| `affinity` | Map of node/pod affinities | `{}` | -| `service.type` | Kubernetes Service type. | ClusterIP | -| `service.mqtt` | Port for MQTT. | 1883 | -| `service.mqttssl` | Port for MQTT(SSL). | 8883 | -| `service.ws` | Port for WebSocket/HTTP. | 8083 | -| `service.wss` | Port for WSS/HTTPS. | 8084 | -| `service.dashboard` | Port for dashboard and API. | 18083 | -| `service.nodePorts.mqtt` | Kubernetes node port for MQTT. | nil | -| `service.nodePorts.mqttssl` | Kubernetes node port for MQTT(SSL). | nil | -| `service.nodePorts.mgmt` | Kubernetes node port for mgmt API. | nil | -| `service.nodePorts.ws` | Kubernetes node port for WebSocket/HTTP. | nil | -| `service.nodePorts.wss` | Kubernetes node port for WSS/HTTPS. | nil | -| `service.nodePorts.dashboard` | Kubernetes node port for dashboard. 
| nil | -| `service.loadBalancerIP` | loadBalancerIP for Service | nil | -| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | [] | -| `service.externalIPs` | ExternalIPs for the service | [] | -| `service.annotations` | Service annotations | {}(evaluated as a template) | -| `ingress.dashboard.enabled` | Enable ingress for EMQX Dashboard | false | -| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | | -| `ingress.dashboard.path` | Ingress path for EMQX Dashboard | / | -| `ingress.dashboard.pathType` | Ingress pathType for EMQX Dashboard | `ImplementationSpecific` | -| `ingress.dashboard.hosts` | Ingress hosts for EMQX Mgmt API | dashboard.emqx.local | -| `ingress.dashboard.tls` | Ingress tls for EMQX Mgmt API | [] | -| `ingress.dashboard.annotations` | Ingress annotations for EMQX Mgmt API | {} | -| `ingress.mgmt.enabled` | Enable ingress for EMQX Mgmt API | false | -| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Mgmt API | | -| `ingress.mgmt.path` | Ingress path for EMQX Mgmt API | / | -| `ingress.mgmt.hosts` | Ingress hosts for EMQX Mgmt API | api.emqx.local | -| `ingress.mgmt.tls` | Ingress tls for EMQX Mgmt API | [] | -| `ingress.mgmt.annotations` | Ingress annotations for EMQX Mgmt API | {} | -| `metrics.enable` | If set to true, [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) needs to be installed, and emqx_prometheus needs to enable | false | -| `metrics.type` | Now we only supported "prometheus" | "prometheus" | -| `ssl.enabled` | Enable SSL support | false | -| `ssl.useExisting` | Use existing certificate or let cert-manager generate one | false | -| `ssl.existingName` | Name of existing certificate | emqx-tls | -| `ssl.dnsnames` | DNS name(s) for certificate to be generated | {} | -| `ssl.issuer.name` | Issuer name for certificate generation | letsencrypt-dns | -| `ssl.issuer.kind` | Issuer kind for certificate generation | ClusterIssuer | +| `replicaCount` | It is recommended to have odd number of nodes in a cluster, otherwise the emqx cluster cannot be automatically healed in case of net-split. | 3 | +| `image.repository` | EMQX Image name | `emqx/emqx-enterprise` | +| `image.pullPolicy` | The image pull policy | IfNotPresent | +| `image.pullSecrets ` | The image pull secrets | `[]` (does not add image pull secrets to deployed pods) | +| `envFromSecret` | The name pull a secret in the same kubernetes namespace which contains values that will be added to the environment | nil | +| `recreatePods` | Forces the recreation of pods during upgrades, which can be useful to always apply the most recent configuration. | false | +| `podAnnotations ` | Annotations for pod | `{}` | +| `podManagementPolicy` | To redeploy a chart with existing PVC(s), the value must be set to Parallel to avoid deadlock | `Parallel` | +| `persistence.enabled` | Enable EMQX persistence using PVC | false | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.existingClaim` | EMQX data Persistent Volume existing claim name, evaluated as a template | "" | +| `persistence.accessMode` | PVC Access Mode for EMQX volume | ReadWriteOnce | +| `persistence.size` | PVC Storage Request for EMQX volume | 20Mi | +| `initContainers` | Containers that run before the creation of EMQX containers. They can contain utilities or setup scripts. 
| `{}` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Map of node/pod affinities | `{}` | +| `service.type` | Kubernetes Service type. | ClusterIP | +| `service.mqtt` | Port for MQTT. | 1883 | +| `service.mqttssl` | Port for MQTT(SSL). | 8883 | +| `service.ws` | Port for WebSocket/HTTP. | 8083 | +| `service.wss` | Port for WSS/HTTPS. | 8084 | +| `service.dashboard` | Port for dashboard and API. | 18083 | +| `service.nodePorts.mqtt` | Kubernetes node port for MQTT. | nil | +| `service.nodePorts.mqttssl` | Kubernetes node port for MQTT(SSL). | nil | +| `service.nodePorts.mgmt` | Kubernetes node port for mgmt API. | nil | +| `service.nodePorts.ws` | Kubernetes node port for WebSocket/HTTP. | nil | +| `service.nodePorts.wss` | Kubernetes node port for WSS/HTTPS. | nil | +| `service.nodePorts.dashboard` | Kubernetes node port for dashboard. | nil | +| `service.loadBalancerIP` | loadBalancerIP for Service | nil | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | [] | +| `service.externalIPs` | ExternalIPs for the service | [] | +`service.externalTrafficPolicy` | External Traffic Policy for the service | `Cluster` +| `service.annotations` | Service annotations | {}(evaluated as a template) | +| `ingress.dashboard.enabled` | Enable ingress for EMQX Dashboard | false | +| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | | +| `ingress.dashboard.path` | Ingress path for EMQX Dashboard | / | +| `ingress.dashboard.pathType` | Ingress pathType for EMQX Dashboard | `ImplementationSpecific` | +| `ingress.dashboard.hosts` | Ingress hosts for EMQX Mgmt API | dashboard.emqx.local | +| `ingress.dashboard.tls` | Ingress tls for EMQX Mgmt API | [] | +| `ingress.dashboard.annotations` | Ingress annotations for EMQX Mgmt API | {} | +| `ingress.mgmt.enabled` | Enable ingress for EMQX Mgmt API | false | +| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Mgmt API | | +| `ingress.mgmt.path` | Ingress path for EMQX Mgmt API | / | +| `ingress.mgmt.hosts` | Ingress hosts for EMQX Mgmt API | api.emqx.local | +| `ingress.mgmt.tls` | Ingress tls for EMQX Mgmt API | [] | +| `ingress.mgmt.annotations` | Ingress annotations for EMQX Mgmt API | {} | +| `metrics.enable` | If set to true, [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) needs to be installed, and emqx_prometheus needs to enable | false | +| `metrics.type` | Now we only supported "prometheus" | "prometheus" | +| `ssl.enabled` | Enable SSL support | false | +| `ssl.useExisting` | Use existing certificate or let cert-manager generate one | false | +| `ssl.existingName` | Name of existing certificate | emqx-tls | +| `ssl.dnsnames` | DNS name(s) for certificate to be generated | {} | +| `ssl.issuer.name` | Issuer name for certificate generation | letsencrypt-dns | +| `ssl.issuer.kind` | Issuer kind for certificate generation | ClusterIssuer | ## EMQX specific settings diff --git a/deploy/charts/emqx-enterprise/templates/configmap.yaml b/deploy/charts/emqx-enterprise/templates/configmap.yaml index e0563d02a..5086f85f6 100644 --- a/deploy/charts/emqx-enterprise/templates/configmap.yaml +++ b/deploy/charts/emqx-enterprise/templates/configmap.yaml @@ -10,10 +10,25 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} data: 
+ EMQX_NAME: {{ .Release.Name }} + {{- if eq (.Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY) "k8s" }} + EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc:443" + EMQX_CLUSTER__K8S__SERVICE_NAME: {{ include "emqx.fullname" . }}-headless + EMQX_CLUSTER__K8S__NAMESPACE: {{ .Release.Namespace }} + EMQX_CLUSTER__K8S__ADDRESS_TYPE: "hostname" + EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local" + {{- else if eq (.Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY) "dns" }} + EMQX_CLUSTER__DNS__NAME: "{{ include "emqx.fullname" . }}-headless.{{ .Release.Namespace }}.svc.cluster.local" + EMQX_CLUSTER__DNS__RECORD_TYPE: "srv" + {{- end -}} {{- range $index, $value := .Values.emqxConfig }} {{- if $value }} {{- $key := (regexReplaceAllLiteral "\\." (regexReplaceAllLiteral "EMQX[_\\.]" (upper (trimAll " " $index)) "") "__") }} + {{- if or (kindIs "map" $value) (kindIs "slice" $value) }} + {{ print "EMQX_" $key }}: {{ tpl (printf "%q" (toJson $value)) $ }} + {{- else }} {{ print "EMQX_" $key }}: "{{ tpl (printf "%v" $value) $ }}" {{- end }} {{- end }} + {{- end }} {{- end }} diff --git a/deploy/charts/emqx-enterprise/templates/secret.yaml b/deploy/charts/emqx-enterprise/templates/secret.yaml index 447326769..440f50d30 100644 --- a/deploy/charts/emqx-enterprise/templates/secret.yaml +++ b/deploy/charts/emqx-enterprise/templates/secret.yaml @@ -6,14 +6,6 @@ metadata: namespace: {{ .Release.Namespace }} type: kubernetes.io/basic-auth stringData: - {{- if not (empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME) }} - username: admin - {{- else }} - username: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME }} - {{- end }} - {{- if not (empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD) }} - password: public - {{- else }} - password: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD}} - {{- end }} + username: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME | default "admin" }} + password: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD | default "public" }} {{- end }} diff --git a/deploy/charts/emqx-enterprise/templates/service.yaml b/deploy/charts/emqx-enterprise/templates/service.yaml index 54efa6426..301213150 100644 --- a/deploy/charts/emqx-enterprise/templates/service.yaml +++ b/deploy/charts/emqx-enterprise/templates/service.yaml @@ -14,6 +14,9 @@ metadata: {{- end }} spec: type: {{ .Values.service.type }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | default "Cluster" }} + {{- end }} {{- if eq .Values.service.type "LoadBalancer" }} {{- if .Values.service.loadBalancerIP }} loadBalancerIP: {{ .Values.service.loadBalancerIP }} diff --git a/deploy/charts/emqx-enterprise/values.yaml b/deploy/charts/emqx-enterprise/values.yaml index 7827d6afb..0396b2b20 100644 --- a/deploy/charts/emqx-enterprise/values.yaml +++ b/deploy/charts/emqx-enterprise/values.yaml @@ -7,6 +7,8 @@ replicaCount: 3 image: repository: emqx/emqx-enterprise pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ @@ -92,19 +94,6 @@ initContainers: {} ## EMQX configuration item, see the documentation (https://hub.docker.com/r/emqx/emqx) emqxConfig: EMQX_CLUSTER__DISCOVERY_STRATEGY: "dns" - EMQX_CLUSTER__DNS__NAME: "{{ .Release.Name }}-headless.{{ .Release.Namespace }}.svc.cluster.local" - EMQX_CLUSTER__DNS__RECORD_TYPE: "srv" - # EMQX_CLUSTER__DISCOVERY_STRATEGY: "k8s" - # EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc:443" - # EMQX_CLUSTER__K8S__SERVICE_NAME: "{{ .Release.Name }}-headless" - # EMQX_CLUSTER__K8S__NAMESPACE: "{{ .Release.Namespace }}" - ## The address type is used to extract host from k8s service. - ## Value: ip | dns | hostname - ## Note:Hostname is only supported after v4.0-rc.2 - EMQX_CLUSTER__K8S__ADDRESS_TYPE: "hostname" - EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local" - ## if EMQX_CLUSTER__K8S__ADDRESS_TYPE eq dns - # EMQX_CLUSTER__K8S__SUFFIX: "pod.cluster.local" EMQX_DASHBOARD__DEFAULT_USERNAME: "admin" EMQX_DASHBOARD__DEFAULT_PASSWORD: "public" @@ -160,6 +149,12 @@ service: ## Set the ExternalIPs ## externalIPs: [] + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. + ## There are two available options: Cluster (default) and Local. + ## Cluster obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading. + ## Local preserves the client source IP and avoids a second hop for LoadBalancer and NodePort type Services, but risks potentially imbalanced traffic spreading. + ## + externalTrafficPolicy: "Cluster" ## Provide any additional annotations which may be required. Evaluated as a template ## annotations: {} diff --git a/deploy/charts/emqx/Chart.yaml b/deploy/charts/emqx/Chart.yaml index f3f33c984..8bf0ee0e3 100644 --- a/deploy/charts/emqx/Chart.yaml +++ b/deploy/charts/emqx/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 5.0.11 +version: 5.0.12 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 5.0.11 +appVersion: 5.0.12 diff --git a/deploy/charts/emqx/README.md b/deploy/charts/emqx/README.md index a579af70d..b07bf35ae 100644 --- a/deploy/charts/emqx/README.md +++ b/deploy/charts/emqx/README.md @@ -37,63 +37,64 @@ $ helm del my-emqx The following table lists the configurable parameters of the emqx chart and their default values. -| Parameter | Description | Default Value | +| Parameter | Description | Default Value | |--------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| -| `replicaCount` | It is recommended to have odd number of nodes in a cluster, otherwise the emqx cluster cannot be automatically healed in case of net-split. 
| 3 | -| `image.repository` | EMQX Image name | emqx/emqx | -| `image.pullPolicy` | The image pull policy | IfNotPresent | -| `image.pullSecrets ` | The image pull secrets | `[]` (does not add image pull secrets to deployed pods) | -| `envFromSecret` | The name pull a secret in the same kubernetes namespace which contains values that will be added to the environment | nil | -| `recreatePods` | Forces the recreation of pods during upgrades, which can be useful to always apply the most recent configuration. | false | -| `podAnnotations ` | Annotations for pod | `{}` | -| `podManagementPolicy` | To redeploy a chart with existing PVC(s), the value must be set to Parallel to avoid deadlock | `Parallel` | -| `persistence.enabled` | Enable EMQX persistence using PVC | false | -| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | -| `persistence.existingClaim` | EMQX data Persistent Volume existing claim name, evaluated as a template | "" | -| `persistence.accessMode` | PVC Access Mode for EMQX volume | ReadWriteOnce | -| `persistence.size` | PVC Storage Request for EMQX volume | 20Mi | -| `initContainers` | Containers that run before the creation of EMQX containers. They can contain utilities or setup scripts. | `{}` | -| `resources` | CPU/Memory resource requests/limits | {} | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Toleration labels for pod assignment | `[]` | -| `affinity` | Map of node/pod affinities | `{}` | -| `service.type` | Kubernetes Service type. | ClusterIP | -| `service.mqtt` | Port for MQTT. | 1883 | -| `service.mqttssl` | Port for MQTT(SSL). | 8883 | -| `service.ws` | Port for WebSocket/HTTP. | 8083 | -| `service.wss` | Port for WSS/HTTPS. | 8084 | -| `service.dashboard` | Port for dashboard and API. | 18083 | -| `service.nodePorts.mqtt` | Kubernetes node port for MQTT. | nil | -| `service.nodePorts.mqttssl` | Kubernetes node port for MQTT(SSL). | nil | -| `service.nodePorts.mgmt` | Kubernetes node port for mgmt API. | nil | -| `service.nodePorts.ws` | Kubernetes node port for WebSocket/HTTP. | nil | -| `service.nodePorts.wss` | Kubernetes node port for WSS/HTTPS. | nil | -| `service.nodePorts.dashboard` | Kubernetes node port for dashboard. 
| nil | -| `service.loadBalancerIP` | loadBalancerIP for Service | nil | -| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | [] | -| `service.externalIPs` | ExternalIPs for the service | [] | -| `service.annotations` | Service annotations | {}(evaluated as a template) | -| `ingress.dashboard.enabled` | Enable ingress for EMQX Dashboard | false | -| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | | -| `ingress.dashboard.path` | Ingress path for EMQX Dashboard | / | -| `ingress.dashboard.pathType` | Ingress pathType for EMQX Dashboard | `ImplementationSpecific` | -| `ingress.dashboard.hosts` | Ingress hosts for EMQX Mgmt API | dashboard.emqx.local | -| `ingress.dashboard.tls` | Ingress tls for EMQX Mgmt API | [] | -| `ingress.dashboard.annotations` | Ingress annotations for EMQX Mgmt API | {} | -| `ingress.mgmt.enabled` | Enable ingress for EMQX Mgmt API | false | -| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Mgmt API | | -| `ingress.mgmt.path` | Ingress path for EMQX Mgmt API | / | -| `ingress.mgmt.hosts` | Ingress hosts for EMQX Mgmt API | api.emqx.local | -| `ingress.mgmt.tls` | Ingress tls for EMQX Mgmt API | [] | -| `ingress.mgmt.annotations` | Ingress annotations for EMQX Mgmt API | {} | -| `metrics.enable` | If set to true, [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) needs to be installed, and emqx_prometheus needs to enable | false | -| `metrics.type` | Now we only supported "prometheus" | "prometheus" | -| `ssl.enabled` | Enable SSL support | false | -| `ssl.useExisting` | Use existing certificate or let cert-manager generate one | false | -| `ssl.existingName` | Name of existing certificate | emqx-tls | -| `ssl.dnsnames` | DNS name(s) for certificate to be generated | {} | -| `ssl.issuer.name` | Issuer name for certificate generation | letsencrypt-dns | -| `ssl.issuer.kind` | Issuer kind for certificate generation | ClusterIssuer | +| `replicaCount` | It is recommended to have odd number of nodes in a cluster, otherwise the emqx cluster cannot be automatically healed in case of net-split. | 3 | +| `image.repository` | EMQX Image name | emqx/emqx | +| `image.pullPolicy` | The image pull policy | IfNotPresent | +| `image.pullSecrets ` | The image pull secrets | `[]` (does not add image pull secrets to deployed pods) | +| `envFromSecret` | The name pull a secret in the same kubernetes namespace which contains values that will be added to the environment | nil | +| `recreatePods` | Forces the recreation of pods during upgrades, which can be useful to always apply the most recent configuration. | false | +| `podAnnotations ` | Annotations for pod | `{}` | +| `podManagementPolicy` | To redeploy a chart with existing PVC(s), the value must be set to Parallel to avoid deadlock | `Parallel` | +| `persistence.enabled` | Enable EMQX persistence using PVC | false | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.existingClaim` | EMQX data Persistent Volume existing claim name, evaluated as a template | "" | +| `persistence.accessMode` | PVC Access Mode for EMQX volume | ReadWriteOnce | +| `persistence.size` | PVC Storage Request for EMQX volume | 20Mi | +| `initContainers` | Containers that run before the creation of EMQX containers. They can contain utilities or setup scripts. 
| `{}` | +| `resources` | CPU/Memory resource requests/limits | {} | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Map of node/pod affinities | `{}` | +| `service.type` | Kubernetes Service type. | ClusterIP | +| `service.mqtt` | Port for MQTT. | 1883 | +| `service.mqttssl` | Port for MQTT(SSL). | 8883 | +| `service.ws` | Port for WebSocket/HTTP. | 8083 | +| `service.wss` | Port for WSS/HTTPS. | 8084 | +| `service.dashboard` | Port for dashboard and API. | 18083 | +| `service.nodePorts.mqtt` | Kubernetes node port for MQTT. | nil | +| `service.nodePorts.mqttssl` | Kubernetes node port for MQTT(SSL). | nil | +| `service.nodePorts.mgmt` | Kubernetes node port for mgmt API. | nil | +| `service.nodePorts.ws` | Kubernetes node port for WebSocket/HTTP. | nil | +| `service.nodePorts.wss` | Kubernetes node port for WSS/HTTPS. | nil | +| `service.nodePorts.dashboard` | Kubernetes node port for dashboard. | nil | +| `service.loadBalancerIP` | loadBalancerIP for Service | nil | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | [] | +| `service.externalIPs` | ExternalIPs for the service | [] | +`service.externalTrafficPolicy` | External Traffic Policy for the service | `Cluster` +| `service.annotations` | Service annotations | {}(evaluated as a template) | +| `ingress.dashboard.enabled` | Enable ingress for EMQX Dashboard | false | +| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | | +| `ingress.dashboard.path` | Ingress path for EMQX Dashboard | / | +| `ingress.dashboard.pathType` | Ingress pathType for EMQX Dashboard | `ImplementationSpecific` | +| `ingress.dashboard.hosts` | Ingress hosts for EMQX Mgmt API | dashboard.emqx.local | +| `ingress.dashboard.tls` | Ingress tls for EMQX Mgmt API | [] | +| `ingress.dashboard.annotations` | Ingress annotations for EMQX Mgmt API | {} | +| `ingress.mgmt.enabled` | Enable ingress for EMQX Mgmt API | false | +| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Mgmt API | | +| `ingress.mgmt.path` | Ingress path for EMQX Mgmt API | / | +| `ingress.mgmt.hosts` | Ingress hosts for EMQX Mgmt API | api.emqx.local | +| `ingress.mgmt.tls` | Ingress tls for EMQX Mgmt API | [] | +| `ingress.mgmt.annotations` | Ingress annotations for EMQX Mgmt API | {} | +| `metrics.enable` | If set to true, [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) needs to be installed, and emqx_prometheus needs to enable | false | +| `metrics.type` | Now we only supported "prometheus" | "prometheus" | +| `ssl.enabled` | Enable SSL support | false | +| `ssl.useExisting` | Use existing certificate or let cert-manager generate one | false | +| `ssl.existingName` | Name of existing certificate | emqx-tls | +| `ssl.dnsnames` | DNS name(s) for certificate to be generated | {} | +| `ssl.issuer.name` | Issuer name for certificate generation | letsencrypt-dns | +| `ssl.issuer.kind` | Issuer kind for certificate generation | ClusterIssuer | ## EMQX specific settings diff --git a/deploy/charts/emqx/templates/configmap.yaml b/deploy/charts/emqx/templates/configmap.yaml index e0563d02a..5086f85f6 100644 --- a/deploy/charts/emqx/templates/configmap.yaml +++ b/deploy/charts/emqx/templates/configmap.yaml @@ -10,10 +10,25 @@ metadata: app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} data: + EMQX_NAME: {{ .Release.Name }} + {{- if eq 
(.Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY) "k8s" }} + EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc:443" + EMQX_CLUSTER__K8S__SERVICE_NAME: {{ include "emqx.fullname" . }}-headless + EMQX_CLUSTER__K8S__NAMESPACE: {{ .Release.Namespace }} + EMQX_CLUSTER__K8S__ADDRESS_TYPE: "hostname" + EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local" + {{- else if eq (.Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY) "dns" }} + EMQX_CLUSTER__DNS__NAME: "{{ include "emqx.fullname" . }}-headless.{{ .Release.Namespace }}.svc.cluster.local" + EMQX_CLUSTER__DNS__RECORD_TYPE: "srv" + {{- end -}} {{- range $index, $value := .Values.emqxConfig }} {{- if $value }} {{- $key := (regexReplaceAllLiteral "\\." (regexReplaceAllLiteral "EMQX[_\\.]" (upper (trimAll " " $index)) "") "__") }} + {{- if or (kindIs "map" $value) (kindIs "slice" $value) }} + {{ print "EMQX_" $key }}: {{ tpl (printf "%q" (toJson $value)) $ }} + {{- else }} {{ print "EMQX_" $key }}: "{{ tpl (printf "%v" $value) $ }}" {{- end }} {{- end }} + {{- end }} {{- end }} diff --git a/deploy/charts/emqx/templates/secret.yaml b/deploy/charts/emqx/templates/secret.yaml index 447326769..440f50d30 100644 --- a/deploy/charts/emqx/templates/secret.yaml +++ b/deploy/charts/emqx/templates/secret.yaml @@ -6,14 +6,6 @@ metadata: namespace: {{ .Release.Namespace }} type: kubernetes.io/basic-auth stringData: - {{- if not (empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME) }} - username: admin - {{- else }} - username: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME }} - {{- end }} - {{- if not (empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD) }} - password: public - {{- else }} - password: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD}} - {{- end }} + username: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME | default "admin" }} + password: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD | default "public" }} {{- end }} diff --git a/deploy/charts/emqx/templates/service.yaml b/deploy/charts/emqx/templates/service.yaml index 54efa6426..301213150 100644 --- a/deploy/charts/emqx/templates/service.yaml +++ b/deploy/charts/emqx/templates/service.yaml @@ -14,6 +14,9 @@ metadata: {{- end }} spec: type: {{ .Values.service.type }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | default "Cluster" }} + {{- end }} {{- if eq .Values.service.type "LoadBalancer" }} {{- if .Values.service.loadBalancerIP }} loadBalancerIP: {{ .Values.service.loadBalancerIP }} diff --git a/deploy/charts/emqx/values.yaml b/deploy/charts/emqx/values.yaml index b648f070f..4fb263c7a 100644 --- a/deploy/charts/emqx/values.yaml +++ b/deploy/charts/emqx/values.yaml @@ -94,19 +94,6 @@ initContainers: {} ## EMQX configuration item, see the documentation (https://hub.docker.com/r/emqx/emqx) emqxConfig: EMQX_CLUSTER__DISCOVERY_STRATEGY: "dns" - EMQX_CLUSTER__DNS__NAME: "{{ .Release.Name }}-headless.{{ .Release.Namespace }}.svc.cluster.local" - EMQX_CLUSTER__DNS__RECORD_TYPE: "srv" - # EMQX_CLUSTER__DISCOVERY_STRATEGY: "k8s" - # EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc:443" - # EMQX_CLUSTER__K8S__SERVICE_NAME: "{{ .Release.Name }}-headless" - # EMQX_CLUSTER__K8S__NAMESPACE: "{{ .Release.Namespace }}" - ## The address type is used to extract host from k8s service. 
- ## Value: ip | dns | hostname - ## Note:Hostname is only supported after v4.0-rc.2 - EMQX_CLUSTER__K8S__ADDRESS_TYPE: "hostname" - EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local" - ## if EMQX_CLUSTER__K8S__ADDRESS_TYPE eq dns - # EMQX_CLUSTER__K8S__SUFFIX: "pod.cluster.local" EMQX_DASHBOARD__DEFAULT_USERNAME: "admin" EMQX_DASHBOARD__DEFAULT_PASSWORD: "public" @@ -162,6 +149,12 @@ service: ## Set the ExternalIPs ## externalIPs: [] + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. + ## There are two available options: Cluster (default) and Local. + ## Cluster obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading. + ## Local preserves the client source IP and avoids a second hop for LoadBalancer and NodePort type Services, but risks potentially imbalanced traffic spreading. + ## + externalTrafficPolicy: "Cluster" ## Provide any additional annotations which may be required. Evaluated as a template ## annotations: {} diff --git a/lib-ee/emqx_ee_bridge/docker-ct b/lib-ee/emqx_ee_bridge/docker-ct index 94f9379df..fba33559e 100644 --- a/lib-ee/emqx_ee_bridge/docker-ct +++ b/lib-ee/emqx_ee_bridge/docker-ct @@ -4,3 +4,5 @@ kafka mongo mongo_rs_sharded mysql +redis +redis_cluster diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_redis.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_redis.conf new file mode 100644 index 000000000..a5744df4c --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_redis.conf @@ -0,0 +1,73 @@ +emqx_ee_bridge_redis { + local_topic { + desc { + en: """The MQTT topic filter to be forwarded to Redis. All MQTT 'PUBLISH' messages with the topic +matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +configured, then both the data selected by the rule and the MQTT messages that match local_topic +will be forwarded. +""" + zh: """发送到 'local_topic' 的消息都会转发到 Redis。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 Redis。 +""" + } + label { + en: "Local Topic" + zh: "本地 Topic" + } + } + + command_template { + desc { + en: """Redis Command Template""" + zh: """Redis Command 模板""" + } + label { + en: "Redis Command Template" + zh: "Redis Command 模板" + } + } + config_enable { + desc { + en: """Enable or disable this bridge""" + zh: """启用/禁用桥接""" + } + label { + en: "Enable Or Disable Bridge" + zh: "启用/禁用桥接" + } + } + + desc_config { + desc { + en: """Configuration for a Redis bridge.""" + zh: """Resis 桥接配置""" + } + label: { + en: "Redis Bridge Configuration" + zh: "Redis 桥接配置" + } + } + + desc_type { + desc { + en: """The Bridge Type""" + zh: """Bridge 类型""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + + desc_name { + desc { + en: """Bridge name, used as a human-readable description of the bridge.""" + zh: """桥接名字,可读描述""" + } + label { + en: "Bridge Name" + zh: "桥接名字" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/rebar.config b/lib-ee/emqx_ee_bridge/rebar.config index 9119b052d..ee112def8 100644 --- a/lib-ee/emqx_ee_bridge/rebar.config +++ b/lib-ee/emqx_ee_bridge/rebar.config @@ -1,5 +1,5 @@ {erl_opts, [debug_info]}. -{deps, [ {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.31.2"}}} +{deps, [ {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.32.0"}}} , {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.0"}}} , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.0"}}} , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0-rc1"}}} diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src index 2748c27a7..343325c5c 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src @@ -1,6 +1,6 @@ {application, emqx_ee_bridge, [ {description, "EMQX Enterprise data bridges"}, - {vsn, "0.1.0"}, + {vsn, "0.1.1"}, {registered, []}, {applications, [ kernel, diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl index 14ae7fc8d..cf20d7110 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -22,7 +22,10 @@ api_schemas(Method) -> ref(emqx_ee_bridge_mongodb, Method ++ "_single"), ref(emqx_ee_bridge_hstreamdb, Method), ref(emqx_ee_bridge_influxdb, Method ++ "_api_v1"), - ref(emqx_ee_bridge_influxdb, Method ++ "_api_v2") + ref(emqx_ee_bridge_influxdb, Method ++ "_api_v2"), + ref(emqx_ee_bridge_redis, Method ++ "_single"), + ref(emqx_ee_bridge_redis, Method ++ "_sentinel"), + ref(emqx_ee_bridge_redis, Method ++ "_cluster") ]. schema_modules() -> @@ -32,7 +35,8 @@ schema_modules() -> emqx_ee_bridge_gcp_pubsub, emqx_ee_bridge_influxdb, emqx_ee_bridge_mongodb, - emqx_ee_bridge_mysql + emqx_ee_bridge_mysql, + emqx_ee_bridge_redis ]. examples(Method) -> @@ -56,7 +60,10 @@ resource_type(mongodb_sharded) -> emqx_connector_mongo; resource_type(mongodb_single) -> emqx_connector_mongo; resource_type(mysql) -> emqx_connector_mysql; resource_type(influxdb_api_v1) -> emqx_ee_connector_influxdb; -resource_type(influxdb_api_v2) -> emqx_ee_connector_influxdb. +resource_type(influxdb_api_v2) -> emqx_ee_connector_influxdb; +resource_type(redis_single) -> emqx_ee_connector_redis; +resource_type(redis_sentinel) -> emqx_ee_connector_redis; +resource_type(redis_cluster) -> emqx_ee_connector_redis. 
fields(bridges) -> [ @@ -92,7 +99,7 @@ fields(bridges) -> required => false } )} - ] ++ mongodb_structs() ++ influxdb_structs(). + ] ++ mongodb_structs() ++ influxdb_structs() ++ redis_structs(). mongodb_structs() -> [ @@ -122,3 +129,20 @@ influxdb_structs() -> influxdb_api_v2 ] ]. + +redis_structs() -> + [ + {Type, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_redis, Type)), + #{ + desc => <<"Redis Bridge Config">>, + required => false + } + )} + || Type <- [ + redis_single, + redis_sentinel, + redis_cluster + ] + ]. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_redis.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_redis.erl new file mode 100644 index 000000000..5360efa7f --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_redis.erl @@ -0,0 +1,193 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_redis). + +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"redis_single">> => #{ + summary => <<"Redis Single Node Bridge">>, + value => values("single", Method) + } + }, + #{ + <<"redis_sentinel">> => #{ + summary => <<"Redis Sentinel Bridge">>, + value => values("sentinel", Method) + } + }, + #{ + <<"redis_cluster">> => #{ + summary => <<"Redis Cluster Bridge">>, + value => values("cluster", Method) + } + } + ]. + +values(Protocol, get) -> + maps:merge(values(Protocol, post), ?METRICS_EXAMPLE); +values("single", post) -> + SpecificOpts = #{ + server => <<"127.0.0.1:6379">>, + database => 1 + }, + values(common, "single", SpecificOpts); +values("sentinel", post) -> + SpecificOpts = #{ + servers => [<<"127.0.0.1:26379">>], + sentinel => <<"mymaster">>, + database => 1 + }, + values(common, "sentinel", SpecificOpts); +values("cluster", post) -> + SpecificOpts = #{ + servers => [<<"127.0.0.1:6379">>] + }, + values(common, "cluster", SpecificOpts); +values(Protocol, put) -> + maps:without([type, name], values(Protocol, post)). + +values(common, RedisType, SpecificOpts) -> + Config = #{ + type => list_to_atom("redis_" ++ RedisType), + name => <<"redis_bridge">>, + enable => true, + local_topic => <<"local/topic/#">>, + pool_size => 8, + password => <<"secret">>, + auto_reconnect => true, + command_template => [<<"LPUSH">>, <<"MSGS">>, <<"${payload}">>], + resource_opts => #{ + enable_batch => false, + batch_size => 100, + batch_time => <<"20ms">> + }, + ssl => #{enable => false} + }, + maps:merge(Config, SpecificOpts). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_redis". + +roots() -> []. 
+ +fields("post_single") -> + method_fileds(post, redis_single); +fields("post_sentinel") -> + method_fileds(post, redis_sentinel); +fields("post_cluster") -> + method_fileds(post, redis_cluster); +fields("put_single") -> + method_fileds(put, redis_single); +fields("put_sentinel") -> + method_fileds(put, redis_sentinel); +fields("put_cluster") -> + method_fileds(put, redis_cluster); +fields("get_single") -> + method_fileds(get, redis_single); +fields("get_sentinel") -> + method_fileds(get, redis_sentinel); +fields("get_cluster") -> + method_fileds(get, redis_cluster); +fields(Type) when + Type == redis_single orelse Type == redis_sentinel orelse Type == redis_cluster +-> + redis_bridge_common_fields() ++ + connector_fields(Type). + +method_fileds(post, ConnectorType) -> + redis_bridge_common_fields() ++ + connector_fields(ConnectorType) ++ + type_name_fields(ConnectorType); +method_fileds(get, ConnectorType) -> + redis_bridge_common_fields() ++ + connector_fields(ConnectorType) ++ + type_name_fields(ConnectorType) ++ + emqx_bridge_schema:metrics_status_fields(); +method_fileds(put, ConnectorType) -> + redis_bridge_common_fields() ++ + connector_fields(ConnectorType). + +redis_bridge_common_fields() -> + emqx_bridge_schema:common_bridge_fields() ++ + [ + {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})}, + {command_template, fun command_template/1} + ] ++ + emqx_resource_schema:fields("resource_opts"). + +connector_fields(Type) -> + RedisType = bridge_type_to_redis_conn_type(Type), + emqx_connector_redis:fields(RedisType). + +bridge_type_to_redis_conn_type(redis_single) -> + single; +bridge_type_to_redis_conn_type(redis_sentinel) -> + sentinel; +bridge_type_to_redis_conn_type(redis_cluster) -> + cluster. + +type_name_fields(Type) -> + [ + {type, mk(Type, #{required => true, desc => ?DESC("desc_type")})}, + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})} + ]. + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for Redis using `", string:to_upper(Method), "` method."]; +desc(redis_single) -> + ?DESC(emqx_connector_redis, "single"); +desc(redis_sentinel) -> + ?DESC(emqx_connector_redis, "sentinel"); +desc(redis_cluster) -> + ?DESC(emqx_connector_redis, "cluster"); +desc(_) -> + undefined. + +command_template(type) -> + list(binary()); +command_template(required) -> + true; +command_template(validator) -> + fun is_command_template_valid/1; +command_template(desc) -> + ?DESC("command_template"); +command_template(_) -> + undefined. + +is_command_template_valid(CommandSegments) -> + case + is_list(CommandSegments) andalso length(CommandSegments) > 0 andalso + lists:all(fun is_binary/1, CommandSegments) + of + true -> + ok; + false -> + {error, + "the value of the field 'command_template' should be a nonempty " + "list of strings (templates for Redis command and arguments)"} + end. diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl new file mode 100644 index 000000000..cd6a2d212 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_redis_SUITE.erl @@ -0,0 +1,493 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_redis_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). 
+ +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +-define(REDIS_TOXYPROXY_CONNECT_CONFIG, #{ + <<"server">> => <<"toxiproxy:6379">> +}). + +-define(COMMON_REDIS_OPTS, #{ + <<"password">> => <<"public">>, + <<"command_template">> => [<<"RPUSH">>, <<"MSGS">>, <<"${payload}">>], + <<"local_topic">> => <<"local_topic/#">> +}). + +-define(BATCH_SIZE, 5). + +-define(PROXY_HOST, "toxiproxy"). +-define(PROXY_PORT, "8474"). + +all() -> [{group, redis_types}, {group, rest}]. + +groups() -> + ResourceSpecificTCs = [t_create_delete_bridge], + TCs = emqx_common_test_helpers:all(?MODULE) -- ResourceSpecificTCs, + TypeGroups = [ + {group, redis_single}, + {group, redis_sentinel}, + {group, redis_cluster} + ], + BatchGroups = [ + {group, batch_on}, + {group, batch_off} + ], + [ + {rest, TCs}, + {redis_types, [ + {group, tcp}, + {group, tls} + ]}, + {tcp, TypeGroups}, + {tls, TypeGroups}, + {redis_single, BatchGroups}, + {redis_sentinel, BatchGroups}, + {redis_cluster, BatchGroups}, + {batch_on, ResourceSpecificTCs}, + {batch_off, ResourceSpecificTCs} + ]. + +init_per_group(Group, Config) when + Group =:= redis_single; Group =:= redis_sentinel; Group =:= redis_cluster +-> + [{redis_type, Group} | Config]; +init_per_group(Group, Config) when + Group =:= tcp; Group =:= tls +-> + [{transport, Group} | Config]; +init_per_group(Group, Config) when + Group =:= batch_on; Group =:= batch_off +-> + [{batch_mode, Group} | Config]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + TestHosts = all_test_hosts(), + case emqx_common_test_helpers:is_all_tcp_servers_available(TestHosts) of + true -> + ProxyHost = os:getenv("PROXY_HOST", ?PROXY_HOST), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", ?PROXY_PORT)), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps([ + emqx_resource, emqx_bridge, emqx_rule_engine + ]), + {ok, _} = application:ensure_all_started(emqx_connector), + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config + ]; + false -> + {skip, no_redis} + end. + +end_per_suite(_Config) -> + ok = delete_all_bridges(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps([emqx_rule_engine, emqx_bridge, emqx_resource]), + _ = application:stop(emqx_connector), + ok. + +init_per_testcase(_Testcase, Config) -> + ok = delete_all_bridges(), + case ?config(redis_type, Config) of + undefined -> + Config; + RedisType -> + Transport = ?config(transport, Config), + BatchMode = ?config(batch_mode, Config), + #{RedisType := #{Transport := RedisConnConfig}} = redis_connect_configs(), + #{BatchMode := ResourceConfig} = resource_configs(), + IsBatch = (BatchMode =:= batch_on), + BridgeConfig0 = maps:merge(RedisConnConfig, ?COMMON_REDIS_OPTS), + BridgeConfig1 = BridgeConfig0#{<<"resource_opts">> => ResourceConfig}, + [{bridge_config, BridgeConfig1}, {is_batch, IsBatch} | Config] + end. 
+ +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + ok = snabbkaffe:stop(), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = delete_all_bridges(). + +t_create_delete_bridge(Config) -> + Name = <<"mybridge">>, + Type = ?config(redis_type, Config), + BridgeConfig = ?config(bridge_config, Config), + IsBatch = ?config(is_batch, Config), + ?assertMatch( + {ok, _}, + emqx_bridge:create(Type, Name, BridgeConfig) + ), + + ResourceId = emqx_bridge_resource:resource_id(Type, Name), + + ?assertEqual( + {ok, connected}, + emqx_resource:health_check(ResourceId) + ), + + RedisType = atom_to_binary(Type), + Action = <<RedisType/binary, ":", Name/binary>>, + + RuleId = <<"my_rule_id">>, + RuleConf = #{ + actions => [Action], + description => <<>>, + enable => true, + id => RuleId, + name => <<>>, + sql => <<"SELECT * FROM \"t/#\"">> + }, + + %% check export by rule + {ok, _} = emqx_rule_engine:create_rule(RuleConf), + _ = check_resource_queries(ResourceId, <<"t/test">>, IsBatch), + ok = emqx_rule_engine:delete_rule(RuleId), + + %% check export through local topic + _ = check_resource_queries(ResourceId, <<"local_topic/test">>, IsBatch), + + {ok, _} = emqx_bridge:remove(Type, Name). + +% check that we provide correct examples +t_check_values(_Config) -> + lists:foreach( + fun(Method) -> + lists:foreach( + fun({RedisType, #{value := Value0}}) -> + Value = maps:without(maps:keys(?METRICS_EXAMPLE), Value0), + MethodBin = atom_to_binary(Method), + Type = string:slice(RedisType, length("redis_")), + RefName = binary_to_list(<<MethodBin/binary, "_", Type/binary>>), + Schema = conf_schema(RefName), + ?assertMatch( + #{}, + hocon_tconf:check_plain(Schema, #{<<"root">> => Value}, #{ + atom_key => true, + required => false + }) + ) + end, + lists:flatmap( + fun maps:to_list/1, + emqx_ee_bridge_redis:conn_bridge_examples(Method) + ) + ) + end, + [put, post, get] + ). + +t_check_replay(Config) -> + Name = <<"toxic_bridge">>, + Type = <<"redis_single">>, + Topic = <<"local_topic/test">>, + ProxyName = "redis_single_tcp", + + ?assertMatch( + {ok, _}, + emqx_bridge:create(Type, Name, toxiproxy_redis_bridge_config()) + ), + + ResourceId = emqx_bridge_resource:resource_id(Type, Name), + Health = emqx_resource:health_check(ResourceId), + + ?assertEqual( + {ok, connected}, + Health + ), + + ?check_trace( + begin + ?wait_async_action( + with_down_failure(Config, ProxyName, fun() -> + ct:sleep(100), + lists:foreach( + fun(_) -> + _ = publish_message(Topic, <<"test_payload">>) + end, + lists:seq(1, ?BATCH_SIZE) + ) + end), + #{?snk_kind := redis_ee_connector_send_done, batch := true, result := {ok, _}}, + 10000 + ) + end, + fun(Trace) -> + ?assert( + ?strict_causality( + #{?snk_kind := redis_ee_connector_send_done, result := {error, _}}, + #{?snk_kind := redis_ee_connector_send_done, result := {ok, _}}, + Trace + ) + ) + end + ), + {ok, _} = emqx_bridge:remove(Type, Name). + +t_permanent_error(_Config) -> + Name = <<"invalid_command_bridge">>, + Type = <<"redis_single">>, + Topic = <<"local_topic/test">>, + Payload = <<"payload for invalid redis command">>, + + ?assertMatch( + {ok, _}, + emqx_bridge:create(Type, Name, invalid_command_bridge_config()) + ), + + ?check_trace( + begin + ?wait_async_action( + publish_message(Topic, Payload), + #{?snk_kind := redis_ee_connector_send_done}, + 10000 + ) + end, + fun(Trace) -> + ?assertMatch( + [#{result := {error, _}} | _], + ?of_kind(redis_ee_connector_send_done, Trace) + ) + end + ), + {ok, _} = emqx_bridge:remove(Type, Name).
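t_check_replay above uses snabbkaffe's ?strict_causality to assert that every send that failed while the toxiproxy link was down is eventually followed by a successful send once the connection recovers. A loose restatement of that property over a plain list of send results, as a simplified sketch (the actual snabbkaffe check pairs events within the trace, which is stronger than this):

    %% Simplified restatement of the replay property checked in t_check_replay:
    %% every {error, _} send result must be followed later by an {ok, _} result.
    replay_property(Results) ->
        check_replay(Results).

    check_replay([]) ->
        true;
    check_replay([{error, _} | Rest]) ->
        lists:any(fun({ok, _}) -> true; (_) -> false end, Rest) andalso check_replay(Rest);
    check_replay([_ | Rest]) ->
        check_replay(Rest).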
+ +t_create_disconnected(Config) -> + Name = <<"toxic_bridge">>, + Type = <<"redis_single">>, + + ?check_trace( + with_down_failure(Config, "redis_single_tcp", fun() -> + {ok, _} = emqx_bridge:create( + Type, Name, toxiproxy_redis_bridge_config() + ) + end), + fun(Trace) -> + ?assertMatch( + [#{error := _} | _], + ?of_kind(redis_ee_connector_start_error, Trace) + ), + ok + end + ), + {ok, _} = emqx_bridge:remove(Type, Name). + +%%------------------------------------------------------------------------------ +%% Helper functions +%%------------------------------------------------------------------------------ + +with_down_failure(Config, Name, F) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + emqx_common_test_helpers:with_failure(down, Name, ProxyHost, ProxyPort, F). + +check_resource_queries(ResourceId, Topic, IsBatch) -> + RandomPayload = rand:bytes(20), + N = + case IsBatch of + true -> ?BATCH_SIZE; + false -> 1 + end, + ?check_trace( + begin + ?wait_async_action( + lists:foreach( + fun(_) -> + _ = publish_message(Topic, RandomPayload) + end, + lists:seq(1, N) + ), + #{?snk_kind := redis_ee_connector_send_done, batch := IsBatch}, + 1000 + ) + end, + fun(Trace) -> + AddedMsgCount = length(added_msgs(ResourceId, RandomPayload)), + case IsBatch of + true -> + ?assertMatch( + [#{result := {ok, _}, batch := true, batch_size := ?BATCH_SIZE} | _], + ?of_kind(redis_ee_connector_send_done, Trace) + ), + ?assertEqual(?BATCH_SIZE, AddedMsgCount); + false -> + ?assertMatch( + [#{result := {ok, _}, batch := false} | _], + ?of_kind(redis_ee_connector_send_done, Trace) + ), + ?assertEqual(1, AddedMsgCount) + end + end + ). + +added_msgs(ResourceId, Payload) -> + {ok, Results} = emqx_resource:simple_sync_query( + ResourceId, {cmd, [<<"LRANGE">>, <<"MSGS">>, <<"0">>, <<"-1">>]} + ), + [El || El <- Results, El =:= Payload]. + +conf_schema(StructName) -> + #{ + fields => #{}, + translations => #{}, + validations => [], + namespace => undefined, + roots => [{root, hoconsc:ref(emqx_ee_bridge_redis, StructName)}] + }. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +all_test_hosts() -> + Confs = [ + ?REDIS_TOXYPROXY_CONNECT_CONFIG + | lists:concat([ + maps:values(TypeConfs) + || TypeConfs <- maps:values(redis_connect_configs()) + ]) + ], + lists:flatmap( + fun + (#{<<"servers">> := ServersRaw}) -> + lists:map( + fun(Server) -> + parse_server(Server) + end, + string:tokens(binary_to_list(ServersRaw), ", ") + ); + (#{<<"server">> := ServerRaw}) -> + [parse_server(ServerRaw)] + end, + Confs + ). + +parse_server(Server) -> + emqx_connector_schema_lib:parse_server(Server, #{ + host_type => hostname, + default_port => 6379 + }). + +redis_connect_ssl_opts(Type) -> + maps:merge( + client_ssl_cert_opts(Type), + #{ + <<"enable">> => <<"true">>, + <<"verify">> => <<"verify_none">> + } + ). + +client_ssl_cert_opts(redis_single) -> + emqx_authn_test_lib:client_ssl_cert_opts(); +client_ssl_cert_opts(_) -> + Dir = code:lib_dir(emqx, etc), + #{ + <<"keyfile">> => filename:join([Dir, <<"certs">>, <<"client-key.pem">>]), + <<"certfile">> => filename:join([Dir, <<"certs">>, <<"client-cert.pem">>]), + <<"cacertfile">> => filename:join([Dir, <<"certs">>, <<"cacert.pem">>]) + }. 
+ +redis_connect_configs() -> + #{ + redis_single => #{ + tcp => #{ + <<"server">> => <<"redis:6379">> + }, + tls => #{ + <<"server">> => <<"redis-tls:6380">>, + <<"ssl">> => redis_connect_ssl_opts(redis_single) + } + }, + redis_sentinel => #{ + tcp => #{ + <<"servers">> => <<"redis-sentinel:26379">>, + <<"sentinel">> => <<"mymaster">> + }, + tls => #{ + <<"servers">> => <<"redis-sentinel-tls:26380">>, + <<"sentinel">> => <<"mymaster">>, + <<"ssl">> => redis_connect_ssl_opts(redis_sentinel) + } + }, + redis_cluster => #{ + tcp => #{ + <<"servers">> => <<"redis-cluster:7000,redis-cluster:7001,redis-cluster:7002">> + }, + tls => #{ + <<"servers">> => + <<"redis-cluster-tls:8000,redis-cluster-tls:8001,redis-cluster-tls:8002">>, + <<"ssl">> => redis_connect_ssl_opts(redis_cluster) + } + } + }. + +toxiproxy_redis_bridge_config() -> + Conf0 = ?REDIS_TOXYPROXY_CONNECT_CONFIG#{ + <<"resource_opts">> => #{ + <<"query_mode">> => <<"async">>, + <<"enable_batch">> => <<"true">>, + <<"enable_queue">> => <<"true">>, + <<"worker_pool_size">> => <<"1">>, + <<"batch_size">> => integer_to_binary(?BATCH_SIZE), + <<"health_check_interval">> => <<"1s">> + } + }, + maps:merge(Conf0, ?COMMON_REDIS_OPTS). + +invalid_command_bridge_config() -> + #{redis_single := #{tcp := Conf0}} = redis_connect_configs(), + Conf1 = maps:merge(Conf0, ?COMMON_REDIS_OPTS), + Conf1#{ + <<"resource_opts">> => #{ + <<"enable_batch">> => <<"false">>, + <<"enable_queue">> => <<"false">>, + <<"worker_pool_size">> => <<"1">> + }, + <<"command_template">> => [<<"BAD">>, <<"COMMAND">>, <<"${payload}">>] + }. + +resource_configs() -> + #{ + batch_off => #{ + <<"query_mode">> => <<"sync">>, + <<"enable_batch">> => <<"false">>, + <<"enable_queue">> => <<"false">> + }, + batch_on => #{ + <<"query_mode">> => <<"async">>, + <<"enable_batch">> => <<"true">>, + <<"enable_queue">> => <<"true">>, + <<"worker_pool_size">> => <<"1">>, + <<"batch_size">> => integer_to_binary(?BATCH_SIZE) + } + }. + +publish_message(Topic, Payload) -> + {ok, Client} = emqtt:start_link(), + {ok, _} = emqtt:connect(Client), + ok = emqtt:publish(Client, Topic, Payload), + ok = emqtt:stop(Client). diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src index 1163e391c..84f9bec8b 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src @@ -1,6 +1,6 @@ {application, emqx_ee_connector, [ {description, "EMQX Enterprise connectors"}, - {vsn, "0.1.0"}, + {vsn, "0.1.1"}, {registered, []}, {applications, [ kernel, diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_redis.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_redis.erl new file mode 100644 index 000000000..39579c737 --- /dev/null +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_redis.erl @@ -0,0 +1,138 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_connector_redis). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-behaviour(emqx_resource). + +%% callbacks of behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). 
+ +%% ------------------------------------------------------------------------------------------------- +%% resource callbacks +%% ------------------------------------------------------------------------------------------------- + +callback_mode() -> always_sync. + +on_start(InstId, #{command_template := CommandTemplate} = Config) -> + case emqx_connector_redis:on_start(InstId, Config) of + {ok, RedisConnSt} -> + ?tp( + redis_ee_connector_start_success, + #{} + ), + {ok, #{ + conn_st => RedisConnSt, + command_template => preproc_command_template(CommandTemplate) + }}; + {error, _} = Error -> + ?tp( + redis_ee_connector_start_error, + #{error => Error} + ), + Error + end. + +on_stop(InstId, #{conn_st := RedisConnSt}) -> + emqx_connector_redis:on_stop(InstId, RedisConnSt). + +on_get_status(InstId, #{conn_st := RedisConnSt}) -> + emqx_connector_redis:on_get_status(InstId, RedisConnSt). + +on_query( + InstId, + {send_message, Data}, + _State = #{ + command_template := CommandTemplate, conn_st := RedisConnSt + } +) -> + Cmd = proc_command_template(CommandTemplate, Data), + ?tp( + redis_ee_connector_cmd, + #{cmd => Cmd, batch => false, mode => sync} + ), + Result = query(InstId, {cmd, Cmd}, RedisConnSt), + ?tp( + redis_ee_connector_send_done, + #{cmd => Cmd, batch => false, mode => sync, result => Result} + ), + Result; +on_query( + InstId, + Query, + _State = #{conn_st := RedisConnSt} +) -> + ?tp( + redis_ee_connector_query, + #{query => Query, batch => false, mode => sync} + ), + Result = query(InstId, Query, RedisConnSt), + ?tp( + redis_ee_connector_send_done, + #{query => Query, batch => false, mode => sync, result => Result} + ), + Result. + +on_batch_query( + InstId, BatchData, _State = #{command_template := CommandTemplate, conn_st := RedisConnSt} +) -> + Cmds = process_batch_data(BatchData, CommandTemplate), + ?tp( + redis_ee_connector_send, + #{batch_data => BatchData, batch => true, mode => sync} + ), + Result = query(InstId, {cmds, Cmds}, RedisConnSt), + ?tp( + redis_ee_connector_send_done, + #{ + batch_data => BatchData, + batch_size => length(BatchData), + batch => true, + mode => sync, + result => Result + } + ), + Result. + +%% ------------------------------------------------------------------------------------------------- +%% private helpers +%% ------------------------------------------------------------------------------------------------- + +query(InstId, Query, RedisConnSt) -> + case emqx_connector_redis:on_query(InstId, Query, RedisConnSt) of + {ok, _} = Ok -> Ok; + {error, no_connection} -> {error, {recoverable_error, no_connection}}; + {error, _} = Error -> Error + end. + +process_batch_data(BatchData, CommandTemplate) -> + lists:map( + fun({send_message, Data}) -> + proc_command_template(CommandTemplate, Data) + end, + BatchData + ). + +proc_command_template(CommandTemplate, Msg) -> + lists:map( + fun(ArgTks) -> + emqx_plugin_libs_rule:proc_tmpl(ArgTks, Msg, #{return => full_binary}) + end, + CommandTemplate + ). + +preproc_command_template(CommandTemplate) -> + lists:map( + fun emqx_plugin_libs_rule:preproc_tmpl/1, + CommandTemplate + ). 
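The connector above pre-parses each command_template segment once at start-up (preproc_command_template/1) and renders the parsed segments per message in on_query/3 and on_batch_query/3 (proc_command_template/2). A minimal sketch of the expected rendering for a single message, assuming the data map carries a payload field as the rule engine / local_topic path provides it; render_command_example/0 is a hypothetical helper for illustration only:

    %% Illustrative rendering of one command template for one message.
    render_command_example() ->
        Template = [<<"RPUSH">>, <<"MSGS">>, <<"${payload}">>],
        Preprocessed = [emqx_plugin_libs_rule:preproc_tmpl(Seg) || Seg <- Template],
        Msg = #{payload => <<"hello">>},
        Cmd = [
            emqx_plugin_libs_rule:proc_tmpl(Seg, Msg, #{return => full_binary})
         || Seg <- Preprocessed
        ],
        %% Cmd is expected to be [<<"RPUSH">>, <<"MSGS">>, <<"hello">>],
        %% i.e. the Redis command: RPUSH MSGS hello
        Cmd.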
diff --git a/mix.exs b/mix.exs index f43ca7119..11ef14eb1 100644 --- a/mix.exs +++ b/mix.exs @@ -47,12 +47,12 @@ defmodule EMQXUmbrella.MixProject do {:lc, github: "emqx/lc", tag: "0.3.2", override: true}, {:redbug, "2.0.7"}, {:typerefl, github: "ieQu1/typerefl", tag: "0.9.1", override: true}, - {:ehttpc, github: "emqx/ehttpc", tag: "0.4.0", override: true}, + {:ehttpc, github: "emqx/ehttpc", tag: "0.4.2", override: true}, {:gproc, github: "uwiger/gproc", tag: "0.8.0", override: true}, {:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true}, {:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.4", override: true}, - {:ekka, github: "emqx/ekka", tag: "0.13.6", override: true}, + {:ekka, github: "emqx/ekka", tag: "0.13.7", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.7", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.7", override: true}, @@ -67,7 +67,7 @@ defmodule EMQXUmbrella.MixProject do # in conflict by emqtt and hocon {:getopt, "1.0.2", override: true}, {:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.0", override: true}, - {:hocon, github: "emqx/hocon", tag: "0.31.2", override: true}, + {:hocon, github: "emqx/hocon", tag: "0.32.0", override: true}, {:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.1", override: true}, {:esasl, github: "emqx/esasl", tag: "0.2.0"}, {:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"}, diff --git a/rebar.config b/rebar.config index 687f49cea..e9678b1cd 100644 --- a/rebar.config +++ b/rebar.config @@ -49,12 +49,12 @@ , {gpb, "4.19.5"} %% gpb only used to build, but not for release, pin it here to avoid fetching a wrong version due to rebar plugins scattered in all the deps , {typerefl, {git, "https://github.com/ieQu1/typerefl", {tag, "0.9.1"}}} , {gun, {git, "https://github.com/emqx/gun", {tag, "1.3.9"}}} - , {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.4.0"}}} + , {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.4.2"}}} , {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}} , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}} - , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.6"}}} + , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.7"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.7"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.7"}}} @@ -67,7 +67,7 @@ , {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}} , {getopt, "1.0.2"} , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}} - , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.31.2"}}} + , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.32.0"}}} , {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.1"}}} , {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}} , {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}} diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index 18dfb2525..ae779c572 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -113,6 +113,10 @@ for dep in ${CT_DEPS}; do '.ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml' 
'.ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml' ) ;; + redis_cluster) + FILES+=( '.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml' + '.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml' ) + ;; mysql) FILES+=( '.ci/docker-compose-file/docker-compose-mysql-tcp.yaml' '.ci/docker-compose-file/docker-compose-mysql-tls.yaml' ) diff --git a/scripts/get-distro.sh b/scripts/get-distro.sh index 512abdab6..bf0e98229 100755 --- a/scripts/get-distro.sh +++ b/scripts/get-distro.sh @@ -14,7 +14,11 @@ case "$UNAME" in SYSTEM="${DIST}${VERSION_ID}" ;; Linux) - if grep -q -i 'rhel' /etc/*-release; then + # /etc/os-release on amazon linux 2 contains both rhel and centos strings + if grep -q -i 'amzn' /etc/*-release; then + DIST='amzn' + VERSION_ID="$(sed -n '/^VERSION_ID=/p' /etc/os-release | sed -r 's/VERSION_ID=(.*)/\1/g' | sed 's/"//g')" + elif grep -q -i 'rhel' /etc/*-release; then DIST='el' VERSION_ID="$(rpm --eval '%{rhel}')" else diff --git a/scripts/pkg-tests.sh b/scripts/pkg-tests.sh index 9f3e4d7bd..c511a08e6 100755 --- a/scripts/pkg-tests.sh +++ b/scripts/pkg-tests.sh @@ -148,11 +148,14 @@ emqx_test(){ fi ;; "rpm") + # yum wants python2 + alternatives --list | grep python && alternatives --set python /usr/bin/python2 YUM_RES=$(yum install -y "${PACKAGE_PATH}/${packagename}"| tee /dev/null) if [[ $YUM_RES =~ "Failed" ]]; then echo "yum install failed" exit 1 fi + alternatives --list | grep python && alternatives --set python /usr/bin/python3 if ! rpm -q "${EMQX_NAME}" | grep -q "${EMQX_NAME}"; then echo "package install error" exit 1