Merge remote-tracking branch 'origin/release-50' into 1214-sync-master-upstreams
Commit: 42c58e2a91
@@ -16,7 +16,7 @@ up:
 	REDIS_TAG=6 \
 	MONGO_TAG=5 \
 	PGSQL_TAG=13 \
-	docker-compose \
+	docker compose \
 	-f .ci/docker-compose-file/docker-compose.yaml \
 	-f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
 	-f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
@@ -28,10 +28,13 @@ up:
 	-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
 	-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
 	-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
-	up -d --build
+	-f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \
+	-f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \
+	-f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
+	up -d --build --remove-orphans

 down:
-	docker-compose \
+	docker compose \
 	-f .ci/docker-compose-file/docker-compose.yaml \
 	-f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
 	-f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
@@ -43,7 +46,10 @@ down:
 	-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
 	-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
 	-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
-	down
+	-f .ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml \
+	-f .ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml \
+	-f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \
+	down --remove-orphans

 ct:
 	docker exec -i "$(CONTAINER)" bash -c "rebar3 ct --name 'test@127.0.0.1' -v --suite $(SUITE)"

@@ -1,9 +1,9 @@
 version: '3.9'

 services:
-  redis_server:
+  redis_cluster:
     image: redis:${REDIS_TAG}
-    container_name: redis
+    container_name: redis-cluster
     volumes:
       - ./redis/:/data/conf
     command: bash -c "/bin/bash /data/conf/redis.sh --node cluster && tail -f /var/log/redis-server.log"

@@ -1,8 +1,8 @@
 version: '3.9'

 services:
-  redis_server:
-    container_name: redis
+  redis_cluster_tls:
+    container_name: redis-cluster-tls
     image: redis:${REDIS_TAG}
     volumes:
       - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt

@@ -1,3 +1,3 @@
-r7000i.log
-r7001i.log
-r7002i.log
+r700?i.log
+nodes.700?.conf
+*.rdb

@@ -1,11 +1,12 @@
 daemonize yes
 bind 0.0.0.0 ::
 logfile /var/log/redis-server.log
+protected-mode no
+requirepass public
+masterauth public
+
 tls-cert-file /etc/certs/redis.crt
 tls-key-file /etc/certs/redis.key
 tls-ca-cert-file /etc/certs/ca.crt
 tls-replication yes
 tls-cluster yes
-protected-mode no
-requirepass public
-masterauth public

@@ -1,5 +1,6 @@
 daemonize yes
 bind 0.0.0.0 ::
 logfile /var/log/redis-server.log
+protected-mode no
 requirepass public
 masterauth public

@@ -16,13 +16,8 @@ case $key in
     shift # past argument
     shift # past value
    ;;
-    -t)
-    tls="$2"
-    shift # past argument
-    shift # past value
-    ;;
     --tls-enabled)
-    tls=1
+    tls=true
     shift # past argument
     ;;
     *)

@@ -37,69 +32,71 @@ rm -f \
 /data/conf/r7002i.log \
 /data/conf/nodes.7000.conf \
 /data/conf/nodes.7001.conf \
-/data/conf/nodes.7002.conf ;
+/data/conf/nodes.7002.conf

-if [ "${node}" = "cluster" ] ; then
-    if $tls ; then
+if [ "$node" = "cluster" ]; then
+    if $tls; then
         redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-            --tls-port 8000 --cluster-enabled yes ;
+            --tls-port 8000 --cluster-enabled yes
         redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-            --tls-port 8001 --cluster-enabled yes;
+            --tls-port 8001 --cluster-enabled yes
         redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-            --tls-port 8002 --cluster-enabled yes;
+            --tls-port 8002 --cluster-enabled yes
     else
-        redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf --cluster-enabled yes;
-        redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf --cluster-enabled yes;
-        redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf --cluster-enabled yes;
+        redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
+            --cluster-enabled yes
+        redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
+            --cluster-enabled yes
+        redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
+            --cluster-enabled yes
     fi
-elif [ "${node}" = "sentinel" ] ; then
-    if $tls ; then
+elif [ "$node" = "sentinel" ]; then
+    if $tls; then
         redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-            --tls-port 8000 --cluster-enabled no;
+            --tls-port 8000 --cluster-enabled no
         redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-            --tls-port 8001 --cluster-enabled no --slaveof "$LOCAL_IP" 8000;
+            --tls-port 8001 --cluster-enabled no --slaveof "$LOCAL_IP" 8000
         redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-            --tls-port 8002 --cluster-enabled no --slaveof "$LOCAL_IP" 8000;
+            --tls-port 8002 --cluster-enabled no --slaveof "$LOCAL_IP" 8000

     else
         redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-            --cluster-enabled no;
+            --cluster-enabled no
         redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-            --cluster-enabled no --slaveof "$LOCAL_IP" 7000;
+            --cluster-enabled no --slaveof "$LOCAL_IP" 7000
         redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-            --cluster-enabled no --slaveof "$LOCAL_IP" 7000;
+            --cluster-enabled no --slaveof "$LOCAL_IP" 7000
     fi
 fi
-REDIS_LOAD_FLG=true;
+
+REDIS_LOAD_FLG=true

 while $REDIS_LOAD_FLG;
 do
-    sleep 1;
-    redis-cli --pass public --no-auth-warning -p 7000 info 1> /data/conf/r7000i.log 2> /dev/null;
-    if [ -s /data/conf/r7000i.log ]; then
-        :
-    else
-        continue;
+    sleep 1
+    redis-cli --pass public --no-auth-warning -p 7000 info 1> /data/conf/r7000i.log 2> /dev/null
+    if ! [ -s /data/conf/r7000i.log ]; then
+        continue
     fi
-    redis-cli --pass public --no-auth-warning -p 7001 info 1> /data/conf/r7001i.log 2> /dev/null;
-    if [ -s /data/conf/r7001i.log ]; then
-        :
-    else
-        continue;
+    redis-cli --pass public --no-auth-warning -p 7001 info 1> /data/conf/r7001i.log 2> /dev/null
+    if ! [ -s /data/conf/r7001i.log ]; then
+        continue
     fi
     redis-cli --pass public --no-auth-warning -p 7002 info 1> /data/conf/r7002i.log 2> /dev/null;
-    if [ -s /data/conf/r7002i.log ]; then
-        :
-    else
-        continue;
+    if ! [ -s /data/conf/r7002i.log ]; then
+        continue
     fi
-    if [ "${node}" = "cluster" ] ; then
-        if $tls ; then
-            yes "yes" | redis-cli --cluster create "$LOCAL_IP:8000" "$LOCAL_IP:8001" "$LOCAL_IP:8002" --pass public --no-auth-warning --tls true --cacert /etc/certs/ca.crt --cert /etc/certs/redis.crt --key /etc/certs/redis.key;
+    if [ "$node" = "cluster" ] ; then
+        if $tls; then
+            yes "yes" | redis-cli --cluster create "$LOCAL_IP:8000" "$LOCAL_IP:8001" "$LOCAL_IP:8002" \
+                --pass public --no-auth-warning \
+                --tls true --cacert /etc/certs/ca.crt \
+                --cert /etc/certs/redis.crt --key /etc/certs/redis.key
         else
-            yes "yes" | redis-cli --cluster create "$LOCAL_IP:7000" "$LOCAL_IP:7001" "$LOCAL_IP:7002" --pass public --no-auth-warning;
+            yes "yes" | redis-cli --cluster create "$LOCAL_IP:7000" "$LOCAL_IP:7001" "$LOCAL_IP:7002" \
+                --pass public --no-auth-warning
         fi
-    elif [ "${node}" = "sentinel" ] ; then
+    elif [ "$node" = "sentinel" ]; then
         tee /_sentinel.conf>/dev/null << EOF
 port 26379
 bind 0.0.0.0 ::
@@ -107,7 +104,7 @@ daemonize yes
 logfile /var/log/redis-server.log
 dir /tmp
 EOF
-        if $tls ; then
+        if $tls; then
             cat >>/_sentinel.conf<<EOF
 tls-port 26380
 tls-replication yes
@@ -121,9 +118,9 @@ EOF
 sentinel monitor mymaster $LOCAL_IP 7000 1
 EOF
         fi
-        redis-server /_sentinel.conf --sentinel;
+        redis-server /_sentinel.conf --sentinel
     fi
-    REDIS_LOAD_FLG=false;
+    REDIS_LOAD_FLG=false
 done

 exit 0;

@@ -22,5 +22,12 @@
     "listen": "0.0.0.0:3307",
     "upstream": "mysql-tls:3306",
     "enabled": true
+  },
+
+  {
+    "name": "redis_single_tcp",
+    "listen": "0.0.0.0:6379",
+    "upstream": "redis:6379",
+    "enabled": true
   }
 ]

@@ -23,7 +23,7 @@ jobs:
   prepare:
     runs-on: ubuntu-20.04
     # prepare source with any OTP version, no need for a matrix
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"

     outputs:
       BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
@@ -167,14 +167,14 @@ jobs:

     - uses: docker/build-push-action@v3
       with:
-        push: ${{ needs.prepare.outputs.IS_EXACT_TAG }}
+        push: ${{ needs.prepare.outputs.IS_EXACT_TAG == 'true' || github.repository_owner != 'emqx' }}
         pull: true
         no-cache: true
         platforms: linux/${{ matrix.arch[0] }}
         tags: ${{ steps.meta.outputs.tags }}
         labels: ${{ steps.meta.outputs.labels }}
         build-args: |
-          BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
+          BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
           RUN_FROM=${{ matrix.os[1] }}
           EMQX_NAME=${{ steps.meta.outputs.emqx_name }}
         file: source/${{ matrix.os[2] }}
@@ -245,14 +245,14 @@ jobs:

     - uses: docker/build-push-action@v3
       with:
-        push: ${{ needs.prepare.outputs.IS_EXACT_TAG }}
+        push: ${{ needs.prepare.outputs.IS_EXACT_TAG == 'true' || github.repository_owner != 'emqx' }}
         pull: true
         no-cache: true
         platforms: linux/${{ matrix.arch[0] }}
         tags: ${{ steps.meta.outputs.tags }}
         labels: ${{ steps.meta.outputs.labels }}
         build-args: |
-          BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
+          BUILD_FROM=ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
           RUN_FROM=${{ matrix.os[1] }}
           EMQX_NAME=${{ steps.meta.outputs.emqx_name }}
         file: source/${{ matrix.os[2] }}
@@ -328,7 +328,7 @@ jobs:
   docker-elixir-push-multi-arch-manifest:
     # note, we only run on amd64
     # do not build enterprise elixir images for now
-    if: needs.prepare.outputs.IS_EXACT_TAG && needs.prepare.outputs.BUILD_PROFILE == 'emqx'
+    if: needs.prepare.outputs.IS_EXACT_TAG == 'true' && needs.prepare.outputs.BUILD_PROFILE == 'emqx'
     needs:
     - prepare
    - docker-elixir

@@ -23,7 +23,7 @@ on:
 jobs:
   prepare:
     runs-on: ubuntu-20.04
-    container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04
     outputs:
       BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }}
       IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }}
@@ -173,7 +173,7 @@ jobs:
     needs: prepare
     runs-on: ${{ matrix.build_machine }}
     container:
-      image: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
+      image: "ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"

     strategy:
       fail-fast: false
@@ -203,27 +203,31 @@ jobs:
         - debian10
         - el8
         - el7
+        - amzn2
         build_machine:
         - aws-arm64
         - ubuntu-20.04
         exclude:
         - arch: arm64
+          build_machine: ubuntu-20.04
+        - arch: amd64
+          build_machine: aws-arm64
+        include:
+        - profile: emqx
+          otp: 24.3.4.2-1
+          elixir: 1.13.4
+          build_elixir: with_elixir
+          arch: amd64
+          os: ubuntu20.04
           build_machine: ubuntu-20.04
-        - arch: amd64
-          build_machine: aws-arm64
-        # elixir: only for opensource edition and only on ubuntu20.04 and el8 on amd64
-        - build_elixir: with_elixir
-          profile: emqx-enterprise
-        - build_elixir: with_elixir
-          arch: arm64
-        - build_elixir: with_elixir
-          os: ubuntu18.04
-        - build_elixir: with_elixir
-          os: debian10
-        - build_elixir: with_elixir
-          os: debian11
-        - build_elixir: with_elixir
-          os: el7
+        - profile: emqx
+          otp: 24.3.4.2-1
+          elixir: 1.13.4
+          build_elixir: with_elixir
+          arch: amd64
+          os: amzn2
+          build_machine: ubuntu-20.04
     defaults:
       run:
         shell: bash
@@ -268,7 +272,7 @@ jobs:
           --pkgtype "${PKGTYPE}" \
           --arch "${ARCH}" \
           --elixir "${IsElixir}" \
-          --builder "ghcr.io/emqx/emqx-builder/5.0-18:${ELIXIR}-${OTP}-${SYSTEM}"
+          --builder "ghcr.io/emqx/emqx-builder/5.0-24:${ELIXIR}-${OTP}-${SYSTEM}"
         done
     - uses: actions/upload-artifact@v3
       with:

@@ -29,17 +29,14 @@ jobs:
     fail-fast: false
     matrix:
       profile:
-      - emqx
-      - emqx-enterprise
+      - ["emqx", "el7"]
+      - ["emqx-enterprise", "ubuntu20.04"]
       otp:
       - 24.3.4.2-1
       elixir:
       - 1.13.4
-      os:
-      - ubuntu20.04
-      - el8

-    container: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.profile[1] }}"

     steps:
     - uses: AutoModality/action-clean@v1
@@ -48,7 +45,7 @@ jobs:
         fetch-depth: 0
     - name: prepare
       run: |
-        echo "EMQX_NAME=${{ matrix.profile }}" >> $GITHUB_ENV
+        echo "EMQX_NAME=${{ matrix.profile[0] }}" >> $GITHUB_ENV
         echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV
     - name: Work around https://github.com/actions/checkout/issues/766
       run: |
@@ -75,14 +72,14 @@ jobs:
         ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-pkg
     - uses: actions/upload-artifact@v3
       with:
-        name: ${{ matrix.profile}}-${{ matrix.otp }}-${{ matrix.os }}
-        path: _packages/${{ matrix.profile}}/*
+        name: "${{ matrix.profile[0] }}-${{ matrix.otp }}-${{ matrix.profile[1] }}"
+        path: _packages/${{ matrix.profile[0] }}/*
     - uses: actions/upload-artifact@v3
       with:
-        name: "${{ matrix.profile }}_schema_dump"
+        name: "${{ matrix.profile[0] }}_schema_dump"
         path: |
           scripts/spellcheck
-          _build/${{ matrix.profile }}/lib/emqx_dashboard/priv/www/static/schema.json
+          _build/${{ matrix.profile[0] }}/lib/emqx_dashboard/priv/www/static/schema.json

 windows:
   runs-on: windows-2019

@@ -5,7 +5,7 @@ on: [pull_request, push]
 jobs:
   check_deps_integrity:
     runs-on: ubuntu-20.04
-    container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04

     steps:
     - uses: actions/checkout@v3

@@ -5,7 +5,7 @@ on: [pull_request]
 jobs:
   code_style_check:
     runs-on: ubuntu-20.04
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
     steps:
     - uses: actions/checkout@v3
       with:

@@ -8,7 +8,7 @@ jobs:
   elixir_apps_check:
     runs-on: ubuntu-latest
     # just use the latest builder
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"

     strategy:
       fail-fast: false

@@ -7,7 +7,7 @@ on: [pull_request, push]
 jobs:
   elixir_deps_check:
     runs-on: ubuntu-20.04
-    container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04

     steps:
     - name: Checkout

@@ -17,7 +17,8 @@ jobs:
       profile:
       - emqx
       - emqx-enterprise
-    container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
+    container: ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04

     steps:
     - name: Checkout
       uses: actions/checkout@v3

@@ -34,7 +34,7 @@ jobs:
       use-self-hosted: false

     runs-on: ${{ matrix.runs-on }}
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir}}-${{ matrix.otp }}-${{ matrix.os }}"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir}}-${{ matrix.otp }}-${{ matrix.os }}"

     defaults:
       run:

@@ -16,7 +16,7 @@ jobs:
   prepare:
     runs-on: ubuntu-20.04
     # prepare source with any OTP version, no need for a matrix
-    container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-alpine3.15.1
+    container: ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-alpine3.15.1

     steps:
     - uses: actions/checkout@v3
@@ -68,7 +68,7 @@ jobs:
     - name: make docker image
       working-directory: source
       env:
-        EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
+        EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
         EMQX_RUNNER: ${{ matrix.os[1] }}
       run: |
         make ${{ matrix.profile }}-docker
@@ -141,7 +141,7 @@ jobs:
     - name: make docker image
       working-directory: source
       env:
-        EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-18:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
+        EMQX_BUILDER: ghcr.io/emqx/emqx-builder/5.0-24:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os[0] }}
         EMQX_RUNNER: ${{ matrix.os[1] }}
       run: |
         make ${{ matrix.profile }}-docker

@@ -16,7 +16,7 @@ on:
 jobs:
   relup_test_plan:
     runs-on: ubuntu-20.04
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
     outputs:
       CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }}
       OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }}

@@ -17,7 +17,7 @@ jobs:
   prepare:
     runs-on: aws-amd64
     # prepare source with any OTP version, no need for a matrix
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
     outputs:
       fast_ct_apps: ${{ steps.find_ct_apps.outputs.fast_ct_apps }}
       docker_ct_apps: ${{ steps.find_ct_apps.outputs.docker_ct_apps }}
@@ -104,7 +104,7 @@ jobs:
     defaults:
       run:
         shell: bash
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"

     steps:
     - uses: AutoModality/action-clean@v1
@@ -213,7 +213,7 @@ jobs:
       use-self-hosted: false

     runs-on: ${{ matrix.runs-on }}
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
     defaults:
       run:
         shell: bash
@@ -252,7 +252,7 @@ jobs:
     - ct
     - ct_docker
     runs-on: ubuntu-20.04
-    container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
+    container: "ghcr.io/emqx/emqx-builder/5.0-24:1.13.4-24.3.4.2-1-ubuntu20.04"
     steps:
     - uses: AutoModality/action-clean@v1
     - uses: actions/download-artifact@v3

Makefile (4 changed lines)
@@ -6,8 +6,8 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d
 export EMQX_DEFAULT_RUNNER = debian:11-slim
 export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
 export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
-export EMQX_DASHBOARD_VERSION ?= v1.1.3-sync-code
-export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.5
+export EMQX_DASHBOARD_VERSION ?= v1.1.3
+export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.7
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
 ifeq ($(OS),Windows_NT)

@@ -27,6 +27,7 @@
 {emqx_prometheus,1}.
 {emqx_resource,1}.
 {emqx_retainer,1}.
+{emqx_retainer,2}.
 {emqx_rule_engine,1}.
 {emqx_shared_sub,1}.
 {emqx_slow_subs,1}.

@@ -27,7 +27,7 @@
 {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
 {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
 {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}},
-{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.6"}}},
+{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.7"}}},
 {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
 {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.31.2"}}},
 {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},

@@ -46,16 +46,32 @@ authenticate(Credential) ->
     NotSuperUser = #{is_superuser => false},
     case emqx_authentication:pre_hook_authenticate(Credential) of
         ok ->
+            inc_authn_metrics(anonymous),
             {ok, NotSuperUser};
         continue ->
-            case run_hooks('client.authenticate', [Credential], {ok, #{is_superuser => false}}) of
-                ok ->
+            case run_hooks('client.authenticate', [Credential], ignore) of
+                ignore ->
+                    inc_authn_metrics(anonymous),
                     {ok, NotSuperUser};
+                ok ->
+                    inc_authn_metrics(ok),
+                    {ok, NotSuperUser};
+                {ok, _AuthResult} = OkResult ->
+                    inc_authn_metrics(ok),
+                    OkResult;
+                {ok, _AuthResult, _AuthData} = OkResult ->
+                    inc_authn_metrics(ok),
+                    OkResult;
+                {error, _Reason} = Error ->
+                    inc_authn_metrics(error),
+                    Error;
+                %% {continue, AuthCache} | {continue, AuthData, AuthCache}
                 Other ->
                     Other
             end;
-        Other ->
-            Other
+        {error, _Reason} = Error ->
+            inc_authn_metrics(error),
+            Error
     end.

 %% @doc Check Authorization
@@ -134,3 +150,11 @@ inc_authz_metrics(deny) ->
     emqx_metrics:inc('authorization.deny');
 inc_authz_metrics(cache_hit) ->
     emqx_metrics:inc('authorization.cache_hit').
+
+inc_authn_metrics(error) ->
+    emqx_metrics:inc('authentication.failure');
+inc_authn_metrics(ok) ->
+    emqx_metrics:inc('authentication.success');
+inc_authn_metrics(anonymous) ->
+    emqx_metrics:inc('authentication.success.anonymous'),
+    emqx_metrics:inc('authentication.success').
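
Editor's note: the hunk above changes the initial hook accumulator from {ok, #{is_superuser => false}} to ignore, so the caller can tell "no hook made a decision" (counted as anonymous) apart from an explicit allow or deny. A minimal sketch of the fold semantics this relies on, with illustrative names rather than the actual emqx_hooks API:

    %% hook_fold_sketch.erl -- a sketch, assuming each callback returns
    %% {ok, Acc} to continue down the chain or {stop, Acc} to end it.
    -module(hook_fold_sketch).
    -export([run/2]).

    run([], Acc) ->
        Acc;
    run([Fun | Rest], Acc) ->
        case Fun(Acc) of
            {stop, Acc1} -> Acc1;          %% final verdict, skip the rest
            {ok, Acc1} -> run(Rest, Acc1)  %% keep folding
        end.

Seeded with ignore, an accumulator no callback touched falls into the ignore -> branch above and is counted as an anonymous success; {ok, ...} and {error, ...} verdicts are counted as success and failure respectively.
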
@@ -228,7 +228,6 @@ when
 -spec pre_hook_authenticate(emqx_types:clientinfo()) ->
     ok | continue | {error, not_authorized}.
 pre_hook_authenticate(#{enable_authn := false}) ->
-    inc_authenticate_metric('authentication.success.anonymous'),
     ?TRACE_RESULT("authentication_result", ok, enable_authn_false);
 pre_hook_authenticate(#{enable_authn := quick_deny_anonymous} = Credential) ->
     case maps:get(username, Credential, undefined) of
@@ -242,29 +241,18 @@ pre_hook_authenticate(#{enable_authn := quick_deny_anonymous} = Credential) ->
 pre_hook_authenticate(_) ->
     continue.

-authenticate(#{listener := Listener, protocol := Protocol} = Credential, _AuthResult) ->
+authenticate(#{listener := Listener, protocol := Protocol} = Credential, AuthResult) ->
     case get_authenticators(Listener, global_chain(Protocol)) of
         {ok, ChainName, Authenticators} ->
             case get_enabled(Authenticators) of
                 [] ->
-                    inc_authenticate_metric('authentication.success.anonymous'),
-                    ?TRACE_RESULT("authentication_result", ignore, empty_chain);
+                    ?TRACE_RESULT("authentication_result", AuthResult, empty_chain);
                 NAuthenticators ->
                     Result = do_authenticate(ChainName, NAuthenticators, Credential),
-
-                    case Result of
-                        {stop, {ok, _}} ->
-                            inc_authenticate_metric('authentication.success');
-                        {stop, {error, _}} ->
-                            inc_authenticate_metric('authentication.failure');
-                        _ ->
-                            ok
-                    end,
                     ?TRACE_RESULT("authentication_result", Result, chain_result)
             end;
         none ->
-            inc_authenticate_metric('authentication.success.anonymous'),
-            ?TRACE_RESULT("authentication_result", ignore, no_chain)
+            ?TRACE_RESULT("authentication_result", AuthResult, no_chain)
     end.

 get_authenticators(Listener, Global) ->
@@ -649,7 +637,7 @@ handle_create_authenticator(Chain, Config, Providers) ->
 end.

 do_authenticate(_ChainName, [], _) ->
-    {stop, {error, not_authorized}};
+    {ok, {error, not_authorized}};
 do_authenticate(
     ChainName, [#authenticator{id = ID} = Authenticator | More], Credential
 ) ->
@@ -673,7 +661,7 @@ do_authenticate(
         _ ->
             ok
     end,
-    {stop, Result}
+    {ok, Result}
 catch
     Class:Reason:Stacktrace ->
         ?TRACE_AUTHN(warning, "authenticator_error", #{
@@ -947,9 +935,3 @@ to_list(M) when is_map(M) -> [M];
 to_list(L) when is_list(L) -> L.

 call(Call) -> gen_server:call(?MODULE, Call, infinity).
-
-inc_authenticate_metric('authentication.success.anonymous' = Metric) ->
-    emqx_metrics:inc(Metric),
-    emqx_metrics:inc('authentication.success');
-inc_authenticate_metric(Metric) ->
-    emqx_metrics:inc(Metric).
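
Editor's note: do_authenticate/3 switching its return from {stop, Result} to {ok, Result} is the behavioral core of this file's hunks. Wrapped in {ok, ...}, the chain's verdict no longer terminates the 'client.authenticate' fold, so a hook registered at lower priority can still run and override it. Reusing the sketch above (assumptions as stated there):

    %% The authn chain leaves its verdict with {ok, _}; a later callback
    %% may still override it. With {stop, _} it could not.
    Chain = fun(_Acc) -> {ok, {error, not_authorized}} end,
    Override = fun(_Acc) -> {stop, {ok, #{is_superuser => false}}} end,
    {ok, #{is_superuser := false}} = hook_fold_sketch:run([Chain, Override], ignore).

Had Chain answered {stop, {error, not_authorized}}, run/2 would have returned that error without ever consulting Override.
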
@@ -199,6 +199,7 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
             Reason =:= listener_disabled;
             Reason =:= quic_app_missing
         ->
+            ?tp(listener_not_started, #{type => Type, bind => Bind, status => {skipped, Reason}}),
             console_print(
                 "Listener ~ts is NOT started due to: ~p.~n",
                 [listener_id(Type, ListenerName), Reason]
@@ -212,8 +213,12 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
             ),
             ok;
         {error, {already_started, Pid}} ->
+            ?tp(listener_not_started, #{
+                type => Type, bind => Bind, status => {already_started, Pid}
+            }),
             {error, {already_started, Pid}};
         {error, Reason} ->
+            ?tp(listener_not_started, #{type => Type, bind => Bind, status => {error, Reason}}),
             ListenerId = listener_id(Type, ListenerName),
             BindStr = format_bind(Bind),
             ?ELOG(

@@ -1942,7 +1942,6 @@ common_ssl_opts_schema(Defaults) ->
     ].

 %% @doc Make schema for SSL listener options.
-%% When it's for ranch listener, an extra field `handshake_timeout' is added.
 -spec server_ssl_opts_schema(map(), boolean()) -> hocon_schema:field_schema().
 server_ssl_opts_schema(Defaults, IsRanchListener) ->
     D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
@@ -1981,26 +1980,23 @@ server_ssl_opts_schema(Defaults, IsRanchListener) ->
                 default => Df("client_renegotiation", true),
                 desc => ?DESC(server_ssl_opts_schema_client_renegotiation)
             }
+        )},
+        {"handshake_timeout",
+            sc(
+                duration(),
+                #{
+                    default => Df("handshake_timeout", "15s"),
+                    desc => ?DESC(server_ssl_opts_schema_handshake_timeout)
+                }
         )}
-        | [
-            {"handshake_timeout",
-                sc(
-                    duration(),
-                    #{
-                        default => Df("handshake_timeout", "15s"),
-                        desc => ?DESC(server_ssl_opts_schema_handshake_timeout)
-                    }
-            )}
-            || IsRanchListener
-        ] ++
-            [
-                {"gc_after_handshake",
-                    sc(boolean(), #{
-                        default => false,
-                        desc => ?DESC(server_ssl_opts_schema_gc_after_handshake)
-                    })}
-                || not IsRanchListener
-            ]
+    ] ++
+        [
+            {"gc_after_handshake",
+                sc(boolean(), #{
+                    default => false,
+                    desc => ?DESC(server_ssl_opts_schema_gc_after_handshake)
+                })}
+            || not IsRanchListener
     ].

 %% @doc Make schema for SSL client.

@@ -166,7 +166,20 @@ all_ciphers(['tlsv1.3']) ->
 all_ciphers(Versions) ->
     %% assert non-empty
     List = lists:append([ssl:cipher_suites(all, V, openssl) || V <- Versions]),
-    [_ | _] = dedup(List).
+    %% Some PSK ciphers are both supported by OpenSSL and Erlang, but they need manual add here.
+    %% Found by this cmd
+    %% openssl ciphers -v|grep ^PSK| awk '{print $1}'| sed "s/^/\"/;s/$/\"/" | tr "\n" ","
+    %% Then remove the ciphers that aren't supported by Erlang
+    PSK = [
+        "PSK-AES256-GCM-SHA384",
+        "PSK-AES128-GCM-SHA256",
+        "PSK-AES256-CBC-SHA384",
+        "PSK-AES256-CBC-SHA",
+        "PSK-AES128-CBC-SHA256",
+        "PSK-AES128-CBC-SHA"
+    ],
+    [_ | _] = dedup(List ++ PSK).

 %% @doc All Pre-selected TLS ciphers.
 default_ciphers() ->
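
Editor's note: a quick way to sanity-check the hand-maintained PSK list against the local OTP release, as a sketch; ssl:str_to_suite/1 (available since OTP 22) maps an OpenSSL-style suite name to Erlang's cipher-suite map and errors for names the runtime does not know, hence the catch:

    PSK = [
        "PSK-AES256-GCM-SHA384",
        "PSK-AES128-GCM-SHA256",
        "PSK-AES256-CBC-SHA384",
        "PSK-AES256-CBC-SHA",
        "PSK-AES128-CBC-SHA256",
        "PSK-AES128-CBC-SHA"
    ],
    %% pairs of {Name, SuiteMap | caught error} for manual inspection
    [{Name, catch ssl:str_to_suite(Name)} || Name <- PSK].
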
@@ -22,6 +22,8 @@
 -compile(export_all).
 -compile(nowarn_export_all).

+-include_lib("emqx/include/emqx_hooks.hrl").
+
 -include_lib("common_test/include/ct.hrl").
 -include_lib("eunit/include/eunit.hrl").
 -include_lib("typerefl/include/types.hrl").
@@ -35,6 +37,20 @@
     end)()
 ).
 -define(CONF_ROOT, ?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM).
+-define(NOT_SUPERUSER, #{is_superuser => false}).
+
+-define(assertAuthSuccessForUser(User),
+    ?assertMatch(
+        {ok, _},
+        emqx_access_control:authenticate(ClientInfo#{username => atom_to_binary(User)})
+    )
+).
+-define(assertAuthFailureForUser(User),
+    ?assertMatch(
+        {error, _},
+        emqx_access_control:authenticate(ClientInfo#{username => atom_to_binary(User)})
+    )
+).

 %%------------------------------------------------------------------------------
 %% Hocon Schema
@@ -88,9 +104,22 @@ update(_Config, _State) ->

 authenticate(#{username := <<"good">>}, _State) ->
     {ok, #{is_superuser => true}};
+authenticate(#{username := <<"ignore">>}, _State) ->
+    ignore;
 authenticate(#{username := _}, _State) ->
     {error, bad_username_or_password}.

+hook_authenticate(#{username := <<"hook_user_good">>}, _AuthResult) ->
+    {ok, {ok, ?NOT_SUPERUSER}};
+hook_authenticate(#{username := <<"hook_user_bad">>}, _AuthResult) ->
+    {ok, {error, invalid_username}};
+hook_authenticate(#{username := <<"hook_user_finally_good">>}, _AuthResult) ->
+    {stop, {ok, ?NOT_SUPERUSER}};
+hook_authenticate(#{username := <<"hook_user_finally_bad">>}, _AuthResult) ->
+    {stop, {error, invalid_username}};
+hook_authenticate(_ClientId, AuthResult) ->
+    {ok, AuthResult}.
+
 destroy(_State) ->
     ok.
@@ -113,6 +142,10 @@ end_per_testcase(Case, Config) ->
     _ = ?MODULE:Case({'end', Config}),
     ok.

+%%=================================================================================
+%% Testcases
+%%=================================================================================
+
 t_chain({'init', Config}) ->
     Config;
 t_chain(Config) when is_list(Config) ->
@@ -500,6 +533,92 @@ t_convert_certs(Config) when is_list(Config) ->
     clear_certs(CertsDir, #{<<"ssl">> => NCerts3}),
     ?assertEqual(false, filelib:is_regular(maps:get(<<"keyfile">>, NCerts3))).

+t_combine_authn_and_callback({init, Config}) ->
+    [
+        {listener_id, 'tcp:default'},
+        {authn_type, {password_based, built_in_database}}
+        | Config
+    ];
+t_combine_authn_and_callback(Config) when is_list(Config) ->
+    ListenerID = ?config(listener_id),
+    ClientInfo = #{
+        zone => default,
+        listener => ListenerID,
+        protocol => mqtt,
+        password => <<"any">>
+    },
+
+    %% no emqx_authentication authenticators, anonymous is allowed
+    ?assertAuthSuccessForUser(bad),
+
+    AuthNType = ?config(authn_type),
+    register_provider(AuthNType, ?MODULE),
+
+    AuthenticatorConfig = #{
+        mechanism => password_based,
+        backend => built_in_database,
+        enable => true
+    },
+    {ok, _} = ?AUTHN:create_authenticator(ListenerID, AuthenticatorConfig),
+
+    %% emqx_authentication alone
+    ?assertAuthSuccessForUser(good),
+    ?assertAuthFailureForUser(ignore),
+    ?assertAuthFailureForUser(bad),
+
+    %% add hook with higher priority
+    ok = hook(?HP_AUTHN + 1),
+
+    %% for hook unrelated users everything is the same
+    ?assertAuthSuccessForUser(good),
+    ?assertAuthFailureForUser(ignore),
+    ?assertAuthFailureForUser(bad),
+
+    %% higher-priority hook can permit access with {ok,...},
+    %% then emqx_authentication overrides the result
+    ?assertAuthFailureForUser(hook_user_good),
+    ?assertAuthFailureForUser(hook_user_bad),
+
+    %% higher-priority hook can permit and return {stop,...},
+    %% then emqx_authentication cannot override the result
+    ?assertAuthSuccessForUser(hook_user_finally_good),
+    ?assertAuthFailureForUser(hook_user_finally_bad),
+
+    ok = unhook(),
+
+    %% add hook with lower priority
+    ok = hook(?HP_AUTHN - 1),
+
+    %% for hook unrelated users
+    ?assertAuthSuccessForUser(good),
+    ?assertAuthFailureForUser(bad),
+    ?assertAuthFailureForUser(ignore),
+
+    %% lower-priority hook can override auth result,
+    %% because emqx_authentication permits/denies with {ok, ...}
+    ?assertAuthSuccessForUser(hook_user_good),
+    ?assertAuthFailureForUser(hook_user_bad),
+    ?assertAuthSuccessForUser(hook_user_finally_good),
+    ?assertAuthFailureForUser(hook_user_finally_bad),
+
+    ok = unhook();
+t_combine_authn_and_callback({'end', Config}) ->
+    ?AUTHN:delete_chain(?config(listener_id)),
+    ?AUTHN:deregister_provider(?config(authn_type)),
+    ok.
+
+%%=================================================================================
+%% Helper fns
+%%=================================================================================
+
+hook(Priority) ->
+    ok = emqx_hooks:put(
+        'client.authenticate', {?MODULE, hook_authenticate, []}, Priority
+    ).
+
+unhook() ->
+    ok = emqx_hooks:del('client.authenticate', {?MODULE, hook_authenticate}).
+
 update_config(Path, ConfigRequest) ->
     emqx:update_config(Path, ConfigRequest, #{rawconf_with_defaults => true}).
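
Editor's note: the test above condenses to the following decision table, derived purely from its assertions (a summary sketch, not new behavior):

    %% {Username, hook priority relative to ?HP_AUTHN, expected authenticate result}
    Expectations = [
        {hook_user_good,         higher, failure},  %% chain's {ok, _} overrides hook's {ok, _}
        {hook_user_finally_good, higher, success},  %% hook's {stop, _} is final
        {hook_user_finally_bad,  higher, failure},
        {hook_user_good,         lower,  success},  %% lower-priority hook overrides chain's {ok, _}
        {hook_user_finally_bad,  lower,  failure}
    ].
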
@@ -453,7 +453,10 @@ is_all_tcp_servers_available(Servers) ->
         fun({Host, Port}) ->
             is_tcp_server_available(Host, Port)
         end,
-    lists:all(Fun, Servers).
+    case lists:partition(Fun, Servers) of
+        {_, []} -> true;
+        {_, Unavail} -> ct:print("Unavailable servers: ~p", [Unavail])
+    end.

 -spec is_tcp_server_available(
     Host :: inet:socket_address() | inet:hostname(),
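
Editor's note: one review-worthy detail in the hunk above: on the unavailable branch the function now returns the result of ct:print/2 (the atom ok) rather than false, which only reads as "not all available" because callers compare against true. A stricter variant, as a sketch:

    is_all_tcp_servers_available(Servers) ->
        Fun = fun({Host, Port}) -> is_tcp_server_available(Host, Port) end,
        case lists:partition(Fun, Servers) of
            {_, []} ->
                true;
            {_, Unavail} ->
                %% log the failing endpoints, then return a real boolean
                ct:print("Unavailable servers: ~p", [Unavail]),
                false
        end.
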
@@ -37,11 +37,11 @@ ssl_opts_dtls_test() ->
 ssl_opts_tls_1_3_test() ->
     Sc = emqx_schema:server_ssl_opts_schema(#{}, false),
     Checked = validate(Sc, #{<<"versions">> => [<<"tlsv1.3">>]}),
-    ?assertNot(maps:is_key(handshake_timeout, Checked)),
     ?assertMatch(
         #{
             versions := ['tlsv1.3'],
-            ciphers := []
+            ciphers := [],
+            handshake_timeout := _
         },
         Checked
     ).

@@ -43,6 +43,9 @@ init_per_suite(Config) ->
             timer:seconds(100)
         ),
         fun(Trace) ->
+            ct:pal("listener start statuses: ~p", [
+                ?of_kind([listener_started, listener_not_started], Trace)
+            ]),
             %% more than one listener
             ?assertMatch([_ | _], ?of_kind(listener_started, Trace))
         end

@ -71,8 +71,15 @@ do_check_config(#{<<"mechanism">> := Mec} = Config, Opts) ->
|
||||||
Opts#{atom_key => true}
|
Opts#{atom_key => true}
|
||||||
)
|
)
|
||||||
end;
|
end;
|
||||||
do_check_config(_Config, _Opts) ->
|
do_check_config(Config, _Opts) when is_map(Config) ->
|
||||||
throw({invalid_config, "mechanism_field_required"}).
|
throw({invalid_config, "mechanism_field_required", Config});
|
||||||
|
do_check_config(RawConf, Opts) ->
|
||||||
|
%% authentication conf is lazy type, when it comes from ENV, it is a string
|
||||||
|
%% EMQX_AUTHENTICATION__1="{mechanism=\"password_based\"...}"
|
||||||
|
case hocon:binary(RawConf, Opts) of
|
||||||
|
{ok, Conf} -> do_check_config(Conf, Opts);
|
||||||
|
{error, Reason} -> throw({invalid_config, Reason})
|
||||||
|
end.
|
||||||
|
|
||||||
atom(Bin) ->
|
atom(Bin) ->
|
||||||
try
|
try
|
||||||
|
|
|
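
Editor's note: the new fallback clause exists because authentication config can arrive as a raw HOCON string through environment overrides. A sketch of the parse step it delegates to; hocon:binary/2 is the hocon library's string/binary parser, and the option map here is illustrative:

    %% parse an env-style HOCON string such as the EMQX_AUTHENTICATION__1 value
    {ok, Conf} = hocon:binary(
        "{mechanism = \"password_based\", backend = \"built_in_database\"}",
        #{format => map}
    ),
    true = is_map(Conf).
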
@@ -80,7 +80,6 @@
     <<"servers">> => <<?REDIS_SINGLE_HOST, ",127.0.0.1:6380">>,
     <<"redis_type">> => <<"cluster">>,
     <<"pool_size">> => 1,
-    <<"database">> => 0,
     <<"password">> => <<"ee">>,
     <<"auto_reconnect">> => true,
     <<"ssl">> => #{<<"enable">> => false},

@@ -1,3 +1,19 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
 -define(EMPTY_METRICS,
     ?METRICS(
         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0

@ -53,7 +53,10 @@
|
||||||
T == mysql;
|
T == mysql;
|
||||||
T == gcp_pubsub;
|
T == gcp_pubsub;
|
||||||
T == influxdb_api_v1;
|
T == influxdb_api_v1;
|
||||||
T == influxdb_api_v2
|
T == influxdb_api_v2;
|
||||||
|
T == redis_single;
|
||||||
|
T == redis_sentinel;
|
||||||
|
T == redis_cluster
|
||||||
).
|
).
|
||||||
|
|
||||||
load() ->
|
load() ->
|
||||||
|
@ -135,6 +138,7 @@ on_message_publish(Message = #message{topic = Topic, flags = Flags}) ->
|
||||||
{ok, Message}.
|
{ok, Message}.
|
||||||
|
|
||||||
send_to_matched_egress_bridges(Topic, Msg) ->
|
send_to_matched_egress_bridges(Topic, Msg) ->
|
||||||
|
MatchedBridgeIds = get_matched_egress_bridges(Topic),
|
||||||
lists:foreach(
|
lists:foreach(
|
||||||
fun(Id) ->
|
fun(Id) ->
|
||||||
try send_message(Id, Msg) of
|
try send_message(Id, Msg) of
|
||||||
|
@ -157,7 +161,7 @@ send_to_matched_egress_bridges(Topic, Msg) ->
|
||||||
})
|
})
|
||||||
end
|
end
|
||||||
end,
|
end,
|
||||||
get_matched_bridges(Topic)
|
MatchedBridgeIds
|
||||||
).
|
).
|
||||||
|
|
||||||
send_message(BridgeId, Message) ->
|
send_message(BridgeId, Message) ->
|
||||||
|
@ -242,6 +246,12 @@ disable_enable(Action, BridgeType, BridgeName) when
|
||||||
).
|
).
|
||||||
|
|
||||||
create(BridgeType, BridgeName, RawConf) ->
|
create(BridgeType, BridgeName, RawConf) ->
|
||||||
|
?SLOG(debug, #{
|
||||||
|
brige_action => create,
|
||||||
|
bridge_type => BridgeType,
|
||||||
|
bridge_name => BridgeName,
|
||||||
|
bridge_raw_config => RawConf
|
||||||
|
}),
|
||||||
emqx_conf:update(
|
emqx_conf:update(
|
||||||
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
|
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
|
||||||
RawConf,
|
RawConf,
|
||||||
|
@ -249,6 +259,11 @@ create(BridgeType, BridgeName, RawConf) ->
|
||||||
).
|
).
|
||||||
|
|
||||||
remove(BridgeType, BridgeName) ->
|
remove(BridgeType, BridgeName) ->
|
||||||
|
?SLOG(debug, #{
|
||||||
|
brige_action => remove,
|
||||||
|
bridge_type => BridgeType,
|
||||||
|
bridge_name => BridgeName
|
||||||
|
}),
|
||||||
emqx_conf:remove(
|
emqx_conf:remove(
|
||||||
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
|
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
|
||||||
#{override_to => cluster}
|
#{override_to => cluster}
|
||||||
|
@@ -324,13 +339,19 @@ flatten_confs(Conf0) ->
 do_flatten_confs(Type, Conf0) ->
     [{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].

-get_matched_bridges(Topic) ->
+get_matched_egress_bridges(Topic) ->
     Bridges = emqx:get_config([bridges], #{}),
     maps:fold(
         fun(BType, Conf, Acc0) ->
             maps:fold(
-                fun(BName, BConf, Acc1) ->
-                    get_matched_bridge_id(BType, BConf, Topic, BName, Acc1)
+                fun
+                    (BName, #{egress := _} = BConf, Acc1) when BType =:= mqtt ->
+                        get_matched_bridge_id(BType, BConf, Topic, BName, Acc1);
+                    (_BName, #{ingress := _}, Acc1) when BType =:= mqtt ->
+                        %% ignore ingress-only bridges
+                        Acc1;
+                    (BName, BConf, Acc1) ->
+                        get_matched_bridge_id(BType, BConf, Topic, BName, Acc1)
                 end,
                 Acc0,
                 Conf
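
Review note: the rewritten fold dispatches on the shape of each bridge config with a multi-clause anonymous fun, so MQTT bridges that only have an `ingress` section are skipped when matching egress topics. A minimal, self-contained sketch of the same dispatch pattern (module name and data are made up for illustration):

    -module(fun_clause_demo).
    -export([egress_names/1]).

    %% Keep the names of egress-capable bridges, skipping ingress-only ones,
    %% using the same multi-clause fun + map-pattern style as the change above.
    egress_names(Confs) ->
        maps:fold(
            fun
                (Name, #{egress := _}, Acc) -> [Name | Acc];
                (_Name, #{ingress := _}, Acc) -> Acc;
                (Name, _Conf, Acc) -> [Name | Acc]
            end,
            [],
            Confs
        ).

    %% egress_names(#{a => #{egress => #{}}, b => #{ingress => #{}}, c => #{}})
    %% returns [a, c] in some order (maps:fold traversal order is undefined).
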
@@ -34,6 +34,13 @@
 -define(NAME_MQTT, <<"my_mqtt_bridge">>).
 -define(BRIDGE_NAME_INGRESS, <<"ingress_mqtt_bridge">>).
 -define(BRIDGE_NAME_EGRESS, <<"egress_mqtt_bridge">>).

+%% Use distinct ingress/egress prefixes in topic names to avoid a dead loop while bridging
+-define(INGRESS_REMOTE_TOPIC, "ingress_remote_topic").
+-define(INGRESS_LOCAL_TOPIC, "ingress_local_topic").
+-define(EGRESS_REMOTE_TOPIC, "egress_remote_topic").
+-define(EGRESS_LOCAL_TOPIC, "egress_local_topic").
+
 -define(SERVER_CONF(Username), #{
     <<"server">> => <<"127.0.0.1:1883">>,
     <<"username">> => Username,
@@ -44,11 +51,11 @@

 -define(INGRESS_CONF, #{
     <<"remote">> => #{
-        <<"topic">> => <<"remote_topic/#">>,
+        <<"topic">> => <<?INGRESS_REMOTE_TOPIC, "/#">>,
         <<"qos">> => 2
     },
     <<"local">> => #{
-        <<"topic">> => <<"local_topic/${topic}">>,
+        <<"topic">> => <<?INGRESS_LOCAL_TOPIC, "/${topic}">>,
         <<"qos">> => <<"${qos}">>,
         <<"payload">> => <<"${payload}">>,
         <<"retain">> => <<"${retain}">>
@@ -57,10 +64,10 @@

 -define(EGRESS_CONF, #{
     <<"local">> => #{
-        <<"topic">> => <<"local_topic/#">>
+        <<"topic">> => <<?EGRESS_LOCAL_TOPIC, "/#">>
     },
     <<"remote">> => #{
-        <<"topic">> => <<"remote_topic/${topic}">>,
+        <<"topic">> => <<?EGRESS_REMOTE_TOPIC, "/${topic}">>,
         <<"payload">> => <<"${payload}">>,
         <<"qos">> => <<"${qos}">>,
         <<"retain">> => <<"${retain}">>
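
Review note: the `<<?EGRESS_LOCAL_TOPIC, "/#">>` form works because a macro expanding to a string literal may appear inside a binary constructor, where adjacent string literals are concatenated at compile time. A tiny sketch (the macro value matches the suite's own define):

    -module(topic_macro_demo).
    -export([topics/0]).

    -define(EGRESS_LOCAL_TOPIC, "egress_local_topic").

    %% String literals inside a binary constructor are concatenated at compile time.
    topics() ->
        Filter = <<?EGRESS_LOCAL_TOPIC, "/#">>,    %% <<"egress_local_topic/#">>
        Concrete = <<?EGRESS_LOCAL_TOPIC, "/1">>,  %% <<"egress_local_topic/1">>
        {Filter, Concrete}.
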
@@ -155,8 +162,8 @@ t_mqtt_conn_bridge_ingress(_) ->
     BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),

     %% we now test if the bridge works as expected
-    RemoteTopic = <<"remote_topic/1">>,
-    LocalTopic = <<"local_topic/", RemoteTopic/binary>>,
+    RemoteTopic = <<?INGRESS_REMOTE_TOPIC, "/1">>,
+    LocalTopic = <<?INGRESS_LOCAL_TOPIC, "/", RemoteTopic/binary>>,
     Payload = <<"hello">>,
     emqx:subscribe(LocalTopic),
     timer:sleep(100),
@@ -219,8 +226,8 @@ t_mqtt_conn_bridge_egress(_) ->
     } = jsx:decode(Bridge),
     BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
     %% we now test if the bridge works as expected
-    LocalTopic = <<"local_topic/1">>,
-    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
+    LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
+    RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
     Payload = <<"hello">>,
     emqx:subscribe(RemoteTopic),
     timer:sleep(100),
@@ -264,6 +271,113 @@ t_mqtt_conn_bridge_egress(_) ->
     {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
     ok.

+t_mqtt_conn_bridge_ingress_and_egress(_) ->
+    User1 = <<"user1">>,
+    %% create an MQTT bridge, using POST
+    {ok, 201, Bridge} = request(
+        post,
+        uri(["bridges"]),
+        ?SERVER_CONF(User1)#{
+            <<"type">> => ?TYPE_MQTT,
+            <<"name">> => ?BRIDGE_NAME_INGRESS,
+            <<"ingress">> => ?INGRESS_CONF
+        }
+    ),
+
+    #{
+        <<"type">> := ?TYPE_MQTT,
+        <<"name">> := ?BRIDGE_NAME_INGRESS
+    } = jsx:decode(Bridge),
+    BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
+    {ok, 201, Bridge2} = request(
+        post,
+        uri(["bridges"]),
+        ?SERVER_CONF(User1)#{
+            <<"type">> => ?TYPE_MQTT,
+            <<"name">> => ?BRIDGE_NAME_EGRESS,
+            <<"egress">> => ?EGRESS_CONF
+        }
+    ),
+    #{
+        <<"type">> := ?TYPE_MQTT,
+        <<"name">> := ?BRIDGE_NAME_EGRESS
+    } = jsx:decode(Bridge2),
+
+    BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
+    %% we now test if the bridge works as expected
+    LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
+    RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
+    Payload = <<"hello">>,
+    emqx:subscribe(RemoteTopic),
+
+    {ok, 200, BridgeStr1} = request(get, uri(["bridges", BridgeIDEgress]), []),
+    #{
+        <<"metrics">> := #{
+            <<"matched">> := CntMatched1, <<"success">> := CntSuccess1, <<"failed">> := 0
+        },
+        <<"node_metrics">> :=
+            [
+                #{
+                    <<"node">> := _,
+                    <<"metrics">> :=
+                        #{
+                            <<"matched">> := NodeCntMatched1,
+                            <<"success">> := NodeCntSuccess1,
+                            <<"failed">> := 0
+                        }
+                }
+            ]
+    } = jsx:decode(BridgeStr1),
+    timer:sleep(100),
+    %% PUBLISH a message to the 'local' broker, as we have only one broker,
+    %% the remote broker is also the local one.
+    emqx:publish(emqx_message:make(LocalTopic, Payload)),
+
+    %% we should receive a message on the "remote" broker, with specified topic
+    ?assert(
+        receive
+            {deliver, RemoteTopic, #message{payload = Payload}} ->
+                ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
+                true;
+            Msg ->
+                ct:pal("Msg: ~p", [Msg]),
+                false
+        after 100 ->
+            false
+        end
+    ),
+
+    %% verify the metrics of the bridge
+    timer:sleep(1000),
+    {ok, 200, BridgeStr2} = request(get, uri(["bridges", BridgeIDEgress]), []),
+    #{
+        <<"metrics">> := #{
+            <<"matched">> := CntMatched2, <<"success">> := CntSuccess2, <<"failed">> := 0
+        },
+        <<"node_metrics">> :=
+            [
+                #{
+                    <<"node">> := _,
+                    <<"metrics">> :=
+                        #{
+                            <<"matched">> := NodeCntMatched2,
+                            <<"success">> := NodeCntSuccess2,
+                            <<"failed">> := 0
+                        }
+                }
+            ]
+    } = jsx:decode(BridgeStr2),
+    ?assertEqual(CntMatched2, CntMatched1 + 1),
+    ?assertEqual(CntSuccess2, CntSuccess1 + 1),
+    ?assertEqual(NodeCntMatched2, NodeCntMatched1 + 1),
+    ?assertEqual(NodeCntSuccess2, NodeCntSuccess1 + 1),
+
+    %% delete the bridge
+    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
+    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []),
+    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
+    ok.
+
 t_ingress_mqtt_bridge_with_rules(_) ->
     {ok, 201, _} = request(
         post,
@@ -290,8 +404,8 @@ t_ingress_mqtt_bridge_with_rules(_) ->

     %% we now test if the bridge works as expected

-    RemoteTopic = <<"remote_topic/1">>,
-    LocalTopic = <<"local_topic/", RemoteTopic/binary>>,
+    RemoteTopic = <<?INGRESS_REMOTE_TOPIC, "/1">>,
+    LocalTopic = <<?INGRESS_LOCAL_TOPIC, "/", RemoteTopic/binary>>,
     Payload = <<"hello">>,
     emqx:subscribe(LocalTopic),
     timer:sleep(100),
@@ -400,8 +514,8 @@ t_egress_mqtt_bridge_with_rules(_) ->
     #{<<"id">> := RuleId} = jsx:decode(Rule),

     %% we now test if the bridge works as expected
-    LocalTopic = <<"local_topic/1">>,
-    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
+    LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
+    RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
     Payload = <<"hello">>,
     emqx:subscribe(RemoteTopic),
     timer:sleep(100),
@@ -426,7 +540,7 @@ t_egress_mqtt_bridge_with_rules(_) ->
     %% PUBLISH a message to the rule.
     Payload2 = <<"hi">>,
     RuleTopic = <<"t/1">>,
-    RemoteTopic2 = <<"remote_topic/", RuleTopic/binary>>,
+    RemoteTopic2 = <<?EGRESS_REMOTE_TOPIC, "/", RuleTopic/binary>>,
     emqx:subscribe(RemoteTopic2),
     timer:sleep(100),
     emqx:publish(emqx_message:make(RuleTopic, Payload2)),
@@ -517,8 +631,8 @@ t_mqtt_conn_bridge_egress_reconnect(_) ->
     } = jsx:decode(Bridge),
     BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
     %% we now test if the bridge works as expected
-    LocalTopic = <<"local_topic/1">>,
-    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
+    LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
+    RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
     Payload0 = <<"hello">>,
     emqx:subscribe(RemoteTopic),
     timer:sleep(100),

@@ -37,7 +37,7 @@

 -export([connect/1]).

--export([cmd/3]).
+-export([do_cmd/3]).

 %% Redis hosts don't need parsing
 -define(REDIS_HOST_OPTIONS, #{
@@ -63,7 +63,8 @@ fields(single) ->
     [
         {server, fun server/1},
         {redis_type, #{
-            type => hoconsc:enum([single]),
+            type => single,
+            default => single,
             required => true,
             desc => ?DESC("single")
         }}
@@ -74,18 +75,20 @@ fields(cluster) ->
     [
         {servers, fun servers/1},
         {redis_type, #{
-            type => hoconsc:enum([cluster]),
+            type => cluster,
+            default => cluster,
             required => true,
             desc => ?DESC("cluster")
         }}
     ] ++
-        redis_fields() ++
+        lists:keydelete(database, 1, redis_fields()) ++
         emqx_connector_schema_lib:ssl_fields();
 fields(sentinel) ->
     [
         {servers, fun servers/1},
         {redis_type, #{
-            type => hoconsc:enum([sentinel]),
+            type => sentinel,
+            default => sentinel,
             required => true,
             desc => ?DESC("sentinel")
         }},
@@ -119,7 +122,6 @@ on_start(
     InstId,
     #{
         redis_type := Type,
-        database := Database,
         pool_size := PoolSize,
         auto_reconnect := AutoReconn,
         ssl := SSL
@@ -135,13 +137,17 @@ on_start(
             single -> [{servers, [maps:get(server, Config)]}];
             _ -> [{servers, maps:get(servers, Config)}]
         end,
+    Database =
+        case Type of
+            cluster -> [];
+            _ -> [{database, maps:get(database, Config)}]
+        end,
     Opts =
         [
             {pool_size, PoolSize},
-            {database, Database},
             {password, maps:get(password, Config, "")},
             {auto_reconnect, reconn_interval(AutoReconn)}
-        ] ++ Servers,
+        ] ++ Database ++ Servers,
     Options =
         case maps:get(enable, SSL) of
             true ->
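
Review note: Redis Cluster has no SELECT-able logical databases, so the `database` field is removed from the cluster schema (via `lists:keydelete/3` above) and conditionally omitted from the runtime options. A minimal sketch of the conditional-proplist pattern used here (the literal values are made up):

    -module(redis_opts_demo).
    -export([opts/1]).

    %% Append {database, _} only for the Redis deployment types that support it.
    opts(Type) ->
        Database =
            case Type of
                cluster -> [];
                _ -> [{database, 1}]
            end,
        [{pool_size, 8}, {auto_reconnect, 15}] ++ Database.

    %% opts(cluster) -> [{pool_size,8},{auto_reconnect,15}]
    %% opts(single)  -> [{pool_size,8},{auto_reconnect,15},{database,1}]
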
@@ -157,9 +163,12 @@ on_start(
     case Type of
         cluster ->
             case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of
-                {ok, _} -> {ok, State};
-                {ok, _, _} -> {ok, State};
-                {error, Reason} -> {error, Reason}
+                {ok, _} ->
+                    {ok, State};
+                {ok, _, _} ->
+                    {ok, State};
+                {error, Reason} ->
+                    {error, Reason}
             end;
         _ ->
             case
@@ -180,23 +189,28 @@ on_stop(InstId, #{poolname := PoolName, type := Type}) ->
         _ -> emqx_plugin_libs_pool:stop_pool(PoolName)
     end.

-on_query(InstId, {cmd, Command}, #{poolname := PoolName, type := Type} = State) ->
+on_query(InstId, {cmd, _} = Query, State) ->
+    do_query(InstId, Query, State);
+on_query(InstId, {cmds, _} = Query, State) ->
+    do_query(InstId, Query, State).
+
+do_query(InstId, Query, #{poolname := PoolName, type := Type} = State) ->
     ?TRACE(
         "QUERY",
         "redis_connector_received",
-        #{connector => InstId, sql => Command, state => State}
+        #{connector => InstId, query => Query, state => State}
     ),
     Result =
         case Type of
-            cluster -> eredis_cluster:q(PoolName, Command);
-            _ -> ecpool:pick_and_do(PoolName, {?MODULE, cmd, [Type, Command]}, no_handover)
+            cluster -> do_cmd(PoolName, cluster, Query);
+            _ -> ecpool:pick_and_do(PoolName, {?MODULE, do_cmd, [Type, Query]}, no_handover)
         end,
     case Result of
         {error, Reason} ->
             ?SLOG(error, #{
-                msg => "redis_connector_do_cmd_query_failed",
+                msg => "redis_connector_do_query_failed",
                 connector => InstId,
-                sql => Command,
+                query => Query,
                 reason => Reason
             });
         _ ->
@@ -226,7 +240,7 @@ on_get_status(_InstId, #{type := cluster, poolname := PoolName, auto_reconnect :=
             Health = eredis_cluster_workers_exist_and_are_connected(Workers),
             status_result(Health, AutoReconn);
         false ->
-            disconnect
+            disconnected
     end;
 on_get_status(_InstId, #{poolname := Pool, auto_reconnect := AutoReconn}) ->
     Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1),
@@ -245,10 +259,29 @@ status_result(_Status = false, _AutoReconn = false) -> disconnected.
 reconn_interval(true) -> 15;
 reconn_interval(false) -> false.

-cmd(Conn, cluster, Command) ->
-    eredis_cluster:q(Conn, Command);
-cmd(Conn, _Type, Command) ->
-    eredis:q(Conn, Command).
+do_cmd(PoolName, cluster, {cmd, Command}) ->
+    eredis_cluster:q(PoolName, Command);
+do_cmd(Conn, _Type, {cmd, Command}) ->
+    eredis:q(Conn, Command);
+do_cmd(PoolName, cluster, {cmds, Commands}) ->
+    wrap_qp_result(eredis_cluster:qp(PoolName, Commands));
+do_cmd(Conn, _Type, {cmds, Commands}) ->
+    wrap_qp_result(eredis:qp(Conn, Commands)).
+
+wrap_qp_result({error, _} = Error) ->
+    Error;
+wrap_qp_result(Results) when is_list(Results) ->
+    AreAllOK = lists:all(
+        fun
+            ({ok, _}) -> true;
+            ({error, _}) -> false
+        end,
+        Results
+    ),
+    case AreAllOK of
+        true -> {ok, Results};
+        false -> {error, Results}
+    end.

 %% ===================================================================
 connect(Opts) ->
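
Review note: a `{cmds, ...}` pipeline returns one `{ok, _}` or `{error, _}` per command from `eredis:qp/2` / `eredis_cluster:qp/2`, and `wrap_qp_result/1` collapses that list into a single verdict. A self-contained sketch of the same collapsing logic (no Redis required; the module name is made up):

    -module(qp_result_demo).
    -export([wrap/1]).

    %% {ok, Results} only when every pipelined command succeeded,
    %% mirroring wrap_qp_result/1 above.
    wrap({error, _} = Error) ->
        Error;
    wrap(Results) when is_list(Results) ->
        case lists:all(fun({Tag, _}) -> Tag =:= ok end, Results) of
            true -> {ok, Results};
            false -> {error, Results}
        end.

    %% wrap([{ok, <<"PONG">>}, {ok, <<"PONG">>}])     -> {ok, [...]}
    %% wrap([{ok, <<"PONG">>}, {error, unknown_cmd}]) -> {error, [...]}
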
@@ -111,6 +111,14 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
     ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
     % Perform query as further check that the resource is working as expected
     ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
+    ?assertEqual(
+        {ok, [{ok, <<"PONG">>}, {ok, <<"PONG">>}]},
+        emqx_resource:query(PoolName, {cmds, [RedisCommand, RedisCommand]})
+    ),
+    ?assertMatch(
+        {error, [{ok, <<"PONG">>}, {error, _}]},
+        emqx_resource:query(PoolName, {cmds, [RedisCommand, [<<"INVALID_COMMAND">>]]})
+    ),
     ?assertEqual(ok, emqx_resource:stop(PoolName)),
     % Resource will be listed still, but state will be changed and healthcheck will fail
     % as the worker no longer exists.
@@ -152,14 +160,14 @@ redis_config_cluster() ->
 redis_config_sentinel() ->
     redis_config_base("sentinel", "servers").

--define(REDIS_CONFIG_BASE(MaybeSentinel),
+-define(REDIS_CONFIG_BASE(MaybeSentinel, MaybeDatabase),
     "" ++
         "\n" ++
         " auto_reconnect = true\n" ++
-        " database = 1\n" ++
         " pool_size = 8\n" ++
         " redis_type = ~s\n" ++
         MaybeSentinel ++
+        MaybeDatabase ++
         " password = public\n" ++
         " ~s = \"~s:~b\"\n" ++
         " " ++
@@ -171,15 +179,22 @@ redis_config_base(Type, ServerKey) ->
         "sentinel" ->
             Host = ?REDIS_SENTINEL_HOST,
             Port = ?REDIS_SENTINEL_PORT,
-            MaybeSentinel = " sentinel = mymaster\n";
-        _ ->
+            MaybeSentinel = " sentinel = mymaster\n",
+            MaybeDatabase = " database = 1\n";
+        "single" ->
             Host = ?REDIS_SINGLE_HOST,
             Port = ?REDIS_SINGLE_PORT,
-            MaybeSentinel = ""
+            MaybeSentinel = "",
+            MaybeDatabase = " database = 1\n";
+        "cluster" ->
+            Host = ?REDIS_SINGLE_HOST,
+            Port = ?REDIS_SINGLE_PORT,
+            MaybeSentinel = "",
+            MaybeDatabase = ""
     end,
     RawConfig = list_to_binary(
         io_lib:format(
-            ?REDIS_CONFIG_BASE(MaybeSentinel),
+            ?REDIS_CONFIG_BASE(MaybeSentinel, MaybeDatabase),
             [Type, ServerKey, Host, Port]
         )
     ),

@@ -81,7 +81,7 @@ schema(?PATH("/observe")) ->
             ],
             'requestBody' => [],
             responses => #{
-                200 => <<"No Content">>,
+                204 => <<"No Content">>,
                 404 => error_codes(['CLIENT_NOT_FOUND'], <<"Clientid not found">>)
             }
         }
@@ -98,7 +98,7 @@ schema(?PATH("/read")) ->
                 {path, mk(binary(), #{in => query, required => true, example => "/3/0/7"})}
             ],
             responses => #{
-                200 => <<"No Content">>,
+                204 => <<"No Content">>,
                 404 => error_codes(['CLIENT_NOT_FOUND'], <<"clientid not found">>)
             }
         }
@@ -121,7 +121,7 @@ schema(?PATH("/write")) ->
                 {value, mk(binary(), #{in => query, required => true, example => 123})}
             ],
             responses => #{
-                200 => <<"No Content">>,
+                204 => <<"No Content">>,
                 404 => error_codes(['CLIENT_NOT_FOUND'], <<"Clientid not found">>)
             }
         }
@@ -275,7 +275,7 @@ send_cmd(ClientId, Cmd) ->
     case emqx_gateway_cm_registry:lookup_channels(lwm2m, ClientId) of
         [Channel | _] ->
             ok = emqx_lwm2m_channel:send_cmd(Channel, Cmd),
-            {200};
+            {204};
         _ ->
             {404, #{code => 'CLIENT_NOT_FOUND'}}
     end.

@@ -253,7 +253,7 @@ t_read(Config) ->
     test_recv_mqtt_response(RespTopic),

     %% step2, call Read API
-    call_send_api(Epn, "read", "path=/3/0/0"),
+    ?assertMatch({204, []}, call_send_api(Epn, "read", "path=/3/0/0")),
     timer:sleep(100),
     #coap_message{type = Type, method = Method, options = Opts} = test_recv_coap_request(UdpSock),
     ?assertEqual(con, Type),
@@ -289,7 +289,7 @@ t_write(Config) ->
     test_recv_mqtt_response(RespTopic),

     %% step2, call write API
-    call_send_api(Epn, "write", "path=/3/0/13&type=Integer&value=123"),
+    ?assertMatch({204, []}, call_send_api(Epn, "write", "path=/3/0/13&type=Integer&value=123")),
     timer:sleep(100),
     #coap_message{type = Type, method = Method, options = Opts} = test_recv_coap_request(UdpSock),
     ?assertEqual(con, Type),
@@ -326,7 +326,7 @@ t_observe(Config) ->
     test_recv_mqtt_response(RespTopic),

     %% step2, call observe API
-    call_deprecated_send_api(Epn, "observe", "path=/3/0/1&enable=false"),
+    ?assertMatch({204, []}, call_deprecated_send_api(Epn, "observe", "path=/3/0/1&enable=false")),
     timer:sleep(100),
     #coap_message{type = Type, method = Method, options = Opts} = test_recv_coap_request(UdpSock),
     ?assertEqual(con, Type),
@@ -354,9 +354,12 @@ call_deprecated_send_api(ClientId, Cmd, Query) ->
 call_send_api(ClientId, Cmd, Query, API) ->
     ApiPath = emqx_mgmt_api_test_util:api_path([API, ClientId, Cmd]),
     Auth = emqx_mgmt_api_test_util:auth_header_(),
-    {ok, Response} = emqx_mgmt_api_test_util:request_api(post, ApiPath, Query, Auth),
+    Opts = #{return_all => true},
+    {ok, {{"HTTP/1.1", StatusCode, _}, _Headers, Response}} = emqx_mgmt_api_test_util:request_api(
+        post, ApiPath, Query, Auth, [], Opts
+    ),
     ?LOGT("rest api response:~ts~n", [Response]),
-    Response.
+    {StatusCode, Response}.

 no_received_request(ClientId, Path, Action) ->
     Response = call_lookup_api(ClientId, Path, Action),

@@ -45,10 +45,14 @@ set_prompt_func() ->
 prompt_func(PropList) ->
     Line = proplists:get_value(history, PropList, 1),
     Version = emqx_release:version(),
-    Edition = emqx_release:edition(),
+    Prefix =
+        case emqx_release:edition() of
+            ce -> "v";
+            ee -> "e"
+        end,
     case is_alive() of
-        true -> io_lib:format(<<"~ts-~ts(~s)~w> ">>, [Edition, Version, node(), Line]);
-        false -> io_lib:format(<<"~ts-~ts ~w> ">>, [Edition, Version, Line])
+        true -> io_lib:format(<<"~ts~ts(~s)~w> ">>, [Prefix, Version, node(), Line]);
+        false -> io_lib:format(<<"~ts~ts ~w> ">>, [Prefix, Version, Line])
     end.

 local_allowed(MF, Args, State) ->
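
Review note: the shell prompt changes from an `Edition-Version` form to `v<version>` for the open-source edition and `e<version>` for the enterprise edition. A sketch of the resulting formatting (version and node name are made-up values):

    -module(prompt_demo).
    -export([prompt/2]).

    %% Same format string as prompt_func/1 above; Prefix is "v" for ce, "e" for ee.
    prompt(Edition, Line) ->
        Prefix =
            case Edition of
                ce -> "v";
                ee -> "e"
            end,
        Version = "5.0.4",        %% made-up version
        Node = "emqx@127.0.0.1",  %% made-up node name
        lists:flatten(io_lib:format(<<"~ts~ts(~s)~w> ">>, [Prefix, Version, Node, Line])).

    %% prompt(ce, 1) -> "v5.0.4(emqx@127.0.0.1)1> "
    %% prompt(ee, 2) -> "e5.0.4(emqx@127.0.0.1)2> "
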
@@ -48,6 +48,7 @@

 -define(TO_BIN(_B_), iolist_to_binary(_B_)).
 -define(NOT_FOUND(N), {404, #{code => 'NOT_FOUND', message => ?TO_BIN([N, " NOT FOUND"])}}).
+-define(BAD_REQUEST(C, M), {400, #{code => C, message => ?TO_BIN(M)}}).
 -define(TAGS, [<<"Trace">>]).

 namespace() -> "trace".
@@ -83,11 +84,16 @@ schema("/trace") ->
             200 => hoconsc:ref(trace),
             400 => emqx_dashboard_swagger:error_codes(
                 [
-                    'ALREADY_EXISTS',
-                    'DUPLICATE_CONDITION',
                     'INVALID_PARAMS'
                 ],
-                <<"trace name already exists">>
+                <<"invalid trace params">>
+            ),
+            409 => emqx_dashboard_swagger:error_codes(
+                [
+                    'ALREADY_EXISTS',
+                    'DUPLICATE_CONDITION'
+                ],
+                <<"trace already exists">>
             )
         }
     },
@@ -141,6 +147,7 @@ schema("/trace/:name/download") ->
                     #{schema => #{type => "string", format => "binary"}}
                 }
             },
+            400 => emqx_dashboard_swagger:error_codes(['NODE_ERROR'], <<"Node Not Found">>),
             404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Trace Name Not Found">>)
         }
     }
|
||||||
{items, hoconsc:mk(binary(), #{example => "TEXT-LOG-ITEMS"})},
|
{items, hoconsc:mk(binary(), #{example => "TEXT-LOG-ITEMS"})},
|
||||||
{meta, fields(bytes) ++ fields(position)}
|
{meta, fields(bytes) ++ fields(position)}
|
||||||
],
|
],
|
||||||
400 => emqx_dashboard_swagger:error_codes(
|
400 => emqx_dashboard_swagger:error_codes(['NODE_ERROR'], <<"Trace Log Failed">>),
|
||||||
['READ_FILE_ERROR', 'RPC_ERROR', 'NODE_ERROR'], <<"Trace Log Failed">>
|
404 => emqx_dashboard_swagger:error_codes(['NOT_FOUND'], <<"Trace Name Not Found">>)
|
||||||
)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}.
|
}.
|
||||||
|
@ -391,12 +397,12 @@ trace(post, #{body := Param}) ->
|
||||||
{ok, Trace0} ->
|
{ok, Trace0} ->
|
||||||
{200, format_trace(Trace0)};
|
{200, format_trace(Trace0)};
|
||||||
{error, {already_existed, Name}} ->
|
{error, {already_existed, Name}} ->
|
||||||
{400, #{
|
{409, #{
|
||||||
code => 'ALREADY_EXISTS',
|
code => 'ALREADY_EXISTS',
|
||||||
message => ?TO_BIN([Name, " Already Exists"])
|
message => ?TO_BIN([Name, " Already Exists"])
|
||||||
}};
|
}};
|
||||||
{error, {duplicate_condition, Name}} ->
|
{error, {duplicate_condition, Name}} ->
|
||||||
{400, #{
|
{409, #{
|
||||||
code => 'DUPLICATE_CONDITION',
|
code => 'DUPLICATE_CONDITION',
|
||||||
message => ?TO_BIN([Name, " Duplication Condition"])
|
message => ?TO_BIN([Name, " Duplication Condition"])
|
||||||
}};
|
}};
|
||||||
|
@@ -450,30 +456,31 @@ update_trace(put, #{bindings := #{name := Name}}) ->
 %% if HTTP request headers include accept-encoding: gzip and file size > 300 bytes.
 %% cowboy_compress_h will auto encode gzip format.
 download_trace_log(get, #{bindings := #{name := Name}, query_string := Query}) ->
-    Nodes =
-        case parse_node(Query, undefined) of
-            {ok, undefined} -> mria_mnesia:running_nodes();
-            {ok, Node0} -> [Node0];
-            {error, not_found} -> mria_mnesia:running_nodes()
-        end,
     case emqx_trace:get_trace_filename(Name) of
         {ok, TraceLog} ->
-            TraceFiles = collect_trace_file(Nodes, TraceLog),
-            ZipDir = emqx_trace:zip_dir(),
-            Zips = group_trace_file(ZipDir, TraceLog, TraceFiles),
-            FileName = binary_to_list(Name) ++ ".zip",
-            ZipFileName = filename:join([ZipDir, FileName]),
-            {ok, ZipFile} = zip:zip(ZipFileName, Zips, [{cwd, ZipDir}]),
-            %% emqx_trace:delete_files_after_send(ZipFileName, Zips),
-            %% TODO: use file instead of file_binary (delete-file-after-send is not ready yet).
-            {ok, Binary} = file:read_file(ZipFile),
-            ZipName = filename:basename(ZipFile),
-            _ = file:delete(ZipFile),
-            Headers = #{
-                <<"content-type">> => <<"application/x-zip">>,
-                <<"content-disposition">> => iolist_to_binary("attachment; filename=" ++ ZipName)
-            },
-            {200, Headers, {file_binary, ZipName, Binary}};
+            case parse_node(Query, undefined) of
+                {ok, Node} ->
+                    TraceFiles = collect_trace_file(Node, TraceLog),
+                    ZipDir = emqx_trace:zip_dir(),
+                    Zips = group_trace_file(ZipDir, TraceLog, TraceFiles),
+                    FileName = binary_to_list(Name) ++ ".zip",
+                    ZipFileName = filename:join([ZipDir, FileName]),
+                    {ok, ZipFile} = zip:zip(ZipFileName, Zips, [{cwd, ZipDir}]),
+                    %% emqx_trace:delete_files_after_send(ZipFileName, Zips),
+                    %% TODO: use file instead of file_binary (delete-file-after-send is not ready yet).
+                    {ok, Binary} = file:read_file(ZipFile),
+                    ZipName = filename:basename(ZipFile),
+                    _ = file:delete(ZipFile),
+                    Headers = #{
+                        <<"content-type">> => <<"application/x-zip">>,
+                        <<"content-disposition">> => iolist_to_binary(
+                            "attachment; filename=" ++ ZipName
+                        )
+                    },
+                    {200, Headers, {file_binary, ZipName, Binary}};
+                {error, not_found} ->
+                    ?BAD_REQUEST('NODE_ERROR', <<"Node not found">>)
+            end;
        {error, not_found} ->
            ?NOT_FOUND(Name)
    end.
@@ -503,8 +510,11 @@ group_trace_file(ZipDir, TraceLog, TraceFiles) ->
         TraceFiles
     ).

-collect_trace_file(Nodes, TraceLog) ->
-    wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog)).
+collect_trace_file(undefined, TraceLog) ->
+    Nodes = mria_mnesia:running_nodes(),
+    wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog));
+collect_trace_file(Node, TraceLog) ->
+    wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file([Node], TraceLog)).

 collect_trace_file_detail(TraceLog) ->
     Nodes = mria_mnesia:running_nodes(),
@@ -551,21 +561,13 @@ stream_log_file(get, #{bindings := #{name := Name}, query_string := Query}) ->
                 {error, enoent} ->
                     Meta = #{<<"position">> => Position, <<"bytes">> => Bytes},
                     {200, #{meta => Meta, items => <<"">>}};
-                {error, Reason} ->
-                    ?SLOG(error, #{
-                        msg => "read_file_failed",
-                        node => Node,
-                        name => Name,
-                        reason => Reason,
-                        position => Position,
-                        bytes => Bytes
-                    }),
-                    {400, #{code => 'READ_FILE_ERROR', message => Reason}};
+                {error, not_found} ->
+                    ?NOT_FOUND(Name);
                 {badrpc, nodedown} ->
-                    {400, #{code => 'RPC_ERROR', message => "BadRpc node down"}}
+                    ?BAD_REQUEST('NODE_ERROR', <<"Node not found">>)
             end;
         {error, not_found} ->
-            {400, #{code => 'NODE_ERROR', message => <<"Node not found">>}}
+            ?BAD_REQUEST('NODE_ERROR', <<"Node not found">>)
     end.

 -spec get_trace_size() -> #{{node(), file:name_all()} => non_neg_integer()}.
@@ -633,8 +635,12 @@ read_file(Path, Offset, Bytes) ->
 parse_node(Query, Default) ->
     try
         case maps:find(<<"node">>, Query) of
-            error -> {ok, Default};
-            {ok, Node} -> {ok, binary_to_existing_atom(Node)}
+            error ->
+                {ok, Default};
+            {ok, NodeBin} ->
+                Node = binary_to_existing_atom(NodeBin),
+                true = lists:member(Node, mria_mnesia:running_nodes()),
+                {ok, Node}
         end
     catch
         _:_ ->
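
Review note: `binary_to_existing_atom/1` throws for names that were never atoms, and the `true = lists:member(...)` assertion throws for known atoms that are not running nodes; both failures land in the surrounding `catch` clause, so any bad `node=` query parameter yields one uniform error. A tiny sketch of the pattern (the allow-list is passed in to keep the example self-contained):

    -module(parse_node_demo).
    -export([parse/2]).

    %% Validate a user-supplied node name without creating new atoms.
    parse(NodeBin, RunningNodes) ->
        try
            Node = binary_to_existing_atom(NodeBin),
            true = lists:member(Node, RunningNodes),
            {ok, Node}
        catch
            _:_ -> {error, not_found}
        end.

    %% parse(<<"nonode@nohost">>, [node()]) -> {ok, nonode@nohost} in a plain shell
    %% parse(<<"no_such_node_anywhere">>, [node()]) -> {error, not_found}
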
@@ -228,10 +228,10 @@ t_configs_node({'end', _}) ->
 t_configs_node(_) ->
     Node = atom_to_list(node()),

-    ?assertEqual({ok, <<"self">>}, get_configs(Node, #{return_body => true})),
-    ?assertEqual({ok, <<"other">>}, get_configs("other_node", #{return_body => true})),
+    ?assertEqual({ok, <<"self">>}, get_configs(Node, #{return_all => true})),
+    ?assertEqual({ok, <<"other">>}, get_configs("other_node", #{return_all => true})),

-    {ExpType, ExpRes} = get_configs("unknown_node", #{return_body => true}),
+    {ExpType, ExpRes} = get_configs("unknown_node", #{return_all => true}),
     ?assertEqual(error, ExpType),
     ?assertMatch({{_, 404, _}, _, _}, ExpRes),
     {_, _, Body} = ExpRes,
@@ -264,6 +264,7 @@ get_configs(Node, Opts) ->
         end,
     URI = emqx_mgmt_api_test_util:api_path(Path),
     case emqx_mgmt_api_test_util:request_api(get, URI, [], [], [], Opts) of
+        {ok, {_, _, Res}} -> {ok, emqx_json:decode(Res, [return_maps])};
         {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])};
         Error -> Error
     end.

@@ -163,7 +163,7 @@ t_publish_too_large(Config) ->
         "",
         Auth,
         Body,
-        #{return_body => true}
+        #{return_all => true}
     ),
     ?assertMatch({_, 400, _}, Summary),
     ?assertMatch(
@@ -286,7 +286,7 @@ t_publish_bulk_dispatch_one_message_invalid_topic(Config) when is_list(Config) ->
         "",
         Auth,
         Body,
-        #{return_body => true}
+        #{return_all => true}
     ),
     ?assertMatch({_, 400, _}, Summary),
     ?assertMatch(
@@ -325,7 +325,7 @@ t_publish_bulk_dispatch_failure(Config) when is_list(Config) ->
         "",
         Auth,
         Body,
-        #{return_body => true}
+        #{return_all => true}
     ),
     ?assertMatch({_, 503, _}, Summary),
     ?assertMatch(

@@ -89,16 +89,20 @@ request_api(Method, Url, QueryParams, AuthOrHeaders, Body, Opts) when
     ).

 do_request_api(Method, Request, Opts) ->
-    ReturnBody = maps:get(return_body, Opts, false),
+    ReturnAll = maps:get(return_all, Opts, false),
     ct:pal("Method: ~p, Request: ~p", [Method, Request]),
     case httpc:request(Method, Request, [], []) of
         {error, socket_closed_remotely} ->
             {error, socket_closed_remotely};
-        {ok, {{"HTTP/1.1", Code, _}, _, Return}} when
+        {ok, {{"HTTP/1.1", Code, _} = Reason, Headers, Body}} when
+            Code >= 200 andalso Code =< 299 andalso ReturnAll
+        ->
+            {ok, {Reason, Headers, Body}};
+        {ok, {{"HTTP/1.1", Code, _}, _, Body}} when
             Code >= 200 andalso Code =< 299
         ->
-            {ok, Return};
-        {ok, {Reason, Headers, Body}} when ReturnBody ->
+            {ok, Body};
+        {ok, {Reason, Headers, Body}} when ReturnAll ->
             {error, {Reason, Headers, Body}};
         {ok, {Reason, _Headers, _Body}} ->
             {error, Reason}
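
Review note: with `return_all => true`, a successful request now returns `{ok, {StatusLine, Headers, Body}}` instead of bare `{ok, Body}`, so callers can assert on status codes (as the LwM2M and configs suites above do). A sketch of consuming both result shapes (`handle_result/1` is a hypothetical helper, not part of the patch):

    -module(request_result_demo).
    -export([handle_result/1]).

    %% return_all => true: the status line and headers are available.
    handle_result({ok, {{"HTTP/1.1", Code, _}, _Headers, Body}}) ->
        {Code, Body};
    %% default: only the body of a 2xx response is returned.
    handle_result({ok, Body}) ->
        {ok, Body};
    handle_result({error, Reason}) ->
        {error, Reason}.
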
@@ -149,7 +149,7 @@ t_create_failed(_Config) ->
     {ok, Create} = request_api(post, api_path("trace"), Header, [GoodName | Trace]),
     ?assertMatch(#{<<"name">> := <<"test-name-0">>}, json(Create)),
     ?assertMatch(
-        {error, {"HTTP/1.1", 400, _}, _},
+        {error, {"HTTP/1.1", 409, _}, _},
         request_api(post, api_path("trace"), Header, [GoodName | Trace])
     ),

@@ -171,6 +171,16 @@ t_create_failed(_Config) ->
         {error, {"HTTP/1.1", 400, _}, _},
         request_api(post, api_path("trace"), Header, [GoodName1 | Trace])
     ),
+    %% clear
+    ?assertMatch({ok, _}, request_api(delete, api_path("trace"), Header, [])),
+    {ok, Create} = request_api(post, api_path("trace"), Header, [GoodName | Trace]),
+    %% new name but same trace
+    GoodName2 = {<<"name">>, <<"test-name-1">>},
+    ?assertMatch(
+        {error, {"HTTP/1.1", 409, _}, _},
+        request_api(post, api_path("trace"), Header, [GoodName2 | Trace])
+    ),

     unload(),
     emqx_trace:clear(),
     ok.
|
@ -213,6 +223,27 @@ t_log_file(_Config) ->
|
||||||
Path = api_path("trace/test_client_id/download?node=" ++ atom_to_list(node())),
|
Path = api_path("trace/test_client_id/download?node=" ++ atom_to_list(node())),
|
||||||
{ok, Binary2} = request_api(get, Path, Header),
|
{ok, Binary2} = request_api(get, Path, Header),
|
||||||
?assertEqual(ZipTab, zip:table(Binary2)),
|
?assertEqual(ZipTab, zip:table(Binary2)),
|
||||||
|
{error, {_, 400, _}, _} =
|
||||||
|
request_api(
|
||||||
|
get,
|
||||||
|
api_path("trace/test_client_id/download?node=unknonwn_node"),
|
||||||
|
Header
|
||||||
|
),
|
||||||
|
{error, {_, 400, _}, _} =
|
||||||
|
request_api(
|
||||||
|
get,
|
||||||
|
% known atom but unknown node
|
||||||
|
api_path("trace/test_client_id/download?node=undefined"),
|
||||||
|
Header
|
||||||
|
),
|
||||||
|
?assertMatch(
|
||||||
|
{error, {"HTTP/1.1", 404, "Not Found"}, _},
|
||||||
|
request_api(
|
||||||
|
get,
|
||||||
|
api_path("trace/test_client_not_found/download?node=" ++ atom_to_list(node())),
|
||||||
|
Header
|
||||||
|
)
|
||||||
|
),
|
||||||
ok = emqtt:disconnect(Client),
|
ok = emqtt:disconnect(Client),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
@@ -267,6 +298,25 @@ t_stream_log(_Config) ->
     #{<<"meta">> := Meta1, <<"items">> := Bin1} = json(Binary1),
     ?assertEqual(#{<<"position">> => 30, <<"bytes">> => 10}, Meta1),
     ?assertEqual(10, byte_size(Bin1)),
+    {error, {_, 400, _}, _} =
+        request_api(
+            get,
+            api_path("trace/test_stream_log/log?node=unknonwn_node"),
+            Header
+        ),
+    {error, {_, 400, _}, _} =
+        request_api(
+            get,
+            % known atom but not a node
+            api_path("trace/test_stream_log/log?node=undefined"),
+            Header
+        ),
+    {error, {_, 404, _}, _} =
+        request_api(
+            get,
+            api_path("trace/test_stream_log_not_found/log"),
+            Header
+        ),
     unload(),
     ok.

@@ -24,8 +24,13 @@
 -define(CR, 13).
 -define(LF, 10).

-all() ->
-    emqx_common_test_helpers:all(?MODULE).
+all() -> [{group, normal}, {group, ciphers}].
+
+groups() ->
+    [
+        {normal, [], emqx_common_test_helpers:all(?MODULE)},
+        {ciphers, [], [ciphers_test]}
+    ].

 init_per_suite(Config) ->
     meck:new(emqx_config, [non_strict, passthrough, no_history, no_link]),
@@ -128,3 +133,47 @@ t_trim_crlf(_) ->
     ?assertEqual(Bin, emqx_psk:trim_crlf(Bin)),
     ?assertEqual(Bin, emqx_psk:trim_crlf(<<Bin/binary, ?LF>>)),
     ?assertEqual(Bin, emqx_psk:trim_crlf(<<Bin/binary, ?CR, ?LF>>)).
+
+ciphers_test(Config) ->
+    Ciphers = [
+        "PSK-AES256-GCM-SHA384",
+        "PSK-AES128-GCM-SHA256",
+        "PSK-AES256-CBC-SHA384",
+        "PSK-AES256-CBC-SHA",
+        "PSK-AES128-CBC-SHA256",
+        "PSK-AES128-CBC-SHA"
+    ],
+    lists:foreach(fun(Cipher) -> cipher_test(Cipher, Config) end, Ciphers).
+
+cipher_test(Cipher, _) ->
+    ct:pal("Test PSK with Cipher:~p~n", [Cipher]),
+    PSKIdentity1 = "myclient1",
+    SharedSecret1 = <<"8c701116e9127c57a99d5563709af3deaca75563e2c4dd0865701ae839fb6d79">>,
+
+    ClientLookup = fun
+        (psk, undefined, _) -> {ok, SharedSecret1};
+        (psk, _, _) -> error
+    end,
+
+    ClientTLSOpts = #{
+        versions => ['tlsv1.2'],
+        ciphers => [Cipher],
+        psk_identity => PSKIdentity1,
+        verify => verify_none,
+        user_lookup_fun => {ClientLookup, undefined}
+    },
+
+    ServerTLSOpts = #{
+        versions => ['tlsv1.2'],
+        ciphers => [Cipher],
+        verify => verify_none,
+        reuseaddr => true,
+        user_lookup_fun => {fun emqx_tls_psk:lookup/3, undefined}
+    },
+    emqx_config:put([listeners, ssl, default, ssl_options], ServerTLSOpts),
+    emqx_listeners:restart_listener('ssl:default'),
+
+    {ok, Socket} = ssl:connect("127.0.0.1", 8883, maps:to_list(ClientTLSOpts)),
+    ssl:close(Socket),
+
+    ok.
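
Review note: the new group runs one TLS handshake per PSK cipher suite, so a failure pinpoints exactly which suite the local crypto stack rejects. Whether a suite can work at all depends on the OTP/OpenSSL build; a quick way to list what is locally available (a sketch assuming OTP 22+ for `ssl:cipher_suites/3`):

    -module(psk_suite_check).
    -export([psk_suites/0]).

    %% Returns OpenSSL-style suite names like the ones in the Ciphers list above.
    psk_suites() ->
        [S || S <- ssl:cipher_suites(all, 'tlsv1.2', openssl), lists:prefix("PSK-", S)].
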
@@ -2,7 +2,7 @@
 {application, emqx_retainer, [
     {description, "EMQX Retainer"},
     % strict semver, bump manually!
-    {vsn, "5.0.7"},
+    {vsn, "5.0.8"},
     {modules, []},
     {registered, [emqx_retainer_sup]},
     {applications, [kernel, stdlib, emqx]},

@@ -38,11 +38,9 @@

 %% Internal exports (RPC)
 -export([
-    do_store_retained/1,
-    do_clear_expired/0,
-    do_delete_message/1,
     do_populate_index_meta/1,
-    do_reindex_batch/2
+    do_reindex_batch/2,
+    active_indices/0
 ]).

 %% Management API:
@@ -66,6 +64,8 @@
 -define(CLEAR_BATCH_SIZE, 1000).
 -define(REINDEX_BATCH_SIZE, 1000).
 -define(REINDEX_DISPATCH_WAIT, 30000).
+-define(REINDEX_RPC_RETRY_INTERVAL, 1000).
+-define(REINDEX_INDEX_UPDATE_WAIT, 30000).

 %%--------------------------------------------------------------------
 %% Management API
@@ -136,64 +136,41 @@ create_table(Table, RecordName, Attributes, Type, StorageType) ->
     end.

 store_retained(_, Msg = #message{topic = Topic}) ->
-    case mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_store_retained/1, [Msg]) of
-        {atomic, ok} ->
-            ?tp(debug, message_retained, #{topic => Topic}),
-            ok;
-        {aborted, Reason} ->
+    ExpiryTime = emqx_retainer:get_expiry_time(Msg),
+    Tokens = topic_to_tokens(Topic),
+    case is_table_full() andalso is_new_topic(Tokens) of
+        true ->
             ?SLOG(error, #{
                 msg => "failed_to_retain_message",
                 topic => Topic,
-                reason => Reason
-            })
-    end.
-
-do_store_retained(#message{topic = Topic} = Msg) ->
-    ExpiryTime = emqx_retainer:get_expiry_time(Msg),
-    Tokens = topic_to_tokens(Topic),
-    case is_table_full() of
+                reason => table_is_full
+            });
         false ->
-            store_retained(db_indices(write), Msg, Tokens, ExpiryTime);
-        _ ->
-            case mnesia:read(?TAB_MESSAGE, Tokens, write) of
-                [_] ->
-                    store_retained(db_indices(write), Msg, Tokens, ExpiryTime);
-                [] ->
-                    mnesia:abort(table_is_full)
-            end
+            do_store_retained(Msg, Tokens, ExpiryTime)
     end.

 clear_expired(_) ->
-    {atomic, _} = mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_clear_expired/0),
-    ok.
-
-do_clear_expired() ->
     NowMs = erlang:system_time(millisecond),
     QH = qlc:q([
-        TopicTokens
+        RetainedMsg
         || #retained_message{
-               topic = TopicTokens,
                expiry_time = ExpiryTime
-           } <- mnesia:table(?TAB_MESSAGE, [{lock, write}]),
+           } = RetainedMsg <- ets:table(?TAB_MESSAGE),
        (ExpiryTime =/= 0) and (ExpiryTime < NowMs)
    ]),
    QC = qlc:cursor(QH),
-    clear_batch(db_indices(write), QC).
+    clear_batch(dirty_indices(write), QC).

 delete_message(_, Topic) ->
-    {atomic, _} = mria:transaction(?RETAINER_SHARD, fun ?MODULE:do_delete_message/1, [Topic]),
-    ok.
-
-do_delete_message(Topic) ->
     Tokens = topic_to_tokens(Topic),
     case emqx_topic:wildcard(Topic) of
         false ->
-            ok = delete_message_by_topic(Tokens, db_indices(write));
+            ok = delete_message_by_topic(Tokens, dirty_indices(write));
         true ->
-            QH = topic_search_table(Tokens),
+            QH = search_table(Tokens, 0),
             qlc:fold(
-                fun(TopicTokens, _) ->
-                    ok = delete_message_by_topic(TopicTokens, db_indices(write))
+                fun(RetainedMsg, _) ->
+                    ok = delete_message_with_indices(RetainedMsg, dirty_indices(write))
                 end,
                 undefined,
                 QH
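
Review note: `clear_expired/1` now scans with QLC over `ets:table/1` (a dirty read) instead of inside a locking mnesia transaction. A self-contained sketch of the same filter shape over a plain ETS table (inside a module, `qlc.hrl` enables the `qlc:q/1` parse transform; table contents are made up):

    -module(qlc_ets_demo).
    -include_lib("stdlib/include/qlc.hrl").
    -export([expired/1]).

    %% Return keys whose expiry time is non-zero and in the past,
    %% mirroring the comprehension in clear_expired/1.
    expired(Now) ->
        Tab = ets:new(demo, [set]),
        true = ets:insert(Tab, [{a, 100}, {b, 0}, {c, 50}]),
        QH = qlc:q([K || {K, Exp} <- ets:table(Tab), (Exp =/= 0) and (Exp < Now)]),
        qlc:eval(QH).

    %% qlc_ets_demo:expired(60) -> [c]  (b never expires, a expires later)
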
@@ -206,7 +183,7 @@ read_message(_, Topic) ->
 match_messages(_, Topic, undefined) ->
     Tokens = topic_to_tokens(Topic),
     Now = erlang:system_time(millisecond),
-    QH = search_table(Tokens, Now),
+    QH = msg_table(search_table(Tokens, Now)),
     case batch_read_number() of
         all_remaining ->
             {ok, qlc:eval(QH), undefined};
@@ -227,10 +204,10 @@ page_read(_, Topic, Page, Limit) ->
     QH =
         case Topic of
             undefined ->
-                search_table(undefined, ['#'], Now);
+                msg_table(search_table(undefined, ['#'], Now));
             _ ->
                 Tokens = topic_to_tokens(Topic),
-                search_table(Tokens, Now)
+                msg_table(search_table(Tokens, Now))
         end,
     OrderedQH = qlc:sort(QH, {order, fun compare_message/2}),
     Cursor = qlc:cursor(OrderedQH),
@@ -281,49 +258,49 @@ reindex_status() ->
 %% Internal functions
 %%--------------------------------------------------------------------

-store_retained(Indices, Msg, Tokens, ExpiryTime) ->
-    ok = store_retained_message(Msg, Tokens, ExpiryTime),
-    ok = emqx_retainer_index:foreach_index_key(
-        fun(Key) -> store_retained_index(Key, ExpiryTime) end,
-        Indices,
-        Tokens
-    ).
+do_store_retained(Msg, TopicTokens, ExpiryTime) ->
+    %% The retained message is stored synchronously on all core nodes
+    ok = do_store_retained_message(Msg, TopicTokens, ExpiryTime),
+    %% Since the retained message was stored synchronously on all core nodes,
+    %% we are now sure that
+    %% * either we will write correct indices,
+    %% * or, if we are a replicant with outdated write indices due to reindexing,
+    %%   the correct indices will be added by reindexing
+    ok = do_store_retained_indices(TopicTokens, ExpiryTime).

-store_retained_message(Msg, Tokens, ExpiryTime) ->
+do_store_retained_message(Msg, TopicTokens, ExpiryTime) ->
     RetainedMessage = #retained_message{
-        topic = Tokens,
+        topic = TopicTokens,
         msg = Msg,
         expiry_time = ExpiryTime
     },
-    mnesia:write(?TAB_MESSAGE, RetainedMessage, write).
+    ok = mria:dirty_write_sync(?TAB_MESSAGE, RetainedMessage).

-store_retained_index(Key, ExpiryTime) ->
+do_store_retained_indices(TopicTokens, ExpiryTime) ->
+    Indices = dirty_indices(write),
+    ok = emqx_retainer_index:foreach_index_key(
+        fun(Key) -> do_store_retained_index(Key, ExpiryTime) end,
+        Indices,
+        TopicTokens
+    ).
+
+do_store_retained_index(Key, ExpiryTime) ->
     RetainedIndex = #retained_index{
         key = Key,
         expiry_time = ExpiryTime
     },
-    mnesia:write(?TAB_INDEX, RetainedIndex, write).
+    mria:dirty_write(?TAB_INDEX, RetainedIndex).

-topic_search_table(Tokens) ->
-    Index = emqx_retainer_index:select_index(Tokens, db_indices(read)),
-    topic_search_table(Index, Tokens).
-
-topic_search_table(undefined, Tokens) ->
-    Cond = emqx_retainer_index:condition(Tokens),
-    Ms = [{#retained_message{topic = Cond, msg = '_', expiry_time = '_'}, [], ['$_']}],
-    MsgQH = mnesia:table(?TAB_MESSAGE, [{traverse, {select, Ms}}]),
-    qlc:q([Topic || #retained_message{topic = Topic} <- MsgQH]);
-topic_search_table(Index, Tokens) ->
-    Cond = emqx_retainer_index:condition(Index, Tokens),
-    Ms = [{#retained_index{key = Cond, expiry_time = '_'}, [], ['$_']}],
-    IndexQH = mnesia:table(?TAB_INDEX, [{traverse, {select, Ms}}]),
+msg_table(SearchTable) ->
     qlc:q([
-        emqx_retainer_index:restore_topic(Key)
-        || #retained_index{key = Key} <- IndexQH
+        Msg
+        || #retained_message{
+               msg = Msg
+           } <- SearchTable
     ]).

 search_table(Tokens, Now) ->
-    Indices = dirty_read_indices(),
+    Indices = dirty_indices(read),
     Index = emqx_retainer_index:select_index(Tokens, Indices),
     search_table(Index, Tokens, Now).
@ -341,26 +318,21 @@ search_table(Index, Tokens, Now) ->
|
||||||
|| TopicTokens <- Topics
|
|| TopicTokens <- Topics
|
||||||
]),
|
]),
|
||||||
qlc:q([
|
qlc:q([
|
||||||
Msg
|
RetainedMsg
|
||||||
|| [
|
|| [
|
||||||
#retained_message{
|
#retained_message{
|
||||||
msg = Msg,
|
|
||||||
expiry_time = ExpiryTime
|
expiry_time = ExpiryTime
|
||||||
}
|
} = RetainedMsg
|
||||||
] <- RetainedMsgQH,
|
] <- RetainedMsgQH,
|
||||||
(ExpiryTime == 0) or (ExpiryTime > Now)
|
(ExpiryTime == 0) or (ExpiryTime > Now)
|
||||||
]).
|
]).
|
||||||
|
|
||||||
dirty_read_indices() ->
|
|
||||||
case ets:lookup(?TAB_INDEX_META, ?META_KEY) of
|
|
||||||
[#retained_index_meta{read_indices = ReadIndices}] -> ReadIndices;
|
|
||||||
[] -> []
|
|
||||||
end.
|
|
||||||
|
|
||||||
clear_batch(Indices, QC) ->
|
clear_batch(Indices, QC) ->
|
||||||
{Result, Rows} = qlc_next_answers(QC, ?CLEAR_BATCH_SIZE),
|
{Result, Rows} = qlc_next_answers(QC, ?CLEAR_BATCH_SIZE),
|
||||||
lists:foreach(
|
lists:foreach(
|
||||||
fun(TopicTokens) -> delete_message_by_topic(TopicTokens, Indices) end,
|
fun(RetainedMsg) ->
|
||||||
|
delete_message_with_indices(RetainedMsg, Indices)
|
||||||
|
end,
|
||||||
Rows
|
Rows
|
||||||
),
|
),
|
||||||
case Result of
|
case Result of
|
||||||
|
@ -369,14 +341,23 @@ clear_batch(Indices, QC) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
delete_message_by_topic(TopicTokens, Indices) ->
|
delete_message_by_topic(TopicTokens, Indices) ->
|
||||||
|
case mnesia:dirty_read(?TAB_MESSAGE, TopicTokens) of
|
||||||
|
[] -> ok;
|
||||||
|
[RetainedMsg] -> delete_message_with_indices(RetainedMsg, Indices)
|
||||||
|
end.
|
||||||
|
|
||||||
|
delete_message_with_indices(RetainedMsg, Indices) ->
|
||||||
|
#retained_message{topic = TopicTokens, expiry_time = ExpiryTime} = RetainedMsg,
|
||||||
ok = emqx_retainer_index:foreach_index_key(
|
ok = emqx_retainer_index:foreach_index_key(
|
||||||
fun(Key) ->
|
fun(Key) ->
|
||||||
mnesia:delete({?TAB_INDEX, Key})
|
mria:dirty_delete_object(?TAB_INDEX, #retained_index{
|
||||||
|
key = Key, expiry_time = ExpiryTime
|
||||||
|
})
|
||||||
end,
|
end,
|
||||||
Indices,
|
Indices,
|
||||||
TopicTokens
|
TopicTokens
|
||||||
),
|
),
|
||||||
ok = mnesia:delete({?TAB_MESSAGE, TopicTokens}).
|
ok = mria:dirty_delete_object(?TAB_MESSAGE, RetainedMsg).
|
||||||
|
|
||||||
compare_message(M1, M2) ->
|
compare_message(M1, M2) ->
|
||||||
M1#message.timestamp =< M2#message.timestamp.
|
M1#message.timestamp =< M2#message.timestamp.
|
||||||
|
@ -415,20 +396,26 @@ qlc_next_answers(QC, N) ->
|
||||||
|
|
||||||
make_message_match_spec(Tokens, NowMs) ->
|
make_message_match_spec(Tokens, NowMs) ->
|
||||||
Cond = emqx_retainer_index:condition(Tokens),
|
Cond = emqx_retainer_index:condition(Tokens),
|
||||||
MsHd = #retained_message{topic = Cond, msg = '$2', expiry_time = '$3'},
|
MsHd = #retained_message{topic = Cond, msg = '_', expiry_time = '$3'},
|
||||||
[{MsHd, [{'orelse', {'=:=', '$3', 0}, {'>', '$3', NowMs}}], ['$2']}].
|
[{MsHd, [{'orelse', {'=:=', '$3', 0}, {'>', '$3', NowMs}}], ['$_']}].
|
||||||
|
|
||||||
make_index_match_spec(Index, Tokens, NowMs) ->
|
make_index_match_spec(Index, Tokens, NowMs) ->
|
||||||
Cond = emqx_retainer_index:condition(Index, Tokens),
|
Cond = emqx_retainer_index:condition(Index, Tokens),
|
||||||
MsHd = #retained_index{key = Cond, expiry_time = '$3'},
|
MsHd = #retained_index{key = Cond, expiry_time = '$3'},
|
||||||
[{MsHd, [{'orelse', {'=:=', '$3', 0}, {'>', '$3', NowMs}}], ['$_']}].
|
[{MsHd, [{'orelse', {'=:=', '$3', 0}, {'>', '$3', NowMs}}], ['$_']}].
|
||||||
|
|
||||||
-spec is_table_full() -> boolean().
|
|
||||||
is_table_full() ->
|
is_table_full() ->
|
||||||
Limit = emqx:get_config([retainer, backend, max_retained_messages]),
|
Limit = emqx:get_config([retainer, backend, max_retained_messages]),
|
||||||
Limit > 0 andalso (table_size() >= Limit).
|
Limit > 0 andalso (table_size() >= Limit).
|
||||||
|
|
||||||
-spec table_size() -> non_neg_integer().
|
is_new_topic(Tokens) ->
|
||||||
|
case mnesia:dirty_read(?TAB_MESSAGE, Tokens) of
|
||||||
|
[_] ->
|
||||||
|
false;
|
||||||
|
[] ->
|
||||||
|
true
|
||||||
|
end.
|
||||||
|
|
||||||
table_size() ->
|
table_size() ->
|
||||||
mnesia:table_info(?TAB_MESSAGE, size).
|
mnesia:table_info(?TAB_MESSAGE, size).
|
||||||
|
|
||||||
|
@ -486,8 +473,14 @@ do_populate_index_meta(ConfigIndices) ->
|
||||||
)
|
)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
dirty_indices(Type) ->
|
||||||
|
indices(ets:lookup(?TAB_INDEX_META, ?META_KEY), Type).
|
||||||
|
|
||||||
db_indices(Type) ->
|
db_indices(Type) ->
|
||||||
case mnesia:read(?TAB_INDEX_META, ?META_KEY) of
|
indices(mnesia:read(?TAB_INDEX_META, ?META_KEY), Type).
|
||||||
|
|
||||||
|
indices(IndexRecords, Type) ->
|
||||||
|
case IndexRecords of
|
||||||
[#retained_index_meta{read_indices = ReadIndices, write_indices = WriteIndices}] ->
|
[#retained_index_meta{read_indices = ReadIndices, write_indices = WriteIndices}] ->
|
||||||
case Type of
|
case Type of
|
||||||
read -> ReadIndices;
|
read -> ReadIndices;
|
||||||
|
@ -506,10 +499,15 @@ batch_read_number() ->
|
||||||
reindex(NewIndices, Force, StatusFun) when
|
reindex(NewIndices, Force, StatusFun) when
|
||||||
is_boolean(Force) andalso is_function(StatusFun, 1)
|
is_boolean(Force) andalso is_function(StatusFun, 1)
|
||||||
->
|
->
|
||||||
|
%% Do not run on replicants
|
||||||
|
core = mria_rlog:role(),
|
||||||
%% Disable read indices and update write indices so that new records are written
|
%% Disable read indices and update write indices so that new records are written
|
||||||
%% with correct indices. Also block parallel reindexing.
|
%% with correct indices. Also block parallel reindexing.
|
||||||
case try_start_reindex(NewIndices, Force) of
|
case try_start_reindex(NewIndices, Force) of
|
||||||
{atomic, ok} ->
|
{atomic, ok} ->
|
||||||
|
%% Wait for all nodes to have new indices, including rlog nodes
|
||||||
|
true = wait_indices_updated({[], NewIndices}, ?REINDEX_INDEX_UPDATE_WAIT),
|
||||||
|
|
||||||
%% Wait for all dispatch operations to be completed to avoid
|
%% Wait for all dispatch operations to be completed to avoid
|
||||||
%% inconsistent results.
|
%% inconsistent results.
|
||||||
true = wait_dispatch_complete(?REINDEX_DISPATCH_WAIT),
|
true = wait_dispatch_complete(?REINDEX_DISPATCH_WAIT),
|
||||||
|
@ -592,7 +590,7 @@ reindex_topic(Indices, Topic) ->
|
||||||
case mnesia:read(?TAB_MESSAGE, Topic, read) of
|
case mnesia:read(?TAB_MESSAGE, Topic, read) of
|
||||||
[#retained_message{expiry_time = ExpiryTime}] ->
|
[#retained_message{expiry_time = ExpiryTime}] ->
|
||||||
ok = emqx_retainer_index:foreach_index_key(
|
ok = emqx_retainer_index:foreach_index_key(
|
||||||
fun(Key) -> store_retained_index(Key, ExpiryTime) end,
|
fun(Key) -> do_store_retained_index(Key, ExpiryTime) end,
|
||||||
Indices,
|
Indices,
|
||||||
Topic
|
Topic
|
||||||
);
|
);
|
||||||
|
@ -627,8 +625,35 @@ do_reindex_batch(QC, Done) ->
|
||||||
|
|
||||||
wait_dispatch_complete(Timeout) ->
|
wait_dispatch_complete(Timeout) ->
|
||||||
Nodes = mria_mnesia:running_nodes(),
|
Nodes = mria_mnesia:running_nodes(),
|
||||||
{Results, []} = emqx_retainer_proto_v1:wait_dispatch_complete(Nodes, Timeout),
|
{Results, []} = emqx_retainer_proto_v2:wait_dispatch_complete(Nodes, Timeout),
|
||||||
lists:all(
|
lists:all(
|
||||||
fun(Result) -> Result =:= ok end,
|
fun(Result) -> Result =:= ok end,
|
||||||
Results
|
Results
|
||||||
).
|
).
|
||||||
|
|
||||||
|
wait_indices_updated(_Indices, TimeLeft) when TimeLeft < 0 -> false;
|
||||||
|
wait_indices_updated(Indices, TimeLeft) ->
|
||||||
|
case timer:tc(fun() -> are_indices_updated(Indices) end) of
|
||||||
|
{_, true} ->
|
||||||
|
true;
|
||||||
|
{TimePassed, false} ->
|
||||||
|
timer:sleep(?REINDEX_RPC_RETRY_INTERVAL),
|
||||||
|
wait_indices_updated(
|
||||||
|
Indices, TimeLeft - ?REINDEX_RPC_RETRY_INTERVAL - TimePassed / 1000
|
||||||
|
)
|
||||||
|
end.
|
||||||
|
|
||||||
|
active_indices() ->
|
||||||
|
{dirty_indices(read), dirty_indices(write)}.
|
||||||
|
|
||||||
|
are_indices_updated(Indices) ->
|
||||||
|
Nodes = mria_mnesia:running_nodes(),
|
||||||
|
case emqx_retainer_proto_v2:active_mnesia_indices(Nodes) of
|
||||||
|
{Results, []} ->
|
||||||
|
lists:all(
|
||||||
|
fun(NodeIndices) -> NodeIndices =:= Indices end,
|
||||||
|
Results
|
||||||
|
);
|
||||||
|
_ ->
|
||||||
|
false
|
||||||
|
end.
|
||||||
|
|
|
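A note on the pair of helpers above: `is_table_full/0` and `is_new_topic/1` together let the backend enforce the `max_retained_messages` limit while still accepting overwrites of topics that are already retained, since an overwrite does not grow the table. A minimal sketch of such a caller follows; the function name `maybe_store_retained/3` is hypothetical and not part of this diff:

    %% Hypothetical caller: reject the write only when the table is at its
    %% limit AND the topic is not already retained (an overwrite is fine).
    maybe_store_retained(Msg, TopicTokens, ExpiryTime) ->
        case is_table_full() andalso is_new_topic(TopicTokens) of
            true -> {error, retainer_table_is_full};
            false -> do_store_retained(Msg, TopicTokens, ExpiryTime)
        end.
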
@@ -50,11 +50,39 @@ retainer(["reindex", "status"]) ->
 retainer(["reindex", "start"]) ->
     retainer(["reindex", "start", "false"]);
 retainer(["reindex", "start", ForceParam]) ->
-    Force =
-        case ForceParam of
-            "true" -> true;
-            _ -> false
-        end,
+    case mria_rlog:role() of
+        core ->
+            Force =
+                case ForceParam of
+                    "true" -> true;
+                    _ -> false
+                end,
+            do_reindex(Force);
+        replicant ->
+            ?PRINT_MSG("Can't run reindex on a replicant node")
+    end;
+retainer(_) ->
+    emqx_ctl:usage(
+        [
+            {"retainer info", "Show the count of retained messages"},
+            {"retainer topics", "Show all topics of retained messages"},
+            {"retainer clean", "Clean all retained messages"},
+            {"retainer clean <Topic>", "Clean retained messages by the specified topic filter"},
+            {"retainer reindex status", "Show reindex status"},
+            {"retainer reindex start [force]",
+                "Generate new retainer topic indices from config settings.\n"
+                "Pass true as <Force> to ignore previously started reindexing"}
+        ]
+    ).
+
+unload() ->
+    ok = emqx_ctl:unregister_command(retainer).
+
+%%------------------------------------------------------------------------------
+%% Private
+%%------------------------------------------------------------------------------
+
+do_reindex(Force) ->
     ?PRINT_MSG("Starting reindexing~n"),
     emqx_retainer_mnesia:reindex(
         Force,
@@ -69,20 +97,4 @@ retainer(["reindex", "start", ForceParam]) ->
             ?PRINT("Reindexed ~p messages~n", [Done])
         end
     ),
-    ?PRINT_MSG("Reindexing finished~n");
-retainer(_) ->
-    emqx_ctl:usage(
-        [
-            {"retainer info", "Show the count of retained messages"},
-            {"retainer topics", "Show all topics of retained messages"},
-            {"retainer clean", "Clean all retained messages"},
-            {"retainer clean <Topic>", "Clean retained messages by the specified topic filter"},
-            {"retainer reindex status", "Show reindex status"},
-            {"retainer reindex start [force]",
-                "Generate new retainer topic indices config settings.\n"
-                "Pass true as <Force> to ignore previously started reindexing"}
-        ]
-    ).
-
-unload() ->
-    ok = emqx_ctl:unregister_command(retainer).
+    ?PRINT_MSG("Reindexing finished~n").

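For context, the `do_reindex/1` helper introduced above drives `emqx_retainer_mnesia:reindex/2` with a status callback; judging from the `?PRINT("Reindexed ~p messages~n", [Done])` line, that callback receives the running count of reindexed messages. A rough sketch of the equivalent call from a core node's Erlang shell — the arity-2 wrapper and the callback shape are assumptions based on the CLI code above:

    %% Sketch: Force = false refuses to override a previously started
    %% reindex; the fun is invoked with the number of messages processed.
    StatusFun = fun(Done) -> io:format("reindexed ~p retained messages~n", [Done]) end,
    emqx_retainer_mnesia:reindex(false, StatusFun).
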
@@ -0,0 +1,41 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+
+-module(emqx_retainer_proto_v2).
+
+-behaviour(emqx_bpapi).
+
+-include_lib("emqx/include/bpapi.hrl").
+
+-export([
+    introduced_in/0,
+    wait_dispatch_complete/2,
+    active_mnesia_indices/1
+]).
+
+-define(TIMEOUT, 5000).
+
+introduced_in() ->
+    "5.0.13".
+
+-spec wait_dispatch_complete(list(node()), timeout()) -> emqx_rpc:multicall_result(ok).
+wait_dispatch_complete(Nodes, Timeout) ->
+    rpc:multicall(Nodes, emqx_retainer_dispatcher, wait_dispatch_complete, [Timeout]).
+
+-spec active_mnesia_indices(list(node())) ->
+    emqx_rpc:multicall_result({list(emqx_retainer_index:index()), list(emqx_retainer_index:index())}).
+active_mnesia_indices(Nodes) ->
+    rpc:multicall(Nodes, emqx_retainer_mnesia, active_indices, [], ?TIMEOUT).

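A short usage sketch of this new BPAPI module, mirroring how `are_indices_updated/1` in the mnesia backend consumes it; the index value `{[], [[1, 3]]}` is purely illustrative:

    %% Ask every running node for its {ReadIndices, WriteIndices} pair and
    %% check that they have all converged to the expected value.
    Nodes = mria_mnesia:running_nodes(),
    {Replies, BadNodes} = emqx_retainer_proto_v2:active_mnesia_indices(Nodes),
    Synced = BadNodes =:= [] andalso
        lists:all(fun(NodeIndices) -> NodeIndices =:= {[], [[1, 3]]} end, Replies).
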
@@ -318,6 +318,25 @@ t_message_expiry_2(_) ->
     end,
     with_conf(ConfMod, Case).

+t_table_full(_) ->
+    ConfMod = fun(Conf) ->
+        Conf#{<<"backend">> => #{<<"max_retained_messages">> => <<"1">>}}
+    end,
+    Case = fun() ->
+        {ok, C1} = emqtt:start_link([{clean_start, true}, {proto_ver, v5}]),
+        {ok, _} = emqtt:connect(C1),
+        emqtt:publish(C1, <<"retained/t/1">>, <<"a">>, [{qos, 0}, {retain, true}]),
+        emqtt:publish(C1, <<"retained/t/2">>, <<"b">>, [{qos, 0}, {retain, true}]),
+
+        {ok, #{}, [0]} = emqtt:subscribe(C1, <<"retained/t/1">>, [{qos, 0}, {rh, 0}]),
+        ?assertEqual(1, length(receive_messages(1))),
+        {ok, #{}, [0]} = emqtt:subscribe(C1, <<"retained/t/2">>, [{qos, 0}, {rh, 0}]),
+        ?assertEqual(0, length(receive_messages(1))),
+
+        ok = emqtt:disconnect(C1)
+    end,
+    with_conf(ConfMod, Case).
+
 t_clean(_) ->
     {ok, C1} = emqtt:start_link([{clean_start, true}, {proto_ver, v5}]),
     {ok, _} = emqtt:connect(C1),

@@ -1,5 +1,11 @@
 # v5.0.12

+This version includes a refactoring of the MQTT bridge config.
+Config files created by v5.0.11 or earlier will be converted according to the new schema.
+
+Please note, the request body of the `/bridges` API used to configure MQTT bridges has changed in an incompatible way.
+
 ## Enhancements

 - Disable global garbage collection by `node.global_gc_interval = disabled` [#9418](https://github.com/emqx/emqx/pull/9418).
@@ -16,6 +22,28 @@

 - Redesign `/rules` API to make `metrics` a dedicated resource rather than being included with every response [#9461](https://github.com/emqx/emqx/pull/9461).

+- Add more PSK ciphers support [#9505](https://github.com/emqx/emqx/pull/9505).
+
+- Improve `emqx_retainer` write performance: get rid of transactions on write [#9372](https://github.com/emqx/emqx/pull/9372).
+
+- Upgrade the HTTP client library `ehttpc` from `0.4.0` to `0.4.2` [#9520](https://github.com/emqx/emqx/pull/9520).
+
+- Add a `handshake_timeout` option to the MQTT SSL listener [#9502](https://github.com/emqx/emqx/pull/9502).
+
+- Upgrade dashboard to [v1.1.3](https://github.com/emqx/emqx-dashboard-web-new/releases/tag/v1.1.3).
+
+- Users can define the `externalTrafficPolicy` of the service in the EMQX Helm Chart [#9527](https://github.com/emqx/emqx/pull/9527).
+
+- Return `204` instead of `200` for `POST /gateway/lwm2m/clients/{clientid}/{read,write,observe}` [#9480](https://github.com/emqx/emqx/pull/9480).
+
+- Make it possible to create an authentication entirely from environment variables [#9437](https://github.com/emqx/emqx/pull/9437).
+  As an example, one can now enable MySQL auth with:
+  `env EMQX_AUTHENTICATION__1='{mechanism="password_based",backend="mysql",server="localhost:3306",database="emqx",username="emqx",password="******",query="SELECT password_hash,salt FROM mqtt_user WHERE username=${username} LIMIT 1",enable=true}'`.
+  Prior to this change, overrides only worked on top of an existing authentication; for example, if there is already MySQL auth configured in `emqx.conf`
+  but we want to disable it, we can do so with `env EMQX_AUTHENTICATION__1__ENABLE=false`.
+
+- Start building packages for Amazon Linux 2 [#9537](https://github.com/emqx/emqx/pull/9537).
+
 ## Bug fixes

 - Fix that the obsolete SSL files aren't deleted after the ExHook config update [#9432](https://github.com/emqx/emqx/pull/9432).
@@ -25,3 +53,18 @@
 - Return `404` for `/telemetry/data` in case it's disabled [#9464](https://github.com/emqx/emqx/pull/9464).

 - Fix some potential MQTT packet parse errors [#9477](https://github.com/emqx/emqx/pull/9477).
+
+- Fixed EMQX Helm Chart deployment errors [#9509](https://github.com/emqx/emqx/pull/9509).
+  - Fixed the `Discovery error: no such service` error that occurred during Helm chart deployment, resulting in abnormal discovery of cluster nodes.
+  - Fixed an issue that caused the EMQX Helm Chart to fail when modifying some of EMQX's configuration items via environment variables.
+
+- Fix shadowing of `'client.authenticate'` callbacks by `emqx_authenticator`. Now `emqx_authenticator`
+  passes execution on to further callbacks if none of the authenticators matches [#9496](https://github.com/emqx/emqx/pull/9496).
+
+- Return `400` if the query param `node` is not a known node in `/trace/:id/download?node={node}` [#9478](https://github.com/emqx/emqx/pull/9478).
+
+- `POST /traces` now returns `409` in case of a duplicate [#9494](https://github.com/emqx/emqx/pull/9494).
+
+- Fix bridging: when both ingress and egress bridges are configured, the egress bridge does not work [#9523](https://github.com/emqx/emqx/pull/9523).
+
+- Fix EMQX Helm Chart using incorrect secret values when custom credentials are provided [#9536](https://github.com/emqx/emqx/pull/9536).

@@ -1,5 +1,10 @@
 # v5.0.12

+This version includes a refactoring of the MQTT bridge.
+Config files created by v5.0.11 or earlier will be converted automatically in the new version.
+
+Note that the structure of requests to the `/bridges` API used to configure MQTT bridges has changed in an incompatible way.
+
 ## Enhancements

 - Disable global garbage collection via `node.global_gc_interval = disabled` [#9418](https://github.com/emqx/emqx/pull/9418).
@@ -16,6 +21,28 @@

 - Redesigned the `/rules` API, making `metrics` a dedicated resource rather than being included in every response [#9461](https://github.com/emqx/emqx/pull/9461).

+- Support more PSK cipher suites [#9505](https://github.com/emqx/emqx/pull/9505).
+
+- Improve `emqx_retainer` write performance: get rid of transactions on write [#9372](https://github.com/emqx/emqx/pull/9372).
+
+- Upgrade the HTTP client library `ehttpc` from `0.4.0` to `0.4.2` [#9520](https://github.com/emqx/emqx/pull/9520).
+
+- Add a `handshake_timeout` option to the MQTT SSL listener [#9502](https://github.com/emqx/emqx/pull/9502).
+
+- Update the dashboard to [v1.1.3](https://github.com/emqx/emqx-dashboard-web-new/releases/tag/v1.1.3).
+
+- Users can customize the `externalTrafficPolicy` of the service resource in the EMQX Helm Chart [#9527](https://github.com/emqx/emqx/pull/9527).
+
+- Calls to `POST /gateway/lwm2m/clients/{clientid}/{read,write,observe}` now return 204 instead of 200 [#9480](https://github.com/emqx/emqx/pull/9480).
+
+- Allow creating an authentication configuration from environment variables [#9437](https://github.com/emqx/emqx/pull/9437).
+  For example, a MySQL authentication can now be created with:
+  `env EMQX_AUTHENTICATION__1='{mechanism="password_based",backend="mysql",server="localhost:3306",database="emqx",username="emqx",password="******",query="SELECT password_hash,salt FROM mqtt_user WHERE username=${username} LIMIT 1",enable=true}'`.
+  Previously, environment-variable overrides only applied on top of existing configuration; for example, when a MySQL authentication was already configured in `emqx.conf`, it could be disabled with
+  `env EMQX_AUTHENTICATION__1__ENABLE=false`.
+
+- Release installation packages for the Amazon Linux 2 platform [#9537](https://github.com/emqx/emqx/pull/9537).
+
 ## Bug fixes

 - Fix the issue that obsolete SSL files were not deleted after updating the ExHook SSL configuration [#9432](https://github.com/emqx/emqx/pull/9432).
@@ -25,3 +52,17 @@
 - Requesting `/telemetry/data` while telemetry is disabled now returns 404 [#9464](https://github.com/emqx/emqx/pull/9464).

 - Fix some potential MQTT packet parse errors [#9477](https://github.com/emqx/emqx/pull/9477).
+
+- Fixed several EMQX Helm Chart deployment issues [#9509](https://github.com/emqx/emqx/pull/9509).
+  - Fixed the `Discovery error: no such service` error during Helm chart deployment, which caused abnormal discovery of cluster nodes.
+  - Fixed errors when modifying some EMQX configuration items via environment variables in the EMQX Helm Chart.
+
+- Fix shadowing of `'client.authenticate'` callbacks by `emqx_authenticator`. Now `emqx_authenticator` passes execution on to further callbacks if none of the authenticators matches [#9496](https://github.com/emqx/emqx/pull/9496).
+
+- Return `400` when `node` does not exist in calls to `/trace/:id/download?node={node}` [#9478](https://github.com/emqx/emqx/pull/9478).
+
+- Duplicate calls to `POST /traces` now return `409` instead of `400` [#9494](https://github.com/emqx/emqx/pull/9494).
+
+- Fix bridging: when two bridges, one ingress and one egress, were configured together, the egress bridge did not work [#9523](https://github.com/emqx/emqx/pull/9523).
+
+- Fix the EMQX Helm Chart creating an incorrect Secret resource when a custom username and password are provided [#9536](https://github.com/emqx/emqx/pull/9536).

@@ -37,63 +37,64 @@ $ helm del my-emqx

 The following table lists the configurable parameters of the emqx chart and their default values.

 | Parameter | Description | Default Value |
 |-----------|-------------|---------------|
 | `replicaCount` | It is recommended to have an odd number of nodes in a cluster, otherwise the emqx cluster cannot be automatically healed in case of net-split. | 3 |
-| `image.repository` | EMQX Image name | emqx/emqx |
+| `image.repository` | EMQX Image name | `emqx/emqx-enterprise` |
 | `image.pullPolicy` | The image pull policy | IfNotPresent |
 | `image.pullSecrets` | The image pull secrets | `[]` (does not add image pull secrets to deployed pods) |
 | `envFromSecret` | The name of a secret in the same kubernetes namespace which contains values that will be added to the environment | nil |
 | `recreatePods` | Forces the recreation of pods during upgrades, which can be useful to always apply the most recent configuration. | false |
 | `podAnnotations` | Annotations for pod | `{}` |
 | `podManagementPolicy` | To redeploy a chart with existing PVC(s), the value must be set to Parallel to avoid deadlock | `Parallel` |
 | `persistence.enabled` | Enable EMQX persistence using PVC | false |
 | `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) |
 | `persistence.existingClaim` | EMQX data Persistent Volume existing claim name, evaluated as a template | "" |
 | `persistence.accessMode` | PVC Access Mode for EMQX volume | ReadWriteOnce |
 | `persistence.size` | PVC Storage Request for EMQX volume | 20Mi |
 | `initContainers` | Containers that run before the creation of EMQX containers. They can contain utilities or setup scripts. | `{}` |
 | `resources` | CPU/Memory resource requests/limits | {} |
 | `nodeSelector` | Node labels for pod assignment | `{}` |
 | `tolerations` | Toleration labels for pod assignment | `[]` |
 | `affinity` | Map of node/pod affinities | `{}` |
 | `service.type` | Kubernetes Service type. | ClusterIP |
 | `service.mqtt` | Port for MQTT. | 1883 |
 | `service.mqttssl` | Port for MQTT(SSL). | 8883 |
 | `service.ws` | Port for WebSocket/HTTP. | 8083 |
 | `service.wss` | Port for WSS/HTTPS. | 8084 |
 | `service.dashboard` | Port for dashboard and API. | 18083 |
 | `service.nodePorts.mqtt` | Kubernetes node port for MQTT. | nil |
 | `service.nodePorts.mqttssl` | Kubernetes node port for MQTT(SSL). | nil |
 | `service.nodePorts.mgmt` | Kubernetes node port for mgmt API. | nil |
 | `service.nodePorts.ws` | Kubernetes node port for WebSocket/HTTP. | nil |
 | `service.nodePorts.wss` | Kubernetes node port for WSS/HTTPS. | nil |
 | `service.nodePorts.dashboard` | Kubernetes node port for dashboard. | nil |
 | `service.loadBalancerIP` | loadBalancerIP for Service | nil |
 | `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | [] |
 | `service.externalIPs` | ExternalIPs for the service | [] |
+| `service.externalTrafficPolicy` | External Traffic Policy for the service | `Cluster` |
 | `service.annotations` | Service annotations | {} (evaluated as a template) |
 | `ingress.dashboard.enabled` | Enable ingress for EMQX Dashboard | false |
 | `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | |
 | `ingress.dashboard.path` | Ingress path for EMQX Dashboard | / |
 | `ingress.dashboard.pathType` | Ingress pathType for EMQX Dashboard | `ImplementationSpecific` |
 | `ingress.dashboard.hosts` | Ingress hosts for EMQX Dashboard | dashboard.emqx.local |
 | `ingress.dashboard.tls` | Ingress tls for EMQX Dashboard | [] |
 | `ingress.dashboard.annotations` | Ingress annotations for EMQX Dashboard | {} |
 | `ingress.mgmt.enabled` | Enable ingress for EMQX Mgmt API | false |
 | `ingress.mgmt.ingressClassName` | Set the ingress class for EMQX Mgmt API | |
 | `ingress.mgmt.path` | Ingress path for EMQX Mgmt API | / |
 | `ingress.mgmt.hosts` | Ingress hosts for EMQX Mgmt API | api.emqx.local |
 | `ingress.mgmt.tls` | Ingress tls for EMQX Mgmt API | [] |
 | `ingress.mgmt.annotations` | Ingress annotations for EMQX Mgmt API | {} |
 | `metrics.enable` | If set to true, [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) needs to be installed, and emqx_prometheus needs to be enabled | false |
 | `metrics.type` | Currently only "prometheus" is supported | "prometheus" |
 | `ssl.enabled` | Enable SSL support | false |
 | `ssl.useExisting` | Use existing certificate or let cert-manager generate one | false |
 | `ssl.existingName` | Name of existing certificate | emqx-tls |
 | `ssl.dnsnames` | DNS name(s) for certificate to be generated | {} |
 | `ssl.issuer.name` | Issuer name for certificate generation | letsencrypt-dns |
 | `ssl.issuer.kind` | Issuer kind for certificate generation | ClusterIssuer |

 ## EMQX specific settings

@@ -10,10 +10,25 @@ metadata:
     app.kubernetes.io/instance: {{ .Release.Name }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
 data:
+  EMQX_NAME: {{ .Release.Name }}
+  {{- if eq (.Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY) "k8s" }}
+  EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc:443"
+  EMQX_CLUSTER__K8S__SERVICE_NAME: {{ include "emqx.fullname" . }}-headless
+  EMQX_CLUSTER__K8S__NAMESPACE: {{ .Release.Namespace }}
+  EMQX_CLUSTER__K8S__ADDRESS_TYPE: "hostname"
+  EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
+  {{- else if eq (.Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY) "dns" }}
+  EMQX_CLUSTER__DNS__NAME: "{{ include "emqx.fullname" . }}-headless.{{ .Release.Namespace }}.svc.cluster.local"
+  EMQX_CLUSTER__DNS__RECORD_TYPE: "srv"
+  {{- end -}}
   {{- range $index, $value := .Values.emqxConfig }}
   {{- if $value }}
   {{- $key := (regexReplaceAllLiteral "\\." (regexReplaceAllLiteral "EMQX[_\\.]" (upper (trimAll " " $index)) "") "__") }}
+  {{- if or (kindIs "map" $value) (kindIs "slice" $value) }}
+  {{ print "EMQX_" $key }}: {{ tpl (printf "%q" (toJson $value)) $ }}
+  {{- else }}
   {{ print "EMQX_" $key }}: "{{ tpl (printf "%v" $value) $ }}"
   {{- end }}
   {{- end }}
+  {{- end }}
 {{- end }}

@@ -6,14 +6,6 @@ metadata:
   namespace: {{ .Release.Namespace }}
 type: kubernetes.io/basic-auth
 stringData:
-  {{- if not (empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME) }}
-  username: admin
-  {{- else }}
-  username: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME }}
-  {{- end }}
-  {{- if not (empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD) }}
-  password: public
-  {{- else }}
-  password: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD}}
-  {{- end }}
+  username: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME | default "admin" }}
+  password: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD | default "public" }}
 {{- end }}

@@ -14,6 +14,9 @@ metadata:
 {{- end }}
 spec:
   type: {{ .Values.service.type }}
+  {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }}
+  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | default "Cluster" }}
+  {{- end }}
   {{- if eq .Values.service.type "LoadBalancer" }}
   {{- if .Values.service.loadBalancerIP }}
   loadBalancerIP: {{ .Values.service.loadBalancerIP }}

@@ -7,6 +7,8 @@ replicaCount: 3
 image:
   repository: emqx/emqx-enterprise
   pullPolicy: IfNotPresent
+  # Overrides the image tag whose default is the chart appVersion.
+  tag: ""
 ## Optionally specify an array of imagePullSecrets.
 ## Secrets must be manually created in the namespace.
 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/

@@ -92,19 +94,6 @@ initContainers: {}
 ## EMQX configuration item, see the documentation (https://hub.docker.com/r/emqx/emqx)
 emqxConfig:
   EMQX_CLUSTER__DISCOVERY_STRATEGY: "dns"
-  EMQX_CLUSTER__DNS__NAME: "{{ .Release.Name }}-headless.{{ .Release.Namespace }}.svc.cluster.local"
-  EMQX_CLUSTER__DNS__RECORD_TYPE: "srv"
-  # EMQX_CLUSTER__DISCOVERY_STRATEGY: "k8s"
-  # EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc:443"
-  # EMQX_CLUSTER__K8S__SERVICE_NAME: "{{ .Release.Name }}-headless"
-  # EMQX_CLUSTER__K8S__NAMESPACE: "{{ .Release.Namespace }}"
-  ## The address type is used to extract host from k8s service.
-  ## Value: ip | dns | hostname
-  ## Note:Hostname is only supported after v4.0-rc.2
-  EMQX_CLUSTER__K8S__ADDRESS_TYPE: "hostname"
-  EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
-  ## if EMQX_CLUSTER__K8S__ADDRESS_TYPE eq dns
-  # EMQX_CLUSTER__K8S__SUFFIX: "pod.cluster.local"
   EMQX_DASHBOARD__DEFAULT_USERNAME: "admin"
   EMQX_DASHBOARD__DEFAULT_PASSWORD: "public"

@@ -160,6 +149,12 @@ service:
   ## Set the ExternalIPs
   ##
   externalIPs: []
+  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints.
+  ## There are two available options: Cluster (default) and Local.
+  ## Cluster obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.
+  ## Local preserves the client source IP and avoids a second hop for LoadBalancer and NodePort type Services, but risks potentially imbalanced traffic spreading.
+  ##
+  externalTrafficPolicy: "Cluster"
   ## Provide any additional annotations which may be required. Evaluated as a template
   ##
   annotations: {}

@@ -37,63 +37,64 @@ $ helm del my-emqx

 The following table lists the configurable parameters of the emqx chart and their default values.

 | Parameter | Description | Default Value |
 |-----------|-------------|---------------|
 | `replicaCount` | It is recommended to have an odd number of nodes in a cluster, otherwise the emqx cluster cannot be automatically healed in case of net-split. | 3 |
 | `image.repository` | EMQX Image name | emqx/emqx |
 | `image.pullPolicy` | The image pull policy | IfNotPresent |
 | `image.pullSecrets` | The image pull secrets | `[]` (does not add image pull secrets to deployed pods) |
 | `envFromSecret` | The name of a secret in the same kubernetes namespace which contains values that will be added to the environment | nil |
 | `recreatePods` | Forces the recreation of pods during upgrades, which can be useful to always apply the most recent configuration. | false |
 | `podAnnotations` | Annotations for pod | `{}` |
 | `podManagementPolicy` | To redeploy a chart with existing PVC(s), the value must be set to Parallel to avoid deadlock | `Parallel` |
 | `persistence.enabled` | Enable EMQX persistence using PVC | false |
 | `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) |
 | `persistence.existingClaim` | EMQX data Persistent Volume existing claim name, evaluated as a template | "" |
 | `persistence.accessMode` | PVC Access Mode for EMQX volume | ReadWriteOnce |
 | `persistence.size` | PVC Storage Request for EMQX volume | 20Mi |
 | `initContainers` | Containers that run before the creation of EMQX containers. They can contain utilities or setup scripts. | `{}` |
 | `resources` | CPU/Memory resource requests/limits | {} |
 | `nodeSelector` | Node labels for pod assignment | `{}` |
 | `tolerations` | Toleration labels for pod assignment | `[]` |
 | `affinity` | Map of node/pod affinities | `{}` |
 | `service.type` | Kubernetes Service type. | ClusterIP |
 | `service.mqtt` | Port for MQTT. | 1883 |
 | `service.mqttssl` | Port for MQTT(SSL). | 8883 |
 | `service.ws` | Port for WebSocket/HTTP. | 8083 |
 | `service.wss` | Port for WSS/HTTPS. | 8084 |
 | `service.dashboard` | Port for dashboard and API. | 18083 |
 | `service.nodePorts.mqtt` | Kubernetes node port for MQTT. | nil |
 | `service.nodePorts.mqttssl` | Kubernetes node port for MQTT(SSL). | nil |
 | `service.nodePorts.mgmt` | Kubernetes node port for mgmt API. | nil |
 | `service.nodePorts.ws` | Kubernetes node port for WebSocket/HTTP. | nil |
 | `service.nodePorts.wss` | Kubernetes node port for WSS/HTTPS. | nil |
 | `service.nodePorts.dashboard` | Kubernetes node port for dashboard. | nil |
 | `service.loadBalancerIP` | loadBalancerIP for Service | nil |
 | `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | [] |
 | `service.externalIPs` | ExternalIPs for the service | [] |
+| `service.externalTrafficPolicy` | External Traffic Policy for the service | `Cluster` |
 | `service.annotations` | Service annotations | {} (evaluated as a template) |
 | `ingress.dashboard.enabled` | Enable ingress for EMQX Dashboard | false |
 | `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | |
 | `ingress.dashboard.path` | Ingress path for EMQX Dashboard | / |
 | `ingress.dashboard.pathType` | Ingress pathType for EMQX Dashboard | `ImplementationSpecific` |
 | `ingress.dashboard.hosts` | Ingress hosts for EMQX Dashboard | dashboard.emqx.local |
 | `ingress.dashboard.tls` | Ingress tls for EMQX Dashboard | [] |
 | `ingress.dashboard.annotations` | Ingress annotations for EMQX Dashboard | {} |
 | `ingress.mgmt.enabled` | Enable ingress for EMQX Mgmt API | false |
 | `ingress.mgmt.ingressClassName` | Set the ingress class for EMQX Mgmt API | |
 | `ingress.mgmt.path` | Ingress path for EMQX Mgmt API | / |
 | `ingress.mgmt.hosts` | Ingress hosts for EMQX Mgmt API | api.emqx.local |
 | `ingress.mgmt.tls` | Ingress tls for EMQX Mgmt API | [] |
 | `ingress.mgmt.annotations` | Ingress annotations for EMQX Mgmt API | {} |
 | `metrics.enable` | If set to true, [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) needs to be installed, and emqx_prometheus needs to be enabled | false |
 | `metrics.type` | Currently only "prometheus" is supported | "prometheus" |
 | `ssl.enabled` | Enable SSL support | false |
 | `ssl.useExisting` | Use existing certificate or let cert-manager generate one | false |
 | `ssl.existingName` | Name of existing certificate | emqx-tls |
 | `ssl.dnsnames` | DNS name(s) for certificate to be generated | {} |
 | `ssl.issuer.name` | Issuer name for certificate generation | letsencrypt-dns |
 | `ssl.issuer.kind` | Issuer kind for certificate generation | ClusterIssuer |

 ## EMQX specific settings

@@ -10,10 +10,25 @@ metadata:
     app.kubernetes.io/instance: {{ .Release.Name }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
 data:
+  EMQX_NAME: {{ .Release.Name }}
+  {{- if eq (.Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY) "k8s" }}
+  EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc:443"
+  EMQX_CLUSTER__K8S__SERVICE_NAME: {{ include "emqx.fullname" . }}-headless
+  EMQX_CLUSTER__K8S__NAMESPACE: {{ .Release.Namespace }}
+  EMQX_CLUSTER__K8S__ADDRESS_TYPE: "hostname"
+  EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
+  {{- else if eq (.Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY) "dns" }}
+  EMQX_CLUSTER__DNS__NAME: "{{ include "emqx.fullname" . }}-headless.{{ .Release.Namespace }}.svc.cluster.local"
+  EMQX_CLUSTER__DNS__RECORD_TYPE: "srv"
+  {{- end -}}
   {{- range $index, $value := .Values.emqxConfig }}
   {{- if $value }}
   {{- $key := (regexReplaceAllLiteral "\\." (regexReplaceAllLiteral "EMQX[_\\.]" (upper (trimAll " " $index)) "") "__") }}
+  {{- if or (kindIs "map" $value) (kindIs "slice" $value) }}
+  {{ print "EMQX_" $key }}: {{ tpl (printf "%q" (toJson $value)) $ }}
+  {{- else }}
   {{ print "EMQX_" $key }}: "{{ tpl (printf "%v" $value) $ }}"
   {{- end }}
   {{- end }}
+  {{- end }}
 {{- end }}

@@ -6,14 +6,6 @@ metadata:
   namespace: {{ .Release.Namespace }}
 type: kubernetes.io/basic-auth
 stringData:
-  {{- if not (empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME) }}
-  username: admin
-  {{- else }}
-  username: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME }}
-  {{- end }}
-  {{- if not (empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD) }}
-  password: public
-  {{- else }}
-  password: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD}}
-  {{- end }}
+  username: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME | default "admin" }}
+  password: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD | default "public" }}
 {{- end }}

@@ -14,6 +14,9 @@ metadata:
 {{- end }}
 spec:
   type: {{ .Values.service.type }}
+  {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }}
+  externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | default "Cluster" }}
+  {{- end }}
   {{- if eq .Values.service.type "LoadBalancer" }}
   {{- if .Values.service.loadBalancerIP }}
   loadBalancerIP: {{ .Values.service.loadBalancerIP }}

@@ -94,19 +94,6 @@ initContainers: {}
 ## EMQX configuration item, see the documentation (https://hub.docker.com/r/emqx/emqx)
 emqxConfig:
   EMQX_CLUSTER__DISCOVERY_STRATEGY: "dns"
-  EMQX_CLUSTER__DNS__NAME: "{{ .Release.Name }}-headless.{{ .Release.Namespace }}.svc.cluster.local"
-  EMQX_CLUSTER__DNS__RECORD_TYPE: "srv"
-  # EMQX_CLUSTER__DISCOVERY_STRATEGY: "k8s"
-  # EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc:443"
-  # EMQX_CLUSTER__K8S__SERVICE_NAME: "{{ .Release.Name }}-headless"
-  # EMQX_CLUSTER__K8S__NAMESPACE: "{{ .Release.Namespace }}"
-  ## The address type is used to extract host from k8s service.
-  ## Value: ip | dns | hostname
-  ## Note:Hostname is only supported after v4.0-rc.2
-  EMQX_CLUSTER__K8S__ADDRESS_TYPE: "hostname"
-  EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
-  ## if EMQX_CLUSTER__K8S__ADDRESS_TYPE eq dns
-  # EMQX_CLUSTER__K8S__SUFFIX: "pod.cluster.local"
   EMQX_DASHBOARD__DEFAULT_USERNAME: "admin"
   EMQX_DASHBOARD__DEFAULT_PASSWORD: "public"

@@ -162,6 +149,12 @@ service:
   ## Set the ExternalIPs
   ##
   externalIPs: []
+  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints.
+  ## There are two available options: Cluster (default) and Local.
+  ## Cluster obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.
+  ## Local preserves the client source IP and avoids a second hop for LoadBalancer and NodePort type Services, but risks potentially imbalanced traffic spreading.
+  ##
+  externalTrafficPolicy: "Cluster"
   ## Provide any additional annotations which may be required. Evaluated as a template
   ##
   annotations: {}

@ -4,3 +4,5 @@ kafka
mongo
mongo_rs_sharded
mysql
+redis
+redis_cluster
@ -0,0 +1,73 @@
emqx_ee_bridge_redis {
    local_topic {
        desc {
            en: """The MQTT topic filter to be forwarded to Redis. All MQTT 'PUBLISH' messages with the topic
matching the local_topic will be forwarded.<br/>
NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is
configured, then both the data got from the rule and the MQTT messages that match local_topic
will be forwarded.
"""
            zh: """发送到 'local_topic' 的消息都会转发到 Redis。 <br/>
注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 Redis。
"""
        }
        label {
            en: "Local Topic"
            zh: "本地 Topic"
        }
    }

    command_template {
        desc {
            en: """Redis Command Template"""
            zh: """Redis Command 模板"""
        }
        label {
            en: "Redis Command Template"
            zh: "Redis Command 模板"
        }
    }
    config_enable {
        desc {
            en: """Enable or disable this bridge"""
            zh: """启用/禁用桥接"""
        }
        label {
            en: "Enable Or Disable Bridge"
            zh: "启用/禁用桥接"
        }
    }

    desc_config {
        desc {
            en: """Configuration for a Redis bridge."""
            zh: """Redis 桥接配置"""
        }
        label {
            en: "Redis Bridge Configuration"
            zh: "Redis 桥接配置"
        }
    }

    desc_type {
        desc {
            en: """The Bridge Type"""
            zh: """Bridge 类型"""
        }
        label {
            en: "Bridge Type"
            zh: "桥接类型"
        }
    }

    desc_name {
        desc {
            en: """Bridge name, used as a human-readable description of the bridge."""
            zh: """桥接名字,可读描述"""
        }
        label {
            en: "Bridge Name"
            zh: "桥接名字"
        }
    }
}
@ -22,7 +22,10 @@ api_schemas(Method) ->
        ref(emqx_ee_bridge_mongodb, Method ++ "_single"),
        ref(emqx_ee_bridge_hstreamdb, Method),
        ref(emqx_ee_bridge_influxdb, Method ++ "_api_v1"),
-       ref(emqx_ee_bridge_influxdb, Method ++ "_api_v2")
+       ref(emqx_ee_bridge_influxdb, Method ++ "_api_v2"),
+       ref(emqx_ee_bridge_redis, Method ++ "_single"),
+       ref(emqx_ee_bridge_redis, Method ++ "_sentinel"),
+       ref(emqx_ee_bridge_redis, Method ++ "_cluster")
    ].

schema_modules() ->
@ -32,7 +35,8 @@ schema_modules() ->
        emqx_ee_bridge_gcp_pubsub,
        emqx_ee_bridge_influxdb,
        emqx_ee_bridge_mongodb,
-       emqx_ee_bridge_mysql
+       emqx_ee_bridge_mysql,
+       emqx_ee_bridge_redis
    ].

examples(Method) ->
@ -56,7 +60,10 @@ resource_type(mongodb_sharded) -> emqx_connector_mongo;
resource_type(mongodb_single) -> emqx_connector_mongo;
resource_type(mysql) -> emqx_connector_mysql;
resource_type(influxdb_api_v1) -> emqx_ee_connector_influxdb;
-resource_type(influxdb_api_v2) -> emqx_ee_connector_influxdb.
+resource_type(influxdb_api_v2) -> emqx_ee_connector_influxdb;
+resource_type(redis_single) -> emqx_ee_connector_redis;
+resource_type(redis_sentinel) -> emqx_ee_connector_redis;
+resource_type(redis_cluster) -> emqx_ee_connector_redis.

fields(bridges) ->
    [
@ -92,7 +99,7 @@ fields(bridges) ->
                required => false
            }
        )}
-   ] ++ mongodb_structs() ++ influxdb_structs().
+   ] ++ mongodb_structs() ++ influxdb_structs() ++ redis_structs().

mongodb_structs() ->
    [
@ -122,3 +129,20 @@ influxdb_structs() ->
            influxdb_api_v2
        ]
    ].

+redis_structs() ->
+    [
+        {Type,
+            mk(
+                hoconsc:map(name, ref(emqx_ee_bridge_redis, Type)),
+                #{
+                    desc => <<"Redis Bridge Config">>,
+                    required => false
+                }
+            )}
+     || Type <- [
+            redis_single,
+            redis_sentinel,
+            redis_cluster
+        ]
+    ].
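%% Editor's note (illustrative, not part of the change): each element the
%% comprehension above produces is equivalent to a hand-written field entry,
%% e.g. for redis_single:
%%
%%     {redis_single,
%%         mk(
%%             hoconsc:map(name, ref(emqx_ee_bridge_redis, redis_single)),
%%             #{desc => <<"Redis Bridge Config">>, required => false}
%%         )}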
@ -0,0 +1,193 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_redis).

-include_lib("emqx_bridge/include/emqx_bridge.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").

-import(hoconsc, [mk/2, enum/1, ref/2]).

-export([
    conn_bridge_examples/1
]).

-export([
    namespace/0,
    roots/0,
    fields/1,
    desc/1
]).

%% -------------------------------------------------------------------------------------------------
%% api

conn_bridge_examples(Method) ->
    [
        #{
            <<"redis_single">> => #{
                summary => <<"Redis Single Node Bridge">>,
                value => values("single", Method)
            }
        },
        #{
            <<"redis_sentinel">> => #{
                summary => <<"Redis Sentinel Bridge">>,
                value => values("sentinel", Method)
            }
        },
        #{
            <<"redis_cluster">> => #{
                summary => <<"Redis Cluster Bridge">>,
                value => values("cluster", Method)
            }
        }
    ].

values(Protocol, get) ->
    maps:merge(values(Protocol, post), ?METRICS_EXAMPLE);
values("single", post) ->
    SpecificOpts = #{
        server => <<"127.0.0.1:6379">>,
        database => 1
    },
    values(common, "single", SpecificOpts);
values("sentinel", post) ->
    SpecificOpts = #{
        servers => [<<"127.0.0.1:26379">>],
        sentinel => <<"mymaster">>,
        database => 1
    },
    values(common, "sentinel", SpecificOpts);
values("cluster", post) ->
    SpecificOpts = #{
        servers => [<<"127.0.0.1:6379">>]
    },
    values(common, "cluster", SpecificOpts);
values(Protocol, put) ->
    maps:without([type, name], values(Protocol, post)).

values(common, RedisType, SpecificOpts) ->
    Config = #{
        type => list_to_atom("redis_" ++ RedisType),
        name => <<"redis_bridge">>,
        enable => true,
        local_topic => <<"local/topic/#">>,
        pool_size => 8,
        password => <<"secret">>,
        auto_reconnect => true,
        command_template => [<<"LPUSH">>, <<"MSGS">>, <<"${payload}">>],
        resource_opts => #{
            enable_batch => false,
            batch_size => 100,
            batch_time => <<"20ms">>
        },
        ssl => #{enable => false}
    },
    maps:merge(Config, SpecificOpts).
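%% Editor's note (illustrative, derived from the clauses above): for the
%% "single" protocol, values("single", post) therefore evaluates to the
%% common map with the protocol-specific options merged over it, roughly:
%%
%%     #{
%%         type => redis_single,
%%         name => <<"redis_bridge">>,
%%         server => <<"127.0.0.1:6379">>,
%%         database => 1,
%%         command_template => [<<"LPUSH">>, <<"MSGS">>, <<"${payload}">>],
%%         ...
%%     }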
%% -------------------------------------------------------------------------------------------------
%% Hocon Schema Definitions
namespace() -> "bridge_redis".

roots() -> [].

fields("post_single") ->
    method_fields(post, redis_single);
fields("post_sentinel") ->
    method_fields(post, redis_sentinel);
fields("post_cluster") ->
    method_fields(post, redis_cluster);
fields("put_single") ->
    method_fields(put, redis_single);
fields("put_sentinel") ->
    method_fields(put, redis_sentinel);
fields("put_cluster") ->
    method_fields(put, redis_cluster);
fields("get_single") ->
    method_fields(get, redis_single);
fields("get_sentinel") ->
    method_fields(get, redis_sentinel);
fields("get_cluster") ->
    method_fields(get, redis_cluster);
fields(Type) when
    Type == redis_single orelse Type == redis_sentinel orelse Type == redis_cluster
->
    redis_bridge_common_fields() ++
        connector_fields(Type).

method_fields(post, ConnectorType) ->
    redis_bridge_common_fields() ++
        connector_fields(ConnectorType) ++
        type_name_fields(ConnectorType);
method_fields(get, ConnectorType) ->
    redis_bridge_common_fields() ++
        connector_fields(ConnectorType) ++
        type_name_fields(ConnectorType) ++
        emqx_bridge_schema:metrics_status_fields();
method_fields(put, ConnectorType) ->
    redis_bridge_common_fields() ++
        connector_fields(ConnectorType).

redis_bridge_common_fields() ->
    emqx_bridge_schema:common_bridge_fields() ++
        [
            {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})},
            {command_template, fun command_template/1}
        ] ++
        emqx_resource_schema:fields("resource_opts").

connector_fields(Type) ->
    RedisType = bridge_type_to_redis_conn_type(Type),
    emqx_connector_redis:fields(RedisType).

bridge_type_to_redis_conn_type(redis_single) ->
    single;
bridge_type_to_redis_conn_type(redis_sentinel) ->
    sentinel;
bridge_type_to_redis_conn_type(redis_cluster) ->
    cluster.

type_name_fields(Type) ->
    [
        {type, mk(Type, #{required => true, desc => ?DESC("desc_type")})},
        {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}
    ].

desc("config") ->
    ?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
    ["Configuration for Redis using `", string:to_upper(Method), "` method."];
desc(redis_single) ->
    ?DESC(emqx_connector_redis, "single");
desc(redis_sentinel) ->
    ?DESC(emqx_connector_redis, "sentinel");
desc(redis_cluster) ->
    ?DESC(emqx_connector_redis, "cluster");
desc(_) ->
    undefined.

command_template(type) ->
    list(binary());
command_template(required) ->
    true;
command_template(validator) ->
    fun is_command_template_valid/1;
command_template(desc) ->
    ?DESC("command_template");
command_template(_) ->
    undefined.

is_command_template_valid(CommandSegments) ->
    case
        is_list(CommandSegments) andalso length(CommandSegments) > 0 andalso
            lists:all(fun is_binary/1, CommandSegments)
    of
        true ->
            ok;
        false ->
            {error,
                "the value of the field 'command_template' should be a nonempty "
                "list of strings (templates for Redis command and arguments)"}
    end.
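%% Editor's note: a quick sanity check of the validator above (hypothetical
%% shell session, not part of the change):
%%
%%     ok = is_command_template_valid([<<"RPUSH">>, <<"MSGS">>, <<"${payload}">>]),
%%     {error, _} = is_command_template_valid([]),
%%     {error, _} = is_command_template_valid(["not", "binaries"]).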
@ -0,0 +1,493 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_bridge_redis_SUITE).

-compile(nowarn_export_all).
-compile(export_all).

-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

-include_lib("emqx_bridge/include/emqx_bridge.hrl").

%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------

-define(REDIS_TOXIPROXY_CONNECT_CONFIG, #{
    <<"server">> => <<"toxiproxy:6379">>
}).

-define(COMMON_REDIS_OPTS, #{
    <<"password">> => <<"public">>,
    <<"command_template">> => [<<"RPUSH">>, <<"MSGS">>, <<"${payload}">>],
    <<"local_topic">> => <<"local_topic/#">>
}).

-define(BATCH_SIZE, 5).

-define(PROXY_HOST, "toxiproxy").
-define(PROXY_PORT, "8474").

all() -> [{group, redis_types}, {group, rest}].

groups() ->
    ResourceSpecificTCs = [t_create_delete_bridge],
    TCs = emqx_common_test_helpers:all(?MODULE) -- ResourceSpecificTCs,
    TypeGroups = [
        {group, redis_single},
        {group, redis_sentinel},
        {group, redis_cluster}
    ],
    BatchGroups = [
        {group, batch_on},
        {group, batch_off}
    ],
    [
        {rest, TCs},
        {redis_types, [
            {group, tcp},
            {group, tls}
        ]},
        {tcp, TypeGroups},
        {tls, TypeGroups},
        {redis_single, BatchGroups},
        {redis_sentinel, BatchGroups},
        {redis_cluster, BatchGroups},
        {batch_on, ResourceSpecificTCs},
        {batch_off, ResourceSpecificTCs}
    ].
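%% Editor's note: the group tree above fans t_create_delete_bridge out over
%% every combination of Redis deployment type (single | sentinel | cluster),
%% transport (tcp | tls) and batching mode (batch_on | batch_off), while the
%% remaining cases in the module run once under the 'rest' group.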
init_per_group(Group, Config) when
    Group =:= redis_single; Group =:= redis_sentinel; Group =:= redis_cluster
->
    [{redis_type, Group} | Config];
init_per_group(Group, Config) when
    Group =:= tcp; Group =:= tls
->
    [{transport, Group} | Config];
init_per_group(Group, Config) when
    Group =:= batch_on; Group =:= batch_off
->
    [{batch_mode, Group} | Config];
init_per_group(_Group, Config) ->
    Config.

end_per_group(_Group, _Config) ->
    ok.

init_per_suite(Config) ->
    TestHosts = all_test_hosts(),
    case emqx_common_test_helpers:is_all_tcp_servers_available(TestHosts) of
        true ->
            ProxyHost = os:getenv("PROXY_HOST", ?PROXY_HOST),
            ProxyPort = list_to_integer(os:getenv("PROXY_PORT", ?PROXY_PORT)),
            emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
            ok = emqx_common_test_helpers:start_apps([emqx_conf]),
            ok = emqx_connector_test_helpers:start_apps([
                emqx_resource, emqx_bridge, emqx_rule_engine
            ]),
            {ok, _} = application:ensure_all_started(emqx_connector),
            [
                {proxy_host, ProxyHost},
                {proxy_port, ProxyPort}
                | Config
            ];
        false ->
            {skip, no_redis}
    end.

end_per_suite(_Config) ->
    ok = delete_all_bridges(),
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:stop_apps([emqx_rule_engine, emqx_bridge, emqx_resource]),
    _ = application:stop(emqx_connector),
    ok.

init_per_testcase(_Testcase, Config) ->
    ok = delete_all_bridges(),
    case ?config(redis_type, Config) of
        undefined ->
            Config;
        RedisType ->
            Transport = ?config(transport, Config),
            BatchMode = ?config(batch_mode, Config),
            #{RedisType := #{Transport := RedisConnConfig}} = redis_connect_configs(),
            #{BatchMode := ResourceConfig} = resource_configs(),
            IsBatch = (BatchMode =:= batch_on),
            BridgeConfig0 = maps:merge(RedisConnConfig, ?COMMON_REDIS_OPTS),
            BridgeConfig1 = BridgeConfig0#{<<"resource_opts">> => ResourceConfig},
            [{bridge_config, BridgeConfig1}, {is_batch, IsBatch} | Config]
    end.

end_per_testcase(_Testcase, Config) ->
    ProxyHost = ?config(proxy_host, Config),
    ProxyPort = ?config(proxy_port, Config),
    ok = snabbkaffe:stop(),
    emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort),
    ok = delete_all_bridges().

t_create_delete_bridge(Config) ->
    Name = <<"mybridge">>,
    Type = ?config(redis_type, Config),
    BridgeConfig = ?config(bridge_config, Config),
    IsBatch = ?config(is_batch, Config),
    ?assertMatch(
        {ok, _},
        emqx_bridge:create(Type, Name, BridgeConfig)
    ),

    ResourceId = emqx_bridge_resource:resource_id(Type, Name),

    ?assertEqual(
        {ok, connected},
        emqx_resource:health_check(ResourceId)
    ),

    RedisType = atom_to_binary(Type),
    Action = <<RedisType/binary, ":", Name/binary>>,

    RuleId = <<"my_rule_id">>,
    RuleConf = #{
        actions => [Action],
        description => <<>>,
        enable => true,
        id => RuleId,
        name => <<>>,
        sql => <<"SELECT * FROM \"t/#\"">>
    },

    %% check export by rule
    {ok, _} = emqx_rule_engine:create_rule(RuleConf),
    _ = check_resource_queries(ResourceId, <<"t/test">>, IsBatch),
    ok = emqx_rule_engine:delete_rule(RuleId),

    %% check export through local topic
    _ = check_resource_queries(ResourceId, <<"local_topic/test">>, IsBatch),

    {ok, _} = emqx_bridge:remove(Type, Name).

%% check that we provide correct examples
t_check_values(_Config) ->
    lists:foreach(
        fun(Method) ->
            lists:foreach(
                fun({RedisType, #{value := Value0}}) ->
                    Value = maps:without(maps:keys(?METRICS_EXAMPLE), Value0),
                    MethodBin = atom_to_binary(Method),
                    Type = string:slice(RedisType, length("redis_")),
                    RefName = binary_to_list(<<MethodBin/binary, "_", Type/binary>>),
                    Schema = conf_schema(RefName),
                    ?assertMatch(
                        #{},
                        hocon_tconf:check_plain(Schema, #{<<"root">> => Value}, #{
                            atom_key => true,
                            required => false
                        })
                    )
                end,
                lists:flatmap(
                    fun maps:to_list/1,
                    emqx_ee_bridge_redis:conn_bridge_examples(Method)
                )
            )
        end,
        [put, post, get]
    ).

t_check_replay(Config) ->
    Name = <<"toxic_bridge">>,
    Type = <<"redis_single">>,
    Topic = <<"local_topic/test">>,
    ProxyName = "redis_single_tcp",

    ?assertMatch(
        {ok, _},
        emqx_bridge:create(Type, Name, toxiproxy_redis_bridge_config())
    ),

    ResourceId = emqx_bridge_resource:resource_id(Type, Name),
    Health = emqx_resource:health_check(ResourceId),

    ?assertEqual(
        {ok, connected},
        Health
    ),

    ?check_trace(
        begin
            ?wait_async_action(
                with_down_failure(Config, ProxyName, fun() ->
                    ct:sleep(100),
                    lists:foreach(
                        fun(_) ->
                            _ = publish_message(Topic, <<"test_payload">>)
                        end,
                        lists:seq(1, ?BATCH_SIZE)
                    )
                end),
                #{?snk_kind := redis_ee_connector_send_done, batch := true, result := {ok, _}},
                10000
            )
        end,
        fun(Trace) ->
            ?assert(
                ?strict_causality(
                    #{?snk_kind := redis_ee_connector_send_done, result := {error, _}},
                    #{?snk_kind := redis_ee_connector_send_done, result := {ok, _}},
                    Trace
                )
            )
        end
    ),
    {ok, _} = emqx_bridge:remove(Type, Name).

t_permanent_error(_Config) ->
    Name = <<"invalid_command_bridge">>,
    Type = <<"redis_single">>,
    Topic = <<"local_topic/test">>,
    Payload = <<"payload for invalid redis command">>,

    ?assertMatch(
        {ok, _},
        emqx_bridge:create(Type, Name, invalid_command_bridge_config())
    ),

    ?check_trace(
        begin
            ?wait_async_action(
                publish_message(Topic, Payload),
                #{?snk_kind := redis_ee_connector_send_done},
                10000
            )
        end,
        fun(Trace) ->
            ?assertMatch(
                [#{result := {error, _}} | _],
                ?of_kind(redis_ee_connector_send_done, Trace)
            )
        end
    ),
    {ok, _} = emqx_bridge:remove(Type, Name).

t_create_disconnected(Config) ->
    Name = <<"toxic_bridge">>,
    Type = <<"redis_single">>,

    ?check_trace(
        with_down_failure(Config, "redis_single_tcp", fun() ->
            {ok, _} = emqx_bridge:create(
                Type, Name, toxiproxy_redis_bridge_config()
            )
        end),
        fun(Trace) ->
            ?assertMatch(
                [#{error := _} | _],
                ?of_kind(redis_ee_connector_start_error, Trace)
            ),
            ok
        end
    ),
    {ok, _} = emqx_bridge:remove(Type, Name).

%%------------------------------------------------------------------------------
%% Helper functions
%%------------------------------------------------------------------------------

with_down_failure(Config, Name, F) ->
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    emqx_common_test_helpers:with_failure(down, Name, ProxyHost, ProxyPort, F).

check_resource_queries(ResourceId, Topic, IsBatch) ->
    RandomPayload = rand:bytes(20),
    N =
        case IsBatch of
            true -> ?BATCH_SIZE;
            false -> 1
        end,
    ?check_trace(
        begin
            ?wait_async_action(
                lists:foreach(
                    fun(_) ->
                        _ = publish_message(Topic, RandomPayload)
                    end,
                    lists:seq(1, N)
                ),
                #{?snk_kind := redis_ee_connector_send_done, batch := IsBatch},
                1000
            )
        end,
        fun(Trace) ->
            AddedMsgCount = length(added_msgs(ResourceId, RandomPayload)),
            case IsBatch of
                true ->
                    ?assertMatch(
                        [#{result := {ok, _}, batch := true, batch_size := ?BATCH_SIZE} | _],
                        ?of_kind(redis_ee_connector_send_done, Trace)
                    ),
                    ?assertEqual(?BATCH_SIZE, AddedMsgCount);
                false ->
                    ?assertMatch(
                        [#{result := {ok, _}, batch := false} | _],
                        ?of_kind(redis_ee_connector_send_done, Trace)
                    ),
                    ?assertEqual(1, AddedMsgCount)
            end
        end
    ).

added_msgs(ResourceId, Payload) ->
    {ok, Results} = emqx_resource:simple_sync_query(
        ResourceId, {cmd, [<<"LRANGE">>, <<"MSGS">>, <<"0">>, <<"-1">>]}
    ),
    [El || El <- Results, El =:= Payload].
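%% Editor's note: added_msgs/2 above reads the whole MSGS list back from
%% Redis (LRANGE MSGS 0 -1) and keeps only entries equal to the random
%% payload, so each test run counts just the messages it published itself.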
conf_schema(StructName) ->
    #{
        fields => #{},
        translations => #{},
        validations => [],
        namespace => undefined,
        roots => [{root, hoconsc:ref(emqx_ee_bridge_redis, StructName)}]
    }.

delete_all_bridges() ->
    lists:foreach(
        fun(#{name := Name, type := Type}) ->
            emqx_bridge:remove(Type, Name)
        end,
        emqx_bridge:list()
    ).

all_test_hosts() ->
    Confs = [
        ?REDIS_TOXIPROXY_CONNECT_CONFIG
        | lists:concat([
            maps:values(TypeConfs)
         || TypeConfs <- maps:values(redis_connect_configs())
        ])
    ],
    lists:flatmap(
        fun
            (#{<<"servers">> := ServersRaw}) ->
                lists:map(
                    fun(Server) ->
                        parse_server(Server)
                    end,
                    string:tokens(binary_to_list(ServersRaw), ", ")
                );
            (#{<<"server">> := ServerRaw}) ->
                [parse_server(ServerRaw)]
        end,
        Confs
    ).

parse_server(Server) ->
    emqx_connector_schema_lib:parse_server(Server, #{
        host_type => hostname,
        default_port => 6379
    }).

redis_connect_ssl_opts(Type) ->
    maps:merge(
        client_ssl_cert_opts(Type),
        #{
            <<"enable">> => <<"true">>,
            <<"verify">> => <<"verify_none">>
        }
    ).

client_ssl_cert_opts(redis_single) ->
    emqx_authn_test_lib:client_ssl_cert_opts();
client_ssl_cert_opts(_) ->
    Dir = code:lib_dir(emqx, etc),
    #{
        <<"keyfile">> => filename:join([Dir, <<"certs">>, <<"client-key.pem">>]),
        <<"certfile">> => filename:join([Dir, <<"certs">>, <<"client-cert.pem">>]),
        <<"cacertfile">> => filename:join([Dir, <<"certs">>, <<"cacert.pem">>])
    }.

redis_connect_configs() ->
    #{
        redis_single => #{
            tcp => #{
                <<"server">> => <<"redis:6379">>
            },
            tls => #{
                <<"server">> => <<"redis-tls:6380">>,
                <<"ssl">> => redis_connect_ssl_opts(redis_single)
            }
        },
        redis_sentinel => #{
            tcp => #{
                <<"servers">> => <<"redis-sentinel:26379">>,
                <<"sentinel">> => <<"mymaster">>
            },
            tls => #{
                <<"servers">> => <<"redis-sentinel-tls:26380">>,
                <<"sentinel">> => <<"mymaster">>,
                <<"ssl">> => redis_connect_ssl_opts(redis_sentinel)
            }
        },
        redis_cluster => #{
            tcp => #{
                <<"servers">> => <<"redis-cluster:7000,redis-cluster:7001,redis-cluster:7002">>
            },
            tls => #{
                <<"servers">> =>
                    <<"redis-cluster-tls:8000,redis-cluster-tls:8001,redis-cluster-tls:8002">>,
                <<"ssl">> => redis_connect_ssl_opts(redis_cluster)
            }
        }
    }.

toxiproxy_redis_bridge_config() ->
    Conf0 = ?REDIS_TOXIPROXY_CONNECT_CONFIG#{
        <<"resource_opts">> => #{
            <<"query_mode">> => <<"async">>,
            <<"enable_batch">> => <<"true">>,
            <<"enable_queue">> => <<"true">>,
            <<"worker_pool_size">> => <<"1">>,
            <<"batch_size">> => integer_to_binary(?BATCH_SIZE),
            <<"health_check_interval">> => <<"1s">>
        }
    },
    maps:merge(Conf0, ?COMMON_REDIS_OPTS).

invalid_command_bridge_config() ->
    #{redis_single := #{tcp := Conf0}} = redis_connect_configs(),
    Conf1 = maps:merge(Conf0, ?COMMON_REDIS_OPTS),
    Conf1#{
        <<"resource_opts">> => #{
            <<"enable_batch">> => <<"false">>,
            <<"enable_queue">> => <<"false">>,
            <<"worker_pool_size">> => <<"1">>
        },
        <<"command_template">> => [<<"BAD">>, <<"COMMAND">>, <<"${payload}">>]
    }.

resource_configs() ->
    #{
        batch_off => #{
            <<"query_mode">> => <<"sync">>,
            <<"enable_batch">> => <<"false">>,
            <<"enable_queue">> => <<"false">>
        },
        batch_on => #{
            <<"query_mode">> => <<"async">>,
            <<"enable_batch">> => <<"true">>,
            <<"enable_queue">> => <<"true">>,
            <<"worker_pool_size">> => <<"1">>,
            <<"batch_size">> => integer_to_binary(?BATCH_SIZE)
        }
    }.

publish_message(Topic, Payload) ->
    {ok, Client} = emqtt:start_link(),
    {ok, _} = emqtt:connect(Client),
    ok = emqtt:publish(Client, Topic, Payload),
    ok = emqtt:stop(Client).
@ -0,0 +1,138 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_ee_connector_redis).

-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

-behaviour(emqx_resource).

%% callbacks of behaviour emqx_resource
-export([
    callback_mode/0,
    on_start/2,
    on_stop/2,
    on_query/3,
    on_batch_query/3,
    on_get_status/2
]).

%% -------------------------------------------------------------------------------------------------
%% resource callbacks
%% -------------------------------------------------------------------------------------------------

callback_mode() -> always_sync.

on_start(InstId, #{command_template := CommandTemplate} = Config) ->
    case emqx_connector_redis:on_start(InstId, Config) of
        {ok, RedisConnSt} ->
            ?tp(
                redis_ee_connector_start_success,
                #{}
            ),
            {ok, #{
                conn_st => RedisConnSt,
                command_template => preproc_command_template(CommandTemplate)
            }};
        {error, _} = Error ->
            ?tp(
                redis_ee_connector_start_error,
                #{error => Error}
            ),
            Error
    end.
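%% Editor's note: on_start/2 above delegates connection management to the
%% existing emqx_connector_redis resource and only adds the preprocessed
%% command template to the returned state, so the query callbacks below can
%% render commands per message without re-parsing the template each time.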
on_stop(InstId, #{conn_st := RedisConnSt}) ->
    emqx_connector_redis:on_stop(InstId, RedisConnSt).

on_get_status(InstId, #{conn_st := RedisConnSt}) ->
    emqx_connector_redis:on_get_status(InstId, RedisConnSt).

on_query(
    InstId,
    {send_message, Data},
    _State = #{
        command_template := CommandTemplate, conn_st := RedisConnSt
    }
) ->
    Cmd = proc_command_template(CommandTemplate, Data),
    ?tp(
        redis_ee_connector_cmd,
        #{cmd => Cmd, batch => false, mode => sync}
    ),
    Result = query(InstId, {cmd, Cmd}, RedisConnSt),
    ?tp(
        redis_ee_connector_send_done,
        #{cmd => Cmd, batch => false, mode => sync, result => Result}
    ),
    Result;
on_query(
    InstId,
    Query,
    _State = #{conn_st := RedisConnSt}
) ->
    ?tp(
        redis_ee_connector_query,
        #{query => Query, batch => false, mode => sync}
    ),
    Result = query(InstId, Query, RedisConnSt),
    ?tp(
        redis_ee_connector_send_done,
        #{query => Query, batch => false, mode => sync, result => Result}
    ),
    Result.

on_batch_query(
    InstId, BatchData, _State = #{command_template := CommandTemplate, conn_st := RedisConnSt}
) ->
    Cmds = process_batch_data(BatchData, CommandTemplate),
    ?tp(
        redis_ee_connector_send,
        #{batch_data => BatchData, batch => true, mode => sync}
    ),
    Result = query(InstId, {cmds, Cmds}, RedisConnSt),
    ?tp(
        redis_ee_connector_send_done,
        #{
            batch_data => BatchData,
            batch_size => length(BatchData),
            batch => true,
            mode => sync,
            result => Result
        }
    ),
    Result.

%% -------------------------------------------------------------------------------------------------
%% private helpers
%% -------------------------------------------------------------------------------------------------

query(InstId, Query, RedisConnSt) ->
    case emqx_connector_redis:on_query(InstId, Query, RedisConnSt) of
        {ok, _} = Ok -> Ok;
        {error, no_connection} -> {error, {recoverable_error, no_connection}};
        {error, _} = Error -> Error
    end.
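%% Editor's note: mapping no_connection to {recoverable_error, _} above is
%% what lets emqx_resource retry and replay the request (exercised by
%% t_check_replay in the test suite); any other error, e.g. a bad command,
%% is returned as-is and treated as final.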
process_batch_data(BatchData, CommandTemplate) ->
    lists:map(
        fun({send_message, Data}) ->
            proc_command_template(CommandTemplate, Data)
        end,
        BatchData
    ).

proc_command_template(CommandTemplate, Msg) ->
    lists:map(
        fun(ArgTks) ->
            emqx_plugin_libs_rule:proc_tmpl(ArgTks, Msg, #{return => full_binary})
        end,
        CommandTemplate
    ).

preproc_command_template(CommandTemplate) ->
    lists:map(
        fun emqx_plugin_libs_rule:preproc_tmpl/1,
        CommandTemplate
    ).
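%% Editor's note (illustrative sketch, assuming the emqx_plugin_libs_rule
%% template API used above): a command template is tokenized once at start-up
%% and then rendered per message, e.g.
%%
%%     Tmpl = preproc_command_template([<<"RPUSH">>, <<"MSGS">>, <<"${payload}">>]),
%%     [<<"RPUSH">>, <<"MSGS">>, <<"hello">>] =
%%         proc_command_template(Tmpl, #{payload => <<"hello">>}).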
mix.exs
@ -47,12 +47,12 @@ defmodule EMQXUmbrella.MixProject do
      {:lc, github: "emqx/lc", tag: "0.3.2", override: true},
      {:redbug, "2.0.7"},
      {:typerefl, github: "ieQu1/typerefl", tag: "0.9.1", override: true},
-     {:ehttpc, github: "emqx/ehttpc", tag: "0.4.0", override: true},
+     {:ehttpc, github: "emqx/ehttpc", tag: "0.4.2", override: true},
      {:gproc, github: "uwiger/gproc", tag: "0.8.0", override: true},
      {:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true},
      {:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true},
      {:esockd, github: "emqx/esockd", tag: "5.9.4", override: true},
-     {:ekka, github: "emqx/ekka", tag: "0.13.6", override: true},
+     {:ekka, github: "emqx/ekka", tag: "0.13.7", override: true},
      {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true},
      {:grpc, github: "emqx/grpc-erl", tag: "0.6.7", override: true},
      {:minirest, github: "emqx/minirest", tag: "1.3.7", override: true},
@ -49,12 +49,12 @@
 , {gpb, "4.19.5"} %% gpb only used to build, but not for release, pin it here to avoid fetching a wrong version due to rebar plugins scattered in all the deps
 , {typerefl, {git, "https://github.com/ieQu1/typerefl", {tag, "0.9.1"}}}
 , {gun, {git, "https://github.com/emqx/gun", {tag, "1.3.9"}}}
-, {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.4.0"}}}
+, {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.4.2"}}}
 , {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}
 , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
 , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}
 , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}}
-, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.6"}}}
+, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.7"}}}
 , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}
 , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.7"}}}
 , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.7"}}}
@ -113,6 +113,10 @@ for dep in ${CT_DEPS}; do
            '.ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml'
            '.ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml' )
        ;;
+       redis_cluster)
+           FILES+=( '.ci/docker-compose-file/docker-compose-redis-cluster-tcp.yaml'
+                    '.ci/docker-compose-file/docker-compose-redis-cluster-tls.yaml' )
+           ;;
        mysql)
            FILES+=( '.ci/docker-compose-file/docker-compose-mysql-tcp.yaml'
                     '.ci/docker-compose-file/docker-compose-mysql-tls.yaml' )
@ -148,11 +148,14 @@ emqx_test(){
        fi
        ;;
    "rpm")
+       # yum wants python2
+       alternatives --list | grep python && alternatives --set python /usr/bin/python2
        YUM_RES=$(yum install -y "${PACKAGE_PATH}/${packagename}" | tee /dev/null)
        if [[ $YUM_RES =~ "Failed" ]]; then
            echo "yum install failed"
            exit 1
        fi
+       # switch back to python3 once the package is installed
+       alternatives --list | grep python && alternatives --set python /usr/bin/python3
        if ! rpm -q "${EMQX_NAME}" | grep -q "${EMQX_NAME}"; then
            echo "package install error"
            exit 1