Merge pull request #9901 from zmstone/0202-merge-release-50-back-to-master
0202 merge release 50 back to master
commit 3587c4c04a
@@ -1,5 +1,5 @@
 MYSQL_TAG=8
-REDIS_TAG=6
+REDIS_TAG=7.0
 MONGO_TAG=5
 PGSQL_TAG=13
 LDAP_TAG=2.4.50
@@ -13,10 +13,10 @@ help:
 up:
     env \
     MYSQL_TAG=8 \
-    REDIS_TAG=6 \
+    REDIS_TAG=7.0 \
    MONGO_TAG=5 \
    PGSQL_TAG=13 \
-    docker compose \
+    docker-compose \
    -f .ci/docker-compose-file/docker-compose.yaml \
    -f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
    -f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
@@ -34,7 +34,7 @@ up:
    up -d --build --remove-orphans

 down:
-    docker compose \
+    docker-compose \
    -f .ci/docker-compose-file/docker-compose.yaml \
    -f .ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml \
    -f .ci/docker-compose-file/docker-compose-mongo-single-tls.yaml \
@@ -1,11 +1,57 @@
 version: '3.9'

 services:
-  redis_cluster:
+  redis-cluster-1: &redis-node
+    container_name: redis-cluster-1
     image: redis:${REDIS_TAG}
-    container_name: redis-cluster
     volumes:
-      - ./redis/:/data/conf
-    command: bash -c "/bin/bash /data/conf/redis.sh --node cluster && tail -f /var/log/redis-server.log"
+      - ./redis/cluster-tcp:/usr/local/etc/redis
+    command: redis-server /usr/local/etc/redis/redis.conf
     networks:
       - emqx_bridge
+
+  redis-cluster-2:
+    <<: *redis-node
+    container_name: redis-cluster-2
+
+  redis-cluster-3:
+    <<: *redis-node
+    container_name: redis-cluster-3
+
+  redis-cluster-4:
+    <<: *redis-node
+    container_name: redis-cluster-4
+
+  redis-cluster-5:
+    <<: *redis-node
+    container_name: redis-cluster-5
+
+  redis-cluster-6:
+    <<: *redis-node
+    container_name: redis-cluster-6
+
+  redis-cluster-create:
+    <<: *redis-node
+    container_name: redis-cluster-create
+    command: >
+      redis-cli
+      --cluster create
+      redis-cluster-1:6379
+      redis-cluster-2:6379
+      redis-cluster-3:6379
+      redis-cluster-4:6379
+      redis-cluster-5:6379
+      redis-cluster-6:6379
+      --cluster-replicas 1
+      --cluster-yes
+      --pass "public"
+      --no-auth-warning
+    depends_on:
+      - redis-cluster-1
+      - redis-cluster-2
+      - redis-cluster-3
+      - redis-cluster-4
+      - redis-cluster-5
+      - redis-cluster-6
@@ -1,14 +1,59 @@
 version: '3.9'

 services:
-  redis_cluster_tls:
-    container_name: redis-cluster-tls
+  redis-cluster-tls-1: &redis-node
+    container_name: redis-cluster-tls-1
     image: redis:${REDIS_TAG}
     volumes:
-      - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt
-      - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/redis.crt
-      - ../../apps/emqx/etc/certs/key.pem:/etc/certs/redis.key
-      - ./redis/:/data/conf
-    command: bash -c "/bin/bash /data/conf/redis.sh --node cluster --tls-enabled && tail -f /var/log/redis-server.log"
+      - ./redis/cluster-tls:/usr/local/etc/redis
+      - ../../apps/emqx/etc/certs:/etc/certs
+    command: redis-server /usr/local/etc/redis/redis.conf
     networks:
       - emqx_bridge
+
+  redis-cluster-tls-2:
+    <<: *redis-node
+    container_name: redis-cluster-tls-2
+
+  redis-cluster-tls-3:
+    <<: *redis-node
+    container_name: redis-cluster-tls-3
+
+  redis-cluster-tls-4:
+    <<: *redis-node
+    container_name: redis-cluster-tls-4
+
+  redis-cluster-tls-5:
+    <<: *redis-node
+    container_name: redis-cluster-tls-5
+
+  redis-cluster-tls-6:
+    <<: *redis-node
+    container_name: redis-cluster-tls-6
+
+  redis-cluster-tls-create:
+    <<: *redis-node
+    container_name: redis-cluster-tls-create
+    command: >
+      redis-cli
+      --cluster create
+      redis-cluster-tls-1:6389
+      redis-cluster-tls-2:6389
+      redis-cluster-tls-3:6389
+      redis-cluster-tls-4:6389
+      redis-cluster-tls-5:6389
+      redis-cluster-tls-6:6389
+      --cluster-replicas 1
+      --cluster-yes
+      --pass "public"
+      --no-auth-warning
+      --tls
+      --insecure
+    depends_on:
+      - redis-cluster-tls-1
+      - redis-cluster-tls-2
+      - redis-cluster-tls-3
+      - redis-cluster-tls-4
+      - redis-cluster-tls-5
+      - redis-cluster-tls-6
@@ -1,11 +1,41 @@
-version: '3.9'
+version: "3"

 services:
-  redis_sentinel_server:
+  redis-sentinel-master:
+    container_name: redis-sentinel-master
+    image: redis:${REDIS_TAG}
+    volumes:
+      - ./redis/sentinel-tcp:/usr/local/etc/redis
+    command: redis-server /usr/local/etc/redis/master.conf
+    networks:
+      - emqx_bridge
+
+  redis-sentinel-slave:
+    container_name: redis-sentinel-slave
+    image: redis:${REDIS_TAG}
+    volumes:
+      - ./redis/sentinel-tcp:/usr/local/etc/redis
+    command: redis-server /usr/local/etc/redis/slave.conf
+    networks:
+      - emqx_bridge
+    depends_on:
+      - redis-sentinel-master
+
+  redis-sentinel:
     container_name: redis-sentinel
     image: redis:${REDIS_TAG}
     volumes:
-      - ./redis/:/data/conf
-    command: bash -c "/bin/bash /data/conf/redis.sh --node sentinel && tail -f /var/log/redis-server.log"
+      - ./redis/sentinel-tcp/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf
+    depends_on:
+      - redis-sentinel-master
+      - redis-sentinel-slave
+    command: >
+      bash -c "cp -f /usr/local/etc/redis/sentinel-base.conf /usr/local/etc/redis/sentinel.conf &&
+      redis-sentinel /usr/local/etc/redis/sentinel.conf"
     networks:
       - emqx_bridge
@@ -1,14 +1,44 @@
-version: '3.9'
+version: "3"

 services:
-  redis_sentinel_server_tls:
+  redis-sentinel-tls-master:
+    container_name: redis-sentinel-tls-master
+    image: redis:${REDIS_TAG}
+    volumes:
+      - ./redis/sentinel-tls:/usr/local/etc/redis
+      - ../../apps/emqx/etc/certs:/etc/certs
+    command: redis-server /usr/local/etc/redis/master.conf
+    networks:
+      - emqx_bridge
+
+  redis-sentinel-tls-slave:
+    container_name: redis-sentinel-tls-slave
+    image: redis:${REDIS_TAG}
+    volumes:
+      - ./redis/sentinel-tls:/usr/local/etc/redis
+      - ../../apps/emqx/etc/certs:/etc/certs
+    command: redis-server /usr/local/etc/redis/slave.conf
+    networks:
+      - emqx_bridge
+    depends_on:
+      - redis-sentinel-tls-master
+
+  redis-sentinel-tls:
     container_name: redis-sentinel-tls
     image: redis:${REDIS_TAG}
     volumes:
-      - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt
-      - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/redis.crt
-      - ../../apps/emqx/etc/certs/key.pem:/etc/certs/redis.key
-      - ./redis/:/data/conf
-    command: bash -c "/bin/bash /data/conf/redis.sh --node sentinel --tls-enabled && tail -f /var/log/redis-server.log"
+      - ./redis/sentinel-tls/sentinel-base.conf:/usr/local/etc/redis/sentinel-base.conf
+      - ../../apps/emqx/etc/certs:/etc/certs
+    depends_on:
+      - redis-sentinel-tls-master
+      - redis-sentinel-tls-slave
+    command: >
+      bash -c "cp -f /usr/local/etc/redis/sentinel-base.conf /usr/local/etc/redis/sentinel.conf &&
+      redis-sentinel /usr/local/etc/redis/sentinel.conf"
     networks:
       - emqx_bridge
@@ -1,3 +0,0 @@
-r700?i.log
-nodes.700?.conf
-*.rdb
@@ -0,0 +1,18 @@
+bind :: 0.0.0.0
+port 6379
+requirepass public
+
+cluster-enabled yes
+
+masterauth public
+
+protected-mode no
+daemonize no
+
+loglevel notice
+logfile ""
+
+always-show-logo no
+save ""
+appendonly no
+
@@ -0,0 +1,28 @@
+bind :: 0.0.0.0
+port 6379
+requirepass public
+
+cluster-enabled yes
+
+masterauth public
+
+tls-port 6389
+tls-cert-file /etc/certs/cert.pem
+tls-key-file /etc/certs/key.pem
+tls-ca-cert-file /etc/certs/cacert.pem
+tls-auth-clients no
+
+tls-replication yes
+tls-cluster yes
+
+
+protected-mode no
+daemonize no
+
+loglevel notice
+logfile ""
+
+always-show-logo no
+save ""
+appendonly no
+
@@ -1,12 +0,0 @@
-daemonize yes
-bind 0.0.0.0 ::
-logfile /var/log/redis-server.log
-protected-mode no
-requirepass public
-masterauth public
-
-tls-cert-file /etc/certs/redis.crt
-tls-key-file /etc/certs/redis.key
-tls-ca-cert-file /etc/certs/ca.crt
-tls-replication yes
-tls-cluster yes
@@ -1,6 +0,0 @@
-daemonize yes
-bind 0.0.0.0 ::
-logfile /var/log/redis-server.log
-protected-mode no
-requirepass public
-masterauth public
@@ -1,126 +0,0 @@
-#!/bin/bash
-
-set -x
-
-LOCAL_IP=$(hostname -i | grep -oE '((25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])\.){3}(25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])' | head -n 1)
-
-node=single
-tls=false
-while [[ $# -gt 0 ]]
-do
-    key="$1"
-
-    case $key in
-        -n|--node)
-            node="$2"
-            shift # past argument
-            shift # past value
-            ;;
-        --tls-enabled)
-            tls=true
-            shift # past argument
-            ;;
-        *)
-            shift # past argument
-            ;;
-    esac
-done
-
-rm -f \
-    /data/conf/r7000i.log \
-    /data/conf/r7001i.log \
-    /data/conf/r7002i.log \
-    /data/conf/nodes.7000.conf \
-    /data/conf/nodes.7001.conf \
-    /data/conf/nodes.7002.conf
-
-if [ "$node" = "cluster" ]; then
-    if $tls; then
-        redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-            --tls-port 8000 --cluster-enabled yes
-        redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-            --tls-port 8001 --cluster-enabled yes
-        redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-            --tls-port 8002 --cluster-enabled yes
-    else
-        redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-            --cluster-enabled yes
-        redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-            --cluster-enabled yes
-        redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-            --cluster-enabled yes
-    fi
-elif [ "$node" = "sentinel" ]; then
-    if $tls; then
-        redis-server /data/conf/redis-tls.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-            --tls-port 8000 --cluster-enabled no
-        redis-server /data/conf/redis-tls.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-            --tls-port 8001 --cluster-enabled no --slaveof "$LOCAL_IP" 8000
-        redis-server /data/conf/redis-tls.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-            --tls-port 8002 --cluster-enabled no --slaveof "$LOCAL_IP" 8000
-
-    else
-        redis-server /data/conf/redis.conf --port 7000 --cluster-config-file /data/conf/nodes.7000.conf \
-            --cluster-enabled no
-        redis-server /data/conf/redis.conf --port 7001 --cluster-config-file /data/conf/nodes.7001.conf \
-            --cluster-enabled no --slaveof "$LOCAL_IP" 7000
-        redis-server /data/conf/redis.conf --port 7002 --cluster-config-file /data/conf/nodes.7002.conf \
-            --cluster-enabled no --slaveof "$LOCAL_IP" 7000
-    fi
-fi
-
-REDIS_LOAD_FLG=true
-
-while $REDIS_LOAD_FLG;
-do
-    sleep 1
-    redis-cli --pass public --no-auth-warning -p 7000 info 1> /data/conf/r7000i.log 2> /dev/null
-    if ! [ -s /data/conf/r7000i.log ]; then
-        continue
-    fi
-    redis-cli --pass public --no-auth-warning -p 7001 info 1> /data/conf/r7001i.log 2> /dev/null
-    if ! [ -s /data/conf/r7001i.log ]; then
-        continue
-    fi
-    redis-cli --pass public --no-auth-warning -p 7002 info 1> /data/conf/r7002i.log 2> /dev/null;
-    if ! [ -s /data/conf/r7002i.log ]; then
-        continue
-    fi
-    if [ "$node" = "cluster" ] ; then
-        if $tls; then
-            yes "yes" | redis-cli --cluster create "$LOCAL_IP:8000" "$LOCAL_IP:8001" "$LOCAL_IP:8002" \
-                --pass public --no-auth-warning \
-                --tls true --cacert /etc/certs/ca.crt \
-                --cert /etc/certs/redis.crt --key /etc/certs/redis.key
-        else
-            yes "yes" | redis-cli --cluster create "$LOCAL_IP:7000" "$LOCAL_IP:7001" "$LOCAL_IP:7002" \
-                --pass public --no-auth-warning
-        fi
-    elif [ "$node" = "sentinel" ]; then
-        tee /_sentinel.conf>/dev/null << EOF
-port 26379
-bind 0.0.0.0 ::
-daemonize yes
-logfile /var/log/redis-server.log
-dir /tmp
-EOF
-        if $tls; then
-            cat >>/_sentinel.conf<<EOF
-tls-port 26380
-tls-replication yes
-tls-cert-file /etc/certs/redis.crt
-tls-key-file /etc/certs/redis.key
-tls-ca-cert-file /etc/certs/ca.crt
-sentinel monitor mymaster $LOCAL_IP 8000 1
-EOF
-        else
-            cat >>/_sentinel.conf<<EOF
-sentinel monitor mymaster $LOCAL_IP 7000 1
-EOF
-        fi
-        redis-server /_sentinel.conf --sentinel
-    fi
-    REDIS_LOAD_FLG=false
-done
-
-exit 0;
@@ -0,0 +1,14 @@
+bind :: 0.0.0.0
+port 6379
+requirepass public
+
+protected-mode no
+daemonize no
+
+loglevel notice
+logfile ""
+
+always-show-logo no
+save ""
+appendonly no
+
@@ -0,0 +1,7 @@
+sentinel resolve-hostnames yes
+bind :: 0.0.0.0
+
+sentinel monitor mymaster redis-sentinel-master 6379 1
+sentinel auth-pass mymaster public
+sentinel down-after-milliseconds mymaster 10000
+sentinel failover-timeout mymaster 20000
@@ -0,0 +1,17 @@
+bind :: 0.0.0.0
+port 6379
+requirepass public
+
+replicaof redis-sentinel-master 6379
+masterauth public
+
+protected-mode no
+daemonize no
+
+loglevel notice
+logfile ""
+
+always-show-logo no
+save ""
+appendonly no
+
@@ -0,0 +1,20 @@
+bind :: 0.0.0.0
+port 6379
+requirepass public
+
+tls-port 6389
+tls-cert-file /etc/certs/cert.pem
+tls-key-file /etc/certs/key.pem
+tls-ca-cert-file /etc/certs/cacert.pem
+tls-auth-clients no
+
+protected-mode no
+daemonize no
+
+loglevel notice
+logfile ""
+
+always-show-logo no
+save ""
+appendonly no
+
@@ -0,0 +1,14 @@
+sentinel resolve-hostnames yes
+bind :: 0.0.0.0
+
+tls-port 26380
+tls-replication yes
+tls-cert-file /etc/certs/cert.pem
+tls-key-file /etc/certs/key.pem
+tls-ca-cert-file /etc/certs/cacert.pem
+tls-auth-clients no
+
+sentinel monitor mymaster redis-sentinel-tls-master 6389 1
+sentinel auth-pass mymaster public
+sentinel down-after-milliseconds mymaster 10000
+sentinel failover-timeout mymaster 20000
@@ -0,0 +1,24 @@
+bind :: 0.0.0.0
+port 6379
+requirepass public
+
+replicaof redis-sentinel-tls-master 6389
+masterauth public
+
+tls-port 6389
+tls-replication yes
+tls-cert-file /etc/certs/cert.pem
+tls-key-file /etc/certs/key.pem
+tls-ca-cert-file /etc/certs/cacert.pem
+tls-auth-clients no
+
+protected-mode no
+daemonize no
+
+loglevel notice
+logfile ""
+
+always-show-logo no
+save ""
+appendonly no
+
@@ -57,10 +57,6 @@ jobs:
       run: |
         make ${EMQX_NAME}-tgz
        ./scripts/pkg-tests.sh ${EMQX_NAME}-tgz
-    - name: run static checks
-      if: contains(matrix.os, 'ubuntu')
-      run: |
-        make static_checks
     - name: build and test deb/rpm packages
       run: |
        make ${EMQX_NAME}-pkg
@@ -4,13 +4,13 @@ concurrency:
   group: relup-${{ github.event_name }}-${{ github.ref }}
   cancel-in-progress: true

-on:
-  push:
-    branches:
-      - '**'
-    tags:
-      - e*
-  pull_request:
+# on:
+#   push:
+#     branches:
+#       - '**'
+#     tags:
+#       - e*
+#   pull_request:

 jobs:
   relup_test_plan:
@@ -77,6 +77,7 @@ jobs:
          make ensure-rebar3
          # fetch all deps and compile
          make ${{ matrix.profile }}
+          make static_checks
          make test-compile
          cd ..
          zip -ryq source.zip source/* source/.[^.]*
@@ -155,11 +156,11 @@ jobs:
        working-directory: source
        env:
          DOCKER_CT_RUNNER_IMAGE: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04"
-          MONGO_TAG: 5
-          MYSQL_TAG: 8
-          PGSQL_TAG: 13
-          REDIS_TAG: 6
-          INFLUXDB_TAG: 2.5.0
+          MONGO_TAG: "5"
+          MYSQL_TAG: "8"
+          PGSQL_TAG: "13"
+          REDIS_TAG: "7.0"
+          INFLUXDB_TAG: "2.5.0"
          PROFILE: ${{ matrix.profile }}
          CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }}
        run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }}
Makefile
@@ -6,8 +6,8 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-26:1.13.4-24.3.4.2-1
 export EMQX_DEFAULT_RUNNER = debian:11-slim
 export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
 export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
-export EMQX_DASHBOARD_VERSION ?= v1.1.6
-export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1
+export EMQX_DASHBOARD_VERSION ?= v1.1.7
+export EMQX_EE_DASHBOARD_VERSION ?= e1.0.3
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
 ifeq ($(OS),Windows_NT)
|
||||||
ct: $(REBAR) merge-config
|
ct: $(REBAR) merge-config
|
||||||
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
|
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct
|
||||||
|
|
||||||
|
## only check bpapi for enterprise profile because it's a super-set.
|
||||||
.PHONY: static_checks
|
.PHONY: static_checks
|
||||||
static_checks:
|
static_checks:
|
||||||
@$(REBAR) as check do dialyzer, xref, ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE)
|
@$(REBAR) as check do dialyzer, xref
|
||||||
|
@if [ "$${PROFILE}" = 'emqx-enterprise' ]; then $(REBAR) ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE); fi
|
||||||
|
|
||||||
APPS=$(shell $(SCRIPTS)/find-apps.sh)
|
APPS=$(shell $(SCRIPTS)/find-apps.sh)
|
||||||
|
|
||||||
|
|
|
@@ -46,8 +46,8 @@ emqx_schema {

    overload_protection_backoff_delay {
        desc {
-            en: "When at high load, some unimportant tasks could be delayed for execution, here set the duration in milliseconds precision."
-            zh: "高负载时,一些不重要的任务可能会延迟执行,在这里设置允许延迟的时间。单位为毫秒。"
+            en: "The maximum duration of delay for background task execution during high load conditions."
+            zh: "高负载时,一些不重要的任务可能会延迟执行,在这里设置允许延迟的时间。"
        }
        label {
            en: "Delay Time"
@@ -188,8 +188,12 @@ emqx_schema {

    sysmon_vm_long_gc {
        desc {
-            en: "Enable Long GC monitoring."
-            zh: "启用长垃圾回收监控。"
+            en: """When an Erlang process spends long time to perform garbage collection, a warning level <code>long_gc</code> log is emitted,
+and an MQTT message is published to the system topic <code>$SYS/sysmon/long_gc</code>.
+"""
+            zh: """当系统检测到某个 Erlang 进程垃圾回收占用过长时间,会触发一条带有 <code>long_gc</code> 关键字的日志。
+同时还会发布一条主题为 <code>$SYS/sysmon/long_gc</code> 的 MQTT 系统消息。
+"""
        }
        label {
            en: "Enable Long GC monitoring."
@@ -199,8 +203,12 @@ emqx_schema {

    sysmon_vm_long_schedule {
        desc {
-            en: "Enable Long Schedule monitoring."
-            zh: "启用长调度监控。"
+            en: """When the Erlang VM detect a task scheduled for too long, a warning level 'long_schedule' log is emitted,
+and an MQTT message is published to the system topic <code>$SYS/sysmon/long_schedule</code>.
+"""
+            zh: """启用后,如果 Erlang VM 调度器出现某个任务占用时间过长时,会触发一条带有 'long_schedule' 关键字的日志。
+同时还会发布一条主题为 <code>$SYS/sysmon/long_schedule</code> 的 MQTT 系统消息。
+"""
        }
        label {
            en: "Enable Long Schedule monitoring."
@@ -210,8 +218,13 @@ emqx_schema {

    sysmon_vm_large_heap {
        desc {
-            en: "Enable Large Heap monitoring."
-            zh: "启用大 heap 监控。"
+            en: """When an Erlang process consumed a large amount of memory for its heap space,
+the system will write a warning level <code>large_heap</code> log, and an MQTT message is published to
+the system topic <code>$SYS/sysmon/large_heap</code>.
+"""
+            zh: """启用后,当一个 Erlang 进程申请了大量内存,系统会触发一条带有 <code>large_heap</code> 关键字的
+warning 级别日志。同时还会发布一条主题为 <code>$SYS/sysmon/busy_dist_port</code> 的 MQTT 系统消息。
+"""
        }
        label {
            en: "Enable Large Heap monitoring."
@@ -221,8 +234,13 @@ emqx_schema {

    sysmon_vm_busy_dist_port {
        desc {
-            en: "Enable Busy Distribution Port monitoring."
-            zh: "启用分布式端口过忙监控。"
+            en: """When the RPC connection used to communicate with other nodes in the cluster is overloaded,
+there will be a <code>busy_dist_port</code> warning log,
+and an MQTT message is published to system topic <code>$SYS/sysmon/busy_dist_port</code>.
+"""
+            zh: """启用后,当用于集群接点之间 RPC 的连接过忙时,会触发一条带有 <code>busy_dist_port</code> 关键字的 warning 级别日志。
+同时还会发布一条主题为 <code>$SYS/sysmon/busy_dist_port</code> 的 MQTT 系统消息。
+"""
        }
        label {
            en: "Enable Busy Distribution Port monitoring."
@@ -232,8 +250,12 @@ emqx_schema {

    sysmon_vm_busy_port {
        desc {
-            en: "Enable Busy Port monitoring."
-            zh: "启用端口过忙监控。"
+            en: """When a port (e.g. TCP socket) is overloaded, there will be a <code>busy_port</code> warning log,
+and an MQTT message is published to the system topic <code>$SYS/sysmon/busy_port</code>.
+"""
+            zh: """当一个系统接口(例如 TCP socket)过忙,会触发一条带有 <code>busy_port</code> 关键字的 warning 级别的日志。
+同时还会发布一条主题为 <code>$SYS/sysmon/busy_port</code> 的 MQTT 系统消息。
+"""
        }
        label {
            en: "Enable Busy Port monitoring."
@@ -32,10 +32,10 @@
 %% `apps/emqx/src/bpapi/README.md'

 %% Community edition
--define(EMQX_RELEASE_CE, "5.0.15").
+-define(EMQX_RELEASE_CE, "5.0.16").

 %% Enterprise edition
--define(EMQX_RELEASE_EE, "5.0.0-rc.1").
+-define(EMQX_RELEASE_EE, "5.0.0").

 %% the HTTP API version
 -define(EMQX_API_VERSION, "5.0").
@@ -48,9 +48,9 @@
 -define(TRACE(Level, Tag, Msg, Meta), begin
     case persistent_term:get(?TRACE_FILTER, []) of
         [] -> ok;
-        %% We can't bind filter list to a variablebecause we pollute the calling scope with it.
+        %% We can't bind filter list to a variable because we pollute the calling scope with it.
         %% We also don't want to wrap the macro body in a fun
-        %% beacause this adds overhead to the happy path.
+        %% because this adds overhead to the happy path.
         %% So evaluate `persistent_term:get` twice.
         _ -> emqx_trace:log(persistent_term:get(?TRACE_FILTER, []), Msg, (Meta)#{trace_tag => Tag})
     end,
@@ -29,7 +29,7 @@
    {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}},
    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.9"}}},
    {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
-    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.35.0"}}},
+    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.35.3"}}},
    {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
    {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
    {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}
@@ -18,6 +18,7 @@
 -behaviour(emqx_config_handler).

 %% API
+-export([tr_handlers/1, tr_level/1]).
 -export([add_handler/0, remove_handler/0, refresh_config/0]).
 -export([post_config_update/5]).
@@ -37,38 +38,238 @@ remove_handler() ->
 %% so we need to refresh the logger config after this node starts.
 %% It will not affect the logger config when cluster-override.conf is unchanged.
 refresh_config() ->
-    case emqx:get_raw_config(?LOG, undefined) of
-        %% no logger config when CT is running.
-        undefined ->
-            ok;
-        Log ->
-            {ok, _} = emqx:update_config(?LOG, Log),
-            ok
-    end.
+    Overrides = emqx_config:read_override_confs(),
+    refresh_config(Overrides).

-post_config_update(?LOG, _Req, _NewConf, _OldConf, AppEnvs) ->
-    Kernel = proplists:get_value(kernel, AppEnvs),
-    NewHandlers = proplists:get_value(logger, Kernel, []),
-    Level = proplists:get_value(logger_level, Kernel, warning),
-    ok = update_log_handlers(NewHandlers),
-    ok = emqx_logger:set_primary_log_level(Level),
-    application:set_env(kernel, logger_level, Level),
-    ok;
+refresh_config(#{<<"log">> := _}) ->
+    %% read the checked config
+    LogConfig = emqx:get_config(?LOG, undefined),
+    Conf = #{log => LogConfig},
+    ok = do_refresh_config(Conf);
+refresh_config(_) ->
+    %% No config override found for 'log', do nothing
+    %% because the 'kernel' app should already be configured
+    %% from the base configs. i.e. emqx.conf + env vars
+    ok.
+
+%% this call is shared between initial config refresh at boot
+%% and dynamic config update from HTTP API
+do_refresh_config(Conf) ->
+    Handlers = tr_handlers(Conf),
+    ok = update_log_handlers(Handlers),
+    Level = tr_level(Conf),
+    ok = maybe_update_log_level(Level),
+    ok.
+
+post_config_update(?LOG, _Req, NewConf, _OldConf, _AppEnvs) ->
+    ok = do_refresh_config(#{log => NewConf});
 post_config_update(_ConfPath, _Req, _NewConf, _OldConf, _AppEnvs) ->
     ok.

+maybe_update_log_level(NewLevel) ->
+    OldLevel = emqx_logger:get_primary_log_level(),
+    case OldLevel =:= NewLevel of
+        true ->
+            %% no change
+            ok;
+        false ->
+            ok = emqx_logger:set_primary_log_level(NewLevel),
+            %% also update kernel's logger_level for troubleshooting
+            %% what is actually in effect is the logger's primary log level
+            ok = application:set_env(kernel, logger_level, NewLevel),
+            log_to_console("Config override: log level is set to '~p'~n", [NewLevel])
+    end.
+
+log_to_console(Fmt, Args) ->
+    io:format(standard_error, Fmt, Args).
+
 update_log_handlers(NewHandlers) ->
     OldHandlers = application:get_env(kernel, logger, []),
-    lists:foreach(
-        fun({handler, HandlerId, _Mod, _Conf}) ->
-            logger:remove_handler(HandlerId)
-        end,
-        OldHandlers -- NewHandlers
-    ),
-    lists:foreach(
-        fun({handler, HandlerId, Mod, Conf}) ->
-            logger:add_handler(HandlerId, Mod, Conf)
-        end,
-        NewHandlers -- OldHandlers
-    ),
-    application:set_env(kernel, logger, NewHandlers).
+    NewHandlersIds = lists:map(fun({handler, Id, _Mod, _Conf}) -> Id end, NewHandlers),
+    OldHandlersIds = lists:map(fun({handler, Id, _Mod, _Conf}) -> Id end, OldHandlers),
+    Removes = lists:map(fun(Id) -> {removed, Id} end, OldHandlersIds -- NewHandlersIds),
+    MapFn = fun({handler, Id, Mod, Conf} = Handler) ->
+        case lists:keyfind(Id, 2, OldHandlers) of
+            {handler, Id, Mod, Conf} ->
+                %% no change
+                false;
+            {handler, Id, _Mod, _Conf} ->
+                {true, {updated, Handler}};
+            false ->
+                {true, {enabled, Handler}}
+        end
+    end,
+    AddsAndUpdates = lists:filtermap(MapFn, NewHandlers),
+    lists:foreach(fun update_log_handler/1, Removes ++ AddsAndUpdates),
+    ok = application:set_env(kernel, logger, NewHandlers),
+    ok.
+
+update_log_handler({removed, Id}) ->
+    log_to_console("Config override: ~s is removed~n", [id_for_log(Id)]),
+    logger:remove_handler(Id);
+update_log_handler({Action, {handler, Id, Mod, Conf}}) ->
+    log_to_console("Config override: ~s is ~p~n", [id_for_log(Id), Action]),
+    % may return {error, {not_found, Id}}
+    _ = logger:remove_handler(Id),
+    case logger:add_handler(Id, Mod, Conf) of
+        ok ->
+            ok;
+        %% Don't crash here, otherwise the cluster rpc will retry the wrong handler forever.
+        {error, Reason} ->
+            log_to_console(
+                "Config override: ~s is ~p, but failed to add handler: ~p~n",
+                [id_for_log(Id), Action, Reason]
+            )
+    end,
+    ok.
+
+id_for_log(console) -> "log.console_handler";
+id_for_log(Other) -> "log.file_handlers." ++ atom_to_list(Other).
+
+atom(Id) when is_binary(Id) -> binary_to_atom(Id, utf8);
+atom(Id) when is_atom(Id) -> Id.
+
+%% @doc Translate raw config to app-env compatible log handler configs list.
+tr_handlers(Conf) ->
+    %% mute the default handler
+    tr_console_handler(Conf) ++
+        tr_file_handlers(Conf).
+
+%% For the default logger that outputs to console
+tr_console_handler(Conf) ->
+    case conf_get("log.console_handler.enable", Conf) of
+        true ->
+            ConsoleConf = conf_get("log.console_handler", Conf),
+            [
+                {handler, console, logger_std_h, #{
+                    level => conf_get("log.console_handler.level", Conf),
+                    config => (log_handler_conf(ConsoleConf))#{type => standard_io},
+                    formatter => log_formatter(ConsoleConf),
+                    filters => log_filter(ConsoleConf)
+                }}
+            ];
+        false ->
+            []
+    end.
+
+%% For the file logger
+tr_file_handlers(Conf) ->
+    Handlers = logger_file_handlers(Conf),
+    lists:map(fun tr_file_handler/1, Handlers).
+
+tr_file_handler({HandlerName, SubConf}) ->
+    {handler, atom(HandlerName), logger_disk_log_h, #{
+        level => conf_get("level", SubConf),
+        config => (log_handler_conf(SubConf))#{
+            type =>
+                case conf_get("rotation.enable", SubConf) of
+                    true -> wrap;
+                    _ -> halt
+                end,
+            file => conf_get("file", SubConf),
+            max_no_files => conf_get("rotation.count", SubConf),
+            max_no_bytes => conf_get("max_size", SubConf)
+        },
+        formatter => log_formatter(SubConf),
+        filters => log_filter(SubConf),
+        filesync_repeat_interval => no_repeat
+    }}.
+
+logger_file_handlers(Conf) ->
+    Handlers = maps:to_list(conf_get("log.file_handlers", Conf, #{})),
+    lists:filter(
+        fun({_Name, Opts}) ->
+            B = conf_get("enable", Opts),
+            true = is_boolean(B),
+            B
+        end,
+        Handlers
+    ).
+
+conf_get(Key, Conf) -> emqx_schema:conf_get(Key, Conf).
+conf_get(Key, Conf, Default) -> emqx_schema:conf_get(Key, Conf, Default).
+
+log_handler_conf(Conf) ->
+    SycModeQlen = conf_get("sync_mode_qlen", Conf),
+    DropModeQlen = conf_get("drop_mode_qlen", Conf),
+    FlushQlen = conf_get("flush_qlen", Conf),
+    Overkill = conf_get("overload_kill", Conf),
+    BurstLimit = conf_get("burst_limit", Conf),
+    #{
+        sync_mode_qlen => SycModeQlen,
+        drop_mode_qlen => DropModeQlen,
+        flush_qlen => FlushQlen,
+        overload_kill_enable => conf_get("enable", Overkill),
+        overload_kill_qlen => conf_get("qlen", Overkill),
+        overload_kill_mem_size => conf_get("mem_size", Overkill),
+        overload_kill_restart_after => conf_get("restart_after", Overkill),
+        burst_limit_enable => conf_get("enable", BurstLimit),
+        burst_limit_max_count => conf_get("max_count", BurstLimit),
+        burst_limit_window_time => conf_get("window_time", BurstLimit)
+    }.
+
+log_formatter(Conf) ->
+    CharsLimit =
+        case conf_get("chars_limit", Conf) of
+            unlimited -> unlimited;
+            V when V > 0 -> V
+        end,
+    TimeOffSet =
+        case conf_get("time_offset", Conf) of
+            "system" -> "";
+            "utc" -> 0;
+            OffSetStr -> OffSetStr
+        end,
+    SingleLine = conf_get("single_line", Conf),
+    Depth = conf_get("max_depth", Conf),
+    do_formatter(conf_get("formatter", Conf), CharsLimit, SingleLine, TimeOffSet, Depth).
+
+%% helpers
+do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth) ->
+    {emqx_logger_jsonfmt, #{
+        chars_limit => CharsLimit,
+        single_line => SingleLine,
+        time_offset => TimeOffSet,
+        depth => Depth
+    }};
+do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth) ->
+    {emqx_logger_textfmt, #{
+        template => [time, " [", level, "] ", msg, "\n"],
+        chars_limit => CharsLimit,
+        single_line => SingleLine,
+        time_offset => TimeOffSet,
+        depth => Depth
+    }}.
+
+log_filter(Conf) ->
+    case conf_get("supervisor_reports", Conf) of
+        error -> [{drop_progress_reports, {fun logger_filters:progress/2, stop}}];
+        progress -> []
+    end.
+
+tr_level(Conf) ->
+    ConsoleLevel = conf_get("log.console_handler.level", Conf, undefined),
+    FileLevels = [
+        conf_get("level", SubConf)
+     || {_, SubConf} <-
+            logger_file_handlers(Conf)
+    ],
+    case FileLevels ++ [ConsoleLevel || ConsoleLevel =/= undefined] of
+        %% warning is the default level we should use
+        [] -> warning;
+        Levels -> least_severe_log_level(Levels)
+    end.
+
+least_severe_log_level(Levels) ->
+    hd(sort_log_levels(Levels)).
+
+sort_log_levels(Levels) ->
+    lists:sort(
+        fun(A, B) ->
+            case logger:compare_levels(A, B) of
+                R when R == lt; R == eq -> true;
+                gt -> false
+            end
+        end,
+        Levels
+    ).
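Note: the refactored module now builds the OTP logger handler tuples itself (tr_handlers/1) instead of reading them back from the kernel app env. For orientation, a minimal Erlang shell sketch of how one such {handler, Id, Mod, Conf} tuple is applied; logger:remove_handler/1 and logger:add_handler/3 are standard OTP calls, while the id and config values below are illustrative, not taken from this diff:

    1> Handler = {handler, console, logger_std_h,
                  #{level => warning, config => #{type => standard_io}}}.
    2> {handler, Id, Mod, Conf} = Handler.
    3> logger:remove_handler(Id).   %% may return {error, {not_found, console}}
    4> logger:add_handler(Id, Mod, Conf).
    ok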
@@ -3,7 +3,7 @@
    {id, "emqx"},
    {description, "EMQX Core"},
    % strict semver, bump manually!
-    {vsn, "5.0.16"},
+    {vsn, "5.0.17"},
    {modules, []},
    {registered, []},
    {applications, [
@@ -325,19 +325,20 @@ deactivate_alarm(
        false ->
            ok
    end,
+    Now = erlang:system_time(microsecond),
    HistoryAlarm = make_deactivated_alarm(
        ActivateAt,
        Name,
        Details0,
        Msg0,
-        erlang:system_time(microsecond)
+        Now
    ),
    DeActAlarm = make_deactivated_alarm(
        ActivateAt,
        Name,
        Details,
        normalize_message(Name, iolist_to_binary(Message)),
-        erlang:system_time(microsecond)
+        Now
    ),
    mria:dirty_write(?DEACTIVATED_ALARM, HistoryAlarm),
    mria:dirty_delete(?ACTIVATED_ALARM, Name),
@@ -152,7 +152,7 @@ start_link() ->
 insert_channel_info(ClientId, Info, Stats) ->
    Chan = {ClientId, self()},
    true = ets:insert(?CHAN_INFO_TAB, {Chan, Info, Stats}),
-    ?tp(debug, insert_channel_info, #{client_id => ClientId}),
+    ?tp(debug, insert_channel_info, #{clientid => ClientId}),
    ok.

 %% @private
@@ -24,6 +24,7 @@
    init_load/2,
    init_load/3,
    read_override_conf/1,
+    read_override_confs/0,
    delete_override_conf_files/0,
    check_config/2,
    fill_defaults/1,
@@ -326,9 +327,7 @@ init_load(SchemaMod, RawConf, Opts) when is_map(RawConf) ->
    ok = save_schema_mod_and_names(SchemaMod),
    %% Merge environment variable overrides on top
    RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
-    ClusterOverrides = read_override_conf(#{override_to => cluster}),
-    LocalOverrides = read_override_conf(#{override_to => local}),
-    Overrides = hocon:deep_merge(ClusterOverrides, LocalOverrides),
+    Overrides = read_override_confs(),
    RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides),
    RootNames = get_root_names(),
    RawConfAll = raw_conf_with_default(SchemaMod, RootNames, RawConfWithOverrides, Opts),
@@ -337,6 +336,12 @@ init_load(SchemaMod, RawConf, Opts) when is_map(RawConf) ->
    save_to_app_env(AppEnvs),
    ok = save_to_config_map(CheckedConf, RawConfAll).

+%% @doc Read merged cluster + local overrides.
+read_override_confs() ->
+    ClusterOverrides = read_override_conf(#{override_to => cluster}),
+    LocalOverrides = read_override_conf(#{override_to => local}),
+    hocon:deep_merge(ClusterOverrides, LocalOverrides).
+
 %% keep the raw and non-raw conf has the same keys to make update raw conf easier.
 raw_conf_with_default(SchemaMod, RootNames, RawConf, #{raw_with_default := true}) ->
    Fun = fun(Name, Acc) ->
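Note: hocon:deep_merge/2 is the same call init_load/3 uses above; assuming it gives its second argument precedence on conflicting keys (which the cluster-then-local order here implies), local overrides win over cluster ones. An illustrative shell sketch with made-up values:

    1> Cluster = #{<<"log">> => #{<<"console_handler">> => #{<<"level">> => <<"warning">>}}}.
    2> Local = #{<<"log">> => #{<<"console_handler">> => #{<<"level">> => <<"debug">>}}}.
    3> hocon:deep_merge(Cluster, Local).
    #{<<"log">> => #{<<"console_handler">> => #{<<"level">> => <<"debug">>}}}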
@@ -424,7 +429,13 @@ check_config(SchemaMod, RawConf, Opts0) ->
 %% it's maybe too much when reporting to the user
 -spec compact_errors(any(), any()) -> no_return().
 compact_errors(Schema, [Error0 | More]) when is_map(Error0) ->
-    Error1 = Error0#{discarded_errors_count => length(More)},
+    Error1 =
+        case length(More) of
+            0 ->
+                Error0;
+            _ ->
+                Error0#{unshown_errors => length(More)}
+        end,
    Error =
        case is_atom(Schema) of
            true ->
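Note: the renamed key is now only added when errors were actually discarded. A sketch of the new branch with hypothetical error maps:

    1> Compact = fun(Error0, More) ->
                     case length(More) of
                         0 -> Error0;
                         _ -> Error0#{unshown_errors => length(More)}
                     end
                 end.
    2> Compact(#{reason => bad_value}, []).
    #{reason => bad_value}
    3> Compact(#{reason => bad_value}, [e2, e3]).
    #{reason => bad_value, unshown_errors => 2}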
@@ -581,7 +592,6 @@ save_to_override_conf(RawConf, Opts) ->
 add_handlers() ->
    ok = emqx_config_logger:add_handler(),
    emqx_sys_mon:add_handler(),
-    emqx_config_logger:refresh_config(),
    ok.

 remove_handlers() ->
@@ -593,8 +603,16 @@ load_hocon_file(FileName, LoadType) ->
    case filelib:is_regular(FileName) of
        true ->
            Opts = #{include_dirs => include_dirs(), format => LoadType},
-            {ok, Raw0} = hocon:load(FileName, Opts),
-            Raw0;
+            case hocon:load(FileName, Opts) of
+                {ok, Raw0} ->
+                    Raw0;
+                {error, Reason} ->
+                    throw(#{
+                        msg => failed_to_load_conf,
+                        reason => Reason,
+                        file => FileName
+                    })
+            end;
        false ->
            #{}
    end.
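Note: the throw replaces what used to be a badmatch crash on {error, Reason} with a structured error. A hypothetical caller-side sketch of the new failure shape (the wrapper fun below stands in for load_hocon_file/2):

    1> Load = fun(FileName, Result) ->
                  case Result of
                      {ok, Raw0} -> Raw0;
                      {error, Reason} ->
                          throw(#{msg => failed_to_load_conf, reason => Reason, file => FileName})
                  end
              end.
    2> catch Load("emqx.conf", {error, enoent}).
    #{msg => failed_to_load_conf, reason => enoent, file => "emqx.conf"}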
@@ -550,6 +550,7 @@ handle_msg(
    },
    handle_incoming(Packet, NState);
 handle_msg({incoming, Packet}, State) ->
+    ?TRACE("MQTT", "mqtt_packet_received", #{packet => Packet}),
    handle_incoming(Packet, State);
 handle_msg({outgoing, Packets}, State) ->
    handle_outgoing(Packets, State);
@@ -731,6 +732,12 @@ handle_timeout(TRef, Msg, State) ->
 %% Parse incoming data
 -compile({inline, [when_bytes_in/3]}).
 when_bytes_in(Oct, Data, State) ->
+    ?SLOG(debug, #{
+        msg => "raw_bin_received",
+        size => Oct,
+        bin => binary_to_list(binary:encode_hex(Data)),
+        type => "hex"
+    }),
    {Packets, NState} = parse_incoming(Data, [], State),
    Len = erlang:length(Packets),
    check_limiter(
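Note: binary:encode_hex/1 is a standard OTP (24+) call; wrapping it in binary_to_list/1 keeps the logged bin field a printable string. For example:

    1> binary_to_list(binary:encode_hex(<<16#10, 16#02>>)).
    "1002"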
@@ -783,7 +790,6 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->

 handle_incoming(Packet, State) when is_record(Packet, mqtt_packet) ->
    ok = inc_incoming_stats(Packet),
-    ?TRACE("MQTT", "mqtt_packet_received", #{packet => Packet}),
    with_channel(handle_in, [Packet], State);
 handle_incoming(FrameError, State) ->
    with_channel(handle_in, [FrameError], State).
@@ -22,20 +22,49 @@

 check_config(X) -> logger_formatter:check_config(X).

-format(#{msg := {report, Report0}, meta := Meta} = Event, Config) when is_map(Report0) ->
-    Report1 = enrich_report_mfa(Report0, Meta),
-    Report2 = enrich_report_clientid(Report1, Meta),
-    Report3 = enrich_report_peername(Report2, Meta),
-    Report4 = enrich_report_topic(Report3, Meta),
-    logger_formatter:format(Event#{msg := {report, Report4}}, Config);
+format(#{msg := {report, ReportMap}, meta := Meta} = Event, Config) when is_map(ReportMap) ->
+    Report = enrich_report(ReportMap, Meta),
+    logger_formatter:format(Event#{msg := {report, Report}}, Config);
 format(#{msg := {string, String}} = Event, Config) ->
    format(Event#{msg => {"~ts ", [String]}}, Config);
+%% trace
 format(#{msg := Msg0, meta := Meta} = Event, Config) ->
    Msg1 = enrich_client_info(Msg0, Meta),
    Msg2 = enrich_mfa(Msg1, Meta),
    Msg3 = enrich_topic(Msg2, Meta),
    logger_formatter:format(Event#{msg := Msg3}, Config).

+enrich_report(ReportRaw, Meta) ->
+    %% clientid and peername always in emqx_conn's process metadata.
+    %% topic can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2
+    Topic =
+        case maps:get(topic, Meta, undefined) of
+            undefined -> maps:get(topic, ReportRaw, undefined);
+            Topic0 -> Topic0
+        end,
+    ClientId = maps:get(clientid, Meta, undefined),
+    Peer = maps:get(peername, Meta, undefined),
+    MFA = maps:get(mfa, Meta, undefined),
+    Line = maps:get(line, Meta, undefined),
+    Msg = maps:get(msg, ReportRaw, undefined),
+    lists:foldl(
+        fun
+            ({_, undefined}, Acc) -> Acc;
+            (Item, Acc) -> [Item | Acc]
+        end,
+        maps:to_list(maps:without([topic, msg, clientid], ReportRaw)),
+        [
+            {topic, try_format_unicode(Topic)},
+            {clientid, try_format_unicode(ClientId)},
+            {peername, Peer},
+            {line, Line},
+            {mfa, mfa(MFA)},
+            {msg, Msg}
+        ]
+    ).
+
+try_format_unicode(undefined) ->
+    undefined;
 try_format_unicode(Char) ->
    List =
        try
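Note: the single lists:foldl/3 pass prepends every field that is not undefined onto the report's own key/value pairs, replacing the chain of enrich_report_* helpers removed in the next hunk. A sketch with made-up fields:

    1> F = fun({_, undefined}, Acc) -> Acc;
              (Item, Acc) -> [Item | Acc]
           end.
    2> lists:foldl(F, [{event, connected}], [{peername, undefined}, {clientid, <<"c1">>}]).
    [{clientid,<<"c1">>},{event,connected}]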
@@ -53,30 +82,6 @@ try_format_unicode(Char) ->
        _ -> List
    end.

-enrich_report_mfa(Report, #{mfa := Mfa, line := Line}) ->
-    Report#{mfa => mfa(Mfa), line => Line};
-enrich_report_mfa(Report, _) ->
-    Report.
-
-enrich_report_clientid(Report, #{clientid := ClientId}) ->
-    Report#{clientid => try_format_unicode(ClientId)};
-enrich_report_clientid(Report, _) ->
-    Report.
-
-enrich_report_peername(Report, #{peername := Peername}) ->
-    Report#{peername => Peername};
-enrich_report_peername(Report, _) ->
-    Report.
-
-%% clientid and peername always in emqx_conn's process metadata.
-%% topic can be put in meta using ?SLOG/3, or put in msg's report by ?SLOG/2
-enrich_report_topic(Report, #{topic := Topic}) ->
-    Report#{topic => try_format_unicode(Topic)};
-enrich_report_topic(Report = #{topic := Topic}, _) ->
-    Report#{topic => try_format_unicode(Topic)};
-enrich_report_topic(Report, _) ->
-    Report.
-
 enrich_mfa({Fmt, Args}, #{mfa := Mfa, line := Line}) when is_list(Fmt) ->
    {Fmt ++ " mfa: ~ts line: ~w", Args ++ [mfa(Mfa), Line]};
 enrich_mfa(Msg, _) ->
@@ -96,4 +101,5 @@ enrich_topic({Fmt, Args}, #{topic := Topic}) when is_list(Fmt) ->
 enrich_topic(Msg, _) ->
    Msg.

-mfa({M, F, A}) -> atom_to_list(M) ++ ":" ++ atom_to_list(F) ++ "/" ++ integer_to_list(A).
+mfa(undefined) -> undefined;
+mfa({M, F, A}) -> [atom_to_list(M), ":", atom_to_list(F), "/" ++ integer_to_list(A)].
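Note: the rewritten mfa/1 returns an iolist instead of a flat string built with ++; logger formatters accept either, and the iolist form skips the intermediate concatenations. Equivalence sketch:

    1> iolist_to_binary([atom_to_list(emqx), ":", atom_to_list(start), "/" ++ integer_to_list(0)]).
    <<"emqx:start/0">>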
@@ -609,7 +609,11 @@ do_redact(K, V, Checker) ->

 -define(REDACT_VAL, "******").
 redact_v(V) when is_binary(V) -> <<?REDACT_VAL>>;
-redact_v(_V) -> ?REDACT_VAL.
+%% The HOCON schema system may generate sensitive values with this format
+redact_v([{str, Bin}]) when is_binary(Bin) ->
+    [{str, <<?REDACT_VAL>>}];
+redact_v(_V) ->
+    ?REDACT_VAL.

 is_redacted(K, V) ->
    do_is_redacted(K, V, fun is_sensitive_key/1).
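Note: a sketch of the added clause's behaviour; the [{str, Bin}] shape is the one the comment attributes to the HOCON schema system, and the values are made up:

    1> RedactV = fun Redact(V) when is_binary(V) -> <<"******">>;
                     Redact([{str, Bin}]) when is_binary(Bin) -> [{str, <<"******">>}];
                     Redact(_) -> "******"
                 end.
    2> RedactV([{str, <<"my-secret">>}]).
    [{str,<<"******">>}]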
@@ -93,9 +93,9 @@ init([]) ->
    %% memsup is not reliable, ignore
    memsup:set_sysmem_high_watermark(1.0),
    SysHW = init_os_monitor(),
-    _ = start_mem_check_timer(),
-    _ = start_cpu_check_timer(),
-    {ok, #{sysmem_high_watermark => SysHW}}.
+    MemRef = start_mem_check_timer(),
+    CpuRef = start_cpu_check_timer(),
+    {ok, #{sysmem_high_watermark => SysHW, mem_time_ref => MemRef, cpu_time_ref => CpuRef}}.

 init_os_monitor() ->
    init_os_monitor(emqx:get_config([sysmon, os])).
@@ -125,13 +125,15 @@ handle_cast(Msg, State) ->

 handle_info({timeout, _Timer, mem_check}, #{sysmem_high_watermark := HWM} = State) ->
     ok = update_mem_alarm_status(HWM),
-    ok = start_mem_check_timer(),
-    {noreply, State};
+    Ref = start_mem_check_timer(),
+    {noreply, State#{mem_time_ref => Ref}};
 handle_info({timeout, _Timer, cpu_check}, State) ->
     CPUHighWatermark = emqx:get_config([sysmon, os, cpu_high_watermark]) * 100,
     CPULowWatermark = emqx:get_config([sysmon, os, cpu_low_watermark]) * 100,
-    case emqx_vm:cpu_util() of
-        0 ->
+    CPUVal = emqx_vm:cpu_util(),
+    case CPUVal of
+        %% 0 or 0.0
+        Busy when Busy == 0 ->
             ok;
         Busy when Busy > CPUHighWatermark ->
             _ = emqx_alarm:activate(
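The rewritten zero check uses the == guard deliberately: cpu_util may return the integer 0 or the float 0.0, and arithmetic equality covers both, while the old literal pattern 0 only matched the integer. For reference:

%% `==` compares numerically; `=:=` also requires the same type.
true = (0.0 == 0),
false = (0.0 =:= 0).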
@@ -156,11 +158,14 @@ handle_info({timeout, _Timer, cpu_check}, State) ->
         _Busy ->
             ok
     end,
-    ok = start_cpu_check_timer(),
-    {noreply, State};
-handle_info({monitor_conf_update, OS}, _State) ->
+    Ref = start_cpu_check_timer(),
+    {noreply, State#{cpu_time_ref => Ref}};
+handle_info({monitor_conf_update, OS}, State) ->
+    cancel_outdated_timer(State),
     SysHW = init_os_monitor(OS),
-    {noreply, #{sysmem_high_watermark => SysHW}};
+    MemRef = start_mem_check_timer(),
+    CpuRef = start_cpu_check_timer(),
+    {noreply, #{sysmem_high_watermark => SysHW, mem_time_ref => MemRef, cpu_time_ref => CpuRef}};
 handle_info(Info, State) ->
     ?SLOG(error, #{msg => "unexpected_info", info => Info}),
     {noreply, State}.
@@ -174,11 +179,15 @@ code_change(_OldVsn, State, _Extra) ->
 %%--------------------------------------------------------------------
 %% Internal functions
 %%--------------------------------------------------------------------
+cancel_outdated_timer(#{mem_time_ref := MemRef, cpu_time_ref := CpuRef}) ->
+    emqx_misc:cancel_timer(MemRef),
+    emqx_misc:cancel_timer(CpuRef),
+    ok.

 start_cpu_check_timer() ->
     Interval = emqx:get_config([sysmon, os, cpu_check_interval]),
     case erlang:system_info(system_architecture) of
-        "x86_64-pc-linux-musl" -> ok;
+        "x86_64-pc-linux-musl" -> undefined;
         _ -> start_timer(Interval, cpu_check)
     end.
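Storing the timer references in the state is what makes cancel_outdated_timer/1 possible: on monitor_conf_update the old mem/cpu timers are cancelled before new ones are started, so checks are not duplicated after every config change. A minimal sketch of the pairing with plain erlang timers, assuming emqx_misc:cancel_timer/1 behaves like cancel_check/1 below (tolerating the undefined returned on the musl branch):

start_check(Interval, Msg) ->
    erlang:start_timer(Interval, self(), Msg).

cancel_check(undefined) ->
    ok;
cancel_check(Ref) when is_reference(Ref) ->
    _ = erlang:cancel_timer(Ref),
    ok.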
@@ -191,12 +200,11 @@ start_mem_check_timer() ->
         true ->
             start_timer(Interval, mem_check);
         false ->
-            ok
+            undefined
     end.

 start_timer(Interval, Msg) ->
-    _ = emqx_misc:start_timer(Interval, Msg),
-    ok.
+    emqx_misc:start_timer(Interval, Msg).

 update_mem_alarm_status(HWM) when HWM > 1.0 orelse HWM < 0.0 ->
     ?SLOG(warning, #{msg => "discarded_out_of_range_mem_alarm_threshold", value => HWM}),
@@ -223,7 +231,7 @@ do_update_mem_alarm_status(HWM0) ->
                },
                usage_msg(Usage, mem)
            );
-        _ ->
+        false ->
            ok = emqx_alarm:ensure_deactivated(
                high_system_memory_usage,
                #{
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
usage_msg(Usage, What) ->
|
usage_msg(Usage, What) ->
|
||||||
%% devide by 1.0 to ensure float point number
|
%% divide by 1.0 to ensure float point number
|
||||||
iolist_to_binary(io_lib:format("~.2f% ~p usage", [Usage / 1.0, What])).
|
iolist_to_binary(io_lib:format("~.2f% ~p usage", [Usage / 1.0, What])).
|
||||||
|
|
|
@@ -477,9 +477,13 @@ format(Packet) -> format(Packet, emqx_trace_handler:payload_encode()).
 format(#mqtt_packet{header = Header, variable = Variable, payload = Payload}, PayloadEncode) ->
     HeaderIO = format_header(Header),
     case format_variable(Variable, Payload, PayloadEncode) of
-        "" -> HeaderIO;
-        VarIO -> [HeaderIO, ",", VarIO]
-    end.
+        "" -> [HeaderIO, ")"];
+        VarIO -> [HeaderIO, ", ", VarIO, ")"]
+    end;
+%% receive a frame error packet, such as {frame_error,frame_too_large} or
+%% {frame_error,#{expected => <<"'MQTT' or 'MQIsdp'">>,hint => invalid_proto_name,received => <<"bad_name">>}}
+format(FrameError, _PayloadEncode) ->
+    lists:flatten(io_lib:format("~tp", [FrameError])).

 format_header(#mqtt_packet_header{
     type = Type,
@@ -487,14 +491,14 @@ format_header(#mqtt_packet_header{
     qos = QoS,
     retain = Retain
 }) ->
-    io_lib:format("~ts(Q~p, R~p, D~p)", [type_name(Type), QoS, i(Retain), i(Dup)]).
+    io_lib:format("~ts(Q~p, R~p, D~p", [type_name(Type), QoS, i(Retain), i(Dup)]).

 format_variable(undefined, _, _) ->
     "";
 format_variable(Variable, undefined, PayloadEncode) ->
     format_variable(Variable, PayloadEncode);
 format_variable(Variable, Payload, PayloadEncode) ->
-    [format_variable(Variable, PayloadEncode), ",", format_payload(Payload, PayloadEncode)].
+    [format_variable(Variable, PayloadEncode), ", ", format_payload(Payload, PayloadEncode)].

 format_variable(
     #mqtt_packet_connect{
@@ -1815,16 +1815,12 @@ desc(_) ->
 %% utils
 -spec conf_get(string() | [string()], hocon:config()) -> term().
 conf_get(Key, Conf) ->
-    V = hocon_maps:get(Key, Conf),
-    case is_binary(V) of
-        true ->
-            binary_to_list(V);
-        false ->
-            V
-    end.
+    ensure_list(hocon_maps:get(Key, Conf)).

 conf_get(Key, Conf, Default) ->
-    V = hocon_maps:get(Key, Conf, Default),
+    ensure_list(hocon_maps:get(Key, Conf, Default)).
+
+ensure_list(V) ->
     case is_binary(V) of
         true ->
             binary_to_list(V);
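The refactor removes the copy-pasted binary-to-string fallback from both conf_get arities and names it ensure_list/1. Usage sketch:

%% Binaries become charlists; everything else passes through.
"localhost" = ensure_list(<<"localhost">>),
1883 = ensure_list(1883).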
@@ -175,9 +175,9 @@ schedulers() ->

 loads() ->
     [
-        {load1, ftos(avg1() / 256)},
-        {load5, ftos(avg5() / 256)},
-        {load15, ftos(avg15() / 256)}
+        {load1, load(avg1())},
+        {load5, load(avg5())},
+        {load15, load(avg15())}
     ].

 system_info_keys() -> ?SYSTEM_INFO_KEYS.
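Background for the loads() change above and the load/1 helper added further down: cpu_sup:avg1/5/15 return the load average multiplied by 256, so dividing by 256 recovers the real value, and the floor(... * 100) / 100 trick truncates to two decimals while keeping a number (the old ftos/1 returned a formatted string). Arithmetic check:

0.5 = floor((128 / 256) * 100) / 100,
1.0 = floor((256 / 256) * 100) / 100.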
@@ -232,9 +232,6 @@ mem_info() ->
     Free = proplists:get_value(free_memory, Dataset),
     [{total_memory, Total}, {used_memory, Total - Free}].

-ftos(F) ->
-    io_lib:format("~.2f", [F / 1.0]).
-
 %%%% erlang vm scheduler_usage fun copied from recon
 scheduler_usage(Interval) when is_integer(Interval) ->
     %% We start and stop the scheduler_wall_time system flag
@@ -391,18 +388,32 @@ cpu_util() ->
 compat_windows(Fun) ->
     case os:type() of
         {win32, nt} ->
-            0;
+            0.0;
         _Type ->
             case catch Fun() of
+                Val when is_float(Val) -> floor(Val * 100) / 100;
                 Val when is_number(Val) -> Val;
-                _Error -> 0
+                _Error -> 0.0
             end
     end.

-%% @doc Return on which Eralng/OTP the current vm is running.
-%% NOTE: This API reads a file, do not use it in critical code paths.
+load(Avg) ->
+    floor((Avg / 256) * 100) / 100.
+
+%% @doc Return on which Erlang/OTP the current vm is running.
+%% The dashboard's /api/nodes endpoint will call this function frequently.
+%% we should avoid reading file every time.
+%% The OTP version never changes at runtime expect upgrade erts,
+%% so we cache it in a persistent term for performance.
 get_otp_version() ->
-    read_otp_version().
+    case persistent_term:get(emqx_otp_version, undefined) of
+        undefined ->
+            OtpVsn = read_otp_version(),
+            persistent_term:put(emqx_otp_version, OtpVsn),
+            OtpVsn;
+        OtpVsn when is_binary(OtpVsn) ->
+            OtpVsn
+    end.

 read_otp_version() ->
     ReleasesDir = filename:join([code:root_dir(), "releases"]),
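The get_otp_version/0 rewrite is a textbook persistent_term read-through cache: reads are constant-time and copy-free, puts are expensive, which fits a value that is computed once and never changes. A generic sketch of the pattern (names illustrative, not from the diff):

%% Read-through cache for an immutable value.
cached(Key, ComputeFun) ->
    case persistent_term:get(Key, undefined) of
        undefined ->
            Val = ComputeFun(),
            persistent_term:put(Key, Val),
            Val;
        Val ->
            Val
    end.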
@@ -416,6 +427,8 @@ read_otp_version() ->
             %% running tests etc.
             OtpMajor = erlang:system_info(otp_release),
             OtpVsnFile = filename:join([ReleasesDir, OtpMajor, "OTP_VERSION"]),
-            {ok, Vsn} = file:read_file(OtpVsnFile),
-            Vsn
+            case file:read_file(OtpVsnFile) of
+                {ok, Vsn} -> Vsn;
+                {error, enoent} -> list_to_binary(OtpMajor)
+            end
     end.
@@ -63,7 +63,7 @@ handle_info({timeout, _Timer, check}, State) ->
     ProcessCount = erlang:system_info(process_count),
     case ProcessCount / erlang:system_info(process_limit) of
         Percent when Percent > ProcHighWatermark ->
-            Usage = io_lib:format("~p%", [Percent * 100]),
+            Usage = usage(Percent),
             Message = [Usage, " process usage"],
             emqx_alarm:activate(
                 too_many_processes,
@@ -75,7 +75,7 @@ handle_info({timeout, _Timer, check}, State) ->
                 Message
             );
         Percent when Percent < ProcLowWatermark ->
-            Usage = io_lib:format("~p%", [Percent * 100]),
+            Usage = usage(Percent),
             Message = [Usage, " process usage"],
             emqx_alarm:ensure_deactivated(
                 too_many_processes,
|
||||||
start_check_timer() ->
|
start_check_timer() ->
|
||||||
Interval = emqx:get_config([sysmon, vm, process_check_interval]),
|
Interval = emqx:get_config([sysmon, vm, process_check_interval]),
|
||||||
emqx_misc:start_timer(Interval, check).
|
emqx_misc:start_timer(Interval, check).
|
||||||
|
|
||||||
|
usage(Percent) ->
|
||||||
|
integer_to_list(floor(Percent * 100)) ++ "%".
|
||||||
|
|
|
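The new usage/1 helper also changes the formatting: flooring to an integer yields stable strings such as "34%", where the old io_lib:format("~p%", [Percent * 100]) could print float artifacts like "34.699999999999996%". For example:

"34%" = integer_to_list(floor(0.347 * 100)) ++ "%".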
@@ -399,6 +399,12 @@ get_peer_info(Type, Listener, Req, Opts) ->
 websocket_handle({binary, Data}, State) when is_list(Data) ->
     websocket_handle({binary, iolist_to_binary(Data)}, State);
 websocket_handle({binary, Data}, State) ->
+    ?SLOG(debug, #{
+        msg => "raw_bin_received",
+        size => iolist_size(Data),
+        bin => binary_to_list(binary:encode_hex(Data)),
+        type => "hex"
+    }),
     State2 = ensure_stats_timer(State),
     {Packets, State3} = parse_incoming(Data, [], State2),
     LenMsg = erlang:length(Packets),
|
||||||
NState = State#state{serialize = Serialize},
|
NState = State#state{serialize = Serialize},
|
||||||
handle_incoming(Packet, cancel_idle_timer(NState));
|
handle_incoming(Packet, cancel_idle_timer(NState));
|
||||||
websocket_info({incoming, Packet}, State) ->
|
websocket_info({incoming, Packet}, State) ->
|
||||||
|
?TRACE("WS-MQTT", "mqtt_packet_received", #{packet => Packet}),
|
||||||
handle_incoming(Packet, State);
|
handle_incoming(Packet, State);
|
||||||
websocket_info({outgoing, Packets}, State) ->
|
websocket_info({outgoing, Packets}, State) ->
|
||||||
return(enqueue(Packets, State));
|
return(enqueue(Packets, State));
|
||||||
|
@@ -719,7 +726,6 @@ parse_incoming(Data, Packets, State = #state{parse_state = ParseState}) ->
 handle_incoming(Packet, State = #state{listener = {Type, Listener}}) when
     is_record(Packet, mqtt_packet)
 ->
-    ?TRACE("WS-MQTT", "mqtt_packet_received", #{packet => Packet}),
     ok = inc_incoming_stats(Packet),
     NState =
         case
@@ -65,7 +65,7 @@
    % Reason: legacy code. A fun and a QC query are
    % passed in the args, it's futile to try to statically
    % check it
-    "emqx_mgmt_api:do_query/2, emqx_mgmt_api:collect_total_from_tail_nodes/3"
+    "emqx_mgmt_api:do_query/2, emqx_mgmt_api:collect_total_from_tail_nodes/2"
 ).

 -define(XREF, myxref).
|
||||||
{ok, _} = ?block_until(
|
{ok, _} = ?block_until(
|
||||||
#{
|
#{
|
||||||
?snk_kind := insert_channel_info,
|
?snk_kind := insert_channel_info,
|
||||||
client_id := ClientID
|
clientid := ClientID
|
||||||
},
|
},
|
||||||
2000,
|
2000,
|
||||||
100
|
100
|
||||||
|
|
|
@ -25,25 +25,43 @@ all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
emqx_common_test_helpers:boot_modules(all),
|
emqx_common_test_helpers:boot_modules(all),
|
||||||
emqx_common_test_helpers:start_apps(
|
emqx_common_test_helpers:start_apps([]),
|
||||||
[],
|
|
||||||
fun
|
|
||||||
(emqx) ->
|
|
||||||
application:set_env(emqx, os_mon, [
|
|
||||||
{cpu_check_interval, 1},
|
|
||||||
{cpu_high_watermark, 5},
|
|
||||||
{cpu_low_watermark, 80},
|
|
||||||
{procmem_high_watermark, 5}
|
|
||||||
]);
|
|
||||||
(_) ->
|
|
||||||
ok
|
|
||||||
end
|
|
||||||
),
|
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
end_per_suite(_Config) ->
|
end_per_suite(_Config) ->
|
||||||
emqx_common_test_helpers:stop_apps([]).
|
emqx_common_test_helpers:stop_apps([]).
|
||||||
|
|
||||||
|
init_per_testcase(t_cpu_check_alarm, Config) ->
|
||||||
|
SysMon = emqx_config:get([sysmon, os], #{}),
|
||||||
|
emqx_config:put([sysmon, os], SysMon#{
|
||||||
|
cpu_high_watermark => 0.9,
|
||||||
|
cpu_low_watermark => 0,
|
||||||
|
%% 200ms
|
||||||
|
cpu_check_interval => 200
|
||||||
|
}),
|
||||||
|
ok = supervisor:terminate_child(emqx_sys_sup, emqx_os_mon),
|
||||||
|
{ok, _} = supervisor:restart_child(emqx_sys_sup, emqx_os_mon),
|
||||||
|
Config;
|
||||||
|
init_per_testcase(t_sys_mem_check_alarm, Config) ->
|
||||||
|
case os:type() of
|
||||||
|
{unix, linux} ->
|
||||||
|
SysMon = emqx_config:get([sysmon, os], #{}),
|
||||||
|
emqx_config:put([sysmon, os], SysMon#{
|
||||||
|
sysmem_high_watermark => 0.51,
|
||||||
|
%% 200ms
|
||||||
|
mem_check_interval => 200
|
||||||
|
}),
|
||||||
|
ok = supervisor:terminate_child(emqx_sys_sup, emqx_os_mon),
|
||||||
|
{ok, _} = supervisor:restart_child(emqx_sys_sup, emqx_os_mon),
|
||||||
|
Config;
|
||||||
|
_ ->
|
||||||
|
Config
|
||||||
|
end;
|
||||||
|
init_per_testcase(_, Config) ->
|
||||||
|
emqx_common_test_helpers:boot_modules(all),
|
||||||
|
emqx_common_test_helpers:start_apps([]),
|
||||||
|
Config.
|
||||||
|
|
||||||
t_api(_) ->
|
t_api(_) ->
|
||||||
?assertEqual(60000, emqx_os_mon:get_mem_check_interval()),
|
?assertEqual(60000, emqx_os_mon:get_mem_check_interval()),
|
||||||
?assertEqual(ok, emqx_os_mon:set_mem_check_interval(30000)),
|
?assertEqual(ok, emqx_os_mon:set_mem_check_interval(30000)),
|
||||||
|
@ -67,3 +85,106 @@ t_api(_) ->
|
||||||
emqx_os_mon ! ignored,
|
emqx_os_mon ! ignored,
|
||||||
gen_server:stop(emqx_os_mon),
|
gen_server:stop(emqx_os_mon),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
t_sys_mem_check_alarm(Config) ->
|
||||||
|
case os:type() of
|
||||||
|
{unix, linux} ->
|
||||||
|
do_sys_mem_check_alarm(Config);
|
||||||
|
_ ->
|
||||||
|
skip
|
||||||
|
end.
|
||||||
|
|
||||||
|
do_sys_mem_check_alarm(_Config) ->
|
||||||
|
emqx_config:put([sysmon, os, mem_check_interval], 200),
|
||||||
|
emqx_os_mon:update(emqx_config:get([sysmon, os])),
|
||||||
|
Mem = 0.52345,
|
||||||
|
Usage = floor(Mem * 10000) / 100,
|
||||||
|
emqx_common_test_helpers:with_mock(
|
||||||
|
load_ctl,
|
||||||
|
get_memory_usage,
|
||||||
|
fun() -> Mem end,
|
||||||
|
fun() ->
|
||||||
|
timer:sleep(500),
|
||||||
|
Alarms = emqx_alarm:get_alarms(activated),
|
||||||
|
?assert(
|
||||||
|
emqx_vm_mon_SUITE:is_existing(
|
||||||
|
high_system_memory_usage, emqx_alarm:get_alarms(activated)
|
||||||
|
),
|
||||||
|
#{
|
||||||
|
load_ctl_memory => load_ctl:get_memory_usage(),
|
||||||
|
config => emqx_config:get([sysmon, os]),
|
||||||
|
process => sys:get_state(emqx_os_mon),
|
||||||
|
alarms => Alarms
|
||||||
|
}
|
||||||
|
),
|
||||||
|
[
|
||||||
|
#{
|
||||||
|
activate_at := _,
|
||||||
|
activated := true,
|
||||||
|
deactivate_at := infinity,
|
||||||
|
details := #{high_watermark := 51.0, usage := RealUsage},
|
||||||
|
message := Msg,
|
||||||
|
name := high_system_memory_usage
|
||||||
|
}
|
||||||
|
] =
|
||||||
|
lists:filter(
|
||||||
|
fun
|
||||||
|
(#{name := high_system_memory_usage}) -> true;
|
||||||
|
(_) -> false
|
||||||
|
end,
|
||||||
|
Alarms
|
||||||
|
),
|
||||||
|
?assert(RealUsage >= Usage, {RealUsage, Usage}),
|
||||||
|
?assert(is_binary(Msg)),
|
||||||
|
emqx_config:put([sysmon, os, sysmem_high_watermark], 0.99999),
|
||||||
|
ok = supervisor:terminate_child(emqx_sys_sup, emqx_os_mon),
|
||||||
|
{ok, _} = supervisor:restart_child(emqx_sys_sup, emqx_os_mon),
|
||||||
|
timer:sleep(600),
|
||||||
|
Activated = emqx_alarm:get_alarms(activated),
|
||||||
|
?assertNot(
|
||||||
|
emqx_vm_mon_SUITE:is_existing(high_system_memory_usage, Activated),
|
||||||
|
#{activated => Activated, process_state => sys:get_state(emqx_os_mon)}
|
||||||
|
)
|
||||||
|
end
|
||||||
|
).
|
||||||
|
|
||||||
|
t_cpu_check_alarm(_) ->
|
||||||
|
CpuUtil = 90.12345,
|
||||||
|
Usage = floor(CpuUtil * 100) / 100,
|
||||||
|
emqx_common_test_helpers:with_mock(
|
||||||
|
cpu_sup,
|
||||||
|
util,
|
||||||
|
fun() -> CpuUtil end,
|
||||||
|
fun() ->
|
||||||
|
timer:sleep(500),
|
||||||
|
Alarms = emqx_alarm:get_alarms(activated),
|
||||||
|
?assert(
|
||||||
|
emqx_vm_mon_SUITE:is_existing(high_cpu_usage, emqx_alarm:get_alarms(activated))
|
||||||
|
),
|
||||||
|
[
|
||||||
|
#{
|
||||||
|
activate_at := _,
|
||||||
|
activated := true,
|
||||||
|
deactivate_at := infinity,
|
||||||
|
details := #{high_watermark := 90.0, low_watermark := 0, usage := RealUsage},
|
||||||
|
message := Msg,
|
||||||
|
name := high_cpu_usage
|
||||||
|
}
|
||||||
|
] =
|
||||||
|
lists:filter(
|
||||||
|
fun
|
||||||
|
(#{name := high_cpu_usage}) -> true;
|
||||||
|
(_) -> false
|
||||||
|
end,
|
||||||
|
Alarms
|
||||||
|
),
|
||||||
|
?assert(RealUsage >= Usage, {RealUsage, Usage}),
|
||||||
|
?assert(is_binary(Msg)),
|
||||||
|
emqx_config:put([sysmon, os, cpu_high_watermark], 1),
|
||||||
|
emqx_config:put([sysmon, os, cpu_low_watermark], 0.96),
|
||||||
|
timer:sleep(500),
|
||||||
|
?assertNot(
|
||||||
|
emqx_vm_mon_SUITE:is_existing(high_cpu_usage, emqx_alarm:get_alarms(activated))
|
||||||
|
)
|
||||||
|
end
|
||||||
|
).
|
||||||
|
|
|
@ -24,7 +24,24 @@
|
||||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
t_load(_Config) ->
|
t_load(_Config) ->
|
||||||
?assertMatch([{load1, _}, {load5, _}, {load15, _}], emqx_vm:loads()).
|
lists:foreach(
|
||||||
|
fun({Avg, LoadKey, Int}) ->
|
||||||
|
emqx_common_test_helpers:with_mock(
|
||||||
|
cpu_sup,
|
||||||
|
Avg,
|
||||||
|
fun() -> Int end,
|
||||||
|
fun() ->
|
||||||
|
Load = proplists:get_value(LoadKey, emqx_vm:loads()),
|
||||||
|
?assertEqual(Int / 256, Load)
|
||||||
|
end
|
||||||
|
)
|
||||||
|
end,
|
||||||
|
[{avg1, load1, 0}, {avg5, load5, 128}, {avg15, load15, 256}]
|
||||||
|
),
|
||||||
|
?assertMatch(
|
||||||
|
[{load1, _}, {load5, _}, {load15, _}],
|
||||||
|
emqx_vm:loads()
|
||||||
|
).
|
||||||
|
|
||||||
t_systeminfo(_Config) ->
|
t_systeminfo(_Config) ->
|
||||||
?assertEqual(
|
?assertEqual(
|
||||||
|
|
|
@ -23,13 +23,13 @@
|
||||||
|
|
||||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_testcase(t_alarms, Config) ->
|
init_per_testcase(t_too_many_processes_alarm, Config) ->
|
||||||
emqx_common_test_helpers:boot_modules(all),
|
emqx_common_test_helpers:boot_modules(all),
|
||||||
emqx_common_test_helpers:start_apps([]),
|
emqx_common_test_helpers:start_apps([]),
|
||||||
emqx_config:put([sysmon, vm], #{
|
emqx_config:put([sysmon, vm], #{
|
||||||
process_high_watermark => 0,
|
process_high_watermark => 0,
|
||||||
process_low_watermark => 0,
|
process_low_watermark => 0,
|
||||||
%% 1s
|
%% 100ms
|
||||||
process_check_interval => 100
|
process_check_interval => 100
|
||||||
}),
|
}),
|
||||||
ok = supervisor:terminate_child(emqx_sys_sup, emqx_vm_mon),
|
ok = supervisor:terminate_child(emqx_sys_sup, emqx_vm_mon),
|
||||||
|
@ -43,9 +43,29 @@ init_per_testcase(_, Config) ->
|
||||||
end_per_testcase(_, _Config) ->
|
end_per_testcase(_, _Config) ->
|
||||||
emqx_common_test_helpers:stop_apps([]).
|
emqx_common_test_helpers:stop_apps([]).
|
||||||
|
|
||||||
t_alarms(_) ->
|
t_too_many_processes_alarm(_) ->
|
||||||
timer:sleep(500),
|
timer:sleep(500),
|
||||||
|
Alarms = emqx_alarm:get_alarms(activated),
|
||||||
?assert(is_existing(too_many_processes, emqx_alarm:get_alarms(activated))),
|
?assert(is_existing(too_many_processes, emqx_alarm:get_alarms(activated))),
|
||||||
|
?assertMatch(
|
||||||
|
[
|
||||||
|
#{
|
||||||
|
activate_at := _,
|
||||||
|
activated := true,
|
||||||
|
deactivate_at := infinity,
|
||||||
|
details := #{high_watermark := 0, low_watermark := 0, usage := "0%"},
|
||||||
|
message := <<"0% process usage">>,
|
||||||
|
name := too_many_processes
|
||||||
|
}
|
||||||
|
],
|
||||||
|
lists:filter(
|
||||||
|
fun
|
||||||
|
(#{name := too_many_processes}) -> true;
|
||||||
|
(_) -> false
|
||||||
|
end,
|
||||||
|
Alarms
|
||||||
|
)
|
||||||
|
),
|
||||||
emqx_config:put([sysmon, vm, process_high_watermark], 70),
|
emqx_config:put([sysmon, vm, process_high_watermark], 70),
|
||||||
emqx_config:put([sysmon, vm, process_low_watermark], 60),
|
emqx_config:put([sysmon, vm, process_low_watermark], 60),
|
||||||
timer:sleep(500),
|
timer:sleep(500),
|
||||||
|
|
|
@ -112,8 +112,7 @@ t_update_with_invalid_config(_Config) ->
|
||||||
#{
|
#{
|
||||||
kind := validation_error,
|
kind := validation_error,
|
||||||
path := "authentication.server",
|
path := "authentication.server",
|
||||||
reason := required_field,
|
reason := required_field
|
||||||
value := undefined
|
|
||||||
}
|
}
|
||||||
]}
|
]}
|
||||||
}}},
|
}}},
|
||||||
|
|
|
@ -188,8 +188,7 @@ t_create_invalid_config(_Config) ->
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
{error, #{
|
{error, #{
|
||||||
kind := validation_error,
|
kind := validation_error,
|
||||||
path := "authorization.sources.1",
|
path := "authorization.sources.1.server"
|
||||||
discarded_errors_count := 0
|
|
||||||
}},
|
}},
|
||||||
emqx_authz:update(?CMD_REPLACE, [C])
|
emqx_authz:update(?CMD_REPLACE, [C])
|
||||||
).
|
).
|
||||||
|
|
|
@ -31,6 +31,7 @@
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
load/0,
|
load/0,
|
||||||
|
unload/0,
|
||||||
lookup/1,
|
lookup/1,
|
||||||
lookup/2,
|
lookup/2,
|
||||||
lookup/3,
|
lookup/3,
|
||||||
|
@ -75,6 +76,21 @@ load() ->
|
||||||
maps:to_list(Bridges)
|
maps:to_list(Bridges)
|
||||||
).
|
).
|
||||||
|
|
||||||
|
unload() ->
|
||||||
|
unload_hook(),
|
||||||
|
Bridges = emqx:get_config([bridges], #{}),
|
||||||
|
lists:foreach(
|
||||||
|
fun({Type, NamedConf}) ->
|
||||||
|
lists:foreach(
|
||||||
|
fun({Name, _Conf}) ->
|
||||||
|
_ = emqx_bridge_resource:stop(Type, Name)
|
||||||
|
end,
|
||||||
|
maps:to_list(NamedConf)
|
||||||
|
)
|
||||||
|
end,
|
||||||
|
maps:to_list(Bridges)
|
||||||
|
).
|
||||||
|
|
||||||
safe_load_bridge(Type, Name, Conf, Opts) ->
|
safe_load_bridge(Type, Name, Conf, Opts) ->
|
||||||
try
|
try
|
||||||
_Res = emqx_bridge_resource:create(Type, Name, Conf, Opts),
|
_Res = emqx_bridge_resource:create(Type, Name, Conf, Opts),
|
||||||
|
@ -263,7 +279,7 @@ create(BridgeType, BridgeName, RawConf) ->
|
||||||
brige_action => create,
|
brige_action => create,
|
||||||
bridge_type => BridgeType,
|
bridge_type => BridgeType,
|
||||||
bridge_name => BridgeName,
|
bridge_name => BridgeName,
|
||||||
bridge_raw_config => RawConf
|
bridge_raw_config => emqx_misc:redact(RawConf)
|
||||||
}),
|
}),
|
||||||
emqx_conf:update(
|
emqx_conf:update(
|
||||||
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
|
emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
|
||||||
|
|
|
@ -171,12 +171,12 @@ bridge_info_examples(Method, WithMetrics) ->
|
||||||
ee_bridge_examples(Method)
|
ee_bridge_examples(Method)
|
||||||
).
|
).
|
||||||
|
|
||||||
|
-if(?EMQX_RELEASE_EDITION == ee).
|
||||||
ee_bridge_examples(Method) ->
|
ee_bridge_examples(Method) ->
|
||||||
try
|
emqx_ee_bridge:examples(Method).
|
||||||
emqx_ee_bridge:examples(Method)
|
-else.
|
||||||
catch
|
ee_bridge_examples(_Method) -> #{}.
|
||||||
_:_ -> #{}
|
-endif.
|
||||||
end.
|
|
||||||
|
|
||||||
info_example(Type, Method, WithMetrics) ->
|
info_example(Type, Method, WithMetrics) ->
|
||||||
maps:merge(
|
maps:merge(
|
||||||
|
|
|
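This hunk swaps a runtime probe (try ... catch around an enterprise module call) for a compile-time switch on ?EMQX_RELEASE_EDITION, so community builds contain no reference to emqx_ee_bridge at all. The preprocessor pattern, sketched with hypothetical names:

%% Conditional compilation on a macro (illustrative module/function names).
-if(?MY_EDITION == ee).
feature() -> my_ee_module:feature().
-else.
feature() -> not_available.
-endif.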
@@ -39,7 +39,7 @@ start(_StartType, _StartArgs) ->
 stop(_State) ->
     emqx_conf:remove_handler(?LEAF_NODE_HDLR_PATH),
     emqx_conf:remove_handler(?TOP_LELVE_HDLR_PATH),
-    ok = emqx_bridge:unload_hook(),
+    ok = emqx_bridge:unload(),
     ok.

 -if(?EMQX_RELEASE_EDITION == ee).
|
||||||
msg => "create bridge",
|
msg => "create bridge",
|
||||||
type => Type,
|
type => Type,
|
||||||
name => Name,
|
name => Name,
|
||||||
config => Conf
|
config => emqx_misc:redact(Conf)
|
||||||
}),
|
}),
|
||||||
Opts = override_start_after_created(Conf, Opts0),
|
Opts = override_start_after_created(Conf, Opts0),
|
||||||
{ok, _Data} = emqx_resource:create_local(
|
{ok, _Data} = emqx_resource:create_local(
|
||||||
|
@ -172,7 +172,7 @@ update(Type, Name, {OldConf, Conf}, Opts0) ->
|
||||||
msg => "update bridge",
|
msg => "update bridge",
|
||||||
type => Type,
|
type => Type,
|
||||||
name => Name,
|
name => Name,
|
||||||
config => Conf
|
config => emqx_misc:redact(Conf)
|
||||||
}),
|
}),
|
||||||
case recreate(Type, Name, Conf, Opts) of
|
case recreate(Type, Name, Conf, Opts) of
|
||||||
{ok, _} ->
|
{ok, _} ->
|
||||||
|
@ -182,7 +182,7 @@ update(Type, Name, {OldConf, Conf}, Opts0) ->
|
||||||
msg => "updating_a_non_existing_bridge",
|
msg => "updating_a_non_existing_bridge",
|
||||||
type => Type,
|
type => Type,
|
||||||
name => Name,
|
name => Name,
|
||||||
config => Conf
|
config => emqx_misc:redact(Conf)
|
||||||
}),
|
}),
|
||||||
create(Type, Name, Conf, Opts);
|
create(Type, Name, Conf, Opts);
|
||||||
{error, Reason} ->
|
{error, Reason} ->
|
||||||
|
|
|
@ -72,7 +72,6 @@ up(#{<<"connector">> := Connector} = Config) ->
|
||||||
Cn(proto_ver, <<"v4">>),
|
Cn(proto_ver, <<"v4">>),
|
||||||
Cn(server, undefined),
|
Cn(server, undefined),
|
||||||
Cn(retry_interval, <<"15s">>),
|
Cn(retry_interval, <<"15s">>),
|
||||||
Cn(reconnect_interval, <<"15s">>),
|
|
||||||
Cn(ssl, default_ssl()),
|
Cn(ssl, default_ssl()),
|
||||||
{enable, Enable},
|
{enable, Enable},
|
||||||
{resource_opts, default_resource_opts()},
|
{resource_opts, default_resource_opts()},
|
||||||
|
|
|
@ -56,8 +56,8 @@ api_schema(Method) ->
|
||||||
EE = ee_api_schemas(Method),
|
EE = ee_api_schemas(Method),
|
||||||
hoconsc:union(Broker ++ EE).
|
hoconsc:union(Broker ++ EE).
|
||||||
|
|
||||||
|
-if(?EMQX_RELEASE_EDITION == ee).
|
||||||
ee_api_schemas(Method) ->
|
ee_api_schemas(Method) ->
|
||||||
%% must ensure the app is loaded before checking if fn is defined.
|
|
||||||
ensure_loaded(emqx_ee_bridge, emqx_ee_bridge),
|
ensure_loaded(emqx_ee_bridge, emqx_ee_bridge),
|
||||||
case erlang:function_exported(emqx_ee_bridge, api_schemas, 1) of
|
case erlang:function_exported(emqx_ee_bridge, api_schemas, 1) of
|
||||||
true -> emqx_ee_bridge:api_schemas(Method);
|
true -> emqx_ee_bridge:api_schemas(Method);
|
||||||
|
@ -65,13 +65,31 @@ ee_api_schemas(Method) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
ee_fields_bridges() ->
|
ee_fields_bridges() ->
|
||||||
%% must ensure the app is loaded before checking if fn is defined.
|
|
||||||
ensure_loaded(emqx_ee_bridge, emqx_ee_bridge),
|
ensure_loaded(emqx_ee_bridge, emqx_ee_bridge),
|
||||||
case erlang:function_exported(emqx_ee_bridge, fields, 1) of
|
case erlang:function_exported(emqx_ee_bridge, fields, 1) of
|
||||||
true -> emqx_ee_bridge:fields(bridges);
|
true -> emqx_ee_bridge:fields(bridges);
|
||||||
false -> []
|
false -> []
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
%% must ensure the app is loaded before checking if fn is defined.
|
||||||
|
ensure_loaded(App, Mod) ->
|
||||||
|
try
|
||||||
|
_ = application:load(App),
|
||||||
|
_ = Mod:module_info(),
|
||||||
|
ok
|
||||||
|
catch
|
||||||
|
_:_ ->
|
||||||
|
ok
|
||||||
|
end.
|
||||||
|
|
||||||
|
-else.
|
||||||
|
|
||||||
|
ee_api_schemas(_) -> [].
|
||||||
|
|
||||||
|
ee_fields_bridges() -> [].
|
||||||
|
|
||||||
|
-endif.
|
||||||
|
|
||||||
common_bridge_fields() ->
|
common_bridge_fields() ->
|
||||||
[
|
[
|
||||||
{enable,
|
{enable,
|
||||||
|
@ -194,17 +212,3 @@ status() ->
|
||||||
|
|
||||||
node_name() ->
|
node_name() ->
|
||||||
{"node", mk(binary(), #{desc => ?DESC("desc_node_name"), example => "emqx@127.0.0.1"})}.
|
{"node", mk(binary(), #{desc => ?DESC("desc_node_name"), example => "emqx@127.0.0.1"})}.
|
||||||
|
|
||||||
%%=================================================================================================
|
|
||||||
%% Internal fns
|
|
||||||
%%=================================================================================================
|
|
||||||
|
|
||||||
ensure_loaded(App, Mod) ->
|
|
||||||
try
|
|
||||||
_ = application:load(App),
|
|
||||||
_ = Mod:module_info(),
|
|
||||||
ok
|
|
||||||
catch
|
|
||||||
_:_ ->
|
|
||||||
ok
|
|
||||||
end.
|
|
||||||
|
|
|
@ -640,7 +640,7 @@ t_bridges_probe(Config) ->
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
#{
|
#{
|
||||||
<<"code">> := <<"TEST_FAILED">>,
|
<<"code">> := <<"TEST_FAILED">>,
|
||||||
<<"message">> := <<"#{reason => econnrefused", _/binary>>
|
<<"message">> := <<"econnrefused">>
|
||||||
},
|
},
|
||||||
jsx:decode(ConnRefused)
|
jsx:decode(ConnRefused)
|
||||||
),
|
),
|
||||||
|
|
|
@ -224,7 +224,6 @@ bridges {
|
||||||
mode = \"cluster_shareload\"
|
mode = \"cluster_shareload\"
|
||||||
password = \"\"
|
password = \"\"
|
||||||
proto_ver = \"v5\"
|
proto_ver = \"v5\"
|
||||||
reconnect_interval = \"15s\"
|
|
||||||
replayq {offload = false, seg_bytes = \"100MB\"}
|
replayq {offload = false, seg_bytes = \"100MB\"}
|
||||||
retry_interval = \"12s\"
|
retry_interval = \"12s\"
|
||||||
server = \"localhost:1883\"
|
server = \"localhost:1883\"
|
||||||
|
@ -257,7 +256,6 @@ bridges {
|
||||||
mode = \"cluster_shareload\"
|
mode = \"cluster_shareload\"
|
||||||
password = \"\"
|
password = \"\"
|
||||||
proto_ver = \"v4\"
|
proto_ver = \"v4\"
|
||||||
reconnect_interval = \"15s\"
|
|
||||||
replayq {offload = false, seg_bytes = \"100MB\"}
|
replayq {offload = false, seg_bytes = \"100MB\"}
|
||||||
retry_interval = \"44s\"
|
retry_interval = \"44s\"
|
||||||
server = \"localhost:1883\"
|
server = \"localhost:1883\"
|
||||||
|
|
|
@ -32,7 +32,6 @@
|
||||||
|
|
||||||
-define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>).
|
-define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>).
|
||||||
-define(TYPE_MQTT, <<"mqtt">>).
|
-define(TYPE_MQTT, <<"mqtt">>).
|
||||||
-define(NAME_MQTT, <<"my_mqtt_bridge">>).
|
|
||||||
-define(BRIDGE_NAME_INGRESS, <<"ingress_mqtt_bridge">>).
|
-define(BRIDGE_NAME_INGRESS, <<"ingress_mqtt_bridge">>).
|
||||||
-define(BRIDGE_NAME_EGRESS, <<"egress_mqtt_bridge">>).
|
-define(BRIDGE_NAME_EGRESS, <<"egress_mqtt_bridge">>).
|
||||||
|
|
||||||
|
@ -98,6 +97,24 @@
|
||||||
}
|
}
|
||||||
}).
|
}).
|
||||||
|
|
||||||
|
-define(assertMetrics(Pat, BridgeID),
|
||||||
|
?assertMetrics(Pat, true, BridgeID)
|
||||||
|
).
|
||||||
|
-define(assertMetrics(Pat, Guard, BridgeID),
|
||||||
|
?assertMatch(
|
||||||
|
#{
|
||||||
|
<<"metrics">> := Pat,
|
||||||
|
<<"node_metrics">> := [
|
||||||
|
#{
|
||||||
|
<<"node">> := _,
|
||||||
|
<<"metrics">> := Pat
|
||||||
|
}
|
||||||
|
]
|
||||||
|
} when Guard,
|
||||||
|
request_bridge_metrics(BridgeID)
|
||||||
|
)
|
||||||
|
).
|
||||||
|
|
||||||
inspect(Selected, _Envs, _Args) ->
|
inspect(Selected, _Envs, _Args) ->
|
||||||
persistent_term:put(?MODULE, #{inspect => Selected}).
|
persistent_term:put(?MODULE, #{inspect => Selected}).
|
||||||
|
|
||||||
|
@ -176,7 +193,7 @@ t_mqtt_conn_bridge_ingress(_) ->
|
||||||
{ok, 201, Bridge} = request(
|
{ok, 201, Bridge} = request(
|
||||||
post,
|
post,
|
||||||
uri(["bridges"]),
|
uri(["bridges"]),
|
||||||
?SERVER_CONF(User1)#{
|
ServerConf = ?SERVER_CONF(User1)#{
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
<<"name">> => ?BRIDGE_NAME_INGRESS,
|
<<"name">> => ?BRIDGE_NAME_INGRESS,
|
||||||
<<"ingress">> => ?INGRESS_CONF
|
<<"ingress">> => ?INGRESS_CONF
|
||||||
|
@ -186,8 +203,21 @@ t_mqtt_conn_bridge_ingress(_) ->
|
||||||
<<"type">> := ?TYPE_MQTT,
|
<<"type">> := ?TYPE_MQTT,
|
||||||
<<"name">> := ?BRIDGE_NAME_INGRESS
|
<<"name">> := ?BRIDGE_NAME_INGRESS
|
||||||
} = jsx:decode(Bridge),
|
} = jsx:decode(Bridge),
|
||||||
|
|
||||||
BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
|
BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
|
||||||
|
|
||||||
|
%% try to create the bridge again
|
||||||
|
?assertMatch(
|
||||||
|
{ok, 400, _},
|
||||||
|
request(post, uri(["bridges"]), ServerConf)
|
||||||
|
),
|
||||||
|
|
||||||
|
%% try to reconfigure the bridge
|
||||||
|
?assertMatch(
|
||||||
|
{ok, 200, _},
|
||||||
|
request(put, uri(["bridges", BridgeIDIngress]), ServerConf)
|
||||||
|
),
|
||||||
|
|
||||||
%% we now test if the bridge works as expected
|
%% we now test if the bridge works as expected
|
||||||
RemoteTopic = <<?INGRESS_REMOTE_TOPIC, "/1">>,
|
RemoteTopic = <<?INGRESS_REMOTE_TOPIC, "/1">>,
|
||||||
LocalTopic = <<?INGRESS_LOCAL_TOPIC, "/", RemoteTopic/binary>>,
|
LocalTopic = <<?INGRESS_LOCAL_TOPIC, "/", RemoteTopic/binary>>,
|
||||||
|
@ -198,34 +228,12 @@ t_mqtt_conn_bridge_ingress(_) ->
|
||||||
%% the remote broker is also the local one.
|
%% the remote broker is also the local one.
|
||||||
emqx:publish(emqx_message:make(RemoteTopic, Payload)),
|
emqx:publish(emqx_message:make(RemoteTopic, Payload)),
|
||||||
%% we should receive a message on the local broker, with specified topic
|
%% we should receive a message on the local broker, with specified topic
|
||||||
?assert(
|
assert_mqtt_msg_received(LocalTopic, Payload),
|
||||||
receive
|
|
||||||
{deliver, LocalTopic, #message{payload = Payload}} ->
|
|
||||||
ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]),
|
|
||||||
true;
|
|
||||||
Msg ->
|
|
||||||
ct:pal("Msg: ~p", [Msg]),
|
|
||||||
false
|
|
||||||
after 100 ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
),
|
|
||||||
|
|
||||||
%% verify the metrics of the bridge
|
%% verify the metrics of the bridge
|
||||||
{ok, 200, BridgeMetricsStr} = request(get, uri(["bridges", BridgeIDIngress, "metrics"]), []),
|
?assertMetrics(
|
||||||
?assertMatch(
|
#{<<"matched">> := 0, <<"received">> := 1},
|
||||||
#{
|
BridgeIDIngress
|
||||||
<<"metrics">> := #{<<"matched">> := 0, <<"received">> := 1},
|
|
||||||
<<"node_metrics">> :=
|
|
||||||
[
|
|
||||||
#{
|
|
||||||
<<"node">> := _,
|
|
||||||
<<"metrics">> :=
|
|
||||||
#{<<"matched">> := 0, <<"received">> := 1}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
jsx:decode(BridgeMetricsStr)
|
|
||||||
),
|
),
|
||||||
|
|
||||||
%% delete the bridge
|
%% delete the bridge
|
||||||
|
@ -234,23 +242,38 @@ t_mqtt_conn_bridge_ingress(_) ->
|
||||||
|
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
|
t_mqtt_conn_bridge_ignores_clean_start(_) ->
|
||||||
|
BridgeName = atom_to_binary(?FUNCTION_NAME),
|
||||||
|
BridgeID = create_bridge(
|
||||||
|
?SERVER_CONF(<<"user1">>)#{
|
||||||
|
<<"type">> => ?TYPE_MQTT,
|
||||||
|
<<"name">> => BridgeName,
|
||||||
|
<<"ingress">> => ?INGRESS_CONF,
|
||||||
|
<<"clean_start">> => false
|
||||||
|
}
|
||||||
|
),
|
||||||
|
|
||||||
|
{ok, 200, BridgeJSON} = request(get, uri(["bridges", BridgeID]), []),
|
||||||
|
Bridge = jsx:decode(BridgeJSON),
|
||||||
|
|
||||||
|
%% verify that there's no `clean_start` in response
|
||||||
|
?assertEqual(#{}, maps:with([<<"clean_start">>], Bridge)),
|
||||||
|
|
||||||
|
%% delete the bridge
|
||||||
|
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
|
||||||
|
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
|
||||||
|
|
||||||
|
ok.
|
||||||
|
|
||||||
t_mqtt_conn_bridge_ingress_no_payload_template(_) ->
|
t_mqtt_conn_bridge_ingress_no_payload_template(_) ->
|
||||||
User1 = <<"user1">>,
|
User1 = <<"user1">>,
|
||||||
%% create an MQTT bridge, using POST
|
BridgeIDIngress = create_bridge(
|
||||||
{ok, 201, Bridge} = request(
|
|
||||||
post,
|
|
||||||
uri(["bridges"]),
|
|
||||||
?SERVER_CONF(User1)#{
|
?SERVER_CONF(User1)#{
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
<<"name">> => ?BRIDGE_NAME_INGRESS,
|
<<"name">> => ?BRIDGE_NAME_INGRESS,
|
||||||
<<"ingress">> => ?INGRESS_CONF_NO_PAYLOAD_TEMPLATE
|
<<"ingress">> => ?INGRESS_CONF_NO_PAYLOAD_TEMPLATE
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
#{
|
|
||||||
<<"type">> := ?TYPE_MQTT,
|
|
||||||
<<"name">> := ?BRIDGE_NAME_INGRESS
|
|
||||||
} = jsx:decode(Bridge),
|
|
||||||
BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
|
|
||||||
|
|
||||||
%% we now test if the bridge works as expected
|
%% we now test if the bridge works as expected
|
||||||
RemoteTopic = <<?INGRESS_REMOTE_TOPIC, "/1">>,
|
RemoteTopic = <<?INGRESS_REMOTE_TOPIC, "/1">>,
|
||||||
|
@ -262,40 +285,13 @@ t_mqtt_conn_bridge_ingress_no_payload_template(_) ->
|
||||||
%% the remote broker is also the local one.
|
%% the remote broker is also the local one.
|
||||||
emqx:publish(emqx_message:make(RemoteTopic, Payload)),
|
emqx:publish(emqx_message:make(RemoteTopic, Payload)),
|
||||||
%% we should receive a message on the local broker, with specified topic
|
%% we should receive a message on the local broker, with specified topic
|
||||||
?assert(
|
Msg = assert_mqtt_msg_received(LocalTopic),
|
||||||
receive
|
?assertMatch(#{<<"payload">> := Payload}, jsx:decode(Msg#message.payload)),
|
||||||
{deliver, LocalTopic, #message{payload = MapMsg}} ->
|
|
||||||
ct:pal("local broker got message: ~p on topic ~p", [MapMsg, LocalTopic]),
|
|
||||||
%% the MapMsg is all fields outputed by Rule-Engine. it's a binary coded json here.
|
|
||||||
case jsx:decode(MapMsg) of
|
|
||||||
#{<<"payload">> := Payload} ->
|
|
||||||
true;
|
|
||||||
_ ->
|
|
||||||
false
|
|
||||||
end;
|
|
||||||
Msg ->
|
|
||||||
ct:pal("Msg: ~p", [Msg]),
|
|
||||||
false
|
|
||||||
after 100 ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
),
|
|
||||||
|
|
||||||
%% verify the metrics of the bridge
|
%% verify the metrics of the bridge
|
||||||
{ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDIngress, "metrics"]), []),
|
?assertMetrics(
|
||||||
?assertMatch(
|
#{<<"matched">> := 0, <<"received">> := 1},
|
||||||
#{
|
BridgeIDIngress
|
||||||
<<"metrics">> := #{<<"matched">> := 0, <<"received">> := 1},
|
|
||||||
<<"node_metrics">> :=
|
|
||||||
[
|
|
||||||
#{
|
|
||||||
<<"node">> := _,
|
|
||||||
<<"metrics">> :=
|
|
||||||
#{<<"matched">> := 0, <<"received">> := 1}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
jsx:decode(BridgeStr)
|
|
||||||
),
|
),
|
||||||
|
|
||||||
%% delete the bridge
|
%% delete the bridge
|
||||||
|
@ -307,22 +303,15 @@ t_mqtt_conn_bridge_ingress_no_payload_template(_) ->
|
||||||
t_mqtt_conn_bridge_egress(_) ->
|
t_mqtt_conn_bridge_egress(_) ->
|
||||||
%% then we add a mqtt connector, using POST
|
%% then we add a mqtt connector, using POST
|
||||||
User1 = <<"user1">>,
|
User1 = <<"user1">>,
|
||||||
|
BridgeIDEgress = create_bridge(
|
||||||
{ok, 201, Bridge} = request(
|
|
||||||
post,
|
|
||||||
uri(["bridges"]),
|
|
||||||
?SERVER_CONF(User1)#{
|
?SERVER_CONF(User1)#{
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
||||||
<<"egress">> => ?EGRESS_CONF
|
<<"egress">> => ?EGRESS_CONF
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
#{
|
|
||||||
<<"type">> := ?TYPE_MQTT,
|
|
||||||
<<"name">> := ?BRIDGE_NAME_EGRESS
|
|
||||||
} = jsx:decode(Bridge),
|
|
||||||
BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
|
||||||
ResourceID = emqx_bridge_resource:resource_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
ResourceID = emqx_bridge_resource:resource_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
||||||
|
|
||||||
%% we now test if the bridge works as expected
|
%% we now test if the bridge works as expected
|
||||||
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
||||||
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
||||||
|
@ -334,36 +323,14 @@ t_mqtt_conn_bridge_egress(_) ->
|
||||||
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
||||||
|
|
||||||
%% we should receive a message on the "remote" broker, with specified topic
|
%% we should receive a message on the "remote" broker, with specified topic
|
||||||
?assert(
|
Msg = assert_mqtt_msg_received(RemoteTopic, Payload),
|
||||||
receive
|
Size = byte_size(ResourceID),
|
||||||
{deliver, RemoteTopic, #message{payload = Payload, from = From}} ->
|
?assertMatch(<<ResourceID:Size/binary, _/binary>>, Msg#message.from),
|
||||||
ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
|
|
||||||
Size = byte_size(ResourceID),
|
|
||||||
?assertMatch(<<ResourceID:Size/binary, _/binary>>, From),
|
|
||||||
true;
|
|
||||||
Msg ->
|
|
||||||
ct:pal("Msg: ~p", [Msg]),
|
|
||||||
false
|
|
||||||
after 100 ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
),
|
|
||||||
|
|
||||||
%% verify the metrics of the bridge
|
%% verify the metrics of the bridge
|
||||||
{ok, 200, BridgeMetricsStr} = request(get, uri(["bridges", BridgeIDEgress, "metrics"]), []),
|
?assertMetrics(
|
||||||
?assertMatch(
|
#{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0},
|
||||||
#{
|
BridgeIDEgress
|
||||||
<<"metrics">> := #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0},
|
|
||||||
<<"node_metrics">> :=
|
|
||||||
[
|
|
||||||
#{
|
|
||||||
<<"node">> := _,
|
|
||||||
<<"metrics">> :=
|
|
||||||
#{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
jsx:decode(BridgeMetricsStr)
|
|
||||||
),
|
),
|
||||||
|
|
||||||
%% delete the bridge
|
%% delete the bridge
|
||||||
|
@ -375,21 +342,15 @@ t_mqtt_conn_bridge_egress_no_payload_template(_) ->
|
||||||
%% then we add a mqtt connector, using POST
|
%% then we add a mqtt connector, using POST
|
||||||
User1 = <<"user1">>,
|
User1 = <<"user1">>,
|
||||||
|
|
||||||
{ok, 201, Bridge} = request(
|
BridgeIDEgress = create_bridge(
|
||||||
post,
|
|
||||||
uri(["bridges"]),
|
|
||||||
?SERVER_CONF(User1)#{
|
?SERVER_CONF(User1)#{
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
||||||
<<"egress">> => ?EGRESS_CONF_NO_PAYLOAD_TEMPLATE
|
<<"egress">> => ?EGRESS_CONF_NO_PAYLOAD_TEMPLATE
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
#{
|
|
||||||
<<"type">> := ?TYPE_MQTT,
|
|
||||||
<<"name">> := ?BRIDGE_NAME_EGRESS
|
|
||||||
} = jsx:decode(Bridge),
|
|
||||||
BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
|
||||||
ResourceID = emqx_bridge_resource:resource_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
ResourceID = emqx_bridge_resource:resource_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
||||||
|
|
||||||
%% we now test if the bridge works as expected
|
%% we now test if the bridge works as expected
|
||||||
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
||||||
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
||||||
|
@ -401,42 +362,15 @@ t_mqtt_conn_bridge_egress_no_payload_template(_) ->
|
||||||
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
||||||
|
|
||||||
%% we should receive a message on the "remote" broker, with specified topic
|
%% we should receive a message on the "remote" broker, with specified topic
|
||||||
?assert(
|
Msg = assert_mqtt_msg_received(RemoteTopic),
|
||||||
receive
|
%% the MapMsg is all fields outputed by Rule-Engine. it's a binary coded json here.
|
||||||
{deliver, RemoteTopic, #message{payload = MapMsg, from = From}} ->
|
?assertMatch(<<ResourceID:(byte_size(ResourceID))/binary, _/binary>>, Msg#message.from),
|
||||||
ct:pal("local broker got message: ~p on topic ~p", [MapMsg, RemoteTopic]),
|
?assertMatch(#{<<"payload">> := Payload}, jsx:decode(Msg#message.payload)),
|
||||||
%% the MapMsg is all fields outputed by Rule-Engine. it's a binary coded json here.
|
|
||||||
Size = byte_size(ResourceID),
|
|
||||||
?assertMatch(<<ResourceID:Size/binary, _/binary>>, From),
|
|
||||||
case jsx:decode(MapMsg) of
|
|
||||||
#{<<"payload">> := Payload} ->
|
|
||||||
true;
|
|
||||||
_ ->
|
|
||||||
false
|
|
||||||
end;
|
|
||||||
Msg ->
|
|
||||||
ct:pal("Msg: ~p", [Msg]),
|
|
||||||
false
|
|
||||||
after 100 ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
),
|
|
||||||
|
|
||||||
%% verify the metrics of the bridge
|
%% verify the metrics of the bridge
|
||||||
{ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress, "metrics"]), []),
|
?assertMetrics(
|
||||||
?assertMatch(
|
#{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0},
|
||||||
#{
|
BridgeIDEgress
|
||||||
<<"metrics">> := #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0},
|
|
||||||
<<"node_metrics">> :=
|
|
||||||
[
|
|
||||||
#{
|
|
||||||
<<"node">> := _,
|
|
||||||
<<"metrics">> :=
|
|
||||||
#{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
jsx:decode(BridgeStr)
|
|
||||||
),
|
),
|
||||||
|
|
||||||
%% delete the bridge
|
%% delete the bridge
|
||||||
|
@ -447,9 +381,7 @@ t_mqtt_conn_bridge_egress_no_payload_template(_) ->
|
||||||
|
|
||||||
t_egress_custom_clientid_prefix(_Config) ->
|
t_egress_custom_clientid_prefix(_Config) ->
|
||||||
User1 = <<"user1">>,
|
User1 = <<"user1">>,
|
||||||
{ok, 201, Bridge} = request(
|
BridgeIDEgress = create_bridge(
|
||||||
post,
|
|
||||||
uri(["bridges"]),
|
|
||||||
?SERVER_CONF(User1)#{
|
?SERVER_CONF(User1)#{
|
||||||
<<"clientid_prefix">> => <<"my-custom-prefix">>,
|
<<"clientid_prefix">> => <<"my-custom-prefix">>,
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
|
@ -457,11 +389,6 @@ t_egress_custom_clientid_prefix(_Config) ->
|
||||||
<<"egress">> => ?EGRESS_CONF
|
<<"egress">> => ?EGRESS_CONF
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
#{
|
|
||||||
<<"type">> := ?TYPE_MQTT,
|
|
||||||
<<"name">> := ?BRIDGE_NAME_EGRESS
|
|
||||||
} = jsx:decode(Bridge),
|
|
||||||
BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
|
||||||
ResourceID = emqx_bridge_resource:resource_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
ResourceID = emqx_bridge_resource:resource_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
||||||
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
||||||
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
||||||
|
@ -470,58 +397,36 @@ t_egress_custom_clientid_prefix(_Config) ->
|
||||||
timer:sleep(100),
|
timer:sleep(100),
|
||||||
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
||||||
|
|
||||||
receive
|
Msg = assert_mqtt_msg_received(RemoteTopic, Payload),
|
||||||
{deliver, RemoteTopic, #message{from = From}} ->
|
Size = byte_size(ResourceID),
|
||||||
Size = byte_size(ResourceID),
|
?assertMatch(<<"my-custom-prefix:", _ResouceID:Size/binary, _/binary>>, Msg#message.from),
|
||||||
?assertMatch(<<"my-custom-prefix:", _ResouceID:Size/binary, _/binary>>, From),
|
|
||||||
ok
|
|
||||||
after 1000 ->
|
|
||||||
ct:fail("should have published message")
|
|
||||||
end,
|
|
||||||
|
|
||||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
|
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_mqtt_conn_bridge_ingress_and_egress(_) ->
|
t_mqtt_conn_bridge_ingress_and_egress(_) ->
|
||||||
User1 = <<"user1">>,
|
User1 = <<"user1">>,
|
||||||
%% create an MQTT bridge, using POST
|
BridgeIDIngress = create_bridge(
|
||||||
{ok, 201, Bridge} = request(
|
|
||||||
post,
|
|
||||||
uri(["bridges"]),
|
|
||||||
?SERVER_CONF(User1)#{
|
?SERVER_CONF(User1)#{
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
<<"name">> => ?BRIDGE_NAME_INGRESS,
|
<<"name">> => ?BRIDGE_NAME_INGRESS,
|
||||||
<<"ingress">> => ?INGRESS_CONF
|
<<"ingress">> => ?INGRESS_CONF
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
|
BridgeIDEgress = create_bridge(
|
||||||
#{
|
|
||||||
<<"type">> := ?TYPE_MQTT,
|
|
||||||
<<"name">> := ?BRIDGE_NAME_INGRESS
|
|
||||||
} = jsx:decode(Bridge),
|
|
||||||
BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
|
|
||||||
{ok, 201, Bridge2} = request(
|
|
||||||
post,
|
|
||||||
uri(["bridges"]),
|
|
||||||
?SERVER_CONF(User1)#{
|
?SERVER_CONF(User1)#{
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
||||||
<<"egress">> => ?EGRESS_CONF
|
<<"egress">> => ?EGRESS_CONF
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
#{
|
|
||||||
<<"type">> := ?TYPE_MQTT,
|
|
||||||
<<"name">> := ?BRIDGE_NAME_EGRESS
|
|
||||||
} = jsx:decode(Bridge2),
|
|
||||||
|
|
||||||
BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
|
||||||
%% we now test if the bridge works as expected
|
%% we now test if the bridge works as expected
|
||||||
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
||||||
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
||||||
Payload = <<"hello">>,
|
Payload = <<"hello">>,
|
||||||
emqx:subscribe(RemoteTopic),
|
emqx:subscribe(RemoteTopic),
|
||||||
|
|
||||||
{ok, 200, BridgeMetricsStr1} = request(get, uri(["bridges", BridgeIDEgress, "metrics"]), []),
|
|
||||||
#{
|
#{
|
||||||
<<"metrics">> := #{
|
<<"metrics">> := #{
|
||||||
<<"matched">> := CntMatched1, <<"success">> := CntSuccess1, <<"failed">> := 0
|
<<"matched">> := CntMatched1, <<"success">> := CntSuccess1, <<"failed">> := 0
|
||||||
|
@ -538,29 +443,17 @@ t_mqtt_conn_bridge_ingress_and_egress(_) ->
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
} = jsx:decode(BridgeMetricsStr1),
|
} = request_bridge_metrics(BridgeIDEgress),
|
||||||
timer:sleep(100),
|
timer:sleep(100),
|
||||||
%% PUBLISH a message to the 'local' broker, as we have only one broker,
|
%% PUBLISH a message to the 'local' broker, as we have only one broker,
|
||||||
%% the remote broker is also the local one.
|
%% the remote broker is also the local one.
|
||||||
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
||||||
|
|
||||||
%% we should receive a message on the "remote" broker, with specified topic
|
%% we should receive a message on the "remote" broker, with specified topic
|
||||||
?assert(
|
assert_mqtt_msg_received(RemoteTopic, Payload),
|
||||||
receive
|
|
||||||
{deliver, RemoteTopic, #message{payload = Payload}} ->
|
|
||||||
ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
|
|
||||||
true;
|
|
||||||
Msg ->
|
|
||||||
ct:pal("Msg: ~p", [Msg]),
|
|
||||||
false
|
|
||||||
after 100 ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
),
|
|
||||||
|
|
||||||
%% verify the metrics of the bridge
|
%% verify the metrics of the bridge
|
||||||
timer:sleep(1000),
|
timer:sleep(1000),
|
||||||
{ok, 200, BridgeMetricsStr2} = request(get, uri(["bridges", BridgeIDEgress, "metrics"]), []),
|
|
||||||
#{
|
#{
|
||||||
<<"metrics">> := #{
|
<<"metrics">> := #{
|
||||||
<<"matched">> := CntMatched2, <<"success">> := CntSuccess2, <<"failed">> := 0
|
<<"matched">> := CntMatched2, <<"success">> := CntSuccess2, <<"failed">> := 0
|
||||||
|
@ -577,7 +470,7 @@ t_mqtt_conn_bridge_ingress_and_egress(_) ->
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
} = jsx:decode(BridgeMetricsStr2),
|
} = request_bridge_metrics(BridgeIDEgress),
|
||||||
?assertEqual(CntMatched2, CntMatched1 + 1),
|
?assertEqual(CntMatched2, CntMatched1 + 1),
|
||||||
?assertEqual(CntSuccess2, CntSuccess1 + 1),
|
?assertEqual(CntSuccess2, CntSuccess1 + 1),
|
||||||
?assertEqual(NodeCntMatched2, NodeCntMatched1 + 1),
|
?assertEqual(NodeCntMatched2, NodeCntMatched1 + 1),
|
||||||
|
@ -590,16 +483,13 @@ t_mqtt_conn_bridge_ingress_and_egress(_) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_ingress_mqtt_bridge_with_rules(_) ->
|
t_ingress_mqtt_bridge_with_rules(_) ->
|
||||||
{ok, 201, _} = request(
|
BridgeIDIngress = create_bridge(
|
||||||
post,
|
|
||||||
uri(["bridges"]),
|
|
||||||
?SERVER_CONF(<<"user1">>)#{
|
?SERVER_CONF(<<"user1">>)#{
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
<<"name">> => ?BRIDGE_NAME_INGRESS,
|
<<"name">> => ?BRIDGE_NAME_INGRESS,
|
||||||
<<"ingress">> => ?INGRESS_CONF
|
<<"ingress">> => ?INGRESS_CONF
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
|
|
||||||
|
|
||||||
{ok, 201, Rule} = request(
|
{ok, 201, Rule} = request(
|
||||||
post,
|
post,
|
||||||
|
@ -624,18 +514,7 @@ t_ingress_mqtt_bridge_with_rules(_) ->
|
||||||
%% the remote broker is also the local one.
|
%% the remote broker is also the local one.
|
||||||
emqx:publish(emqx_message:make(RemoteTopic, Payload)),
|
emqx:publish(emqx_message:make(RemoteTopic, Payload)),
|
||||||
%% we should receive a message on the local broker, with specified topic
|
%% we should receive a message on the local broker, with specified topic
|
||||||
?assert(
|
assert_mqtt_msg_received(LocalTopic, Payload),
|
||||||
receive
|
|
||||||
{deliver, LocalTopic, #message{payload = Payload}} ->
|
|
||||||
ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]),
|
|
||||||
true;
|
|
||||||
Msg ->
|
|
||||||
ct:pal("Msg: ~p", [Msg]),
|
|
||||||
false
|
|
||||||
after 100 ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
),
|
|
||||||
%% and also the rule should be matched, with matched + 1:
|
%% and also the rule should be matched, with matched + 1:
|
||||||
{ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
|
{ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
|
||||||
{ok, 200, Metrics} = request(get, uri(["rules", RuleId, "metrics"]), []),
|
{ok, 200, Metrics} = request(get, uri(["rules", RuleId, "metrics"]), []),
|
||||||
|
@ -680,37 +559,22 @@ t_ingress_mqtt_bridge_with_rules(_) ->
|
||||||
),
|
),
|
||||||
|
|
||||||
%% verify the metrics of the bridge
|
%% verify the metrics of the bridge
|
||||||
{ok, 200, BridgeMetricsStr} = request(get, uri(["bridges", BridgeIDIngress, "metrics"]), []),
|
?assertMetrics(
|
||||||
?assertMatch(
|
#{<<"matched">> := 0, <<"received">> := 1},
|
||||||
#{
|
BridgeIDIngress
|
||||||
<<"metrics">> := #{<<"matched">> := 0, <<"received">> := 1},
|
|
||||||
<<"node_metrics">> :=
|
|
||||||
[
|
|
||||||
#{
|
|
||||||
<<"node">> := _,
|
|
||||||
<<"metrics">> :=
|
|
||||||
#{<<"matched">> := 0, <<"received">> := 1}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
jsx:decode(BridgeMetricsStr)
|
|
||||||
),
|
),
|
||||||
|
|
||||||
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
|
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
|
||||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []).
|
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []).
|
||||||
|
|
||||||
t_egress_mqtt_bridge_with_rules(_) ->
|
t_egress_mqtt_bridge_with_rules(_) ->
|
||||||
{ok, 201, Bridge} = request(
|
BridgeIDEgress = create_bridge(
|
||||||
post,
|
|
||||||
uri(["bridges"]),
|
|
||||||
?SERVER_CONF(<<"user1">>)#{
|
?SERVER_CONF(<<"user1">>)#{
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
||||||
<<"egress">> => ?EGRESS_CONF
|
<<"egress">> => ?EGRESS_CONF
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
#{<<"type">> := ?TYPE_MQTT, <<"name">> := ?BRIDGE_NAME_EGRESS} = jsx:decode(Bridge),
|
|
||||||
BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
|
||||||
|
|
||||||
{ok, 201, Rule} = request(
|
{ok, 201, Rule} = request(
|
||||||
post,
|
post,
|
||||||
|
@ -734,18 +598,7 @@ t_egress_mqtt_bridge_with_rules(_) ->
|
||||||
%% the remote broker is also the local one.
|
%% the remote broker is also the local one.
|
||||||
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
emqx:publish(emqx_message:make(LocalTopic, Payload)),
|
||||||
%% we should receive a message on the "remote" broker, with specified topic
|
%% we should receive a message on the "remote" broker, with specified topic
|
||||||
?assert(
|
assert_mqtt_msg_received(RemoteTopic, Payload),
|
||||||
receive
|
|
||||||
{deliver, RemoteTopic, #message{payload = Payload}} ->
|
|
||||||
ct:pal("remote broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
|
|
||||||
true;
|
|
||||||
Msg ->
|
|
||||||
ct:pal("Msg: ~p", [Msg]),
|
|
||||||
false
|
|
||||||
after 100 ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
),
|
|
||||||
emqx:unsubscribe(RemoteTopic),
|
emqx:unsubscribe(RemoteTopic),
|
||||||
|
|
||||||
%% PUBLISH a message to the rule.
|
%% PUBLISH a message to the rule.
|
||||||
|
@ -780,35 +633,12 @@ t_egress_mqtt_bridge_with_rules(_) ->
|
||||||
),
|
),
|
||||||
|
|
||||||
%% we should receive a message on the "remote" broker, with specified topic
|
%% we should receive a message on the "remote" broker, with specified topic
|
||||||
?assert(
|
assert_mqtt_msg_received(RemoteTopic2, Payload2),
|
||||||
receive
|
|
||||||
{deliver, RemoteTopic2, #message{payload = Payload2}} ->
|
|
||||||
ct:pal("remote broker got message: ~p on topic ~p", [Payload2, RemoteTopic2]),
|
|
||||||
true;
|
|
||||||
Msg ->
|
|
||||||
ct:pal("Msg: ~p", [Msg]),
|
|
||||||
false
|
|
||||||
after 100 ->
|
|
||||||
false
|
|
||||||
end
|
|
||||||
),
|
|
||||||
|
|
||||||
%% verify the metrics of the bridge
|
%% verify the metrics of the bridge
|
||||||
{ok, 200, BridgeMetricsStr} = request(get, uri(["bridges", BridgeIDEgress, "metrics"]), []),
|
?assertMetrics(
|
||||||
?assertMatch(
|
#{<<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0},
|
||||||
#{
|
BridgeIDEgress
|
||||||
<<"metrics">> := #{<<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0},
|
|
||||||
<<"node_metrics">> :=
|
|
||||||
[
|
|
||||||
#{
|
|
||||||
<<"node">> := _,
|
|
||||||
<<"metrics">> := #{
|
|
||||||
<<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
jsx:decode(BridgeMetricsStr)
|
|
||||||
),
|
),
|
||||||
|
|
||||||
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
|
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
|
||||||
|
@ -817,37 +647,31 @@ t_egress_mqtt_bridge_with_rules(_) ->
|
||||||
t_mqtt_conn_bridge_egress_reconnect(_) ->
|
t_mqtt_conn_bridge_egress_reconnect(_) ->
|
||||||
%% then we add a mqtt connector, using POST
|
%% then we add a mqtt connector, using POST
|
||||||
User1 = <<"user1">>,
|
User1 = <<"user1">>,
|
||||||
|
BridgeIDEgress = create_bridge(
|
||||||
{ok, 201, Bridge} = request(
|
|
||||||
post,
|
|
||||||
uri(["bridges"]),
|
|
||||||
?SERVER_CONF(User1)#{
|
?SERVER_CONF(User1)#{
|
||||||
<<"type">> => ?TYPE_MQTT,
|
<<"type">> => ?TYPE_MQTT,
|
||||||
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
||||||
<<"egress">> => ?EGRESS_CONF,
|
<<"egress">> => ?EGRESS_CONF,
|
||||||
%% to make it reconnect quickly
|
|
||||||
<<"reconnect_interval">> => <<"1s">>,
|
|
||||||
<<"resource_opts">> => #{
|
<<"resource_opts">> => #{
|
||||||
<<"worker_pool_size">> => 2,
|
<<"worker_pool_size">> => 2,
|
||||||
<<"query_mode">> => <<"sync">>,
|
<<"query_mode">> => <<"sync">>,
|
||||||
%% using a long time so we can test recovery
|
%% using a long time so we can test recovery
|
||||||
<<"request_timeout">> => <<"15s">>,
|
<<"request_timeout">> => <<"15s">>,
|
||||||
%% to make it check the healthy quickly
|
%% to make it check the healthy quickly
|
||||||
<<"health_check_interval">> => <<"0.5s">>
|
<<"health_check_interval">> => <<"0.5s">>,
|
||||||
|
%% to make it reconnect quickly
|
||||||
|
<<"auto_restart_interval">> => <<"1s">>
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
),
|
),
|
||||||
#{
|
|
||||||
<<"type">> := ?TYPE_MQTT,
|
|
||||||
<<"name">> := ?BRIDGE_NAME_EGRESS
|
|
||||||
} = jsx:decode(Bridge),
|
|
||||||
BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
|
|
||||||
on_exit(fun() ->
|
on_exit(fun() ->
|
||||||
%% delete the bridge
|
%% delete the bridge
|
||||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
|
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
|
||||||
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
|
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
|
||||||
ok
|
ok
|
||||||
end),
|
end),
|
||||||
|
|
||||||
%% we now test if the bridge works as expected
|
%% we now test if the bridge works as expected
|
||||||
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
||||||
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
||||||
|
@ -862,20 +686,9 @@ t_mqtt_conn_bridge_egress_reconnect(_) ->
|
||||||
assert_mqtt_msg_received(RemoteTopic, Payload0),
|
assert_mqtt_msg_received(RemoteTopic, Payload0),
|
||||||
|
|
||||||
%% verify the metrics of the bridge
|
%% verify the metrics of the bridge
|
||||||
{ok, 200, BridgeMetricsStr} = request(get, uri(["bridges", BridgeIDEgress, "metrics"]), []),
|
?assertMetrics(
|
||||||
?assertMatch(
|
#{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0},
|
||||||
#{
|
BridgeIDEgress
|
||||||
<<"metrics">> := #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0},
|
|
||||||
<<"node_metrics">> :=
|
|
||||||
[
|
|
||||||
#{
|
|
||||||
<<"node">> := _,
|
|
||||||
<<"metrics">> :=
|
|
||||||
#{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}
|
|
||||||
}
|
|
||||||
]
|
|
||||||
},
|
|
||||||
jsx:decode(BridgeMetricsStr)
|
|
||||||
),
|
),
|
||||||
|
|
||||||
%% stop the listener 1883 to make the bridge disconnected
|
%% stop the listener 1883 to make the bridge disconnected
|
||||||
|
@ -899,70 +712,183 @@ t_mqtt_conn_bridge_egress_reconnect(_) ->
|
||||||
),
|
),
|
||||||
Payload1 = <<"hello2">>,
|
Payload1 = <<"hello2">>,
|
||||||
Payload2 = <<"hello3">>,
|
Payload2 = <<"hello3">>,
|
||||||
%% we need to to it in other processes because it'll block due to
|
%% We need to do it in other processes because it'll block due to
|
||||||
%% the long timeout
|
%% the long timeout
|
||||||
spawn(fun() -> emqx:publish(emqx_message:make(LocalTopic, Payload1)) end),
|
spawn(fun() -> emqx:publish(emqx_message:make(LocalTopic, Payload1)) end),
|
||||||
spawn(fun() -> emqx:publish(emqx_message:make(LocalTopic, Payload2)) end),
|
spawn(fun() -> emqx:publish(emqx_message:make(LocalTopic, Payload2)) end),
|
||||||
{ok, _} = snabbkaffe:receive_events(SRef),
|
{ok, _} = snabbkaffe:receive_events(SRef),
|
||||||
|
|
||||||
%% verify the metrics of the bridge, the message should be queued
|
%% verify the metrics of the bridge, the message should be queued
|
||||||
{ok, 200, BridgeStr1} = request(get, uri(["bridges", BridgeIDEgress]), []),
|
|
||||||
{ok, 200, BridgeMetricsStr1} = request(get, uri(["bridges", BridgeIDEgress, "metrics"]), []),
|
|
||||||
Decoded1 = jsx:decode(BridgeStr1),
|
|
||||||
DecodedMetrics1 = jsx:decode(BridgeMetricsStr1),
|
|
||||||
?assertMatch(
|
?assertMatch(
|
||||||
Status when (Status == <<"connected">> orelse Status == <<"connecting">>),
|
#{<<"status">> := Status} when
|
||||||
maps:get(<<"status">>, Decoded1)
|
Status == <<"connecting">> orelse Status == <<"disconnected">>,
|
||||||
|
request_bridge(BridgeIDEgress)
|
||||||
),
|
),
|
||||||
%% matched >= 3 because of possible retries.
|
%% matched >= 3 because of possible retries.
|
||||||
?assertMatch(
|
?assertMetrics(
|
||||||
#{
|
#{
|
||||||
<<"matched">> := Matched,
|
<<"matched">> := Matched,
|
||||||
<<"success">> := 1,
|
<<"success">> := 1,
|
||||||
<<"failed">> := 0,
|
<<"failed">> := 0,
|
||||||
<<"queuing">> := Queuing,
|
<<"queuing">> := Queuing,
|
||||||
<<"inflight">> := Inflight
|
<<"inflight">> := Inflight
|
||||||
} when Matched >= 3 andalso Inflight + Queuing == 2,
|
},
|
||||||
maps:get(<<"metrics">>, DecodedMetrics1)
|
Matched >= 3 andalso Inflight + Queuing == 2,
|
||||||
|
BridgeIDEgress
|
||||||
),
|
),
|
||||||
|
|
||||||
%% start the listener 1883 to make the bridge reconnected
|
%% start the listener 1883 to make the bridge reconnected
|
||||||
ok = emqx_listeners:start_listener('tcp:default'),
|
ok = emqx_listeners:start_listener('tcp:default'),
|
||||||
timer:sleep(1500),
|
timer:sleep(1500),
|
||||||
%% verify the metrics of the bridge, the 2 queued messages should have been sent
|
%% verify the metrics of the bridge, the 2 queued messages should have been sent
|
||||||
{ok, 200, BridgeStr2} = request(get, uri(["bridges", BridgeIDEgress]), []),
|
?assertMatch(#{<<"status">> := <<"connected">>}, request_bridge(BridgeIDEgress)),
|
||||||
{ok, 200, BridgeMetricsStr2} = request(get, uri(["bridges", BridgeIDEgress, "metrics"]), []),
|
|
||||||
Decoded2 = jsx:decode(BridgeStr2),
|
|
||||||
?assertEqual(<<"connected">>, maps:get(<<"status">>, Decoded2)),
|
|
||||||
%% matched >= 3 because of possible retries.
|
%% matched >= 3 because of possible retries.
|
||||||
?assertMatch(
|
?assertMetrics(
|
||||||
#{
|
#{
|
||||||
<<"metrics">> := #{
|
<<"matched">> := Matched,
|
||||||
<<"matched">> := Matched,
|
<<"success">> := 3,
|
||||||
<<"success">> := 3,
|
<<"failed">> := 0,
|
||||||
<<"failed">> := 0,
|
<<"queuing">> := 0,
|
||||||
<<"queuing">> := 0,
|
<<"retried">> := _
|
||||||
<<"retried">> := _
|
},
|
||||||
}
|
Matched >= 3,
|
||||||
} when Matched >= 3,
|
BridgeIDEgress
|
||||||
jsx:decode(BridgeMetricsStr2)
|
|
||||||
),
|
),
|
||||||
%% also verify the 2 messages have been sent to the remote broker
|
%% also verify the 2 messages have been sent to the remote broker
|
||||||
assert_mqtt_msg_received(RemoteTopic, Payload1),
|
assert_mqtt_msg_received(RemoteTopic, Payload1),
|
||||||
assert_mqtt_msg_received(RemoteTopic, Payload2),
|
assert_mqtt_msg_received(RemoteTopic, Payload2),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
assert_mqtt_msg_received(Topic, Payload) ->
|
t_mqtt_conn_bridge_egress_async_reconnect(_) ->
|
||||||
ct:pal("checking if ~p has been received on ~p", [Payload, Topic]),
|
User1 = <<"user1">>,
|
||||||
|
BridgeIDEgress = create_bridge(
|
||||||
|
?SERVER_CONF(User1)#{
|
||||||
|
<<"type">> => ?TYPE_MQTT,
|
||||||
|
<<"name">> => ?BRIDGE_NAME_EGRESS,
|
||||||
|
<<"egress">> => ?EGRESS_CONF,
|
||||||
|
<<"resource_opts">> => #{
|
||||||
|
<<"worker_pool_size">> => 2,
|
||||||
|
<<"query_mode">> => <<"async">>,
|
||||||
|
%% using a long time so we can test recovery
|
||||||
|
<<"request_timeout">> => <<"15s">>,
|
||||||
|
%% to make it check the healthy quickly
|
||||||
|
<<"health_check_interval">> => <<"0.5s">>,
|
||||||
|
%% to make it reconnect quickly
|
||||||
|
<<"auto_restart_interval">> => <<"1s">>
|
||||||
|
}
|
||||||
|
}
|
||||||
|
),
|
||||||
|
|
||||||
|
on_exit(fun() ->
|
||||||
|
%% delete the bridge
|
||||||
|
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
|
||||||
|
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
|
||||||
|
ok
|
||||||
|
end),
|
||||||
|
|
||||||
|
Self = self(),
|
||||||
|
LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
|
||||||
|
RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
|
||||||
|
emqx:subscribe(RemoteTopic),
|
||||||
|
|
||||||
|
Publisher = start_publisher(LocalTopic, 200, Self),
|
||||||
|
ct:sleep(1000),
|
||||||
|
|
||||||
|
%% stop the listener 1883 to make the bridge disconnected
|
||||||
|
ok = emqx_listeners:stop_listener('tcp:default'),
|
||||||
|
ct:sleep(1500),
|
||||||
|
?assertMatch(
|
||||||
|
#{<<"status">> := Status} when
|
||||||
|
Status == <<"connecting">> orelse Status == <<"disconnected">>,
|
||||||
|
request_bridge(BridgeIDEgress)
|
||||||
|
),
|
||||||
|
|
||||||
|
%% start the listener 1883 to make the bridge reconnected
|
||||||
|
ok = emqx_listeners:start_listener('tcp:default'),
|
||||||
|
timer:sleep(1500),
|
||||||
|
?assertMatch(
|
||||||
|
#{<<"status">> := <<"connected">>},
|
||||||
|
request_bridge(BridgeIDEgress)
|
||||||
|
),
|
||||||
|
|
||||||
|
N = stop_publisher(Publisher),
|
||||||
|
|
||||||
|
%% all those messages should eventually be delivered
|
||||||
|
[
|
||||||
|
assert_mqtt_msg_received(RemoteTopic, Payload)
|
||||||
|
|| I <- lists:seq(1, N),
|
||||||
|
Payload <- [integer_to_binary(I)]
|
||||||
|
],
|
||||||
|
|
||||||
|
ok.
|
||||||
|
|
||||||
|
start_publisher(Topic, Interval, CtrlPid) ->
|
||||||
|
spawn_link(fun() -> publisher(Topic, 1, Interval, CtrlPid) end).
|
||||||
|
|
||||||
|
stop_publisher(Pid) ->
|
||||||
|
_ = Pid ! {self(), stop},
|
||||||
receive
|
receive
|
||||||
{deliver, Topic, #message{payload = Payload}} ->
|
{Pid, N} -> N
|
||||||
ct:pal("Got mqtt message: ~p on topic ~p", [Payload, Topic]),
|
after 1_000 -> ct:fail("publisher ~p did not stop", [Pid])
|
||||||
ok
|
|
||||||
after 300 ->
|
|
||||||
{messages, Messages} = process_info(self(), messages),
|
|
||||||
Msg = io_lib:format("timeout waiting for ~p on topic ~p", [Payload, Topic]),
|
|
||||||
error({Msg, #{messages => Messages}})
|
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
publisher(Topic, N, Delay, CtrlPid) ->
|
||||||
|
_ = emqx:publish(emqx_message:make(Topic, integer_to_binary(N))),
|
||||||
|
receive
|
||||||
|
{CtrlPid, stop} ->
|
||||||
|
CtrlPid ! {self(), N}
|
||||||
|
after Delay ->
|
||||||
|
publisher(Topic, N + 1, Delay, CtrlPid)
|
||||||
|
end.
|
||||||
|
|
||||||
|
%%
|
||||||
|
|
||||||
|
assert_mqtt_msg_received(Topic) ->
|
||||||
|
assert_mqtt_msg_received(Topic, '_', 200).
|
||||||
|
|
||||||
|
assert_mqtt_msg_received(Topic, Payload) ->
|
||||||
|
assert_mqtt_msg_received(Topic, Payload, 200).
|
||||||
|
|
||||||
|
assert_mqtt_msg_received(Topic, Payload, Timeout) ->
|
||||||
|
receive
|
||||||
|
{deliver, Topic, Msg = #message{}} when Payload == '_' ->
|
||||||
|
ct:pal("received mqtt ~p on topic ~p", [Msg, Topic]),
|
||||||
|
Msg;
|
||||||
|
{deliver, Topic, Msg = #message{payload = Payload}} ->
|
||||||
|
ct:pal("received mqtt ~p on topic ~p", [Msg, Topic]),
|
||||||
|
Msg
|
||||||
|
after Timeout ->
|
||||||
|
{messages, Messages} = process_info(self(), messages),
|
||||||
|
ct:fail("timeout waiting ~p ms for ~p on topic '~s', messages = ~0p", [
|
||||||
|
Timeout,
|
||||||
|
Payload,
|
||||||
|
Topic,
|
||||||
|
Messages
|
||||||
|
])
|
||||||
|
end.
|
||||||
|
|
||||||
|
create_bridge(Config = #{<<"type">> := Type, <<"name">> := Name}) ->
|
||||||
|
{ok, 201, Bridge} = request(
|
||||||
|
post,
|
||||||
|
uri(["bridges"]),
|
||||||
|
Config
|
||||||
|
),
|
||||||
|
?assertMatch(
|
||||||
|
#{
|
||||||
|
<<"type">> := Type,
|
||||||
|
<<"name">> := Name
|
||||||
|
},
|
||||||
|
jsx:decode(Bridge)
|
||||||
|
),
|
||||||
|
emqx_bridge_resource:bridge_id(Type, Name).
|
||||||
|
|
||||||
|
request_bridge(BridgeID) ->
|
||||||
|
{ok, 200, Bridge} = request(get, uri(["bridges", BridgeID]), []),
|
||||||
|
jsx:decode(Bridge).
|
||||||
|
|
||||||
|
request_bridge_metrics(BridgeID) ->
|
||||||
|
{ok, 200, BridgeMetrics} = request(get, uri(["bridges", BridgeID, "metrics"]), []),
|
||||||
|
jsx:decode(BridgeMetrics).
|
||||||
|
|
||||||
request(Method, Url, Body) ->
|
request(Method, Url, Body) ->
|
||||||
request(<<"connector_admin">>, Method, Url, Body).
|
request(<<"connector_admin">>, Method, Url, Body).
|
||||||
|
|
|
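Note on the suite refactor above: bridge CRUD and metrics checks now go through shared helpers (create_bridge/1, request_bridge/1, request_bridge_metrics/1, assert_mqtt_msg_received/2,3 and the ?assertMetrics macro) instead of inline receive loops and jsx:decode pattern matches. A minimal sketch of a test written against the new helpers, reusing the suite macros (?SERVER_CONF, ?TYPE_MQTT, ?BRIDGE_NAME_EGRESS, ?EGRESS_CONF and the topic macros) shown above; the test name t_egress_roundtrip_sketch is hypothetical:

    t_egress_roundtrip_sketch(_) ->
        %% create the bridge and get its ID back in one call
        BridgeIDEgress = create_bridge(
            ?SERVER_CONF(<<"user1">>)#{
                <<"type">> => ?TYPE_MQTT,
                <<"name">> => ?BRIDGE_NAME_EGRESS,
                <<"egress">> => ?EGRESS_CONF
            }
        ),
        LocalTopic = <<?EGRESS_LOCAL_TOPIC, "/1">>,
        RemoteTopic = <<?EGRESS_REMOTE_TOPIC, "/", LocalTopic/binary>>,
        emqx:subscribe(RemoteTopic),
        emqx:publish(emqx_message:make(LocalTopic, <<"ping">>)),
        %% fails the common-test case with the mailbox contents on timeout
        assert_mqtt_msg_received(RemoteTopic, <<"ping">>),
        ?assertMatch(#{<<"status">> := <<"connected">>}, request_bridge(BridgeIDEgress)),
        {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []).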
@@ -1255,7 +1255,7 @@ Supervisor 报告的类型。默认为 error 类型。

 log_overload_kill_restart_after {
     desc {
-        en: """If the handler is terminated, it restarts automatically after a delay specified in milliseconds. The value `infinity` prevents restarts."""
+        en: """The handler restarts automatically after a delay in the event of termination, unless the value `infinity` is set, which blocks any subsequent restarts."""
         zh: """如果处理进程终止,它会在以指定的时间后后自动重新启动。 `infinity` 不自动重启。"""
     }
     label {
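For instance, assuming the default file handler, `log.file_handlers.default.overload_kill.restart_after = 5s` would bring a killed log handler back after five seconds, whereas setting it to `infinity` keeps it down.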
@@ -495,15 +495,15 @@ log_and_alarm(IsSuccess, Res, #{kind := ?APPLY_KIND_INITIATE} = Meta) ->
     %% because nothing is committed
     case IsSuccess of
         true ->
-            ?SLOG(debug, Meta#{msg => "cluster_rpc_apply_result", result => Res});
+            ?SLOG(debug, Meta#{msg => "cluster_rpc_apply_result", result => emqx_misc:redact(Res)});
         false ->
-            ?SLOG(warning, Meta#{msg => "cluster_rpc_apply_result", result => Res})
+            ?SLOG(warning, Meta#{msg => "cluster_rpc_apply_result", result => emqx_misc:redact(Res)})
     end;
 log_and_alarm(true, Res, Meta) ->
-    ?SLOG(debug, Meta#{msg => "cluster_rpc_apply_ok", result => Res}),
+    ?SLOG(debug, Meta#{msg => "cluster_rpc_apply_ok", result => emqx_misc:redact(Res)}),
     do_alarm(deactivate, Res, Meta);
 log_and_alarm(false, Res, Meta) ->
-    ?SLOG(error, Meta#{msg => "cluster_rpc_apply_failed", result => Res}),
+    ?SLOG(error, Meta#{msg => "cluster_rpc_apply_failed", result => emqx_misc:redact(Res)}),
     do_alarm(activate, Res, Meta).

 do_alarm(Fun, Res, #{tnx_id := Id} = Meta) ->
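The effect of this change is that every cluster-RPC apply result is passed through emqx_misc:redact/1 before it reaches the log, so secrets carried in applied configs are masked. A rough illustration; the exact key set and mask text are defined in emqx_misc, so treat the output below as indicative only:

    %% indicative only: redact/1 deep-traverses the term and masks known
    %% sensitive keys such as password
    emqx_misc:redact(#{username => <<"user1">>, password => <<"secret">>}).
    %% => #{username => <<"user1">>, password => <<"******">>}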
@@ -1,6 +1,6 @@
 {application, emqx_conf, [
     {description, "EMQX configuration management"},
-    {vsn, "0.1.10"},
+    {vsn, "0.1.11"},
     {registered, []},
     {mod, {emqx_conf_app, []}},
     {applications, [kernel, stdlib]},
@@ -29,6 +29,7 @@

 start(_StartType, _StartArgs) ->
     init_conf(),
+    ok = emqx_config_logger:refresh_config(),
     emqx_conf_sup:start_link().

 stop(_State) ->
@@ -993,7 +993,7 @@ translation("ekka") ->
 translation("kernel") ->
     [
         {"logger_level", fun tr_logger_level/1},
-        {"logger", fun tr_logger/1},
+        {"logger", fun tr_logger_handlers/1},
         {"error_logger", fun(_) -> silent end}
     ];
 translation("emqx") ->
@@ -1065,70 +1065,10 @@ tr_cluster_discovery(Conf) ->

 -spec tr_logger_level(hocon:config()) -> logger:level().
 tr_logger_level(Conf) ->
-    ConsoleLevel = conf_get("log.console_handler.level", Conf, undefined),
-    FileLevels = [
-        conf_get("level", SubConf)
-     || {_, SubConf} <-
-            logger_file_handlers(Conf)
-    ],
-    case FileLevels ++ [ConsoleLevel || ConsoleLevel =/= undefined] of
-        %% warning is the default level we should use
-        [] -> warning;
-        Levels -> least_severe_log_level(Levels)
-    end.
+    emqx_config_logger:tr_level(Conf).

-logger_file_handlers(Conf) ->
-    Handlers = maps:to_list(conf_get("log.file_handlers", Conf, #{})),
-    lists:filter(
-        fun({_Name, Opts}) ->
-            B = conf_get("enable", Opts),
-            true = is_boolean(B),
-            B
-        end,
-        Handlers
-    ).
-
-tr_logger(Conf) ->
-    %% For the default logger that outputs to console
-    ConsoleHandler =
-        case conf_get("log.console_handler.enable", Conf) of
-            true ->
-                ConsoleConf = conf_get("log.console_handler", Conf),
-                [
-                    {handler, console, logger_std_h, #{
-                        level => conf_get("log.console_handler.level", Conf),
-                        config => (log_handler_conf(ConsoleConf))#{type => standard_io},
-                        formatter => log_formatter(ConsoleConf),
-                        filters => log_filter(ConsoleConf)
-                    }}
-                ];
-            false ->
-                []
-        end,
-    %% For the file logger
-    FileHandlers =
-        [
-            begin
-                {handler, to_atom(HandlerName), logger_disk_log_h, #{
-                    level => conf_get("level", SubConf),
-                    config => (log_handler_conf(SubConf))#{
-                        type =>
-                            case conf_get("rotation.enable", SubConf) of
-                                true -> wrap;
-                                _ -> halt
-                            end,
-                        file => conf_get("file", SubConf),
-                        max_no_files => conf_get("rotation.count", SubConf),
-                        max_no_bytes => conf_get("max_size", SubConf)
-                    },
-                    formatter => log_formatter(SubConf),
-                    filters => log_filter(SubConf),
-                    filesync_repeat_interval => no_repeat
-                }}
-            end
-         || {HandlerName, SubConf} <- logger_file_handlers(Conf)
-        ],
-    [{handler, default, undefined}] ++ ConsoleHandler ++ FileHandlers.
+tr_logger_handlers(Conf) ->
+    emqx_config_logger:tr_handlers(Conf).

 log_handler_common_confs(Enable) ->
     [
@@ -1225,78 +1165,6 @@ log_handler_common_confs(Enable) ->
     )}
 ].

-log_handler_conf(Conf) ->
-    SycModeQlen = conf_get("sync_mode_qlen", Conf),
-    DropModeQlen = conf_get("drop_mode_qlen", Conf),
-    FlushQlen = conf_get("flush_qlen", Conf),
-    Overkill = conf_get("overload_kill", Conf),
-    BurstLimit = conf_get("burst_limit", Conf),
-    #{
-        sync_mode_qlen => SycModeQlen,
-        drop_mode_qlen => DropModeQlen,
-        flush_qlen => FlushQlen,
-        overload_kill_enable => conf_get("enable", Overkill),
-        overload_kill_qlen => conf_get("qlen", Overkill),
-        overload_kill_mem_size => conf_get("mem_size", Overkill),
-        overload_kill_restart_after => conf_get("restart_after", Overkill),
-        burst_limit_enable => conf_get("enable", BurstLimit),
-        burst_limit_max_count => conf_get("max_count", BurstLimit),
-        burst_limit_window_time => conf_get("window_time", BurstLimit)
-    }.
-
-log_formatter(Conf) ->
-    CharsLimit =
-        case conf_get("chars_limit", Conf) of
-            unlimited -> unlimited;
-            V when V > 0 -> V
-        end,
-    TimeOffSet =
-        case conf_get("time_offset", Conf) of
-            "system" -> "";
-            "utc" -> 0;
-            OffSetStr -> OffSetStr
-        end,
-    SingleLine = conf_get("single_line", Conf),
-    Depth = conf_get("max_depth", Conf),
-    do_formatter(conf_get("formatter", Conf), CharsLimit, SingleLine, TimeOffSet, Depth).
-
-%% helpers
-do_formatter(json, CharsLimit, SingleLine, TimeOffSet, Depth) ->
-    {emqx_logger_jsonfmt, #{
-        chars_limit => CharsLimit,
-        single_line => SingleLine,
-        time_offset => TimeOffSet,
-        depth => Depth
-    }};
-do_formatter(text, CharsLimit, SingleLine, TimeOffSet, Depth) ->
-    {emqx_logger_textfmt, #{
-        template => [time, " [", level, "] ", msg, "\n"],
-        chars_limit => CharsLimit,
-        single_line => SingleLine,
-        time_offset => TimeOffSet,
-        depth => Depth
-    }}.
-
-log_filter(Conf) ->
-    case conf_get("supervisor_reports", Conf) of
-        error -> [{drop_progress_reports, {fun logger_filters:progress/2, stop}}];
-        progress -> []
-    end.
-
-least_severe_log_level(Levels) ->
-    hd(sort_log_levels(Levels)).
-
-sort_log_levels(Levels) ->
-    lists:sort(
-        fun(A, B) ->
-            case logger:compare_levels(A, B) of
-                R when R == lt; R == eq -> true;
-                gt -> false
-            end
-        end,
-        Levels
-    ).
-
 crash_dump_file_default() ->
     case os:getenv("RUNNER_LOG_DIR") of
         false ->
@@ -1308,11 +1176,9 @@ crash_dump_file_default() ->

 %% utils
 -spec conf_get(string() | [string()], hocon:config()) -> term().
-conf_get(Key, Conf) ->
-    ensure_list(hocon_maps:get(Key, Conf)).
+conf_get(Key, Conf) -> emqx_schema:conf_get(Key, Conf).

-conf_get(Key, Conf, Default) ->
-    ensure_list(hocon_maps:get(Key, Conf, Default)).
+conf_get(Key, Conf, Default) -> emqx_schema:conf_get(Key, Conf, Default).

 filter(Opts) ->
     [{K, V} || {K, V} <- Opts, V =/= undefined].
@@ -1376,15 +1242,6 @@ to_atom(Str) when is_list(Str) ->
 to_atom(Bin) when is_binary(Bin) ->
     binary_to_atom(Bin, utf8).

--spec ensure_list(binary() | list(char())) -> list(char()).
-ensure_list(V) ->
-    case is_binary(V) of
-        true ->
-            binary_to_list(V);
-        false ->
-            V
-    end.
-
 roots(Module) ->
     lists:map(fun({_BinName, Root}) -> Root end, hocon_schema:roots(Module)).
@@ -1,4 +1,5 @@
 mongo
 redis
+redis_cluster
 mysql
 pgsql
@@ -4,12 +4,12 @@ emqx_connector_http {
         en: """
 The base URL is the URL includes only the scheme, host and port.<br/>
 When send an HTTP request, the real URL to be used is the concatenation of the base URL and the
-path parameter (passed by the emqx_resource:query/2,3 or provided by the request parameter).<br/>
+path parameter<br/>
 For example: `http://localhost:9901/`
 """
         zh: """
 base URL 只包含host和port。<br/>
-发送HTTP请求时,真实的URL是由base URL 和 path parameter连接而成(通过emqx_resource:query/2,3传递,或者通过请求参数提供)。<br/>
+发送HTTP请求时,真实的URL是由base URL 和 path parameter连接而成。<br/>
 示例:`http://localhost:9901/`
 """
     }
@@ -76,14 +76,8 @@ base URL 只包含host和port。<br/>

     request {
         desc {
-            en: """
-If the request is provided, the caller can send HTTP requests via
-<code>emqx_resource:query(ResourceId, {send_message, BridgeId, Message})</code>
-"""
-            zh: """
-如果提供了请求,调用者可以通过以下方式发送 HTTP 请求
-<code>emqx_resource:query(ResourceId, {send_message, BridgeId, Message})</code>
-"""
+            en: """Configure HTTP request parameters."""
+            zh: """设置 HTTP 请求的参数。"""
         }
         label: {
             en: "Request"
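Concretely: with base URL `http://localhost:9901/` and a request path of `/messages`, the request goes to `http://localhost:9901/messages`; the path now comes only from the request parameter, since the emqx_resource:query/2,3 wording has been dropped from the description.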
@@ -69,7 +69,7 @@ The Redis default port 6379 is used if `[:Port]` is not specified.
 A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`
 For each Node should be: The IPv4 or IPv6 address or the hostname to connect to.
 A host entry has the following form: `Host[:Port]`.
-The MongoDB default port 27017 is used if `[:Port]` is not specified.
+The Redis default port 6379 is used if `[:Port]` is not specified.
 """
         zh: """
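For example, a cluster node list may look like `redis1.example.com:6379,redis2.example.com,127.0.0.1:6380` (hostnames here are made up); an entry without an explicit port falls back to 6379, which is what the corrected sentence now states.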
@@ -11,17 +11,9 @@
     {eldap2, {git, "https://github.com/emqx/eldap2", {tag, "v0.2.2"}}},
     {mysql, {git, "https://github.com/emqx/mysql-otp", {tag, "1.7.2"}}},
     {epgsql, {git, "https://github.com/emqx/epgsql", {tag, "4.7.0.1"}}},
-    %% NOTE: mind poolboy version when updating mongodb-erlang version
     {mongodb, {git, "https://github.com/emqx/mongodb-erlang", {tag, "v3.0.19"}}},
-    %% NOTE: mind poolboy version when updating eredis_cluster version
-    {eredis_cluster, {git, "https://github.com/emqx/eredis_cluster", {tag, "0.7.5"}}},
-    %% mongodb-erlang uses a special fork https://github.com/comtihon/poolboy.git
-    %% (which has overflow_ttl feature added).
-    %% However, it references `{branch, "master}` (commit 9c06a9a on 2021-04-07).
-    %% By accident, We have always been using the upstream fork due to
-    %% eredis_cluster's dependency getting resolved earlier.
-    %% Here we pin 1.5.2 to avoid surprises in the future.
-    {poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}}
+    %% NOTE: mind ecpool version when updating eredis_cluster version
+    {eredis_cluster, {git, "https://github.com/emqx/eredis_cluster", {tag, "0.8.1"}}}
 ]}.

 {shell, [
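The updated NOTE ("mind ecpool version") suggests that eredis_cluster 0.8.1 pools its connections via ecpool rather than poolboy, which is presumably why the explicit poolboy pin and its explanatory comment block could be dropped.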
@@ -209,7 +209,7 @@ on_start(
     ?SLOG(info, #{
         msg => "starting_http_connector",
         connector => InstId,
-        config => emqx_misc:redact(Config)
+        config => redact(Config)
     }),
     {Transport, TransportOpts} =
         case Scheme of
@@ -234,6 +234,7 @@ on_start(
     PoolName = emqx_plugin_libs_pool:pool_name(InstId),
     State = #{
         pool_name => PoolName,
+        pool_type => PoolType,
         host => Host,
         port => Port,
         connect_timeout => ConnectTimeout,
@@ -264,9 +265,10 @@ on_query(InstId, {send_message, Msg}, State) ->
                 path := Path,
                 body := Body,
                 headers := Headers,
-                request_timeout := Timeout,
-                max_retries := Retry
+                request_timeout := Timeout
             } = process_request(Request, Msg),
+            %% bridge buffer worker has retry, do not let ehttpc retry
+            Retry = 0,
             on_query(
                 InstId,
                 {undefined, Method, {Path, Headers, Body}, Timeout, Retry},
@@ -274,26 +276,30 @@ on_query(InstId, {send_message, Msg}, State) ->
             )
     end;
 on_query(InstId, {Method, Request}, State) ->
-    on_query(InstId, {undefined, Method, Request, 5000, 2}, State);
+    %% TODO: Get retry from State
+    on_query(InstId, {undefined, Method, Request, 5000, _Retry = 2}, State);
 on_query(InstId, {Method, Request, Timeout}, State) ->
-    on_query(InstId, {undefined, Method, Request, Timeout, 2}, State);
+    %% TODO: Get retry from State
+    on_query(InstId, {undefined, Method, Request, Timeout, _Retry = 2}, State);
 on_query(
     InstId,
     {KeyOrNum, Method, Request, Timeout, Retry},
-    #{pool_name := PoolName, base_path := BasePath} = State
+    #{base_path := BasePath} = State
 ) ->
     ?TRACE(
         "QUERY",
         "http_connector_received",
-        #{request => Request, connector => InstId, state => State}
+        #{
+            request => redact(Request),
+            connector => InstId,
+            state => redact(State)
+        }
     ),
     NRequest = formalize_request(Method, BasePath, Request),
+    Worker = resolve_pool_worker(State, KeyOrNum),
     case
         ehttpc:request(
-            case KeyOrNum of
-                undefined -> PoolName;
-                _ -> {PoolName, KeyOrNum}
-            end,
+            Worker,
             Method,
             NRequest,
             Timeout,
@@ -310,7 +316,7 @@ on_query(
         {error, Reason} = Result ->
             ?SLOG(error, #{
                 msg => "http_connector_do_request_failed",
-                request => NRequest,
+                request => redact(NRequest),
                 reason => Reason,
                 connector => InstId
             }),
@@ -322,7 +328,7 @@ on_query(
         {ok, StatusCode, Headers} ->
             ?SLOG(error, #{
                 msg => "http connector do request, received error response",
-                request => NRequest,
+                request => redact(NRequest),
                 connector => InstId,
                 status_code => StatusCode
             }),
@@ -330,7 +336,7 @@ on_query(
         {ok, StatusCode, Headers, Body} ->
             ?SLOG(error, #{
                 msg => "http connector do request, received error response",
-                request => NRequest,
+                request => redact(NRequest),
                 connector => InstId,
                 status_code => StatusCode
             }),
@@ -361,19 +367,19 @@ on_query_async(
     InstId,
     {KeyOrNum, Method, Request, Timeout},
     ReplyFunAndArgs,
-    #{pool_name := PoolName, base_path := BasePath} = State
+    #{base_path := BasePath} = State
 ) ->
+    Worker = resolve_pool_worker(State, KeyOrNum),
     ?TRACE(
         "QUERY_ASYNC",
         "http_connector_received",
-        #{request => Request, connector => InstId, state => State}
+        #{
+            request => redact(Request),
+            connector => InstId,
+            state => redact(State)
+        }
     ),
     NRequest = formalize_request(Method, BasePath, Request),
-    Worker =
-        case KeyOrNum of
-            undefined -> ehttpc_pool:pick_worker(PoolName);
-            _ -> ehttpc_pool:pick_worker(PoolName, KeyOrNum)
-        end,
     ok = ehttpc:request_async(
         Worker,
         Method,
@@ -383,6 +389,16 @@ on_query_async(
     ),
     {ok, Worker}.

+resolve_pool_worker(State, undefined) ->
+    resolve_pool_worker(State, self());
+resolve_pool_worker(#{pool_name := PoolName} = State, Key) ->
+    case maps:get(pool_type, State, random) of
+        random ->
+            ehttpc_pool:pick_worker(PoolName);
+        hash ->
+            ehttpc_pool:pick_worker(PoolName, Key)
+    end.
+
 on_get_status(_InstId, #{pool_name := PoolName, connect_timeout := Timeout} = State) ->
     case do_get_status(PoolName, Timeout) of
         ok ->
@@ -401,7 +417,7 @@ do_get_status(PoolName, Timeout) ->
         {error, Reason} = Error ->
             ?SLOG(error, #{
                 msg => "http_connector_get_status_failed",
-                reason => Reason,
+                reason => redact(Reason),
                 worker => Worker
             }),
             Error
@@ -554,3 +570,63 @@ reply_delegator(ReplyFunAndArgs, Result) ->
         _ ->
             emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result)
     end.
+
+%% The HOCON schema system may generate sensitive keys with this format
+is_sensitive_key([{str, StringKey}]) ->
+    is_sensitive_key(StringKey);
+is_sensitive_key(Atom) when is_atom(Atom) ->
+    is_sensitive_key(erlang:atom_to_binary(Atom));
+is_sensitive_key(Bin) when is_binary(Bin), (size(Bin) =:= 19 orelse size(Bin) =:= 13) ->
+    try
+        %% This is wrapped in a try-catch since we don't know that Bin is a
+        %% valid string so string:lowercase/1 might throw an exception.
+        %%
+        %% We want to convert this to lowercase since the http header fields
+        %% are case insensitive, which means that a user of the Webhook bridge
+        %% can write this field name in many different ways.
+        LowercaseBin = iolist_to_binary(string:lowercase(Bin)),
+        case LowercaseBin of
+            <<"authorization">> -> true;
+            <<"proxy-authorization">> -> true;
+            _ -> false
+        end
+    catch
+        _:_ -> false
+    end;
+is_sensitive_key(_) ->
+    false.
+
+%% Function that will do a deep traversal of Data and remove sensitive
+%% information (i.e., passwords)
+redact(Data) ->
+    emqx_misc:redact(Data, fun is_sensitive_key/1).
+
+-ifdef(TEST).
+-include_lib("eunit/include/eunit.hrl").
+
+redact_test_() ->
+    TestData1 = [
+        {<<"content-type">>, <<"application/json">>},
+        {<<"Authorization">>, <<"Basic YWxhZGRpbjpvcGVuc2VzYW1l">>}
+    ],
+
+    TestData2 = #{
+        headers =>
+            [
+                {[{str, <<"content-type">>}], [{str, <<"application/json">>}]},
+                {[{str, <<"Authorization">>}], [{str, <<"Basic YWxhZGRpbjpvcGVuc2VzYW1l">>}]}
+            ]
+    },
+    [
+        ?_assert(is_sensitive_key(<<"Authorization">>)),
+        ?_assert(is_sensitive_key(<<"AuthoriZation">>)),
+        ?_assert(is_sensitive_key('AuthoriZation')),
+        ?_assert(is_sensitive_key(<<"PrOxy-authoRizaTion">>)),
+        ?_assert(is_sensitive_key('PrOxy-authoRizaTion')),
+        ?_assertNot(is_sensitive_key(<<"Something">>)),
+        ?_assertNot(is_sensitive_key(89)),
+        ?_assertNotEqual(TestData1, redact(TestData1)),
+        ?_assertNotEqual(TestData2, redact(TestData2))
+    ].
+
+-endif.
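Note on the worker-selection change above: resolve_pool_worker/2 makes the choice depend on the pool_type now kept in State. Under hash, the same key always maps to the same ehttpc worker, and a query without an explicit key hashes on self(), so each caller process sticks to one worker; under the default random type the key is ignored and ehttpc_pool:pick_worker/1 chooses any worker. A hedged sketch of issuing keyed queries through the existing on_query/3 clause (ClientId, Path, Headers and Body are made-up values):

    %% with hash pool type configured, both calls below land on the same
    %% worker because they carry the same key
    on_query(InstId, {ClientId, post, {Path, Headers, Body}, 5000, 2}, State),
    on_query(InstId, {ClientId, get, {Path, Headers, <<>>}, 5000, 2}, State).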
@@ -105,16 +105,15 @@ init([]) ->
     {ok, {SupFlag, []}}.

 bridge_spec(Config) ->
+    {Name, NConfig} = maps:take(name, Config),
     #{
-        id => maps:get(name, Config),
-        start => {emqx_connector_mqtt_worker, start_link, [Config]},
-        restart => permanent,
-        shutdown => 5000,
-        type => worker,
-        modules => [emqx_connector_mqtt_worker]
+        id => Name,
+        start => {emqx_connector_mqtt_worker, start_link, [Name, NConfig]},
+        restart => temporary,
+        shutdown => 5000
     }.

--spec bridges() -> [{node(), map()}].
+-spec bridges() -> [{_Name, _Status}].
 bridges() ->
     [
         {Name, emqx_connector_mqtt_worker:status(Name)}
@@ -144,8 +143,7 @@ on_message_received(Msg, HookPoint, ResId) ->
 %% ===================================================================
 callback_mode() -> async_if_possible.

-on_start(InstId, Conf) ->
-    InstanceId = binary_to_atom(InstId, utf8),
+on_start(InstanceId, Conf) ->
     ?SLOG(info, #{
         msg => "starting_mqtt_connector",
         connector => InstanceId,
@@ -154,8 +152,8 @@ on_start(InstId, Conf) ->
     BasicConf = basic_config(Conf),
     BridgeConf = BasicConf#{
         name => InstanceId,
-        clientid => clientid(InstId, Conf),
-        subscriptions => make_sub_confs(maps:get(ingress, Conf, undefined), Conf, InstId),
+        clientid => clientid(InstanceId, Conf),
+        subscriptions => make_sub_confs(maps:get(ingress, Conf, undefined), Conf, InstanceId),
         forwards => make_forward_confs(maps:get(egress, Conf, undefined))
     },
     case ?MODULE:create_bridge(BridgeConf) of
@@ -189,44 +187,50 @@ on_stop(_InstId, #{name := InstanceId}) ->

 on_query(_InstId, {send_message, Msg}, #{name := InstanceId}) ->
     ?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => InstanceId}),
-    emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg).
+    case emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg) of
+        ok ->
+            ok;
+        {error, Reason} ->
+            classify_error(Reason)
+    end.

-on_query_async(
-    _InstId,
-    {send_message, Msg},
-    {ReplyFun, Args},
-    #{name := InstanceId}
-) ->
+on_query_async(_InstId, {send_message, Msg}, Callback, #{name := InstanceId}) ->
     ?TRACE("QUERY", "async_send_msg_to_remote_node", #{message => Msg, connector => InstanceId}),
-    %% this is a cast, currently.
-    ok = emqx_connector_mqtt_worker:send_to_remote_async(InstanceId, Msg, {ReplyFun, Args}),
-    WorkerPid = get_worker_pid(InstanceId),
-    {ok, WorkerPid}.
+    case emqx_connector_mqtt_worker:send_to_remote_async(InstanceId, Msg, Callback) of
+        ok ->
+            ok;
+        {ok, Pid} ->
+            {ok, Pid};
+        {error, Reason} ->
+            classify_error(Reason)
+    end.

 on_get_status(_InstId, #{name := InstanceId}) ->
-    case emqx_connector_mqtt_worker:status(InstanceId) of
-        connected -> connected;
-        _ -> connecting
-    end.
+    emqx_connector_mqtt_worker:status(InstanceId).
+
+classify_error(disconnected = Reason) ->
+    {error, {recoverable_error, Reason}};
+classify_error({disconnected, _RC, _} = Reason) ->
+    {error, {recoverable_error, Reason}};
+classify_error({shutdown, _} = Reason) ->
+    {error, {recoverable_error, Reason}};
+classify_error(Reason) ->
+    {error, {unrecoverable_error, Reason}}.

 ensure_mqtt_worker_started(InstanceId, BridgeConf) ->
-    case emqx_connector_mqtt_worker:ensure_started(InstanceId) of
-        ok -> {ok, #{name => InstanceId, bridge_conf => BridgeConf}};
-        {error, Reason} -> {error, Reason}
+    case emqx_connector_mqtt_worker:connect(InstanceId) of
+        {ok, Properties} ->
+            {ok, #{name => InstanceId, config => BridgeConf, props => Properties}};
+        {error, Reason} ->
+            {error, Reason}
     end.

-%% mqtt workers, when created and called via bridge callbacks, are
-%% registered.
--spec get_worker_pid(atom()) -> pid().
-get_worker_pid(InstanceId) ->
-    whereis(InstanceId).
-
 make_sub_confs(EmptyMap, _Conf, _) when map_size(EmptyMap) == 0 ->
     undefined;
 make_sub_confs(undefined, _Conf, _) ->
     undefined;
-make_sub_confs(SubRemoteConf, Conf, InstId) ->
-    ResId = emqx_resource_manager:manager_id_to_resource_id(InstId),
+make_sub_confs(SubRemoteConf, Conf, InstanceId) ->
+    ResId = emqx_resource_manager:manager_id_to_resource_id(InstanceId),
     case maps:find(hookpoint, Conf) of
         error ->
             error({no_hookpoint_provided, Conf});
@@ -247,7 +251,6 @@ basic_config(
         server := Server,
         proto_ver := ProtoVer,
         bridge_mode := BridgeMode,
-        clean_start := CleanStart,
         keepalive := KeepAlive,
         retry_interval := RetryIntv,
         max_inflight := MaxInflight,
@@ -260,7 +263,6 @@ basic_config(
         %% 30s
         connect_timeout => 30,
         auto_reconnect => true,
-        reconnect_interval => ?AUTO_RECONNECT_INTERVAL,
         proto_ver => ProtoVer,
         %% Opening bridge_mode will form a non-standard mqtt connection message.
         %% A load balancing server (such as haproxy) is often set up before the emqx broker server.
@@ -268,13 +270,15 @@ basic_config(
         %% non-standard mqtt connection packets will be filtered out by LB.
         %% So let's disable bridge_mode.
         bridge_mode => BridgeMode,
-        clean_start => CleanStart,
+        %% NOTE
+        %% We are ignoring the user configuration here because there's currently no reliable way
+        %% to ensure proper session recovery according to the MQTT spec.
+        clean_start => true,
         keepalive => ms_to_s(KeepAlive),
         retry_interval => RetryIntv,
         max_inflight => MaxInflight,
         ssl => EnableSsl,
-        ssl_opts => maps:to_list(maps:remove(enable, Ssl)),
-        if_record_metrics => true
+        ssl_opts => maps:to_list(maps:remove(enable, Ssl))
     },
     maybe_put_fields([username, password], Conf, BasicConf).
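The new classify_error/1 decides how the resource layer treats a failed publish: connection-level failures are tagged recoverable, so the buffer worker can keep the request and retry after reconnect, while anything else surfaces as unrecoverable. A sketch of the mapping; the example reasons below are illustrative, not an exhaustive list:

    classify_error(disconnected),          %% -> {error, {recoverable_error, disconnected}}
    classify_error({shutdown, closed}),    %% -> {error, {recoverable_error, {shutdown, closed}}}
    classify_error({unauthorized, badrpc}) %% -> {error, {unrecoverable_error, {unauthorized, badrpc}}}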
@@ -153,7 +153,7 @@ on_start(
         false ->
             [{ssl, false}]
     end ++ [{sentinel, maps:get(sentinel, Config, undefined)}],
-    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
+    PoolName = InstId,
     State = #{poolname => PoolName, type => Type},
     case Type of
         cluster ->
@@ -222,29 +222,15 @@ is_unrecoverable_error(Results) when is_list(Results) ->
     lists:any(fun is_unrecoverable_error/1, Results);
 is_unrecoverable_error({error, <<"ERR unknown command ", _/binary>>}) ->
     true;
+is_unrecoverable_error({error, invalid_cluster_command}) ->
+    true;
 is_unrecoverable_error(_) ->
     false.

-extract_eredis_cluster_workers(PoolName) ->
-    lists:flatten([
-        gen_server:call(PoolPid, get_all_workers)
-     || PoolPid <- eredis_cluster_monitor:get_all_pools(PoolName)
-    ]).
-
-eredis_cluster_workers_exist_and_are_connected(Workers) ->
-    length(Workers) > 0 andalso
-        lists:all(
-            fun({_, Pid, _, _}) ->
-                eredis_cluster_pool_worker:is_connected(Pid) =:= true
-            end,
-            Workers
-        ).
-
 on_get_status(_InstId, #{type := cluster, poolname := PoolName}) ->
     case eredis_cluster:pool_exists(PoolName) of
         true ->
-            Workers = extract_eredis_cluster_workers(PoolName),
-            Health = eredis_cluster_workers_exist_and_are_connected(Workers),
+            Health = eredis_cluster:ping_all(PoolName),
             status_result(Health);
         false ->
             disconnected
@@ -267,7 +253,9 @@ do_cmd(PoolName, cluster, {cmd, Command}) ->
 do_cmd(Conn, _Type, {cmd, Command}) ->
     eredis:q(Conn, Command);
 do_cmd(PoolName, cluster, {cmds, Commands}) ->
-    wrap_qp_result(eredis_cluster:qp(PoolName, Commands));
+    % TODO
+    % Cluster mode is currently incompatible with batching.
+    wrap_qp_result([eredis_cluster:q(PoolName, Command) || Command <- Commands]);
 do_cmd(Conn, _Type, {cmds, Commands}) ->
     wrap_qp_result(eredis:qp(Conn, Commands)).
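Note on the do_cmd/3 change for cluster mode: the TODO states that cluster mode is currently incompatible with batching, so the eredis_cluster:qp/2 pipeline is replaced by one eredis_cluster:q/2 call per command, which lets each command be routed to its own cluster slot at the cost of pipelining. A sketch of the resulting semantics (the commands are illustrative):

    %% each command is routed independently; results keep the batch order
    Results = [eredis_cluster:q(PoolName, Cmd) || Cmd <- [["GET", "a"], ["GET", "b"]]]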
@@ -1,236 +0,0 @@
-%%--------------------------------------------------------------------
-%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%--------------------------------------------------------------------
-
-%% @doc This module implements EMQX Bridge transport layer on top of MQTT protocol
-
--module(emqx_connector_mqtt_mod).
-
--export([
-    start/1,
-    send/2,
-    send_async/3,
-    stop/1,
-    ping/1
-]).
-
--export([
-    ensure_subscribed/3,
-    ensure_unsubscribed/2
-]).
-
-%% callbacks for emqtt
--export([
-    handle_publish/3,
-    handle_disconnected/2
-]).
-
--include_lib("emqx/include/logger.hrl").
--include_lib("emqx/include/emqx_mqtt.hrl").
-
--define(ACK_REF(ClientPid, PktId), {ClientPid, PktId}).
-
-%% Messages towards ack collector process
--define(REF_IDS(Ref, Ids), {Ref, Ids}).
-
-%%--------------------------------------------------------------------
-%% emqx_bridge_connect callbacks
-%%--------------------------------------------------------------------
-
-start(Config) ->
-    Parent = self(),
-    ServerStr = iolist_to_binary(maps:get(server, Config)),
-    {Server, Port} = emqx_connector_mqtt_schema:parse_server(ServerStr),
-    Mountpoint = maps:get(receive_mountpoint, Config, undefined),
-    Subscriptions = maps:get(subscriptions, Config, undefined),
-    Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Subscriptions),
-    Handlers = make_hdlr(Parent, Vars, #{server => ServerStr}),
-    Config1 = Config#{
-        msg_handler => Handlers,
-        host => Server,
-        port => Port,
-        force_ping => true,
-        proto_ver => maps:get(proto_ver, Config, v4)
-    },
-    case emqtt:start_link(process_config(Config1)) of
-        {ok, Pid} ->
-            case emqtt:connect(Pid) of
-                {ok, _} ->
-                    try
-                        ok = sub_remote_topics(Pid, Subscriptions),
-                        {ok, #{client_pid => Pid, subscriptions => Subscriptions}}
-                    catch
-                        throw:Reason ->
-                            ok = stop(#{client_pid => Pid}),
-                            {error, error_reason(Reason, ServerStr)}
-                    end;
-                {error, Reason} ->
-                    ok = stop(#{client_pid => Pid}),
-                    {error, error_reason(Reason, ServerStr)}
-            end;
-        {error, Reason} ->
-            {error, error_reason(Reason, ServerStr)}
-    end.
-
-error_reason(Reason, ServerStr) ->
-    #{reason => Reason, server => ServerStr}.
-
-stop(#{client_pid := Pid}) ->
-    safe_stop(Pid, fun() -> emqtt:stop(Pid) end, 1000),
-    ok.
-
-ping(undefined) ->
-    pang;
-ping(#{client_pid := Pid}) ->
-    emqtt:ping(Pid).
-
-ensure_subscribed(#{client_pid := Pid, subscriptions := Subs} = Conn, Topic, QoS) when
-    is_pid(Pid)
-->
-    case emqtt:subscribe(Pid, Topic, QoS) of
-        {ok, _, _} -> Conn#{subscriptions => [{Topic, QoS} | Subs]};
-        Error -> {error, Error}
-    end;
-ensure_subscribed(_Conn, _Topic, _QoS) ->
-    %% return ok for now
-    %% next re-connect should should call start with new topic added to config
-    ok.
-
-ensure_unsubscribed(#{client_pid := Pid, subscriptions := Subs} = Conn, Topic) when is_pid(Pid) ->
-    case emqtt:unsubscribe(Pid, Topic) of
-        {ok, _, _} -> Conn#{subscriptions => lists:keydelete(Topic, 1, Subs)};
-        Error -> {error, Error}
-    end;
-ensure_unsubscribed(Conn, _) ->
-    %% return ok for now
-    %% next re-connect should should call start with this topic deleted from config
-    Conn.
-
-safe_stop(Pid, StopF, Timeout) ->
-    MRef = monitor(process, Pid),
-    unlink(Pid),
-    try
-        StopF()
-    catch
-        _:_ ->
-            ok
-    end,
-    receive
-        {'DOWN', MRef, _, _, _} ->
-            ok
-    after Timeout ->
-        exit(Pid, kill)
-    end.
-
-send(#{client_pid := ClientPid}, Msg) ->
-    emqtt:publish(ClientPid, Msg).
-
-send_async(#{client_pid := ClientPid}, Msg, Callback) ->
-    emqtt:publish_async(ClientPid, Msg, infinity, Callback).
-
-handle_publish(Msg, undefined, _Opts) ->
-    ?SLOG(error, #{
-        msg =>
-            "cannot_publish_to_local_broker_as"
-            "_'ingress'_is_not_configured",
-        message => Msg
-    });
-handle_publish(#{properties := Props} = Msg0, Vars, Opts) ->
-    Msg = format_msg_received(Msg0, Opts),
-    ?SLOG(debug, #{
-        msg => "publish_to_local_broker",
-        message => Msg,
-        vars => Vars
-    }),
-    case Vars of
-        #{on_message_received := {Mod, Func, Args}} ->
-            _ = erlang:apply(Mod, Func, [Msg | Args]);
-        _ ->
-            ok
-    end,
-    maybe_publish_to_local_broker(Msg, Vars, Props).
-
-handle_disconnected(Reason, Parent) ->
-    Parent ! {disconnected, self(), Reason}.
-
-make_hdlr(Parent, Vars, Opts) ->
-    #{
-        publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]},
-        disconnected => {fun ?MODULE:handle_disconnected/2, [Parent]}
-    }.
-
-sub_remote_topics(_ClientPid, undefined) ->
-    ok;
-sub_remote_topics(ClientPid, #{remote := #{topic := FromTopic, qos := QoS}}) ->
-    case emqtt:subscribe(ClientPid, FromTopic, QoS) of
-        {ok, _, _} -> ok;
-        Error -> throw(Error)
-    end.
-
-process_config(Config) ->
-    maps:without([conn_type, address, receive_mountpoint, subscriptions, name], Config).
-
-maybe_publish_to_local_broker(Msg, Vars, Props) ->
-    case emqx_map_lib:deep_get([local, topic], Vars, undefined) of
-        %% local topic is not set, discard it
-        undefined -> ok;
-        _ -> emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props))
-    end.
-
-format_msg_received(
-    #{
-        dup := Dup,
-        payload := Payload,
-        properties := Props,
-        qos := QoS,
-        retain := Retain,
-        topic := Topic
-    },
-    #{server := Server}
-) ->
-    #{
-        id => emqx_guid:to_hexstr(emqx_guid:gen()),
-        server => Server,
-        payload => Payload,
-        topic => Topic,
-        qos => QoS,
-        dup => Dup,
-        retain => Retain,
-        pub_props => printable_maps(Props),
-        message_received_at => erlang:system_time(millisecond)
-    }.
-
-printable_maps(undefined) ->
-    #{};
-printable_maps(Headers) ->
-    maps:fold(
-        fun
-            ('User-Property', V0, AccIn) when is_list(V0) ->
-                AccIn#{
-                    'User-Property' => maps:from_list(V0),
-                    'User-Property-Pairs' => [
-                        #{
-                            key => Key,
-                            value => Value
-                        }
-                        || {Key, Value} <- V0
-                    ]
-                };
-            (K, V0, AccIn) ->
-                AccIn#{K => V0}
-        end,
-        #{},
-        Headers
-    ).
@@ -72,12 +72,6 @@ fields("server_configs") ->
         )},
     {server, emqx_schema:servers_sc(#{desc => ?DESC("server")}, ?MQTT_HOST_OPTS)},
     {clientid_prefix, mk(binary(), #{required => false, desc => ?DESC("clientid_prefix")})},
-    {reconnect_interval,
-        mk_duration(
-            "Reconnect interval. Delay for the MQTT bridge to retry establishing the connection "
-            "in case of transportation failure.",
-            #{default => "15s"}
-        )},
     {proto_ver,
         mk(
             hoconsc:enum([v3, v4, v5]),
@@ -116,7 +110,9 @@ fields("server_configs") ->
             boolean(),
             #{
                 default => true,
-                desc => ?DESC("clean_start")
+                desc => ?DESC("clean_start"),
+                hidden => true,
+                deprecated => {since, "v5.0.16"}
             }
         )},
     {keepalive, mk_duration("MQTT Keepalive.", #{default => "300s"})},
@@ -60,174 +60,241 @@
 %% * Local messages are all normalised to QoS-1 when exporting to remote

 -module(emqx_connector_mqtt_worker).
--behaviour(gen_statem).

 -include_lib("snabbkaffe/include/snabbkaffe.hrl").
 -include_lib("emqx/include/logger.hrl").

 %% APIs
 -export([
-    start_link/1,
+    start_link/2,
     stop/1
 ]).

-%% gen_statem callbacks
--export([
-    terminate/3,
-    code_change/4,
-    init/1,
-    callback_mode/0
-]).
-
-%% state functions
--export([
-    idle/3,
-    connected/3
-]).
-
 %% management APIs
 -export([
-    ensure_started/1,
-    ensure_stopped/1,
+    connect/1,
     status/1,
     ping/1,
     send_to_remote/2,
     send_to_remote_async/3
 ]).

--export([get_forwards/1]).
-
--export([get_subscriptions/1]).
+-export([handle_publish/3]).
+-export([handle_disconnect/1]).

 -export_type([
     config/0,
     ack_ref/0
 ]).

--type id() :: atom() | string() | pid().
--type qos() :: emqx_types:qos().
+-type name() :: term().
+% -type qos() :: emqx_types:qos().
 -type config() :: map().
 -type ack_ref() :: term().
--type topic() :: emqx_types:topic().
+% -type topic() :: emqx_types:topic().

 -include_lib("emqx/include/logger.hrl").
 -include_lib("emqx/include/emqx_mqtt.hrl").

-%% same as default in-flight limit for emqtt
--define(DEFAULT_INFLIGHT_SIZE, 32).
--define(DEFAULT_RECONNECT_DELAY_MS, timer:seconds(5)).
--define(DEFAULT_SEG_BYTES, (1 bsl 20)).
--define(DEFAULT_MAX_TOTAL_SIZE, (1 bsl 31)).
+-define(REF(Name), {via, gproc, ?NAME(Name)}).
+-define(NAME(Name), {n, l, Name}).

 %% @doc Start a bridge worker. Supported configs:
-%% start_type: 'manual' (default) or 'auto', when manual, bridge will stay
-%%     at 'idle' state until a manual call to start it.
-%% connect_module: The module which implements emqx_bridge_connect behaviour
-%%     and work as message batch transport layer
-%% reconnect_interval: Delay in milli-seconds for the bridge worker to retry
-%%     in case of transportation failure.
-%% max_inflight: Max number of batches allowed to send-ahead before receiving
-%%     confirmation from remote node/cluster
 %% mountpoint: The topic mount point for messages sent to remote node/cluster
 %%     `undefined', `<<>>' or `""' to disable
 %% forwards: Local topics to subscribe.
 %%
 %% Find more connection specific configs in the callback modules
 %% of emqx_bridge_connect behaviour.
-start_link(Opts) when is_list(Opts) ->
-    start_link(maps:from_list(Opts));
-start_link(Opts) ->
-    case maps:get(name, Opts, undefined) of
-        undefined ->
-            gen_statem:start_link(?MODULE, Opts, []);
-        Name ->
-            Name1 = name(Name),
-            gen_statem:start_link({local, Name1}, ?MODULE, Opts#{name => Name1}, [])
+-spec start_link(name(), map()) ->
+    {ok, pid()} | {error, _Reason}.
+start_link(Name, BridgeOpts) ->
+    ?SLOG(debug, #{
+        msg => "client_starting",
+        name => Name,
+        options => BridgeOpts
+    }),
+    Conf = init_config(BridgeOpts),
+    Options = mk_client_options(Conf, BridgeOpts),
+    case emqtt:start_link(Options) of
+        {ok, Pid} ->
+            true = gproc:reg_other(?NAME(Name), Pid, Conf),
+            {ok, Pid};
+        {error, Reason} = Error ->
+            ?SLOG(error, #{
+                msg => "client_start_failed",
+                config => emqx_misc:redact(BridgeOpts),
+                reason => Reason
+            }),
+            Error
     end.

-ensure_started(Name) ->
-    gen_statem:call(name(Name), ensure_started).
-
-%% @doc Manually stop bridge worker. State idempotency ensured.
-ensure_stopped(Name) ->
-    gen_statem:call(name(Name), ensure_stopped, 5000).
-
-stop(Pid) -> gen_statem:stop(Pid).
-
-status(Pid) when is_pid(Pid) ->
-    gen_statem:call(Pid, status);
-status(Name) ->
-    gen_statem:call(name(Name), status).
-
-ping(Pid) when is_pid(Pid) ->
-    gen_statem:call(Pid, ping);
-ping(Name) ->
-    gen_statem:call(name(Name), ping).
-
-send_to_remote(Pid, Msg) when is_pid(Pid) ->
-    gen_statem:call(Pid, {send_to_remote, Msg});
-send_to_remote(Name, Msg) ->
-    gen_statem:call(name(Name), {send_to_remote, Msg}).
-
-send_to_remote_async(Pid, Msg, Callback) when is_pid(Pid) ->
-    gen_statem:cast(Pid, {send_to_remote_async, Msg, Callback});
-send_to_remote_async(Name, Msg, Callback) ->
-    gen_statem:cast(name(Name), {send_to_remote_async, Msg, Callback}).
-
-%% @doc Return all forwards (local subscriptions).
--spec get_forwards(id()) -> [topic()].
-get_forwards(Name) -> gen_statem:call(name(Name), get_forwards, timer:seconds(1000)).
-
-%% @doc Return all subscriptions (subscription over mqtt connection to remote broker).
--spec get_subscriptions(id()) -> [{emqx_types:topic(), qos()}].
-get_subscriptions(Name) -> gen_statem:call(name(Name), get_subscriptions).
-
-callback_mode() -> [state_functions].
-
-%% @doc Config should be a map().
-init(#{name := Name} = ConnectOpts) ->
-    ?SLOG(debug, #{
-        msg => "starting_bridge_worker",
-        name => Name
-    }),
-    erlang:process_flag(trap_exit, true),
-    State = init_state(ConnectOpts),
-    self() ! idle,
-    {ok, idle, State#{
-        connect_opts => pre_process_opts(ConnectOpts)
-    }}.
-
-init_state(Opts) ->
-    ReconnDelayMs = maps:get(reconnect_interval, Opts, ?DEFAULT_RECONNECT_DELAY_MS),
-    StartType = maps:get(start_type, Opts, manual),
+init_config(Opts) ->
     Mountpoint = maps:get(forward_mountpoint, Opts, undefined),
-    MaxInflightSize = maps:get(max_inflight, Opts, ?DEFAULT_INFLIGHT_SIZE),
-    Name = maps:get(name, Opts, undefined),
+    Subscriptions = maps:get(subscriptions, Opts, undefined),
+    Forwards = maps:get(forwards, Opts, undefined),
     #{
-        start_type => StartType,
-        reconnect_interval => ReconnDelayMs,
         mountpoint => format_mountpoint(Mountpoint),
-        max_inflight => MaxInflightSize,
-        connection => undefined,
-        name => Name
+        subscriptions => pre_process_subscriptions(Subscriptions),
+        forwards => pre_process_forwards(Forwards)
     }.

-pre_process_opts(#{subscriptions := InConf, forwards := OutConf} = ConnectOpts) ->
-    ConnectOpts#{
-        subscriptions => pre_process_in_out(in, InConf),
-        forwards => pre_process_in_out(out, OutConf)
+mk_client_options(Conf, BridgeOpts) ->
+    Server = iolist_to_binary(maps:get(server, BridgeOpts)),
+    HostPort = emqx_connector_mqtt_schema:parse_server(Server),
+    Mountpoint = maps:get(receive_mountpoint, BridgeOpts, undefined),
+    Subscriptions = maps:get(subscriptions, Conf),
+    Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Subscriptions),
+    Opts = maps:without(
+        [
+            address,
+            auto_reconnect,
+            conn_type,
+            mountpoint,
+            forwards,
+            receive_mountpoint,
+            subscriptions
+        ],
+        BridgeOpts
+    ),
+    Opts#{
+        msg_handler => mk_client_event_handler(Vars, #{server => Server}),
+        hosts => [HostPort],
+        force_ping => true,
+        proto_ver => maps:get(proto_ver, BridgeOpts, v4)
     }.

-pre_process_in_out(_, undefined) ->
+mk_client_event_handler(Vars, Opts) when Vars /= undefined ->
+    #{
+        publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]},
+        disconnected => {fun ?MODULE:handle_disconnect/1, []}
+    };
+mk_client_event_handler(undefined, _Opts) ->
+    undefined.
+
+connect(Name) ->
+    #{subscriptions := Subscriptions} = get_config(Name),
+    case emqtt:connect(get_pid(Name)) of
+        {ok, Properties} ->
+            case subscribe_remote_topics(Name, Subscriptions) of
+                ok ->
+                    {ok, Properties};
+                {ok, _, _RCs} ->
+                    {ok, Properties};
+                {error, Reason} = Error ->
+                    ?SLOG(error, #{
+                        msg => "client_subscribe_failed",
+                        subscriptions => Subscriptions,
+                        reason => Reason
+                    }),
+                    Error
+            end;
+        {error, Reason} = Error ->
+            ?SLOG(error, #{
+                msg => "client_connect_failed",
+                reason => Reason
+            }),
+            Error
+    end.
+
+subscribe_remote_topics(Ref, #{remote := #{topic := FromTopic, qos := QoS}}) ->
+    emqtt:subscribe(ref(Ref), FromTopic, QoS);
+subscribe_remote_topics(_Ref, undefined) ->
+    ok.
+
+stop(Ref) ->
+    emqtt:stop(ref(Ref)).
+
+status(Ref) ->
+    try
+        Info = emqtt:info(ref(Ref)),
+        case proplists:get_value(socket, Info) of
+            Socket when Socket /= undefined ->
+                connected;
+            undefined ->
+                connecting
+        end
+    catch
+        exit:{noproc, _} ->
+            disconnected
+    end.
+
+ping(Ref) ->
+    emqtt:ping(ref(Ref)).
+
+send_to_remote(Name, MsgIn) ->
+    trycall(fun() -> do_send(Name, export_msg(Name, MsgIn)) end).
+
+do_send(Name, {true, Msg}) ->
+    case emqtt:publish(get_pid(Name), Msg) of
+        ok ->
+            ok;
+        {ok, #{reason_code := RC}} when
+            RC =:= ?RC_SUCCESS;
+            RC =:= ?RC_NO_MATCHING_SUBSCRIBERS
+        ->
+            ok;
+        {ok, #{reason_code := RC, reason_code_name := Reason}} ->
+            ?SLOG(warning, #{
+                msg => "remote_publish_failed",
+                message => Msg,
+                reason_code => RC,
+                reason_code_name => Reason
+            }),
+            {error, Reason};
+        {error, Reason} ->
+            ?SLOG(info, #{
+                msg => "client_failed",
+                reason => Reason
+            }),
+            {error, Reason}
+    end;
+do_send(_Name, false) ->
+    ok.
+
+send_to_remote_async(Name, MsgIn, Callback) ->
+    trycall(fun() -> do_send_async(Name, export_msg(Name, MsgIn), Callback) end).
+
+do_send_async(Name, {true, Msg}, Callback) ->
+    Pid = get_pid(Name),
+    ok = emqtt:publish_async(Pid, Msg, _Timeout = infinity, Callback),
+    {ok, Pid};
+do_send_async(_Name, false, _Callback) ->
+    ok.
+
+ref(Pid) when is_pid(Pid) ->
+    Pid;
+ref(Term) ->
+    ?REF(Term).
+
+trycall(Fun) ->
+    try
+        Fun()
+    catch
+        throw:noproc ->
+            {error, disconnected};
+        exit:{noproc, _} ->
+            {error, disconnected}
+    end.
+
+format_mountpoint(undefined) ->
     undefined;
-pre_process_in_out(in, #{local := LC} = Conf) when is_map(Conf) ->
+format_mountpoint(Prefix) ->
+    binary:replace(iolist_to_binary(Prefix), <<"${node}">>, atom_to_binary(node(), utf8)).
+
+pre_process_subscriptions(undefined) ->
+    undefined;
+pre_process_subscriptions(#{local := LC} = Conf) when is_map(Conf) ->
     Conf#{local => pre_process_in_out_common(LC)};
-pre_process_in_out(in, Conf) when is_map(Conf) ->
+pre_process_subscriptions(Conf) when is_map(Conf) ->
     %% have no 'local' field in the config
+    undefined.
+
+pre_process_forwards(undefined) ->
     undefined;
-pre_process_in_out(out, #{remote := RC} = Conf) when is_map(Conf) ->
+pre_process_forwards(#{remote := RC} = Conf) when is_map(Conf) ->
     Conf#{remote => pre_process_in_out_common(RC)};
-pre_process_in_out(out, Conf) when is_map(Conf) ->
+pre_process_forwards(Conf) when is_map(Conf) ->
     %% have no 'remote' field in the config
     undefined.
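The rewritten `start_link/2` above replaces locally registered gen_statem names with gproc entries: the emqtt client pid is registered under `{n, l, Name}` and the preprocessed bridge config is attached as the gproc value, so later lookups need no call into the process. A minimal sketch of that addressing scheme in isolation (the `my_bridge` name and the empty client options are placeholders):

    %% Sketch of the naming scheme above; assumes the gproc application
    %% is running and `my_bridge' stands in for a real bridge name.
    demo_registry(BridgeConf) ->
        {ok, Pid} = emqtt:start_link(#{}),
        %% Register the client pid under the name, attaching the config
        %% as the gproc value so config reads need no message round-trip:
        true = gproc:reg_other({n, l, my_bridge}, Pid, BridgeConf),
        Pid = gproc:where({n, l, my_bridge}),
        BridgeConf = gproc:lookup_value({n, l, my_bridge}),
        %% `{via, gproc, {n, l, my_bridge}}' can now be used wherever a
        %% server ref is expected, which is exactly what ?REF(Name) builds.
        ok.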
@@ -247,238 +314,110 @@ pre_process_conf(Key, Conf) ->
             Conf#{Key => Val}
     end.

-code_change(_Vsn, State, Data, _Extra) ->
-    {ok, State, Data}.
+get_pid(Name) ->
+    case gproc:where(?NAME(Name)) of
+        Pid when is_pid(Pid) ->
+            Pid;
+        undefined ->
+            throw(noproc)
+    end.

-terminate(_Reason, _StateName, State) ->
-    _ = disconnect(State),
-    maybe_destroy_session(State).
-
-maybe_destroy_session(#{connect_opts := ConnectOpts = #{clean_start := false}} = State) ->
+get_config(Name) ->
     try
-        %% Destroy session if clean_start is not set.
-        %% Ignore any crashes, just refresh the clean_start = true.
-        _ = do_connect(State#{connect_opts => ConnectOpts#{clean_start => true}}),
-        _ = disconnect(State),
-        ok
+        gproc:lookup_value(?NAME(Name))
     catch
-        _:_ ->
+        error:badarg ->
+            throw(noproc)
+    end.
+
+export_msg(Name, Msg) ->
+    case get_config(Name) of
+        #{forwards := Forwards = #{}, mountpoint := Mountpoint} ->
+            {true, export_msg(Mountpoint, Forwards, Msg)};
+        #{forwards := undefined} ->
+            ?SLOG(error, #{
+                msg => "forwarding_unavailable",
+                message => Msg,
+                reason => "egress is not configured"
+            }),
+            false
+    end.
+
+export_msg(Mountpoint, Forwards, Msg) ->
+    Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards),
+    emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars).
+
+%%
+
+handle_publish(#{properties := Props} = MsgIn, Vars, Opts) ->
+    Msg = import_msg(MsgIn, Opts),
+    ?SLOG(debug, #{
+        msg => "publish_local",
+        message => Msg,
+        vars => Vars
+    }),
+    case Vars of
+        #{on_message_received := {Mod, Func, Args}} ->
+            _ = erlang:apply(Mod, Func, [Msg | Args]);
+        _ ->
             ok
-    end;
-maybe_destroy_session(_State) ->
+    end,
+    maybe_publish_local(Msg, Vars, Props).
+
+handle_disconnect(_Reason) ->
     ok.

-%% ensure_started will be deprecated in the future
-idle({call, From}, ensure_started, State) ->
-    case do_connect(State) of
-        {ok, State1} ->
-            {next_state, connected, State1, [{reply, From, ok}, {state_timeout, 0, connected}]};
-        {error, Reason, _State} ->
-            {keep_state_and_data, [{reply, From, {error, Reason}}]}
-    end;
-idle({call, From}, {send_to_remote, _}, _State) ->
-    {keep_state_and_data, [{reply, From, {error, {recoverable_error, not_connected}}}]};
-%% @doc Standing by for manual start.
-idle(info, idle, #{start_type := manual}) ->
-    keep_state_and_data;
-%% @doc Standing by for auto start.
-idle(info, idle, #{start_type := auto} = State) ->
-    connecting(State);
-idle(state_timeout, reconnect, State) ->
-    connecting(State);
-idle(Type, Content, State) ->
-    common(idle, Type, Content, State).
-
-connecting(#{reconnect_interval := ReconnectDelayMs} = State) ->
-    case do_connect(State) of
-        {ok, State1} ->
-            {next_state, connected, State1, {state_timeout, 0, connected}};
+maybe_publish_local(Msg, Vars, Props) ->
+    case emqx_map_lib:deep_get([local, topic], Vars, undefined) of
+        %% local topic is not set, discard it
+        undefined ->
+            ok;
         _ ->
-            {keep_state_and_data, {state_timeout, ReconnectDelayMs, reconnect}}
+            emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props))
     end.

-connected(state_timeout, connected, State) ->
-    %% nothing to do
-    {keep_state, State};
-connected({call, From}, {send_to_remote, Msg}, State) ->
-    case do_send(State, Msg) of
-        {ok, NState} ->
-            {keep_state, NState, [{reply, From, ok}]};
-        {error, Reason} ->
-            {keep_state_and_data, [[reply, From, {error, Reason}]]}
-    end;
-connected(cast, {send_to_remote_async, Msg, Callback}, State) ->
-    _ = do_send_async(State, Msg, Callback),
-    {keep_state, State};
-connected(
-    info,
-    {disconnected, Conn, Reason},
-    #{connection := Connection, name := Name, reconnect_interval := ReconnectDelayMs} = State
-) ->
-    ?tp(info, disconnected, #{name => Name, reason => Reason}),
-    case Conn =:= maps:get(client_pid, Connection, undefined) of
-        true ->
-            {next_state, idle, State#{connection => undefined},
-                {state_timeout, ReconnectDelayMs, reconnect}};
-        false ->
-            keep_state_and_data
-    end;
-connected(Type, Content, State) ->
-    common(connected, Type, Content, State).
-
-%% Common handlers
-common(StateName, {call, From}, status, _State) ->
-    {keep_state_and_data, [{reply, From, StateName}]};
-common(_StateName, {call, From}, ping, #{connection := Conn} = _State) ->
-    Reply = emqx_connector_mqtt_mod:ping(Conn),
-    {keep_state_and_data, [{reply, From, Reply}]};
-common(_StateName, {call, From}, ensure_stopped, #{connection := undefined} = _State) ->
-    {keep_state_and_data, [{reply, From, ok}]};
-common(_StateName, {call, From}, ensure_stopped, #{connection := Conn} = State) ->
-    Reply = emqx_connector_mqtt_mod:stop(Conn),
-    {next_state, idle, State#{connection => undefined}, [{reply, From, Reply}]};
-common(_StateName, {call, From}, get_forwards, #{connect_opts := #{forwards := Forwards}}) ->
-    {keep_state_and_data, [{reply, From, Forwards}]};
-common(_StateName, {call, From}, get_subscriptions, #{connection := Connection}) ->
-    {keep_state_and_data, [{reply, From, maps:get(subscriptions, Connection, #{})}]};
-common(_StateName, {call, From}, Req, _State) ->
-    {keep_state_and_data, [{reply, From, {error, {unsupported_request, Req}}}]};
-common(_StateName, info, {'EXIT', _, _}, State) ->
-    {keep_state, State};
-common(StateName, Type, Content, #{name := Name} = State) ->
-    ?SLOG(error, #{
-        msg => "bridge_discarded_event",
-        name => Name,
-        type => Type,
-        state_name => StateName,
-        content => Content
-    }),
-    {keep_state, State}.
-
-do_connect(
+import_msg(
     #{
-        connect_opts := ConnectOpts,
-        name := Name
-    } = State
-) ->
-    case emqx_connector_mqtt_mod:start(ConnectOpts) of
-        {ok, Conn} ->
-            ?tp(info, connected, #{name => Name}),
-            {ok, State#{connection => Conn}};
-        {error, Reason} ->
-            ConnectOpts1 = obfuscate(ConnectOpts),
-            ?SLOG(error, #{
-                msg => "failed_to_connect",
-                config => ConnectOpts1,
-                reason => Reason
-            }),
-            {error, Reason, State}
-    end.
-
-do_send(#{connect_opts := #{forwards := undefined}}, Msg) ->
-    ?SLOG(error, #{
-        msg =>
-            "cannot_forward_messages_to_remote_broker"
-            "_as_'egress'_is_not_configured",
-        messages => Msg
-    });
-do_send(
-    #{
-        connection := Connection,
-        mountpoint := Mountpoint,
-        connect_opts := #{forwards := Forwards}
-    } = State,
-    Msg
-) ->
-    Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards),
-    ExportMsg = emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars),
-    ?SLOG(debug, #{
-        msg => "publish_to_remote_broker",
-        message => Msg,
-        vars => Vars
-    }),
-    case emqx_connector_mqtt_mod:send(Connection, ExportMsg) of
-        ok ->
-            {ok, State};
-        {ok, #{reason_code := RC}} when
-            RC =:= ?RC_SUCCESS;
-            RC =:= ?RC_NO_MATCHING_SUBSCRIBERS
-        ->
-            {ok, State};
-        {ok, #{reason_code := RC, reason_code_name := RCN}} ->
-            ?SLOG(warning, #{
-                msg => "publish_to_remote_node_falied",
-                message => Msg,
-                reason_code => RC,
-                reason_code_name => RCN
-            }),
-            {error, RCN};
-        {error, Reason} ->
-            ?SLOG(info, #{
-                msg => "mqtt_bridge_produce_failed",
-                reason => Reason
-            }),
-            {error, Reason}
-    end.
-
-do_send_async(#{connect_opts := #{forwards := undefined}}, Msg, _Callback) ->
-    %% TODO: eval callback with undefined error
-    ?SLOG(error, #{
-        msg =>
-            "cannot_forward_messages_to_remote_broker"
-            "_as_'egress'_is_not_configured",
-        messages => Msg
-    });
-do_send_async(
-    #{
-        connection := Connection,
-        mountpoint := Mountpoint,
-        connect_opts := #{forwards := Forwards}
+        dup := Dup,
+        payload := Payload,
+        properties := Props,
+        qos := QoS,
+        retain := Retain,
+        topic := Topic
     },
-    Msg,
-    Callback
+    #{server := Server}
 ) ->
-    Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards),
-    ExportMsg = emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars),
-    ?SLOG(debug, #{
-        msg => "publish_to_remote_broker",
-        message => Msg,
-        vars => Vars
-    }),
-    emqx_connector_mqtt_mod:send_async(Connection, ExportMsg, Callback).
+    #{
+        id => emqx_guid:to_hexstr(emqx_guid:gen()),
+        server => Server,
+        payload => Payload,
+        topic => Topic,
+        qos => QoS,
+        dup => Dup,
+        retain => Retain,
+        pub_props => printable_maps(Props),
+        message_received_at => erlang:system_time(millisecond)
+    }.

-disconnect(#{connection := Conn} = State) when Conn =/= undefined ->
-    emqx_connector_mqtt_mod:stop(Conn),
-    State#{connection => undefined};
-disconnect(State) ->
-    State.
-
-format_mountpoint(undefined) ->
-    undefined;
-format_mountpoint(Prefix) ->
-    binary:replace(iolist_to_binary(Prefix), <<"${node}">>, atom_to_binary(node(), utf8)).
-
-name(Id) -> list_to_atom(str(Id)).
-
-obfuscate(Map) ->
+printable_maps(undefined) ->
+    #{};
+printable_maps(Headers) ->
     maps:fold(
-        fun(K, V, Acc) ->
-            case is_sensitive(K) of
-                true -> [{K, '***'} | Acc];
-                false -> [{K, V} | Acc]
-            end
+        fun
+            ('User-Property', V0, AccIn) when is_list(V0) ->
+                AccIn#{
+                    'User-Property' => maps:from_list(V0),
+                    'User-Property-Pairs' => [
+                        #{
+                            key => Key,
+                            value => Value
+                        }
+                        || {Key, Value} <- V0
+                    ]
+                };
+            (K, V0, AccIn) ->
+                AccIn#{K => V0}
         end,
-        [],
-        Map
+        #{},
+        Headers
     ).
-
-is_sensitive(password) -> true;
-is_sensitive(ssl_opts) -> true;
-is_sensitive(_) -> false.
-
-str(A) when is_atom(A) ->
-    atom_to_list(A);
-str(B) when is_binary(B) ->
-    binary_to_list(B);
-str(S) when is_list(S) ->
-    S.
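Two helpers stitched through the new worker above, `get_pid/1`/`get_config/1` and `trycall/1`, funnel both ways a missing client can surface — `throw(noproc)` from an explicit gproc lookup and `exit:{noproc, _}` from calling into a dead process — into a single `{error, disconnected}`. Condensed into one sketch, with a hypothetical `DoWork` callback standing in for any emqtt operation:

    %% Sketch of the lookup-then-call pattern used above. `DoWork' is a
    %% placeholder fun; a real caller would pass e.g. fun emqtt:ping/1.
    call_bridge(Name, DoWork) ->
        try
            Pid =
                case gproc:where({n, l, Name}) of
                    P when is_pid(P) -> P;
                    undefined -> throw(noproc)
                end,
            DoWork(Pid)
        catch
            throw:noproc -> {error, disconnected};
            exit:{noproc, _} -> {error, disconnected}
        end.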
@@ -1,60 +0,0 @@
-%%--------------------------------------------------------------------
-%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%--------------------------------------------------------------------
-
--module(emqx_connector_mqtt_tests).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("emqx/include/emqx_mqtt.hrl").
-
-send_and_ack_test() ->
-    %% delegate from gen_rpc to rpc for unit test
-    meck:new(emqtt, [passthrough, no_history]),
-    meck:expect(
-        emqtt,
-        start_link,
-        1,
-        fun(_) ->
-            {ok, spawn_link(fun() -> ok end)}
-        end
-    ),
-    meck:expect(emqtt, connect, 1, {ok, dummy}),
-    meck:expect(
-        emqtt,
-        stop,
-        1,
-        fun(Pid) -> Pid ! stop end
-    ),
-    meck:expect(
-        emqtt,
-        publish,
-        2,
-        fun(Client, Msg) ->
-            Client ! {publish, Msg},
-            %% as packet id
-            {ok, Msg}
-        end
-    ),
-    try
-        Max = 1,
-        Batch = lists:seq(1, Max),
-        {ok, Conn} = emqx_connector_mqtt_mod:start(#{server => "127.0.0.1:1883"}),
-        %% return last packet id as batch reference
-        {ok, _AckRef} = emqx_connector_mqtt_mod:send(Conn, Batch),
-
-        ok = emqx_connector_mqtt_mod:stop(Conn)
-    after
-        meck:unload(emqtt)
-    end.
@@ -1,101 +0,0 @@
-%%--------------------------------------------------------------------
-%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
-%%
-%% Licensed under the Apache License, Version 2.0 (the "License");
-%% you may not use this file except in compliance with the License.
-%% You may obtain a copy of the License at
-%%
-%% http://www.apache.org/licenses/LICENSE-2.0
-%%
-%% Unless required by applicable law or agreed to in writing, software
-%% distributed under the License is distributed on an "AS IS" BASIS,
-%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-%% See the License for the specific language governing permissions and
-%% limitations under the License.
-%%--------------------------------------------------------------------
-
--module(emqx_connector_mqtt_worker_tests).
-
--include_lib("eunit/include/eunit.hrl").
--include_lib("emqx/include/emqx.hrl").
--include_lib("emqx/include/emqx_mqtt.hrl").
-
--define(BRIDGE_NAME, test).
--define(BRIDGE_REG_NAME, emqx_connector_mqtt_worker_test).
--define(WAIT(PATTERN, TIMEOUT),
-    receive
-        PATTERN ->
-            ok
-    after TIMEOUT ->
-        error(timeout)
-    end
-).
-
--export([start/1, send/2, stop/1]).
-
-start(#{connect_result := Result, test_pid := Pid, test_ref := Ref}) ->
-    case is_pid(Pid) of
-        true -> Pid ! {connection_start_attempt, Ref};
-        false -> ok
-    end,
-    Result.
-
-send(SendFun, Batch) when is_function(SendFun, 2) ->
-    SendFun(Batch).
-
-stop(_Pid) -> ok.
-
-%% connect first, disconnect, then connect again
-disturbance_test() ->
-    meck:new(emqx_connector_mqtt_mod, [passthrough, no_history]),
-    meck:expect(emqx_connector_mqtt_mod, start, 1, fun(Conf) -> start(Conf) end),
-    meck:expect(emqx_connector_mqtt_mod, send, 2, fun(SendFun, Batch) -> send(SendFun, Batch) end),
-    meck:expect(emqx_connector_mqtt_mod, stop, 1, fun(Pid) -> stop(Pid) end),
-    try
-        emqx_metrics:start_link(),
-        Ref = make_ref(),
-        TestPid = self(),
-        Config = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}),
-        {ok, Pid} = emqx_connector_mqtt_worker:start_link(Config#{name => bridge_disturbance}),
-        ?assertEqual(Pid, whereis(bridge_disturbance)),
-        ?WAIT({connection_start_attempt, Ref}, 1000),
-        Pid ! {disconnected, TestPid, test},
-        ?WAIT({connection_start_attempt, Ref}, 1000),
-        emqx_metrics:stop(),
-        ok = emqx_connector_mqtt_worker:stop(Pid)
-    after
-        meck:unload(emqx_connector_mqtt_mod)
-    end.
-
-manual_start_stop_test() ->
-    meck:new(emqx_connector_mqtt_mod, [passthrough, no_history]),
-    meck:expect(emqx_connector_mqtt_mod, start, 1, fun(Conf) -> start(Conf) end),
-    meck:expect(emqx_connector_mqtt_mod, send, 2, fun(SendFun, Batch) -> send(SendFun, Batch) end),
-    meck:expect(emqx_connector_mqtt_mod, stop, 1, fun(Pid) -> stop(Pid) end),
-    try
-        emqx_metrics:start_link(),
-        Ref = make_ref(),
-        TestPid = self(),
-        BridgeName = manual_start_stop,
-        Config0 = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}),
-        Config = Config0#{start_type := manual},
-        {ok, Pid} = emqx_connector_mqtt_worker:start_link(Config#{name => BridgeName}),
-        %% call ensure_started again should yield the same result
-        ok = emqx_connector_mqtt_worker:ensure_started(BridgeName),
-        emqx_connector_mqtt_worker:ensure_stopped(BridgeName),
-        emqx_metrics:stop(),
-        ok = emqx_connector_mqtt_worker:stop(Pid)
-    after
-        meck:unload(emqx_connector_mqtt_mod)
-    end.
-
-make_config(Ref, TestPid, Result) ->
-    #{
-        start_type => auto,
-        subscriptions => undefined,
-        forwards => undefined,
-        reconnect_interval => 50,
-        test_pid => TestPid,
-        test_ref => Ref,
-        connect_result => Result
-    }.
@@ -27,6 +27,8 @@
 -define(REDIS_SINGLE_PORT, 6379).
 -define(REDIS_SENTINEL_HOST, "redis-sentinel").
 -define(REDIS_SENTINEL_PORT, 26379).
+-define(REDIS_CLUSTER_HOST, "redis-cluster-1").
+-define(REDIS_CLUSTER_PORT, 6379).
 -define(REDIS_RESOURCE_MOD, emqx_connector_redis).

 all() ->
@@ -203,8 +205,8 @@ redis_config_base(Type, ServerKey) ->
             MaybeSentinel = "",
             MaybeDatabase = " database = 1\n";
         "cluster" ->
-            Host = ?REDIS_SINGLE_HOST,
-            Port = ?REDIS_SINGLE_PORT,
+            Host = ?REDIS_CLUSTER_HOST,
+            Port = ?REDIS_CLUSTER_PORT,
             MaybeSentinel = "",
             MaybeDatabase = ""
     end,
@@ -325,7 +325,7 @@ is_self_auth_token(Username, Token) ->
     end.

 change_pwd(post, #{bindings := #{username := Username}, body := Params}) ->
-    LogMeta = #{msg => "Dashboard change password", username => Username},
+    LogMeta = #{msg => "Dashboard change password", username => binary_to_list(Username)},
     OldPwd = maps:get(<<"old_pwd">>, Params),
     NewPwd = maps:get(<<"new_pwd">>, Params),
     case ?EMPTY(OldPwd) orelse ?EMPTY(NewPwd) of
@@ -62,7 +62,7 @@ The default is false."""

 duration {
     desc {
-        en: """Indicates how long the alarm has lasted, in milliseconds."""
+        en: """Indicates how long the alarm has been active in milliseconds."""
         zh: """表明告警已经持续了多久,单位:毫秒。"""
     }
 }
@@ -2,7 +2,7 @@
 {application, emqx_management, [
     {description, "EMQX Management API and CLI"},
     % strict semver, bump manually!
-    {vsn, "5.0.12"},
+    {vsn, "5.0.13"},
     {modules, []},
     {registered, [emqx_management_sup]},
     {applications, [kernel, stdlib, emqx_plugins, minirest, emqx]},
@@ -126,7 +126,7 @@ lookup_node(Node) ->

 node_info() ->
     {UsedRatio, Total} = get_sys_memory(),
-    Info = maps:from_list([{K, list_to_binary(V)} || {K, V} <- emqx_vm:loads()]),
+    Info = maps:from_list(emqx_vm:loads()),
     BrokerInfo = emqx_sys:info(),
     Info#{
         node => node(),
@@ -150,7 +150,7 @@ node_info() ->
 get_sys_memory() ->
     case os:type() of
         {unix, linux} ->
-            load_ctl:get_sys_memory();
+            emqx_mgmt_cache:get_sys_memory();
         _ ->
             {0, 0}
     end.
@@ -159,18 +159,18 @@ fields(node_info) ->
         )},
     {load1,
         mk(
-            string(),
-            #{desc => <<"CPU average load in 1 minute">>, example => "2.66"}
+            float(),
+            #{desc => <<"CPU average load in 1 minute">>, example => 2.66}
         )},
     {load5,
         mk(
-            string(),
-            #{desc => <<"CPU average load in 5 minute">>, example => "2.66"}
+            float(),
+            #{desc => <<"CPU average load in 5 minute">>, example => 2.66}
         )},
     {load15,
         mk(
-            string(),
-            #{desc => <<"CPU average load in 15 minute">>, example => "2.66"}
+            float(),
+            #{desc => <<"CPU average load in 15 minute">>, example => 2.66}
         )},
     {max_fds,
         mk(
@@ -75,7 +75,7 @@ schema("/topics/:topic") ->
             tags => ?TAGS,
             parameters => [topic_param(path)],
             responses => #{
-                200 => hoconsc:mk(hoconsc:ref(topic), #{}),
+                200 => hoconsc:mk(hoconsc:array(hoconsc:ref(topic)), #{}),
                 404 =>
                     emqx_dashboard_swagger:error_codes(['TOPIC_NOT_FOUND'], <<"Topic not found">>)
             }
@@ -130,8 +130,9 @@ lookup(#{topic := Topic}) ->
     case emqx_router:lookup_routes(Topic) of
         [] ->
             {404, #{code => ?TOPIC_NOT_FOUND, message => <<"Topic not found">>}};
-        [Route] ->
-            {200, format(Route)}
+        Routes when is_list(Routes) ->
+            Formatted = [format(Route) || Route <- Routes],
+            {200, Formatted}
     end.

 %%%==============================================================================================
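With the 200 response now an array, `lookup/1` accepts any route list, so a topic routed on several nodes returns one element per route rather than crashing the old exhaustive `[Route]` match. Written out flat, the handler's behavior is roughly (a sketch reusing the module's own `format/1`):

    %% Sketch: emqx_router:lookup_routes/1 may return several routes for
    %% one topic in a cluster (roughly one per node with subscribers), so
    %% each of them is formatted instead of assuming a single match.
    lookup_topic(Topic) ->
        case emqx_router:lookup_routes(Topic) of
            [] -> {404, #{code => 'TOPIC_NOT_FOUND', message => <<"Topic not found">>}};
            Routes -> {200, [format(Route) || Route <- Routes]}
        end.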
@@ -0,0 +1,108 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%--------------------------------------------------------------------
+-module(emqx_mgmt_cache).
+
+-behaviour(gen_server).
+
+-define(SYS_MEMORY_KEY, sys_memory).
+-define(EXPIRED_MS, 3000).
+%% -100ms to early update cache
+-define(REFRESH_MS, ?EXPIRED_MS - 100).
+-define(DEFAULT_BAD_MEMORY, {0, 0}).
+
+-export([start_link/0, get_sys_memory/0]).
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2, code_change/3]).
+
+get_sys_memory() ->
+    case get_memory_from_cache() of
+        {ok, CacheMem} ->
+            erlang:send(?MODULE, refresh_sys_memory),
+            CacheMem;
+        stale ->
+            get_sys_memory_sync()
+    end.
+
+get_sys_memory_sync() ->
+    try
+        gen_server:call(?MODULE, get_sys_memory, ?EXPIRED_MS)
+    catch
+        exit:{timeout, _} ->
+            ?DEFAULT_BAD_MEMORY
+    end.
+
+start_link() ->
+    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).
+
+init([]) ->
+    _ = ets:new(?MODULE, [set, named_table, public, {keypos, 1}]),
+    {ok, #{latest_refresh => 0}}.
+
+handle_call(get_sys_memory, _From, State) ->
+    {Mem, NewState} = refresh_sys_memory(State),
+    {reply, Mem, NewState};
+handle_call(_Request, _From, State) ->
+    {reply, ok, State}.
+
+handle_cast(_Request, State) ->
+    {noreply, State}.
+
+handle_info(refresh_sys_memory, State) ->
+    {_, NewState} = refresh_sys_memory(State),
+    {noreply, NewState};
+handle_info(_Info, State) ->
+    {noreply, State}.
+
+terminate(_Reason, _State) ->
+    ok.
+
+code_change(_OldVsn, State, _Extra) ->
+    {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+refresh_sys_memory(State = #{latest_refresh := LatestRefresh}) ->
+    Now = now_millisecond(),
+    case Now - LatestRefresh >= ?REFRESH_MS of
+        true ->
+            do_refresh_sys_memory(Now, State);
+        false ->
+            case get_memory_from_cache() of
+                stale -> do_refresh_sys_memory(Now, State);
+                {ok, Mem} -> {Mem, State}
+            end
+    end.
+
+do_refresh_sys_memory(RefreshAt, State) ->
+    NewMem = load_ctl:get_sys_memory(),
+    NewExpiredAt = now_millisecond() + ?EXPIRED_MS,
+    ets:insert(?MODULE, {?SYS_MEMORY_KEY, {NewMem, NewExpiredAt}}),
+    {NewMem, State#{latest_refresh => RefreshAt}}.
+
+get_memory_from_cache() ->
+    case ets:lookup(?MODULE, ?SYS_MEMORY_KEY) of
+        [] ->
+            stale;
+        [{_, {Mem, ExpiredAt}}] ->
+            case now_millisecond() < ExpiredAt of
+                true -> {ok, Mem};
+                false -> stale
+            end
+    end.
+
+now_millisecond() ->
+    erlang:system_time(millisecond).
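The new `emqx_mgmt_cache` caches the `{UsedRatio, Total}` memory sample in a public ETS table with a 3-second expiry: a warm read returns the cached tuple and merely nudges the server to refresh (a plain message send, so readers never block on `load_ctl:get_sys_memory/0`), while a cold or stale read falls back to a synchronous `gen_server:call` that itself degrades to `{0, 0}` on timeout. A sketch of both paths from a caller's perspective (illustrative only):

    demo() ->
        {ok, _Pid} = emqx_mgmt_cache:start_link(),
        %% Cold start: no ETS entry yet, so this blocks on the gen_server
        %% while it samples load_ctl and seeds the cache.
        {_Ratio1, _Total1} = emqx_mgmt_cache:get_sys_memory(),
        %% Warm: this returns the ETS copy and asynchronously asks the
        %% server to refresh, so the caller never waits.
        {_Ratio2, _Total2} = emqx_mgmt_cache:get_sys_memory(),
        ok.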
@@ -315,7 +315,7 @@ vm([]) ->
 vm(["all"]) ->
     [vm([Name]) || Name <- ["load", "memory", "process", "io", "ports"]];
 vm(["load"]) ->
-    [emqx_ctl:print("cpu/~-20s: ~ts~n", [L, V]) || {L, V} <- emqx_vm:loads()];
+    [emqx_ctl:print("cpu/~-20s: ~w~n", [L, V]) || {L, V} <- emqx_vm:loads()];
 vm(["memory"]) ->
     [emqx_ctl:print("memory/~-17s: ~w~n", [Cat, Val]) || {Cat, Val} <- erlang:memory()];
 vm(["process"]) ->
@@ -26,4 +26,21 @@ start_link() ->
     supervisor:start_link({local, ?MODULE}, ?MODULE, []).

 init([]) ->
-    {ok, {{one_for_one, 1, 5}, []}}.
+    Workers =
+        case os:type() of
+            {unix, linux} ->
+                [child_spec(emqx_mgmt_cache, 5000, worker)];
+            _ ->
+                []
+        end,
+    {ok, {{one_for_one, 1, 5}, Workers}}.
+
+child_spec(Mod, Shutdown, Type) ->
+    #{
+        id => Mod,
+        start => {Mod, start_link, []},
+        restart => permanent,
+        shutdown => Shutdown,
+        type => Type,
+        modules => [Mod]
+    }.
@@ -40,6 +40,9 @@ t_alarms_api(_) ->
     get_alarms(1, true),
     get_alarms(1, false).

+t_alarm_cpu(_) ->
+    ok.
+
 t_delete_alarms_api(_) ->
     Path = emqx_mgmt_api_test_util:api_path(["alarms"]),
     {ok, _} = emqx_mgmt_api_test_util:request_api(delete, Path),
@@ -20,6 +20,8 @@

 -include_lib("eunit/include/eunit.hrl").

+-define(PORT, (20000 + ?LINE)).
+
 all() ->
     emqx_common_test_helpers:all(?MODULE).
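`?PORT` expands at each use site, so `20000 + ?LINE` hands every caller a port number keyed to the source line that asks for it, which keeps the listeners created by different test cases from colliding. An illustration (line numbers are hypothetical):

    %% ?LINE expands to the line of each use site, so every textual
    %% occurrence of ?PORT yields a different port number.
    -define(PORT, (20000 + ?LINE)).

    port_a() -> ?PORT. %% e.g. on line 5 this returns 20005
    port_b() -> ?PORT. %% e.g. on line 6 this returns 20006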
@@ -32,13 +34,38 @@ end_per_suite(_) ->
     emqx_conf:remove([listeners, tcp, new1], #{override_to => local}),
     emqx_mgmt_api_test_util:end_suite([emqx_conf]).

-t_max_connection_default(_Config) ->
+init_per_testcase(Case, Config) ->
+    try
+        ?MODULE:Case({init, Config})
+    catch
+        error:function_clause ->
+            Config
+    end.
+
+end_per_testcase(Case, Config) ->
+    try
+        ?MODULE:Case({'end', Config})
+    catch
+        error:function_clause ->
+            ok
+    end.
+
+t_max_connection_default({init, Config}) ->
     emqx_mgmt_api_test_util:end_suite([emqx_conf]),
     Etc = filename:join(["etc", "emqx.conf.all"]),
+    TmpConfName = atom_to_list(?FUNCTION_NAME) ++ ".conf",
+    Inc = filename:join(["etc", TmpConfName]),
     ConfFile = emqx_common_test_helpers:app_path(emqx_conf, Etc),
-    Bin = <<"listeners.tcp.max_connection_test {bind = \"0.0.0.0:3883\"}">>,
-    ok = file:write_file(ConfFile, Bin, [append]),
+    IncFile = emqx_common_test_helpers:app_path(emqx_conf, Inc),
+    Port = integer_to_binary(?PORT),
+    Bin = <<"listeners.tcp.max_connection_test {bind = \"0.0.0.0:", Port/binary, "\"}">>,
+    ok = file:write_file(IncFile, Bin),
+    ok = file:write_file(ConfFile, ["include \"", TmpConfName, "\""], [append]),
     emqx_mgmt_api_test_util:init_suite([emqx_conf]),
+    [{tmp_config_file, IncFile} | Config];
+t_max_connection_default({'end', Config}) ->
+    ok = file:delete(proplists:get_value(tmp_config_file, Config));
+t_max_connection_default(Config) when is_list(Config) ->
     %% Check infinity is binary not atom.
     #{<<"listeners">> := Listeners} = emqx_mgmt_api_listeners:do_list_listeners(),
     Target = lists:filter(
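The `init_per_testcase/2` shim above dispatches to an optional `TestCase({init, Config})` clause and treats a `function_clause` miss as "no per-case setup", so individual cases can opt in to setup and teardown without touching the suite callbacks. A minimal sketch of a case that opts in (`start_resource/0`, `stop_resource/1`, and `check/1` are placeholders, not helpers from this suite):

    t_example({init, Config}) ->
        [{res, start_resource()} | Config];
    t_example({'end', Config}) ->
        stop_resource(proplists:get_value(res, Config));
    t_example(Config) when is_list(Config) ->
        check(proplists:get_value(res, Config)).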
@@ -51,7 +78,7 @@ t_max_connection_default(_Config) ->
     emqx_conf:remove([listeners, tcp, max_connection_test], #{override_to => cluster}),
     ok.

-t_list_listeners(_) ->
+t_list_listeners(Config) when is_list(Config) ->
     Path = emqx_mgmt_api_test_util:api_path(["listeners"]),
     Res = request(get, Path, [], []),
     #{<<"listeners">> := Expect} = emqx_mgmt_api_listeners:do_list_listeners(),
@@ -71,9 +98,10 @@ t_list_listeners(_) ->
     ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])),

     OriginListener2 = maps:remove(<<"id">>, OriginListener),
+    Port = integer_to_binary(?PORT),
     NewConf = OriginListener2#{
         <<"name">> => <<"new">>,
-        <<"bind">> => <<"0.0.0.0:2883">>,
+        <<"bind">> => <<"0.0.0.0:", Port/binary>>,
         <<"max_connections">> := <<"infinity">>
     },
     Create = request(post, Path, [], NewConf),
@@ -89,7 +117,7 @@ t_list_listeners(_) ->
     ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])),
     ok.

-t_tcp_crud_listeners_by_id(_) ->
+t_tcp_crud_listeners_by_id(Config) when is_list(Config) ->
     ListenerId = <<"tcp:default">>,
     NewListenerId = <<"tcp:new">>,
     MinListenerId = <<"tcp:min">>,
@@ -97,7 +125,7 @@ t_tcp_crud_listeners_by_id(_) ->
     Type = <<"tcp">>,
     crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type).

-t_ssl_crud_listeners_by_id(_) ->
+t_ssl_crud_listeners_by_id(Config) when is_list(Config) ->
     ListenerId = <<"ssl:default">>,
     NewListenerId = <<"ssl:new">>,
     MinListenerId = <<"ssl:min">>,
@@ -105,7 +133,7 @@ t_ssl_crud_listeners_by_id(_) ->
     Type = <<"ssl">>,
     crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type).

-t_ws_crud_listeners_by_id(_) ->
+t_ws_crud_listeners_by_id(Config) when is_list(Config) ->
     ListenerId = <<"ws:default">>,
     NewListenerId = <<"ws:new">>,
     MinListenerId = <<"ws:min">>,
@@ -113,7 +141,7 @@ t_ws_crud_listeners_by_id(_) ->
     Type = <<"ws">>,
     crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type).

-t_wss_crud_listeners_by_id(_) ->
+t_wss_crud_listeners_by_id(Config) when is_list(Config) ->
     ListenerId = <<"wss:default">>,
     NewListenerId = <<"wss:new">>,
     MinListenerId = <<"wss:min">>,
@@ -121,7 +149,7 @@ t_wss_crud_listeners_by_id(_) ->
     Type = <<"wss">>,
     crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type).

-t_api_listeners_list_not_ready(_Config) ->
+t_api_listeners_list_not_ready(Config) when is_list(Config) ->
     net_kernel:start(['listeners@127.0.0.1', longnames]),
     ct:timetrap({seconds, 120}),
     snabbkaffe:fix_ct_logging(),
@@ -151,16 +179,17 @@ t_api_listeners_list_not_ready(_Config) ->
         emqx_common_test_helpers:stop_slave(Node2)
     end.

-t_clear_certs(_) ->
+t_clear_certs(Config) when is_list(Config) ->
     ListenerId = <<"ssl:default">>,
     NewListenerId = <<"ssl:clear">>,

     OriginPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]),
     NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]),
     ConfTempT = request(get, OriginPath, [], []),
|
ConfTempT = request(get, OriginPath, [], []),
|
||||||
|
Port = integer_to_binary(?PORT),
|
||||||
ConfTemp = ConfTempT#{
|
ConfTemp = ConfTempT#{
|
||||||
<<"id">> => NewListenerId,
|
<<"id">> => NewListenerId,
|
||||||
<<"bind">> => <<"0.0.0.0:2883">>
|
<<"bind">> => <<"0.0.0.0:", Port/binary>>
|
||||||
},
|
},
|
||||||
|
|
||||||
%% create, make sure the cert files are created
|
%% create, make sure the cert files are created
|
||||||
|
@ -245,9 +274,11 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
|
||||||
%% create with full options
|
%% create with full options
|
||||||
?assertEqual({error, not_found}, is_running(NewListenerId)),
|
?assertEqual({error, not_found}, is_running(NewListenerId)),
|
||||||
?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])),
|
?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])),
|
||||||
|
Port1 = integer_to_binary(?PORT),
|
||||||
|
Port2 = integer_to_binary(?PORT),
|
||||||
NewConf = OriginListener#{
|
NewConf = OriginListener#{
|
||||||
<<"id">> => NewListenerId,
|
<<"id">> => NewListenerId,
|
||||||
<<"bind">> => <<"0.0.0.0:2883">>
|
<<"bind">> => <<"0.0.0.0:", Port1/binary>>
|
||||||
},
|
},
|
||||||
Create = request(post, NewPath, [], NewConf),
|
Create = request(post, NewPath, [], NewConf),
|
||||||
?assertEqual(lists:sort(maps:keys(OriginListener)), lists:sort(maps:keys(Create))),
|
?assertEqual(lists:sort(maps:keys(OriginListener)), lists:sort(maps:keys(Create))),
|
||||||
|
@ -271,7 +302,7 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
|
||||||
} ->
|
} ->
|
||||||
#{
|
#{
|
||||||
<<"id">> => MinListenerId,
|
<<"id">> => MinListenerId,
|
||||||
<<"bind">> => <<"0.0.0.0:3883">>,
|
<<"bind">> => <<"0.0.0.0:", Port2/binary>>,
|
||||||
<<"type">> => Type,
|
<<"type">> => Type,
|
||||||
<<"ssl_options">> => #{
|
<<"ssl_options">> => #{
|
||||||
<<"cacertfile">> => CaCertFile,
|
<<"cacertfile">> => CaCertFile,
|
||||||
|
@ -282,7 +313,7 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
|
||||||
_ ->
|
_ ->
|
||||||
#{
|
#{
|
||||||
<<"id">> => MinListenerId,
|
<<"id">> => MinListenerId,
|
||||||
<<"bind">> => <<"0.0.0.0:3883">>,
|
<<"bind">> => <<"0.0.0.0:", Port2/binary>>,
|
||||||
<<"type">> => Type
|
<<"type">> => Type
|
||||||
}
|
}
|
||||||
end,
|
end,
|
||||||
|
@ -296,7 +327,7 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
|
||||||
BadPath = emqx_mgmt_api_test_util:api_path(["listeners", BadId]),
|
BadPath = emqx_mgmt_api_test_util:api_path(["listeners", BadId]),
|
||||||
BadConf = OriginListener#{
|
BadConf = OriginListener#{
|
||||||
<<"id">> => BadId,
|
<<"id">> => BadId,
|
||||||
<<"bind">> => <<"0.0.0.0:2883">>
|
<<"bind">> => <<"0.0.0.0:", Port1/binary>>
|
||||||
},
|
},
|
||||||
?assertMatch({error, {"HTTP/1.1", 400, _}}, request(post, BadPath, [], BadConf)),
|
?assertMatch({error, {"HTTP/1.1", 400, _}}, request(post, BadPath, [], BadConf)),
|
||||||
|
|
||||||
|
@ -332,12 +363,12 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
|
||||||
?assertEqual([], delete(NewPath)),
|
?assertEqual([], delete(NewPath)),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_delete_nonexistent_listener(_) ->
|
t_delete_nonexistent_listener(Config) when is_list(Config) ->
|
||||||
NonExist = emqx_mgmt_api_test_util:api_path(["listeners", "tcp:nonexistent"]),
|
NonExist = emqx_mgmt_api_test_util:api_path(["listeners", "tcp:nonexistent"]),
|
||||||
?assertEqual([], delete(NonExist)),
|
?assertEqual([], delete(NonExist)),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_action_listeners(_) ->
|
t_action_listeners(Config) when is_list(Config) ->
|
||||||
ID = "tcp:default",
|
ID = "tcp:default",
|
||||||
action_listener(ID, "stop", false),
|
action_listener(ID, "stop", false),
|
||||||
action_listener(ID, "start", true),
|
action_listener(ID, "start", true),
|
||||||
|
|
|
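Note: the `when is_list(Config)` guards and the new `{init, Config}` / `{'end', Config}` clauses rely on the suite routing per-testcase setup through the testcase function itself. The `error:function_clause -> ok` fallback added at the top of this hunk is the tail of exactly such a dispatcher; a minimal sketch of the pair of hooks (the suite's actual `init_per_testcase/2` body is not shown here, so its shape is an assumption):

    %% Sketch only: dispatch CT hooks into the testcase function and fall
    %% through when a testcase defines no {init, _} / {'end', _} clause.
    init_per_testcase(Case, Config) ->
        try
            ?MODULE:Case({init, Config})
        catch
            error:function_clause ->
                %% testcase has no special init; use Config unchanged
                Config
        end.

    end_per_testcase(Case, Config) ->
        try
            ?MODULE:Case({'end', Config})
        catch
            error:function_clause ->
                ok
        end.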
@@ -24,11 +24,11 @@ all() ->
     emqx_common_test_helpers:all(?MODULE).

 init_per_suite(Config) ->
-    emqx_mgmt_api_test_util:init_suite([emqx_conf]),
+    emqx_mgmt_api_test_util:init_suite([emqx_conf, emqx_management]),
     Config.

 end_per_suite(_) ->
-    emqx_mgmt_api_test_util:end_suite([emqx_conf]).
+    emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]).

 init_per_testcase(t_log_path, Config) ->
     emqx_config_logger:add_handler(),

@@ -152,7 +152,7 @@ cluster(Specs) ->
     Env = [{emqx, boot_modules, []}],
     emqx_common_test_helpers:emqx_cluster(Specs, [
         {env, Env},
-        {apps, [emqx_conf]},
+        {apps, [emqx_conf, emqx_management]},
         {load_schema, false},
         {join_to, true},
         {env_handler, fun
@@ -19,18 +19,25 @@
 -compile(nowarn_export_all).

 -include_lib("eunit/include/eunit.hrl").
+-include_lib("common_test/include/ct.hrl").

+-define(ROUTE_TAB, emqx_route).

 all() ->
     emqx_common_test_helpers:all(?MODULE).

 init_per_suite(Config) ->
     emqx_mgmt_api_test_util:init_suite(),
-    Config.
+    Slave = emqx_common_test_helpers:start_slave(some_node, []),
+    [{slave, Slave} | Config].

-end_per_suite(_) ->
+end_per_suite(Config) ->
+    Slave = ?config(slave, Config),
+    emqx_common_test_helpers:stop_slave(Slave),
+    mria:clear_table(?ROUTE_TAB),
     emqx_mgmt_api_test_util:end_suite().

-t_nodes_api(_) ->
+t_nodes_api(Config) ->
     Node = atom_to_binary(node(), utf8),
     Topic = <<"test_topic">>,
     {ok, Client} = emqtt:start_link(#{

@@ -72,8 +79,17 @@ t_nodes_api(_) ->
     ),

     %% get topics/:topic
+    %% We add another route here to ensure that the response handles
+    %% multiple routes for a single topic
+    Slave = ?config(slave, Config),
+    ok = emqx_router:add_route(Topic, Slave),
     RoutePath = emqx_mgmt_api_test_util:api_path(["topics", Topic]),
     {ok, RouteResponse} = emqx_mgmt_api_test_util:request_api(get, RoutePath),
-    RouteData = emqx_json:decode(RouteResponse, [return_maps]),
-    ?assertEqual(Topic, maps:get(<<"topic">>, RouteData)),
-    ?assertEqual(Node, maps:get(<<"node">>, RouteData)).
+    ok = emqx_router:delete_route(Topic, Slave),
+    [
+        #{<<"topic">> := Topic, <<"node">> := Node1},
+        #{<<"topic">> := Topic, <<"node">> := Node2}
+    ] = emqx_json:decode(RouteResponse, [return_maps]),
+
+    ?assertEqual(lists:usort([Node, atom_to_binary(Slave)]), lists:usort([Node1, Node2])).
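Note: the rewritten assertion compares `lists:usort/1` results because the API may return the two routes for the topic in either order; `usort/1` sorts and de-duplicates, making the check order-insensitive. The same idea as a standalone check:

    %% Order-insensitive comparison of expected vs. returned nodes.
    true = lists:usort([Node, atom_to_binary(Slave)]) =:=
           lists:usort([Node1, Node2]).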
@@ -58,8 +58,8 @@ For bridges only have ingress direction data flow, it can be set to 0 otherwise

    start_timeout {
        desc {
-          en: """If 'start_after_created' enabled, how long time do we wait for the resource get started, in milliseconds."""
+          en: """Time interval to wait for an auto-started resource to become healthy before responding resource creation requests."""
-          zh: """如果选择了创建后立即启动资源,此选项用来设置等待资源启动的超时时间,单位毫秒。"""
+          zh: """在回复资源创建请求前等待资源进入健康状态的时间。"""
        }
        label {
            en: """Start Timeout"""

@@ -80,8 +80,19 @@
    query_mode {
        desc {
-          en: """Query mode. Optional 'sync/async', default 'sync'."""
+          en: """Query mode. Optional 'sync/async', default 'async'."""
-          zh: """请求模式。可选 '同步/异步',默认为'同步'模式。"""
+          zh: """请求模式。可选 '同步/异步',默认为'异步'模式。"""
+        }
+        label {
+            en: """Query mode"""
+            zh: """请求模式"""
+        }
+    }
+
+    query_mode_sync_only {
+        desc {
+            en: """Query mode. Only support 'sync'."""
+            zh: """请求模式。目前只支持同步模式。"""
        }
        label {
            en: """Query mode"""
@@ -31,7 +31,9 @@
     pick_key => term(),
     timeout => timeout(),
     expire_at => infinity | integer(),
-    async_reply_fun => reply_fun()
+    async_reply_fun => reply_fun(),
+    simple_query => boolean(),
+    is_buffer_supported => boolean()
 }.
 -type resource_data() :: #{
     id := resource_id(),
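Note: with the two new optional keys, a fully populated query-options map might look as follows (a sketch; the key values and the `ReplyFun`/`Ctx` names are purely illustrative):

    QueryOpts = #{
        timeout => 5000,
        expire_at => infinity,
        async_reply_fun => {ReplyFun, [Ctx]},  %% hypothetical reply fun
        simple_query => false,
        is_buffer_supported => false
    },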
@@ -264,7 +264,8 @@ query(ResId, Request, Opts) ->
     case {IsBufferSupported, QM} of
         {true, _} ->
             %% only Kafka so far
-            emqx_resource_buffer_worker:simple_async_query(ResId, Request);
+            Opts1 = Opts#{is_buffer_supported => true},
+            emqx_resource_buffer_worker:simple_async_query(ResId, Request, Opts1);
         {false, sync} ->
             emqx_resource_buffer_worker:sync_query(ResId, Request, Opts);
         {false, async} ->
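Note: after this change `query/3` is effectively a three-way switch. Connectors that buffer internally (only Kafka so far, per the comment) bypass the buffer-worker queue, and the flag now travels in the options so `do_call_query/7` further down in this diff can skip the connected-status check. Summary (sketch; the async branch body is truncated in this hunk):

    %% {IsBufferSupported, QueryMode} dispatch:
    %%   {true,  _}     -> simple_async_query/3, Opts carries is_buffer_supported => true
    %%   {false, sync}  -> emqx_resource_buffer_worker:sync_query/3
    %%   {false, async} -> buffer worker async path (not shown in this hunk)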
@@ -20,7 +20,6 @@
 -module(emqx_resource_buffer_worker).

 -include("emqx_resource.hrl").
--include("emqx_resource_utils.hrl").
 -include("emqx_resource_errors.hrl").
 -include_lib("emqx/include/logger.hrl").
 -include_lib("stdlib/include/ms_transform.hrl").

@@ -39,7 +38,7 @@

 -export([
     simple_sync_query/2,
-    simple_async_query/2
+    simple_async_query/3
 ]).

 -export([

@@ -53,7 +52,7 @@

 -export([queue_item_marshaller/1, estimate_size/1]).

--export([reply_after_query/8, batch_reply_after_query/8]).
+-export([handle_async_reply/2, handle_async_batch_reply/2]).

 -export([clear_disk_queue_dir/2]).

@@ -63,11 +62,7 @@
 -define(SEND_REQ(FROM, REQUEST), {'$send_req', FROM, REQUEST}).
 -define(QUERY(FROM, REQUEST, SENT, EXPIRE_AT), {query, FROM, REQUEST, SENT, EXPIRE_AT}).
 -define(SIMPLE_QUERY(REQUEST), ?QUERY(undefined, REQUEST, false, infinity)).
--define(REPLY(FROM, REQUEST, SENT, RESULT), {reply, FROM, REQUEST, SENT, RESULT}).
+-define(REPLY(FROM, SENT, RESULT), {reply, FROM, SENT, RESULT}).
--define(EXPAND(RESULT, BATCH), [
-    ?REPLY(FROM, REQUEST, SENT, RESULT)
- || ?QUERY(FROM, REQUEST, SENT, _EXPIRE_AT) <- BATCH
-]).
 -define(INFLIGHT_ITEM(Ref, BatchOrQuery, IsRetriable, WorkerMRef),
     {Ref, BatchOrQuery, IsRetriable, WorkerMRef}
 ).

@@ -78,9 +73,8 @@
 -type id() :: binary().
 -type index() :: pos_integer().
 -type expire_at() :: infinity | integer().
--type queue_query() :: ?QUERY(from(), request(), HasBeenSent :: boolean(), expire_at()).
+-type queue_query() :: ?QUERY(reply_fun(), request(), HasBeenSent :: boolean(), expire_at()).
 -type request() :: term().
--type from() :: pid() | reply_fun() | request_from().
 -type request_from() :: undefined | gen_statem:from().
 -type state() :: blocked | running.
 -type inflight_key() :: integer().
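Note: for readability, the concrete tuple shapes after this macro change are:

    %% ?QUERY(ReplyTo, Request, HasBeenSent, ExpireAt)
    %%   == {query, ReplyTo, Request, HasBeenSent, ExpireAt}
    %% ?REPLY(ReplyTo, HasBeenSent, Result)
    %%   == {reply, ReplyTo, HasBeenSent, Result}
    %% ReplyTo is now always a reply_fun() (undefined or a {Fun, Args}
    %% tuple), which is why the from() union type could be dropped.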
@@ -130,18 +124,18 @@ simple_sync_query(Id, Request) ->
     Index = undefined,
     QueryOpts = simple_query_opts(),
     emqx_resource_metrics:matched_inc(Id),
-    Ref = make_message_ref(),
+    Ref = make_request_ref(),
     Result = call_query(sync, Id, Index, Ref, ?SIMPLE_QUERY(Request), QueryOpts),
     _ = handle_query_result(Id, Result, _HasBeenSent = false),
     Result.

 %% simple async-query the resource without batching and queuing.
--spec simple_async_query(id(), request()) -> term().
-simple_async_query(Id, Request) ->
+-spec simple_async_query(id(), request(), query_opts()) -> term().
+simple_async_query(Id, Request, QueryOpts0) ->
     Index = undefined,
-    QueryOpts = simple_query_opts(),
+    QueryOpts = maps:merge(simple_query_opts(), QueryOpts0),
     emqx_resource_metrics:matched_inc(Id),
-    Ref = make_message_ref(),
+    Ref = make_request_ref(),
     Result = call_query(async, Id, Index, Ref, ?SIMPLE_QUERY(Request), QueryOpts),
     _ = handle_query_result(Id, Result, _HasBeenSent = false),
     Result.
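Note: `maps:merge/2` prefers keys from its second argument, so caller-supplied options override the simple-query defaults here. For example (shell sketch):

    1> maps:merge(#{timeout => infinity}, #{timeout => 5000, is_buffer_supported => true}).
    #{timeout => 5000,is_buffer_supported => true}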
@@ -201,7 +195,7 @@ init({Id, Index, Opts}) ->
     {ok, running, Data}.

 running(enter, _, Data) ->
-    ?tp(buffer_worker_enter_running, #{}),
+    ?tp(buffer_worker_enter_running, #{id => maps:get(id, Data)}),
     %% According to `gen_statem' laws, we mustn't call `maybe_flush'
     %% directly because it may decide to return `{next_state, blocked, _}',
     %% and that's an invalid response for a state enter call.

@@ -214,7 +208,7 @@ running(cast, flush, Data) ->
     flush(Data);
 running(cast, block, St) ->
     {next_state, blocked, St};
-running(info, ?SEND_REQ(_From, _Req) = Request0, Data) ->
+running(info, ?SEND_REQ(_ReplyTo, _Req) = Request0, Data) ->
     handle_query_requests(Request0, Data);
 running(info, {flush, Ref}, St = #{tref := {_TRef, Ref}}) ->
     flush(St#{tref := undefined});

@@ -242,8 +236,8 @@ blocked(cast, flush, Data) ->
     resume_from_blocked(Data);
 blocked(state_timeout, unblock, St) ->
     resume_from_blocked(St);
-blocked(info, ?SEND_REQ(_ReqFrom, {query, _Request, _Opts}) = Request0, Data0) ->
-    {_Queries, Data} = collect_and_enqueue_query_requests(Request0, Data0),
+blocked(info, ?SEND_REQ(_ReplyTo, _Req) = Request0, Data0) ->
+    Data = collect_and_enqueue_query_requests(Request0, Data0),
     {keep_state, Data};
 blocked(info, {flush, _Ref}, _Data) ->
     keep_state_and_data;
@@ -270,15 +264,17 @@ code_change(_OldVsn, State, _Extra) ->

 %%==============================================================================
 -define(PICK(ID, KEY, PID, EXPR),
-    try gproc_pool:pick_worker(ID, KEY) of
-        PID when is_pid(PID) ->
-            EXPR;
-        _ ->
-            ?RESOURCE_ERROR(worker_not_created, "resource not created")
+    try
+        case gproc_pool:pick_worker(ID, KEY) of
+            PID when is_pid(PID) ->
+                EXPR;
+            _ ->
+                ?RESOURCE_ERROR(worker_not_created, "resource not created")
+        end
     catch
         error:badarg ->
             ?RESOURCE_ERROR(worker_not_created, "resource not created");
-        exit:{timeout, _} ->
+        error:timeout ->
             ?RESOURCE_ERROR(timeout, "call resource timeout")
     end
 ).

@@ -288,7 +284,8 @@ pick_call(Id, Key, Query, Timeout) ->
     Caller = self(),
     MRef = erlang:monitor(process, Pid, [{alias, reply_demonitor}]),
     From = {Caller, MRef},
-    erlang:send(Pid, ?SEND_REQ(From, Query)),
+    ReplyTo = {fun gen_statem:reply/2, [From]},
+    erlang:send(Pid, ?SEND_REQ(ReplyTo, Query)),
     receive
         {MRef, Response} ->
             erlang:demonitor(MRef, [flush]),

@@ -308,8 +305,8 @@ pick_call(Id, Key, Query, Timeout) ->

 pick_cast(Id, Key, Query) ->
     ?PICK(Id, Key, Pid, begin
-        From = undefined,
-        erlang:send(Pid, ?SEND_REQ(From, Query)),
+        ReplyTo = undefined,
+        erlang:send(Pid, ?SEND_REQ(ReplyTo, Query)),
         ok
     end).
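Note: wrapping the `gen_statem` caller as `{fun gen_statem:reply/2, [From]}` makes sync callers indistinguishable from async reply funs downstream, so one reply path serves both; applying the tuple is equivalent to `gen_statem:reply(From, Result)`. Sketch of the uniform invocation (this mirrors `do_reply_caller/2` introduced further down):

    %% Both caller kinds reduce to apply(F, Args ++ [Result]):
    reply_to(undefined, _Result) ->
        ok;
    reply_to({F, Args}, Result) when is_function(F) ->
        _ = erlang:apply(F, Args ++ [Result]),
        ok.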
@@ -370,8 +367,8 @@ retry_inflight_sync(Ref, QueryOrBatch, Data0) ->
     Result = call_query(sync, Id, Index, Ref, QueryOrBatch, QueryOpts),
     ReplyResult =
         case QueryOrBatch of
-            ?QUERY(From, CoreReq, HasBeenSent, _ExpireAt) ->
-                Reply = ?REPLY(From, CoreReq, HasBeenSent, Result),
+            ?QUERY(ReplyTo, _, HasBeenSent, _ExpireAt) ->
+                Reply = ?REPLY(ReplyTo, HasBeenSent, Result),
                 reply_caller_defer_metrics(Id, Reply, QueryOpts);
             [?QUERY(_, _, _, _) | _] = Batch ->
                 batch_reply_caller_defer_metrics(Id, Result, Batch, QueryOpts)

@@ -412,7 +409,7 @@ retry_inflight_sync(Ref, QueryOrBatch, Data0) ->
 -spec handle_query_requests(?SEND_REQ(request_from(), request()), data()) ->
     gen_statem:event_handler_result(state(), data()).
 handle_query_requests(Request0, Data0) ->
-    {_Queries, Data} = collect_and_enqueue_query_requests(Request0, Data0),
+    Data = collect_and_enqueue_query_requests(Request0, Data0),
     maybe_flush(Data).

 collect_and_enqueue_query_requests(Request0, Data0) ->

@@ -425,21 +422,37 @@ collect_and_enqueue_query_requests(Request0, Data0) ->
     Queries =
         lists:map(
             fun
-                (?SEND_REQ(undefined = _From, {query, Req, Opts})) ->
+                (?SEND_REQ(undefined = _ReplyTo, {query, Req, Opts})) ->
                     ReplyFun = maps:get(async_reply_fun, Opts, undefined),
                     HasBeenSent = false,
                     ExpireAt = maps:get(expire_at, Opts),
                     ?QUERY(ReplyFun, Req, HasBeenSent, ExpireAt);
-                (?SEND_REQ(From, {query, Req, Opts})) ->
+                (?SEND_REQ(ReplyTo, {query, Req, Opts})) ->
                     HasBeenSent = false,
                     ExpireAt = maps:get(expire_at, Opts),
-                    ?QUERY(From, Req, HasBeenSent, ExpireAt)
+                    ?QUERY(ReplyTo, Req, HasBeenSent, ExpireAt)
             end,
             Requests
         ),
-    NewQ = append_queue(Id, Index, Q, Queries),
-    Data = Data0#{queue := NewQ},
-    {Queries, Data}.
+    {Overflown, NewQ} = append_queue(Id, Index, Q, Queries),
+    ok = reply_overflown(Overflown),
+    Data0#{queue := NewQ}.
+
+reply_overflown([]) ->
+    ok;
+reply_overflown([?QUERY(ReplyTo, _Req, _HasBeenSent, _ExpireAt) | More]) ->
+    do_reply_caller(ReplyTo, {error, buffer_overflow}),
+    reply_overflown(More).
+
+do_reply_caller(undefined, _Result) ->
+    ok;
+do_reply_caller({F, Args}, {async_return, Result}) ->
+    %% this is an early return to async caller, the retry
+    %% decision has to be made by the caller
+    do_reply_caller({F, Args}, Result);
+do_reply_caller({F, Args}, Result) when is_function(F) ->
+    _ = erlang:apply(F, Args ++ [Result]),
+    ok.

 maybe_flush(Data0) ->
     #{
@@ -498,7 +511,7 @@ flush(Data0) ->
         buffer_worker_flush_potentially_partial,
         #{expired => Expired, not_expired => NotExpired}
     ),
-    Ref = make_message_ref(),
+    Ref = make_request_ref(),
     do_flush(Data2, #{
         new_queue => Q1,
         is_batch => IsBatch,

@@ -533,10 +546,10 @@ do_flush(
         inflight_tid := InflightTID
     } = Data0,
     %% unwrap when not batching (i.e., batch size == 1)
-    [?QUERY(From, CoreReq, HasBeenSent, _ExpireAt) = Request] = Batch,
+    [?QUERY(ReplyTo, _, HasBeenSent, _ExpireAt) = Request] = Batch,
     QueryOpts = #{inflight_tid => InflightTID, simple_query => false},
     Result = call_query(configured, Id, Index, Ref, Request, QueryOpts),
-    Reply = ?REPLY(From, CoreReq, HasBeenSent, Result),
+    Reply = ?REPLY(ReplyTo, HasBeenSent, Result),
     case reply_caller(Id, Reply, QueryOpts) of
         %% Failed; remove the request from the queue, as we cannot pop
         %% from it again, but we'll retry it using the inflight table.
@@ -690,6 +703,14 @@ batch_reply_caller(Id, BatchResult, Batch, QueryOpts) ->
     ShouldBlock.

 batch_reply_caller_defer_metrics(Id, BatchResult, Batch, QueryOpts) ->
+    %% the `Mod:on_batch_query/3` returns a single result for a batch,
+    %% so we need to expand
+    Replies = lists:map(
+        fun(?QUERY(FROM, _REQUEST, SENT, _EXPIRE_AT)) ->
+            ?REPLY(FROM, SENT, BatchResult)
+        end,
+        Batch
+    ),
     {ShouldAck, PostFns} =
         lists:foldl(
             fun(Reply, {_ShouldAck, PostFns}) ->

@@ -697,9 +718,7 @@ batch_reply_caller_defer_metrics(Id, BatchResult, Batch, QueryOpts) ->
                 {ShouldAck, [PostFn | PostFns]}
             end,
             {ack, []},
-            %% the `Mod:on_batch_query/3` returns a single result for a batch,
-            %% so we need to expand
-            ?EXPAND(BatchResult, Batch)
+            Replies
         ),
     PostFn = fun() -> lists:foreach(fun(F) -> F() end, PostFns) end,
     {ShouldAck, PostFn}.
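Note: one `Mod:on_batch_query/3` call yields a single result for the whole batch, so the result is fanned out 1:N before the fold; replacing the `?EXPAND` comprehension macro with an explicit `lists:map/2` keeps the bound variables local to the fun instead of implicitly capturing names at the macro call site. Illustration (hypothetical values):

    %% A 2-element batch with BatchResult = ok expands to:
    %% [?REPLY(ReplyTo1, false, ok), ?REPLY(ReplyTo2, false, ok)]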
@@ -711,48 +730,23 @@ reply_caller(Id, Reply, QueryOpts) ->

 %% Should only reply to the caller when the decision is final (not
 %% retriable). See comment on `handle_query_result_pure'.
-reply_caller_defer_metrics(Id, ?REPLY(undefined, _, HasBeenSent, Result), _QueryOpts) ->
+reply_caller_defer_metrics(Id, ?REPLY(undefined, HasBeenSent, Result), _QueryOpts) ->
     handle_query_result_pure(Id, Result, HasBeenSent);
-reply_caller_defer_metrics(Id, ?REPLY({ReplyFun, Args}, _, HasBeenSent, Result), QueryOpts) when
-    is_function(ReplyFun)
-->
+reply_caller_defer_metrics(Id, ?REPLY(ReplyTo, HasBeenSent, Result), QueryOpts) ->
     IsSimpleQuery = maps:get(simple_query, QueryOpts, false),
     IsUnrecoverableError = is_unrecoverable_error(Result),
     {ShouldAck, PostFn} = handle_query_result_pure(Id, Result, HasBeenSent),
     case {ShouldAck, Result, IsUnrecoverableError, IsSimpleQuery} of
         {ack, {async_return, _}, true, _} ->
-            apply(ReplyFun, Args ++ [Result]),
-            ok;
+            ok = do_reply_caller(ReplyTo, Result);
         {ack, {async_return, _}, false, _} ->
             ok;
         {_, _, _, true} ->
-            apply(ReplyFun, Args ++ [Result]),
-            ok;
+            ok = do_reply_caller(ReplyTo, Result);
         {nack, _, _, _} ->
             ok;
         {ack, _, _, _} ->
-            apply(ReplyFun, Args ++ [Result]),
-            ok
-    end,
-    {ShouldAck, PostFn};
-reply_caller_defer_metrics(Id, ?REPLY(From, _, HasBeenSent, Result), QueryOpts) ->
-    IsSimpleQuery = maps:get(simple_query, QueryOpts, false),
-    IsUnrecoverableError = is_unrecoverable_error(Result),
-    {ShouldAck, PostFn} = handle_query_result_pure(Id, Result, HasBeenSent),
-    case {ShouldAck, Result, IsUnrecoverableError, IsSimpleQuery} of
-        {ack, {async_return, _}, true, _} ->
-            gen_statem:reply(From, Result),
-            ok;
-        {ack, {async_return, _}, false, _} ->
-            ok;
-        {_, _, _, true} ->
-            gen_statem:reply(From, Result),
-            ok;
-        {nack, _, _, _} ->
-            ok;
-        {ack, _, _, _} ->
-            gen_statem:reply(From, Result),
-            ok
+            ok = do_reply_caller(ReplyTo, Result)
     end,
     {ShouldAck, PostFn}.
@@ -857,23 +851,33 @@ handle_async_worker_down(Data0, Pid) ->
 call_query(QM0, Id, Index, Ref, Query, QueryOpts) ->
     ?tp(call_query_enter, #{id => Id, query => Query}),
     case emqx_resource_manager:ets_lookup(Id) of
-        {ok, _Group, #{mod := Mod, state := ResSt, status := connected} = Data} ->
-            QM =
-                case QM0 =:= configured of
-                    true -> maps:get(query_mode, Data);
-                    false -> QM0
-                end,
-            CBM = maps:get(callback_mode, Data),
-            CallMode = call_mode(QM, CBM),
-            apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, QueryOpts);
         {ok, _Group, #{status := stopped}} ->
             ?RESOURCE_ERROR(stopped, "resource stopped or disabled");
-        {ok, _Group, #{status := S}} when S == connecting; S == disconnected ->
-            ?RESOURCE_ERROR(not_connected, "resource not connected");
+        {ok, _Group, Resource} ->
+            QM =
+                case QM0 =:= configured of
+                    true -> maps:get(query_mode, Resource);
+                    false -> QM0
+                end,
+            do_call_query(QM, Id, Index, Ref, Query, QueryOpts, Resource);
         {error, not_found} ->
             ?RESOURCE_ERROR(not_found, "resource not found")
     end.

+do_call_query(QM, Id, Index, Ref, Query, #{is_buffer_supported := true} = QueryOpts, Resource) ->
+    %% The connector supports buffer, send even in disconnected state
+    #{mod := Mod, state := ResSt, callback_mode := CBM} = Resource,
+    CallMode = call_mode(QM, CBM),
+    apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, QueryOpts);
+do_call_query(QM, Id, Index, Ref, Query, QueryOpts, #{status := connected} = Resource) ->
+    %% when calling from the buffer worker or other simple queries,
+    %% only apply the query fun when it's at connected status
+    #{mod := Mod, state := ResSt, callback_mode := CBM} = Resource,
+    CallMode = call_mode(QM, CBM),
+    apply_query_fun(CallMode, Mod, Id, Index, Ref, Query, ResSt, QueryOpts);
+do_call_query(_QM, _Id, _Index, _Ref, _Query, _QueryOpts, _Data) ->
+    ?RESOURCE_ERROR(not_connected, "resource not connected").

 -define(APPLY_RESOURCE(NAME, EXPR, REQ),
     try
         %% if the callback module (connector) wants to return an error that
@@ -903,13 +907,21 @@ apply_query_fun(async, Mod, Id, Index, Ref, ?QUERY(_, Request, _, _) = Query, Re
     ?APPLY_RESOURCE(
         call_query_async,
         begin
-            ReplyFun = fun ?MODULE:reply_after_query/8,
-            Args = [self(), Id, Index, InflightTID, Ref, Query, QueryOpts],
+            ReplyFun = fun ?MODULE:handle_async_reply/2,
+            ReplyContext = #{
+                buffer_worker => self(),
+                resource_id => Id,
+                worker_index => Index,
+                inflight_tid => InflightTID,
+                request_ref => Ref,
+                query_opts => QueryOpts,
+                query => minimize(Query)
+            },
             IsRetriable = false,
             WorkerMRef = undefined,
             InflightItem = ?INFLIGHT_ITEM(Ref, Query, IsRetriable, WorkerMRef),
             ok = inflight_append(InflightTID, InflightItem, Id, Index),
-            Result = Mod:on_query_async(Id, Request, {ReplyFun, Args}, ResSt),
+            Result = Mod:on_query_async(Id, Request, {ReplyFun, [ReplyContext]}, ResSt),
             {async_return, Result}
         end,
         Request

@@ -918,7 +930,7 @@ apply_query_fun(sync, Mod, Id, _Index, _Ref, [?QUERY(_, _, _, _) | _] = Batch, R
     ?tp(call_batch_query, #{
         id => Id, mod => Mod, batch => Batch, res_st => ResSt, call_mode => sync
     }),
-    Requests = [Request || ?QUERY(_From, Request, _, _ExpireAt) <- Batch],
+    Requests = lists:map(fun(?QUERY(_ReplyTo, Request, _, _ExpireAt)) -> Request end, Batch),
     ?APPLY_RESOURCE(call_batch_query, Mod:on_batch_query(Id, Requests, ResSt), Batch);
 apply_query_fun(async, Mod, Id, Index, Ref, [?QUERY(_, _, _, _) | _] = Batch, ResSt, QueryOpts) ->
     ?tp(call_batch_query_async, #{
@@ -928,32 +940,43 @@ apply_query_fun(async, Mod, Id, Index, Ref, [?QUERY(_, _, _, _) | _] = Batch, Re
     ?APPLY_RESOURCE(
         call_batch_query_async,
         begin
-            ReplyFun = fun ?MODULE:batch_reply_after_query/8,
-            ReplyFunAndArgs = {ReplyFun, [self(), Id, Index, InflightTID, Ref, Batch, QueryOpts]},
-            Requests = [Request || ?QUERY(_From, Request, _, _ExpireAt) <- Batch],
+            ReplyFun = fun ?MODULE:handle_async_batch_reply/2,
+            ReplyContext = #{
+                buffer_worker => self(),
+                resource_id => Id,
+                worker_index => Index,
+                inflight_tid => InflightTID,
+                request_ref => Ref,
+                query_opts => QueryOpts,
+                batch => minimize(Batch)
+            },
+            Requests = lists:map(
+                fun(?QUERY(_ReplyTo, Request, _, _ExpireAt)) -> Request end, Batch
+            ),
             IsRetriable = false,
             WorkerMRef = undefined,
             InflightItem = ?INFLIGHT_ITEM(Ref, Batch, IsRetriable, WorkerMRef),
             ok = inflight_append(InflightTID, InflightItem, Id, Index),
-            Result = Mod:on_batch_query_async(Id, Requests, ReplyFunAndArgs, ResSt),
+            Result = Mod:on_batch_query_async(Id, Requests, {ReplyFun, [ReplyContext]}, ResSt),
             {async_return, Result}
         end,
         Batch
     ).

-reply_after_query(
-    Pid,
-    Id,
-    Index,
-    InflightTID,
-    Ref,
-    ?QUERY(_From, _Request, _HasBeenSent, ExpireAt) = Query,
-    QueryOpts,
+handle_async_reply(
+    #{
+        request_ref := Ref,
+        inflight_tid := InflightTID,
+        resource_id := Id,
+        worker_index := Index,
+        buffer_worker := Pid,
+        query := ?QUERY(_, _, _, ExpireAt) = _Query
+    } = ReplyContext,
     Result
 ) ->
     ?tp(
-        buffer_worker_reply_after_query_enter,
+        handle_async_reply_enter,
-        #{batch_or_query => [Query], ref => Ref}
+        #{batch_or_query => [_Query], ref => Ref}
     ),
     Now = now_(),
     case is_expired(ExpireAt, Now) of
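Note: connectors receive the opaque `{ReplyFun, [ReplyContext]}` tuple via `on_query_async/4` / `on_batch_query_async/4` and are expected to invoke it once the driver completes, appending the result as the last argument. Sketch of the connector side (the function name is illustrative; only the apply convention is implied by the code above):

    %% In a connector, after the async driver call finishes:
    on_driver_complete({ReplyFun, Args}, Result) ->
        %% with Args = [ReplyContext], this calls
        %% handle_async_reply(ReplyContext, Result)
        _ = erlang:apply(ReplyFun, Args ++ [Result]),
        ok.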
@@ -962,52 +985,60 @@ reply_after_query(
             IsAcked = ack_inflight(InflightTID, Ref, Id, Index),
             IsAcked andalso emqx_resource_metrics:late_reply_inc(Id),
             IsFullBefore andalso ?MODULE:flush_worker(Pid),
-            ?tp(buffer_worker_reply_after_query_expired, #{expired => [Query]}),
+            ?tp(handle_async_reply_expired, #{expired => [_Query]}),
             ok;
         false ->
-            do_reply_after_query(Pid, Id, Index, InflightTID, Ref, Query, QueryOpts, Result)
+            do_handle_async_reply(ReplyContext, Result)
     end.

-do_reply_after_query(
-    Pid,
-    Id,
-    Index,
-    InflightTID,
-    Ref,
-    ?QUERY(From, Request, HasBeenSent, _ExpireAt),
-    QueryOpts,
+do_handle_async_reply(
+    #{
+        query_opts := QueryOpts,
+        resource_id := Id,
+        request_ref := Ref,
+        worker_index := Index,
+        buffer_worker := Pid,
+        inflight_tid := InflightTID,
+        query := ?QUERY(ReplyTo, _, Sent, _ExpireAt) = _Query
+    },
     Result
 ) ->
     %% NOTE: 'inflight' is the count of messages that were sent async
     %% but received no ACK, NOT the number of messages queued in the
     %% inflight window.
     {Action, PostFn} = reply_caller_defer_metrics(
-        Id, ?REPLY(From, Request, HasBeenSent, Result), QueryOpts
+        Id, ?REPLY(ReplyTo, Sent, Result), QueryOpts
     ),
+
+    ?tp(handle_async_reply, #{
+        action => Action,
+        batch_or_query => [_Query],
+        ref => Ref,
+        result => Result
+    }),
+
     case Action of
         nack ->
             %% Keep retrying.
-            ?tp(buffer_worker_reply_after_query, #{
-                action => Action,
-                batch_or_query => ?QUERY(From, Request, HasBeenSent, _ExpireAt),
-                ref => Ref,
-                result => Result
-            }),
             mark_inflight_as_retriable(InflightTID, Ref),
             ?MODULE:block(Pid);
         ack ->
-            ?tp(buffer_worker_reply_after_query, #{
-                action => Action,
-                batch_or_query => ?QUERY(From, Request, HasBeenSent, _ExpireAt),
-                ref => Ref,
-                result => Result
-            }),
             do_ack(InflightTID, Ref, Id, Index, PostFn, Pid, QueryOpts)
     end.

-batch_reply_after_query(Pid, Id, Index, InflightTID, Ref, Batch, QueryOpts, Result) ->
+handle_async_batch_reply(
+    #{
+        buffer_worker := Pid,
+        resource_id := Id,
+        worker_index := Index,
+        inflight_tid := InflightTID,
+        request_ref := Ref,
+        batch := Batch
+    } = ReplyContext,
+    Result
+) ->
     ?tp(
-        buffer_worker_reply_after_query_enter,
+        handle_async_reply_enter,
         #{batch_or_query => Batch, ref => Ref}
     ),
     Now = now_(),
@@ -1017,45 +1048,41 @@ batch_reply_after_query(Pid, Id, Index, InflightTID, Ref, Batch, QueryOpts, Resu
             IsAcked = ack_inflight(InflightTID, Ref, Id, Index),
             IsAcked andalso emqx_resource_metrics:late_reply_inc(Id),
             IsFullBefore andalso ?MODULE:flush_worker(Pid),
-            ?tp(buffer_worker_reply_after_query_expired, #{expired => Batch}),
+            ?tp(handle_async_reply_expired, #{expired => Batch}),
             ok;
         {NotExpired, Expired} ->
             NumExpired = length(Expired),
             emqx_resource_metrics:late_reply_inc(Id, NumExpired),
             NumExpired > 0 andalso
-                ?tp(buffer_worker_reply_after_query_expired, #{expired => Expired}),
-            do_batch_reply_after_query(
-                Pid, Id, Index, InflightTID, Ref, NotExpired, QueryOpts, Result
-            )
+                ?tp(handle_async_reply_expired, #{expired => Expired}),
+            do_handle_async_batch_reply(ReplyContext#{batch := NotExpired}, Result)
     end.

-do_batch_reply_after_query(Pid, Id, Index, InflightTID, Ref, Batch, QueryOpts, Result) ->
-    ?tp(
-        buffer_worker_reply_after_query_enter,
-        #{batch_or_query => Batch, ref => Ref}
-    ),
-    %% NOTE: 'inflight' is the count of messages that were sent async
-    %% but received no ACK, NOT the number of messages queued in the
-    %% inflight window.
+do_handle_async_batch_reply(
+    #{
+        buffer_worker := Pid,
+        resource_id := Id,
+        worker_index := Index,
+        inflight_tid := InflightTID,
+        request_ref := Ref,
+        batch := Batch,
+        query_opts := QueryOpts
+    },
+    Result
+) ->
     {Action, PostFn} = batch_reply_caller_defer_metrics(Id, Result, Batch, QueryOpts),
+    ?tp(handle_async_reply, #{
+        action => Action,
+        batch_or_query => Batch,
+        ref => Ref,
+        result => Result
+    }),
     case Action of
         nack ->
             %% Keep retrying.
-            ?tp(buffer_worker_reply_after_query, #{
-                action => nack,
-                batch_or_query => Batch,
-                ref => Ref,
-                result => Result
-            }),
             mark_inflight_as_retriable(InflightTID, Ref),
             ?MODULE:block(Pid);
         ack ->
-            ?tp(buffer_worker_reply_after_query, #{
-                action => ack,
-                batch_or_query => Batch,
-                ref => Ref,
-                result => Result
-            }),
             do_ack(InflightTID, Ref, Id, Index, PostFn, Pid, QueryOpts)
     end.
@@ -1083,23 +1110,30 @@ queue_item_marshaller(Item) ->
 estimate_size(QItem) ->
     erlang:external_size(QItem).

--spec append_queue(id(), index(), replayq:q(), [queue_query()]) -> replayq:q().
-append_queue(Id, Index, Q, Queries) when not is_binary(Q) ->
-    %% we must not append a raw binary because the marshaller will get
-    %% lost.
+-spec append_queue(id(), index(), replayq:q(), [queue_query()]) ->
+    {[queue_query()], replayq:q()}.
+append_queue(Id, Index, Q, Queries) ->
+    %% this assertion is to ensure that we never append a raw binary
+    %% because the marshaller will get lost.
+    false = is_binary(hd(Queries)),
     Q0 = replayq:append(Q, Queries),
-    Q2 =
+    {Overflown, Q2} =
         case replayq:overflow(Q0) of
-            Overflow when Overflow =< 0 ->
-                Q0;
-            Overflow ->
-                PopOpts = #{bytes_limit => Overflow, count_limit => 999999999},
+            OverflownBytes when OverflownBytes =< 0 ->
+                {[], Q0};
+            OverflownBytes ->
+                PopOpts = #{bytes_limit => OverflownBytes, count_limit => 999999999},
                 {Q1, QAckRef, Items2} = replayq:pop(Q0, PopOpts),
                 ok = replayq:ack(Q1, QAckRef),
                 Dropped = length(Items2),
-                emqx_resource_metrics:dropped_queue_full_inc(Id),
-                ?SLOG(error, #{msg => drop_query, reason => queue_full, dropped => Dropped}),
-                Q1
+                emqx_resource_metrics:dropped_queue_full_inc(Id, Dropped),
+                ?SLOG(info, #{
+                    msg => buffer_worker_overflow,
+                    resource_id => Id,
+                    worker_index => Index,
+                    dropped => Dropped
+                }),
+                {Items2, Q1}
         end,
     emqx_resource_metrics:queuing_set(Id, Index, queue_count(Q2)),
     ?tp(

@@ -1107,10 +1141,11 @@ append_queue(Id, Index, Q, Queries) when not is_binary(Q) ->
         #{
            id => Id,
            items => Queries,
-           queue_count => queue_count(Q2)
+           queue_count => queue_count(Q2),
+           overflown => length(Overflown)
         }
     ),
-    Q2.
+    {Overflown, Q2}.

 %%==============================================================================
 %% the inflight queue for async query

@@ -1119,6 +1154,10 @@ append_queue(Id, Index, Q, Queries) when not is_binary(Q) ->
 -define(INITIAL_TIME_REF, initial_time).
 -define(INITIAL_MONOTONIC_TIME_REF, initial_monotonic_time).

+%% NOTE
+%% There are 4 metadata rows in an inflight table, keyed by atoms declared above. ☝
+-define(INFLIGHT_META_ROWS, 4).

 inflight_new(InfltWinSZ, Id, Index) ->
     TableId = ets:new(
         emqx_resource_buffer_worker_inflight_tab,

@@ -1130,7 +1169,7 @@ inflight_new(InfltWinSZ, Id, Index) ->
     inflight_append(TableId, {?SIZE_REF, 0}, Id, Index),
     inflight_append(TableId, {?INITIAL_TIME_REF, erlang:system_time()}, Id, Index),
     inflight_append(
-        TableId, {?INITIAL_MONOTONIC_TIME_REF, make_message_ref()}, Id, Index
+        TableId, {?INITIAL_MONOTONIC_TIME_REF, make_request_ref()}, Id, Index
     ),
     TableId.

@@ -1151,7 +1190,7 @@ inflight_get_first_retriable(InflightTID, Now) ->
     case ets:select(InflightTID, MatchSpec, _Limit = 1) of
         '$end_of_table' ->
             none;
-        {[{Ref, Query = ?QUERY(_From, _CoreReq, _HasBeenSent, ExpireAt)}], _Continuation} ->
+        {[{Ref, Query = ?QUERY(_ReplyTo, _CoreReq, _HasBeenSent, ExpireAt)}], _Continuation} ->
            case is_expired(ExpireAt, Now) of
                true ->
                    {expired, Ref, [Query]};

@@ -1179,12 +1218,9 @@ is_inflight_full(InflightTID) ->
     Size >= MaxSize.

 inflight_num_batches(InflightTID) ->
-    %% Note: we subtract 2 because there're 2 metadata rows that hold
-    %% the maximum size value and the number of messages.
-    MetadataRowCount = 2,
     case ets:info(InflightTID, size) of
         undefined -> 0;
-        Size -> max(0, Size - MetadataRowCount)
+        Size -> max(0, Size - ?INFLIGHT_META_ROWS)
     end.

 inflight_num_msgs(InflightTID) ->

@@ -1210,7 +1246,7 @@ inflight_append(
 inflight_append(
     InflightTID,
     ?INFLIGHT_ITEM(
-        Ref, ?QUERY(_From, _Req, _HasBeenSent, _ExpireAt) = Query0, IsRetriable, WorkerMRef
+        Ref, ?QUERY(_ReplyTo, _Req, _HasBeenSent, _ExpireAt) = Query0, IsRetriable, WorkerMRef
     ),
     Id,
     Index

@@ -1369,8 +1405,8 @@ cancel_flush_timer(St = #{tref := {TRef, _Ref}}) ->
     _ = erlang:cancel_timer(TRef),
     St#{tref => undefined}.

--spec make_message_ref() -> inflight_key().
-make_message_ref() ->
+-spec make_request_ref() -> inflight_key().
+make_request_ref() ->
     now_().

 collect_requests(Acc, Limit) ->

@@ -1381,7 +1417,7 @@ do_collect_requests(Acc, Count, Limit) when Count >= Limit ->
     lists:reverse(Acc);
 do_collect_requests(Acc, Count, Limit) ->
     receive
-        ?SEND_REQ(_From, _Req) = Request ->
+        ?SEND_REQ(_ReplyTo, _Req) = Request ->
             do_collect_requests([Request | Acc], Count + 1, Limit)
     after 0 ->
         lists:reverse(Acc)

@@ -1389,9 +1425,9 @@ do_collect_requests(Acc, Count, Limit) ->

 mark_as_sent(Batch) when is_list(Batch) ->
     lists:map(fun mark_as_sent/1, Batch);
-mark_as_sent(?QUERY(From, Req, _HasBeenSent, ExpireAt)) ->
+mark_as_sent(?QUERY(ReplyTo, Req, _HasBeenSent, ExpireAt)) ->
     HasBeenSent = true,
-    ?QUERY(From, Req, HasBeenSent, ExpireAt).
+    ?QUERY(ReplyTo, Req, HasBeenSent, ExpireAt).

 is_unrecoverable_error({error, {unrecoverable_error, _}}) ->
     true;

@@ -1415,7 +1451,7 @@ is_async_return(_) ->
 sieve_expired_requests(Batch, Now) ->
     {Expired, NotExpired} =
         lists:partition(
-            fun(?QUERY(_From, _CoreReq, _HasBeenSent, ExpireAt)) ->
+            fun(?QUERY(_ReplyTo, _CoreReq, _HasBeenSent, ExpireAt)) ->
                 is_expired(ExpireAt, Now)
             end,
             Batch

@@ -1456,3 +1492,15 @@ ensure_expire_at(#{timeout := TimeoutMS} = Opts) ->
     TimeoutNS = erlang:convert_time_unit(TimeoutMS, millisecond, nanosecond),
     ExpireAt = now_() + TimeoutNS,
     Opts#{expire_at => ExpireAt}.
+
+%% no need to keep the request for async reply handler
+minimize(?QUERY(_, _, _, _) = Q) ->
+    do_minimize(Q);
+minimize(L) when is_list(L) ->
+    lists:map(fun do_minimize/1, L).
+
+-ifdef(TEST).
+do_minimize(?QUERY(_ReplyTo, _Req, _Sent, _ExpireAt) = Query) -> Query.
+-else.
+do_minimize(?QUERY(ReplyTo, _Req, Sent, ExpireAt)) -> ?QUERY(ReplyTo, [], Sent, ExpireAt).
+-endif.
@@ -23,7 +23,7 @@
 %% External API
 -export([start_link/0]).

--export([start_workers/2, stop_workers/2]).
+-export([start_workers/2, stop_workers/2, worker_pids/1]).

 %% Callbacks
 -export([init/1]).

@@ -75,6 +75,14 @@ stop_workers(ResId, Opts) ->
     ensure_worker_pool_removed(ResId),
     ok.

+worker_pids(ResId) ->
+    lists:map(
+        fun({_Name, Pid}) ->
+            Pid
+        end,
+        gproc_pool:active_workers(ResId)
+    ).

 %%%=============================================================================
 %%% Internal
 %%%=============================================================================
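Note: `worker_pids/1` reads live pids straight from the gproc pool, so callers can iterate a resource's buffer workers without consulting supervisor child specs. A hedged usage sketch (assuming this supervisor module is `emqx_resource_buffer_worker_sup` and that the pool is keyed by resource id, as `gproc_pool:active_workers/1` above implies):

    %% Illustrative helper: flush every buffer worker of a resource.
    flush_all_workers(ResId) ->
        lists:foreach(
            fun(Pid) -> emqx_resource_buffer_worker:flush_worker(Pid) end,
            emqx_resource_buffer_worker_sup:worker_pids(ResId)
        ).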
@@ -194,7 +194,7 @@ remove(ResId, ClearMetrics) when is_binary(ResId) ->
 restart(ResId, Opts) when is_binary(ResId) ->
     case safe_call(ResId, restart, ?T_OPERATION) of
         ok ->
-            wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)),
+            _ = wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)),
             ok;
         {error, _Reason} = Error ->
             Error
@@ -205,7 +205,7 @@ restart(ResId, Opts) when is_binary(ResId) ->
 start(ResId, Opts) ->
     case safe_call(ResId, start, ?T_OPERATION) of
         ok ->
-            wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)),
+            _ = wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)),
             ok;
         {error, _Reason} = Error ->
             Error
@@ -309,6 +309,7 @@ init({Data, Opts}) ->
     end.
 
 terminate(_Reason, _State, Data) ->
+    _ = stop_resource(Data),
     _ = maybe_clear_alarm(Data#data.id),
     delete_cache(Data#data.id, Data#data.manager_id),
     ok.
@@ -334,8 +335,7 @@ handle_event({call, From}, start, _State, _Data) ->
 % Called when the resource received a `quit` message
 handle_event(info, quit, stopped, _Data) ->
     {stop, {shutdown, quit}};
-handle_event(info, quit, _State, Data) ->
-    _ = stop_resource(Data),
+handle_event(info, quit, _State, _Data) ->
     {stop, {shutdown, quit}};
 % Called when the resource is to be stopped
 handle_event({call, From}, stop, stopped, _Data) ->
@@ -487,7 +487,7 @@ start_resource(Data, From) ->
             Actions = maybe_reply([{state_timeout, 0, health_check}], From, ok),
             {next_state, connecting, UpdatedData, Actions};
         {error, Reason} = Err ->
-            ?SLOG(error, #{
+            ?SLOG(warning, #{
                 msg => start_resource_failed,
                 id => Data#data.id,
                 reason => Reason
@@ -546,7 +546,7 @@ handle_connected_health_check(Data) ->
             Actions = [{state_timeout, health_check_interval(Data#data.opts), health_check}],
             {keep_state, UpdatedData, Actions};
         (Status, UpdatedData) ->
-            ?SLOG(error, #{
+            ?SLOG(warning, #{
                 msg => health_check_failed,
                 id => Data#data.id,
                 status => Status
@@ -555,12 +555,14 @@ handle_connected_health_check(Data) ->
         end
     ).
 
+with_health_check(#data{state = undefined} = Data, Func) ->
+    Func(disconnected, Data);
 with_health_check(Data, Func) ->
     ResId = Data#data.id,
     HCRes = emqx_resource:call_health_check(Data#data.manager_id, Data#data.mod, Data#data.state),
     {Status, NewState, Err} = parse_health_check_result(HCRes, Data),
     _ = maybe_alarm(Status, ResId),
-    ok = maybe_resume_resource_workers(Status),
+    ok = maybe_resume_resource_workers(ResId, Status),
     UpdatedData = Data#data{
         state = NewState, status = Status, error = Err
     },
@@ -581,14 +583,12 @@ maybe_alarm(_Status, ResId) ->
         <<"resource down: ", ResId/binary>>
     ).
 
-maybe_resume_resource_workers(connected) ->
+maybe_resume_resource_workers(ResId, connected) ->
     lists:foreach(
-        fun({_, Pid, _, _}) ->
-            emqx_resource_buffer_worker:resume(Pid)
-        end,
-        supervisor:which_children(emqx_resource_buffer_worker_sup)
+        fun emqx_resource_buffer_worker:resume/1,
+        emqx_resource_buffer_worker_sup:worker_pids(ResId)
     );
-maybe_resume_resource_workers(_) ->
+maybe_resume_resource_workers(_, _) ->
     ok.
 
 maybe_clear_alarm(<<?TEST_ID_PREFIX, _/binary>>) ->
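
Two behavior fixes above: `stop_resource/1` moves from the `quit` event handler into `terminate/3`, so the resource is stopped on every manager shutdown path, and `maybe_resume_resource_workers` now takes the resource id, so a successful health check resumes only that resource's own workers. A sketch of the contrast (`ResId` assumed bound):

    %% before: swept every resource's workers
    supervisor:which_children(emqx_resource_buffer_worker_sup),
    %% after: only the pool workers registered under ResId
    emqx_resource_buffer_worker_sup:worker_pids(ResId).

In addition, `with_health_check/2` short-circuits to `disconnected` while the resource state is still `undefined`, and failed starts and health checks are now logged at warning rather than error severity.
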
@@ -30,16 +30,25 @@ namespace() -> "resource_schema".
 
 roots() -> [].
 
+fields("resource_opts_sync_only") ->
+    [
+        {resource_opts,
+            mk(
+                ref(?MODULE, "creation_opts_sync_only"),
+                resource_opts_meta()
+            )}
+    ];
+fields("creation_opts_sync_only") ->
+    Fields0 = fields("creation_opts"),
+    Fields1 = lists:keydelete(async_inflight_window, 1, Fields0),
+    QueryMod = {query_mode, fun query_mode_sync_only/1},
+    lists:keyreplace(query_mode, 1, Fields1, QueryMod);
 fields("resource_opts") ->
     [
         {resource_opts,
             mk(
                 ref(?MODULE, "creation_opts"),
-                #{
-                    required => false,
-                    default => #{},
-                    desc => ?DESC(<<"resource_opts">>)
-                }
+                resource_opts_meta()
             )}
     ];
 fields("creation_opts") ->
@@ -59,6 +68,13 @@ fields("creation_opts") ->
         {max_queue_bytes, fun max_queue_bytes/1}
     ].
 
+resource_opts_meta() ->
+    #{
+        required => false,
+        default => #{},
+        desc => ?DESC(<<"resource_opts">>)
+    }.
+
 worker_pool_size(type) -> non_neg_integer();
 worker_pool_size(desc) -> ?DESC("worker_pool_size");
 worker_pool_size(default) -> ?WORKER_POOL_SIZE;
@@ -95,6 +111,12 @@ query_mode(default) -> async;
 query_mode(required) -> false;
 query_mode(_) -> undefined.
 
+query_mode_sync_only(type) -> enum([sync]);
+query_mode_sync_only(desc) -> ?DESC("query_mode_sync_only");
+query_mode_sync_only(default) -> sync;
+query_mode_sync_only(required) -> false;
+query_mode_sync_only(_) -> undefined.
+
 request_timeout(type) -> hoconsc:union([infinity, emqx_schema:duration_ms()]);
 request_timeout(desc) -> ?DESC("request_timeout");
 request_timeout(default) -> <<"15s">>;
@@ -139,4 +161,6 @@ max_queue_bytes(required) -> false;
 max_queue_bytes(_) -> undefined.
 
 desc("creation_opts") ->
+    ?DESC("creation_opts");
+desc("creation_opts_sync_only") ->
     ?DESC("creation_opts").
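
The schema refactor deduplicates the `resource_opts` field metadata into `resource_opts_meta/0` and introduces a sync-only variant of the options group: `"creation_opts_sync_only"` reuses `"creation_opts"` but removes `async_inflight_window` and restricts `query_mode` to the `[sync]` enum. A hypothetical consumer schema that only supports synchronous queries would reference the new group (the module and field name below are illustrative, not from this diff):

    %% illustration only
    fields("my_sync_bridge") ->
        emqx_resource_schema:fields("resource_opts_sync_only").
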
@@ -19,13 +19,12 @@
 -compile(export_all).
 
 -include_lib("eunit/include/eunit.hrl").
--include_lib("common_test/include/ct.hrl").
--include("emqx_resource.hrl").
 -include_lib("stdlib/include/ms_transform.hrl").
 -include_lib("snabbkaffe/include/snabbkaffe.hrl").
 
 -define(TEST_RESOURCE, emqx_connector_demo).
 -define(ID, <<"id">>).
+-define(ID1, <<"id1">>).
 -define(DEFAULT_RESOURCE_GROUP, <<"default">>).
 -define(RESOURCE_ERROR(REASON), {error, {resource_error, #{reason := REASON}}}).
 -define(TRACE_OPTS, #{timetrap => 10000, timeout => 1000}).
@@ -413,7 +412,8 @@ t_query_counter_async_inflight(_) ->
     ?check_trace(
         {_, {ok, _}} =
             ?wait_async_action(
-                inc_counter_in_parallel(WindowSize, ReqOpts),
+                %% one more so that inflight would be already full upon last query
+                inc_counter_in_parallel(WindowSize + 1, ReqOpts),
                 #{?snk_kind := buffer_worker_flush_but_inflight_full},
                 1_000
             ),
@@ -447,9 +447,9 @@ t_query_counter_async_inflight(_) ->
     %% all responses should be received after the resource is resumed.
     {ok, SRef0} = snabbkaffe:subscribe(
         ?match_event(#{?snk_kind := connector_demo_inc_counter_async}),
-        %% +1 because the tmp_query above will be retried and succeed
+        %% +2 because the tmp_query above will be retried and succeed
         %% this time.
-        WindowSize + 1,
+        WindowSize + 2,
         _Timeout0 = 10_000
     ),
     ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)),
@@ -477,7 +477,7 @@ t_query_counter_async_inflight(_) ->
         fun(Trace) ->
             QueryTrace = ?of_kind(call_query_async, Trace),
             ?assertMatch([#{query := {query, _, {inc_counter, _}, _, _}} | _], QueryTrace),
-            ?assertEqual(WindowSize + Num, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}),
+            ?assertEqual(WindowSize + Num + 1, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}),
             tap_metrics(?LINE),
             ok
         end
@@ -489,7 +489,8 @@ t_query_counter_async_inflight(_) ->
     ?check_trace(
         {_, {ok, _}} =
             ?wait_async_action(
-                inc_counter_in_parallel(WindowSize, ReqOpts),
+                %% one more so that inflight would be already full upon last query
+                inc_counter_in_parallel(WindowSize + 1, ReqOpts),
                 #{?snk_kind := buffer_worker_flush_but_inflight_full},
                 1_000
             ),
@@ -502,10 +503,10 @@ t_query_counter_async_inflight(_) ->
     %% this will block the resource_worker
     ok = emqx_resource:query(?ID, {inc_counter, 4}),
 
-    Sent = WindowSize + Num + WindowSize,
+    Sent = WindowSize + 1 + Num + WindowSize + 1,
     {ok, SRef1} = snabbkaffe:subscribe(
         ?match_event(#{?snk_kind := connector_demo_inc_counter_async}),
-        WindowSize,
+        WindowSize + 1,
         _Timeout0 = 10_000
     ),
     ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)),
@@ -595,7 +596,8 @@ t_query_counter_async_inflight_batch(_) ->
     ?check_trace(
         {_, {ok, _}} =
             ?wait_async_action(
-                inc_counter_in_parallel(NumMsgs, ReqOpts),
+                %% a batch more so that inflight would be already full upon last query
+                inc_counter_in_parallel(NumMsgs + BatchSize, ReqOpts),
                 #{?snk_kind := buffer_worker_flush_but_inflight_full},
                 5_000
             ),
@@ -617,12 +619,14 @@ t_query_counter_async_inflight_batch(_) ->
     ),
     tap_metrics(?LINE),
 
+    Sent1 = NumMsgs + BatchSize,
+
     ?check_trace(
         begin
             %% this will block the resource_worker as the inflight window is full now
             {ok, {ok, _}} =
                 ?wait_async_action(
-                    emqx_resource:query(?ID, {inc_counter, 2}),
+                    emqx_resource:query(?ID, {inc_counter, 2}, ReqOpts()),
                     #{?snk_kind := buffer_worker_flush_but_inflight_full},
                     5_000
                 ),
@@ -632,6 +636,8 @@ t_query_counter_async_inflight_batch(_) ->
         []
     ),
 
+    Sent2 = Sent1 + 1,
+
     tap_metrics(?LINE),
     %% send query now will fail because the resource is blocked.
     Insert = fun(Tab, Ref, Result) ->
@@ -654,10 +660,10 @@ t_query_counter_async_inflight_batch(_) ->
     %% all responses should be received after the resource is resumed.
     {ok, SRef0} = snabbkaffe:subscribe(
         ?match_event(#{?snk_kind := connector_demo_inc_counter_async}),
-        %% +1 because the tmp_query above will be retried and succeed
+        %% +2 because the tmp_query above will be retried and succeed
         %% this time.
-        WindowSize + 1,
-        10_000
+        WindowSize + 2,
+        5_000
     ),
     ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)),
     tap_metrics(?LINE),
@@ -665,8 +671,8 @@ t_query_counter_async_inflight_batch(_) ->
     %% since the previous tmp_query was enqueued to be retried, we
     %% take it again from the table; this time, it should have
     %% succeeded.
-    ?assertMatch([{tmp_query, ok}], ets:take(Tab0, tmp_query)),
-    ?assertEqual(NumMsgs, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}),
+    ?assertEqual([{tmp_query, ok}], ets:take(Tab0, tmp_query)),
+    ?assertEqual(Sent2, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}),
     tap_metrics(?LINE),
 
     %% send async query, this time everything should be ok.
@@ -678,7 +684,7 @@ t_query_counter_async_inflight_batch(_) ->
     {ok, SRef} = snabbkaffe:subscribe(
         ?match_event(#{?snk_kind := connector_demo_inc_counter_async}),
         NumBatches1,
-        10_000
+        5_000
     ),
     inc_counter_in_parallel(NumMsgs1, ReqOpts),
     {ok, _} = snabbkaffe:receive_events(SRef),
@@ -692,11 +698,10 @@ t_query_counter_async_inflight_batch(_) ->
             )
         end
     ),
-    ?assertEqual(
-        NumMsgs + NumMsgs1,
-        ets:info(Tab0, size),
-        #{tab => ets:tab2list(Tab0)}
-    ),
+    Sent3 = Sent2 + NumMsgs1,
+    ?assertEqual(Sent3, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}),
     tap_metrics(?LINE),
 
     %% block the resource
@@ -705,7 +710,8 @@ t_query_counter_async_inflight_batch(_) ->
     ?check_trace(
         {_, {ok, _}} =
             ?wait_async_action(
-                inc_counter_in_parallel(NumMsgs, ReqOpts),
+                %% a batch more so that inflight would be already full upon last query
+                inc_counter_in_parallel(NumMsgs + BatchSize, ReqOpts),
                 #{?snk_kind := buffer_worker_flush_but_inflight_full},
                 5_000
             ),
@@ -718,22 +724,23 @@ t_query_counter_async_inflight_batch(_) ->
         end
     ),
 
+    Sent4 = Sent3 + NumMsgs + BatchSize,
+
     %% this will block the resource_worker
     ok = emqx_resource:query(?ID, {inc_counter, 1}),
 
-    Sent = NumMsgs + NumMsgs1 + NumMsgs,
     {ok, SRef1} = snabbkaffe:subscribe(
         ?match_event(#{?snk_kind := connector_demo_inc_counter_async}),
-        WindowSize,
-        10_000
+        WindowSize + 1,
+        5_000
     ),
     ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)),
     {ok, _} = snabbkaffe:receive_events(SRef1),
-    ?assertEqual(Sent, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}),
+    ?assertEqual(Sent4, ets:info(Tab0, size), #{tab => ets:tab2list(Tab0)}),
 
     {ok, Counter} = emqx_resource:simple_sync_query(?ID, get_counter),
-    ct:pal("get_counter: ~p, sent: ~p", [Counter, Sent]),
-    ?assert(Sent =< Counter),
+    ct:pal("get_counter: ~p, sent: ~p", [Counter, Sent4]),
+    ?assert(Sent4 =< Counter),
 
     %% give the metrics some time to stabilize.
     ct:sleep(1000),
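
Across the hunks above, the inflight tests now overfill the window by one request (or one whole batch) so the final query reliably hits `buffer_worker_flush_but_inflight_full`, and the expected ETS table sizes are carried in explicit running totals rather than re-derived sums. The accounting introduced here:

    Sent1 = NumMsgs + BatchSize,          %% first, overfilled burst
    Sent2 = Sent1 + 1,                    %% plus the single blocked {inc_counter, 2}
    Sent3 = Sent2 + NumMsgs1,             %% plus the follow-up async batches
    Sent4 = Sent3 + NumMsgs + BatchSize,  %% plus the second overfilled burst
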
@@ -772,7 +779,10 @@ t_healthy_timeout(_) ->
         %% the ?TEST_RESOURCE always returns the `Mod:on_get_status/2` 300ms later.
         #{health_check_interval => 200}
     ),
-    ?assertError(timeout, emqx_resource:query(?ID, get_state, #{timeout => 1_000})),
+    ?assertMatch(
+        {error, {resource_error, #{reason := timeout}}},
+        emqx_resource:query(?ID, get_state, #{timeout => 1_000})
+    ),
     ?assertMatch({ok, _Group, #{status := disconnected}}, emqx_resource_manager:ets_lookup(?ID)),
     ok = emqx_resource:remove_local(?ID).
 
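
This and several later hunks track a changed error contract in `emqx_resource:query/3`: a timed-out sync query no longer raises `timeout` but returns `{error, {resource_error, #{reason := timeout}}}`, and an overflowing buffer returns `{error, buffer_overflow}` (see the `t_always_overflow` hunk below), so `?assertError` checks become `?assertMatch`/`?assertEqual`. A caller-side sketch (the handler functions are placeholders, not from this diff):

    case emqx_resource:query(Id, Req, #{timeout => 1_000}) of
        ok -> ok;
        {error, {resource_error, #{reason := timeout}}} -> handle_timeout(Id);
        {error, buffer_overflow} -> handle_overflow(Id)
    end
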
@@ -1020,6 +1030,63 @@ t_auto_retry(_) ->
     ),
     ?assertEqual(ok, Res).
 
+t_health_check_disconnected(_) ->
+    _ = emqx_resource:create_local(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource, create_error => true},
+        #{auto_retry_interval => 100}
+    ),
+    ?assertEqual(
+        {ok, disconnected},
+        emqx_resource:health_check(?ID)
+    ).
+
+t_unblock_only_required_buffer_workers(_) ->
+    {ok, _} = emqx_resource:create(
+        ?ID,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource},
+        #{
+            query_mode => async,
+            batch_size => 5
+        }
+    ),
+    lists:foreach(
+        fun emqx_resource_buffer_worker:block/1,
+        emqx_resource_buffer_worker_sup:worker_pids(?ID)
+    ),
+    emqx_resource:create(
+        ?ID1,
+        ?DEFAULT_RESOURCE_GROUP,
+        ?TEST_RESOURCE,
+        #{name => test_resource},
+        #{
+            query_mode => async,
+            batch_size => 5
+        }
+    ),
+    %% creation of `?ID1` should not have unblocked `?ID`'s buffer workers
+    %% so we should see resumes now (`buffer_worker_enter_running`).
+    ?check_trace(
+        ?wait_async_action(
+            lists:foreach(
+                fun emqx_resource_buffer_worker:resume/1,
+                emqx_resource_buffer_worker_sup:worker_pids(?ID)
+            ),
+            #{?snk_kind := buffer_worker_enter_running},
+            5000
+        ),
+        fun(Trace) ->
+            ?assertMatch(
+                [#{id := ?ID} | _],
+                ?of_kind(buffer_worker_enter_running, Trace)
+            )
+        end
+    ).
+
 t_retry_batch(_Config) ->
     {ok, _} = emqx_resource:create(
         ?ID,
@@ -1226,8 +1293,8 @@ t_always_overflow(_Config) ->
     Payload = binary:copy(<<"a">>, 100),
     %% since it's sync and it should never send a request, this
     %% errors with `timeout'.
-    ?assertError(
-        timeout,
+    ?assertEqual(
+        {error, buffer_overflow},
         emqx_resource:query(
             ?ID,
             {big_payload, Payload},
@@ -1583,8 +1650,8 @@ do_t_expiration_before_sending(QueryMode) ->
     spawn_link(fun() ->
         case QueryMode of
             sync ->
-                ?assertError(
-                    timeout,
+                ?assertMatch(
+                    {error, {resource_error, #{reason := timeout}}},
                     emqx_resource:query(?ID, {inc_counter, 99}, #{timeout => TimeoutMS})
                 );
             async ->
@@ -1690,8 +1757,8 @@ do_t_expiration_before_sending_partial_batch(QueryMode) ->
     spawn_link(fun() ->
         case QueryMode of
             sync ->
-                ?assertError(
-                    timeout,
+                ?assertMatch(
+                    {error, {resource_error, #{reason := timeout}}},
                     emqx_resource:query(?ID, {inc_counter, 199}, #{timeout => TimeoutMS})
                 );
             async ->
@@ -1717,7 +1784,7 @@ do_t_expiration_before_sending_partial_batch(QueryMode) ->
         async ->
             {ok, _} = ?block_until(
                 #{
-                    ?snk_kind := buffer_worker_reply_after_query,
+                    ?snk_kind := handle_async_reply,
                     action := ack,
                     batch_or_query := [{query, _, {inc_counter, 99}, _, _}]
                 },
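
The remaining suite hunks follow a rename of the async-reply trace points: `buffer_worker_reply_after_query`, `..._enter`, and `..._expired` become `handle_async_reply`, `handle_async_reply_enter`, and `handle_async_reply_expired`, so every `?block_until`, `?force_ordering`, and `?of_kind` referencing the old kinds is updated. A subscription against a renamed kind, in the same style the suite already uses:

    {ok, SRef} = snabbkaffe:subscribe(
        ?match_event(#{?snk_kind := handle_async_reply, action := ack}),
        _NEvents = 1,
        _Timeout = 1_000
    ),
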
@@ -1848,7 +1915,7 @@ do_t_expiration_async_after_reply(IsBatch) ->
     ?force_ordering(
         #{?snk_kind := delay},
         #{
-            ?snk_kind := buffer_worker_reply_after_query_enter,
+            ?snk_kind := handle_async_reply_enter,
             batch_or_query := [{query, _, {inc_counter, 199}, _, _} | _]
         }
     ),
@@ -1873,7 +1940,7 @@ do_t_expiration_async_after_reply(IsBatch) ->
         #{?snk_kind := buffer_worker_flush_potentially_partial}, 4 * TimeoutMS
     ),
     {ok, _} = ?block_until(
-        #{?snk_kind := buffer_worker_reply_after_query_expired}, 10 * TimeoutMS
+        #{?snk_kind := handle_async_reply_expired}, 10 * TimeoutMS
     ),
 
     unlink(Pid0),
@@ -1887,7 +1954,7 @@ do_t_expiration_async_after_reply(IsBatch) ->
                 expired := [{query, _, {inc_counter, 199}, _, _}]
             }
         ],
-        ?of_kind(buffer_worker_reply_after_query_expired, Trace)
+        ?of_kind(handle_async_reply_expired, Trace)
     ),
     wait_telemetry_event(success, #{n_events => 1, timeout => 4_000}),
     Metrics = tap_metrics(?LINE),
@@ -1935,7 +2002,7 @@ t_expiration_batch_all_expired_after_reply(_Config) ->
     ?force_ordering(
         #{?snk_kind := delay},
         #{
-            ?snk_kind := buffer_worker_reply_after_query_enter,
+            ?snk_kind := handle_async_reply_enter,
             batch_or_query := [{query, _, {inc_counter, 199}, _, _} | _]
         }
     ),
@@ -1954,7 +2021,7 @@ t_expiration_batch_all_expired_after_reply(_Config) ->
     end),
 
     {ok, _} = ?block_until(
-        #{?snk_kind := buffer_worker_reply_after_query_expired}, 10 * TimeoutMS
+        #{?snk_kind := handle_async_reply_expired}, 10 * TimeoutMS
     ),
 
     unlink(Pid0),
@@ -1968,7 +2035,7 @@ t_expiration_batch_all_expired_after_reply(_Config) ->
                 expired := [{query, _, {inc_counter, 199}, _, _}]
             }
         ],
-        ?of_kind(buffer_worker_reply_after_query_expired, Trace)
+        ?of_kind(handle_async_reply_expired, Trace)
     ),
     Metrics = tap_metrics(?LINE),
     ?assertMatch(
@@ -2043,8 +2110,8 @@ do_t_expiration_retry(IsBatch) ->
         ResumeInterval * 2
     ),
     spawn_link(fun() ->
-        ?assertError(
-            timeout,
+        ?assertMatch(
+            {error, {resource_error, #{reason := timeout}}},
             emqx_resource:query(
                 ?ID,
                 {inc_counter, 1},
@@ -2127,8 +2194,8 @@ t_expiration_retry_batch_multiple_times(_Config) ->
     ),
     TimeoutMS = 100,
     spawn_link(fun() ->
-        ?assertError(
-            timeout,
+        ?assertMatch(
+            {error, {resource_error, #{reason := timeout}}},
             emqx_resource:query(
                 ?ID,
                 {inc_counter, 1},
@@ -2137,8 +2204,8 @@ t_expiration_retry_batch_multiple_times(_Config) ->
             )
     end),
     spawn_link(fun() ->
-        ?assertError(
-            timeout,
+        ?assertMatch(
+            {error, {resource_error, #{reason := timeout}}},
             emqx_resource:query(
                 ?ID,
                 {inc_counter, 2},
@@ -2334,7 +2401,7 @@ assert_async_retry_fail_then_succeed_inflight(Trace) ->
     ct:pal(" ~p", [Trace]),
     ?assert(
         ?strict_causality(
-            #{?snk_kind := buffer_worker_reply_after_query, action := nack, ref := _Ref},
+            #{?snk_kind := handle_async_reply, action := nack},
            #{?snk_kind := buffer_worker_retry_inflight_failed, ref := _Ref},
            Trace
        )

bin/emqx
@@ -159,7 +159,7 @@ usage() {
         echo "Evaluate an Erlang expression in the EMQX node, even on Elixir node"
         ;;
     versions)
-        echo "List installed EMQX versions and their status"
+        echo "List installed EMQX release versions and their status"
         ;;
     unpack)
         echo "Usage: $REL_NAME unpack [VERSION]"
@@ -217,12 +217,12 @@ usage() {
         echo " ctl: Administration commands, execute '$REL_NAME ctl help' for more details"
         echo ''
         echo "More:"
         echo " Shell attach: remote_console | attach"
-        echo " Up/Down-grade: upgrade | downgrade | install | uninstall"
-        echo " Install info: ertspath | root_dir"
-        echo " Runtime info: pid | ping | versions"
+        # echo " Up/Down-grade: upgrade | downgrade | install | uninstall | versions" # TODO enable when supported
+        echo " Install Info: ertspath | root_dir"
+        echo " Runtime Status: pid | ping"
         echo " Validate Config: check_config"
         echo " Advanced: console_clean | escript | rpc | rpcterms | eval | eval-erl"
         echo ''
         echo "Execute '$REL_NAME COMMAND help' for more information"
         ;;
@@ -361,7 +361,7 @@ if [ "$IS_BOOT_COMMAND" = 'yes' ]; then
             logerr "$COMPATIBILITY_INFO"
             exit 2
         fi
-        logerr "Using libs from '${DYNLIBS_DIR}' due to missing from the OS."
+        logwarn "Using libs from '${DYNLIBS_DIR}' due to missing from the OS."
     fi
     [ "$DEBUG" -eq 1 ] && set -x
 fi

@@ -18,27 +18,18 @@ main([Command0, DistInfoStr | CommandArgs]) ->
     Opts = parse_arguments(CommandArgs),
     %% invoke the command passed as argument
     F = case Command0 of
-            %% "install" -> fun(A, B) -> install(A, B) end;
-            %% "unpack" -> fun(A, B) -> unpack(A, B) end;
-            %% "upgrade" -> fun(A, B) -> upgrade(A, B) end;
-            %% "downgrade" -> fun(A, B) -> downgrade(A, B) end;
-            %% "uninstall" -> fun(A, B) -> uninstall(A, B) end;
-            "versions" -> fun(A, B) -> versions(A, B) end;
-            _ -> fun fail_upgrade/2
+            "install" -> fun(A, B) -> install(A, B) end;
+            "unpack" -> fun(A, B) -> unpack(A, B) end;
+            "upgrade" -> fun(A, B) -> upgrade(A, B) end;
+            "downgrade" -> fun(A, B) -> downgrade(A, B) end;
+            "uninstall" -> fun(A, B) -> uninstall(A, B) end;
+            "versions" -> fun(A, B) -> versions(A, B) end
         end,
     F(DistInfo, Opts);
 main(Args) ->
     ?INFO("unknown args: ~p", [Args]),
     erlang:halt(1).
 
-%% temporary block for hot-upgrades; next release will just remove
-%% this and the new script version shall be used instead of this
-%% current version.
-%% TODO: always deny relup for macos (unsupported)
-fail_upgrade(_DistInfo, _Opts) ->
-    ?ERROR("Unsupported upgrade path", []),
-    erlang:halt(1).
-
 unpack({RelName, NameTypeArg, NodeName, Cookie}, Opts) ->
     TargetNode = start_distribution(NodeName, NameTypeArg, Cookie),
     Version = proplists:get_value(version, Opts),

@@ -1 +0,0 @@
-Refactor `/authorization/sources/built_in_database/` by adding `rules/` to the path.

@@ -1 +0,0 @@
-Refactored the `/authorization/sources/built_in_database/` API by adding `rules/` to its path.

@@ -1 +0,0 @@
-`/bridges_probe` API endpoint to test params for creating a new data bridge.

Some files were not shown because too many files have changed in this diff.