Merge pull request #9482 from emqx/1206-chore-merge-ee50-to-release-50

Merge ee50 to release-50
This commit is contained in:
Zaiming (Stone) Shi 2022-12-08 14:12:33 +01:00 committed by GitHub
commit 9da12a0814
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
196 changed files with 12538 additions and 3613 deletions

View File

@ -3,6 +3,7 @@ REDIS_TAG=6
MONGO_TAG=5
PGSQL_TAG=13
LDAP_TAG=2.4.50
INFLUXDB_TAG=2.5.0
TARGET=emqx/emqx
EMQX_TAG=build-alpine-amd64

View File

@ -0,0 +1,36 @@
# InfluxDB v2 (plain TCP) service for integration tests.
# NOTE(review): the diff rendering stripped all leading indentation; the
# nesting below is reconstructed from docker-compose conventions — verify
# against the upstream file.
version: '3.9'

services:
  influxdb_server_tcp:
    container_name: influxdb_tcp
    image: influxdb:${INFLUXDB_TAG}
    expose:
      - "8086"
      - "8089/udp"
      - "8083"
    # ports:
    #   - "8086:8086"
    environment:
      DOCKER_INFLUXDB_INIT_MODE: setup
      DOCKER_INFLUXDB_INIT_USERNAME: root
      DOCKER_INFLUXDB_INIT_PASSWORD: emqx@123
      DOCKER_INFLUXDB_INIT_ORG: emqx
      DOCKER_INFLUXDB_INIT_BUCKET: mqtt
      DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: abcdefg
    volumes:
      # first-boot init hook: creates v1-compat credentials (see setup-v1.sh)
      - "./influxdb/setup-v1.sh:/docker-entrypoint-initdb.d/setup-v1.sh"
    restart: always
    networks:
      - emqx_bridge

# networks:
#   emqx_bridge:
#     driver: bridge
#     name: emqx_bridge
#     ipam:
#       driver: default
#       config:
#         - subnet: 172.100.239.0/24
#           gateway: 172.100.239.1
#         - subnet: 2001:3200:3200::/64
#           gateway: 2001:3200:3200::1

View File

@ -0,0 +1,42 @@
# InfluxDB v2 (TLS) service for integration tests. Same setup as the TCP
# variant, plus server cert/key mounted and influxd started with TLS flags.
# NOTE(review): indentation reconstructed from a flattened diff rendering —
# verify against the upstream file.
version: '3.9'

services:
  influxdb_server_tls:
    container_name: influxdb_tls
    image: influxdb:${INFLUXDB_TAG}
    expose:
      - "8086"
      - "8089/udp"
      - "8083"
    # ports:
    #   - "8087:8086"
    environment:
      DOCKER_INFLUXDB_INIT_MODE: setup
      DOCKER_INFLUXDB_INIT_USERNAME: root
      DOCKER_INFLUXDB_INIT_PASSWORD: emqx@123
      DOCKER_INFLUXDB_INIT_ORG: emqx
      DOCKER_INFLUXDB_INIT_BUCKET: mqtt
      DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: abcdefg
    volumes:
      - ./certs/server.crt:/etc/influxdb/cert.pem
      - ./certs/server.key:/etc/influxdb/key.pem
      # first-boot init hook: creates v1-compat credentials (see setup-v1.sh)
      - "./influxdb/setup-v1.sh:/docker-entrypoint-initdb.d/setup-v1.sh"
    command:
      - influxd
      - --tls-cert=/etc/influxdb/cert.pem
      - --tls-key=/etc/influxdb/key.pem
    restart: always
    networks:
      - emqx_bridge

# networks:
#   emqx_bridge:
#     driver: bridge
#     name: emqx_bridge
#     ipam:
#       driver: default
#       config:
#         - subnet: 172.100.239.0/24
#           gateway: 172.100.239.1
#         - subnet: 2001:3200:3200::/64
#           gateway: 2001:3200:3200::1

View File

@ -0,0 +1,73 @@
# Kafka test stack: zookeeper, a one-shot cert generator, a Kerberos KDC and
# a single Kafka broker exposing PLAINTEXT/SASL_PLAINTEXT/SSL/SASL_SSL
# listeners. Secrets (keytabs, keystores) are shared via the
# emqx-shared-secret volume.
# NOTE(review): indentation reconstructed from a flattened diff rendering —
# verify against the upstream file.
version: '3.9'

services:
  zookeeper:
    image: wurstmeister/zookeeper
    ports:
      - "2181:2181"
    container_name: zookeeper
    hostname: zookeeper
    networks:
      emqx_bridge:
  ssl_cert_gen:
    # one-shot job: writes CA/server/client certs + JKS stores into the
    # shared volume (see kafka/generate-certs.sh)
    image: fredrikhgrelland/alpine-jdk11-openssl
    container_name: ssl_cert_gen
    volumes:
      - emqx-shared-secret:/var/lib/secret
      - ./kafka/generate-certs.sh:/bin/generate-certs.sh
    entrypoint: /bin/sh
    command: /bin/generate-certs.sh
  kdc:
    hostname: kdc.emqx.net
    image: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04
    container_name: kdc.emqx.net
    networks:
      emqx_bridge:
    volumes:
      - emqx-shared-secret:/var/lib/secret
      - ./kerberos/krb5.conf:/etc/kdc/krb5.conf
      - ./kerberos/krb5.conf:/etc/krb5.conf
      - ./kerberos/run.sh:/usr/bin/run.sh
    command: run.sh
  kafka_1:
    image: wurstmeister/kafka:2.13-2.7.0
    ports:
      - "9092:9092"
      - "9093:9093"
      - "9094:9094"
      - "9095:9095"
    container_name: kafka-1.emqx.net
    hostname: kafka-1.emqx.net
    depends_on:
      - "kdc"
      - "zookeeper"
      - "ssl_cert_gen"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_LISTENERS: PLAINTEXT://:9092,SASL_PLAINTEXT://:9093,SSL://:9094,SASL_SSL://:9095
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1.emqx.net:9092,SASL_PLAINTEXT://kafka-1.emqx.net:9093,SSL://kafka-1.emqx.net:9094,SASL_SSL://kafka-1.emqx.net:9095
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,SSL:SSL,SASL_SSL:SASL_SSL
      KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
      KAFKA_SASL_ENABLED_MECHANISMS: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512,GSSAPI
      KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka
      KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN
      KAFKA_JMX_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas.conf"
      KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true"
      KAFKA_CREATE_TOPICS: test-topic-one-partition:1:1,test-topic-two-partitions:2:1,test-topic-three-partitions:3:1,
      KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
      KAFKA_SSL_TRUSTSTORE_LOCATION: /var/lib/secret/kafka.truststore.jks
      KAFKA_SSL_TRUSTSTORE_PASSWORD: password
      KAFKA_SSL_KEYSTORE_LOCATION: /var/lib/secret/kafka.keystore.jks
      KAFKA_SSL_KEYSTORE_PASSWORD: password
      KAFKA_SSL_KEY_PASSWORD: password
    networks:
      emqx_bridge:
    volumes:
      - emqx-shared-secret:/var/lib/secret
      - ./kafka/jaas.conf:/etc/kafka/jaas.conf
      - ./kafka/run_add_scram_users.sh:/bin/run_add_scram_users.sh
      - ./kerberos/krb5.conf:/etc/kdc/krb5.conf
      - ./kerberos/krb5.conf:/etc/krb5.conf
    # wrapper around start-kafka.sh that also provisions SCRAM users
    command: run_add_scram_users.sh

View File

@ -18,7 +18,7 @@ services:
--ipv6
--bind_ip_all
--replSet rs0
mongo2:
hostname: mongo2
container_name: mongo2
@ -54,10 +54,10 @@ services:
--ipv6
--bind_ip_all
--replSet rs0
mongo_client:
mongo_rs_client:
image: mongo:${MONGO_TAG}
container_name: mongo_client
container_name: mongo_rs_client
networks:
- emqx_bridge
depends_on:

View File

@ -0,0 +1,90 @@
# Sharded MongoDB test cluster: one config server (cfg0), one shard (rs0),
# one mongos router, and a client container that initializes the replica
# sets and registers the shard.
# NOTE(review): indentation reconstructed from a flattened diff rendering —
# verify against the upstream file.
version: "3"

services:
  mongosharded1:
    hostname: mongosharded1
    container_name: mongosharded1
    image: mongo:${MONGO_TAG}
    environment:
      MONGO_INITDB_DATABASE: mqtt
    networks:
      - emqx_bridge
    expose:
      - 27017
    ports:
      - 27014:27017
    restart: always
    command:
      --configsvr
      --replSet cfg0
      --port 27017
      --ipv6
      --bind_ip_all
  mongosharded2:
    hostname: mongosharded2
    container_name: mongosharded2
    image: mongo:${MONGO_TAG}
    environment:
      MONGO_INITDB_DATABASE: mqtt
    networks:
      - emqx_bridge
    expose:
      - 27017
    ports:
      - 27015:27017
    restart: always
    command:
      --shardsvr
      --replSet rs0
      --port 27017
      --ipv6
      --bind_ip_all
  mongosharded3:
    hostname: mongosharded3
    container_name: mongosharded3
    image: mongo:${MONGO_TAG}
    environment:
      MONGO_INITDB_DATABASE: mqtt
    networks:
      - emqx_bridge
    expose:
      - 27017
    ports:
      - 27016:27017
    restart: always
    # mongos router in front of the config server
    entrypoint: mongos
    command:
      --configdb cfg0/mongosharded1:27017
      --port 27017
      --ipv6
      --bind_ip_all
  mongosharded_client:
    image: mongo:${MONGO_TAG}
    container_name: mongosharded_client
    networks:
      - emqx_bridge
    depends_on:
      - mongosharded1
      - mongosharded2
      - mongosharded3
    command:
      - /bin/bash
      - -c
      - |
        while ! mongo --host mongosharded1 --eval 'db.runCommand("ping").ok' --quiet >/dev/null 2>&1 ; do
          sleep 1
        done
        mongo --host mongosharded1 --eval "rs.initiate( { _id : 'cfg0', configsvr: true, members: [ { _id : 0, host : 'mongosharded1:27017' } ] })"
        while ! mongo --host mongosharded2 --eval 'db.runCommand("ping").ok' --quiet >/dev/null 2>&1 ; do
          sleep 1
        done
        mongo --host mongosharded2 --eval "rs.initiate( { _id : 'rs0', members: [ { _id : 0, host : 'mongosharded2:27017' } ] })"
        mongo --host mongosharded2 --eval "rs.status()"
        while ! mongo --host mongosharded3 --eval 'db.runCommand("ping").ok' --quiet >/dev/null 2>&1 ; do
          sleep 1
        done
        mongo --host mongosharded3 --eval "sh.addShard('rs0/mongosharded2:27017')"
        mongo --host mongosharded3 --eval "sh.enableSharding('mqtt')"

View File

@ -2,7 +2,7 @@ version: '3.9'
services:
python:
container_name: python
container_name: python
image: python:3.7.2-alpine3.9
depends_on:
- emqx1

View File

@ -0,0 +1,20 @@
# Toxiproxy service used to inject network failures between the test runner
# and the database containers (proxies defined in toxiproxy.json).
# NOTE(review): indentation reconstructed from a flattened diff rendering —
# verify against the upstream file. Port mappings are quoted per the
# docker-compose recommendation to avoid YAML scalar ambiguity.
version: '3.9'

services:
  toxiproxy:
    container_name: toxiproxy
    image: ghcr.io/shopify/toxiproxy:2.5.0
    restart: always
    networks:
      - emqx_bridge
    volumes:
      - "./toxiproxy.json:/config/toxiproxy.json"
    ports:
      - "8474:8474"   # toxiproxy admin API
      - "8086:8086"   # influxdb_tcp proxy
      - "8087:8087"   # influxdb_tls proxy
      - "13306:3306"  # mysql_tcp proxy
      - "13307:3307"  # mysql_tls proxy
    command:
      - "-host=0.0.0.0"
      - "-config=/config/toxiproxy.json"

View File

@ -18,6 +18,9 @@ services:
- emqx_bridge
volumes:
- ../..:/emqx
- emqx-shared-secret:/var/lib/secret
- ./kerberos/krb5.conf:/etc/kdc/krb5.conf
- ./kerberos/krb5.conf:/etc/krb5.conf
working_dir: /emqx
tty: true
user: "${UID_GID}"
@ -34,3 +37,6 @@ networks:
gateway: 172.100.239.1
- subnet: 2001:3200:3200::/64
gateway: 2001:3200:3200::1
volumes: # add this section
emqx-shared-secret: # does not need anything underneath this

View File

@ -0,0 +1,16 @@
#!/usr/bin/env bash
# First-boot init hook for the InfluxDB v2 container (mounted into
# /docker-entrypoint-initdb.d by docker-compose): creates v1-compatibility
# auth credentials so clients can use the legacy API.
set -e
# Kept for reference: creating a v1 DBRP mapping was considered but left
# disabled.
# influx v1 dbrp create \
# --bucket-id ${DOCKER_INFLUXDB_INIT_BUCKET_ID} \
# --db ${V1_DB_NAME} \
# --rp ${V1_RP_NAME} \
# --default \
# --org ${DOCKER_INFLUXDB_INIT_ORG}
# Create a v1-API user with write access to the init bucket.
# NOTE(review): DOCKER_INFLUXDB_INIT_BUCKET_ID is not set in the compose
# environment shown here — presumably exported by the official influxdb
# entrypoint after setup; confirm.
influx v1 auth create \
--username "${DOCKER_INFLUXDB_INIT_USERNAME}" \
--password "${DOCKER_INFLUXDB_INIT_PASSWORD}" \
--write-bucket "${DOCKER_INFLUXDB_INIT_BUCKET_ID}" \
--org "${DOCKER_INFLUXDB_INIT_ORG}"

View File

@ -0,0 +1,46 @@
#!/usr/bin/bash
# Generates a self-signed CA plus server and client certificates into
# /var/lib/secret, and packages the server cert into Java keystore/truststore
# files (kafka.keystore.jks / kafka.truststore.jks) for the Kafka broker.
set -euo pipefail
set -x
# Source https://github.com/zmstone/docker-kafka/blob/master/generate-certs.sh
# "*." is used as the certificate CN (wildcard-ish test identity).
HOST="*."
DAYS=3650
PASS="password"
cd /var/lib/secret/
# Delete old files
(rm ca.key ca.crt server.key server.csr server.crt client.key client.csr client.crt server.p12 kafka.keystore.jks kafka.truststore.jks 2>/dev/null || true)
ls
echo '== Generate self-signed server and client certificates'
echo '= generate CA'
openssl req -new -x509 -keyout ca.key -out ca.crt -days $DAYS -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST"
echo '= generate server certificate request'
openssl req -newkey rsa:2048 -sha256 -keyout server.key -out server.csr -days "$DAYS" -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST"
echo '= sign server certificate'
# -CAcreateserial writes ca.srl, reused below when signing the client cert
openssl x509 -req -CA ca.crt -CAkey ca.key -in server.csr -out server.crt -days "$DAYS" -CAcreateserial
echo '= generate client certificate request'
openssl req -newkey rsa:2048 -sha256 -keyout client.key -out client.csr -days "$DAYS" -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST"
echo '== sign client certificate'
openssl x509 -req -CA ca.crt -CAkey ca.key -in client.csr -out client.crt -days $DAYS -CAserial ca.srl
echo '= Convert self-signed certificate to PKCS#12 format'
openssl pkcs12 -export -name "$HOST" -in server.crt -inkey server.key -out server.p12 -CAfile ca.crt -passout pass:"$PASS"
echo '= Import PKCS#12 into a java keystore'
echo $PASS | keytool -importkeystore -destkeystore kafka.keystore.jks -srckeystore server.p12 -srcstoretype pkcs12 -alias "$HOST" -storepass "$PASS"
echo '= Import CA into java truststore'
echo yes | keytool -keystore kafka.truststore.jks -alias CARoot -import -file ca.crt -storepass "$PASS"

View File

@ -0,0 +1,16 @@
KafkaServer {
org.apache.kafka.common.security.plain.PlainLoginModule required
user_admin="password"
user_emqxuser="password";
org.apache.kafka.common.security.scram.ScramLoginModule required
username="admin"
password="password";
com.sun.security.auth.module.Krb5LoginModule required
useKeyTab=true
storeKey=true
keyTab="/var/lib/secret/kafka.keytab"
principal="kafka/kafka-1.emqx.net@KDC.EMQX.NET";
};

View File

@ -0,0 +1,49 @@
#!/usr/bin/env bash
# Kafka container entrypoint wrapper: waits for the Kerberos keytab and SSL
# truststore produced by the kdc / ssl_cert_gen containers, starts Kafka,
# provisions SCRAM credentials for 'emqxuser', then blocks until the broker
# port closes (keeps the container alive while Kafka runs).
set -euo pipefail
TIMEOUT=60
echo "+++++++ Sleep for a while to make sure that old keytab and truststore is deleted ++++++++"
sleep 5
echo "+++++++ Wait until Kerberos Keytab is created ++++++++"
timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.keytab ]; do sleep 1; done'
echo "+++++++ Wait until SSL certs are generated ++++++++"
timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.truststore.jks ]; do sleep 1; done'
sleep 3
echo "+++++++ Starting Kafka ++++++++"
# backgrounded; this script keeps running to do post-start provisioning
start-kafka.sh &
SERVER=localhost
PORT1=9092
PORT2=9093
TIMEOUT=60
echo "+++++++ Wait until Kafka ports are up ++++++++"
# $0/$1 below are bash -c positional args, not this script's — hence SC2016
# shellcheck disable=SC2016
timeout $TIMEOUT bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT1
# shellcheck disable=SC2016
timeout $TIMEOUT bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT2
echo "+++++++ Run config commands ++++++++"
kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'SCRAM-SHA-256=[iterations=8192,password=password],SCRAM-SHA-512=[password=password]' --entity-type users --entity-name emqxuser
echo "+++++++ Wait until Kafka ports are down ++++++++"
bash -c 'while printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT1
echo "+++++++ Kafka ports are down ++++++++"

View File

@ -0,0 +1,23 @@
# Kerberos configuration for the test realm KDC.EMQX.NET, mounted into both
# the KDC and the Kafka containers (as /etc/krb5.conf and /etc/kdc/krb5.conf).
[libdefaults]
default_realm = KDC.EMQX.NET
ticket_lifetime = 24h
renew_lifetime = 7d
forwardable = true
# disable reverse-DNS canonicalization; hostnames are fixed in compose
rdns = false
dns_lookup_kdc = no
dns_lookup_realm = no
[realms]
KDC.EMQX.NET = {
kdc = kdc
admin_server = kadmin
}
[domain_realm]
kdc.emqx.net = KDC.EMQX.NET
.kdc.emqx.net = KDC.EMQX.NET
[logging]
kdc = FILE:/var/log/kerberos/krb5kdc.log
admin_server = FILE:/var/log/kerberos/kadmin.log
default = FILE:/var/log/kerberos/krb5lib.log

View File

@ -0,0 +1,25 @@
#!/bin/sh
echo "Remove old keytabs"
rm -f /var/lib/secret/kafka.keytab > /dev/null 2>&1
rm -f /var/lib/secret/rig.keytab > /dev/null 2>&1
echo "Create realm"
kdb5_util -P emqx -r KDC.EMQX.NET create -s
echo "Add principals"
kadmin.local -w password -q "add_principal -randkey kafka/kafka-1.emqx.net@KDC.EMQX.NET"
kadmin.local -w password -q "add_principal -randkey rig@KDC.EMQX.NET" > /dev/null
echo "Create keytabs"
kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.keytab -norandkey kafka/kafka-1.emqx.net@KDC.EMQX.NET " > /dev/null
kadmin.local -w password -q "ktadd -k /var/lib/secret/rig.keytab -norandkey rig@KDC.EMQX.NET " > /dev/null
echo STARTING KDC
/usr/sbin/krb5kdc -n

View File

@ -0,0 +1,26 @@
[
{
"name": "influxdb_tcp",
"listen": "0.0.0.0:8086",
"upstream": "influxdb_tcp:8086",
"enabled": true
},
{
"name": "influxdb_tls",
"listen": "0.0.0.0:8087",
"upstream": "influxdb_tls:8086",
"enabled": true
},
{
"name": "mysql_tcp",
"listen": "0.0.0.0:3306",
"upstream": "mysql:3306",
"enabled": true
},
{
"name": "mysql_tls",
"listen": "0.0.0.0:3307",
"upstream": "mysql-tls:3306",
"enabled": true
}
]

View File

@ -115,7 +115,9 @@ jobs:
- 24.3.4.2-1 # update to latest
elixir:
- 1.13.4 # update to latest
exclude: # TODO: publish enterprise to ecr too?
- registry: 'public.ecr.aws'
profile: emqx-enterprise
steps:
- uses: AutoModality/action-clean@v1
if: matrix.arch[1] == 'aws-arm64'
@ -261,6 +263,9 @@ jobs:
registry:
- 'docker.io'
- 'public.ecr.aws'
exclude:
- registry: 'public.ecr.aws'
profile: emqx-enterprise
steps:
- uses: actions/download-artifact@v3

View File

@ -86,14 +86,13 @@ jobs:
windows:
runs-on: windows-2019
if: startsWith(github.ref_name, 'v')
needs: prepare
strategy:
fail-fast: false
matrix:
profile: # for now only CE for windows
- emqx
otp:
- 24.2.1
steps:
- uses: actions/download-artifact@v3
with:
@ -104,7 +103,7 @@ jobs:
- uses: ilammy/msvc-dev-cmd@v1.12.0
- uses: erlef/setup-beam@v1
with:
otp-version: ${{ matrix.otp }}
otp-version: 24.2.1
- name: build
env:
PYTHON: python
@ -129,7 +128,7 @@ jobs:
echo "EMQX uninstalled"
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.profile }}-windows
name: ${{ matrix.profile }}
path: source/_packages/${{ matrix.profile }}/
mac:
@ -167,7 +166,7 @@ jobs:
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.profile }}-${{ matrix.otp }}
name: ${{ matrix.profile }}
path: _packages/${{ matrix.profile }}/
linux:
@ -182,7 +181,7 @@ jobs:
profile:
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
otp:
- 24.3.4.2-1 # we test with OTP 23, but only build package on OTP 24 versions
- 24.3.4.2-1
elixir:
- 1.13.4
# used to split elixir packages into a separate job, since the
@ -200,51 +199,31 @@ jobs:
os:
- ubuntu20.04
- ubuntu18.04
- ubuntu16.04
- debian11
- debian10
- debian9
- el8
- el7
- raspbian10
build_machine:
- aws-arm64
- ubuntu-20.04
exclude:
- arch: arm64
build_machine: ubuntu-20.04
- arch: amd64
build_machine: aws-arm64
- os: raspbian9
arch: amd64
- os: raspbian10
arch: amd64
- os: raspbian10 # we only have arm32 image
arch: arm64
- os: raspbian9
profile: emqx
- os: raspbian10
profile: emqx
- os: raspbian9
profile: emqx-enterprise
- os: raspbian10
profile: emqx-enterprise
include:
- profile: emqx
otp: 24.3.4.2-1
elixir: 1.13.4
build_elixir: with_elixir
arch: amd64
os: ubuntu20.04
- arch: arm64
build_machine: ubuntu-20.04
- profile: emqx
otp: 24.3.4.2-1
elixir: 1.13.4
build_elixir: with_elixir
arch: amd64
os: el8
build_machine: ubuntu-20.04
- arch: amd64
build_machine: aws-arm64
# elixir: only for opensource edition and only on ubuntu20.04 and el8 on amd64
- build_elixir: with_elixir
profile: emqx-enterprise
- build_elixir: with_elixir
arch: arm64
- build_elixir: with_elixir
os: ubuntu18.04
- build_elixir: with_elixir
os: debian10
- build_elixir: with_elixir
os: debian11
- build_elixir: with_elixir
os: el7
defaults:
run:
shell: bash
@ -293,7 +272,7 @@ jobs:
done
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.profile }}-${{ matrix.otp }}
name: ${{ matrix.profile }}
path: source/_packages/${{ matrix.profile }}/
publish_artifacts:
@ -305,15 +284,10 @@ jobs:
matrix:
profile:
- ${{ needs.prepare.outputs.BUILD_PROFILE }}
otp:
- 24.3.4.2-1
include:
- profile: emqx
otp: windows # otp version on windows is rather fixed
steps:
- uses: actions/download-artifact@v3
with:
name: ${{ matrix.profile }}-${{ matrix.otp }}
name: ${{ matrix.profile }}
path: packages/${{ matrix.profile }}
- name: install dos2unix
run: sudo apt-get update && sudo apt install -y dos2unix

View File

@ -12,8 +12,12 @@ on:
jobs:
elixir_release_build:
runs-on: ubuntu-latest
strategy:
matrix:
profile:
- emqx
- emqx-enterprise
container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04
steps:
- name: Checkout
uses: actions/checkout@v3
@ -23,15 +27,15 @@ jobs:
run: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
- name: elixir release
run: make emqx-elixir
run: make ${{ matrix.profile }}-elixir
- name: start release
run: |
cd _build/emqx/rel/emqx
cd _build/${{ matrix.profile }}/rel/emqx
bin/emqx start
- name: check if started
run: |
sleep 10
nc -zv localhost 1883
cd _build/emqx/rel/emqx
cd _build/${{ matrix.profile }}/rel/emqx
bin/emqx ping
bin/emqx ctl status

View File

@ -15,41 +15,74 @@ on:
jobs:
prepare:
runs-on: ubuntu-20.04
runs-on: aws-amd64
# prepare source with any OTP version, no need for a matrix
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
outputs:
fast_ct_apps: ${{ steps.run_find_apps.outputs.fast_ct_apps }}
docker_ct_apps: ${{ steps.run_find_apps.outputs.docker_ct_apps }}
fast_ct_apps: ${{ steps.find_ct_apps.outputs.fast_ct_apps }}
docker_ct_apps: ${{ steps.find_ct_apps.outputs.docker_ct_apps }}
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/checkout@v3
with:
path: source
fetch-depth: 0
- name: find_ct_apps
- name: Find CT Apps
working-directory: source
id: run_find_apps
id: find_ct_apps
run: |
fast_ct_apps="$(./scripts/find-apps.sh --ct fast --json)"
docker_ct_apps="$(./scripts/find-apps.sh --ct docker --json)"
echo "fast-ct-apps: $fast_ct_apps"
echo "docer-ct-apps: $docker_ct_apps"
fast_ct_apps="$(./scripts/find-apps.sh --ci fast)"
docker_ct_apps="$(./scripts/find-apps.sh --ci docker)"
echo "fast: $fast_ct_apps"
echo "docker: $docker_ct_apps"
echo "::set-output name=fast_ct_apps::$fast_ct_apps"
echo "::set-output name=docker_ct_apps::$docker_ct_apps"
- name: get_all_deps
working-directory: source
env:
PROFILE: emqx
#DIAGNOSTIC: 1
run: |
make deps-all
./rebar3 as test compile
make ensure-rebar3
# fetch all deps and compile
make emqx
make test-compile
cd ..
zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v3
with:
name: source
name: source-emqx
path: source.zip
prepare_ee:
runs-on: aws-amd64
# prepare source with any OTP version, no need for a matrix
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
steps:
- uses: AutoModality/action-clean@v1
- uses: actions/checkout@v3
with:
path: source
- name: get_all_deps
working-directory: source
env:
PROFILE: emqx-enterprise
#DIAGNOSTIC: 1
run: |
make ensure-rebar3
# fetch all deps and compile
make emqx-enterprise
make test-compile
cd ..
zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v3
with:
name: source-emqx-enterprise
path: source.zip
eunit_and_proper:
needs: prepare
needs:
- prepare
- prepare_ee
runs-on: aws-amd64
strategy:
fail-fast: false
@ -66,7 +99,7 @@ jobs:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3
with:
name: source
name: source-${{ matrix.profile }}
path: .
- name: unzip source code
env:
@ -92,11 +125,13 @@ jobs:
path: source/_build/test/cover
ct_docker:
needs: prepare
needs:
- prepare
- prepare_ee
strategy:
fail-fast: false
matrix:
app_name: ${{ fromJson(needs.prepare.outputs.docker_ct_apps) }}
app: ${{ fromJson(needs.prepare.outputs.docker_ct_apps) }}
runs-on: aws-amd64
defaults:
@ -107,20 +142,24 @@ jobs:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3
with:
name: source
name: source-${{ matrix.app[1] }}
path: .
- name: unzip source code
run: unzip -q source.zip
- name: docker compose up
- name: run tests
working-directory: source
env:
MONGO_TAG: 5
MYSQL_TAG: 8
PGSQL_TAG: 13
REDIS_TAG: 6
INFLUXDB_TAG: 2.5.0
WHICH_APP: ${{ matrix.app[0] }}
PROFILE: ${{ matrix.app[1] }}
run: |
echo $PROFILE
rm _build/default/lib/rocksdb/_build/cmake/CMakeCache.txt
./scripts/ct/run.sh --app ${{ matrix.app_name }}
./scripts/ct/run.sh --app $WHICH_APP
- uses: actions/upload-artifact@v3
with:
name: coverdata
@ -128,19 +167,17 @@ jobs:
- uses: actions/upload-artifact@v3
if: failure()
with:
name: logs-${{ matrix.profile }}
name: logs-${{ matrix.app[0] }}-${{ matrix.app[1] }}
path: source/_build/test/logs
ct:
needs: prepare
needs:
- prepare
- prepare_ee
strategy:
fail-fast: false
matrix:
app_name: ${{ fromJson(needs.prepare.outputs.fast_ct_apps) }}
profile:
- emqx
- emqx-enterprise
app: ${{ fromJson(needs.prepare.outputs.fast_ct_apps) }}
runs-on: aws-amd64
container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04"
defaults:
@ -151,37 +188,19 @@ jobs:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3
with:
name: source
name: source-${{ matrix.app[1] }}
path: .
- name: unzip source code
run: unzip -q source.zip
# produces <app-name>.coverdata
# produces $PROFILE-<app-name>.coverdata
- name: run common test
working-directory: source
env:
PROFILE: ${{ matrix.profile }}
WHICH_APP: ${{ matrix.app_name }}
WHICH_APP: ${{ matrix.app[0] }}
PROFILE: ${{ matrix.app[1] }}
run: |
if [ "$PROFILE" = 'emqx-enterprise' ]; then
COMPILE_FLAGS="$(grep -R "EMQX_RELEASE_EDITION" "$WHICH_APP" | wc -l || true)"
if [ "$COMPILE_FLAGS" -gt 0 ]; then
# need to clean first because the default profile was
make clean
make "${WHICH_APP}-ct"
else
echo "skip_common_test_run_for_app ${WHICH_APP}-ct"
fi
else
case "$WHICH_APP" in
lib-ee/*)
echo "skip_opensource_edition_test_for_lib-ee"
;;
*)
make "${WHICH_APP}-ct"
;;
esac
fi
make "${WHICH_APP}-ct"
- uses: actions/upload-artifact@v3
with:
name: coverdata
@ -190,7 +209,7 @@ jobs:
- uses: actions/upload-artifact@v3
if: failure()
with:
name: logs-${{ matrix.profile }}
name: logs-${{ matrix.app[0] }}-${{ matrix.app[1] }}
path: source/_build/test/logs
make_cover:
@ -204,7 +223,7 @@ jobs:
- uses: AutoModality/action-clean@v1
- uses: actions/download-artifact@v3
with:
name: source
name: source-emqx-enterprise
path: .
- name: unzip source code
run: unzip -q source.zip
@ -217,12 +236,15 @@ jobs:
- name: make cover
working-directory: source
env:
PROFILE: emqx-enterprise
run: make cover
- name: send to coveralls
working-directory: source
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PROFILE: emqx-enterprise
run: make coveralls
- name: get coveralls logs
@ -242,17 +264,3 @@ jobs:
curl -v -k https://coveralls.io/webhook \
--header "Content-Type: application/json" \
--data "{\"repo_name\":\"$GITHUB_REPOSITORY\",\"repo_token\":\"$GITHUB_TOKEN\",\"payload\":{\"build_num\":$GITHUB_RUN_ID,\"status\":\"done\"}}" || true
allgood_functional_tests:
runs-on: ubuntu-20.04
needs:
- eunit_and_proper
- ct_docker
- ct
steps:
- name: Check if all functional tests succeeded
uses: re-actors/alls-green@release/v1
with:
#allowed-failures:
#allowed-skips:
jobs: ${{ toJSON(needs) }}

View File

@ -6,8 +6,8 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d
export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.1.2
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.0
export EMQX_DASHBOARD_VERSION ?= v1.1.3-sync-code
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.5
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT)
@ -61,15 +61,19 @@ mix-deps-get: $(ELIXIR_COMMON_DEPS)
@mix deps.get
.PHONY: eunit
eunit: $(REBAR) conf-segs
eunit: $(REBAR) merge-config
@ENABLE_COVER_COMPILE=1 $(REBAR) eunit -v -c --cover_export_name $(PROFILE)-eunit
.PHONY: proper
proper: $(REBAR)
@ENABLE_COVER_COMPILE=1 $(REBAR) proper -d test/props -c
.PHONY: test-compile
test-compile: $(REBAR) merge-config
$(REBAR) as test compile
.PHONY: ct
ct: $(REBAR) conf-segs
ct: $(REBAR) merge-config
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(PROFILE)-ct
.PHONY: static_checks
@ -97,7 +101,11 @@ $(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app))))
.PHONY: ct-suite
ct-suite: $(REBAR)
ifneq ($(TESTCASE),)
ifneq ($(GROUP),)
$(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --case $(TESTCASE) --group $(GROUP)
else
$(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --case $(TESTCASE)
endif
else ifneq ($(GROUP),)
$(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --group $(GROUP)
else
@ -114,8 +122,6 @@ coveralls: $(REBAR)
COMMON_DEPS := $(REBAR)
ELIXIR_COMMON_DEPS := ensure-hex ensure-mix-rebar3 ensure-mix-rebar
.PHONY: $(REL_PROFILES)
$(REL_PROFILES:%=%): $(COMMON_DEPS)
@$(BUILD) $(@) rel
@ -218,19 +224,19 @@ ALL_DOCKERS = $(REL_PROFILES) $(REL_PROFILES:%=%-elixir)
$(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt))))
.PHONY:
conf-segs:
merge-config:
@$(SCRIPTS)/merge-config.escript
@$(SCRIPTS)/merge-i18n.escript
## elixir target is to create release packages using Elixir's Mix
.PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir)
$(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir): $(COMMON_DEPS) $(ELIXIR_COMMON_DEPS) mix-deps-get
$(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir): $(COMMON_DEPS)
@env IS_ELIXIR=yes $(BUILD) $(subst -elixir,,$(@)) elixir
.PHONY: $(REL_PROFILES:%=%-elixir-pkg)
define gen-elixir-pkg-target
# the Elixir places the tar in a different path than Rebar3
$1-elixir-pkg: $(COMMON_DEPS) $(ELIXIR_COMMON_DEPS) mix-deps-get
$1-elixir-pkg: $(COMMON_DEPS)
@env TAR_PKG_DIR=_build/$1-pkg \
IS_ELIXIR=yes \
$(BUILD) $1-pkg pkg
@ -239,7 +245,7 @@ $(foreach pt,$(REL_PROFILES),$(eval $(call gen-elixir-pkg-target,$(pt))))
.PHONY: $(REL_PROFILES:%=%-elixir-tgz)
define gen-elixir-tgz-target
$1-elixir-tgz: $(COMMON_DEPS) $(ELIXIR_COMMON_DEPS) mix-deps-get
$1-elixir-tgz: $(COMMON_DEPS)
@env IS_ELIXIR=yes $(BUILD) $1 tgz
endef
ALL_ELIXIR_TGZS = $(REL_PROFILES)

View File

@ -35,7 +35,7 @@
-define(EMQX_RELEASE_CE, "5.0.11").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.0.0-alpha.1").
-define(EMQX_RELEASE_EE, "5.0.0-beta.5").
%% the HTTP API version
-define(EMQX_API_VERSION, "5.0").

View File

@ -22,14 +22,14 @@
%% This rebar.config is necessary because the app may be used as a
%% `git_subdir` dependency in other projects.
{deps, [
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.1"}}},
{lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}},
{gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}},
{jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.6"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.30.0"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.31.2"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}
@ -43,7 +43,7 @@
{meck, "0.9.2"},
{proper, "1.4.0"},
{bbmustache, "1.10.0"},
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.6.0"}}}
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.7.0-rc.1"}}}
]},
{extra_src_dirs, [{"test", [recursive]}]}
]}

View File

@ -133,7 +133,7 @@ deep_merge(BaseMap, NewMap) ->
),
maps:merge(MergedBase, maps:with(NewKeys, NewMap)).
-spec deep_convert(map(), convert_fun(), Args :: list()) -> map().
-spec deep_convert(any(), convert_fun(), Args :: list()) -> any().
deep_convert(Map, ConvFun, Args) when is_map(Map) ->
maps:fold(
fun(K, V, Acc) ->

View File

@ -173,7 +173,7 @@ get_metrics(Name, Id) ->
inc(Name, Id, Metric) ->
inc(Name, Id, Metric, 1).
-spec inc(handler_name(), metric_id(), atom(), pos_integer()) -> ok.
-spec inc(handler_name(), metric_id(), atom(), integer()) -> ok.
inc(Name, Id, Metric, Val) ->
counters:add(get_ref(Name, Id), idx_metric(Name, Id, Metric), Val).

View File

@ -18,6 +18,7 @@
-export([
edition/0,
edition_longstr/0,
description/0,
version/0
]).
@ -44,8 +45,12 @@ description() ->
-spec edition() -> ce | ee.
-ifdef(EMQX_RELEASE_EDITION).
edition() -> ?EMQX_RELEASE_EDITION.
edition_longstr() -> <<"Enterprise">>.
-else.
edition() -> ce.
edition_longstr() -> <<"Opensource">>.
-endif.
%% @doc Return the release version.

View File

@ -1908,6 +1908,7 @@ common_ssl_opts_schema(Defaults) ->
sensitive => true,
required => false,
example => <<"">>,
format => <<"password">>,
desc => ?DESC(common_ssl_opts_schema_password)
}
)},

View File

@ -16,7 +16,6 @@
-module(emqx_common_test_helpers).
-define(THIS_APP, ?MODULE).
-include_lib("common_test/include/ct.hrl").
-type special_config_handler() :: fun().
@ -28,13 +27,14 @@
boot_modules/1,
start_apps/1,
start_apps/2,
start_app/4,
stop_apps/1,
reload/2,
app_path/2,
proj_root/0,
deps_path/2,
flush/0,
flush/1
flush/1,
render_and_load_app_config/1
]).
-export([
@ -64,6 +64,15 @@
stop_slave/1
]).
-export([clear_screen/0]).
-export([with_mock/4]).
%% Toxiproxy API
-export([
with_failure/5,
reset_proxy/2
]).
-define(CERTS_PATH(CertName), filename:join(["etc", "certs", CertName])).
-define(MQTT_SSL_TWOWAY, [
@ -155,13 +164,13 @@ start_apps(Apps) ->
start_apps(Apps, fun(_) -> ok end).
-spec start_apps(Apps :: apps(), Handler :: special_config_handler()) -> ok.
start_apps(Apps, Handler) when is_function(Handler) ->
start_apps(Apps, SpecAppConfig) when is_function(SpecAppConfig) ->
%% Load all application code to beam vm first
%% Because, minirest, ekka etc.. application will scan these modules
lists:foreach(fun load/1, [emqx | Apps]),
ok = start_ekka(),
ok = emqx_ratelimiter_SUITE:load_conf(),
lists:foreach(fun(App) -> start_app(App, Handler) end, [emqx | Apps]).
lists:foreach(fun(App) -> start_app(App, SpecAppConfig) end, [emqx | Apps]).
load(App) ->
case application:load(App) of
@ -170,13 +179,36 @@ load(App) ->
{error, Reason} -> error({failed_to_load_app, App, Reason})
end.
start_app(App, Handler) ->
start_app(
App,
app_schema(App),
app_path(App, filename:join(["etc", app_conf_file(App)])),
Handler
).
render_and_load_app_config(App) ->
load(App),
Schema = app_schema(App),
Conf = app_path(App, filename:join(["etc", app_conf_file(App)])),
try
do_render_app_config(App, Schema, Conf)
catch
throw:E:St ->
%% turn throw into error
error({Conf, E, St})
end.
do_render_app_config(App, Schema, ConfigFile) ->
Vars = mustache_vars(App),
RenderedConfigFile = render_config_file(ConfigFile, Vars),
read_schema_configs(Schema, RenderedConfigFile),
force_set_config_file_paths(App, [RenderedConfigFile]),
copy_certs(App, RenderedConfigFile),
ok.
start_app(App, SpecAppConfig) ->
render_and_load_app_config(App),
SpecAppConfig(App),
case application:ensure_all_started(App) of
{ok, _} ->
ok = ensure_dashboard_listeners_started(App),
ok;
{error, Reason} ->
error({failed_to_start_app, App, Reason})
end.
app_conf_file(emqx_conf) -> "emqx.conf.all";
app_conf_file(App) -> atom_to_list(App) ++ ".conf".
@ -198,21 +230,6 @@ mustache_vars(App) ->
{platform_log_dir, app_path(App, "log")}
].
%% Render ConfigFile against App's mustache variables, validate it with
%% Schema and load it, run the caller's SpecAppConfig hook, then start
%% App and its dependencies. Crashes with
%% {failed_to_start_app, App, Reason} if startup fails.
start_app(App, Schema, ConfigFile, SpecAppConfig) ->
Vars = mustache_vars(App),
RenderedConfigFile = render_config_file(ConfigFile, Vars),
read_schema_configs(Schema, RenderedConfigFile),
force_set_config_file_paths(App, [RenderedConfigFile]),
copy_certs(App, RenderedConfigFile),
SpecAppConfig(App),
case application:ensure_all_started(App) of
{ok, _} ->
%% dashboard listeners come up asynchronously; block until ready
ok = ensure_dashboard_listeners_started(App),
ok;
{error, Reason} ->
error({failed_to_start_app, App, Reason})
end.
render_config_file(ConfigFile, Vars0) ->
Temp =
case file:read_file(ConfigFile) of
@ -245,47 +262,21 @@ stop_apps(Apps) ->
[application:stop(App) || App <- Apps ++ [emqx, ekka, mria, mnesia]],
ok.
%% Root directory of the project checkout: the leading path segments of
%% the emqx application's lib dir, up to (but excluding) the first
%% "_build" segment.
proj_root() ->
    Segments = filename:split(app_path(emqx, ".")),
    NotBuildDir = fun(Seg) -> iolist_to_binary(Seg) =/= <<"_build">> end,
    filename:join(lists:takewhile(NotBuildDir, Segments)).
%% backward compatible alias: older suites still call deps_path/2;
%% it is identical to app_path/2.
deps_path(App, RelativePath) -> app_path(App, RelativePath).
%% Absolute, sanitized path of RelativePath inside App's lib directory,
%% loading App's code first if necessary.
app_path(App, RelativePath) ->
ok = ensure_app_loaded(App),
Lib = code:lib_dir(App),
safe_relative_path(filename:join([Lib, RelativePath])).
%% Crash unless App's code is reachable on the code path.
%% Fix: the original raised {not_loaded, ?THIS_APP} no matter which
%% application actually failed the check, which made the error
%% misleading; report the offending App instead.
assert_app_loaded(App) ->
    case code:lib_dir(App) of
        {error, bad_name} -> error({not_loaded, App});
        _ -> ok
    end.
%% Make sure App's code is loaded and on the code path.
%% If App is unknown to the code server, locate it as a sibling of
%% ?THIS_APP under the same lib root (the usual rebar3 layout),
%% add its ebin to the path, load it, and re-check.
ensure_app_loaded(?THIS_APP) ->
ok = assert_app_loaded(?THIS_APP);
ensure_app_loaded(App) ->
case code:lib_dir(App) of
{error, bad_name} ->
%% ?THIS_APP's own lib dir anchors the lib root we search under
ok = assert_app_loaded(?THIS_APP),
Dir0 = code:lib_dir(?THIS_APP),
LibRoot = upper_level(Dir0),
Dir = filename:join([LibRoot, atom_to_list(App), "ebin"]),
case code:add_pathz(Dir) of
true -> ok;
{error, bad_directory} -> error({bad_directory, Dir})
end,
case application:load(App) of
ok -> ok;
{error, Reason} -> error({failed_to_load, App, Reason})
end,
ok = assert_app_loaded(App);
_ ->
%% already on the code path; nothing to do
ok
end.
%% Parent directory of Dir, computed lexically by dropping the last
%% path segment (no filesystem access).
upper_level(Dir) ->
    Segments = filename:split(Dir),
    filename:join(lists:sublist(Segments, length(Segments) - 1)).
safe_relative_path(Path) ->
case filename:split(Path) of
["/" | T] ->
@ -793,3 +784,139 @@ expand_node_specs(Specs, CommonOpts) ->
end,
Specs
).
%% is useful when iterating on the tests in a loop, to get rid of all
%% the garbage printed before the test itself begins.
%% Clear both stdout and stderr: cursor-home + erase-display ("2J"),
%% then erase the scrollback buffer ("3J"), so repeated test runs do
%% not pile up old output.
clear_screen() ->
    lists:foreach(
        fun(EscapeSeq) ->
            io:format(standard_io, EscapeSeq, []),
            io:format(standard_error, EscapeSeq, [])
        end,
        ["\033[H\033[2J", "\033[H\033[3J"]
    ),
    ok.
%% Run Fun() while Mod:FnName is mocked by MockedFn (via meck, with
%% passthrough for the module's other functions), and guarantee the
%% mock is unloaded afterwards even if Fun() crashes.
with_mock(Mod, FnName, MockedFn, Fun) ->
ok = meck:new(Mod, [non_strict, no_link, no_history, passthrough]),
ok = meck:expect(Mod, FnName, MockedFn),
try
Fun()
after
ok = meck:unload(Mod)
end.
%%-------------------------------------------------------------------------------
%% Toxiproxy utils
%%-------------------------------------------------------------------------------
%% Ask the Toxiproxy REST API to remove every toxic and re-enable all
%% proxies. Crashes unless the server answers 204 No Content.
reset_proxy(ProxyHost, ProxyPort) ->
    Url = lists:concat(["http://", ProxyHost, ":", ProxyPort, "/reset"]),
    {ok, {{_, 204, _}, _, _}} =
        httpc:request(post, {Url, [], "application/json", <<>>}, [], [
            {body_format, binary}
        ]).
%% Run Fun() with the given Toxiproxy failure active on the proxy
%% called Name, and heal the failure afterwards even if Fun() crashes.
%% FailureType is one of: down | timeout | latency_up.
with_failure(FailureType, Name, ProxyHost, ProxyPort, Fun) ->
enable_failure(FailureType, Name, ProxyHost, ProxyPort),
try
Fun()
after
heal_failure(FailureType, Name, ProxyHost, ProxyPort)
end.
%% Turn a Toxiproxy failure mode on for the proxy called Name:
%%   down       - disable the proxy entirely
%%   timeout    - add a timeout toxic (upstream data is black-holed)
%%   latency_up - add an upstream latency toxic
enable_failure(FailureType, Name, ProxyHost, ProxyPort) ->
    {Toggle, Apply} =
        case FailureType of
            down -> {off, fun switch_proxy/4};
            timeout -> {on, fun timeout_proxy/4};
            latency_up -> {on, fun latency_up_proxy/4}
        end,
    Apply(Toggle, Name, ProxyHost, ProxyPort).
%% Undo a failure previously injected by enable_failure/4: re-enable
%% the proxy or delete the corresponding toxic.
heal_failure(FailureType, Name, ProxyHost, ProxyPort) ->
    {Toggle, Apply} =
        case FailureType of
            down -> {on, fun switch_proxy/4};
            timeout -> {off, fun timeout_proxy/4};
            latency_up -> {off, fun latency_up_proxy/4}
        end,
    Apply(Toggle, Name, ProxyHost, ProxyPort).
%% Enable (on) or disable (off) the proxy called Name by POSTing to
%% /proxies/:name. Crashes unless the API answers 200 OK.
switch_proxy(Switch, Name, ProxyHost, ProxyPort) ->
    Url = lists:concat([
        "http://", ProxyHost, ":", ProxyPort, "/proxies/", Name
    ]),
    Enabled =
        case Switch of
            off -> false;
            on -> true
        end,
    Payload = emqx_json:encode(#{<<"enabled">> => Enabled}),
    {ok, {{_, 200, _}, _, _}} =
        httpc:request(post, {Url, [], "application/json", Payload}, [], [
            {body_format, binary}
        ]).
%% Add (on) or delete (off) a "timeout" toxic named <Name>_timeout on
%% the proxy called Name. The toxic holds upstream data indefinitely
%% (attribute timeout = 0). Crashes unless the API answers 200 on
%% create and 204 on delete.
timeout_proxy(on, Name, ProxyHost, ProxyPort) ->
    Url = lists:concat([
        "http://", ProxyHost, ":", ProxyPort, "/proxies/", Name, "/toxics"
    ]),
    Toxic = #{
        <<"name">> => iolist_to_binary([Name, "_timeout"]),
        <<"type">> => <<"timeout">>,
        <<"stream">> => <<"upstream">>,
        <<"toxicity">> => 1.0,
        <<"attributes">> => #{<<"timeout">> => 0}
    },
    Payload = emqx_json:encode(Toxic),
    {ok, {{_, 200, _}, _, _}} =
        httpc:request(post, {Url, [], "application/json", Payload}, [], [
            {body_format, binary}
        ]);
timeout_proxy(off, Name, ProxyHost, ProxyPort) ->
    Url = lists:concat([
        "http://",
        ProxyHost,
        ":",
        ProxyPort,
        "/proxies/",
        Name,
        "/toxics/",
        Name,
        "_timeout"
    ]),
    {ok, {{_, 204, _}, _, _}} =
        httpc:request(delete, {Url, [], "application/json", <<>>}, [], [
            {body_format, binary}
        ]).
%% Add (on) or delete (off) a "latency" toxic named <Name>_latency_up
%% on the proxy called Name: 20s latency with 3s jitter on the
%% upstream direction. Crashes unless the API answers 200 on create
%% and 204 on delete.
latency_up_proxy(on, Name, ProxyHost, ProxyPort) ->
    Url = lists:concat([
        "http://", ProxyHost, ":", ProxyPort, "/proxies/", Name, "/toxics"
    ]),
    Toxic = #{
        <<"name">> => iolist_to_binary([Name, "_latency_up"]),
        <<"type">> => <<"latency">>,
        <<"stream">> => <<"upstream">>,
        <<"toxicity">> => 1.0,
        <<"attributes">> => #{
            <<"latency">> => 20_000,
            <<"jitter">> => 3_000
        }
    },
    Payload = emqx_json:encode(Toxic),
    {ok, {{_, 200, _}, _, _}} =
        httpc:request(post, {Url, [], "application/json", Payload}, [], [
            {body_format, binary}
        ]);
latency_up_proxy(off, Name, ProxyHost, ProxyPort) ->
    Url = lists:concat([
        "http://",
        ProxyHost,
        ":",
        ProxyPort,
        "/proxies/",
        Name,
        "/toxics/",
        Name,
        "_latency_up"
    ]),
    {ok, {{_, 204, _}, _, _}} =
        httpc:request(delete, {Url, [], "application/json", <<>>}, [], [
            {body_format, binary}
        ]).

View File

@ -115,7 +115,7 @@ message_expiry_interval_init() ->
message_expiry_interval_exipred(CPublish, CControl, QoS) ->
ct:pal("~p ~p", [?FUNCTION_NAME, QoS]),
%% publish to t/a and waiting for the message expired
emqtt:publish(
_ = emqtt:publish(
CPublish,
<<"t/a">>,
#{'Message-Expiry-Interval' => 1},
@ -152,7 +152,7 @@ message_expiry_interval_exipred(CPublish, CControl, QoS) ->
message_expiry_interval_not_exipred(CPublish, CControl, QoS) ->
ct:pal("~p ~p", [?FUNCTION_NAME, QoS]),
%% publish to t/a
emqtt:publish(
_ = emqtt:publish(
CPublish,
<<"t/a">>,
#{'Message-Expiry-Interval' => 20},

View File

@ -529,8 +529,11 @@ t_connack_max_qos_allowed(Config) ->
%% [MQTT-3.2.2-10]
{ok, _, [2]} = emqtt:subscribe(Client1, Topic, 2),
{ok, _} = emqtt:publish(Client1, Topic, <<"Unsupported Qos 1">>, qos1),
%% [MQTT-3.2.2-11]
?assertMatch(
{error, {disconnected, 155, _}},
emqtt:publish(Client1, Topic, <<"Unsupported Qos 1">>, qos1)
),
?assertEqual(155, receive_disconnect_reasoncode()),
waiting_client_process_exit(Client1),
@ -563,8 +566,11 @@ t_connack_max_qos_allowed(Config) ->
%% [MQTT-3.2.2-10]
{ok, _, [2]} = emqtt:subscribe(Client3, Topic, 2),
{ok, _} = emqtt:publish(Client3, Topic, <<"Unsupported Qos 2">>, qos2),
%% [MQTT-3.2.2-11]
?assertMatch(
{error, {disconnected, 155, _}},
emqtt:publish(Client3, Topic, <<"Unsupported Qos 2">>, qos2)
),
?assertEqual(155, receive_disconnect_reasoncode()),
waiting_client_process_exit(Client3),

View File

@ -4,7 +4,7 @@
{vsn, "0.1.10"},
{modules, []},
{registered, [emqx_authn_sup, emqx_authn_registry]},
{applications, [kernel, stdlib, emqx_resource, ehttpc, epgsql, mysql, jose]},
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},
{mod, {emqx_authn_app, []}},
{env, []},
{licenses, ["Apache-2.0"]},

View File

@ -47,7 +47,6 @@
]).
-define(DEFAULT_RESOURCE_OPTS, #{
auto_retry_interval => 6000,
start_after_created => false
}).

View File

@ -22,15 +22,18 @@
%% callbacks of behaviour emqx_resource
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_query/4,
on_query/3,
on_get_status/2,
connect/1
]).
-define(DEFAULT_POOL_SIZE, 8).
callback_mode() -> always_sync.
on_start(InstId, Opts) ->
PoolName = emqx_plugin_libs_pool:pool_name(InstId),
PoolOpts = [
@ -45,7 +48,7 @@ on_start(InstId, Opts) ->
on_stop(_InstId, #{pool_name := PoolName}) ->
emqx_plugin_libs_pool:stop_pool(PoolName).
on_query(InstId, get_jwks, AfterQuery, #{pool_name := PoolName}) ->
on_query(InstId, get_jwks, #{pool_name := PoolName}) ->
Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover),
case Result of
{error, Reason} ->
@ -54,20 +57,18 @@ on_query(InstId, get_jwks, AfterQuery, #{pool_name := PoolName}) ->
connector => InstId,
command => get_jwks,
reason => Reason
}),
emqx_resource:query_failed(AfterQuery);
});
_ ->
emqx_resource:query_success(AfterQuery)
ok
end,
Result;
on_query(_InstId, {update, Opts}, AfterQuery, #{pool_name := PoolName}) ->
on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) ->
lists:foreach(
fun({_, Worker}) ->
ok = ecpool_worker:exec(Worker, {emqx_authn_jwks_client, update, [Opts]}, infinity)
end,
ecpool:workers(PoolName)
),
emqx_resource:query_success(AfterQuery),
ok.
on_get_status(_InstId, #{pool_name := PoolName}) ->

View File

@ -164,7 +164,7 @@ authenticate(
) ->
Filter = emqx_authn_utils:render_deep(FilterTemplate, Credential),
case emqx_resource:query(ResourceId, {find_one, Collection, Filter, #{}}) of
undefined ->
{ok, undefined} ->
ignore;
{error, Reason} ->
?TRACE_AUTHN_PROVIDER(error, "mongodb_query_failed", #{
@ -174,7 +174,7 @@ authenticate(
reason => Reason
}),
ignore;
Doc ->
{ok, Doc} ->
case check_password(Password, Doc, State) of
ok ->
{ok, is_superuser(Doc, State)};

View File

@ -50,7 +50,7 @@ init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?MONGO_HOST, ?MONGO_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_authn]),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
Config;
false ->
{skip, no_mongo}
@ -61,7 +61,7 @@ end_per_suite(_Config) ->
[authentication],
?GLOBAL
),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authn]).
%%------------------------------------------------------------------------------

View File

@ -46,7 +46,7 @@ init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?MONGO_HOST, ?MONGO_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_authn]),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
Config;
false ->
{skip, no_mongo}
@ -57,7 +57,7 @@ end_per_suite(_Config) ->
[authentication],
?GLOBAL
),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authn]).
%%------------------------------------------------------------------------------

View File

@ -58,7 +58,7 @@ init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?MYSQL_HOST, ?MYSQL_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_authn]),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
{ok, _} = emqx_resource:create_local(
?MYSQL_RESOURCE,
?RESOURCE_GROUP,
@ -77,7 +77,7 @@ end_per_suite(_Config) ->
?GLOBAL
),
ok = emqx_resource:remove_local(?MYSQL_RESOURCE),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authn]).
%%------------------------------------------------------------------------------

View File

@ -49,7 +49,7 @@ init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?MYSQL_HOST, ?MYSQL_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_authn]),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
Config;
false ->
{skip, no_mysql_tls}
@ -60,7 +60,7 @@ end_per_suite(_Config) ->
[authentication],
?GLOBAL
),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authn]).
%%------------------------------------------------------------------------------

View File

@ -59,7 +59,7 @@ init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?PGSQL_HOST, ?PGSQL_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_authn]),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
{ok, _} = emqx_resource:create_local(
?PGSQL_RESOURCE,
?RESOURCE_GROUP,
@ -78,7 +78,7 @@ end_per_suite(_Config) ->
?GLOBAL
),
ok = emqx_resource:remove_local(?PGSQL_RESOURCE),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authn]).
%%------------------------------------------------------------------------------

View File

@ -49,7 +49,7 @@ init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?PGSQL_HOST, ?PGSQL_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_authn]),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
Config;
false ->
{skip, no_pgsql_tls}
@ -60,7 +60,7 @@ end_per_suite(_Config) ->
[authentication],
?GLOBAL
),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authn]).
%%------------------------------------------------------------------------------

View File

@ -58,7 +58,7 @@ init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?REDIS_HOST, ?REDIS_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_authn]),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
{ok, _} = emqx_resource:create_local(
?REDIS_RESOURCE,
?RESOURCE_GROUP,
@ -77,7 +77,7 @@ end_per_suite(_Config) ->
?GLOBAL
),
ok = emqx_resource:remove_local(?REDIS_RESOURCE),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authn]).
%%------------------------------------------------------------------------------

View File

@ -49,7 +49,7 @@ init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?REDIS_HOST, ?REDIS_TLS_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_authn]),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
Config;
false ->
{skip, no_redis}
@ -60,7 +60,7 @@ end_per_suite(_Config) ->
[authentication],
?GLOBAL
),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authn]).
%%------------------------------------------------------------------------------

View File

@ -1,13 +1,14 @@
%% -*- mode: erlang -*-
{application, emqx_authz, [
{description, "An OTP application"},
{vsn, "0.1.8"},
{vsn, "0.1.9"},
{registered, []},
{mod, {emqx_authz_app, []}},
{applications, [
kernel,
stdlib,
crypto,
emqx_resource,
emqx_connector
]},
{env, []},

View File

@ -94,9 +94,9 @@ authorize(
resource_id => ResourceID
}),
nomatch;
[] ->
{ok, []} ->
nomatch;
Rows ->
{ok, Rows} ->
Rules = [
emqx_authz_rule:compile({Permission, all, Action, Topics})
|| #{

View File

@ -40,7 +40,6 @@
]).
-define(DEFAULT_RESOURCE_OPTS, #{
auto_retry_interval => 6000,
start_after_created => false
}).

View File

@ -45,7 +45,7 @@ init_per_suite(Config) ->
),
ok = emqx_common_test_helpers:start_apps(
[emqx_connector, emqx_conf, emqx_authz],
[emqx_conf, emqx_authz],
fun set_special_configs/1
),
Config.
@ -59,8 +59,7 @@ end_per_suite(_Config) ->
<<"sources">> => []
}
),
ok = stop_apps([emqx_resource]),
emqx_common_test_helpers:stop_apps([emqx_connector, emqx_authz, emqx_conf]),
emqx_common_test_helpers:stop_apps([emqx_authz, emqx_conf]),
meck:unload(emqx_resource),
ok.

View File

@ -23,6 +23,8 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
suite() -> [{timetrap, {seconds, 60}}].
all() ->
emqx_common_test_helpers:all(?MODULE).
@ -45,7 +47,6 @@ end_per_suite(_Config) ->
<<"sources">> => []
}
),
ok = stop_apps([emqx_resource, emqx_connector]),
emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf, emqx_management]),
ok.

View File

@ -45,7 +45,7 @@ end_per_suite(_Config) ->
<<"sources">> => []
}
),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf]),
ok.

View File

@ -103,7 +103,7 @@ groups() ->
[].
init_per_suite(Config) ->
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
meck:new(emqx_resource, [non_strict, passthrough, no_history, no_link]),
meck:expect(emqx_resource, create_local, fun(_, _, _, _) -> {ok, meck_data} end),
meck:expect(emqx_resource, health_check, fun(St) -> {ok, St} end),
@ -120,7 +120,7 @@ init_per_suite(Config) ->
[emqx_conf, emqx_authz, emqx_dashboard],
fun set_special_configs/1
),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
Config.
end_per_suite(_Config) ->
@ -134,7 +134,7 @@ end_per_suite(_Config) ->
),
%% resource and connector should be stop first,
%% or authz_[mysql|pgsql|redis..]_SUITE would be failed
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf]),
meck:unload(emqx_resource),
ok.

View File

@ -55,7 +55,6 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
ok = emqx_authz_test_lib:restore_authorizers(),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = emqx_common_test_helpers:stop_apps([emqx_authz]).
init_per_testcase(_TestCase, Config) ->

View File

@ -40,17 +40,17 @@ all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
ok = stop_apps([emqx_resource, emqx_connector, cowboy]),
ok = stop_apps([emqx_resource, cowboy]),
ok = emqx_common_test_helpers:start_apps(
[emqx_conf, emqx_authz],
fun set_special_configs/1
),
ok = start_apps([emqx_resource, emqx_connector, cowboy]),
ok = start_apps([emqx_resource, cowboy]),
Config.
end_per_suite(_Config) ->
ok = emqx_authz_test_lib:restore_authorizers(),
ok = stop_apps([emqx_resource, emqx_connector, cowboy]),
ok = stop_apps([emqx_resource, cowboy]),
ok = emqx_common_test_helpers:stop_apps([emqx_authz]).
set_special_configs(emqx_authz) ->

View File

@ -34,14 +34,14 @@ groups() ->
[].
init_per_suite(Config) ->
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
case emqx_common_test_helpers:is_tcp_server_available(?MONGO_HOST, ?MONGO_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps(
[emqx_conf, emqx_authz],
fun set_special_configs/1
),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
Config;
false ->
{skip, no_mongo}
@ -49,7 +49,7 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
ok = emqx_authz_test_lib:restore_authorizers(),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authz]).
set_special_configs(emqx_authz) ->

View File

@ -33,14 +33,14 @@ groups() ->
[].
init_per_suite(Config) ->
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
case emqx_common_test_helpers:is_tcp_server_available(?MYSQL_HOST, ?MYSQL_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps(
[emqx_conf, emqx_authz],
fun set_special_configs/1
),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
{ok, _} = emqx_resource:create_local(
?MYSQL_RESOURCE,
?RESOURCE_GROUP,
@ -56,7 +56,7 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
ok = emqx_authz_test_lib:restore_authorizers(),
ok = emqx_resource:remove_local(?MYSQL_RESOURCE),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authz]).
init_per_testcase(_TestCase, Config) ->

View File

@ -33,14 +33,14 @@ groups() ->
[].
init_per_suite(Config) ->
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
case emqx_common_test_helpers:is_tcp_server_available(?PGSQL_HOST, ?PGSQL_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps(
[emqx_conf, emqx_authz],
fun set_special_configs/1
),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
{ok, _} = emqx_resource:create_local(
?PGSQL_RESOURCE,
?RESOURCE_GROUP,
@ -56,7 +56,7 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
ok = emqx_authz_test_lib:restore_authorizers(),
ok = emqx_resource:remove_local(?PGSQL_RESOURCE),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authz]).
init_per_testcase(_TestCase, Config) ->

View File

@ -34,14 +34,14 @@ groups() ->
[].
init_per_suite(Config) ->
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
case emqx_common_test_helpers:is_tcp_server_available(?REDIS_HOST, ?REDIS_DEFAULT_PORT) of
true ->
ok = emqx_common_test_helpers:start_apps(
[emqx_conf, emqx_authz],
fun set_special_configs/1
),
ok = start_apps([emqx_resource, emqx_connector]),
ok = start_apps([emqx_resource]),
{ok, _} = emqx_resource:create_local(
?REDIS_RESOURCE,
?RESOURCE_GROUP,
@ -57,7 +57,7 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
ok = emqx_authz_test_lib:restore_authorizers(),
ok = emqx_resource:remove_local(?REDIS_RESOURCE),
ok = stop_apps([emqx_resource, emqx_connector]),
ok = stop_apps([emqx_resource]),
ok = emqx_common_test_helpers:stop_apps([emqx_authz]).
init_per_testcase(_TestCase, Config) ->

View File

@ -1,16 +1,14 @@
emqx_bridge_mqtt_schema {
desc_rec {
desc {
en: """Configuration for MQTT bridge."""
zh: """MQTT Bridge 配置"""
}
label: {
en: "MQTT Bridge Configuration"
zh: "MQTT Bridge 配置"
}
}
config {
desc {
en: """The config for MQTT Bridges."""
zh: """MQTT Bridge 的配置。"""
}
label: {
en: "Config"
zh: "配置"
}
}
desc_type {
desc {
en: """The bridge type."""

View File

@ -11,24 +11,6 @@ emqx_bridge_schema {
}
}
desc_connector {
desc {
en: """
The ID or the configs of the connector to be used for this bridge. Connector IDs must be of format:
<code>{type}:{name}</code>.<br/>
In config files, you can find the corresponding config entry for a connector by such path:
'connectors.{type}.{name}'.<br/>
"""
zh: """
Bridge 使用的 Connector 的 ID 或者配置。Connector ID 的格式必须为:<code>{type}:{name}</code>。<br/>
在配置文件中,您可以通过以下路径找到 Connector 的相应配置条目:'connector.{type}.{name}'。<br/>"""
}
label: {
en: "Connector ID"
zh: "Connector ID"
}
}
desc_metrics {
desc {
en: """The metrics of the bridge"""
@ -85,7 +67,7 @@ Bridge 使用的 Connector 的 ID 或者配置。Connector ID 的格式必须为
}
bridges_name {
bridges_mqtt {
desc {
en: """MQTT bridges to/from another MQTT broker"""
zh: """桥接到另一个 MQTT Broker 的 MQTT Bridge"""
@ -96,36 +78,139 @@ Bridge 使用的 Connector 的 ID 或者配置。Connector ID 的格式必须为
}
}
metric_batching {
desc {
en: """Count of messages that are currently accumulated in memory waiting for sending in one batch."""
zh: """当前积压在内存里,等待批量发送的消息个数"""
}
label: {
en: "Batched"
zh: "等待批量发送"
}
}
metric_dropped {
desc {
en: """Count of messages dropped."""
zh: """被丢弃的消息个数。"""
}
label: {
en: "Dropped"
zh: "丢弃"
}
}
metric_dropped_other {
desc {
en: """Count of messages dropped due to other reasons."""
zh: """因为其他原因被丢弃的消息个数。"""
}
label: {
en: "Dropped Other"
zh: "其他丢弃"
}
}
metric_dropped_queue_full {
desc {
en: """Count of messages dropped due to the queue is full."""
zh: """因为队列已满被丢弃的消息个数。"""
}
label: {
en: "Dropped Queue Full"
zh: "队列已满被丢弃"
}
}
metric_dropped_queue_not_enabled {
desc {
en: """Count of messages dropped due to the queue is not enabled."""
zh: """因为队列未启用被丢弃的消息个数。"""
}
label: {
en: "Dropped Queue Disabled"
zh: "队列未启用被丢弃"
}
}
metric_dropped_resource_not_found {
desc {
en: """Count of messages dropped due to the resource is not found."""
zh: """因为资源不存在被丢弃的消息个数。"""
}
label: {
en: "Dropped Resource NotFound"
zh: "资源不存在被丢弃"
}
}
metric_dropped_resource_stopped {
desc {
en: """Count of messages dropped due to the resource is stopped."""
zh: """因为资源已停用被丢弃的消息个数。"""
}
label: {
en: "Dropped Resource Stopped"
zh: "资源停用被丢弃"
}
}
metric_matched {
desc {
en: """Count of this bridge is queried"""
zh: """Bridge 执行操作的次数"""
en: """Count of this bridge is matched and queried."""
zh: """Bridge 被匹配到(被请求)的次数。"""
}
label: {
en: "Bridge Matched"
zh: "Bridge 执行操作的次数"
en: "Matched"
zh: "匹配次数"
}
}
metric_success {
metric_queuing {
desc {
en: """Count of query success"""
zh: """Bridge 执行操作成功的次数"""
en: """Count of messages that are currently queuing."""
zh: """当前被缓存到磁盘队列的消息个数。"""
}
label: {
en: "Bridge Success"
zh: "Bridge 执行操作成功的次数"
en: "Queued"
zh: "被缓存"
}
}
metric_retried {
desc {
en: """Times of retried."""
zh: """重试的次数。"""
}
label: {
en: "Retried"
zh: "已重试"
}
}
metric_failed {
metric_sent_failed {
desc {
en: """Count of query failed"""
zh: """Bridge 执行操作失败的次数"""
en: """Count of messages that failed to send."""
zh: """发送失败的消息个数。"""
}
label: {
en: "Bridge Failed"
zh: "Bridge 执行操作失败的次数"
en: "Sent Failed"
zh: "发送失败"
}
}
metric_sent_inflight {
desc {
en: """Count of messages that were sent asynchronously but ACKs are not received."""
zh: """已异步地发送但没有收到 ACK 的消息个数。"""
}
label: {
en: "Sent Inflight"
zh: "已发送未确认"
}
}
metric_sent_success {
desc {
en: """Count of messages that were sent successfully."""
zh: """已经发送成功的消息个数。"""
}
label: {
en: "Sent Success"
zh: "发送成功"
}
}
@ -162,6 +247,17 @@ Bridge 使用的 Connector 的 ID 或者配置。Connector ID 的格式必须为
}
}
metric_received {
desc {
en: """Count of messages received from the remote system."""
zh: """从远程系统收到的消息个数。"""
}
label: {
en: "Received"
zh: "已接收"
}
}
desc_bridges {
desc {
en: """Configuration for MQTT bridges."""

View File

@ -11,17 +11,6 @@ emqx_bridge_webhook_schema {
}
}
config_direction {
desc {
en: """The direction of this bridge, MUST be 'egress'"""
zh: """Bridge 的方向, 必须是 egress"""
}
label: {
en: "Bridge Direction"
zh: "Bridge 方向"
}
}
config_url {
desc {
en: """

View File

@ -0,0 +1,95 @@
%% A metrics map with all 17 counters/gauges zeroed; used for examples
%% and defaults.
-define(EMPTY_METRICS,
?METRICS(
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
)
).
%% Construct a bridge metrics map (map-association form, `=>`).
%% Keys mirror the counters documented in the bridge i18n schema:
%% batching/queuing are gauges, the dropped.* keys break down the
%% total 'dropped' counter, and rate/rate_last5m/rate_max are the
%% current, 5-minute, and historical-max matched rates.
-define(METRICS(
Batched,
Dropped,
DroppedOther,
DroppedQueueFull,
DroppedQueueNotEnabled,
DroppedResourceNotFound,
DroppedResourceStopped,
Matched,
Queued,
Retried,
SentFailed,
SentInflight,
SentSucc,
RATE,
RATE_5,
RATE_MAX,
Rcvd
),
#{
'batching' => Batched,
'dropped' => Dropped,
'dropped.other' => DroppedOther,
'dropped.queue_full' => DroppedQueueFull,
'dropped.queue_not_enabled' => DroppedQueueNotEnabled,
'dropped.resource_not_found' => DroppedResourceNotFound,
'dropped.resource_stopped' => DroppedResourceStopped,
'matched' => Matched,
'queuing' => Queued,
'retried' => Retried,
'failed' => SentFailed,
'inflight' => SentInflight,
'success' => SentSucc,
rate => RATE,
rate_last5m => RATE_5,
rate_max => RATE_MAX,
received => Rcvd
}
).
%% Pattern-match counterpart of ?METRICS (map-match form, `:=`);
%% lower-case name distinguishes the matcher from the constructor.
-define(metrics(
Batched,
Dropped,
DroppedOther,
DroppedQueueFull,
DroppedQueueNotEnabled,
DroppedResourceNotFound,
DroppedResourceStopped,
Matched,
Queued,
Retried,
SentFailed,
SentInflight,
SentSucc,
RATE,
RATE_5,
RATE_MAX,
Rcvd
),
#{
'batching' := Batched,
'dropped' := Dropped,
'dropped.other' := DroppedOther,
'dropped.queue_full' := DroppedQueueFull,
'dropped.queue_not_enabled' := DroppedQueueNotEnabled,
'dropped.resource_not_found' := DroppedResourceNotFound,
'dropped.resource_stopped' := DroppedResourceStopped,
'matched' := Matched,
'queuing' := Queued,
'retried' := Retried,
'failed' := SentFailed,
'inflight' := SentInflight,
'success' := SentSucc,
rate := RATE,
rate_last5m := RATE_5,
rate_max := RATE_MAX,
received := Rcvd
}
).
%% Example payload for API docs: zeroed aggregate metrics plus one
%% per-node entry for the local node.
-define(METRICS_EXAMPLE, #{
metrics => ?EMPTY_METRICS,
node_metrics => [
#{
node => node(),
metrics => ?EMPTY_METRICS
}
]
}).

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge, [
{description, "An OTP application"},
{vsn, "0.1.5"},
{description, "EMQX bridges"},
{vsn, "0.1.6"},
{registered, []},
{mod, {emqx_bridge_app, []}},
{applications, [

View File

@ -37,8 +37,8 @@
create/3,
disable_enable/3,
remove/2,
list/0,
list_bridges_by_connector/1
check_deps_and_remove/3,
list/0
]).
-export([send_message/2]).
@ -48,15 +48,23 @@
%% exported for `emqx_telemetry'
-export([get_basic_usage_info/0]).
-define(EGRESS_DIR_BRIDGES(T),
T == webhook;
T == mysql;
T == influxdb_api_v1;
T == influxdb_api_v2
%% T == influxdb_udp
).
load() ->
%% set wait_for_resource_ready => 0 to start resources async
Opts = #{auto_retry_interval => 60000, wait_for_resource_ready => 0},
Bridges = emqx:get_config([bridges], #{}),
lists:foreach(
fun({Type, NamedConf}) ->
lists:foreach(
fun({Name, Conf}) ->
safe_load_bridge(Type, Name, Conf, Opts)
%% fetch opts for `emqx_resource_worker`
ResOpts = emqx_resource:fetch_creation_opts(Conf),
safe_load_bridge(Type, Name, Conf, ResOpts)
end,
maps:to_list(NamedConf)
)
@ -93,10 +101,10 @@ load_hook() ->
load_hook(Bridges) ->
lists:foreach(
fun({_Type, Bridge}) ->
fun({Type, Bridge}) ->
lists:foreach(
fun({_Name, BridgeConf}) ->
do_load_hook(BridgeConf)
do_load_hook(Type, BridgeConf)
end,
maps:to_list(Bridge)
)
@ -104,12 +112,13 @@ load_hook(Bridges) ->
maps:to_list(Bridges)
).
do_load_hook(#{local_topic := _} = Conf) ->
case maps:get(direction, Conf, egress) of
egress -> emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE);
ingress -> ok
end;
do_load_hook(_Conf) ->
%% Register the 'message.publish' hook only for bridge configs that can
%% forward local MQTT messages, i.e. ones that declare a local topic:
%% egress-direction bridge types (?EGRESS_DIR_BRIDGES) with local_topic,
%% mqtt bridges with an egress local topic, and kafka producers with an
%% mqtt source topic. Anything else needs no publish hook.
do_load_hook(Type, #{local_topic := _}) when ?EGRESS_DIR_BRIDGES(Type) ->
emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE);
do_load_hook(mqtt, #{egress := #{local := #{topic := _}}}) ->
emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE);
do_load_hook(kafka, #{producer := #{mqtt := #{topic := _}}}) ->
emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE);
do_load_hook(_Type, _Conf) ->
%% ingress-only bridge: no local messages to intercept
ok.
unload_hook() ->
@ -171,9 +180,9 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnv) ->
diff_confs(NewConf, OldConf),
%% The config update will be failed if any task in `perform_bridge_changes` failed.
Result = perform_bridge_changes([
{fun emqx_bridge_resource:remove/3, Removed},
{fun emqx_bridge_resource:create/3, Added},
{fun emqx_bridge_resource:update/3, Updated}
{fun emqx_bridge_resource:remove/4, Removed},
{fun emqx_bridge_resource:create/4, Added},
{fun emqx_bridge_resource:update/4, Updated}
]),
ok = unload_hook(),
ok = load_hook(NewConf),
@ -197,13 +206,6 @@ list() ->
maps:to_list(emqx:get_raw_config([bridges], #{}))
).
list_bridges_by_connector(ConnectorId) ->
[
B
|| B = #{raw_config := #{<<"connector">> := Id}} <- list(),
ConnectorId =:= Id
].
lookup(Id) ->
{Type, Name} = emqx_bridge_resource:parse_bridge_id(Id),
lookup(Type, Name).
@ -211,6 +213,7 @@ lookup(Id) ->
lookup(Type, Name) ->
RawConf = emqx:get_raw_config([bridges, Type, Name], #{}),
lookup(Type, Name, RawConf).
lookup(Type, Name, RawConf) ->
case emqx_resource:get_instance(emqx_bridge_resource:resource_id(Type, Name)) of
{error, not_found} ->
@ -220,10 +223,15 @@ lookup(Type, Name, RawConf) ->
type => Type,
name => Name,
resource_data => Data,
raw_config => RawConf
raw_config => maybe_upgrade(Type, RawConf)
}}
end.
%% Upgrade an mqtt bridge's raw config from the legacy layout to the
%% current one; every other bridge type is returned unchanged.
maybe_upgrade(mqtt, Config) ->
emqx_bridge_mqtt_config:maybe_upgrade(Config);
maybe_upgrade(_Other, Config) ->
Config.
disable_enable(Action, BridgeType, BridgeName) when
Action =:= disable; Action =:= enable
->
@ -246,6 +254,24 @@ remove(BridgeType, BridgeName) ->
#{override_to => cluster}
).
%% Remove a bridge, guarding against rules that still reference it.
%% With RemoveDeps =:= false, refuses with
%% {error, {rules_deps_on_this_bridge, RuleIds}} when referencing rules
%% exist; with RemoveDeps =:= true, first detaches the bridge action
%% from each referencing rule, then removes the bridge.
check_deps_and_remove(BridgeType, BridgeName, RemoveDeps) ->
BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName),
%% NOTE: This violates the design: Rule depends on data-bridge but not vice versa.
case emqx_rule_engine:get_rule_ids_by_action(BridgeId) of
[] ->
remove(BridgeType, BridgeName);
RuleIds when RemoveDeps =:= false ->
{error, {rules_deps_on_this_bridge, RuleIds}};
RuleIds when RemoveDeps =:= true ->
lists:foreach(
fun(R) ->
emqx_rule_engine:ensure_action_removed(R, BridgeId)
end,
RuleIds
),
remove(BridgeType, BridgeName)
end.
%%========================================================================================
%% Helper functions
%%========================================================================================
@ -260,8 +286,16 @@ perform_bridge_changes([{Action, MapConfs} | Tasks], Result0) ->
fun
({_Type, _Name}, _Conf, {error, Reason}) ->
{error, Reason};
%% for emqx_bridge_resource:update/4
({Type, Name}, {OldConf, Conf}, _) ->
ResOpts = emqx_resource:fetch_creation_opts(Conf),
case Action(Type, Name, {OldConf, Conf}, ResOpts) of
{error, Reason} -> {error, Reason};
Return -> Return
end;
({Type, Name}, Conf, _) ->
case Action(Type, Name, Conf) of
ResOpts = emqx_resource:fetch_creation_opts(Conf),
case Action(Type, Name, Conf, ResOpts) of
{error, Reason} -> {error, Reason};
Return -> Return
end
@ -295,13 +329,8 @@ get_matched_bridges(Topic) ->
maps:fold(
fun(BType, Conf, Acc0) ->
maps:fold(
fun
%% Confs for MQTT, Kafka bridges have the `direction` flag
(_BName, #{direction := ingress}, Acc1) ->
Acc1;
(BName, #{direction := egress} = Egress, Acc1) ->
%% WebHook, MySQL bridges only have egress direction
get_matched_bridge_id(Egress, Topic, BType, BName, Acc1)
fun(BName, BConf, Acc1) ->
get_matched_bridge_id(BType, BConf, Topic, BName, Acc1)
end,
Acc0,
Conf
@ -311,9 +340,18 @@ get_matched_bridges(Topic) ->
Bridges
).
get_matched_bridge_id(#{enable := false}, _Topic, _BType, _BName, Acc) ->
get_matched_bridge_id(_BType, #{enable := false}, _Topic, _BName, Acc) ->
Acc;
get_matched_bridge_id(#{local_topic := Filter}, Topic, BType, BName, Acc) ->
get_matched_bridge_id(BType, #{local_topic := Filter}, Topic, BName, Acc) when
?EGRESS_DIR_BRIDGES(BType)
->
do_get_matched_bridge_id(Topic, Filter, BType, BName, Acc);
get_matched_bridge_id(mqtt, #{egress := #{local := #{topic := Filter}}}, Topic, BName, Acc) ->
do_get_matched_bridge_id(Topic, Filter, mqtt, BName, Acc);
get_matched_bridge_id(kafka, #{producer := #{mqtt := #{topic := Filter}}}, Topic, BName, Acc) ->
do_get_matched_bridge_id(Topic, Filter, kafka, BName, Acc).
do_get_matched_bridge_id(Topic, Filter, BType, BName, Acc) ->
case emqx_topic:match(Topic, Filter) of
true -> [emqx_bridge_resource:bridge_id(BType, BName) | Acc];
false -> Acc

View File

@ -20,6 +20,7 @@
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx_bridge/include/emqx_bridge.hrl").
-import(hoconsc, [mk/2, array/1, enum/1]).
@ -42,40 +43,20 @@
-export([lookup_from_local_node/2]).
-define(CONN_TYPES, [mqtt]).
-define(TRY_PARSE_ID(ID, EXPR),
try emqx_bridge_resource:parse_bridge_id(Id) of
{BridgeType, BridgeName} ->
EXPR
catch
error:{invalid_bridge_id, Id0} ->
throw:{invalid_bridge_id, Reason} ->
{400,
error_msg(
'INVALID_ID',
<<"invalid_bridge_id: ", Id0/binary,
". Bridge Ids must be of format {type}:{name}">>
<<"Invalid bride ID, ", Reason/binary>>
)}
end
).
-define(METRICS(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), #{
matched => MATCH,
success => SUCC,
failed => FAILED,
rate => RATE,
rate_last5m => RATE_5,
rate_max => RATE_MAX
}).
-define(metrics(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), #{
matched := MATCH,
success := SUCC,
failed := FAILED,
rate := RATE,
rate_last5m := RATE_5,
rate_max := RATE_MAX
}).
namespace() -> "bridge".
api_spec() ->
@ -110,7 +91,7 @@ param_path_operation_cluster() ->
#{
in => path,
required => true,
example => <<"start">>,
example => <<"restart">>,
desc => ?DESC("desc_param_path_operation_cluster")
}
)}.
@ -146,7 +127,7 @@ param_path_id() ->
#{
in => path,
required => true,
example => <<"webhook:my_webhook">>,
example => <<"webhook:webhook_example">>,
desc => ?DESC("desc_param_path_id")
}
)}.
@ -155,70 +136,58 @@ bridge_info_array_example(Method) ->
[Config || #{value := Config} <- maps:values(bridge_info_examples(Method))].
bridge_info_examples(Method) ->
maps:merge(conn_bridge_examples(Method), #{
<<"my_webhook">> => #{
summary => <<"WebHook">>,
value => info_example(webhook, awesome, Method)
}
}).
conn_bridge_examples(Method) ->
lists:foldl(
fun(Type, Acc) ->
SType = atom_to_list(Type),
KeyIngress = bin(SType ++ "_ingress"),
KeyEgress = bin(SType ++ "_egress"),
maps:merge(Acc, #{
KeyIngress => #{
summary => bin(string:uppercase(SType) ++ " Ingress Bridge"),
value => info_example(Type, ingress, Method)
},
KeyEgress => #{
summary => bin(string:uppercase(SType) ++ " Egress Bridge"),
value => info_example(Type, egress, Method)
}
})
end,
#{},
?CONN_TYPES
).
info_example(Type, Direction, Method) ->
maps:merge(
info_example_basic(Type, Direction),
method_example(Type, Direction, Method)
#{
<<"webhook_example">> => #{
summary => <<"WebHook">>,
value => info_example(webhook, Method)
},
<<"mqtt_example">> => #{
summary => <<"MQTT Bridge">>,
value => info_example(mqtt, Method)
}
},
ee_bridge_examples(Method)
).
method_example(Type, Direction, Method) when Method == get; Method == post ->
%% @doc Fetch enterprise-edition bridge config examples for the API docs.
%% Best-effort on purpose: the catch-all returns #{} when the ee module is
%% not present (community edition) or its examples/1 call fails.
ee_bridge_examples(Method) ->
    try
        emqx_ee_bridge:examples(Method)
    catch
        _:_ -> #{}
    end.
%% @doc Build a complete example config for one bridge type: the type's
%% basic config merged with the method-specific fields (type/name for
%% get/post, plus metrics for get; empty for put).
info_example(Type, Method) ->
    Basic = info_example_basic(Type),
    PerMethod = method_example(Type, Method),
    maps:merge(Basic, PerMethod).
method_example(Type, Method) when Method == get; Method == post ->
SType = atom_to_list(Type),
SDir = atom_to_list(Direction),
SName =
case Type of
webhook -> "my_" ++ SType;
_ -> "my_" ++ SDir ++ "_" ++ SType ++ "_bridge"
end,
TypeNameExamp = #{
SName = SType ++ "_example",
TypeNameExam = #{
type => bin(SType),
name => bin(SName)
},
maybe_with_metrics_example(TypeNameExamp, Method);
method_example(_Type, _Direction, put) ->
maybe_with_metrics_example(TypeNameExam, Method);
method_example(_Type, put) ->
#{}.
maybe_with_metrics_example(TypeNameExamp, get) ->
TypeNameExamp#{
metrics => ?METRICS(0, 0, 0, 0, 0, 0),
maybe_with_metrics_example(TypeNameExam, get) ->
TypeNameExam#{
metrics => ?EMPTY_METRICS,
node_metrics => [
#{
node => node(),
metrics => ?METRICS(0, 0, 0, 0, 0, 0)
metrics => ?EMPTY_METRICS
}
]
};
maybe_with_metrics_example(TypeNameExamp, _) ->
TypeNameExamp.
maybe_with_metrics_example(TypeNameExam, _) ->
TypeNameExam.
info_example_basic(webhook, _) ->
info_example_basic(webhook) ->
#{
enable => true,
url => <<"http://localhost:9901/messages/${topic}">>,
@ -231,30 +200,70 @@ info_example_basic(webhook, _) ->
ssl => #{enable => false},
local_topic => <<"emqx_webhook/#">>,
method => post,
body => <<"${payload}">>
body => <<"${payload}">>,
resource_opts => #{
worker_pool_size => 1,
health_check_interval => 15000,
auto_restart_interval => 15000,
query_mode => async,
async_inflight_window => 100,
enable_queue => false,
max_queue_bytes => 100 * 1024 * 1024
}
};
info_example_basic(mqtt, ingress) ->
info_example_basic(mqtt) ->
(mqtt_main_example())#{
egress => mqtt_egress_example(),
ingress => mqtt_ingress_example()
}.
mqtt_main_example() ->
#{
enable => true,
connector => <<"mqtt:my_mqtt_connector">>,
direction => ingress,
remote_topic => <<"aws/#">>,
remote_qos => 1,
local_topic => <<"from_aws/${topic}">>,
local_qos => <<"${qos}">>,
payload => <<"${payload}">>,
retain => <<"${retain}">>
};
info_example_basic(mqtt, egress) ->
mode => cluster_shareload,
server => <<"127.0.0.1:1883">>,
proto_ver => <<"v4">>,
username => <<"foo">>,
password => <<"bar">>,
clean_start => true,
keepalive => <<"300s">>,
retry_interval => <<"15s">>,
max_inflight => 100,
resource_opts => #{
health_check_interval => <<"15s">>,
auto_restart_interval => <<"60s">>,
query_mode => sync,
enable_queue => false,
max_queue_bytes => 100 * 1024 * 1024
},
ssl => #{
enable => false
}
}.
mqtt_egress_example() ->
#{
enable => true,
connector => <<"mqtt:my_mqtt_connector">>,
direction => egress,
local_topic => <<"emqx/#">>,
remote_topic => <<"from_emqx/${topic}">>,
remote_qos => <<"${qos}">>,
payload => <<"${payload}">>,
retain => false
local => #{
topic => <<"emqx/#">>
},
remote => #{
topic => <<"from_emqx/${topic}">>,
qos => <<"${qos}">>,
payload => <<"${payload}">>,
retain => false
}
}.
%% @doc Example `ingress` section of an mqtt bridge, used in the API docs.
mqtt_ingress_example() ->
    Remote = #{
        topic => <<"aws/#">>,
        qos => 1
    },
    Local = #{
        topic => <<"from_aws/${topic}">>,
        qos => <<"${qos}">>,
        payload => <<"${payload}">>,
        retain => <<"${retain}">>
    },
    #{remote => Remote, local => Local}.
schema("/bridges") ->
@ -321,6 +330,7 @@ schema("/bridges/:id") ->
responses => #{
204 => <<"Bridge deleted">>,
400 => error_schema(['INVALID_ID'], "Update bridge failed"),
403 => error_schema('FORBIDDEN_REQUEST', "Forbidden operation"),
503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable")
}
}
@ -414,13 +424,28 @@ schema("/nodes/:node/bridges/:id/operation/:operation") ->
{404, error_msg('NOT_FOUND', <<"bridge not found">>)}
end
);
'/bridges/:id'(delete, #{bindings := #{id := Id}}) ->
'/bridges/:id'(delete, #{bindings := #{id := Id}, query_string := Qs}) ->
AlsoDeleteActs =
case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of
<<"true">> -> true;
true -> true;
_ -> false
end,
?TRY_PARSE_ID(
Id,
case emqx_bridge:remove(BridgeType, BridgeName) of
{ok, _} -> {204};
{error, timeout} -> {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)};
{error, Reason} -> {500, error_msg('INTERNAL_ERROR', Reason)}
case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActs) of
{ok, _} ->
204;
{error, {rules_deps_on_this_bridge, RuleIds}} ->
{403,
error_msg(
'FORBIDDEN_REQUEST',
{<<"There're some rules dependent on this bridge">>, RuleIds}
)};
{error, timeout} ->
{503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)};
{error, Reason} ->
{500, error_msg('INTERNAL_ERROR', Reason)}
end
).
@ -602,19 +627,36 @@ collect_metrics(Bridges) ->
[maps:with([node, metrics], B) || B <- Bridges].
aggregate_metrics(AllMetrics) ->
InitMetrics = ?METRICS(0, 0, 0, 0, 0, 0),
InitMetrics = ?EMPTY_METRICS,
lists:foldl(
fun(
#{metrics := ?metrics(Match1, Succ1, Failed1, Rate1, Rate5m1, RateMax1)},
?metrics(Match0, Succ0, Failed0, Rate0, Rate5m0, RateMax0)
#{
metrics := ?metrics(
M1, M2, M3, M4, M5, M6, M7, M8, M9, M10, M11, M12, M13, M14, M15, M16, M17
)
},
?metrics(
N1, N2, N3, N4, N5, N6, N7, N8, N9, N10, N11, N12, N13, N14, N15, N16, N17
)
) ->
?METRICS(
Match1 + Match0,
Succ1 + Succ0,
Failed1 + Failed0,
Rate1 + Rate0,
Rate5m1 + Rate5m0,
RateMax1 + RateMax0
M1 + N1,
M2 + N2,
M3 + N3,
M4 + N4,
M5 + N5,
M6 + N6,
M7 + N7,
M8 + N8,
M9 + N9,
M10 + N10,
M11 + N11,
M12 + N12,
M13 + N13,
M14 + N14,
M15 + N15,
M16 + N16,
M17 + N17
)
end,
InitMetrics,
@ -643,12 +685,45 @@ format_resp(
}.
format_metrics(#{
counters := #{failed := Failed, exception := Ex, matched := Match, success := Succ},
counters := #{
'batching' := Batched,
'dropped' := Dropped,
'dropped.other' := DroppedOther,
'dropped.queue_full' := DroppedQueueFull,
'dropped.queue_not_enabled' := DroppedQueueNotEnabled,
'dropped.resource_not_found' := DroppedResourceNotFound,
'dropped.resource_stopped' := DroppedResourceStopped,
'matched' := Matched,
'queuing' := Queued,
'retried' := Retried,
'failed' := SentFailed,
'inflight' := SentInflight,
'success' := SentSucc,
'received' := Rcvd
},
rate := #{
matched := #{current := Rate, last5m := Rate5m, max := RateMax}
}
}) ->
?METRICS(Match, Succ, Failed + Ex, Rate, Rate5m, RateMax).
?METRICS(
Batched,
Dropped,
DroppedOther,
DroppedQueueFull,
DroppedQueueNotEnabled,
DroppedResourceNotFound,
DroppedResourceStopped,
Matched,
Queued,
Retried,
SentFailed,
SentInflight,
SentSucc,
Rate,
Rate5m,
RateMax,
Rcvd
).
fill_defaults(Type, RawConf) ->
PackedConf = pack_bridge_conf(Type, RawConf),
@ -713,6 +788,17 @@ call_operation(Node, OperFunc, BridgeType, BridgeName) ->
{200};
{error, timeout} ->
{503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)};
{error, {start_pool_failed, Name, Reason}} ->
{503,
error_msg(
'SERVICE_UNAVAILABLE',
bin(
io_lib:format(
"failed to start ~p pool for reason ~p",
[Name, Reason]
)
)
)};
{error, Reason} ->
{500, error_msg('INTERNAL_ERROR', Reason)}
end;

View File

@ -29,6 +29,7 @@
start(_StartType, _StartArgs) ->
{ok, Sup} = emqx_bridge_sup:start_link(),
ok = start_ee_apps(),
ok = emqx_bridge:load(),
ok = emqx_bridge:load_hook(),
ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, ?MODULE),
@ -41,6 +42,15 @@ stop(_State) ->
ok = emqx_bridge:unload_hook(),
ok.
-if(?EMQX_RELEASE_EDITION == ee).
start_ee_apps() ->
{ok, _} = application:ensure_all_started(emqx_ee_bridge),
ok.
-else.
start_ee_apps() ->
ok.
-endif.
%% NOTE: We depends on the `emqx_bridge:pre_config_update/3` to restart/stop the
%% underlying resources.
pre_config_update(_, {_Oper, _, _}, undefined) ->

View File

@ -1,68 +0,0 @@
-module(emqx_bridge_mqtt_schema).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-import(hoconsc, [mk/2]).
-export([roots/0, fields/1, desc/1]).
%%======================================================================================
%% Hocon Schema Definitions
roots() -> [].
fields("ingress") ->
[emqx_bridge_schema:direction_field(ingress, emqx_connector_mqtt_schema:ingress_desc())] ++
emqx_bridge_schema:common_bridge_fields(mqtt_connector_ref()) ++
proplists:delete(hookpoint, emqx_connector_mqtt_schema:fields("ingress"));
fields("egress") ->
[emqx_bridge_schema:direction_field(egress, emqx_connector_mqtt_schema:egress_desc())] ++
emqx_bridge_schema:common_bridge_fields(mqtt_connector_ref()) ++
emqx_connector_mqtt_schema:fields("egress");
fields("post_ingress") ->
[
type_field(),
name_field()
] ++ proplists:delete(enable, fields("ingress"));
fields("post_egress") ->
[
type_field(),
name_field()
] ++ proplists:delete(enable, fields("egress"));
fields("put_ingress") ->
proplists:delete(enable, fields("ingress"));
fields("put_egress") ->
proplists:delete(enable, fields("egress"));
fields("get_ingress") ->
emqx_bridge_schema:metrics_status_fields() ++ fields("post_ingress");
fields("get_egress") ->
emqx_bridge_schema:metrics_status_fields() ++ fields("post_egress").
desc(Rec) when Rec =:= "ingress"; Rec =:= "egress" ->
?DESC("desc_rec");
desc(_) ->
undefined.
%%======================================================================================
type_field() ->
{type,
mk(
mqtt,
#{
required => true,
desc => ?DESC("desc_type")
}
)}.
name_field() ->
{name,
mk(
binary(),
#{
required => true,
desc => ?DESC("desc_name")
}
)}.
mqtt_connector_ref() ->
?R_REF(emqx_connector_mqtt_schema, "connector").

View File

@ -34,18 +34,30 @@
create_dry_run/2,
remove/1,
remove/2,
remove/3,
remove/4,
update/2,
update/3,
update/4,
stop/2,
restart/2,
reset_metrics/1
]).
%% bi-directional bridge with producer/consumer or ingress/egress configs
-define(IS_BI_DIR_BRIDGE(TYPE), TYPE =:= <<"mqtt">>; TYPE =:= <<"kafka">>).
-if(?EMQX_RELEASE_EDITION == ee).
bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt;
bridge_to_resource_type(mqtt) -> emqx_connector_mqtt;
bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http;
bridge_to_resource_type(webhook) -> emqx_connector_http;
bridge_to_resource_type(BridgeType) -> emqx_ee_bridge:resource_type(BridgeType).
-else.
bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt;
bridge_to_resource_type(mqtt) -> emqx_connector_mqtt;
bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http;
bridge_to_resource_type(webhook) -> emqx_connector_http.
-endif.
resource_id(BridgeId) when is_binary(BridgeId) ->
<<"bridge:", BridgeId/binary>>.
@ -63,14 +75,44 @@ bridge_id(BridgeType, BridgeName) ->
parse_bridge_id(BridgeId) ->
case string:split(bin(BridgeId), ":", all) of
[Type, Name] ->
case emqx_misc:safe_to_existing_atom(Type, utf8) of
{ok, Type1} ->
{Type1, Name};
_ ->
error({invalid_bridge_id, BridgeId})
end;
{to_type_atom(Type), validate_name(Name)};
_ ->
error({invalid_bridge_id, BridgeId})
invalid_bridge_id(
<<"should be of forst {type}:{name}, but got ", BridgeId/binary>>
)
end.
%% @doc Validate a bridge name: it must be a non-empty UTF-8 binary made up
%% only of the characters 0-9, a-z, A-Z, '_', '-' and '.'.
%% Returns the original binary on success; otherwise throws
%% {invalid_bridge_id, Reason} via invalid_bridge_id/1.
%%
%% Fix: the two error messages were swapped — the character-set message was
%% reported for empty/undecodable names and vice versa.
validate_name(Name0) ->
    Name = unicode:characters_to_list(Name0, utf8),
    case is_list(Name) andalso Name =/= [] of
        true ->
            case lists:all(fun is_id_char/1, Name) of
                true ->
                    Name0;
                false ->
                    %% character-set violation: report the allowed alphabet
                    invalid_bridge_id(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
            end;
        false ->
            %% empty name, or the binary is not valid UTF-8
            invalid_bridge_id(<<"bad name: ", Name0/binary>>)
    end.
%% Throws {invalid_bridge_id, Reason}; ?FUNCTION_NAME expands to this
%% function's own name, keeping the throw tag in sync with the function name.
-spec invalid_bridge_id(binary()) -> no_return().
invalid_bridge_id(Reason) -> throw({?FUNCTION_NAME, Reason}).
%% @doc Character predicate for bridge names: accepts ASCII digits,
%% lower/upper-case letters, and the punctuation '_', '-' and '.'.
is_id_char(C) ->
    (C >= $0 andalso C =< $9) orelse
        (C >= $a andalso C =< $z) orelse
        (C >= $A andalso C =< $Z) orelse
        lists:member(C, [$_, $-, $.]).
%% @doc Convert a bridge type binary to an existing atom.
%% binary_to_existing_atom is used on purpose so that unknown/attacker
%% supplied types cannot leak new atoms; any failure is reported as an
%% invalid bridge id (throws via invalid_bridge_id/1).
to_type_atom(Type) ->
    try
        erlang:binary_to_existing_atom(Type, utf8)
    catch
        _:_ ->
            invalid_bridge_id(<<"unknown type: ", Type/binary>>)
    end.
reset_metrics(ResourceId) ->
@ -88,7 +130,7 @@ create(BridgeId, Conf) ->
create(BridgeType, BridgeName, Conf).
create(Type, Name, Conf) ->
create(Type, Name, Conf, #{auto_retry_interval => 60000}).
create(Type, Name, Conf, #{}).
create(Type, Name, Conf, Opts) ->
?SLOG(info, #{
@ -101,7 +143,7 @@ create(Type, Name, Conf, Opts) ->
resource_id(Type, Name),
<<"emqx_bridge">>,
bridge_to_resource_type(Type),
parse_confs(Type, Name, Conf),
parse_confs(bin(Type), Name, Conf),
Opts
),
maybe_disable_bridge(Type, Name, Conf).
@ -111,6 +153,9 @@ update(BridgeId, {OldConf, Conf}) ->
update(BridgeType, BridgeName, {OldConf, Conf}).
update(Type, Name, {OldConf, Conf}) ->
update(Type, Name, {OldConf, Conf}, #{}).
update(Type, Name, {OldConf, Conf}, Opts) ->
%% TODO: sometimes its not necessary to restart the bridge connection.
%%
%% - if the connection related configs like `servers` is updated, we should restart/start
@ -127,7 +172,7 @@ update(Type, Name, {OldConf, Conf}) ->
name => Name,
config => Conf
}),
case recreate(Type, Name, Conf) of
case recreate(Type, Name, Conf, Opts) of
{ok, _} ->
maybe_disable_bridge(Type, Name, Conf);
{error, not_found} ->
@ -137,7 +182,7 @@ update(Type, Name, {OldConf, Conf}) ->
name => Name,
config => Conf
}),
create(Type, Name, Conf);
create(Type, Name, Conf, Opts);
{error, Reason} ->
{error, {update_bridge_failed, Reason}}
end;
@ -158,41 +203,38 @@ recreate(Type, Name) ->
recreate(Type, Name, emqx:get_config([bridges, Type, Name])).
recreate(Type, Name, Conf) ->
recreate(Type, Name, Conf, #{}).
recreate(Type, Name, Conf, Opts) ->
emqx_resource:recreate_local(
resource_id(Type, Name),
bridge_to_resource_type(Type),
parse_confs(Type, Name, Conf),
#{auto_retry_interval => 60000}
parse_confs(bin(Type), Name, Conf),
Opts
).
create_dry_run(Type, Conf) ->
Conf0 = fill_dry_run_conf(Conf),
case emqx_resource:check_config(bridge_to_resource_type(Type), Conf0) of
{ok, Conf1} ->
TmpPath = iolist_to_binary(["bridges-create-dry-run:", emqx_misc:gen_id(8)]),
case emqx_connector_ssl:convert_certs(TmpPath, Conf1) of
{error, Reason} ->
{error, Reason};
{ok, ConfNew} ->
Res = emqx_resource:create_dry_run_local(
bridge_to_resource_type(Type), ConfNew
),
_ = maybe_clear_certs(TmpPath, ConfNew),
Res
end;
{error, _} = Error ->
Error
TmpPath = iolist_to_binary(["bridges-create-dry-run:", emqx_misc:gen_id(8)]),
case emqx_connector_ssl:convert_certs(TmpPath, Conf) of
{error, Reason} ->
{error, Reason};
{ok, ConfNew} ->
Res = emqx_resource:create_dry_run_local(
bridge_to_resource_type(Type), ConfNew
),
_ = maybe_clear_certs(TmpPath, ConfNew),
Res
end.
remove(BridgeId) ->
{BridgeType, BridgeName} = parse_bridge_id(BridgeId),
remove(BridgeType, BridgeName, #{}).
remove(BridgeType, BridgeName, #{}, #{}).
remove(Type, Name) ->
remove(Type, Name, undefined).
remove(Type, Name, #{}, #{}).
%% just for perform_bridge_changes/1
remove(Type, Name, _Conf) ->
remove(Type, Name, _Conf, _Opts) ->
?SLOG(info, #{msg => "remove_bridge", type => Type, name => Name}),
case emqx_resource:remove_local(resource_id(Type, Name)) of
ok -> ok;
@ -206,19 +248,6 @@ maybe_disable_bridge(Type, Name, Conf) ->
true -> ok
end.
fill_dry_run_conf(Conf) ->
Conf#{
<<"egress">> =>
#{
<<"remote_topic">> => <<"t">>,
<<"remote_qos">> => 0,
<<"retain">> => true,
<<"payload">> => <<"val">>
},
<<"ingress">> =>
#{<<"remote_topic">> => <<"t">>}
}.
maybe_clear_certs(TmpPath, #{ssl := SslConf} = Conf) ->
%% don't remove the cert files if they are in use
case is_tmp_path_conf(TmpPath, SslConf) of
@ -238,8 +267,9 @@ is_tmp_path_conf(_TmpPath, _Conf) ->
%% @doc True when File lives under TmpPath (substring check on the
%% stringified paths); used to decide whether dry-run cert files may be
%% deleted without touching certs that are in real use.
is_tmp_path(TmpPath, File) ->
    string:str(str(File), str(TmpPath)) > 0.
%% convert bridge configs to what the connector modules want
parse_confs(
webhook,
<<"webhook">>,
_Name,
#{
url := Url,
@ -264,42 +294,14 @@ parse_confs(
max_retries => Retry
}
};
parse_confs(Type, Name, #{connector := ConnId, direction := Direction} = Conf) when
is_binary(ConnId)
->
case emqx_connector:parse_connector_id(ConnId) of
{Type, ConnName} ->
ConnectorConfs = emqx:get_config([connectors, Type, ConnName]),
make_resource_confs(
Direction,
ConnectorConfs,
maps:without([connector, direction], Conf),
Type,
Name
);
{_ConnType, _ConnName} ->
error({cannot_use_connector_with_different_type, ConnId})
end;
parse_confs(Type, Name, #{connector := ConnectorConfs, direction := Direction} = Conf) when
is_map(ConnectorConfs)
->
make_resource_confs(
Direction,
ConnectorConfs,
maps:without([connector, direction], Conf),
Type,
Name
).
make_resource_confs(ingress, ConnectorConfs, BridgeConf, Type, Name) ->
parse_confs(Type, Name, Conf) when ?IS_BI_DIR_BRIDGE(Type) ->
%% For some drivers that can be used as data-sources, we need to provide a
%% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it
%% receives a message from the external database.
BName = bridge_id(Type, Name),
ConnectorConfs#{
ingress => BridgeConf#{hookpoint => <<"$bridges/", BName/binary>>}
};
make_resource_confs(egress, ConnectorConfs, BridgeConf, _Type, _Name) ->
ConnectorConfs#{
egress => BridgeConf
}.
Conf#{hookpoint => <<"$bridges/", BName/binary>>, bridge_name => Name};
parse_confs(_Type, _Name, Conf) ->
Conf.
parse_url(Url) ->
case string:split(Url, "//", leading) of

View File

@ -1,173 +0,0 @@
-module(emqx_bridge_schema).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-import(hoconsc, [mk/2, ref/2]).
-export([roots/0, fields/1, desc/1, namespace/0]).
-export([
get_response/0,
put_request/0,
post_request/0
]).
-export([
common_bridge_fields/1,
metrics_status_fields/0,
direction_field/2
]).
%%======================================================================================
%% Hocon Schema Definitions
-define(CONN_TYPES, [mqtt]).
%%======================================================================================
%% For HTTP APIs
get_response() ->
http_schema("get").
put_request() ->
http_schema("put").
post_request() ->
http_schema("post").
http_schema(Method) ->
Schemas = lists:flatmap(
fun(Type) ->
[
ref(schema_mod(Type), Method ++ "_ingress"),
ref(schema_mod(Type), Method ++ "_egress")
]
end,
?CONN_TYPES
),
hoconsc:union([
ref(emqx_bridge_webhook_schema, Method)
| Schemas
]).
common_bridge_fields(ConnectorRef) ->
[
{enable,
mk(
boolean(),
#{
desc => ?DESC("desc_enable"),
default => true
}
)},
{connector,
mk(
hoconsc:union([binary(), ConnectorRef]),
#{
required => true,
example => <<"mqtt:my_mqtt_connector">>,
desc => ?DESC("desc_connector")
}
)}
].
metrics_status_fields() ->
[
{"metrics", mk(ref(?MODULE, "metrics"), #{desc => ?DESC("desc_metrics")})},
{"node_metrics",
mk(
hoconsc:array(ref(?MODULE, "node_metrics")),
#{desc => ?DESC("desc_node_metrics")}
)},
{"status", mk(status(), #{desc => ?DESC("desc_status")})},
{"node_status",
mk(
hoconsc:array(ref(?MODULE, "node_status")),
#{desc => ?DESC("desc_node_status")}
)}
].
direction_field(Dir, Desc) ->
{direction,
mk(
Dir,
#{
required => true,
default => egress,
desc => "The direction of the bridge. Can be one of 'ingress' or 'egress'.<br/>" ++
Desc
}
)}.
%%======================================================================================
%% For config files
namespace() -> "bridge".
roots() -> [bridges].
fields(bridges) ->
[
{webhook,
mk(
hoconsc:map(name, ref(emqx_bridge_webhook_schema, "config")),
#{desc => ?DESC("bridges_webhook")}
)}
] ++
[
{T,
mk(
hoconsc:map(
name,
hoconsc:union([
ref(schema_mod(T), "ingress"),
ref(schema_mod(T), "egress")
])
),
#{desc => ?DESC("bridges_name")}
)}
|| T <- ?CONN_TYPES
];
fields("metrics") ->
[
{"matched", mk(integer(), #{desc => ?DESC("metric_matched")})},
{"success", mk(integer(), #{desc => ?DESC("metric_success")})},
{"failed", mk(integer(), #{desc => ?DESC("metric_failed")})},
{"rate", mk(float(), #{desc => ?DESC("metric_rate")})},
{"rate_max", mk(float(), #{desc => ?DESC("metric_rate_max")})},
{"rate_last5m",
mk(
float(),
#{desc => ?DESC("metric_rate_last5m")}
)}
];
fields("node_metrics") ->
[
node_name(),
{"metrics", mk(ref(?MODULE, "metrics"), #{})}
];
fields("node_status") ->
[
node_name(),
{"status", mk(status(), #{})}
].
desc(bridges) ->
?DESC("desc_bridges");
desc("metrics") ->
?DESC("desc_metrics");
desc("node_metrics") ->
?DESC("desc_node_metrics");
desc("node_status") ->
?DESC("desc_node_status");
desc(_) ->
undefined.
status() ->
hoconsc:enum([connected, disconnected, connecting]).
node_name() ->
{"node", mk(binary(), #{desc => ?DESC("desc_node_name"), example => "emqx@127.0.0.1"})}.
schema_mod(Type) ->
list_to_atom(lists:concat(["emqx_bridge_", Type, "_schema"])).

View File

@ -0,0 +1,118 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% @doc This module was created to convert old version (from v5.0.0 to v5.0.11)
%% mqtt connector configs to newer version (developed for enterprise edition).
-module(emqx_bridge_mqtt_config).
-export([
upgrade_pre_ee/1,
maybe_upgrade/1
]).
%% @doc Converter for the raw `bridges.mqtt` config: upgrade every bridge's
%% config from the pre-ee (v5.0.0 - v5.0.11) layout when needed.
%% Accepts `undefined` (key absent), a Name => Config map, or a proplist of
%% {Name, Config} pairs.
upgrade_pre_ee(undefined) ->
    undefined;
upgrade_pre_ee(Conf0) when is_map(Conf0) ->
    maps:from_list(upgrade_pre_ee(maps:to_list(Conf0)));
upgrade_pre_ee(Bridges) when is_list(Bridges) ->
    lists:map(
        fun({Name, Config}) -> {Name, maybe_upgrade(Config)} end,
        Bridges
    ).
%% @doc Upgrade a single mqtt bridge config if it is in the old layout.
%% The presence of a <<"connector">> key identifies the old (pre-ee) format;
%% anything else is assumed to already be new-format and passed through.
maybe_upgrade(#{<<"connector">> := _} = Config0) ->
    Config1 = up(Config0),
    %% up/1 returns an atom-keyed proplist; re-key as binaries to match
    %% the raw-config convention before turning it back into a map
    Config = lists:map(fun binary_key/1, Config1),
    maps:from_list(Config);
maybe_upgrade(NewVersion) ->
    NewVersion.
%% @doc Re-key a {AtomKey, Value} pair as {BinaryKey, Value}, encoding the
%% atom key as UTF-8.
binary_key({Key, Value}) ->
    BinKey = atom_to_binary(Key, utf8),
    {BinKey, Value}.
%% @doc Flatten an old-format bridge config (bridge + embedded connector)
%% into an atom-keyed proplist in the new layout.
%% Connector-level settings are lifted out of the <<"connector">> sub-map
%% (with defaults when absent), and the old flat direction fields are
%% regrouped under an `egress`/`ingress` section by egress/1 or ingress/1.
up(#{<<"connector">> := Connector} = Config) ->
    %% Cn: fetch Key0 from the old connector sub-map, falling back to Default
    Cn = fun(Key0, Default) ->
        Key = atom_to_binary(Key0, utf8),
        {Key0, maps:get(Key, Connector, Default)}
    end,
    Direction =
        case maps:get(<<"direction">>, Config) of
            <<"egress">> ->
                {egress, egress(Config)};
            <<"ingress">> ->
                {ingress, ingress(Config)}
        end,
    Enable = maps:get(<<"enable">>, Config, true),
    [
        Cn(bridge_mode, false),
        Cn(username, <<>>),
        Cn(password, <<>>),
        Cn(clean_start, true),
        Cn(keepalive, <<"60s">>),
        Cn(mode, <<"cluster_shareload">>),
        Cn(proto_ver, <<"v4">>),
        Cn(server, undefined),
        Cn(retry_interval, <<"15s">>),
        Cn(reconnect_interval, <<"15s">>),
        Cn(ssl, default_ssl()),
        {enable, Enable},
        {resource_opts, default_resource_opts()},
        Direction
    ].
%% @doc SSL options used when the old connector config carried none:
%% TLS disabled, but peer verification on once it is enabled.
default_ssl() ->
    maps:from_list([
        {<<"enable">>, false},
        {<<"verify">>, <<"verify_peer">>}
    ]).
%% @doc Resource options injected during upgrade — the old format had no
%% resource_opts section, so the new-format defaults are filled in here.
default_resource_opts() ->
    maps:from_list([
        {<<"async_inflight_window">>, 100},
        {<<"auto_restart_interval">>, <<"60s">>},
        {<<"enable_queue">>, false},
        {<<"health_check_interval">>, <<"15s">>},
        {<<"max_queue_bytes">>, <<"1GB">>},
        {<<"query_mode">>, <<"sync">>},
        {<<"worker_pool_size">>, 16}
    ]).
%% @doc Regroup the old flat egress fields under a nested `remote` section.
%% All four fields are mandatory in the old layout, hence maps:get/2 with no
%% default. The old version had no 'local' config for egress, so none is
%% emitted here.
egress(Config) ->
    Topic = maps:get(<<"remote_topic">>, Config),
    QoS = maps:get(<<"remote_qos">>, Config),
    Retain = maps:get(<<"retain">>, Config),
    Payload = maps:get(<<"payload">>, Config),
    Remote = #{
        <<"topic">> => Topic,
        <<"qos">> => QoS,
        <<"retain">> => Retain,
        <<"payload">> => Payload
    },
    #{<<"remote">> => Remote}.
%% @doc Regroup the old flat ingress fields into nested `remote`/`local`
%% sections. `retain` was optional in the old layout, so it defaults to
%% false; the old version had no local topic for ingress, so `local` has
%% no <<"topic">> key.
ingress(Config) ->
    Remote = #{
        <<"qos">> => maps:get(<<"remote_qos">>, Config),
        <<"topic">> => maps:get(<<"remote_topic">>, Config)
    },
    Local = #{
        <<"payload">> => maps:get(<<"payload">>, Config),
        <<"qos">> => maps:get(<<"local_qos">>, Config),
        <<"retain">> => maps:get(<<"retain">>, Config, false)
    },
    #{<<"remote">> => Remote, <<"local">> => Local}.

View File

@ -0,0 +1,57 @@
-module(emqx_bridge_mqtt_schema).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-import(hoconsc, [mk/2, ref/2]).
-export([roots/0, fields/1, desc/1, namespace/0]).
%%======================================================================================
%% Hocon Schema Definitions
namespace() -> "bridge_mqtt".
roots() -> [].
%% @doc Hocon schema fields for the mqtt bridge.
%% "config": common bridge fields (enable) + resource_opts + the mqtt
%% connector fields; "creation_opts": resource opts minus batching (the
%% mqtt bridge does not batch); "post"/"put"/"get": the REST method views.
fields("config") ->
    emqx_bridge_schema:common_bridge_fields() ++
        [
            {resource_opts,
                mk(
                    ref(?MODULE, "creation_opts"),
                    #{
                        required => false,
                        default => #{},
                        desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
                    }
                )}
        ] ++
        emqx_connector_mqtt_schema:fields("config");
fields("creation_opts") ->
    Opts = emqx_resource_schema:fields("creation_opts"),
    [O || {Field, _} = O <- Opts, not is_hidden_opts(Field)];
fields("post") ->
    [type_field(), name_field() | fields("config")];
fields("put") ->
    fields("config");
fields("get") ->
    %% Fix: build GET on the "post" fields (which include type/name) for
    %% consistency with emqx_bridge_webhook_schema — GET responses carry
    %% the bridge's type and name.
    emqx_bridge_schema:metrics_status_fields() ++ fields("post").
%% @doc Struct descriptions for the mqtt bridge schema; "creation_opts"
%% reuses the shared resource-schema description, anything else is
%% undocumented.
desc("config") ->
    ?DESC("config");
desc("creation_opts" = Name) ->
    emqx_resource_schema:desc(Name);
desc(_) ->
    undefined.
%%======================================================================================
%% internal
%% @doc Resource creation options hidden from the mqtt bridge schema —
%% batching is filtered out of "creation_opts" (see fields/1).
is_hidden_opts(Field) ->
    Hidden = [enable_batch, batch_size, batch_time],
    lists:member(Field, Hidden).
%% `type` field: the constant atom `mqtt` for this schema.
type_field() ->
    {type, mk(mqtt, #{required => true, desc => ?DESC("desc_type")})}.
%% `name` field: the user-chosen bridge name.
name_field() ->
    {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}.

View File

@ -0,0 +1,181 @@
-module(emqx_bridge_schema).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-import(hoconsc, [mk/2, ref/2]).
-export([roots/0, fields/1, desc/1, namespace/0]).
-export([
get_response/0,
put_request/0,
post_request/0
]).
-export([
common_bridge_fields/0,
metrics_status_fields/0
]).
%%======================================================================================
%% Hocon Schema Definitions
%%======================================================================================
%% For HTTP APIs
%% Top-level HTTP API schemas: one union schema per REST method.
get_response() ->
    api_schema("get").
put_request() ->
    api_schema("put").
post_request() ->
    api_schema("post").
%% @doc Union of all bridge API schemas for the given method: the
%% open-source webhook/mqtt schemas plus any enterprise bridge schemas
%% (empty list on the community edition).
api_schema(Method) ->
    BrokerMods = [emqx_bridge_webhook_schema, emqx_bridge_mqtt_schema],
    Broker = lists:map(fun(Mod) -> ref(Mod, Method) end, BrokerMods),
    EE = ee_api_schemas(Method),
    hoconsc:union(Broker ++ EE).
%% @doc Enterprise-edition API schemas, or [] on the community edition.
ee_api_schemas(Method) ->
    %% must ensure the app is loaded before checking if fn is defined.
    ensure_loaded(emqx_ee_bridge, emqx_ee_bridge),
    case erlang:function_exported(emqx_ee_bridge, api_schemas, 1) of
        true -> emqx_ee_bridge:api_schemas(Method);
        false -> []
    end.
%% @doc Enterprise-edition entries for the `bridges` root, or [] on the
%% community edition.
ee_fields_bridges() ->
    %% must ensure the app is loaded before checking if fn is defined.
    ensure_loaded(emqx_ee_bridge, emqx_ee_bridge),
    case erlang:function_exported(emqx_ee_bridge, fields, 1) of
        true -> emqx_ee_bridge:fields(bridges);
        false -> []
    end.
%% @doc Config fields shared by every bridge type — currently just
%% `enable` (bridges are enabled by default).
common_bridge_fields() ->
    [
        {enable,
            mk(
                boolean(),
                #{
                    desc => ?DESC("desc_enable"),
                    default => true
                }
            )}
    ].
%% @doc Read-only fields appended to GET responses: aggregated metrics and
%% status, plus their per-node breakdowns.
metrics_status_fields() ->
    [
        {"metrics", mk(ref(?MODULE, "metrics"), #{desc => ?DESC("desc_metrics")})},
        {"node_metrics",
            mk(
                hoconsc:array(ref(?MODULE, "node_metrics")),
                #{desc => ?DESC("desc_node_metrics")}
            )},
        {"status", mk(status(), #{desc => ?DESC("desc_status")})},
        {"node_status",
            mk(
                hoconsc:array(ref(?MODULE, "node_status")),
                #{desc => ?DESC("desc_node_status")}
            )}
    ].
%%======================================================================================
%% For config files
namespace() -> "bridge".
roots() -> [bridges].
%% @doc Hocon schema fields.
%% `bridges` root: webhook and mqtt (open source) plus any ee bridge types;
%% the mqtt map runs the pre-ee upgrade converter on load.
%% "metrics"/"node_metrics"/"node_status": read-only API structs.
fields(bridges) ->
    [
        {webhook,
            mk(
                hoconsc:map(name, ref(emqx_bridge_webhook_schema, "config")),
                #{
                    desc => ?DESC("bridges_webhook"),
                    required => false
                }
            )},
        {mqtt,
            mk(
                hoconsc:map(name, ref(emqx_bridge_mqtt_schema, "config")),
                #{
                    desc => ?DESC("bridges_mqtt"),
                    required => false,
                    %% transparently upgrade v5.0.0-v5.0.11 configs on load
                    converter => fun emqx_bridge_mqtt_config:upgrade_pre_ee/1
                }
            )}
    ] ++ ee_fields_bridges();
fields("metrics") ->
    [
        {"batching", mk(integer(), #{desc => ?DESC("metric_batching")})},
        {"dropped", mk(integer(), #{desc => ?DESC("metric_dropped")})},
        {"dropped.other", mk(integer(), #{desc => ?DESC("metric_dropped_other")})},
        {"dropped.queue_full", mk(integer(), #{desc => ?DESC("metric_dropped_queue_full")})},
        {"dropped.queue_not_enabled",
            mk(integer(), #{desc => ?DESC("metric_dropped_queue_not_enabled")})},
        {"dropped.resource_not_found",
            mk(integer(), #{desc => ?DESC("metric_dropped_resource_not_found")})},
        {"dropped.resource_stopped",
            mk(integer(), #{desc => ?DESC("metric_dropped_resource_stopped")})},
        {"matched", mk(integer(), #{desc => ?DESC("metric_matched")})},
        {"queuing", mk(integer(), #{desc => ?DESC("metric_queuing")})},
        {"retried", mk(integer(), #{desc => ?DESC("metric_retried")})},
        {"failed", mk(integer(), #{desc => ?DESC("metric_sent_failed")})},
        {"inflight", mk(integer(), #{desc => ?DESC("metric_sent_inflight")})},
        {"success", mk(integer(), #{desc => ?DESC("metric_sent_success")})},
        {"rate", mk(float(), #{desc => ?DESC("metric_rate")})},
        {"rate_max", mk(float(), #{desc => ?DESC("metric_rate_max")})},
        {"rate_last5m",
            mk(
                float(),
                #{desc => ?DESC("metric_rate_last5m")}
            )},
        %% NOTE(review): `received` is a counter but typed float() here,
        %% unlike the other counters above — looks unintentional; confirm.
        {"received", mk(float(), #{desc => ?DESC("metric_received")})}
    ];
fields("node_metrics") ->
    [
        node_name(),
        {"metrics", mk(ref(?MODULE, "metrics"), #{})}
    ];
fields("node_status") ->
    [
        node_name(),
        {"status", mk(status(), #{})}
    ].
%% @doc Struct descriptions for the schema docs; unknown structs are
%% undocumented.
desc(bridges) ->
    ?DESC("desc_bridges");
desc("metrics") ->
    ?DESC("desc_metrics");
desc("node_metrics") ->
    ?DESC("desc_node_metrics");
desc("node_status") ->
    ?DESC("desc_node_status");
desc(_) ->
    undefined.
%% Runtime bridge status enum used by the status fields above.
status() ->
    hoconsc:enum([connected, disconnected, connecting]).
%% Node-name field used in per-node metrics/status entries.
node_name() ->
    {"node", mk(binary(), #{desc => ?DESC("desc_node_name"), example => "emqx@127.0.0.1"})}.
%%=================================================================================================
%% Internal fns
%%=================================================================================================
%% @doc Best-effort load of App and Mod so that later function_exported/3
%% checks are meaningful (function_exported only sees loaded modules).
%% Failures are deliberately swallowed: the ee app being absent is a
%% perfectly valid state on the community edition.
ensure_loaded(App, Mod) ->
    try
        _ = application:load(App),
        %% touching module_info/0 forces the module to be loaded
        _ = Mod:module_info(),
        ok
    catch
        _:_ ->
            ok
    end.

View File

@ -3,13 +3,13 @@
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-import(hoconsc, [mk/2, enum/1]).
-import(hoconsc, [mk/2, enum/1, ref/2]).
-export([roots/0, fields/1, namespace/0, desc/1]).
%%======================================================================================
%% Hocon Schema Definitions
namespace() -> "bridge".
namespace() -> "bridge_webhook".
roots() -> [].
@ -23,10 +23,19 @@ fields("post") ->
fields("put") ->
fields("config");
fields("get") ->
emqx_bridge_schema:metrics_status_fields() ++ fields("post").
emqx_bridge_schema:metrics_status_fields() ++ fields("post");
fields("creation_opts") ->
lists:filter(
fun({K, _V}) ->
not lists:member(K, unsupported_opts())
end,
emqx_resource_schema:fields("creation_opts")
).
desc("config") ->
?DESC("desc_config");
desc("creation_opts") ->
?DESC(emqx_resource_schema, "creation_opts");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for WebHook using `", string:to_upper(Method), "` method."];
desc(_) ->
@ -41,16 +50,8 @@ basic_config() ->
desc => ?DESC("config_enable"),
default => true
}
)},
{direction,
mk(
egress,
#{
desc => ?DESC("config_direction"),
default => egress
}
)}
] ++
] ++ webhook_creation_opts() ++
proplists:delete(
max_retries, proplists:delete(base_url, emqx_connector_http:fields(config))
).
@ -68,7 +69,10 @@ request_config() ->
{local_topic,
mk(
binary(),
#{desc => ?DESC("config_local_topic")}
#{
desc => ?DESC("config_local_topic"),
required => false
}
)},
{method,
mk(
@ -118,6 +122,26 @@ request_config() ->
)}
].
webhook_creation_opts() ->
[
{resource_opts,
mk(
ref(?MODULE, "creation_opts"),
#{
required => false,
default => #{},
desc => ?DESC(emqx_resource_schema, <<"resource_opts">>)
}
)}
].
unsupported_opts() ->
[
enable_batch,
batch_size,
batch_time
].
%%======================================================================================
type_field() ->

View File

@ -44,6 +44,9 @@ init_per_testcase(t_get_basic_usage_info_1, Config) ->
{ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
setup_fake_telemetry_data(),
Config;
init_per_testcase(t_update_ssl_conf, Config) ->
Path = [bridges, <<"mqtt">>, <<"ssl_update_test">>],
[{config_path, Path} | Config];
init_per_testcase(_TestCase, Config) ->
{ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
Config.
@ -63,6 +66,9 @@ end_per_testcase(t_get_basic_usage_info_1, _Config) ->
ok = emqx_config:put([bridges], #{}),
ok = emqx_config:put_raw([bridges], #{}),
ok;
end_per_testcase(t_update_ssl_conf, Config) ->
Path = proplists:get_value(config_path, Config),
emqx:remove_config(Path);
end_per_testcase(_TestCase, _Config) ->
ok.
@ -89,36 +95,29 @@ t_get_basic_usage_info_1(_Config) ->
).
setup_fake_telemetry_data() ->
ConnectorConf =
#{
<<"connectors">> =>
#{
<<"mqtt">> => #{
<<"my_mqtt_connector">> =>
#{server => "127.0.0.1:1883"},
<<"my_mqtt_connector2">> =>
#{server => "127.0.0.1:1884"}
}
}
},
MQTTConfig1 = #{
connector => <<"mqtt:my_mqtt_connector">>,
server => "127.0.0.1:1883",
enable => true,
direction => ingress,
remote_topic => <<"aws/#">>,
remote_qos => 1
ingress => #{
remote => #{
topic => <<"aws/#">>,
qos => 1
}
}
},
MQTTConfig2 = #{
connector => <<"mqtt:my_mqtt_connector2">>,
server => "127.0.0.1:1884",
enable => true,
direction => ingress,
remote_topic => <<"$bridges/mqtt:some_bridge_in">>,
remote_qos => 1
ingress => #{
remote => #{
topic => <<"$bridges/mqtt:some_bridge_in">>,
qos => 1
}
}
},
HTTPConfig = #{
url => <<"http://localhost:9901/messages/${topic}">>,
enable => true,
direction => egress,
local_topic => "emqx_webhook/#",
method => post,
body => <<"${payload}">>,
@ -143,7 +142,6 @@ setup_fake_telemetry_data() ->
}
},
Opts = #{raw_with_default => true},
ok = emqx_common_test_helpers:load_config(emqx_connector_schema, ConnectorConf, Opts),
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts),
ok = snabbkaffe:start_trace(),
@ -157,82 +155,30 @@ setup_fake_telemetry_data() ->
ok = snabbkaffe:stop(),
ok.
t_update_ssl_conf(_) ->
Path = [bridges, <<"mqtt">>, <<"ssl_update_test">>],
t_update_ssl_conf(Config) ->
Path = proplists:get_value(config_path, Config),
EnableSSLConf = #{
<<"connector">> =>
<<"bridge_mode">> => false,
<<"clean_start">> => true,
<<"keepalive">> => <<"60s">>,
<<"mode">> => <<"cluster_shareload">>,
<<"proto_ver">> => <<"v4">>,
<<"server">> => <<"127.0.0.1:1883">>,
<<"ssl">> =>
#{
<<"bridge_mode">> => false,
<<"clean_start">> => true,
<<"keepalive">> => <<"60s">>,
<<"mode">> => <<"cluster_shareload">>,
<<"proto_ver">> => <<"v4">>,
<<"server">> => <<"127.0.0.1:1883">>,
<<"ssl">> =>
#{
<<"cacertfile">> => cert_file("cafile"),
<<"certfile">> => cert_file("certfile"),
<<"enable">> => true,
<<"keyfile">> => cert_file("keyfile"),
<<"verify">> => <<"verify_peer">>
}
},
<<"direction">> => <<"ingress">>,
<<"local_qos">> => 1,
<<"payload">> => <<"${payload}">>,
<<"remote_qos">> => 1,
<<"remote_topic">> => <<"t/#">>,
<<"retain">> => false
<<"cacertfile">> => cert_file("cafile"),
<<"certfile">> => cert_file("certfile"),
<<"enable">> => true,
<<"keyfile">> => cert_file("keyfile"),
<<"verify">> => <<"verify_peer">>
}
},
emqx:update_config(Path, EnableSSLConf),
?assertMatch({ok, [_, _, _]}, list_pem_dir(Path)),
NoSSLConf = #{
<<"connector">> =>
#{
<<"bridge_mode">> => false,
<<"clean_start">> => true,
<<"keepalive">> => <<"60s">>,
<<"max_inflight">> => 32,
<<"mode">> => <<"cluster_shareload">>,
<<"password">> => <<>>,
<<"proto_ver">> => <<"v4">>,
<<"reconnect_interval">> => <<"15s">>,
<<"replayq">> =>
#{<<"offload">> => false, <<"seg_bytes">> => <<"100MB">>},
<<"retry_interval">> => <<"15s">>,
<<"server">> => <<"127.0.0.1:1883">>,
<<"ssl">> =>
#{
<<"ciphers">> => <<>>,
<<"depth">> => 10,
<<"enable">> => false,
<<"reuse_sessions">> => true,
<<"secure_renegotiate">> => true,
<<"user_lookup_fun">> => <<"emqx_tls_psk:lookup">>,
<<"verify">> => <<"verify_peer">>,
<<"versions">> =>
[
<<"tlsv1.3">>,
<<"tlsv1.2">>,
<<"tlsv1.1">>,
<<"tlsv1">>
]
},
<<"username">> => <<>>
},
<<"direction">> => <<"ingress">>,
<<"enable">> => true,
<<"local_qos">> => 1,
<<"payload">> => <<"${payload}">>,
<<"remote_qos">> => 1,
<<"remote_topic">> => <<"t/#">>,
<<"retain">> => false
},
emqx:update_config(Path, NoSSLConf),
{ok, _} = emqx:update_config(Path, EnableSSLConf),
{ok, Certs} = list_pem_dir(Path),
?assertMatch([_, _, _], Certs),
NoSSLConf = EnableSSLConf#{<<"ssl">> := #{<<"enable">> => false}},
{ok, _} = emqx:update_config(Path, NoSSLConf),
?assertMatch({error, not_dir}, list_pem_dir(Path)),
emqx:remove_config(Path),
ok.
list_pem_dir(Path) ->

View File

@ -24,7 +24,7 @@
-include_lib("common_test/include/ct.hrl").
-define(CONF_DEFAULT, <<"bridges: {}">>).
-define(BRIDGE_TYPE, <<"webhook">>).
-define(BRIDGE_NAME, <<"test_bridge">>).
-define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))).
-define(URL(PORT, PATH),
list_to_binary(
io_lib:format(
@ -61,14 +61,18 @@ init_per_suite(Config) ->
_ = application:stop(emqx_resource),
_ = application:stop(emqx_connector),
ok = emqx_common_test_helpers:start_apps(
[emqx_bridge, emqx_dashboard],
[emqx_rule_engine, emqx_bridge, emqx_dashboard],
fun set_special_configs/1
),
ok = emqx_common_test_helpers:load_config(
emqx_rule_engine_schema,
<<"rule_engine {rules {}}">>
),
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?CONF_DEFAULT),
Config.
end_per_suite(_Config) ->
emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_dashboard]),
emqx_common_test_helpers:stop_apps([emqx_rule_engine, emqx_bridge, emqx_dashboard]),
ok.
set_special_configs(emqx_dashboard) ->
@ -78,8 +82,12 @@ set_special_configs(_) ->
init_per_testcase(_, Config) ->
{ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
Config.
end_per_testcase(_, _Config) ->
{Port, Sock, Acceptor} = start_http_server(fun handle_fun_200_ok/2),
[{port, Port}, {sock, Sock}, {acceptor, Acceptor} | Config].
end_per_testcase(_, Config) ->
Sock = ?config(sock, Config),
Acceptor = ?config(acceptor, Config),
stop_http_server(Sock, Acceptor),
clear_resources(),
ok.
@ -95,31 +103,39 @@ clear_resources() ->
%% HTTP server for testing
%%------------------------------------------------------------------------------
start_http_server(HandleFun) ->
process_flag(trap_exit, true),
Parent = self(),
spawn_link(fun() ->
{Port, Sock} = listen_on_random_port(),
Parent ! {port, Port},
loop(Sock, HandleFun, Parent)
{Port, Sock} = listen_on_random_port(),
Acceptor = spawn_link(fun() ->
accept_loop(Sock, HandleFun, Parent)
end),
receive
{port, Port} -> Port
after 2000 -> error({timeout, start_http_server})
end.
timer:sleep(100),
{Port, Sock, Acceptor}.
stop_http_server(Sock, Acceptor) ->
exit(Acceptor, kill),
gen_tcp:close(Sock).
listen_on_random_port() ->
Min = 1024,
Max = 65000,
rand:seed(exsplus, erlang:timestamp()),
Port = rand:uniform(Max - Min) + Min,
case gen_tcp:listen(Port, [{active, false}, {reuseaddr, true}, binary]) of
case
gen_tcp:listen(Port, [
binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000}
])
of
{ok, Sock} -> {Port, Sock};
{error, eaddrinuse} -> listen_on_random_port()
end.
loop(Sock, HandleFun, Parent) ->
accept_loop(Sock, HandleFun, Parent) ->
process_flag(trap_exit, true),
{ok, Conn} = gen_tcp:accept(Sock),
Handler = spawn(fun() -> HandleFun(Conn, Parent) end),
Handler = spawn_link(fun() -> HandleFun(Conn, Parent) end),
gen_tcp:controlling_process(Conn, Handler),
loop(Sock, HandleFun, Parent).
accept_loop(Sock, HandleFun, Parent).
make_response(CodeStr, Str) ->
B = iolist_to_binary(Str),
@ -138,7 +154,9 @@ handle_fun_200_ok(Conn, Parent) ->
Parent ! {http_server, received, Req},
gen_tcp:send(Conn, make_response("200 OK", "Request OK")),
handle_fun_200_ok(Conn, Parent);
{error, closed} ->
{error, Reason} ->
ct:pal("the http handler recv error: ~p", [Reason]),
timer:sleep(100),
gen_tcp:close(Conn)
end.
@ -153,24 +171,25 @@ parse_http_request(ReqStr0) ->
%% Testcases
%%------------------------------------------------------------------------------
t_http_crud_apis(_) ->
Port = start_http_server(fun handle_fun_200_ok/2),
t_http_crud_apis(Config) ->
Port = ?config(port, Config),
%% assert that there are no bridges at first
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
%% then we add a webhook bridge, using POST
%% POST /bridges/ will create a bridge
URL1 = ?URL(Port, "path1"),
Name = ?BRIDGE_NAME,
{ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name)
),
%ct:pal("---bridge: ~p", [Bridge]),
#{
<<"type">> := ?BRIDGE_TYPE,
<<"name">> := ?BRIDGE_NAME,
<<"name">> := Name,
<<"enable">> := true,
<<"status">> := _,
<<"node_status">> := [_ | _],
@ -179,7 +198,7 @@ t_http_crud_apis(_) ->
<<"url">> := URL1
} = jsx:decode(Bridge),
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
%% send a message to emqx; the message should be forwarded to the HTTP server
Body = <<"my msg">>,
emqx:publish(emqx_message:make(<<"emqx_webhook/1">>, Body)),
@ -203,12 +222,12 @@ t_http_crud_apis(_) ->
{ok, 200, Bridge2} = request(
put,
uri(["bridges", BridgeID]),
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, Name)
),
?assertMatch(
#{
<<"type">> := ?BRIDGE_TYPE,
<<"name">> := ?BRIDGE_NAME,
<<"name">> := Name,
<<"enable">> := true,
<<"status">> := _,
<<"node_status">> := [_ | _],
@ -225,7 +244,7 @@ t_http_crud_apis(_) ->
[
#{
<<"type">> := ?BRIDGE_TYPE,
<<"name">> := ?BRIDGE_NAME,
<<"name">> := Name,
<<"enable">> := true,
<<"status">> := _,
<<"node_status">> := [_ | _],
@ -242,7 +261,7 @@ t_http_crud_apis(_) ->
?assertMatch(
#{
<<"type">> := ?BRIDGE_TYPE,
<<"name">> := ?BRIDGE_NAME,
<<"name">> := Name,
<<"enable">> := true,
<<"status">> := _,
<<"node_status">> := [_ | _],
@ -275,7 +294,7 @@ t_http_crud_apis(_) ->
{ok, 404, ErrMsg2} = request(
put,
uri(["bridges", BridgeID]),
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, Name)
),
?assertMatch(
#{
@ -286,29 +305,102 @@ t_http_crud_apis(_) ->
),
ok.
t_start_stop_bridges(_) ->
lists:foreach(
fun(Type) ->
do_start_stop_bridges(Type)
end,
[node, cluster]
).
do_start_stop_bridges(Type) ->
t_check_dependent_actions_on_delete(Config) ->
Port = ?config(port, Config),
%% assert that there are no bridges at first
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
Port = start_http_server(fun handle_fun_200_ok/2),
%% then we add a webhook bridge, using POST
%% POST /bridges/ will create a bridge
URL1 = ?URL(Port, "path1"),
Name = <<"t_http_crud_apis">>,
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
{ok, 201, _} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name)
),
{ok, 201, Rule} = request(
post,
uri(["rules"]),
#{
<<"name">> => <<"t_http_crud_apis">>,
<<"enable">> => true,
<<"actions">> => [BridgeID],
<<"sql">> => <<"SELECT * from \"t\"">>
}
),
#{<<"id">> := RuleId} = jsx:decode(Rule),
%% deleting the bridge should fail because there is a rule depending on it
{ok, 403, _} = request(delete, uri(["bridges", BridgeID]), []),
%% delete the rule first
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
%% then delete the bridge is OK
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
ok.
t_cascade_delete_actions(Config) ->
Port = ?config(port, Config),
%% assert we there's no bridges at first
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
%% then we add a webhook bridge, using POST
%% POST /bridges/ will create a bridge
URL1 = ?URL(Port, "path1"),
Name = <<"t_http_crud_apis">>,
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
{ok, 201, _} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name)
),
{ok, 201, Rule} = request(
post,
uri(["rules"]),
#{
<<"name">> => <<"t_http_crud_apis">>,
<<"enable">> => true,
<<"actions">> => [BridgeID],
<<"sql">> => <<"SELECT * from \"t\"">>
}
),
#{<<"id">> := RuleId} = jsx:decode(Rule),
%% delete the bridge will also delete the actions from the rules
{ok, 204, _} = request(delete, uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions", []),
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
{ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
?assertMatch(
#{
<<"actions">> := []
},
jsx:decode(Rule1)
),
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
ok.
t_start_stop_bridges_node(Config) ->
do_start_stop_bridges(node, Config).
t_start_stop_bridges_cluster(Config) ->
do_start_stop_bridges(cluster, Config).
do_start_stop_bridges(Type, Config) ->
%% assert we there's no bridges at first
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
Port = ?config(port, Config),
URL1 = ?URL(Port, "abc"),
Name = atom_to_binary(Type),
{ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name)
),
%ct:pal("the bridge ==== ~p", [Bridge]),
#{
<<"type">> := ?BRIDGE_TYPE,
<<"name">> := ?BRIDGE_NAME,
<<"name">> := Name,
<<"enable">> := true,
<<"status">> := <<"connected">>,
<<"node_status">> := [_ | _],
@ -316,11 +408,11 @@ do_start_stop_bridges(Type) ->
<<"node_metrics">> := [_ | _],
<<"url">> := URL1
} = jsx:decode(Bridge),
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
%% stop it
{ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>),
{ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)),
?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)),
%% start again
{ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>),
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
@ -339,21 +431,22 @@ do_start_stop_bridges(Type) ->
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []).
t_enable_disable_bridges(_) ->
t_enable_disable_bridges(Config) ->
%% assert we there's no bridges at first
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
Port = start_http_server(fun handle_fun_200_ok/2),
Name = ?BRIDGE_NAME,
Port = ?config(port, Config),
URL1 = ?URL(Port, "abc"),
{ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name)
),
%ct:pal("the bridge ==== ~p", [Bridge]),
#{
<<"type">> := ?BRIDGE_TYPE,
<<"name">> := ?BRIDGE_NAME,
<<"name">> := Name,
<<"enable">> := true,
<<"status">> := <<"connected">>,
<<"node_status">> := [_ | _],
@ -361,11 +454,11 @@ t_enable_disable_bridges(_) ->
<<"node_metrics">> := [_ | _],
<<"url">> := URL1
} = jsx:decode(Bridge),
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
%% disable it
{ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>),
{ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []),
?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)),
?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)),
%% enable again
{ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>),
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
@ -391,21 +484,22 @@ t_enable_disable_bridges(_) ->
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []).
t_reset_bridges(_) ->
t_reset_bridges(Config) ->
%% assert we there's no bridges at first
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
Port = start_http_server(fun handle_fun_200_ok/2),
Name = ?BRIDGE_NAME,
Port = ?config(port, Config),
URL1 = ?URL(Port, "abc"),
{ok, 201, Bridge} = request(
post,
uri(["bridges"]),
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name)
),
%ct:pal("the bridge ==== ~p", [Bridge]),
#{
<<"type">> := ?BRIDGE_TYPE,
<<"name">> := ?BRIDGE_NAME,
<<"name">> := Name,
<<"enable">> := true,
<<"status">> := <<"connected">>,
<<"node_status">> := [_ | _],
@ -413,7 +507,7 @@ t_reset_bridges(_) ->
<<"node_metrics">> := [_ | _],
<<"url">> := URL1
} = jsx:decode(Bridge),
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name),
{ok, 200, <<"Reset success">>} = request(put, uri(["bridges", BridgeID, "reset_metrics"]), []),
%% delete the bridge

View File

@ -0,0 +1,633 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_mqtt_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-import(emqx_dashboard_api_test_helpers, [request/4, uri/1]).
-include("emqx/include/emqx.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-include("emqx_dashboard/include/emqx_dashboard.hrl").
%% output functions
-export([inspect/3]).
-define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>).
-define(TYPE_MQTT, <<"mqtt">>).
-define(NAME_MQTT, <<"my_mqtt_bridge">>).
-define(BRIDGE_NAME_INGRESS, <<"ingress_mqtt_bridge">>).
-define(BRIDGE_NAME_EGRESS, <<"egress_mqtt_bridge">>).
-define(SERVER_CONF(Username), #{
<<"server">> => <<"127.0.0.1:1883">>,
<<"username">> => Username,
<<"password">> => <<"">>,
<<"proto_ver">> => <<"v4">>,
<<"ssl">> => #{<<"enable">> => false}
}).
-define(INGRESS_CONF, #{
<<"remote">> => #{
<<"topic">> => <<"remote_topic/#">>,
<<"qos">> => 2
},
<<"local">> => #{
<<"topic">> => <<"local_topic/${topic}">>,
<<"qos">> => <<"${qos}">>,
<<"payload">> => <<"${payload}">>,
<<"retain">> => <<"${retain}">>
}
}).
-define(EGRESS_CONF, #{
<<"local">> => #{
<<"topic">> => <<"local_topic/#">>
},
<<"remote">> => #{
<<"topic">> => <<"remote_topic/${topic}">>,
<<"payload">> => <<"${payload}">>,
<<"qos">> => <<"${qos}">>,
<<"retain">> => <<"${retain}">>
}
}).
%% Rule-engine action callback used by the test rules: stash the
%% selected fields in a persistent_term keyed by this module so that
%% testcases can later assert on what the rule produced.
inspect(Selected, _Envs, _Args) ->
    persistent_term:put(?MODULE, #{inspect => Selected}).
%% CT callback: run every t_* function exported from this module.
all() ->
    emqx_common_test_helpers:all(?MODULE).
%% CT callback: no test groups are defined.
groups() ->
    [].
%% CT callback: cap each testcase at 30 seconds.
suite() ->
    [{timetrap, {seconds, 30}}].
%% Suite setup: (re)start the apps under test with dashboard defaults,
%% then load empty rule-engine and bridge configs as a clean baseline.
init_per_suite(Config) ->
    _ = application:load(emqx_conf),
    %% some testcases (possibly from another app) may already have
    %% emqx_connector/emqx_resource running — stop them for a clean start
    _ = application:stop(emqx_resource),
    _ = application:stop(emqx_connector),
    ok = emqx_common_test_helpers:start_apps(
        [
            emqx_rule_engine,
            emqx_bridge,
            emqx_dashboard
        ],
        fun set_special_configs/1
    ),
    ok = emqx_common_test_helpers:load_config(
        emqx_rule_engine_schema,
        <<"rule_engine {rules {}}">>
    ),
    ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?BRIDGE_CONF_DEFAULT),
    Config.
%% Suite teardown: stop the applications started by init_per_suite/1.
end_per_suite(_Config) ->
    Apps = [emqx_rule_engine, emqx_bridge, emqx_dashboard],
    emqx_common_test_helpers:stop_apps(Apps),
    ok.
%% Per-app config hook for start_apps/2: give the dashboard a default
%% admin user (<<"connector_admin">>); other apps need nothing special.
set_special_configs(emqx_dashboard) ->
    emqx_dashboard_api_test_helpers:set_default_config(<<"connector_admin">>);
set_special_configs(_) ->
    ok.
%% Testcase setup: ensure a cluster-RPC process exists for config updates.
init_per_testcase(_, Config) ->
    {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
    Config.
%% Testcase teardown: drop all rules and bridges created by the test.
end_per_testcase(_, _Config) ->
    clear_resources(),
    ok.
%% Remove every rule-engine rule and every configured bridge, so each
%% testcase starts from a clean slate. Crashes (badmatch) if any
%% deletion fails, which fails the testcase loudly.
clear_resources() ->
    DeleteRule = fun(#{id := RuleId}) ->
        ok = emqx_rule_engine:delete_rule(RuleId)
    end,
    lists:foreach(DeleteRule, emqx_rule_engine:get_rules()),
    RemoveBridge = fun(#{type := BridgeType, name := BridgeName}) ->
        {ok, _} = emqx_bridge:remove(BridgeType, BridgeName)
    end,
    lists:foreach(RemoveBridge, emqx_bridge:list()).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% Create an ingress MQTT bridge over the REST API, publish on the
%% remote topic and expect delivery on the mapped local topic, then
%% check the bridge metrics and delete the bridge.
t_mqtt_conn_bridge_ingress(_) ->
    User1 = <<"user1">>,
    %% create an MQTT bridge, using POST
    {ok, 201, Bridge} = request(
        post,
        uri(["bridges"]),
        ?SERVER_CONF(User1)#{
            <<"type">> => ?TYPE_MQTT,
            <<"name">> => ?BRIDGE_NAME_INGRESS,
            <<"ingress">> => ?INGRESS_CONF
        }
    ),
    #{
        <<"type">> := ?TYPE_MQTT,
        <<"name">> := ?BRIDGE_NAME_INGRESS
    } = jsx:decode(Bridge),
    BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
    %% we now test if the bridge works as expected
    RemoteTopic = <<"remote_topic/1">>,
    LocalTopic = <<"local_topic/", RemoteTopic/binary>>,
    Payload = <<"hello">>,
    emqx:subscribe(LocalTopic),
    %% give the subscription time to take effect
    timer:sleep(100),
    %% PUBLISH a message to the 'remote' broker, as we have only one broker,
    %% the remote broker is also the local one.
    emqx:publish(emqx_message:make(RemoteTopic, Payload)),
    %% we should receive a message on the local broker, with specified topic
    ?assert(
        receive
            {deliver, LocalTopic, #message{payload = Payload}} ->
                ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),
    %% verify the metrics of the bridge: one message received, none matched
    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDIngress]), []),
    ?assertMatch(
        #{
            <<"metrics">> := #{<<"matched">> := 0, <<"received">> := 1},
            <<"node_metrics">> :=
                [
                    #{
                        <<"node">> := _,
                        <<"metrics">> :=
                            #{<<"matched">> := 0, <<"received">> := 1}
                    }
                ]
        },
        jsx:decode(BridgeStr)
    ),
    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
    ok.
%% Create an egress MQTT bridge over the REST API, publish on the local
%% topic and expect delivery on the mapped remote topic, then check the
%% bridge metrics and delete the bridge.
t_mqtt_conn_bridge_egress(_) ->
    %% create an egress MQTT bridge, using POST
    User1 = <<"user1">>,
    {ok, 201, Bridge} = request(
        post,
        uri(["bridges"]),
        ?SERVER_CONF(User1)#{
            <<"type">> => ?TYPE_MQTT,
            <<"name">> => ?BRIDGE_NAME_EGRESS,
            <<"egress">> => ?EGRESS_CONF
        }
    ),
    #{
        <<"type">> := ?TYPE_MQTT,
        <<"name">> := ?BRIDGE_NAME_EGRESS
    } = jsx:decode(Bridge),
    BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
    %% we now test if the bridge works as expected
    LocalTopic = <<"local_topic/1">>,
    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
    Payload = <<"hello">>,
    emqx:subscribe(RemoteTopic),
    %% give the subscription time to take effect
    timer:sleep(100),
    %% PUBLISH a message to the 'local' broker, as we have only one broker,
    %% the remote broker is also the local one.
    emqx:publish(emqx_message:make(LocalTopic, Payload)),
    %% we should receive a message on the "remote" broker, with specified topic
    ?assert(
        receive
            {deliver, RemoteTopic, #message{payload = Payload}} ->
                ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),
    %% verify the metrics of the bridge: one matched, one success, no failures
    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
    ?assertMatch(
        #{
            <<"metrics">> := #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0},
            <<"node_metrics">> :=
                [
                    #{
                        <<"node">> := _,
                        <<"metrics">> :=
                            #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}
                    }
                ]
        },
        jsx:decode(BridgeStr)
    ),
    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
    ok.
%% Create an ingress MQTT bridge plus a rule that selects from the
%% bridge's "$bridges/..." source topic and feeds this module's
%% inspect/3 action; verify message delivery, rule metrics, the
%% action's captured payload, and the bridge metrics.
t_ingress_mqtt_bridge_with_rules(_) ->
    {ok, 201, _} = request(
        post,
        uri(["bridges"]),
        ?SERVER_CONF(<<"user1">>)#{
            <<"type">> => ?TYPE_MQTT,
            <<"name">> => ?BRIDGE_NAME_INGRESS,
            <<"ingress">> => ?INGRESS_CONF
        }
    ),
    BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS),
    %% rule: consume from the bridge source and run the inspect/3 action
    {ok, 201, Rule} = request(
        post,
        uri(["rules"]),
        #{
            <<"name">> => <<"A_rule_get_messages_from_a_source_mqtt_bridge">>,
            <<"enable">> => true,
            <<"actions">> => [#{<<"function">> => "emqx_bridge_mqtt_SUITE:inspect"}],
            <<"sql">> => <<"SELECT * from \"$bridges/", BridgeIDIngress/binary, "\"">>
        }
    ),
    #{<<"id">> := RuleId} = jsx:decode(Rule),
    %% we now test if the bridge works as expected
    RemoteTopic = <<"remote_topic/1">>,
    LocalTopic = <<"local_topic/", RemoteTopic/binary>>,
    Payload = <<"hello">>,
    emqx:subscribe(LocalTopic),
    timer:sleep(100),
    %% PUBLISH a message to the 'remote' broker, as we have only one broker,
    %% the remote broker is also the local one.
    emqx:publish(emqx_message:make(RemoteTopic, Payload)),
    %% we should receive a message on the local broker, with specified topic
    ?assert(
        receive
            {deliver, LocalTopic, #message{payload = Payload}} ->
                ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),
    %% and also the rule should be matched, with matched + 1:
    {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
    {ok, 200, Metrics} = request(get, uri(["rules", RuleId, "metrics"]), []),
    ?assertMatch(#{<<"id">> := RuleId}, jsx:decode(Rule1)),
    ?assertMatch(
        #{
            <<"metrics">> := #{
                <<"matched">> := 1,
                <<"passed">> := 1,
                <<"failed">> := 0,
                <<"failed.exception">> := 0,
                <<"failed.no_result">> := 0,
                <<"matched.rate">> := _,
                <<"matched.rate.max">> := _,
                <<"matched.rate.last5m">> := _,
                <<"actions.total">> := 1,
                <<"actions.success">> := 1,
                <<"actions.failed">> := 0,
                <<"actions.failed.out_of_service">> := 0,
                <<"actions.failed.unknown">> := 0
            }
        },
        jsx:decode(Metrics)
    ),
    %% we also check if the actions of the rule is triggered:
    %% inspect/3 stored the selected fields in a persistent_term
    ?assertMatch(
        #{
            inspect := #{
                event := <<"$bridges/mqtt", _/binary>>,
                id := MsgId,
                payload := Payload,
                topic := RemoteTopic,
                qos := 0,
                dup := false,
                retain := false,
                pub_props := #{},
                timestamp := _
            }
        } when is_binary(MsgId),
        persistent_term:get(?MODULE)
    ),
    %% verify the metrics of the bridge
    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDIngress]), []),
    ?assertMatch(
        #{
            <<"metrics">> := #{<<"matched">> := 0, <<"received">> := 1},
            <<"node_metrics">> :=
                [
                    #{
                        <<"node">> := _,
                        <<"metrics">> :=
                            #{<<"matched">> := 0, <<"received">> := 1}
                    }
                ]
        },
        jsx:decode(BridgeStr)
    ),
    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []).
%% Create an egress MQTT bridge plus a rule that forwards matching
%% messages to it. Verify both the direct local->remote forwarding path
%% and the rule-triggered path, then check rule and bridge metrics.
t_egress_mqtt_bridge_with_rules(_) ->
    {ok, 201, Bridge} = request(
        post,
        uri(["bridges"]),
        ?SERVER_CONF(<<"user1">>)#{
            <<"type">> => ?TYPE_MQTT,
            <<"name">> => ?BRIDGE_NAME_EGRESS,
            <<"egress">> => ?EGRESS_CONF
        }
    ),
    #{<<"type">> := ?TYPE_MQTT, <<"name">> := ?BRIDGE_NAME_EGRESS} = jsx:decode(Bridge),
    BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
    %% rule: forward messages on "t/1" to the egress bridge
    {ok, 201, Rule} = request(
        post,
        uri(["rules"]),
        #{
            <<"name">> => <<"A_rule_send_messages_to_a_sink_mqtt_bridge">>,
            <<"enable">> => true,
            <<"actions">> => [BridgeIDEgress],
            <<"sql">> => <<"SELECT * from \"t/1\"">>
        }
    ),
    #{<<"id">> := RuleId} = jsx:decode(Rule),
    %% we now test if the bridge works as expected
    LocalTopic = <<"local_topic/1">>,
    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
    Payload = <<"hello">>,
    emqx:subscribe(RemoteTopic),
    timer:sleep(100),
    %% PUBLISH a message to the 'local' broker, as we have only one broker,
    %% the remote broker is also the local one.
    emqx:publish(emqx_message:make(LocalTopic, Payload)),
    %% we should receive a message on the "remote" broker, with specified topic
    ?assert(
        receive
            {deliver, RemoteTopic, #message{payload = Payload}} ->
                ct:pal("remote broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),
    emqx:unsubscribe(RemoteTopic),
    %% PUBLISH a message to the rule.
    Payload2 = <<"hi">>,
    RuleTopic = <<"t/1">>,
    RemoteTopic2 = <<"remote_topic/", RuleTopic/binary>>,
    emqx:subscribe(RemoteTopic2),
    timer:sleep(100),
    emqx:publish(emqx_message:make(RuleTopic, Payload2)),
    {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
    ?assertMatch(#{<<"id">> := RuleId, <<"name">> := _}, jsx:decode(Rule1)),
    %% the rule should have matched exactly once and run its action once
    {ok, 200, Metrics} = request(get, uri(["rules", RuleId, "metrics"]), []),
    ?assertMatch(
        #{
            <<"metrics">> := #{
                <<"matched">> := 1,
                <<"passed">> := 1,
                <<"failed">> := 0,
                <<"failed.exception">> := 0,
                <<"failed.no_result">> := 0,
                <<"matched.rate">> := _,
                <<"matched.rate.max">> := _,
                <<"matched.rate.last5m">> := _,
                <<"actions.total">> := 1,
                <<"actions.success">> := 1,
                <<"actions.failed">> := 0,
                <<"actions.failed.out_of_service">> := 0,
                <<"actions.failed.unknown">> := 0
            }
        },
        jsx:decode(Metrics)
    ),
    %% we should receive a message on the "remote" broker, with specified topic
    ?assert(
        receive
            {deliver, RemoteTopic2, #message{payload = Payload2}} ->
                ct:pal("remote broker got message: ~p on topic ~p", [Payload2, RemoteTopic2]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),
    %% verify the metrics of the bridge: two sends (direct + via rule)
    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
    ?assertMatch(
        #{
            <<"metrics">> := #{<<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0},
            <<"node_metrics">> :=
                [
                    #{
                        <<"node">> := _,
                        <<"metrics">> := #{
                            <<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0
                        }
                    }
                ]
        },
        jsx:decode(BridgeStr)
    ),
    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []).
%% Egress bridge resilience: after a successful send, stop the broker
%% listener so the bridge disconnects, publish two more messages (which
%% should be queued), then restart the listener and verify the queued
%% messages are flushed and delivered.
t_mqtt_conn_bridge_egress_reconnect(_) ->
    %% create an egress MQTT bridge, using POST
    User1 = <<"user1">>,
    {ok, 201, Bridge} = request(
        post,
        uri(["bridges"]),
        ?SERVER_CONF(User1)#{
            <<"type">> => ?TYPE_MQTT,
            <<"name">> => ?BRIDGE_NAME_EGRESS,
            <<"egress">> => ?EGRESS_CONF,
            %% to make it reconnect quickly
            <<"reconnect_interval">> => <<"1s">>,
            <<"resource_opts">> => #{
                <<"worker_pool_size">> => 2,
                <<"enable_queue">> => true,
                <<"query_mode">> => <<"sync">>,
                %% to make it check the healthy quickly
                <<"health_check_interval">> => <<"0.5s">>
            }
        }
    ),
    #{
        <<"type">> := ?TYPE_MQTT,
        <<"name">> := ?BRIDGE_NAME_EGRESS
    } = jsx:decode(Bridge),
    BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS),
    %% we now test if the bridge works as expected
    LocalTopic = <<"local_topic/1">>,
    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
    Payload0 = <<"hello">>,
    emqx:subscribe(RemoteTopic),
    timer:sleep(100),
    %% PUBLISH a message to the 'local' broker, as we have only one broker,
    %% the remote broker is also the local one.
    emqx:publish(emqx_message:make(LocalTopic, Payload0)),
    %% we should receive a message on the "remote" broker, with specified topic
    assert_mqtt_msg_received(RemoteTopic, Payload0),
    %% verify the metrics of the bridge
    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
    ?assertMatch(
        #{
            <<"metrics">> := #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0},
            <<"node_metrics">> :=
                [
                    #{
                        <<"node">> := _,
                        <<"metrics">> :=
                            #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}
                    }
                ]
        },
        jsx:decode(BridgeStr)
    ),
    %% stop the listener 1883 to make the bridge disconnected
    ok = emqx_listeners:stop_listener('tcp:default'),
    ct:sleep(1500),
    %% PUBLISH 2 messages to the 'local' broker, the message should
    %% be queued while the bridge is down; use snabbkaffe to wait until
    %% both queries have entered the resource worker
    ok = snabbkaffe:start_trace(),
    {ok, SRef} =
        snabbkaffe:subscribe(
            fun
                (
                    #{
                        ?snk_kind := call_query_enter,
                        query := {query, _From, {send_message, #{}}, _Sent}
                    }
                ) ->
                    true;
                (_) ->
                    false
            end,
            _NEvents = 2,
            _Timeout = 1_000
        ),
    Payload1 = <<"hello2">>,
    Payload2 = <<"hello3">>,
    emqx:publish(emqx_message:make(LocalTopic, Payload1)),
    emqx:publish(emqx_message:make(LocalTopic, Payload2)),
    {ok, _} = snabbkaffe:receive_events(SRef),
    ok = snabbkaffe:stop(),
    %% verify the metrics of the bridge, the message should be queued
    {ok, 200, BridgeStr1} = request(get, uri(["bridges", BridgeIDEgress]), []),
    %% matched >= 3 because of possible retries.
    ?assertMatch(
        #{
            <<"status">> := Status,
            <<"metrics">> := #{
                <<"matched">> := Matched, <<"success">> := 1, <<"failed">> := 0, <<"queuing">> := 2
            }
        } when Matched >= 3 andalso (Status == <<"connected">> orelse Status == <<"connecting">>),
        jsx:decode(BridgeStr1)
    ),
    %% start the listener 1883 to make the bridge reconnected
    ok = emqx_listeners:start_listener('tcp:default'),
    timer:sleep(1500),
    %% verify the metrics of the bridge, the 2 queued messages should have been sent
    {ok, 200, BridgeStr2} = request(get, uri(["bridges", BridgeIDEgress]), []),
    %% matched >= 3 because of possible retries.
    ?assertMatch(
        #{
            <<"status">> := <<"connected">>,
            <<"metrics">> := #{
                <<"matched">> := Matched,
                <<"success">> := 3,
                <<"failed">> := 0,
                <<"queuing">> := 0,
                <<"retried">> := _
            }
        } when Matched >= 3,
        jsx:decode(BridgeStr2)
    ),
    %% also verify the 2 messages have been sent to the remote broker
    assert_mqtt_msg_received(RemoteTopic, Payload1),
    assert_mqtt_msg_received(RemoteTopic, Payload2),
    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
    ok.
%% Assert that a {deliver, Topic, Msg} with the given Payload arrives in
%% this process's mailbox within 100 ms; any other message (or a
%% timeout) fails the assertion.
assert_mqtt_msg_received(Topic, Payload) ->
    ?assert(
        receive
            {deliver, Topic, #message{payload = Payload}} ->
                ct:pal("Got mqtt message: ~p on topic ~p", [Payload, Topic]),
                true;
            Msg ->
                ct:pal("Unexpected Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ).
%% REST helper: issue the request as the default dashboard admin user.
request(Method, Url, Body) ->
    request(<<"connector_admin">>, Method, Url, Body).

View File

@ -0,0 +1,229 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_bridge_mqtt_config_tests).
-include_lib("eunit/include/eunit.hrl").
%% A config with no bridges at all, or with an empty webhook section,
%% must pass the bridge schema check unchanged.
empty_config_test() ->
    Conf1 = #{<<"bridges">> => #{}},
    Conf2 = #{<<"bridges">> => #{<<"webhook">> => #{}}},
    ?assertEqual(Conf1, check(Conf1)),
    ?assertEqual(Conf2, check(Conf2)),
    ok.
%% ensure webhook config can be checked
%% A v5.0.11-generated webhook bridge config must pass the schema check;
%% `method` is atomized by the schema and the body template is kept as-is.
webhook_config_test() ->
    Conf = parse(webhook_v5011_hocon()),
    ?assertMatch(
        #{
            <<"bridges">> :=
                #{
                    <<"webhook">> := #{
                        <<"the_name">> :=
                            #{
                                <<"method">> := get,
                                <<"body">> := <<"${payload}">>
                            }
                    }
                }
        },
        check(Conf)
    ),
    ok.
%% Upgrade a pre-ee (v5.0.11) raw config to the current layout.
%% NOTE: clause order matters — the <<"bridges">> wrapper is unpacked
%% first, then the <<"mqtt">> section inside it is upgraded via
%% emqx_bridge_mqtt_config:upgrade_pre_ee/1.
up(#{<<"bridges">> := Bridges0} = Conf0) ->
    Bridges = up(Bridges0),
    Conf0#{<<"bridges">> := Bridges};
up(#{<<"mqtt">> := MqttBridges0} = Bridges) ->
    MqttBridges = emqx_bridge_mqtt_config:upgrade_pre_ee(MqttBridges0),
    Bridges#{<<"mqtt">> := MqttBridges}.
%% Parse a HOCON string/binary into a raw config map; crashes (badmatch)
%% on invalid input, which is the desired behavior in a test helper.
parse(HOCON) ->
    {ok, Conf} = hocon:binary(HOCON),
    Conf.
%% EUnit generator: for both the minimal and the full v5.0.11 MQTT bridge
%% configs, parse -> upgrade -> schema-check must succeed, and the result
%% must be in the upgraded (flattened, connector-less) layout.
mqtt_config_test_() ->
    Conf0 = mqtt_v5011_hocon(),
    Conf1 = mqtt_v5011_full_hocon(),
    [
        {Tag, fun() ->
            Parsed = parse(Conf),
            Upgraded = up(Parsed),
            Checked = check(Upgraded),
            assert_upgraded(Checked)
        end}
     || {Tag, Conf} <- [{"minimum", Conf0}, {"full", Conf1}]
    ].
%% Recurse down the checked config and assert each known bridge is in
%% the upgraded layout. Each clause peels one nesting level
%% (bridges -> mqtt -> bridge name); clause order determines the walk.
assert_upgraded(#{<<"bridges">> := Bridges}) ->
    assert_upgraded(Bridges);
assert_upgraded(#{<<"mqtt">> := Mqtt}) ->
    assert_upgraded(Mqtt);
assert_upgraded(#{<<"bridge_one">> := Map}) ->
    assert_upgraded1(Map);
assert_upgraded(#{<<"bridge_two">> := Map}) ->
    assert_upgraded1(Map).
%% An upgraded bridge config has the old nested keys (connector,
%% direction) removed and the flattened connector fields (server, ssl)
%% present at the top level.
assert_upgraded1(BridgeConf) ->
    lists:foreach(
        fun(Gone) -> ?assertNot(maps:is_key(Gone, BridgeConf)) end,
        [<<"connector">>, <<"direction">>]
    ),
    lists:foreach(
        fun(Present) -> ?assert(maps:is_key(Present, BridgeConf)) end,
        [<<"server">>, <<"ssl">>]
    ).
%% Run the full bridge schema check over a raw (binary-keyed) config map.
check(Conf) when is_map(Conf) ->
    hocon_tconf:check_plain(emqx_bridge_schema, Conf).
%% erlfmt-ignore
%% this is config generated from v5.0.11
%% Fixture: raw webhook bridge config, kept verbatim for upgrade tests.
webhook_v5011_hocon() ->
"""
bridges{
  webhook {
    the_name{
      body = \"${payload}\"
      connect_timeout = \"5s\"
      enable_pipelining = 100
      headers {\"content-type\" = \"application/json\"}
      max_retries = 3
      method = \"get\"
      pool_size = 4
      request_timeout = \"5s\"
      ssl {enable = false, verify = \"verify_peer\"}
      url = \"http://localhost:8080\"
    }
  }
}
""".
%% erlfmt-ignore
%% this is a generated from v5.0.11
%% Fixture: minimal pre-ee MQTT bridge config (one egress, one ingress),
%% still using the old nested `connector`/`direction` layout.
mqtt_v5011_hocon() ->
"""
bridges {
  mqtt {
    bridge_one {
      connector {
        bridge_mode = false
        clean_start = true
        keepalive = \"60s\"
        mode = cluster_shareload
        proto_ver = \"v4\"
        server = \"localhost:1883\"
        ssl {enable = false, verify = \"verify_peer\"}
      }
      direction = egress
      enable = true
      payload = \"${payload}\"
      remote_qos = 1
      remote_topic = \"tttttttttt\"
      retain = false
    }
    bridge_two {
      connector {
        bridge_mode = false
        clean_start = true
        keepalive = \"60s\"
        mode = \"cluster_shareload\"
        proto_ver = \"v4\"
        server = \"localhost:1883\"
        ssl {enable = false, verify = \"verify_peer\"}
      }
      direction = ingress
      enable = true
      local_qos = 1
      payload = \"${payload}\"
      remote_qos = 1
      remote_topic = \"tttttttt/#\"
      retain = false
    }
  }
}
""".
%% erlfmt-ignore
%% a more complete version
%% Fixture: full pre-ee MQTT bridge config exercising every connector
%% field (replayq, ssl, credentials) in the old nested layout.
mqtt_v5011_full_hocon() ->
"""
bridges {
  mqtt {
    bridge_one {
      connector {
        bridge_mode = false
        clean_start = true
        keepalive = \"60s\"
        max_inflight = 32
        mode = \"cluster_shareload\"
        password = \"\"
        proto_ver = \"v5\"
        reconnect_interval = \"15s\"
        replayq {offload = false, seg_bytes = \"100MB\"}
        retry_interval = \"12s\"
        server = \"localhost:1883\"
        ssl {
          ciphers = \"\"
          depth = 10
          enable = false
          reuse_sessions = true
          secure_renegotiate = true
          user_lookup_fun = \"emqx_tls_psk:lookup\"
          verify = \"verify_peer\"
          versions = [\"tlsv1.3\", \"tlsv1.2\", \"tlsv1.1\", \"tlsv1\"]
        }
        username = \"\"
      }
      direction = \"ingress\"
      enable = true
      local_qos = 1
      payload = \"${payload}\"
      remote_qos = 1
      remote_topic = \"tttt/a\"
      retain = false
    }
    bridge_two {
      connector {
        bridge_mode = false
        clean_start = true
        keepalive = \"60s\"
        max_inflight = 32
        mode = \"cluster_shareload\"
        password = \"\"
        proto_ver = \"v4\"
        reconnect_interval = \"15s\"
        replayq {offload = false, seg_bytes = \"100MB\"}
        retry_interval = \"44s\"
        server = \"localhost:1883\"
        ssl {
          ciphers = \"\"
          depth = 10
          enable = false
          reuse_sessions = true
          secure_renegotiate = true
          user_lookup_fun = \"emqx_tls_psk:lookup\"
          verify = verify_peer
          versions = [\"tlsv1.3\", \"tlsv1.2\", \"tlsv1.1\", \"tlsv1\"]
        }
        username = \"\"
      }
      direction = egress
      enable = true
      payload = \"${payload.x}\"
      remote_qos = 1
      remote_topic = \"remotetopic/1\"
      retain = false
    }
  }
}
""".

View File

@ -165,7 +165,6 @@ gen_schema_json(Dir, I18nFile, SchemaModule) ->
gen_api_schema_json(Dir, I18nFile, Lang) ->
emqx_dashboard:init_i18n(I18nFile, Lang),
gen_api_schema_json_hotconf(Dir, Lang),
gen_api_schema_json_connector(Dir, Lang),
gen_api_schema_json_bridge(Dir, Lang),
emqx_dashboard:clear_i18n().
@ -174,11 +173,6 @@ gen_api_schema_json_hotconf(Dir, Lang) ->
File = schema_filename(Dir, "hot-config-schema-", Lang),
ok = do_gen_api_schema_json(File, emqx_mgmt_api_configs, SchemaInfo).
gen_api_schema_json_connector(Dir, Lang) ->
SchemaInfo = #{title => <<"EMQX Connector API Schema">>, version => <<"0.1.0">>},
File = schema_filename(Dir, "connector-api-", Lang),
ok = do_gen_api_schema_json(File, emqx_connector_api, SchemaInfo).
gen_api_schema_json_bridge(Dir, Lang) ->
SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>},
File = schema_filename(Dir, "bridge-api-", Lang),
@ -399,6 +393,10 @@ typename_to_spec("failure_strategy()", _Mod) ->
#{type => enum, symbols => [force, drop, throw]};
typename_to_spec("initial()", _Mod) ->
#{type => string};
typename_to_spec("map()", _Mod) ->
#{type => object};
typename_to_spec("#{" ++ _, Mod) ->
typename_to_spec("map()", Mod);
typename_to_spec(Name, Mod) ->
Spec = range(Name),
Spec1 = remote_module_type(Spec, Name, Mod),

View File

@ -60,7 +60,6 @@
emqx_exhook_schema,
emqx_psk_schema,
emqx_limiter_schema,
emqx_connector_schema,
emqx_slow_subs_schema
]).

View File

@ -1,5 +1,4 @@
emqx_connector_mqtt {
num_of_bridges {
desc {
en: "The current number of bridges that are using this connector."

View File

@ -1,4 +1,85 @@
emqx_connector_mqtt_schema {
ingress_desc {
desc {
en: """The ingress config defines how this bridge receive messages from the remote MQTT broker, and then
send them to the local broker.<br/>
Template with variables is allowed in 'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'.<br/>
NOTE: if this bridge is used as the input of a rule, and also 'local.topic' is
configured, then messages got from the remote broker will be sent to both the 'local.topic' and
the rule."""
zh: """入口配置定义了该桥接如何从远程 MQTT Broker 接收消息,然后将消息发送到本地 Broker。<br/>
以下字段中允许使用带有变量的模板:'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'。<br/>
注意:如果此桥接被用作规则的输入,并且配置了 'local.topic',则从远程代理获取的消息将同时被发送到 'local.topic' 和规则。
"""
}
label: {
en: "Ingress Configs"
zh: "入方向配置"
}
}
egress_desc {
desc {
en: """The egress config defines how this bridge forwards messages from the local broker to the remote broker.<br/>
Template with variables is allowed in 'remote.topic', 'local.qos', 'local.retain', 'local.payload'.<br/>
NOTE: if this bridge is used as the action of a rule, and also 'local.topic'
is configured, then both the data got from the rule and the MQTT messages that matches
'local.topic' will be forwarded."""
zh: """出口配置定义了该桥接如何将消息从本地 Broker 转发到远程 Broker。
以下字段中允许使用带有变量的模板:'remote.topic', 'local.qos', 'local.retain', 'local.payload'。<br/>
注意:如果此桥接被用作规则的动作,并且配置了 'local.topic',则从规则输出的数据以及匹配到 'local.topic' 的 MQTT 消息都会被转发。
"""
}
label: {
en: "Egress Configs"
zh: "出方向配置"
}
}
ingress_remote {
desc {
en: """The configs about subscribing to the remote broker."""
zh: """订阅远程 Broker 相关的配置。"""
}
label: {
en: "Remote Configs"
zh: "远程配置"
}
}
ingress_local {
desc {
en: """The configs about sending message to the local broker."""
zh: """发送消息到本地 Broker 相关的配置。"""
}
label: {
en: "Local Configs"
zh: "本地配置"
}
}
egress_remote {
desc {
en: """The configs about sending message to the remote broker."""
zh: """发送消息到远程 Broker 相关的配置。"""
}
label: {
en: "Remote Configs"
zh: "远程配置"
}
}
egress_local {
desc {
en: """The configs about receiving messages from local broker."""
zh: """如何从本地 Broker 接收消息相关的配置。"""
}
label: {
en: "Local Configs"
zh: "本地配置"
}
}
mode {
desc {
en: """
@ -9,15 +90,15 @@ In 'cluster_shareload' mode, the incoming load from the remote broker is shared
using shared subscription.<br/>
Note that the 'clientid' is suffixed by the node name, this is to avoid
clientid conflicts between different nodes. And we can only use shared subscription
topic filters for <code>remote_topic</code> of ingress connections.
topic filters for <code>remote.topic</code> of ingress connections.
"""
zh: """
MQTT 桥的模式。 <br/>
- cluster_shareload在 emqx 集群的每个节点上创建一个 MQTT 连接。<br/>
在“cluster_shareload”模式下来自远程代理的传入负载通过共享订阅的方式接收。<br/>
请注意,<code>clientid</code> 以节点名称为后缀这是为了避免不同节点之间的clientid冲突。
而且对于入口连接的 <code>remote_topic</code>,我们只能使用共享订阅主题过滤器。
请注意,<code>clientid</code> 以节点名称为后缀,这是为了避免不同节点之间的 <code> clientid</code> 冲突。
而且对于入口连接的 <code>remote.topic</code>,我们只能使用共享订阅主题过滤器。
"""
}
label: {
@ -166,17 +247,6 @@ Template with variables is allowed.
}
}
ingress_hookpoint {
desc {
en: "The hook point will be triggered when there's any message received from the remote broker."
zh: "当从远程 broker 收到任何消息时,将触发该钩子。"
}
label: {
en: "Hookpoint"
zh: "挂载点"
}
}
egress_local_topic {
desc {
en: "The local topic to be forwarded to the remote broker"
@ -222,59 +292,6 @@ Template with variables is allowed.
}
}
dir {
desc {
en: """
The dir where the replayq file saved.<br/>
Set to 'false' disables the replayq feature.
"""
zh: """
replayq 文件保存的目录。<br/>
设置为 'false' 会禁用 replayq 功能。
"""
}
label: {
en: "Replayq file Save Dir"
zh: "Replayq 文件保存目录"
}
}
seg_bytes {
desc {
en: """
The size in bytes of a single segment.<br/>
A segment is mapping to a file in the replayq dir. If the current segment is full, a new segment
(file) will be opened to write.
"""
zh: """
单个段的大小(以字节为单位)。<br/>
一个段映射到 replayq 目录中的一个文件。 如果当前段已满,则新段(文件)将被打开写入。
"""
}
label: {
en: "Segment Size"
zh: "Segment 大小"
}
}
offload {
desc {
en: """
In offload mode, the disk queue is only used to offload queue tail segments.<br/>
The messages are cached in the memory first, then it writes to the replayq files after the size of
the memory cache reaches 'seg_bytes'.
"""
zh: """
在Offload模式下磁盘队列仅用于卸载队列尾段。<br/>
消息首先缓存在内存中,然后写入 replayq 文件。内存缓存大小为 “seg_bytes” 指定的值。
"""
}
label: {
en: "Offload Mode"
zh: "Offload 模式"
}
}
retain {
desc {
en: """
@ -309,66 +326,15 @@ Template with variables is allowed.
}
}
desc_connector {
server_configs {
desc {
en: """Generic configuration for the connector."""
zh: """连接器的通用配置。"""
en: """Configs related to the server."""
zh: """服务器相关的配置。"""
}
label: {
en: "Connector Generic Configuration"
zh: "连接器通用配置。"
en: "Server Configs"
zh: "服务配置。"
}
}
desc_ingress {
desc {
en: """
The ingress config defines how this bridge receive messages from the remote MQTT broker, and then send them to the local broker.<br/>
Template with variables is allowed in 'local_topic', 'remote_qos', 'qos', 'retain', 'payload'.<br/>
NOTE: if this bridge is used as the input of a rule (emqx rule engine), and also local_topic is configured, then messages got from the remote broker will be sent to both the 'local_topic' and the rule.
"""
zh: """
Ingress 模式定义了这个 bridge 如何从远程 MQTT broker 接收消息,然后将它们发送到本地 broker 。<br/>
允许带有的模板变量: 'local_topic'、'remote_qos'、'qos'、'retain'、'payload' 。<br/>
注意:如果这个 bridge 被用作规则的输入emqx 规则引擎),并且还配置了 local_topic那么从远程 broker 获取的消息将同时被发送到 'local_topic' 和规则引擎。
"""
}
label: {
en: "Ingress Config"
zh: "Ingress 模式配置"
}
}
desc_egress {
desc {
en: """
The egress config defines how this bridge forwards messages from the local broker to the remote broker.<br/>
Template with variables is allowed in 'remote_topic', 'qos', 'retain', 'payload'.<br/>
NOTE: if this bridge is used as the action of a rule (emqx rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that matches local_topic will be forwarded.
"""
zh: """
Egress 模式定义了 bridge 如何将消息从本地 broker 转发到远程 broker。<br/>
允许带有的模板变量: 'remote_topic'、'qos'、'retain'、'payload' 。<br/>
注意:如果这个 bridge 作为规则emqx 规则引擎)的输出,并且还配置了 local_topic那么从规则引擎中获取的数据和匹配 local_topic 的 MQTT 消息都会被转发到远程 broker 。
"""
}
label: {
en: "Egress Config"
zh: "Egress 模式配置"
}
}
desc_replayq {
desc {
en: """Queue messages in disk files."""
zh: """本地磁盘消息队列"""
}
label: {
en: "Replayq"
zh: "本地磁盘消息队列"
}
}
}

View File

@ -1,31 +0,0 @@
emqx_connector_schema {
mqtt {
desc {
en: "MQTT bridges."
zh: "MQTT bridges。"
}
label: {
en: "MQTT bridges"
zh: "MQTT bridges"
}
}
desc_connector {
desc {
en: """
Configuration for EMQX connectors.<br/>
A connector maintains the data related to the external resources, such as MySQL database.
"""
zh: """
EMQX 连接器的配置。<br/>
连接器维护与外部资源相关的数据,比如 MySQL 数据库。
"""
}
label: {
en: "Connector"
zh: "连接器"
}
}
}

View File

@ -20,8 +20,7 @@
%% By accident, We have always been using the upstream fork due to
%% eredis_cluster's dependency getting resolved earlier.
%% Here we pin 1.5.2 to avoid surprises in the future.
{poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}},
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.6.0"}}}
{poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}}
]}.
{shell, [

View File

@ -1,166 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector).
-export([
config_key_path/0,
pre_config_update/3,
post_config_update/5
]).
-export([
parse_connector_id/1,
connector_id/2
]).
-export([
list_raw/0,
lookup_raw/1,
lookup_raw/2,
create_dry_run/2,
update/2,
update/3,
delete/1,
delete/2
]).
%% Root path of the connectors subtree in the EMQX config.
config_key_path() ->
    [connectors].
%% Config-handler callback, runs before a connector config is persisted:
%% materializes any inline certificates under a path derived from Path.
pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
    emqx_connector_ssl:convert_certs(filename:join(Path), Conf).
-dialyzer([{nowarn_function, [post_config_update/5]}, error_handling]).
%% Config-handler callback, runs after a connector config change.
%% Removal clause: refused (throw converted to {error, _}) while any
%% bridge still references this connector; otherwise certs are removed.
post_config_update([connectors, Type, Name] = Path, '$remove', _, OldConf, _AppEnvs) ->
    ConnId = connector_id(Type, Name),
    try
        %% throws on the first bridge found that still uses this connector
        foreach_linked_bridges(ConnId, fun(#{type := BType, name := BName}) ->
            throw({dependency_bridges_exist, emqx_bridge_resource:bridge_id(BType, BName)})
        end),
        _ = emqx_connector_ssl:clear_certs(filename:join(Path), OldConf)
    catch
        throw:Error -> {error, Error}
    end;
%% Update clause: push the new connector config into every linked bridge.
post_config_update([connectors, Type, Name], _Req, NewConf, OldConf, _AppEnvs) ->
    ConnId = connector_id(Type, Name),
    foreach_linked_bridges(
        ConnId,
        fun(#{type := BType, name := BName}) ->
            BridgeConf = emqx:get_config([bridges, BType, BName]),
            case
                emqx_bridge_resource:update(
                    BType,
                    BName,
                    {BridgeConf#{connector => OldConf}, BridgeConf#{connector => NewConf}}
                )
            of
                ok -> ok;
                {error, Reason} -> error({update_bridge_error, Reason})
            end
        end
    ).
%% Build the canonical <<"Type:Name">> connector id from any
%% binary/list/atom type and name.
connector_id(Type0, Name0) ->
    iolist_to_binary([bin(Type0), ":", bin(Name0)]).
-spec parse_connector_id(binary() | list() | atom()) -> {atom(), binary()}.
%% Split an id of the exact form <<"type:name">> into {TypeAtom, NameBin}.
%% Any other shape (no colon, or more than one) raises
%% {invalid_connector_id, Id}.
parse_connector_id(ConnectorId) ->
    Parts = string:split(bin(ConnectorId), ":", all),
    case Parts of
        [Type, Name] -> {binary_to_atom(Type, utf8), Name};
        _ -> error({invalid_connector_id, ConnectorId})
    end.
%% List every connector as a raw (binary-keyed) config map, each
%% enriched with its <<"type">> and <<"name">>. Returns [] when no
%% connectors are configured.
list_raw() ->
    case get_raw_connector_conf() of
        not_found ->
            [];
        Config ->
            %% outer fold: per connector type; inner fold: per named connector
            lists:foldl(
                fun({Type, NameAndConf}, Connectors) ->
                    lists:foldl(
                        fun({Name, RawConf}, Acc) ->
                            [RawConf#{<<"type">> => Type, <<"name">> => Name} | Acc]
                        end,
                        Connectors,
                        maps:to_list(NameAndConf)
                    )
                end,
                [],
                maps:to_list(Config)
            )
    end.
%% Look up one connector's raw config by <<"type:name">> id, or by
%% separate type and name. Returns {ok, Conf} with <<"type">>/<<"name">>
%% injected, or {error, not_found}.
lookup_raw(Id) when is_binary(Id) ->
    {Type, Name} = parse_connector_id(Id),
    lookup_raw(Type, Name).

lookup_raw(Type, Name) ->
    %% both path elements normalized to binaries for deep_get
    Path = [bin(P) || P <- [Type, Name]],
    case get_raw_connector_conf() of
        not_found ->
            {error, not_found};
        Conf ->
            case emqx_map_lib:deep_get(Path, Conf, not_found) of
                not_found -> {error, not_found};
                Conf1 -> {ok, Conf1#{<<"type">> => Type, <<"name">> => Name}}
            end
    end.
%% Validate a connector config against the real resource without
%% persisting it.
-spec create_dry_run(module(), binary() | #{binary() => term()} | [#{binary() => term()}]) ->
    ok | {error, Reason :: term()}.
create_dry_run(Type, Conf) ->
    emqx_bridge_resource:create_dry_run(Type, Conf).

%% Create or update a connector; the cluster override makes the change
%% take effect on all nodes.
update(Id, Conf) when is_binary(Id) ->
    {Type, Name} = parse_connector_id(Id),
    update(Type, Name, Conf).

update(Type, Name, Conf) ->
    emqx_conf:update(config_key_path() ++ [Type, Name], Conf, #{override_to => cluster}).

%% Remove a connector cluster-wide.
delete(Id) when is_binary(Id) ->
    {Type, Name} = parse_connector_id(Id),
    delete(Type, Name).

delete(Type, Name) ->
    emqx_conf:remove(config_key_path() ++ [Type, Name], #{override_to => cluster}).
%% Fetch the raw connectors config with schema defaults filled in;
%% returns the atom not_found when the root key is absent.
get_raw_connector_conf() ->
    case emqx:get_raw_config(config_key_path(), not_found) of
        not_found ->
            not_found;
        RawConf ->
            %% re-wrap so fill_defaults applies the connectors schema
            #{<<"connectors">> := Conf} =
                emqx_config:fill_defaults(#{<<"connectors">> => RawConf}),
            Conf
    end.
%% Coerce a binary, string, or atom to a binary.
bin(B) when is_binary(B) -> B;
bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
bin(L) when is_list(L) -> list_to_binary(L).
%% Apply Do/1 to every bridge whose raw config references ConnId via its
%% <<"connector">> field; all other bridges are ignored.
foreach_linked_bridges(ConnId, Do) ->
    lists:foreach(
        fun
            (#{raw_config := #{<<"connector">> := ConnId0}} = Bridge) when ConnId0 == ConnId ->
                Do(Bridge);
            (_) ->
                ok
        end,
        emqx_bridge:list()
    ).

View File

@ -1,331 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_api).
-behaviour(minirest_api).
-include("emqx_connector.hrl").
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-import(hoconsc, [mk/2, ref/2, array/1, enum/1]).
%% Swagger specs from hocon schema
-export([api_spec/0, paths/0, schema/1, namespace/0]).
%% API callbacks
-export(['/connectors_test'/2, '/connectors'/2, '/connectors/:id'/2]).
-define(CONN_TYPES, [mqtt]).
%% Parse the connector id in ID and evaluate EXPR with ConnType/ConnName
%% bound; an invalid id yields a 400 response instead.
%% FIX: the macro body previously referenced the literal variable `Id`
%% rather than its own parameter `ID`, so it only worked when the call
%% site happened to bind a variable named `Id` (unhygienic capture).
-define(TRY_PARSE_ID(ID, EXPR),
    try emqx_connector:parse_connector_id(ID) of
        {ConnType, ConnName} ->
            %% silence "unused variable" when EXPR ignores ConnName
            _ = ConnName,
            EXPR
    catch
        error:{invalid_connector_id, Id0} ->
            {400, #{
                code => 'INVALID_ID',
                message =>
                    <<"invalid_connector_id: ", Id0/binary,
                        ". Connector Ids must be of format {type}:{name}">>
            }}
    end
).
%% Swagger namespace for this API module.
namespace() -> "connector".

api_spec() ->
    emqx_dashboard_swagger:spec(?MODULE, #{check_schema => false}).

paths() -> ["/connectors_test", "/connectors", "/connectors/:id"].

%% Build an error-response schema; accepts the message as a string or a
%% binary (strings are converted first).
error_schema(Codes, Message) when is_list(Message) ->
    error_schema(Codes, list_to_binary(Message));
error_schema(Codes, Message) when is_binary(Message) ->
    emqx_dashboard_swagger:error_codes(Codes, Message).
%% Request/response body schemas, each paired with per-type examples.
put_request_body_schema() ->
    emqx_dashboard_swagger:schema_with_examples(
        emqx_connector_schema:put_request(), connector_info_examples(put)
    ).

post_request_body_schema() ->
    emqx_dashboard_swagger:schema_with_examples(
        emqx_connector_schema:post_request(), connector_info_examples(post)
    ).

get_response_body_schema() ->
    emqx_dashboard_swagger:schema_with_examples(
        emqx_connector_schema:get_response(), connector_info_examples(get)
    ).
%% Example values only (no summaries), for array responses in the docs.
connector_info_array_example(Method) ->
    [Config || #{value := Config} <- maps:values(connector_info_examples(Method))].

%% One documented example per supported connector type (?CONN_TYPES).
connector_info_examples(Method) ->
    lists:foldl(
        fun(Type, Acc) ->
            SType = atom_to_list(Type),
            maps:merge(Acc, #{
                Type => #{
                    summary => bin(string:uppercase(SType) ++ " Connector"),
                    value => info_example(Type, Method)
                }
            })
        end,
        #{},
        ?CONN_TYPES
    ).

%% Baseline config plus the method-specific identity fields.
info_example(Type, Method) ->
    maps:merge(
        info_example_basic(Type),
        method_example(Type, Method)
    ).

%% get/post examples carry type and name; put bodies do not (the id is
%% in the URL path for PUT).
method_example(Type, Method) when Method == get; Method == post ->
    SType = atom_to_list(Type),
    SName = "my_" ++ SType ++ "_connector",
    #{
        type => bin(SType),
        name => bin(SName)
    };
method_example(_Type, put) ->
    #{}.
%% Baseline example config for an MQTT connector (the only ?CONN_TYPES
%% entry in this module).
info_example_basic(mqtt) ->
    #{
        mode => cluster_shareload,
        server => <<"127.0.0.1:1883">>,
        reconnect_interval => <<"15s">>,
        proto_ver => <<"v4">>,
        username => <<"foo">>,
        password => <<"bar">>,
        clientid => <<"foo">>,
        clean_start => true,
        keepalive => <<"300s">>,
        retry_interval => <<"15s">>,
        max_inflight => 100,
        ssl => #{
            enable => false
        }
    }.
%% Path-parameter spec for the {type}:{name} connector id.
param_path_id() ->
    [
        {id,
            mk(
                binary(),
                #{
                    in => path,
                    example => <<"mqtt:my_mqtt_connector">>,
                    desc => ?DESC("id")
                }
            )}
    ].
%% OpenAPI schema, one clause per path listed in paths/0.
%% Dry-run endpoint: validates a connector config without creating it.
schema("/connectors_test") ->
    #{
        'operationId' => '/connectors_test',
        post => #{
            tags => [<<"connectors">>],
            desc => ?DESC("conn_test_post"),
            summary => <<"Test creating connector">>,
            'requestBody' => post_request_body_schema(),
            responses => #{
                204 => <<"Test connector OK">>,
                400 => error_schema(['TEST_FAILED'], "connector test failed")
            }
        }
    };
%% Collection endpoint: list and create.
schema("/connectors") ->
    #{
        'operationId' => '/connectors',
        get => #{
            tags => [<<"connectors">>],
            desc => ?DESC("conn_get"),
            summary => <<"List connectors">>,
            responses => #{
                200 => emqx_dashboard_swagger:schema_with_example(
                    array(emqx_connector_schema:get_response()),
                    connector_info_array_example(get)
                )
            }
        },
        post => #{
            tags => [<<"connectors">>],
            desc => ?DESC("conn_post"),
            summary => <<"Create connector">>,
            'requestBody' => post_request_body_schema(),
            responses => #{
                201 => get_response_body_schema(),
                400 => error_schema(['ALREADY_EXISTS'], "connector already exists")
            }
        }
    };
%% Single-connector endpoint: fetch, update, delete by {type}:{name} id.
schema("/connectors/:id") ->
    #{
        'operationId' => '/connectors/:id',
        get => #{
            tags => [<<"connectors">>],
            desc => ?DESC("conn_id_get"),
            summary => <<"Get connector">>,
            parameters => param_path_id(),
            responses => #{
                200 => get_response_body_schema(),
                404 => error_schema(['NOT_FOUND'], "Connector not found"),
                400 => error_schema(['INVALID_ID'], "Bad connector ID")
            }
        },
        put => #{
            tags => [<<"connectors">>],
            desc => ?DESC("conn_id_put"),
            summary => <<"Update connector">>,
            parameters => param_path_id(),
            'requestBody' => put_request_body_schema(),
            responses => #{
                200 => get_response_body_schema(),
                404 => error_schema(['NOT_FOUND'], "Connector not found"),
                400 => error_schema(['INVALID_ID'], "Bad connector ID")
            }
        },
        delete => #{
            tags => [<<"connectors">>],
            desc => ?DESC("conn_id_delete"),
            summary => <<"Delete connector">>,
            parameters => param_path_id(),
            responses => #{
                204 => <<"Delete connector successfully">>,
                403 => error_schema(['DEPENDENCY_EXISTS'], "Cannot remove dependent connector"),
                404 => error_schema(['NOT_FOUND'], "Delete failed, not found"),
                400 => error_schema(['INVALID_ID'], "Bad connector ID")
            }
        }
    }.
%% POST /connectors_test: dry-run the given connector config;
%% 204 on success, 400 with the failure reason otherwise.
'/connectors_test'(post, #{body := #{<<"type">> := ConnType} = Params}) ->
    case emqx_connector:create_dry_run(ConnType, maps:remove(<<"type">>, Params)) of
        ok ->
            {204};
        {error, Error} ->
            {400, error_msg(['TEST_FAILED'], Error)}
    end.
%% GET /connectors: list all connectors.
%% POST /connectors: create a connector, rejecting duplicates; a body
%% missing type/name falls through to the final clause (400).
'/connectors'(get, _Request) ->
    {200, [format_resp(Conn) || Conn <- emqx_connector:list_raw()]};
'/connectors'(post, #{body := #{<<"type">> := ConnType, <<"name">> := ConnName} = Params}) ->
    case emqx_connector:lookup_raw(ConnType, ConnName) of
        {ok, _} ->
            {400, error_msg('ALREADY_EXISTS', <<"connector already exists">>)};
        {error, not_found} ->
            case
                emqx_connector:update(
                    ConnType,
                    ConnName,
                    filter_out_request_body(Params)
                )
            of
                {ok, #{raw_config := RawConf}} ->
                    {201,
                        format_resp(RawConf#{
                            <<"type">> => ConnType,
                            <<"name">> => ConnName
                        })};
                {error, Error} ->
                    {400, error_msg('BAD_REQUEST', Error)}
            end
    end;
'/connectors'(post, _) ->
    {400, error_msg('BAD_REQUEST', <<"missing some required fields: [name, type]">>)}.
%% GET/PUT/DELETE /connectors/:id — all three go through ?TRY_PARSE_ID,
%% which binds ConnType/ConnName or returns 400 for a malformed id.
'/connectors/:id'(get, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(
        Id,
        case emqx_connector:lookup_raw(ConnType, ConnName) of
            {ok, Conf} ->
                {200, format_resp(Conf)};
            {error, not_found} ->
                {404, error_msg('NOT_FOUND', <<"connector not found">>)}
        end
    );
%% PUT only updates existing connectors (404 otherwise); creation goes
%% through POST /connectors.
'/connectors/:id'(put, #{bindings := #{id := Id}, body := Params0}) ->
    Params = filter_out_request_body(Params0),
    ?TRY_PARSE_ID(
        Id,
        case emqx_connector:lookup_raw(ConnType, ConnName) of
            {ok, _} ->
                case emqx_connector:update(ConnType, ConnName, Params) of
                    {ok, #{raw_config := RawConf}} ->
                        {200,
                            format_resp(RawConf#{
                                <<"type">> => ConnType,
                                <<"name">> => ConnName
                            })};
                    {error, Error} ->
                        {500, error_msg('INTERNAL_ERROR', Error)}
                end;
            {error, not_found} ->
                {404, error_msg('NOT_FOUND', <<"connector not found">>)}
        end
    );
%% DELETE refuses (403) while a bridge still depends on the connector.
'/connectors/:id'(delete, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(
        Id,
        case emqx_connector:lookup_raw(ConnType, ConnName) of
            {ok, _} ->
                case emqx_connector:delete(ConnType, ConnName) of
                    {ok, _} ->
                        {204};
                    {error, {post_config_update, _, {dependency_bridges_exist, BridgeID}}} ->
                        {403,
                            error_msg(
                                'DEPENDENCY_EXISTS',
                                <<"Cannot remove the connector as it's in use by a bridge: ",
                                    BridgeID/binary>>
                            )};
                    {error, Error} ->
                        {500, error_msg('INTERNAL_ERROR', Error)}
                end;
            {error, not_found} ->
                {404, error_msg('NOT_FOUND', <<"connector not found">>)}
        end
    ).
%% Standard error payload: code atom plus a human-readable message.
error_msg(Code, Msg) ->
    #{code => Code, message => emqx_misc:readable_error_msg(Msg)}.
%% Enrich a raw connector config with its type, name, and the number of
%% bridges currently using it.
format_resp(#{<<"type">> := ConnType, <<"name">> := ConnName} = RawConf) ->
    NumOfBridges = length(
        emqx_bridge:list_bridges_by_connector(
            emqx_connector:connector_id(ConnType, ConnName)
        )
    ),
    RawConf#{
        <<"type">> => ConnType,
        <<"name">> => ConnName,
        <<"num_of_bridges">> => NumOfBridges
    }.
%% Drop read-only / derived fields from a client-supplied request body
%% before it is written to the config.
filter_out_request_body(Conf) ->
    ExtraConfs = [<<"clientid">>, <<"num_of_bridges">>, <<"type">>, <<"name">>],
    maps:without(ExtraConfs, Conf).
%% Coerce a string to a binary. Generalized (backward-compatibly) to
%% pass binaries through unchanged, so callers need not pre-convert;
%% the original clause raised function_clause on binary input.
bin(S) when is_binary(S) ->
    S;
bin(S) when is_list(S) ->
    list_to_binary(S).

View File

@ -20,15 +20,10 @@
-export([start/2, stop/1]).
-define(CONF_HDLR_PATH, (emqx_connector:config_key_path() ++ ['?', '?'])).
start(_StartType, _StartArgs) ->
ok = emqx_config_handler:add_handler(?CONF_HDLR_PATH, emqx_connector),
emqx_connector_mqtt_worker:register_metrics(),
emqx_connector_sup:start_link().
stop(_State) ->
emqx_config_handler:remove_handler(?CONF_HDLR_PATH),
ok.
%% internal functions

View File

@ -26,10 +26,13 @@
%% callbacks of behaviour emqx_resource
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_query/4,
on_get_status/2
on_query/3,
on_query_async/4,
on_get_status/2,
reply_delegator/2
]).
-type url() :: emqx_http_lib:uri_map().
@ -44,7 +47,7 @@
namespace/0
]).
-export([check_ssl_opts/2]).
-export([check_ssl_opts/2, validate_method/1]).
-type connect_timeout() :: emqx_schema:duration() | infinity.
-type pool_type() :: random | hash.
@ -135,8 +138,10 @@ fields(config) ->
fields("request") ->
[
{method,
hoconsc:mk(hoconsc:enum([post, put, get, delete]), #{
required => false, desc => ?DESC("method")
hoconsc:mk(binary(), #{
required => false,
desc => ?DESC("method"),
validator => fun ?MODULE:validate_method/1
})},
{path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})},
{body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})},
@ -169,11 +174,24 @@ desc(_) ->
validations() ->
[{check_ssl_opts, fun check_ssl_opts/1}].
validate_method(M) when M =:= <<"post">>; M =:= <<"put">>; M =:= <<"get">>; M =:= <<"delete">> ->
ok;
validate_method(M) ->
case string:find(M, "${") of
nomatch ->
{error,
<<"Invalid method, should be one of 'post', 'put', 'get', 'delete' or variables in ${field} format.">>};
_ ->
ok
end.
sc(Type, Meta) -> hoconsc:mk(Type, Meta).
ref(Field) -> hoconsc:ref(?MODULE, Field).
%% ===================================================================
callback_mode() -> async_if_possible.
on_start(
InstId,
#{
@ -235,10 +253,11 @@ on_stop(InstId, #{pool_name := PoolName}) ->
}),
ehttpc_sup:stop_pool(PoolName).
on_query(InstId, {send_message, Msg}, AfterQuery, State) ->
on_query(InstId, {send_message, Msg}, State) ->
case maps:get(request, State, undefined) of
undefined ->
?SLOG(error, #{msg => "request_not_found", connector => InstId});
?SLOG(error, #{msg => "arg_request_not_found", connector => InstId}),
{error, arg_request_not_found};
Request ->
#{
method := Method,
@ -251,18 +270,16 @@ on_query(InstId, {send_message, Msg}, AfterQuery, State) ->
on_query(
InstId,
{undefined, Method, {Path, Headers, Body}, Timeout, Retry},
AfterQuery,
State
)
end;
on_query(InstId, {Method, Request}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, 5000, 2}, AfterQuery, State);
on_query(InstId, {Method, Request, Timeout}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, Timeout, 2}, AfterQuery, State);
on_query(InstId, {Method, Request}, State) ->
on_query(InstId, {undefined, Method, Request, 5000, 2}, State);
on_query(InstId, {Method, Request, Timeout}, State) ->
on_query(InstId, {undefined, Method, Request, Timeout, 2}, State);
on_query(
InstId,
{KeyOrNum, Method, Request, Timeout, Retry},
AfterQuery,
#{pool_name := PoolName, base_path := BasePath} = State
) ->
?TRACE(
@ -272,7 +289,7 @@ on_query(
),
NRequest = formalize_request(Method, BasePath, Request),
case
Result = ehttpc:request(
ehttpc:request(
case KeyOrNum of
undefined -> PoolName;
_ -> {PoolName, KeyOrNum}
@ -283,36 +300,87 @@ on_query(
Retry
)
of
{error, Reason} ->
{error, Reason} when Reason =:= econnrefused; Reason =:= timeout ->
?SLOG(warning, #{
msg => "http_connector_do_request_failed",
reason => Reason,
connector => InstId
}),
{error, {recoverable_error, Reason}};
{error, Reason} = Result ->
?SLOG(error, #{
msg => "http_connector_do_reqeust_failed",
msg => "http_connector_do_request_failed",
request => NRequest,
reason => Reason,
connector => InstId
}),
emqx_resource:query_failed(AfterQuery);
{ok, StatusCode, _} when StatusCode >= 200 andalso StatusCode < 300 ->
emqx_resource:query_success(AfterQuery);
{ok, StatusCode, _, _} when StatusCode >= 200 andalso StatusCode < 300 ->
emqx_resource:query_success(AfterQuery);
{ok, StatusCode, _} ->
Result;
{ok, StatusCode, _} = Result when StatusCode >= 200 andalso StatusCode < 300 ->
Result;
{ok, StatusCode, _, _} = Result when StatusCode >= 200 andalso StatusCode < 300 ->
Result;
{ok, StatusCode, Headers} ->
?SLOG(error, #{
msg => "http connector do request, received error response",
request => NRequest,
connector => InstId,
status_code => StatusCode
}),
emqx_resource:query_failed(AfterQuery);
{ok, StatusCode, _, _} ->
{error, #{status_code => StatusCode, headers => Headers}};
{ok, StatusCode, Headers, Body} ->
?SLOG(error, #{
msg => "http connector do request, received error response",
request => NRequest,
connector => InstId,
status_code => StatusCode
}),
emqx_resource:query_failed(AfterQuery)
end,
Result.
{error, #{status_code => StatusCode, headers => Headers, body => Body}}
end.
on_query_async(InstId, {send_message, Msg}, ReplyFunAndArgs, State) ->
case maps:get(request, State, undefined) of
undefined ->
?SLOG(error, #{msg => "arg_request_not_found", connector => InstId}),
{error, arg_request_not_found};
Request ->
#{
method := Method,
path := Path,
body := Body,
headers := Headers,
request_timeout := Timeout
} = process_request(Request, Msg),
on_query_async(
InstId,
{undefined, Method, {Path, Headers, Body}, Timeout},
ReplyFunAndArgs,
State
)
end;
on_query_async(
InstId,
{KeyOrNum, Method, Request, Timeout},
ReplyFunAndArgs,
#{pool_name := PoolName, base_path := BasePath} = State
) ->
?TRACE(
"QUERY_ASYNC",
"http_connector_received",
#{request => Request, connector => InstId, state => State}
),
NRequest = formalize_request(Method, BasePath, Request),
Worker =
case KeyOrNum of
undefined -> ehttpc_pool:pick_worker(PoolName);
_ -> ehttpc_pool:pick_worker(PoolName, KeyOrNum)
end,
ok = ehttpc:request_async(
Worker,
Method,
NRequest,
Timeout,
{fun ?MODULE:reply_delegator/2, [ReplyFunAndArgs]}
).
on_get_status(_InstId, #{pool_name := PoolName, connect_timeout := Timeout} = State) ->
case do_get_status(PoolName, Timeout) of
@ -355,7 +423,6 @@ do_get_status(PoolName, Timeout) ->
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
preprocess_request(undefined) ->
undefined;
preprocess_request(Req) when map_size(Req) == 0 ->
@ -468,3 +535,12 @@ bin(Str) when is_list(Str) ->
list_to_binary(Str);
bin(Atom) when is_atom(Atom) ->
atom_to_binary(Atom, utf8).
reply_delegator(ReplyFunAndArgs, Result) ->
case Result of
{error, Reason} when Reason =:= econnrefused; Reason =:= timeout ->
Result1 = {error, {recoverable_error, Reason}},
emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result1);
_ ->
emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result)
end.

View File

@ -25,9 +25,10 @@
%% callbacks of behaviour emqx_resource
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_query/4,
on_query/3,
on_get_status/2
]).
@ -42,6 +43,8 @@ roots() ->
fields(_) -> [].
%% ===================================================================
callback_mode() -> always_sync.
on_start(
InstId,
#{
@ -99,7 +102,7 @@ on_stop(InstId, #{poolname := PoolName}) ->
}),
emqx_plugin_libs_pool:stop_pool(PoolName).
on_query(InstId, {search, Base, Filter, Attributes}, AfterQuery, #{poolname := PoolName} = State) ->
on_query(InstId, {search, Base, Filter, Attributes}, #{poolname := PoolName} = State) ->
Request = {Base, Filter, Attributes},
?TRACE(
"QUERY",
@ -119,10 +122,9 @@ on_query(InstId, {search, Base, Filter, Attributes}, AfterQuery, #{poolname := P
request => Request,
connector => InstId,
reason => Reason
}),
emqx_resource:query_failed(AfterQuery);
});
_ ->
emqx_resource:query_success(AfterQuery)
ok
end,
Result.

View File

@ -25,9 +25,10 @@
%% callbacks of behaviour emqx_resource
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_query/4,
on_query/3,
on_get_status/2
]).
@ -36,7 +37,7 @@
-export([roots/0, fields/1, desc/1]).
-export([mongo_query/5, check_worker_health/1]).
-export([mongo_query/5, mongo_insert/3, check_worker_health/1]).
-define(HEALTH_CHECK_TIMEOUT, 30000).
@ -46,6 +47,10 @@
default_port => ?MONGO_DEFAULT_PORT
}).
-ifdef(TEST).
-export([to_servers_raw/1]).
-endif.
%%=====================================================================
roots() ->
[
@ -139,6 +144,8 @@ mongo_fields() ->
%% ===================================================================
callback_mode() -> always_sync.
on_start(
InstId,
Config = #{
@ -174,9 +181,16 @@ on_start(
{worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)}
],
PoolName = emqx_plugin_libs_pool:pool_name(InstId),
Collection = maps:get(collection, Config, <<"mqtt">>),
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of
ok -> {ok, #{poolname => PoolName, type => Type}};
{error, Reason} -> {error, Reason}
ok ->
{ok, #{
poolname => PoolName,
type => Type,
collection => Collection
}};
{error, Reason} ->
{error, Reason}
end.
on_stop(InstId, #{poolname := PoolName}) ->
@ -186,10 +200,38 @@ on_stop(InstId, #{poolname := PoolName}) ->
}),
emqx_plugin_libs_pool:stop_pool(PoolName).
on_query(
InstId,
{send_message, Document},
#{poolname := PoolName, collection := Collection} = State
) ->
Request = {insert, Collection, Document},
?TRACE(
"QUERY",
"mongodb_connector_received",
#{request => Request, connector => InstId, state => State}
),
case
ecpool:pick_and_do(
PoolName,
{?MODULE, mongo_insert, [Collection, Document]},
no_handover
)
of
{{false, Reason}, _Document} ->
?SLOG(error, #{
msg => "mongodb_connector_do_query_failed",
request => Request,
reason => Reason,
connector => InstId
}),
{error, Reason};
{{true, _Info}, _Document} ->
ok
end;
on_query(
InstId,
{Action, Collection, Filter, Projector},
AfterQuery,
#{poolname := PoolName} = State
) ->
Request = {Action, Collection, Filter, Projector},
@ -212,14 +254,11 @@ on_query(
reason => Reason,
connector => InstId
}),
emqx_resource:query_failed(AfterQuery),
{error, Reason};
{ok, Cursor} when is_pid(Cursor) ->
emqx_resource:query_success(AfterQuery),
mc_cursor:foldl(fun(O, Acc2) -> [O | Acc2] end, [], Cursor, 1000);
{ok, mc_cursor:foldl(fun(O, Acc2) -> [O | Acc2] end, [], Cursor, 1000)};
Result ->
emqx_resource:query_success(AfterQuery),
Result
{ok, Result}
end.
-dialyzer({nowarn_function, [on_get_status/2]}).
@ -293,6 +332,9 @@ mongo_query(Conn, find_one, Collection, Filter, Projector) ->
mongo_query(_Conn, _Action, _Collection, _Filter, _Projector) ->
ok.
mongo_insert(Conn, Collection, Documents) ->
mongo_api:insert(Conn, Collection, Documents).
init_type(#{mongo_type := rs, replica_set_name := ReplicaSetName}) ->
{rs, ReplicaSetName};
init_type(#{mongo_type := Type}) ->
@ -409,7 +451,7 @@ may_parse_srv_and_txt_records_(
true ->
error({missing_parameter, replica_set_name});
false ->
Config#{hosts => servers_to_bin(Servers)}
Config#{hosts => servers_to_bin(lists:flatten(Servers))}
end;
may_parse_srv_and_txt_records_(
#{
@ -519,9 +561,33 @@ to_servers_raw(Servers) ->
fun(Server) ->
emqx_connector_schema_lib:parse_server(Server, ?MONGO_HOST_OPTIONS)
end,
string:tokens(str(Servers), ", ")
split_servers(Servers)
).
split_servers(L) when is_list(L) ->
PossibleTypes = [
list(binary()),
list(string()),
string()
],
TypeChecks = lists:map(fun(T) -> typerefl:typecheck(T, L) end, PossibleTypes),
case TypeChecks of
[ok, _, _] ->
%% list(binary())
lists:map(fun binary_to_list/1, L);
[_, ok, _] ->
%% list(string())
L;
[_, _, ok] ->
%% string()
string:tokens(L, ", ");
[_, _, _] ->
%% invalid input
throw("List of servers must contain only strings")
end;
split_servers(B) when is_binary(B) ->
string:tokens(str(B), ", ").
str(A) when is_atom(A) ->
atom_to_list(A);
str(B) when is_binary(B) ->

View File

@ -24,6 +24,7 @@
%% API and callbacks for supervisor
-export([
callback_mode/0,
start_link/0,
init/1,
create_bridge/1,
@ -37,7 +38,8 @@
-export([
on_start/2,
on_stop/2,
on_query/4,
on_query/3,
on_query_async/4,
on_get_status/2
]).
@ -66,7 +68,7 @@ fields("get") ->
)}
] ++ fields("post");
fields("put") ->
emqx_connector_mqtt_schema:fields("connector");
emqx_connector_mqtt_schema:fields("server_configs");
fields("post") ->
[
{type,
@ -133,11 +135,13 @@ drop_bridge(Name) ->
%% ===================================================================
%% When use this bridge as a data source, ?MODULE:on_message_received will be called
%% if the bridge received msgs from the remote broker.
on_message_received(Msg, HookPoint, InstId) ->
_ = emqx_resource:query(InstId, {message_received, Msg}),
on_message_received(Msg, HookPoint, ResId) ->
emqx_resource:inc_received(ResId),
emqx:run_hook(HookPoint, [Msg]).
%% ===================================================================
callback_mode() -> async_if_possible.
on_start(InstId, Conf) ->
InstanceId = binary_to_atom(InstId, utf8),
?SLOG(info, #{
@ -149,7 +153,7 @@ on_start(InstId, Conf) ->
BridgeConf = BasicConf#{
name => InstanceId,
clientid => clientid(InstId),
subscriptions => make_sub_confs(maps:get(ingress, Conf, undefined), InstId),
subscriptions => make_sub_confs(maps:get(ingress, Conf, undefined), Conf, InstId),
forwards => make_forward_confs(maps:get(egress, Conf, undefined))
},
case ?MODULE:create_bridge(BridgeConf) of
@ -181,12 +185,18 @@ on_stop(_InstId, #{name := InstanceId}) ->
})
end.
on_query(_InstId, {message_received, _Msg}, AfterQuery, _State) ->
emqx_resource:query_success(AfterQuery);
on_query(_InstId, {send_message, Msg}, AfterQuery, #{name := InstanceId}) ->
on_query(_InstId, {send_message, Msg}, #{name := InstanceId}) ->
?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => InstanceId}),
emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg),
emqx_resource:query_success(AfterQuery).
emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg).
on_query_async(
_InstId,
{send_message, Msg},
{ReplayFun, Args},
#{name := InstanceId}
) ->
?TRACE("QUERY", "async_send_msg_to_remote_node", #{message => Msg, connector => InstanceId}),
emqx_connector_mqtt_worker:send_to_remote_async(InstanceId, Msg, {ReplayFun, Args}).
on_get_status(_InstId, #{name := InstanceId, bridge_conf := Conf}) ->
AutoReconn = maps:get(auto_reconnect, Conf, true),
@ -202,17 +212,18 @@ ensure_mqtt_worker_started(InstanceId, BridgeConf) ->
{error, Reason} -> {error, Reason}
end.
make_sub_confs(EmptyMap, _) when map_size(EmptyMap) == 0 ->
make_sub_confs(EmptyMap, _Conf, _) when map_size(EmptyMap) == 0 ->
undefined;
make_sub_confs(undefined, _) ->
make_sub_confs(undefined, _Conf, _) ->
undefined;
make_sub_confs(SubRemoteConf, InstId) ->
case maps:take(hookpoint, SubRemoteConf) of
make_sub_confs(SubRemoteConf, Conf, InstId) ->
ResId = emqx_resource_manager:manager_id_to_resource_id(InstId),
case maps:find(hookpoint, Conf) of
error ->
SubRemoteConf;
{HookPoint, SubConf} ->
MFA = {?MODULE, on_message_received, [HookPoint, InstId]},
SubConf#{on_message_received => MFA}
error({no_hookpoint_provided, Conf});
{ok, HookPoint} ->
MFA = {?MODULE, on_message_received, [HookPoint, ResId]},
SubRemoteConf#{on_message_received => MFA}
end.
make_forward_confs(EmptyMap) when map_size(EmptyMap) == 0 ->
@ -232,12 +243,10 @@ basic_config(
keepalive := KeepAlive,
retry_interval := RetryIntv,
max_inflight := MaxInflight,
replayq := ReplayQ,
ssl := #{enable := EnableSsl} = Ssl
} = Conf
) ->
#{
replayq => ReplayQ,
BaiscConf = #{
%% connection opts
server => Server,
%% 30s
@ -251,9 +260,6 @@ basic_config(
%% non-standard mqtt connection packets will be filtered out by LB.
%% So let's disable bridge_mode.
bridge_mode => BridgeMode,
%% should be iolist for emqtt
username => maps:get(username, Conf, <<>>),
password => maps:get(password, Conf, <<>>),
clean_start => CleanStart,
keepalive => ms_to_s(KeepAlive),
retry_interval => RetryIntv,
@ -261,7 +267,20 @@ basic_config(
ssl => EnableSsl,
ssl_opts => maps:to_list(maps:remove(enable, Ssl)),
if_record_metrics => true
}.
},
maybe_put_fields([username, password], Conf, BaiscConf).
maybe_put_fields(Fields, Conf, Acc0) ->
lists:foldl(
fun(Key, Acc) ->
case maps:find(Key, Conf) of
error -> Acc;
{ok, Val} -> Acc#{Key => Val}
end
end,
Acc0,
Fields
).
ms_to_s(Ms) ->
erlang:ceil(Ms / 1000).

View File

@ -19,14 +19,17 @@
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
-behaviour(emqx_resource).
%% callbacks of behaviour emqx_resource
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_query/4,
on_query/3,
on_batch_query/3,
on_get_status/2
]).
@ -44,6 +47,19 @@
default_port => ?MYSQL_DEFAULT_PORT
}).
-type prepares() :: #{atom() => binary()}.
-type params_tokens() :: #{atom() => list()}.
-type sqls() :: #{atom() => binary()}.
-type state() ::
#{
poolname := atom(),
auto_reconnect := boolean(),
prepare_statement := prepares(),
params_tokens := params_tokens(),
batch_inserts := sqls(),
batch_params_tokens := params_tokens()
}.
%%=====================================================================
%% Hocon schema
roots() ->
@ -63,6 +79,9 @@ server(desc) -> ?DESC("server");
server(_) -> undefined.
%% ===================================================================
callback_mode() -> always_sync.
-spec on_start(binary(), hoconsc:config()) -> {ok, state()} | {error, _}.
on_start(
InstId,
#{
@ -97,11 +116,17 @@ on_start(
{pool_size, PoolSize}
],
PoolName = emqx_plugin_libs_pool:pool_name(InstId),
Prepares = maps:get(prepare_statement, Config, #{}),
State = #{poolname => PoolName, prepare_statement => Prepares, auto_reconnect => AutoReconn},
Prepares = parse_prepare_sql(Config),
State = maps:merge(#{poolname => PoolName, auto_reconnect => AutoReconn}, Prepares),
case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
ok -> {ok, init_prepare(State)};
{error, Reason} -> {error, Reason}
ok ->
{ok, init_prepare(State)};
{error, Reason} ->
?tp(
mysql_connector_start_failed,
#{error => Reason}
),
{error, Reason}
end.
on_stop(InstId, #{poolname := PoolName}) ->
@ -111,63 +136,62 @@ on_stop(InstId, #{poolname := PoolName}) ->
}),
emqx_plugin_libs_pool:stop_pool(PoolName).
on_query(InstId, {Type, SQLOrKey}, AfterQuery, State) ->
on_query(InstId, {Type, SQLOrKey, [], default_timeout}, AfterQuery, State);
on_query(InstId, {Type, SQLOrKey, Params}, AfterQuery, State) ->
on_query(InstId, {Type, SQLOrKey, Params, default_timeout}, AfterQuery, State);
on_query(InstId, {TypeOrKey, SQLOrKey}, State) ->
on_query(InstId, {TypeOrKey, SQLOrKey, [], default_timeout}, State);
on_query(InstId, {TypeOrKey, SQLOrKey, Params}, State) ->
on_query(InstId, {TypeOrKey, SQLOrKey, Params, default_timeout}, State);
on_query(
InstId,
{Type, SQLOrKey, Params, Timeout},
AfterQuery,
{TypeOrKey, SQLOrKey, Params, Timeout},
#{poolname := PoolName, prepare_statement := Prepares} = State
) ->
LogMeta = #{connector => InstId, sql => SQLOrKey, state => State},
?TRACE("QUERY", "mysql_connector_received", LogMeta),
Worker = ecpool:get_client(PoolName),
{ok, Conn} = ecpool_worker:client(Worker),
MySqlFunction = mysql_function(Type),
Result = erlang:apply(mysql, MySqlFunction, [Conn, SQLOrKey, Params, Timeout]),
case Result of
{error, disconnected} ->
?SLOG(
error,
LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => disconnected}
),
%% kill the poll worker to trigger reconnection
_ = exit(Conn, restart),
emqx_resource:query_failed(AfterQuery),
Result;
MySqlFunction = mysql_function(TypeOrKey),
{SQLOrKey2, Data} = proc_sql_params(TypeOrKey, SQLOrKey, Params, State),
case on_sql_query(InstId, MySqlFunction, SQLOrKey2, Data, Timeout, State) of
{error, not_prepared} ->
?SLOG(
warning,
LogMeta#{msg => "mysql_connector_prepare_query_failed", reason => not_prepared}
),
case prepare_sql(Prepares, PoolName) of
ok ->
%% not return result, next loop will try again
on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery, State);
on_query(InstId, {TypeOrKey, SQLOrKey, Params, Timeout}, State);
{error, Reason} ->
LogMeta = #{connector => InstId, sql => SQLOrKey, state => State},
?SLOG(
error,
LogMeta#{msg => "mysql_connector_do_prepare_failed", reason => Reason}
),
emqx_resource:query_failed(AfterQuery),
{error, Reason}
end;
{error, Reason} ->
?SLOG(
error,
LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason}
),
emqx_resource:query_failed(AfterQuery),
Result;
_ ->
emqx_resource:query_success(AfterQuery),
Result ->
Result
end.
mysql_function(sql) -> query;
mysql_function(prepared_query) -> execute.
on_batch_query(
InstId,
BatchReq,
#{batch_inserts := Inserts, batch_params_tokens := ParamsTokens} = State
) ->
case hd(BatchReq) of
{Key, _} ->
case maps:get(Key, Inserts, undefined) of
undefined ->
{error, batch_select_not_implemented};
InsertSQL ->
Tokens = maps:get(Key, ParamsTokens),
on_batch_insert(InstId, BatchReq, InsertSQL, Tokens, State)
end;
Request ->
LogMeta = #{connector => InstId, first_request => Request, state => State},
?SLOG(error, LogMeta#{msg => "invalid request"}),
{error, invald_request}
end.
mysql_function(sql) ->
query;
mysql_function(prepared_query) ->
execute;
%% for bridge
mysql_function(_) ->
mysql_function(prepared_query).
on_get_status(_InstId, #{poolname := Pool, auto_reconnect := AutoReconn} = State) ->
case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of
@ -287,3 +311,143 @@ prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList]) when is_pid(Conn) ->
unprepare_sql_to_conn(Conn, PrepareSqlKey) ->
mysql:unprepare(Conn, PrepareSqlKey).
parse_prepare_sql(Config) ->
SQL =
case maps:get(prepare_statement, Config, undefined) of
undefined ->
case maps:get(sql, Config, undefined) of
undefined -> #{};
Template -> #{send_message => Template}
end;
Any ->
Any
end,
parse_prepare_sql(maps:to_list(SQL), #{}, #{}, #{}, #{}).
parse_prepare_sql([{Key, H} | _] = L, Prepares, Tokens, BatchInserts, BatchTks) ->
{PrepareSQL, ParamsTokens} = emqx_plugin_libs_rule:preproc_sql(H),
parse_batch_prepare_sql(
L, Prepares#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens}, BatchInserts, BatchTks
);
parse_prepare_sql([], Prepares, Tokens, BatchInserts, BatchTks) ->
#{
prepare_statement => Prepares,
params_tokens => Tokens,
batch_inserts => BatchInserts,
batch_params_tokens => BatchTks
}.
parse_batch_prepare_sql([{Key, H} | T], Prepares, Tokens, BatchInserts, BatchTks) ->
case emqx_plugin_libs_rule:detect_sql_type(H) of
{ok, select} ->
parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks);
{ok, insert} ->
case emqx_plugin_libs_rule:split_insert_sql(H) of
{ok, {InsertSQL, Params}} ->
ParamsTks = emqx_plugin_libs_rule:preproc_tmpl(Params),
parse_prepare_sql(
T,
Prepares,
Tokens,
BatchInserts#{Key => InsertSQL},
BatchTks#{Key => ParamsTks}
);
{error, Reason} ->
?SLOG(error, #{msg => "split sql failed", sql => H, reason => Reason}),
parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks)
end;
{error, Reason} ->
?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}),
parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks)
end.
proc_sql_params(query, SQLOrKey, Params, _State) ->
{SQLOrKey, Params};
proc_sql_params(prepared_query, SQLOrKey, Params, _State) ->
{SQLOrKey, Params};
proc_sql_params(TypeOrKey, SQLOrData, Params, #{params_tokens := ParamsTokens}) ->
case maps:get(TypeOrKey, ParamsTokens, undefined) of
undefined ->
{SQLOrData, Params};
Tokens ->
{TypeOrKey, emqx_plugin_libs_rule:proc_sql(Tokens, SQLOrData)}
end.
on_batch_insert(InstId, BatchReqs, InsertPart, Tokens, State) ->
JoinFun = fun
([Msg]) ->
emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg);
([H | T]) ->
lists:foldl(
fun(Msg, Acc) ->
Value = emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg),
<<Acc/binary, ", ", Value/binary>>
end,
emqx_plugin_libs_rule:proc_sql_param_str(Tokens, H),
T
)
end,
{_, Msgs} = lists:unzip(BatchReqs),
JoinPart = JoinFun(Msgs),
SQL = <<InsertPart/binary, " values ", JoinPart/binary>>,
on_sql_query(InstId, query, SQL, [], default_timeout, State).
on_sql_query(
InstId,
SQLFunc,
SQLOrKey,
Data,
Timeout,
#{poolname := PoolName} = State
) ->
LogMeta = #{connector => InstId, sql => SQLOrKey, state => State},
?TRACE("QUERY", "mysql_connector_received", LogMeta),
Worker = ecpool:get_client(PoolName),
{ok, Conn} = ecpool_worker:client(Worker),
?tp(
mysql_connector_send_query,
#{sql_or_key => SQLOrKey, data => Data}
),
try mysql:SQLFunc(Conn, SQLOrKey, Data, Timeout) of
{error, disconnected} = Result ->
?SLOG(
error,
LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => disconnected}
),
%% kill the poll worker to trigger reconnection
_ = exit(Conn, restart),
Result;
{error, not_prepared} = Error ->
?SLOG(
warning,
LogMeta#{msg => "mysql_connector_prepare_query_failed", reason => not_prepared}
),
Error;
{error, {1053, <<"08S01">>, Reason}} ->
%% mysql sql server shutdown in progress
?SLOG(
error,
LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason}
),
{error, {recoverable_error, Reason}};
{error, Reason} = Result ->
?SLOG(
error,
LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason}
),
Result;
Result ->
?tp(
mysql_connector_query_return,
#{result => Result}
),
Result
catch
error:badarg ->
?SLOG(
error,
LogMeta#{msg => "mysql_connector_invalid_params", params => Data}
),
{error, {invalid_params, Data}}
end.

View File

@ -27,9 +27,10 @@
%% callbacks of behaviour emqx_resource
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_query/4,
on_query/3,
on_get_status/2
]).
@ -66,6 +67,8 @@ server(desc) -> ?DESC("server");
server(_) -> undefined.
%% ===================================================================
callback_mode() -> always_sync.
on_start(
InstId,
#{
@ -116,9 +119,9 @@ on_stop(InstId, #{poolname := PoolName}) ->
}),
emqx_plugin_libs_pool:stop_pool(PoolName).
on_query(InstId, {Type, NameOrSQL}, AfterQuery, #{poolname := _PoolName} = State) ->
on_query(InstId, {Type, NameOrSQL, []}, AfterQuery, State);
on_query(InstId, {Type, NameOrSQL, Params}, AfterQuery, #{poolname := PoolName} = State) ->
on_query(InstId, {Type, NameOrSQL}, #{poolname := _PoolName} = State) ->
on_query(InstId, {Type, NameOrSQL, []}, State);
on_query(InstId, {Type, NameOrSQL, Params}, #{poolname := PoolName} = State) ->
?SLOG(debug, #{
msg => "postgresql connector received sql query",
connector => InstId,
@ -132,10 +135,9 @@ on_query(InstId, {Type, NameOrSQL, Params}, AfterQuery, #{poolname := PoolName}
connector => InstId,
sql => NameOrSQL,
reason => Reason
}),
emqx_resource:query_failed(AfterQuery);
});
_ ->
emqx_resource:query_success(AfterQuery)
ok
end,
Result.

View File

@ -26,9 +26,10 @@
%% callbacks of behaviour emqx_resource
-export([
callback_mode/0,
on_start/2,
on_stop/2,
on_query/4,
on_query/3,
on_get_status/2
]).
@ -112,6 +113,8 @@ servers(desc) -> ?DESC("servers");
servers(_) -> undefined.
%% ===================================================================
callback_mode() -> always_sync.
on_start(
InstId,
#{
@ -177,7 +180,7 @@ on_stop(InstId, #{poolname := PoolName, type := Type}) ->
_ -> emqx_plugin_libs_pool:stop_pool(PoolName)
end.
on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := Type} = State) ->
on_query(InstId, {cmd, Command}, #{poolname := PoolName, type := Type} = State) ->
?TRACE(
"QUERY",
"redis_connector_received",
@ -195,10 +198,9 @@ on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := T
connector => InstId,
sql => Command,
reason => Reason
}),
emqx_resource:query_failed(AfterCommand);
});
_ ->
emqx_resource:query_success(AfterCommand)
ok
end,
Result.

View File

@ -1,77 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_schema).
-behaviour(hocon_schema).
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").
-export([namespace/0, roots/0, fields/1, desc/1]).
-export([
get_response/0,
put_request/0,
post_request/0
]).
%% the config for webhook bridges do not need connectors
-define(CONN_TYPES, [mqtt]).
%%======================================================================================
%% For HTTP APIs
get_response() ->
http_schema("get").
put_request() ->
http_schema("put").
post_request() ->
http_schema("post").
http_schema(Method) ->
Schemas = [?R_REF(schema_mod(Type), Method) || Type <- ?CONN_TYPES],
?UNION(Schemas).
%%======================================================================================
%% Hocon Schema Definitions
namespace() -> connector.
roots() -> ["connectors"].
fields(connectors) ->
fields("connectors");
fields("connectors") ->
[
{mqtt,
?HOCON(
?MAP(name, ?R_REF(emqx_connector_mqtt_schema, "connector")),
#{desc => ?DESC("mqtt")}
)}
].
desc(Record) when
Record =:= connectors;
Record =:= "connectors"
->
?DESC("desc_connector");
desc(_) ->
undefined.
schema_mod(Type) ->
list_to_atom(lists:concat(["emqx_connector_", Type])).

View File

@ -68,6 +68,8 @@ ssl_fields() ->
relational_db_fields() ->
[
{database, fun database/1},
%% TODO: The `pool_size` for drivers will be deprecated. Ues `worker_pool_size` for emqx_resource
%% See emqx_resource.hrl
{pool_size, fun pool_size/1},
{username, fun username/1},
{password, fun password/1},
@ -102,6 +104,7 @@ username(_) -> undefined.
password(type) -> binary();
password(desc) -> ?DESC("password");
password(required) -> false;
password(format) -> <<"password">>;
password(_) -> undefined.
auto_reconnect(type) -> boolean();

View File

@ -24,20 +24,6 @@
try_clear_certs/3
]).
%% TODO: rm `connector` case after `dev/ee5.0` merged into `master`.
%% The `connector` config layer will be removed.
%% for bridges with `connector` field. i.e. `mqtt_source` and `mqtt_sink`
convert_certs(RltvDir, #{<<"connector">> := Connector} = Config) when
is_map(Connector)
->
SSL = maps:get(<<"ssl">>, Connector, undefined),
new_ssl_config(RltvDir, Config, SSL);
convert_certs(RltvDir, #{connector := Connector} = Config) when
is_map(Connector)
->
SSL = maps:get(ssl, Connector, undefined),
new_ssl_config(RltvDir, Config, SSL);
%% for bridges without `connector` field. i.e. webhook
convert_certs(RltvDir, #{<<"ssl">> := SSL} = Config) ->
new_ssl_config(RltvDir, Config, SSL);
convert_certs(RltvDir, #{ssl := SSL} = Config) ->
@ -49,14 +35,6 @@ convert_certs(_RltvDir, Config) ->
clear_certs(RltvDir, Config) ->
clear_certs2(RltvDir, normalize_key_to_bin(Config)).
clear_certs2(RltvDir, #{<<"connector">> := Connector} = _Config) when
is_map(Connector)
->
%% TODO remove the 'connector' clause after dev/ee5.0 is merged back to master
%% The `connector` config layer will be removed.
%% for bridges with `connector` field. i.e. `mqtt_source` and `mqtt_sink`
OldSSL = maps:get(<<"ssl">>, Connector, undefined),
ok = emqx_tls_lib:delete_ssl_files(RltvDir, undefined, OldSSL);
clear_certs2(RltvDir, #{<<"ssl">> := OldSSL} = _Config) ->
ok = emqx_tls_lib:delete_ssl_files(RltvDir, undefined, OldSSL);
clear_certs2(_RltvDir, _) ->
@ -69,8 +47,6 @@ try_clear_certs(RltvDir, NewConf, OldConf) ->
normalize_key_to_bin(OldConf)
).
try_clear_certs2(RltvDir, #{<<"connector">> := NewConnector}, #{<<"connector">> := OldConnector}) ->
try_clear_certs2(RltvDir, NewConnector, OldConnector);
try_clear_certs2(RltvDir, NewConf, OldConf) ->
NewSSL = try_map_get(<<"ssl">>, NewConf, undefined),
OldSSL = try_map_get(<<"ssl">>, OldConf, undefined),
@ -95,7 +71,9 @@ new_ssl_config(#{<<"ssl">> := _} = Config, NewSSL) ->
new_ssl_config(Config, _NewSSL) ->
Config.
normalize_key_to_bin(Map) ->
normalize_key_to_bin(undefined) ->
undefined;
normalize_key_to_bin(Map) when is_map(Map) ->
emqx_map_lib:binary_key_map(Map).
try_map_get(Key, Map, Default) when is_map(Map) ->

View File

@ -0,0 +1,19 @@
-module(emqx_connector_utils).
-export([split_insert_sql/1]).
%% SQL = <<"INSERT INTO \"abc\" (c1,c2,c3) VALUES (${1}, ${1}, ${1})">>
split_insert_sql(SQL) ->
case re:split(SQL, "((?i)values)", [{return, binary}]) of
[Part1, _, Part3] ->
case string:trim(Part1, leading) of
<<"insert", _/binary>> = InsertSQL ->
{ok, {InsertSQL, Part3}};
<<"INSERT", _/binary>> = InsertSQL ->
{ok, {InsertSQL, Part3}};
_ ->
{error, not_insert_sql}
end;
_ ->
{error, not_insert_sql}
end.

View File

@ -21,6 +21,7 @@
-export([
start/1,
send/2,
send_async/3,
stop/1,
ping/1
]).
@ -32,7 +33,6 @@
%% callbacks for emqtt
-export([
handle_puback/2,
handle_publish/3,
handle_disconnected/2
]).
@ -134,44 +134,11 @@ safe_stop(Pid, StopF, Timeout) ->
exit(Pid, kill)
end.
send(Conn, Msgs) ->
send(Conn, Msgs, []).
send(#{client_pid := ClientPid}, Msg) ->
emqtt:publish(ClientPid, Msg).
send(_Conn, [], []) ->
%% all messages in the batch are QoS-0
Ref = make_ref(),
%% QoS-0 messages do not have packet ID
%% the batch ack is simulated with a loop-back message
self() ! {batch_ack, Ref},
{ok, Ref};
send(_Conn, [], PktIds) ->
%% PktIds is not an empty list if there is any non-QoS-0 message in the batch,
%% And the worker should wait for all acks
{ok, PktIds};
send(#{client_pid := ClientPid} = Conn, [Msg | Rest], PktIds) ->
case emqtt:publish(ClientPid, Msg) of
ok ->
send(Conn, Rest, PktIds);
{ok, PktId} ->
send(Conn, Rest, [PktId | PktIds]);
{error, Reason} ->
%% NOTE: There is no partial success of a batch and recover from the middle
%% only to retry all messages in one batch
{error, Reason}
end.
handle_puback(#{packet_id := PktId, reason_code := RC}, Parent) when
RC =:= ?RC_SUCCESS;
RC =:= ?RC_NO_MATCHING_SUBSCRIBERS
->
Parent ! {batch_ack, PktId},
ok;
handle_puback(#{packet_id := PktId, reason_code := RC}, _Parent) ->
?SLOG(warning, #{
msg => "publish_to_remote_node_falied",
packet_id => PktId,
reason_code => RC
}).
send_async(#{client_pid := ClientPid}, Msg, Callback) ->
emqtt:publish_async(ClientPid, Msg, infinity, Callback).
handle_publish(Msg, undefined, _Opts) ->
?SLOG(error, #{
@ -200,14 +167,13 @@ handle_disconnected(Reason, Parent) ->
make_hdlr(Parent, Vars, Opts) ->
#{
puback => {fun ?MODULE:handle_puback/2, [Parent]},
publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]},
disconnected => {fun ?MODULE:handle_disconnected/2, [Parent]}
}.
sub_remote_topics(_ClientPid, undefined) ->
ok;
sub_remote_topics(ClientPid, #{remote_topic := FromTopic, remote_qos := QoS}) ->
sub_remote_topics(ClientPid, #{remote := #{topic := FromTopic, qos := QoS}}) ->
case emqtt:subscribe(ClientPid, FromTopic, QoS) of
{ok, _, _} -> ok;
Error -> throw(Error)
@ -217,12 +183,10 @@ process_config(Config) ->
maps:without([conn_type, address, receive_mountpoint, subscriptions, name], Config).
maybe_publish_to_local_broker(Msg, Vars, Props) ->
case maps:get(local_topic, Vars, undefined) of
undefined ->
%% local topic is not set, discard it
ok;
_ ->
_ = emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props))
case emqx_map_lib:deep_get([local, topic], Vars, undefined) of
%% local topic is not set, discard it
undefined -> ok;
_ -> emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props))
end.
format_msg_received(

View File

@ -38,14 +38,16 @@
-type msg() :: emqx_types:message().
-type exp_msg() :: emqx_types:message() | #mqtt_msg{}.
-type variables() :: #{
mountpoint := undefined | binary(),
remote_topic := binary(),
remote_qos := original | integer(),
-type remote_config() :: #{
topic := binary(),
qos := original | integer(),
retain := original | boolean(),
payload := binary()
}.
-type variables() :: #{
mountpoint := undefined | binary(),
remote := remote_config()
}.
make_pub_vars(_, undefined) ->
undefined;
@ -67,10 +69,12 @@ to_remote_msg(#message{flags = Flags0} = Msg, Vars) ->
MapMsg = maps:put(retain, Retain0, Columns),
to_remote_msg(MapMsg, Vars);
to_remote_msg(MapMsg, #{
remote_topic := TopicToken,
payload := PayloadToken,
remote_qos := QoSToken,
retain := RetainToken,
remote := #{
topic := TopicToken,
payload := PayloadToken,
qos := QoSToken,
retain := RetainToken
},
mountpoint := Mountpoint
}) when is_map(MapMsg) ->
Topic = replace_vars_in_str(TopicToken, MapMsg),
@ -94,10 +98,12 @@ to_broker_msg(Msg, Vars, undefined) ->
to_broker_msg(
#{dup := Dup} = MapMsg,
#{
local_topic := TopicToken,
payload := PayloadToken,
local_qos := QoSToken,
retain := RetainToken,
local := #{
topic := TopicToken,
payload := PayloadToken,
qos := QoSToken,
retain := RetainToken
},
mountpoint := Mountpoint
},
Props

View File

@ -28,25 +28,39 @@
desc/1
]).
-export([
ingress_desc/0,
egress_desc/0
]).
-import(emqx_schema, [mk_duration/2]).
-import(hoconsc, [mk/2, ref/2]).
namespace() -> "connector-mqtt".
roots() ->
fields("config").
fields("config") ->
fields("connector") ++
topic_mappings();
fields("connector") ->
fields("server_configs") ++
[
{"ingress",
mk(
ref(?MODULE, "ingress"),
#{
required => {false, recursively},
desc => ?DESC("ingress_desc")
}
)},
{"egress",
mk(
ref(?MODULE, "egress"),
#{
required => {false, recursively},
desc => ?DESC("egress_desc")
}
)}
];
fields("server_configs") ->
[
{mode,
sc(
mk(
hoconsc:enum([cluster_shareload]),
#{
default => cluster_shareload,
@ -54,7 +68,7 @@ fields("connector") ->
}
)},
{server,
sc(
mk(
emqx_schema:host_port(),
#{
required => true,
@ -68,7 +82,7 @@ fields("connector") ->
#{default => "15s"}
)},
{proto_ver,
sc(
mk(
hoconsc:enum([v3, v4, v5]),
#{
default => v4,
@ -76,7 +90,7 @@ fields("connector") ->
}
)},
{bridge_mode,
sc(
mk(
boolean(),
#{
default => false,
@ -84,21 +98,23 @@ fields("connector") ->
}
)},
{username,
sc(
mk(
binary(),
#{
desc => ?DESC("username")
}
)},
{password,
sc(
mk(
binary(),
#{
format => <<"password">>,
sensitive => true,
desc => ?DESC("password")
}
)},
{clean_start,
sc(
mk(
boolean(),
#{
default => true,
@ -113,20 +129,34 @@ fields("connector") ->
#{default => "15s"}
)},
{max_inflight,
sc(
mk(
non_neg_integer(),
#{
default => 32,
desc => ?DESC("max_inflight")
}
)},
{replayq, sc(ref("replayq"), #{})}
)}
] ++ emqx_connector_schema_lib:ssl_fields();
fields("ingress") ->
%% the message may be subscribed by rules; in this case 'local_topic' is not necessary
[
{remote_topic,
sc(
{"remote",
mk(
ref(?MODULE, "ingress_remote"),
#{desc => ?DESC(emqx_connector_mqtt_schema, "ingress_remote")}
)},
{"local",
mk(
ref(?MODULE, "ingress_local"),
#{
desc => ?DESC(emqx_connector_mqtt_schema, "ingress_local"),
is_required => false
}
)}
];
fields("ingress_remote") ->
[
{topic,
mk(
binary(),
#{
required => true,
@ -134,47 +164,44 @@ fields("ingress") ->
desc => ?DESC("ingress_remote_topic")
}
)},
{remote_qos,
sc(
{qos,
mk(
qos(),
#{
default => 1,
desc => ?DESC("ingress_remote_qos")
}
)},
{local_topic,
sc(
)}
];
fields("ingress_local") ->
[
{topic,
mk(
binary(),
#{
validator => fun emqx_schema:non_empty_string/1,
desc => ?DESC("ingress_local_topic")
desc => ?DESC("ingress_local_topic"),
required => false
}
)},
{local_qos,
sc(
{qos,
mk(
qos(),
#{
default => <<"${qos}">>,
desc => ?DESC("ingress_local_qos")
}
)},
{hookpoint,
sc(
binary(),
#{desc => ?DESC("ingress_hookpoint")}
)},
{retain,
sc(
mk(
hoconsc:union([boolean(), binary()]),
#{
default => <<"${retain}">>,
desc => ?DESC("retain")
}
)},
{payload,
sc(
mk(
binary(),
#{
default => undefined,
@ -183,18 +210,40 @@ fields("ingress") ->
)}
];
fields("egress") ->
%% the message may be sent from rules; in this case 'local_topic' is not necessary
[
{local_topic,
sc(
{"local",
mk(
ref(?MODULE, "egress_local"),
#{
desc => ?DESC(emqx_connector_mqtt_schema, "egress_local"),
required => false
}
)},
{"remote",
mk(
ref(?MODULE, "egress_remote"),
#{
desc => ?DESC(emqx_connector_mqtt_schema, "egress_remote"),
required => true
}
)}
];
fields("egress_local") ->
[
{topic,
mk(
binary(),
#{
desc => ?DESC("egress_local_topic"),
required => false,
validator => fun emqx_schema:non_empty_string/1
}
)},
{remote_topic,
sc(
)}
];
fields("egress_remote") ->
[
{topic,
mk(
binary(),
#{
required => true,
@ -202,104 +251,48 @@ fields("egress") ->
desc => ?DESC("egress_remote_topic")
}
)},
{remote_qos,
sc(
{qos,
mk(
qos(),
#{
required => true,
desc => ?DESC("egress_remote_qos")
}
)},
{retain,
sc(
mk(
hoconsc:union([boolean(), binary()]),
#{
required => true,
desc => ?DESC("retain")
}
)},
{payload,
sc(
mk(
binary(),
#{
default => undefined,
desc => ?DESC("payload")
}
)}
];
fields("replayq") ->
[
{dir,
sc(
hoconsc:union([boolean(), string()]),
#{desc => ?DESC("dir")}
)},
{seg_bytes,
sc(
emqx_schema:bytesize(),
#{
default => "100MB",
desc => ?DESC("seg_bytes")
}
)},
{offload,
sc(
boolean(),
#{
default => false,
desc => ?DESC("offload")
}
)}
].
desc("connector") ->
?DESC("desc_connector");
desc("server_configs") ->
?DESC("server_configs");
desc("ingress") ->
ingress_desc();
?DESC("ingress_desc");
desc("ingress_remote") ->
?DESC("ingress_remote");
desc("ingress_local") ->
?DESC("ingress_local");
desc("egress") ->
egress_desc();
desc("replayq") ->
?DESC("desc_replayq");
?DESC("egress_desc");
desc("egress_remote") ->
?DESC("egress_remote");
desc("egress_local") ->
?DESC("egress_local");
desc(_) ->
undefined.
topic_mappings() ->
[
{ingress,
sc(
ref("ingress"),
#{default => #{}}
)},
{egress,
sc(
ref("egress"),
#{default => #{}}
)}
].
ingress_desc() ->
"\n"
"The ingress config defines how this bridge receive messages from the remote MQTT broker, and then\n"
"send them to the local broker.<br/>"
"Template with variables is allowed in 'local_topic', 'remote_qos', 'qos', 'retain',\n"
"'payload'.<br/>"
"NOTE: if this bridge is used as the input of a rule (emqx rule engine), and also local_topic is\n"
"configured, then messages got from the remote broker will be sent to both the 'local_topic' and\n"
"the rule.\n".
egress_desc() ->
"\n"
"The egress config defines how this bridge forwards messages from the local broker to the remote\n"
"broker.<br/>"
"Template with variables is allowed in 'remote_topic', 'qos', 'retain', 'payload'.<br/>"
"NOTE: if this bridge is used as the action of a rule (emqx rule engine), and also local_topic\n"
"is configured, then both the data got from the rule and the MQTT messages that matches\n"
"local_topic will be forwarded.\n".
qos() ->
hoconsc:union([emqx_schema:qos(), binary()]).
sc(Type, Meta) -> hoconsc:mk(Type, Meta).
ref(Field) -> hoconsc:ref(?MODULE, Field).

View File

@ -68,7 +68,6 @@
%% APIs
-export([
start_link/1,
register_metrics/0,
stop/1
]).
@ -92,16 +91,14 @@
ensure_stopped/1,
status/1,
ping/1,
send_to_remote/2
send_to_remote/2,
send_to_remote_async/3
]).
-export([get_forwards/1]).
-export([get_subscriptions/1]).
%% Internal
-export([msg_marshaller/1]).
-export_type([
config/0,
ack_ref/0
@ -134,12 +131,6 @@
%% mountpoint: The topic mount point for messages sent to remote node/cluster
%% `undefined', `<<>>' or `""' to disable
%% forwards: Local topics to subscribe.
%% replayq.batch_bytes_limit: Max number of bytes to collect in a batch for each
%% send call towards emqx_bridge_connect
%% replayq.batch_count_limit: Max number of messages to collect in a batch for
%% each send call towards emqx_bridge_connect
%% replayq.dir: Directory where replayq should persist messages
%% replayq.seg_bytes: Size in bytes for each replayq segment file
%%
%% Find more connection specific configs in the callback modules
%% of emqx_bridge_connect behaviour.
@ -174,9 +165,14 @@ ping(Name) ->
gen_statem:call(name(Name), ping).
send_to_remote(Pid, Msg) when is_pid(Pid) ->
gen_statem:cast(Pid, {send_to_remote, Msg});
gen_statem:call(Pid, {send_to_remote, Msg});
send_to_remote(Name, Msg) ->
gen_statem:cast(name(Name), {send_to_remote, Msg}).
gen_statem:call(name(Name), {send_to_remote, Msg}).
send_to_remote_async(Pid, Msg, Callback) when is_pid(Pid) ->
gen_statem:cast(Pid, {send_to_remote_async, Msg, Callback});
send_to_remote_async(Name, Msg, Callback) ->
gen_statem:cast(name(Name), {send_to_remote_async, Msg, Callback}).
%% @doc Return all forwards (local subscriptions).
-spec get_forwards(id()) -> [topic()].
@ -195,12 +191,10 @@ init(#{name := Name} = ConnectOpts) ->
name => Name
}),
erlang:process_flag(trap_exit, true),
Queue = open_replayq(Name, maps:get(replayq, ConnectOpts, #{})),
State = init_state(ConnectOpts),
self() ! idle,
{ok, idle, State#{
connect_opts => pre_process_opts(ConnectOpts),
replayq => Queue
connect_opts => pre_process_opts(ConnectOpts)
}}.
init_state(Opts) ->
@ -213,32 +207,11 @@ init_state(Opts) ->
start_type => StartType,
reconnect_interval => ReconnDelayMs,
mountpoint => format_mountpoint(Mountpoint),
inflight => [],
max_inflight => MaxInflightSize,
connection => undefined,
name => Name
}.
open_replayq(Name, QCfg) ->
Dir = maps:get(dir, QCfg, undefined),
SegBytes = maps:get(seg_bytes, QCfg, ?DEFAULT_SEG_BYTES),
MaxTotalSize = maps:get(max_total_size, QCfg, ?DEFAULT_MAX_TOTAL_SIZE),
QueueConfig =
case Dir =:= undefined orelse Dir =:= "" of
true ->
#{mem_only => true};
false ->
#{
dir => filename:join([Dir, node(), Name]),
seg_bytes => SegBytes,
max_total_size => MaxTotalSize
}
end,
replayq:open(QueueConfig#{
sizer => fun emqx_connector_mqtt_msg:estimate_size/1,
marshaller => fun ?MODULE:msg_marshaller/1
}).
pre_process_opts(#{subscriptions := InConf, forwards := OutConf} = ConnectOpts) ->
ConnectOpts#{
subscriptions => pre_process_in_out(in, InConf),
@ -247,18 +220,22 @@ pre_process_opts(#{subscriptions := InConf, forwards := OutConf} = ConnectOpts)
pre_process_in_out(_, undefined) ->
undefined;
pre_process_in_out(in, #{local := LC} = Conf) when is_map(Conf) ->
Conf#{local => pre_process_in_out_common(LC)};
pre_process_in_out(in, Conf) when is_map(Conf) ->
Conf1 = pre_process_conf(local_topic, Conf),
Conf2 = pre_process_conf(local_qos, Conf1),
pre_process_in_out_common(Conf2);
%% have no 'local' field in the config
undefined;
pre_process_in_out(out, #{remote := RC} = Conf) when is_map(Conf) ->
Conf#{remote => pre_process_in_out_common(RC)};
pre_process_in_out(out, Conf) when is_map(Conf) ->
Conf1 = pre_process_conf(remote_topic, Conf),
Conf2 = pre_process_conf(remote_qos, Conf1),
pre_process_in_out_common(Conf2).
%% have no 'remote' field in the config
undefined.
pre_process_in_out_common(Conf) ->
Conf1 = pre_process_conf(payload, Conf),
pre_process_conf(retain, Conf1).
pre_process_in_out_common(Conf0) ->
Conf1 = pre_process_conf(topic, Conf0),
Conf2 = pre_process_conf(qos, Conf1),
Conf3 = pre_process_conf(payload, Conf2),
pre_process_conf(retain, Conf3).
pre_process_conf(Key, Conf) ->
case maps:find(Key, Conf) of
@ -273,9 +250,8 @@ pre_process_conf(Key, Conf) ->
code_change(_Vsn, State, Data, _Extra) ->
{ok, State, Data}.
terminate(_Reason, _StateName, #{replayq := Q} = State) ->
terminate(_Reason, _StateName, State) ->
_ = disconnect(State),
_ = replayq:close(Q),
maybe_destroy_session(State).
maybe_destroy_session(#{connect_opts := ConnectOpts = #{clean_start := false}} = State) ->
@ -300,6 +276,8 @@ idle({call, From}, ensure_started, State) ->
{error, Reason, _State} ->
{keep_state_and_data, [{reply, From, {error, Reason}}]}
end;
idle({call, From}, {send_to_remote, _}, _State) ->
{keep_state_and_data, [{reply, From, {error, {recoverable_error, not_connected}}}]};
%% @doc Standing by for manual start.
idle(info, idle, #{start_type := manual}) ->
keep_state_and_data;
@ -319,16 +297,19 @@ connecting(#{reconnect_interval := ReconnectDelayMs} = State) ->
{keep_state_and_data, {state_timeout, ReconnectDelayMs, reconnect}}
end.
connected(state_timeout, connected, #{inflight := Inflight} = State) ->
case retry_inflight(State#{inflight := []}, Inflight) of
{ok, NewState} ->
{keep_state, NewState, {next_event, internal, maybe_send}};
{error, NewState} ->
{keep_state, NewState}
connected(state_timeout, connected, State) ->
%% nothing to do
{keep_state, State};
connected({call, From}, {send_to_remote, Msg}, State) ->
case do_send(State, Msg) of
{ok, NState} ->
{keep_state, NState, [{reply, From, ok}]};
{error, Reason} ->
{keep_state_and_data, [[reply, From, {error, Reason}]]}
end;
connected(internal, maybe_send, State) ->
{_, NewState} = pop_and_send(State),
{keep_state, NewState};
connected(cast, {send_to_remote_async, Msg, Callback}, State) ->
_ = do_send_async(State, Msg, Callback),
{keep_state, State};
connected(
info,
{disconnected, Conn, Reason},
@ -342,9 +323,6 @@ connected(
false ->
keep_state_and_data
end;
connected(info, {batch_ack, Ref}, State) ->
NewState = handle_batch_ack(State, Ref),
{keep_state, NewState, {next_event, internal, maybe_send}};
connected(Type, Content, State) ->
common(connected, Type, Content, State).
@ -363,13 +341,12 @@ common(_StateName, {call, From}, get_forwards, #{connect_opts := #{forwards := F
{keep_state_and_data, [{reply, From, Forwards}]};
common(_StateName, {call, From}, get_subscriptions, #{connection := Connection}) ->
{keep_state_and_data, [{reply, From, maps:get(subscriptions, Connection, #{})}]};
common(_StateName, {call, From}, Req, _State) ->
{keep_state_and_data, [{reply, From, {error, {unsupported_request, Req}}}]};
common(_StateName, info, {'EXIT', _, _}, State) ->
{keep_state, State};
common(_StateName, cast, {send_to_remote, Msg}, #{replayq := Q} = State) ->
NewQ = replayq:append(Q, [Msg]),
{keep_state, State#{replayq => NewQ}, {next_event, internal, maybe_send}};
common(StateName, Type, Content, #{name := Name} = State) ->
?SLOG(notice, #{
?SLOG(error, #{
msg => "bridge_discarded_event",
name => Name,
type => Type,
@ -381,13 +358,12 @@ common(StateName, Type, Content, #{name := Name} = State) ->
do_connect(
#{
connect_opts := ConnectOpts,
inflight := Inflight,
name := Name
} = State
) ->
case emqx_connector_mqtt_mod:start(ConnectOpts) of
{ok, Conn} ->
?tp(info, connected, #{name => Name, inflight => length(Inflight)}),
?tp(info, connected, #{name => Name}),
{ok, State#{connection => Conn}};
{error, Reason} ->
ConnectOpts1 = obfuscate(ConnectOpts),
@ -399,39 +375,7 @@ do_connect(
{error, Reason, State}
end.
%% Retry all inflight (previously sent but not acked) batches.
retry_inflight(State, []) ->
{ok, State};
retry_inflight(State, [#{q_ack_ref := QAckRef, msg := Msg} | Rest] = OldInf) ->
case do_send(State, QAckRef, Msg) of
{ok, State1} ->
retry_inflight(State1, Rest);
{error, #{inflight := NewInf} = State1} ->
{error, State1#{inflight := NewInf ++ OldInf}}
end.
pop_and_send(#{inflight := Inflight, max_inflight := Max} = State) ->
pop_and_send_loop(State, Max - length(Inflight)).
pop_and_send_loop(State, 0) ->
?tp(debug, inflight_full, #{}),
{ok, State};
pop_and_send_loop(#{replayq := Q} = State, N) ->
case replayq:is_empty(Q) of
true ->
?tp(debug, replayq_drained, #{}),
{ok, State};
false ->
BatchSize = 1,
Opts = #{count_limit => BatchSize, bytes_limit => 999999999},
{Q1, QAckRef, [Msg]} = replayq:pop(Q, Opts),
case do_send(State#{replayq := Q1}, QAckRef, Msg) of
{ok, NewState} -> pop_and_send_loop(NewState, N - 1);
{error, NewState} -> {error, NewState}
end
end.
do_send(#{connect_opts := #{forwards := undefined}}, _QAckRef, Msg) ->
do_send(#{connect_opts := #{forwards := undefined}}, Msg) ->
?SLOG(error, #{
msg =>
"cannot_forward_messages_to_remote_broker"
@ -440,99 +384,68 @@ do_send(#{connect_opts := #{forwards := undefined}}, _QAckRef, Msg) ->
});
do_send(
#{
inflight := Inflight,
connection := Connection,
mountpoint := Mountpoint,
connect_opts := #{forwards := Forwards}
} = State,
QAckRef,
Msg
) ->
Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards),
ExportMsg = fun(Message) ->
emqx_metrics:inc('bridge.mqtt.message_sent_to_remote'),
emqx_connector_mqtt_msg:to_remote_msg(Message, Vars)
end,
ExportMsg = emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars),
?SLOG(debug, #{
msg => "publish_to_remote_broker",
message => Msg,
vars => Vars
}),
case emqx_connector_mqtt_mod:send(Connection, [ExportMsg(Msg)]) of
{ok, Refs} ->
{ok, State#{
inflight := Inflight ++
[
#{
q_ack_ref => QAckRef,
send_ack_ref => map_set(Refs),
msg => Msg
}
]
}};
case emqx_connector_mqtt_mod:send(Connection, ExportMsg) of
ok ->
{ok, State};
{ok, #{reason_code := RC}} when
RC =:= ?RC_SUCCESS;
RC =:= ?RC_NO_MATCHING_SUBSCRIBERS
->
{ok, State};
{ok, #{reason_code := RC, reason_code_name := RCN}} ->
?SLOG(warning, #{
msg => "publish_to_remote_node_falied",
message => Msg,
reason_code => RC,
reason_code_name => RCN
}),
{error, RCN};
{error, Reason} ->
?SLOG(info, #{
msg => "mqtt_bridge_produce_failed",
reason => Reason
}),
{error, State}
{error, Reason}
end.
%% map as set, ack-reference -> 1
map_set(Ref) when is_reference(Ref) ->
%% QoS-0 or RPC call returns a reference
map_set([Ref]);
map_set(List) ->
map_set(List, #{}).
map_set([], Set) -> Set;
map_set([H | T], Set) -> map_set(T, Set#{H => 1}).
handle_batch_ack(#{inflight := Inflight0, replayq := Q} = State, Ref) ->
Inflight1 = do_ack(Inflight0, Ref),
Inflight = drop_acked_batches(Q, Inflight1),
State#{inflight := Inflight}.
do_ack([], Ref) ->
?SLOG(debug, #{
msg => "stale_batch_ack_reference",
ref => Ref
}),
[];
do_ack([#{send_ack_ref := Refs} = First | Rest], Ref) ->
case maps:is_key(Ref, Refs) of
true ->
NewRefs = maps:without([Ref], Refs),
[First#{send_ack_ref := NewRefs} | Rest];
false ->
[First | do_ack(Rest, Ref)]
end.
%% Drop the consecutive header of the inflight list having empty send_ack_ref
drop_acked_batches(_Q, []) ->
?tp(debug, inflight_drained, #{}),
[];
drop_acked_batches(
Q,
[
#{
send_ack_ref := Refs,
q_ack_ref := QAckRef
}
| Rest
] = All
do_send_async(#{connect_opts := #{forwards := undefined}}, Msg, _Callback) ->
%% TODO: eval callback with undefined error
?SLOG(error, #{
msg =>
"cannot_forward_messages_to_remote_broker"
"_as_'egress'_is_not_configured",
messages => Msg
});
do_send_async(
#{
connection := Connection,
mountpoint := Mountpoint,
connect_opts := #{forwards := Forwards}
},
Msg,
Callback
) ->
case maps:size(Refs) of
0 ->
%% all messages are acked by bridge target
%% now it's safe to ack replayq (delete from disk)
ok = replayq:ack(Q, QAckRef),
%% continue to check more sent batches
drop_acked_batches(Q, Rest);
_ ->
%% the head (oldest) inflight batch is not acked, keep waiting
All
end.
Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards),
ExportMsg = emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars),
?SLOG(debug, #{
msg => "publish_to_remote_broker",
message => Msg,
vars => Vars
}),
emqx_connector_mqtt_mod:send_async(Connection, ExportMsg, Callback).
disconnect(#{connection := Conn} = State) when Conn =/= undefined ->
emqx_connector_mqtt_mod:stop(Conn),
@ -540,10 +453,6 @@ disconnect(#{connection := Conn} = State) when Conn =/= undefined ->
disconnect(State) ->
State.
%% Called only when replayq needs to dump it to disk.
msg_marshaller(Bin) when is_binary(Bin) -> emqx_connector_mqtt_msg:from_binary(Bin);
msg_marshaller(Msg) -> emqx_connector_mqtt_msg:to_binary(Msg).
format_mountpoint(undefined) ->
undefined;
format_mountpoint(Prefix) ->
@ -551,15 +460,6 @@ format_mountpoint(Prefix) ->
name(Id) -> list_to_atom(str(Id)).
register_metrics() ->
lists:foreach(
fun emqx_metrics:ensure/1,
[
'bridge.mqtt.message_sent_to_remote',
'bridge.mqtt.message_received_from_remote'
]
).
obfuscate(Map) ->
maps:fold(
fun(K, V, Acc) ->

View File

@ -1,94 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include("emqx/include/emqx.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>).
-define(MQTT_CONNECTOR(Username), #{
<<"server">> => <<"127.0.0.1:1883">>,
<<"username">> => Username,
<<"password">> => <<"">>,
<<"proto_ver">> => <<"v4">>,
<<"ssl">> => #{<<"enable">> => false}
}).
-define(CONNECTOR_TYPE, <<"mqtt">>).
-define(CONNECTOR_NAME, <<"test_connector_42">>).
%% @doc CT callback: run every `t_*' testcase discovered in this module.
all() ->
    emqx_common_test_helpers:all(?MODULE).

%% @doc CT callback: no testcase groups are defined for this suite.
groups() ->
    [].

%% @doc CT callback: no suite-level options (e.g. timetrap) are set.
suite() ->
    [].
%% @doc CT callback: boot the applications the whole suite depends on.
%% Returns the CT config unchanged.
init_per_suite(Config) ->
    _ = application:load(emqx_conf),
    %% Some testcases (possibly from another app's suite) may already have
    %% emqx_connector running; stop it (and emqx_resource) first so the
    %% suite starts from a clean slate.  Results are ignored on purpose:
    %% the apps may simply not be running.
    _ = application:stop(emqx_resource),
    _ = application:stop(emqx_connector),
    ok = emqx_common_test_helpers:start_apps(
        [
            emqx_connector,
            emqx_bridge
        ]
    ),
    %% Seed an empty `connectors' config root so lookups/lists start from a
    %% known-empty state.
    ok = emqx_common_test_helpers:load_config(emqx_connector_schema, <<"connectors: {}">>),
    Config.
%% @doc CT callback: stop the applications started by init_per_suite/1.
end_per_suite(_Config) ->
    emqx_common_test_helpers:stop_apps([
        emqx_connector,
        emqx_bridge
    ]),
    ok.
%% @doc CT callback: every testcase needs a running emqx_cluster_rpc,
%% which the config-update paths under test call into.
init_per_testcase(_, Config) ->
    {ok, _} = emqx_cluster_rpc:start_link(),
    Config.

%% @doc CT callback: nothing to clean up per testcase in this suite.
end_per_testcase(_, _Config) ->
    ok.
%% With the connectors config root erased, listing raw connectors
%% must yield an empty list.
t_list_raw_empty(_Config) ->
    %% Drop the root config entry so nothing is configured.
    [RootKey | _] = emqx_connector:config_key_path(),
    ok = emqx_config:erase(RootKey),
    ?assertEqual([], emqx_connector:list_raw()).
%% Looking up a connector id that was never created reports `not_found'.
t_lookup_raw_error(_Config) ->
    ?assertEqual({error, not_found}, emqx_connector:lookup_raw(<<"foo:bar">>)).
%% An id without the `type:name' separator cannot be parsed and raises
%% `{invalid_connector_id, Id}'.
t_parse_connector_id_error(_Config) ->
    BadId = <<"foobar">>,
    ?assertError(
        {invalid_connector_id, BadId},
        emqx_connector:parse_connector_id(BadId)
    ).
%% Updating a connector that does not yet exist behaves as an upsert:
%% the call succeeds and returns the resulting config.
t_update_connector_does_not_exist(_) ->
    Config = ?MQTT_CONNECTOR(<<"user1">>),
    ?assertMatch({ok, _Config}, emqx_connector:update(?CONNECTOR_TYPE, ?CONNECTOR_NAME, Config)).

%% Deleting a non-existent connector is not an error; it returns an
%% empty post_config_update result.
t_delete_connector_does_not_exist(_) ->
    ?assertEqual({ok, #{post_config_update => #{}}}, emqx_connector:delete(<<"foo:bar">>)).

%% connector_id/2 also accepts plain lists (strings) and still produces
%% the binary `type:name' id.
t_connector_id_using_list(_) ->
    <<"foo:bar">> = emqx_connector:connector_id("foo", "bar").

View File

@ -1,812 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_connector_api_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-import(emqx_dashboard_api_test_helpers, [request/4, uri/1]).
-include("emqx/include/emqx.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include("emqx_dashboard/include/emqx_dashboard.hrl").
%% output functions
-export([inspect/3]).
-define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>).
-define(CONNECTR_TYPE, <<"mqtt">>).
-define(CONNECTR_NAME, <<"test_connector">>).
-define(BRIDGE_NAME_INGRESS, <<"ingress_test_bridge">>).
-define(BRIDGE_NAME_EGRESS, <<"egress_test_bridge">>).
-define(MQTT_CONNECTOR(Username), #{
<<"server">> => <<"127.0.0.1:1883">>,
<<"username">> => Username,
<<"password">> => <<"">>,
<<"proto_ver">> => <<"v4">>,
<<"ssl">> => #{<<"enable">> => false}
}).
-define(MQTT_CONNECTOR2(Server), ?MQTT_CONNECTOR(<<"user1">>)#{<<"server">> => Server}).
-define(MQTT_BRIDGE_INGRESS(ID), #{
<<"connector">> => ID,
<<"direction">> => <<"ingress">>,
<<"remote_topic">> => <<"remote_topic/#">>,
<<"remote_qos">> => 2,
<<"local_topic">> => <<"local_topic/${topic}">>,
<<"local_qos">> => <<"${qos}">>,
<<"payload">> => <<"${payload}">>,
<<"retain">> => <<"${retain}">>
}).
-define(MQTT_BRIDGE_EGRESS(ID), #{
<<"connector">> => ID,
<<"direction">> => <<"egress">>,
<<"local_topic">> => <<"local_topic/#">>,
<<"remote_topic">> => <<"remote_topic/${topic}">>,
<<"payload">> => <<"${payload}">>,
<<"remote_qos">> => <<"${qos}">>,
<<"retain">> => <<"${retain}">>
}).
-define(metrics(MATCH, SUCC, FAILED, SPEED, SPEED5M, SPEEDMAX), #{
<<"matched">> := MATCH,
<<"success">> := SUCC,
<<"failed">> := FAILED,
<<"rate">> := SPEED,
<<"rate_last5m">> := SPEED5M,
<<"rate_max">> := SPEEDMAX
}).
%% @doc Rule-engine output function used by the testcases: stashes the
%% selected fields in a persistent_term keyed by this module so a test
%% can later inspect what a rule produced.
inspect(Selected, _Envs, _Args) ->
    persistent_term:put(?MODULE, #{inspect => Selected}).

%% @doc CT callback: run every `t_*' testcase discovered in this module.
all() ->
    emqx_common_test_helpers:all(?MODULE).

%% @doc CT callback: no testcase groups are defined for this suite.
groups() ->
    [].

%% @doc CT callback: cap each testcase at 30 seconds.
suite() ->
    [{timetrap, {seconds, 30}}].
%% @doc CT callback: start every application the REST-API tests exercise
%% (rule engine, connector, bridge, and the dashboard that serves the
%% HTTP API), then seed empty connector/rule/bridge configs.
init_per_suite(Config) ->
    _ = application:load(emqx_conf),
    %% Some testcases (possibly from another app's suite) may already have
    %% emqx_connector running; stop it (and emqx_resource) for a clean slate.
    _ = application:stop(emqx_resource),
    _ = application:stop(emqx_connector),
    ok = emqx_common_test_helpers:start_apps(
        [
            emqx_rule_engine,
            emqx_connector,
            emqx_bridge,
            emqx_dashboard
        ],
        %% per-app startup hook; see set_special_configs/1 below
        fun set_special_configs/1
    ),
    ok = emqx_common_test_helpers:load_config(emqx_connector_schema, <<"connectors: {}">>),
    ok = emqx_common_test_helpers:load_config(
        emqx_rule_engine_schema,
        <<"rule_engine {rules {}}">>
    ),
    ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?BRIDGE_CONF_DEFAULT),
    Config.
%% @doc CT callback: stop everything started by init_per_suite/1.
end_per_suite(_Config) ->
    emqx_common_test_helpers:stop_apps([
        emqx_rule_engine,
        emqx_connector,
        emqx_bridge,
        emqx_dashboard
    ]),
    ok.

%% @doc Per-app startup hook passed to start_apps/2: the dashboard needs
%% a default admin user so the testcases can authenticate REST calls.
set_special_configs(emqx_dashboard) ->
    emqx_dashboard_api_test_helpers:set_default_config(<<"connector_admin">>);
set_special_configs(_) ->
    ok.
%% @doc CT callback: every testcase needs a running emqx_cluster_rpc,
%% which the config-update paths under test call into.
init_per_testcase(_, Config) ->
    {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
    Config.

%% @doc CT callback: drop every rule, bridge and connector the testcase
%% created so testcases stay independent.
end_per_testcase(_, _Config) ->
    clear_resources(),
    ok.
%% Delete every rule, bridge and connector so no state leaks between
%% testcases.  Rules are removed first (they may reference bridges),
%% then bridges, then the connectors the bridges were bound to.
clear_resources() ->
    DropRule = fun(#{id := RuleId}) ->
        ok = emqx_rule_engine:delete_rule(RuleId)
    end,
    DropBridge = fun(#{type := BridgeType, name := BridgeName}) ->
        {ok, _} = emqx_bridge:remove(BridgeType, BridgeName)
    end,
    DropConnector = fun(#{<<"type">> := ConnType, <<"name">> := ConnName}) ->
        {ok, _} = emqx_connector:delete(ConnType, ConnName)
    end,
    lists:foreach(DropRule, emqx_rule_engine:get_rules()),
    lists:foreach(DropBridge, emqx_bridge:list()),
    lists:foreach(DropConnector, emqx_connector:list_raw()).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
%% Full CRUD walk-through of the /connectors REST API: create (POST),
%% update (PUT), list/get (GET) and delete (DELETE), including the error
%% responses for an incomplete create and an update of a deleted id.
t_mqtt_crud_apis(_) ->
    %% assert that there are no connectors at first
    {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []),

    %% then we add a mqtt connector, using POST
    %% POST /connectors/ will create a connector
    User1 = <<"user1">>,
    %% creating without the required `name'/`type' fields must be a 400
    {ok, 400, <<
        "{\"code\":\"BAD_REQUEST\",\"message\""
        ":\"missing some required fields: [name, type]\"}"
    >>} =
        request(
            post,
            uri(["connectors"]),
            ?MQTT_CONNECTOR(User1)#{<<"type">> => ?CONNECTR_TYPE}
        ),
    {ok, 201, Connector} = request(
        post,
        uri(["connectors"]),
        ?MQTT_CONNECTOR(User1)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?CONNECTR_NAME
        }
    ),
    %% the created connector is echoed back in the 201 body
    #{
        <<"type">> := ?CONNECTR_TYPE,
        <<"name">> := ?CONNECTR_NAME,
        <<"server">> := <<"127.0.0.1:1883">>,
        <<"username">> := User1,
        <<"password">> := <<"">>,
        <<"proto_ver">> := <<"v4">>,
        <<"ssl">> := #{<<"enable">> := false}
    } = jsx:decode(Connector),
    ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
    %% update the request-path of the connector
    User2 = <<"user2">>,
    {ok, 200, Connector2} = request(
        put,
        uri(["connectors", ConnctorID]),
        ?MQTT_CONNECTOR(User2)
    ),
    ?assertMatch(
        #{
            <<"type">> := ?CONNECTR_TYPE,
            <<"name">> := ?CONNECTR_NAME,
            <<"server">> := <<"127.0.0.1:1883">>,
            <<"username">> := User2,
            <<"password">> := <<"">>,
            <<"proto_ver">> := <<"v4">>,
            <<"ssl">> := #{<<"enable">> := false}
        },
        jsx:decode(Connector2)
    ),

    %% list all connectors again, assert Connector2 is in it
    {ok, 200, Connector2Str} = request(get, uri(["connectors"]), []),
    ?assertMatch(
        [
            #{
                <<"type">> := ?CONNECTR_TYPE,
                <<"name">> := ?CONNECTR_NAME,
                <<"server">> := <<"127.0.0.1:1883">>,
                <<"username">> := User2,
                <<"password">> := <<"">>,
                <<"proto_ver">> := <<"v4">>,
                <<"ssl">> := #{<<"enable">> := false}
            }
        ],
        jsx:decode(Connector2Str)
    ),

    %% get the connector by id
    {ok, 200, Connector3Str} = request(get, uri(["connectors", ConnctorID]), []),
    ?assertMatch(
        #{
            <<"type">> := ?CONNECTR_TYPE,
            <<"name">> := ?CONNECTR_NAME,
            <<"server">> := <<"127.0.0.1:1883">>,
            <<"username">> := User2,
            <<"password">> := <<"">>,
            <<"proto_ver">> := <<"v4">>,
            <<"ssl">> := #{<<"enable">> := false}
        },
        jsx:decode(Connector3Str)
    ),
    %% delete the connector
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []),

    %% updating a deleted connector returns a 404 error
    {ok, 404, ErrMsg2} = request(
        put,
        uri(["connectors", ConnctorID]),
        ?MQTT_CONNECTOR(User2)
    ),
    ?assertMatch(
        #{
            <<"code">> := _,
            <<"message">> := <<"connector not found">>
        },
        jsx:decode(ErrMsg2)
    ),
    ok.
%% Ingress bridge end-to-end: create a connector plus an ingress bridge
%% bound to it, publish on the remote topic and assert the message is
%% delivered on the mapped local topic, then clean up.
t_mqtt_conn_bridge_ingress(_) ->
    %% then we add a mqtt connector, using POST
    User1 = <<"user1">>,
    {ok, 201, Connector} = request(
        post,
        uri(["connectors"]),
        ?MQTT_CONNECTOR(User1)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?CONNECTR_NAME
        }
    ),
    %% a freshly-created connector has no bridges attached yet
    #{
        <<"type">> := ?CONNECTR_TYPE,
        <<"name">> := ?CONNECTR_NAME,
        <<"server">> := <<"127.0.0.1:1883">>,
        <<"num_of_bridges">> := 0,
        <<"username">> := User1,
        <<"password">> := <<"">>,
        <<"proto_ver">> := <<"v4">>,
        <<"ssl">> := #{<<"enable">> := false}
    } = jsx:decode(Connector),
    ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
    %% ... and a MQTT bridge, using POST
    %% we bind this bridge to the connector created just now
    timer:sleep(50),
    {ok, 201, Bridge} = request(
        post,
        uri(["bridges"]),
        ?MQTT_BRIDGE_INGRESS(ConnctorID)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?BRIDGE_NAME_INGRESS
        }
    ),
    #{
        <<"type">> := ?CONNECTR_TYPE,
        <<"name">> := ?BRIDGE_NAME_INGRESS,
        <<"connector">> := ConnctorID
    } = jsx:decode(Bridge),
    BridgeIDIngress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS),
    wait_for_resource_ready(BridgeIDIngress, 5),

    %% we now test if the bridge works as expected
    RemoteTopic = <<"remote_topic/1">>,
    LocalTopic = <<"local_topic/", RemoteTopic/binary>>,
    Payload = <<"hello">>,
    emqx:subscribe(LocalTopic),
    timer:sleep(100),
    %% PUBLISH a message to the 'remote' broker, as we have only one broker,
    %% the remote broker is also the local one.
    emqx:publish(emqx_message:make(RemoteTopic, Payload)),
    %% we should receive a message on the local broker, with specified topic
    ?assert(
        receive
            {deliver, LocalTopic, #message{payload = Payload}} ->
                ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),

    %% get the connector by id, verify the num_of_bridges now is 1
    {ok, 200, Connector1Str} = request(get, uri(["connectors", ConnctorID]), []),
    ?assertMatch(#{<<"num_of_bridges">> := 1}, jsx:decode(Connector1Str)),

    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),

    %% delete the connector
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []),
    ok.
%% Egress bridge end-to-end: create a connector plus an egress bridge,
%% publish on the local topic and assert delivery on the mapped remote
%% topic, then verify the bridge's success metrics and clean up.
t_mqtt_conn_bridge_egress(_) ->
    %% then we add a mqtt connector, using POST
    User1 = <<"user1">>,
    {ok, 201, Connector} = request(
        post,
        uri(["connectors"]),
        ?MQTT_CONNECTOR(User1)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?CONNECTR_NAME
        }
    ),
    %ct:pal("---connector: ~p", [Connector]),
    #{
        <<"server">> := <<"127.0.0.1:1883">>,
        <<"username">> := User1,
        <<"password">> := <<"">>,
        <<"proto_ver">> := <<"v4">>,
        <<"ssl">> := #{<<"enable">> := false}
    } = jsx:decode(Connector),
    ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
    %% ... and a MQTT bridge, using POST
    %% we bind this bridge to the connector created just now
    {ok, 201, Bridge} = request(
        post,
        uri(["bridges"]),
        ?MQTT_BRIDGE_EGRESS(ConnctorID)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?BRIDGE_NAME_EGRESS
        }
    ),
    #{
        <<"type">> := ?CONNECTR_TYPE,
        <<"name">> := ?BRIDGE_NAME_EGRESS,
        <<"connector">> := ConnctorID
    } = jsx:decode(Bridge),
    BridgeIDEgress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
    wait_for_resource_ready(BridgeIDEgress, 5),

    %% we now test if the bridge works as expected
    LocalTopic = <<"local_topic/1">>,
    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
    Payload = <<"hello">>,
    emqx:subscribe(RemoteTopic),
    timer:sleep(100),
    %% PUBLISH a message to the 'local' broker, as we have only one broker,
    %% the remote broker is also the local one.
    emqx:publish(emqx_message:make(LocalTopic, Payload)),
    %% we should receive a message on the "remote" broker, with specified topic
    ?assert(
        receive
            {deliver, RemoteTopic, #message{payload = Payload}} ->
                ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),

    %% verify the metrics of the bridge: 1 matched, 1 success, 0 failed
    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
    ?assertMatch(
        #{
            <<"metrics">> := ?metrics(1, 1, 0, _, _, _),
            <<"node_metrics">> :=
                [#{<<"node">> := _, <<"metrics">> := ?metrics(1, 1, 0, _, _, _)}]
        },
        jsx:decode(BridgeStr)
    ),

    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),

    %% delete the connector
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []),
    ok.
%% t_mqtt_conn_update:
%% - updating a connector should also update all of the bridges bound to it
%% - the update succeeds even when the new target server is unreachable
t_mqtt_conn_update(_) ->
    %% then we add a mqtt connector, using POST
    {ok, 201, Connector} = request(
        post,
        uri(["connectors"]),
        ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?CONNECTR_NAME
        }
    ),
    %ct:pal("---connector: ~p", [Connector]),
    #{<<"server">> := <<"127.0.0.1:1883">>} = jsx:decode(Connector),
    ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
    %% ... and a MQTT bridge, using POST
    %% we bind this bridge to the connector created just now
    {ok, 201, Bridge} = request(
        post,
        uri(["bridges"]),
        ?MQTT_BRIDGE_EGRESS(ConnctorID)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?BRIDGE_NAME_EGRESS
        }
    ),
    #{
        <<"type">> := ?CONNECTR_TYPE,
        <<"name">> := ?BRIDGE_NAME_EGRESS,
        <<"connector">> := ConnctorID
    } = jsx:decode(Bridge),
    BridgeIDEgress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
    wait_for_resource_ready(BridgeIDEgress, 5),

    %% Then we try to update 'server' of the connector, to an unavailable IP address.
    %% The update succeeds (HTTP 200): the resource is recreated even though it is
    %% currently connected and the new target server is unavailable.
    {ok, 200, _} = request(
        put,
        uri(["connectors", ConnctorID]),
        ?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)
    ),
    %% we fix the 'server' parameter to a normal one, it should work
    %% (note the extra spaces around ':' — the server address is normalized)
    {ok, 200, _} = request(
        put,
        uri(["connectors", ConnctorID]),
        ?MQTT_CONNECTOR2(<<"127.0.0.1 : 1883">>)
    ),
    %% delete the bridge
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),

    %% delete the connector
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []).
t_mqtt_conn_update2(_) ->
    %% Create an MQTT connector via POST, but pointing at an unreachable
    %% server (port 2603), so the resource comes up disconnected.
    {ok, 201, Connector} = request(
        post,
        uri(["connectors"]),
        ?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?CONNECTR_NAME
        }
    ),
    #{<<"server">> := <<"127.0.0.1:2603">>} = jsx:decode(Connector),
    ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
    %% ... and create an MQTT egress bridge bound to that connector, also via POST.
    {ok, 201, Bridge} = request(
        post,
        uri(["bridges"]),
        ?MQTT_BRIDGE_EGRESS(ConnctorID)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?BRIDGE_NAME_EGRESS
        }
    ),
    %% The bridge inherits the broken connection: its status is 'disconnected'.
    #{
        <<"type">> := ?CONNECTR_TYPE,
        <<"name">> := ?BRIDGE_NAME_EGRESS,
        <<"status">> := <<"disconnected">>,
        <<"connector">> := ConnctorID
    } = jsx:decode(Bridge),
    BridgeIDEgress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
    %% Point 'server' at another unavailable address. The update should
    %% succeed: connectivity of the new config is not checked when the
    %% resource is already disconnected.
    {ok, 200, _} = request(
        put,
        uri(["connectors", ConnctorID]),
        ?MQTT_CONNECTOR2(<<"127.0.0.1:2604">>)
    ),
    %% Now set 'server' to the reachable local broker; the bridge should
    %% transition to 'connected'.
    {ok, 200, _} = request(
        put,
        uri(["connectors", ConnctorID]),
        ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)
    ),
    wait_for_resource_ready(BridgeIDEgress, 5),
    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
    ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(BridgeStr)),
    %% Clean up: delete the bridge, then the connector, verifying both lists
    %% end up empty.
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []),
    {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []).
t_mqtt_conn_update3(_) ->
    %% Set up a connector and an egress bridge that is bound to it.
    ConnectorConf = ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{
        <<"type">> => ?CONNECTR_TYPE,
        <<"name">> => ?CONNECTR_NAME
    },
    {ok, 201, _} = request(post, uri(["connectors"]), ConnectorConf),
    ConnectorId = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
    BridgeConf = ?MQTT_BRIDGE_EGRESS(ConnectorId)#{
        <<"type">> => ?CONNECTR_TYPE,
        <<"name">> => ?BRIDGE_NAME_EGRESS
    },
    {ok, 201, Bridge} = request(post, uri(["bridges"]), BridgeConf),
    %% The created bridge must reference the connector it was bound to.
    #{<<"connector">> := ConnectorId} = jsx:decode(Bridge),
    BridgeId = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
    wait_for_resource_ready(BridgeId, 5),
    %% Deleting a connector that is still referenced by a bridge is rejected.
    {ok, 403, _} = request(delete, uri(["connectors", ConnectorId]), []),
    %% After the bridge is removed, deleting the connector succeeds.
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeId]), []),
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnectorId]), []).
t_mqtt_conn_testing(_) ->
    %% Probe the connectivity-testing endpoint with two server addresses.
    MkConf = fun(Server) ->
        ?MQTT_CONNECTOR2(Server)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?BRIDGE_NAME_EGRESS
        }
    end,
    %% A reachable broker yields 204 (no content).
    {ok, 204, <<>>} = request(
        post,
        uri(["connectors_test"]),
        MkConf(<<"127.0.0.1:1883">>)
    ),
    %% An unreachable port is reported as a 400 error.
    {ok, 400, _} = request(
        post,
        uri(["connectors_test"]),
        MkConf(<<"127.0.0.1:2883">>)
    ).
t_ingress_mqtt_bridge_with_rules(_) ->
    %% Create a connector, an ingress MQTT bridge bound to it, and a rule
    %% whose SQL source is the bridge; then verify message flow and the
    %% rule/action metrics.
    {ok, 201, _} = request(
        post,
        uri(["connectors"]),
        ?MQTT_CONNECTOR(<<"user1">>)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?CONNECTR_NAME
        }
    ),
    ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
    {ok, 201, _} = request(
        post,
        uri(["bridges"]),
        ?MQTT_BRIDGE_INGRESS(ConnctorID)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?BRIDGE_NAME_INGRESS
        }
    ),
    BridgeIDIngress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS),
    %% The rule selects everything the ingress bridge delivers ("$bridges/...")
    %% and feeds it to this suite's inspect action.
    {ok, 201, Rule} = request(
        post,
        uri(["rules"]),
        #{
            <<"name">> => <<"A_rule_get_messages_from_a_source_mqtt_bridge">>,
            <<"enable">> => true,
            <<"actions">> => [#{<<"function">> => "emqx_connector_api_SUITE:inspect"}],
            <<"sql">> => <<"SELECT * from \"$bridges/", BridgeIDIngress/binary, "\"">>
        }
    ),
    #{<<"id">> := RuleId} = jsx:decode(Rule),
    %% we now test if the bridge works as expected
    RemoteTopic = <<"remote_topic/1">>,
    LocalTopic = <<"local_topic/", RemoteTopic/binary>>,
    Payload = <<"hello">>,
    emqx:subscribe(LocalTopic),
    timer:sleep(100),
    %% PUBLISH a message to the 'remote' broker; as we have only one broker,
    %% the remote broker is also the local one.
    wait_for_resource_ready(BridgeIDIngress, 5),
    emqx:publish(emqx_message:make(RemoteTopic, Payload)),
    %% The ingress bridge should republish the message locally on the
    %% remapped topic.
    ?assert(
        receive
            {deliver, LocalTopic, #message{payload = Payload}} ->
                ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),
    %% The rule should have matched exactly once, and its action succeeded.
    {ok, 200, Rule1} = request(get, uri(["rules", RuleId, "metrics"]), []),
    #{
        <<"id">> := RuleId,
        <<"metrics">> := #{
            <<"matched">> := 1,
            <<"passed">> := 1,
            <<"failed">> := 0,
            <<"failed.exception">> := 0,
            <<"failed.no_result">> := 0,
            <<"matched.rate">> := _,
            <<"matched.rate.max">> := _,
            <<"matched.rate.last5m">> := _,
            <<"actions.total">> := 1,
            <<"actions.success">> := 1,
            <<"actions.failed">> := 0,
            <<"actions.failed.out_of_service">> := 0,
            <<"actions.failed.unknown">> := 0
        }
    } = jsx:decode(Rule1),
    %% The inspect action records the event it saw under this module's key in
    %% persistent_term; check the event carries the expected message fields.
    ?assertMatch(
        #{
            inspect := #{
                event := <<"$bridges/mqtt", _/binary>>,
                id := MsgId,
                payload := Payload,
                topic := RemoteTopic,
                qos := 0,
                dup := false,
                retain := false,
                pub_props := #{},
                timestamp := _
            }
        } when is_binary(MsgId),
        persistent_term:get(?MODULE)
    ),
    %% Clean up: rule, bridge, connector.
    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []),
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []).
t_egress_mqtt_bridge_with_rules(_) ->
    %% Create a connector, an egress MQTT bridge bound to it, and a rule that
    %% uses the bridge as its action; verify forwarding both for a directly
    %% published message and for a message routed through the rule engine.
    {ok, 201, _} = request(
        post,
        uri(["connectors"]),
        ?MQTT_CONNECTOR(<<"user1">>)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?CONNECTR_NAME
        }
    ),
    ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
    {ok, 201, Bridge} = request(
        post,
        uri(["bridges"]),
        ?MQTT_BRIDGE_EGRESS(ConnctorID)#{
            <<"type">> => ?CONNECTR_TYPE,
            <<"name">> => ?BRIDGE_NAME_EGRESS
        }
    ),
    #{<<"type">> := ?CONNECTR_TYPE, <<"name">> := ?BRIDGE_NAME_EGRESS} = jsx:decode(Bridge),
    BridgeIDEgress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
    %% The rule selects from "t/1" and forwards matches to the egress bridge.
    {ok, 201, Rule} = request(
        post,
        uri(["rules"]),
        #{
            <<"name">> => <<"A_rule_send_messages_to_a_sink_mqtt_bridge">>,
            <<"enable">> => true,
            <<"actions">> => [BridgeIDEgress],
            <<"sql">> => <<"SELECT * from \"t/1\"">>
        }
    ),
    #{<<"id">> := RuleId} = jsx:decode(Rule),
    %% Phase 1: publish directly to a topic the bridge itself forwards.
    LocalTopic = <<"local_topic/1">>,
    RemoteTopic = <<"remote_topic/", LocalTopic/binary>>,
    Payload = <<"hello">>,
    emqx:subscribe(RemoteTopic),
    timer:sleep(100),
    %% PUBLISH a message to the 'local' broker; as we have only one broker,
    %% the remote broker is also the local one.
    wait_for_resource_ready(BridgeIDEgress, 5),
    emqx:publish(emqx_message:make(LocalTopic, Payload)),
    %% we should receive a message on the "remote" broker, with specified topic
    ?assert(
        receive
            {deliver, RemoteTopic, #message{payload = Payload}} ->
                ct:pal("remote broker got message: ~p on topic ~p", [Payload, RemoteTopic]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),
    emqx:unsubscribe(RemoteTopic),
    %% Phase 2: publish to the rule's source topic so the message reaches the
    %% bridge via the rule engine.
    Payload2 = <<"hi">>,
    RuleTopic = <<"t/1">>,
    RemoteTopic2 = <<"remote_topic/", RuleTopic/binary>>,
    emqx:subscribe(RemoteTopic2),
    timer:sleep(100),
    wait_for_resource_ready(BridgeIDEgress, 5),
    emqx:publish(emqx_message:make(RuleTopic, Payload2)),
    %% The rule should have matched exactly once, and its action succeeded.
    {ok, 200, Rule1} = request(get, uri(["rules", RuleId, "metrics"]), []),
    #{
        <<"id">> := RuleId,
        <<"metrics">> := #{
            <<"matched">> := 1,
            <<"passed">> := 1,
            <<"failed">> := 0,
            <<"failed.exception">> := 0,
            <<"failed.no_result">> := 0,
            <<"matched.rate">> := _,
            <<"matched.rate.max">> := _,
            <<"matched.rate.last5m">> := _,
            <<"actions.total">> := 1,
            <<"actions.success">> := 1,
            <<"actions.failed">> := 0,
            <<"actions.failed.out_of_service">> := 0,
            <<"actions.failed.unknown">> := 0
        }
    } = jsx:decode(Rule1),
    %% we should receive a message on the "remote" broker, with specified topic
    ?assert(
        receive
            {deliver, RemoteTopic2, #message{payload = Payload2}} ->
                ct:pal("remote broker got message: ~p on topic ~p", [Payload2, RemoteTopic2]),
                true;
            Msg ->
                ct:pal("Msg: ~p", [Msg]),
                false
        after 100 ->
            false
        end
    ),
    %% Bridge metrics reflect both messages (one direct, one via the rule) —
    %% presumably the first two ?metrics fields are matched/success counts;
    %% confirm against the ?metrics macro definition.
    {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
    ?assertMatch(
        #{
            <<"metrics">> := ?metrics(2, 2, 0, _, _, _),
            <<"node_metrics">> :=
                [#{<<"node">> := _, <<"metrics">> := ?metrics(2, 2, 0, _, _, _)}]
        },
        jsx:decode(BridgeStr)
    ),
    %% Clean up: rule, bridge, connector.
    {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
    {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
    {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []).
%% Convenience wrapper: issue the HTTP request authenticated as the
%% <<"connector_admin">> API user (request/4 is defined elsewhere in
%% this suite).
request(Method, Url, Body) ->
    request(<<"connector_admin">>, Method, Url, Body).
%% Poll the bridge identified by InstId every 100 ms until its resource
%% reports status `connected`, giving up (and failing the test case) after
%% the given number of attempts is exhausted.
wait_for_resource_ready(InstId, 0) ->
    %% Out of retries: log whatever the lookup returns and fail the test case.
    ct:pal("--- bridge ~p: ~p", [InstId, emqx_bridge:lookup(InstId)]),
    ct:fail(wait_resource_timeout);
wait_for_resource_ready(InstId, AttemptsLeft) ->
    IsConnected =
        case emqx_bridge:lookup(InstId) of
            {ok, #{resource_data := #{status := connected}}} -> true;
            _ -> false
        end,
    case IsConnected of
        true ->
            ok;
        false ->
            timer:sleep(100),
            wait_for_resource_ready(InstId, AttemptsLeft - 1)
    end.

Some files were not shown because too many files have changed in this diff Show More