diff --git a/.ci/docker-compose-file/.env b/.ci/docker-compose-file/.env index ae3d12c64..bd925e224 100644 --- a/.ci/docker-compose-file/.env +++ b/.ci/docker-compose-file/.env @@ -3,6 +3,7 @@ REDIS_TAG=6 MONGO_TAG=5 PGSQL_TAG=13 LDAP_TAG=2.4.50 +INFLUXDB_TAG=2.5.0 TARGET=emqx/emqx EMQX_TAG=build-alpine-amd64 diff --git a/.ci/docker-compose-file/docker-compose-influxdb-tcp.yaml b/.ci/docker-compose-file/docker-compose-influxdb-tcp.yaml new file mode 100644 index 000000000..1780bc7e2 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-influxdb-tcp.yaml @@ -0,0 +1,36 @@ +version: '3.9' + +services: + influxdb_server_tcp: + container_name: influxdb_tcp + image: influxdb:${INFLUXDB_TAG} + expose: + - "8086" + - "8089/udp" + - "8083" + # ports: + # - "8086:8086" + environment: + DOCKER_INFLUXDB_INIT_MODE: setup + DOCKER_INFLUXDB_INIT_USERNAME: root + DOCKER_INFLUXDB_INIT_PASSWORD: emqx@123 + DOCKER_INFLUXDB_INIT_ORG: emqx + DOCKER_INFLUXDB_INIT_BUCKET: mqtt + DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: abcdefg + volumes: + - "./influxdb/setup-v1.sh:/docker-entrypoint-initdb.d/setup-v1.sh" + restart: always + networks: + - emqx_bridge + +# networks: +# emqx_bridge: +# driver: bridge +# name: emqx_bridge +# ipam: +# driver: default +# config: +# - subnet: 172.100.239.0/24 +# gateway: 172.100.239.1 +# - subnet: 2001:3200:3200::/64 +# gateway: 2001:3200:3200::1 diff --git a/.ci/docker-compose-file/docker-compose-influxdb-tls.yaml b/.ci/docker-compose-file/docker-compose-influxdb-tls.yaml new file mode 100644 index 000000000..ec1600bf2 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-influxdb-tls.yaml @@ -0,0 +1,42 @@ +version: '3.9' + +services: + influxdb_server_tls: + container_name: influxdb_tls + image: influxdb:${INFLUXDB_TAG} + expose: + - "8086" + - "8089/udp" + - "8083" + # ports: + # - "8087:8086" + environment: + DOCKER_INFLUXDB_INIT_MODE: setup + DOCKER_INFLUXDB_INIT_USERNAME: root + DOCKER_INFLUXDB_INIT_PASSWORD: emqx@123 + DOCKER_INFLUXDB_INIT_ORG: emqx 
+ DOCKER_INFLUXDB_INIT_BUCKET: mqtt + DOCKER_INFLUXDB_INIT_ADMIN_TOKEN: abcdefg + volumes: + - ./certs/server.crt:/etc/influxdb/cert.pem + - ./certs/server.key:/etc/influxdb/key.pem + - "./influxdb/setup-v1.sh:/docker-entrypoint-initdb.d/setup-v1.sh" + command: + - influxd + - --tls-cert=/etc/influxdb/cert.pem + - --tls-key=/etc/influxdb/key.pem + restart: always + networks: + - emqx_bridge + +# networks: +# emqx_bridge: +# driver: bridge +# name: emqx_bridge +# ipam: +# driver: default +# config: +# - subnet: 172.100.239.0/24 +# gateway: 172.100.239.1 +# - subnet: 2001:3200:3200::/64 +# gateway: 2001:3200:3200::1 diff --git a/.ci/docker-compose-file/docker-compose-kafka.yaml b/.ci/docker-compose-file/docker-compose-kafka.yaml new file mode 100644 index 000000000..ba0161293 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-kafka.yaml @@ -0,0 +1,73 @@ +version: '3.9' + +services: + zookeeper: + image: wurstmeister/zookeeper + ports: + - "2181:2181" + container_name: zookeeper + hostname: zookeeper + networks: + emqx_bridge: + ssl_cert_gen: + image: fredrikhgrelland/alpine-jdk11-openssl + container_name: ssl_cert_gen + volumes: + - emqx-shared-secret:/var/lib/secret + - ./kafka/generate-certs.sh:/bin/generate-certs.sh + entrypoint: /bin/sh + command: /bin/generate-certs.sh + kdc: + hostname: kdc.emqx.net + image: ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04 + container_name: kdc.emqx.net + networks: + emqx_bridge: + volumes: + - emqx-shared-secret:/var/lib/secret + - ./kerberos/krb5.conf:/etc/kdc/krb5.conf + - ./kerberos/krb5.conf:/etc/krb5.conf + - ./kerberos/run.sh:/usr/bin/run.sh + command: run.sh + kafka_1: + image: wurstmeister/kafka:2.13-2.7.0 + ports: + - "9092:9092" + - "9093:9093" + - "9094:9094" + - "9095:9095" + container_name: kafka-1.emqx.net + hostname: kafka-1.emqx.net + depends_on: + - "kdc" + - "zookeeper" + - "ssl_cert_gen" + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 + KAFKA_LISTENERS: 
PLAINTEXT://:9092,SASL_PLAINTEXT://:9093,SSL://:9094,SASL_SSL://:9095 + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka-1.emqx.net:9092,SASL_PLAINTEXT://kafka-1.emqx.net:9093,SSL://kafka-1.emqx.net:9094,SASL_SSL://kafka-1.emqx.net:9095 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,SASL_PLAINTEXT:SASL_PLAINTEXT,SSL:SSL,SASL_SSL:SASL_SSL + KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT + KAFKA_SASL_ENABLED_MECHANISMS: PLAIN,SCRAM-SHA-256,SCRAM-SHA-512,GSSAPI + KAFKA_SASL_KERBEROS_SERVICE_NAME: kafka + KAFKA_SASL_MECHANISM_INTER_BROKER_PROTOCOL: PLAIN + KAFKA_JMX_OPTS: "-Djava.security.auth.login.config=/etc/kafka/jaas.conf" + KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" + KAFKA_CREATE_TOPICS: test-topic-one-partition:1:1,test-topic-two-partitions:2:1,test-topic-three-partitions:3:1, + KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer + KAFKA_SSL_TRUSTSTORE_LOCATION: /var/lib/secret/kafka.truststore.jks + KAFKA_SSL_TRUSTSTORE_PASSWORD: password + KAFKA_SSL_KEYSTORE_LOCATION: /var/lib/secret/kafka.keystore.jks + KAFKA_SSL_KEYSTORE_PASSWORD: password + KAFKA_SSL_KEY_PASSWORD: password + networks: + emqx_bridge: + volumes: + - emqx-shared-secret:/var/lib/secret + - ./kafka/jaas.conf:/etc/kafka/jaas.conf + - ./kafka/run_add_scram_users.sh:/bin/run_add_scram_users.sh + - ./kerberos/krb5.conf:/etc/kdc/krb5.conf + - ./kerberos/krb5.conf:/etc/krb5.conf + command: run_add_scram_users.sh + diff --git a/.ci/docker-compose-file/docker-compose-mongo-replicaset-tcp.yaml b/.ci/docker-compose-file/docker-compose-mongo-replicaset-tcp.yaml index f83fe0932..54506abd8 100644 --- a/.ci/docker-compose-file/docker-compose-mongo-replicaset-tcp.yaml +++ b/.ci/docker-compose-file/docker-compose-mongo-replicaset-tcp.yaml @@ -18,7 +18,7 @@ services: --ipv6 --bind_ip_all --replSet rs0 - + mongo2: hostname: mongo2 container_name: mongo2 @@ -54,10 +54,10 @@ services: --ipv6 --bind_ip_all --replSet rs0 - - mongo_client: + + mongo_rs_client: image: mongo:${MONGO_TAG} - 
container_name: mongo_client + container_name: mongo_rs_client networks: - emqx_bridge depends_on: diff --git a/.ci/docker-compose-file/docker-compose-mongo-sharded-tcp.yaml b/.ci/docker-compose-file/docker-compose-mongo-sharded-tcp.yaml new file mode 100644 index 000000000..a8b51689b --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-mongo-sharded-tcp.yaml @@ -0,0 +1,90 @@ +version: "3" + +services: + mongosharded1: + hostname: mongosharded1 + container_name: mongosharded1 + image: mongo:${MONGO_TAG} + environment: + MONGO_INITDB_DATABASE: mqtt + networks: + - emqx_bridge + expose: + - 27017 + ports: + - 27014:27017 + restart: always + command: + --configsvr + --replSet cfg0 + --port 27017 + --ipv6 + --bind_ip_all + + mongosharded2: + hostname: mongosharded2 + container_name: mongosharded2 + image: mongo:${MONGO_TAG} + environment: + MONGO_INITDB_DATABASE: mqtt + networks: + - emqx_bridge + expose: + - 27017 + ports: + - 27015:27017 + restart: always + command: + --shardsvr + --replSet rs0 + --port 27017 + --ipv6 + --bind_ip_all + + mongosharded3: + hostname: mongosharded3 + container_name: mongosharded3 + image: mongo:${MONGO_TAG} + environment: + MONGO_INITDB_DATABASE: mqtt + networks: + - emqx_bridge + expose: + - 27017 + ports: + - 27016:27017 + restart: always + entrypoint: mongos + command: + --configdb cfg0/mongosharded1:27017 + --port 27017 + --ipv6 + --bind_ip_all + + mongosharded_client: + image: mongo:${MONGO_TAG} + container_name: mongosharded_client + networks: + - emqx_bridge + depends_on: + - mongosharded1 + - mongosharded2 + - mongosharded3 + command: + - /bin/bash + - -c + - | + while ! mongo --host mongosharded1 --eval 'db.runCommand("ping").ok' --quiet >/dev/null 2>&1 ; do + sleep 1 + done + mongo --host mongosharded1 --eval "rs.initiate( { _id : 'cfg0', configsvr: true, members: [ { _id : 0, host : 'mongosharded1:27017' } ] })" + while ! 
mongo --host mongosharded2 --eval 'db.runCommand("ping").ok' --quiet >/dev/null 2>&1 ; do + sleep 1 + done + mongo --host mongosharded2 --eval "rs.initiate( { _id : 'rs0', members: [ { _id : 0, host : 'mongosharded2:27017' } ] })" + mongo --host mongosharded2 --eval "rs.status()" + while ! mongo --host mongosharded3 --eval 'db.runCommand("ping").ok' --quiet >/dev/null 2>&1 ; do + sleep 1 + done + mongo --host mongosharded3 --eval "sh.addShard('rs0/mongosharded2:27017')" + mongo --host mongosharded3 --eval "sh.enableSharding('mqtt')" diff --git a/.ci/docker-compose-file/docker-compose-python.yaml b/.ci/docker-compose-file/docker-compose-python.yaml index 0b9af4517..14e798c6b 100644 --- a/.ci/docker-compose-file/docker-compose-python.yaml +++ b/.ci/docker-compose-file/docker-compose-python.yaml @@ -2,7 +2,7 @@ version: '3.9' services: python: - container_name: python + container_name: python image: python:3.7.2-alpine3.9 depends_on: - emqx1 diff --git a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml new file mode 100644 index 000000000..66e7ec308 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml @@ -0,0 +1,20 @@ +version: '3.9' + +services: + toxiproxy: + container_name: toxiproxy + image: ghcr.io/shopify/toxiproxy:2.5.0 + restart: always + networks: + - emqx_bridge + volumes: + - "./toxiproxy.json:/config/toxiproxy.json" + ports: + - 8474:8474 + - 8086:8086 + - 8087:8087 + - 13306:3306 + - 13307:3307 + command: + - "-host=0.0.0.0" + - "-config=/config/toxiproxy.json" diff --git a/.ci/docker-compose-file/docker-compose.yaml b/.ci/docker-compose-file/docker-compose.yaml index 3d99d3969..4a5ef7070 100644 --- a/.ci/docker-compose-file/docker-compose.yaml +++ b/.ci/docker-compose-file/docker-compose.yaml @@ -18,6 +18,9 @@ services: - emqx_bridge volumes: - ../..:/emqx + - emqx-shared-secret:/var/lib/secret + - ./kerberos/krb5.conf:/etc/kdc/krb5.conf + - 
./kerberos/krb5.conf:/etc/krb5.conf working_dir: /emqx tty: true user: "${UID_GID}" @@ -34,3 +37,6 @@ networks: gateway: 172.100.239.1 - subnet: 2001:3200:3200::/64 gateway: 2001:3200:3200::1 + +volumes: # add this section + emqx-shared-secret: # does not need anything underneath this diff --git a/.ci/docker-compose-file/influxdb/setup-v1.sh b/.ci/docker-compose-file/influxdb/setup-v1.sh new file mode 100755 index 000000000..92baf9905 --- /dev/null +++ b/.ci/docker-compose-file/influxdb/setup-v1.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -e + +# influx v1 dbrp create \ +# --bucket-id ${DOCKER_INFLUXDB_INIT_BUCKET_ID} \ +# --db ${V1_DB_NAME} \ +# --rp ${V1_RP_NAME} \ +# --default \ +# --org ${DOCKER_INFLUXDB_INIT_ORG} + +influx v1 auth create \ + --username "${DOCKER_INFLUXDB_INIT_USERNAME}" \ + --password "${DOCKER_INFLUXDB_INIT_PASSWORD}" \ + --write-bucket "${DOCKER_INFLUXDB_INIT_BUCKET_ID}" \ + --org "${DOCKER_INFLUXDB_INIT_ORG}" diff --git a/.ci/docker-compose-file/kafka/generate-certs.sh b/.ci/docker-compose-file/kafka/generate-certs.sh new file mode 100755 index 000000000..3f1c75550 --- /dev/null +++ b/.ci/docker-compose-file/kafka/generate-certs.sh @@ -0,0 +1,46 @@ +#!/usr/bin/bash + +set -euo pipefail + +set -x + +# Source https://github.com/zmstone/docker-kafka/blob/master/generate-certs.sh + +HOST="*." 
+DAYS=3650 +PASS="password" + +cd /var/lib/secret/ + +# Delete old files +(rm ca.key ca.crt server.key server.csr server.crt client.key client.csr client.crt server.p12 kafka.keystore.jks kafka.truststore.jks 2>/dev/null || true) + +ls + +echo '== Generate self-signed server and client certificates' +echo '= generate CA' +openssl req -new -x509 -keyout ca.key -out ca.crt -days $DAYS -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST" + +echo '= generate server certificate request' +openssl req -newkey rsa:2048 -sha256 -keyout server.key -out server.csr -days "$DAYS" -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST" + +echo '= sign server certificate' +openssl x509 -req -CA ca.crt -CAkey ca.key -in server.csr -out server.crt -days "$DAYS" -CAcreateserial + +echo '= generate client certificate request' +openssl req -newkey rsa:2048 -sha256 -keyout client.key -out client.csr -days "$DAYS" -nodes -subj "/C=SE/ST=Stockholm/L=Stockholm/O=brod/OU=test/CN=$HOST" + +echo '== sign client certificate' +openssl x509 -req -CA ca.crt -CAkey ca.key -in client.csr -out client.crt -days $DAYS -CAserial ca.srl + +echo '= Convert self-signed certificate to PKCS#12 format' +openssl pkcs12 -export -name "$HOST" -in server.crt -inkey server.key -out server.p12 -CAfile ca.crt -passout pass:"$PASS" + +echo '= Import PKCS#12 into a java keystore' + +echo $PASS | keytool -importkeystore -destkeystore kafka.keystore.jks -srckeystore server.p12 -srcstoretype pkcs12 -alias "$HOST" -storepass "$PASS" + + +echo '= Import CA into java truststore' + +echo yes | keytool -keystore kafka.truststore.jks -alias CARoot -import -file ca.crt -storepass "$PASS" diff --git a/.ci/docker-compose-file/kafka/jaas.conf b/.ci/docker-compose-file/kafka/jaas.conf new file mode 100644 index 000000000..8ffe8457d --- /dev/null +++ b/.ci/docker-compose-file/kafka/jaas.conf @@ -0,0 +1,16 @@ +KafkaServer { + org.apache.kafka.common.security.plain.PlainLoginModule required + 
user_admin="password" + user_emqxuser="password"; + + org.apache.kafka.common.security.scram.ScramLoginModule required + username="admin" + password="password"; + + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + storeKey=true + keyTab="/var/lib/secret/kafka.keytab" + principal="kafka/kafka-1.emqx.net@KDC.EMQX.NET"; + +}; diff --git a/.ci/docker-compose-file/kafka/run_add_scram_users.sh b/.ci/docker-compose-file/kafka/run_add_scram_users.sh new file mode 100755 index 000000000..4b51fee0d --- /dev/null +++ b/.ci/docker-compose-file/kafka/run_add_scram_users.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +set -euo pipefail + + +TIMEOUT=60 + +echo "+++++++ Sleep for a while to make sure that old keytab and truststore is deleted ++++++++" + +sleep 5 + +echo "+++++++ Wait until Kerberos Keytab is created ++++++++" + +timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.keytab ]; do sleep 1; done' + + +echo "+++++++ Wait until SSL certs are generated ++++++++" + +timeout $TIMEOUT bash -c 'until [ -f /var/lib/secret/kafka.truststore.jks ]; do sleep 1; done' + +sleep 3 + +echo "+++++++ Starting Kafka ++++++++" + +start-kafka.sh & + +SERVER=localhost +PORT1=9092 +PORT2=9093 +TIMEOUT=60 + +echo "+++++++ Wait until Kafka ports are up ++++++++" + +# shellcheck disable=SC2016 +timeout $TIMEOUT bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT1 + +# shellcheck disable=SC2016 +timeout $TIMEOUT bash -c 'until printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT2 + +echo "+++++++ Run config commands ++++++++" + +kafka-configs.sh --bootstrap-server localhost:9092 --alter --add-config 'SCRAM-SHA-256=[iterations=8192,password=password],SCRAM-SHA-512=[password=password]' --entity-type users --entity-name emqxuser + +echo "+++++++ Wait until Kafka ports are down ++++++++" + +bash -c 'while printf "" 2>>/dev/null >>/dev/tcp/$0/$1; do sleep 1; done' $SERVER $PORT1 + +echo "+++++++ Kafka ports are down 
++++++++" + diff --git a/.ci/docker-compose-file/kerberos/krb5.conf b/.ci/docker-compose-file/kerberos/krb5.conf new file mode 100644 index 000000000..032236888 --- /dev/null +++ b/.ci/docker-compose-file/kerberos/krb5.conf @@ -0,0 +1,23 @@ +[libdefaults] + default_realm = KDC.EMQX.NET + ticket_lifetime = 24h + renew_lifetime = 7d + forwardable = true + rdns = false + dns_lookup_kdc = no + dns_lookup_realm = no + +[realms] + KDC.EMQX.NET = { + kdc = kdc + admin_server = kadmin + } + +[domain_realm] + kdc.emqx.net = KDC.EMQX.NET + .kdc.emqx.net = KDC.EMQX.NET + +[logging] + kdc = FILE:/var/log/kerberos/krb5kdc.log + admin_server = FILE:/var/log/kerberos/kadmin.log + default = FILE:/var/log/kerberos/krb5lib.log diff --git a/.ci/docker-compose-file/kerberos/run.sh b/.ci/docker-compose-file/kerberos/run.sh new file mode 100755 index 000000000..c9580073f --- /dev/null +++ b/.ci/docker-compose-file/kerberos/run.sh @@ -0,0 +1,25 @@ +#!/bin/sh + + +echo "Remove old keytabs" + +rm -f /var/lib/secret/kafka.keytab > /dev/null 2>&1 +rm -f /var/lib/secret/rig.keytab > /dev/null 2>&1 + +echo "Create realm" + +kdb5_util -P emqx -r KDC.EMQX.NET create -s + +echo "Add principals" + +kadmin.local -w password -q "add_principal -randkey kafka/kafka-1.emqx.net@KDC.EMQX.NET" +kadmin.local -w password -q "add_principal -randkey rig@KDC.EMQX.NET" > /dev/null + + +echo "Create keytabs" + +kadmin.local -w password -q "ktadd -k /var/lib/secret/kafka.keytab -norandkey kafka/kafka-1.emqx.net@KDC.EMQX.NET " > /dev/null +kadmin.local -w password -q "ktadd -k /var/lib/secret/rig.keytab -norandkey rig@KDC.EMQX.NET " > /dev/null + +echo STARTING KDC +/usr/sbin/krb5kdc -n diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json new file mode 100644 index 000000000..2d3a30b6b --- /dev/null +++ b/.ci/docker-compose-file/toxiproxy.json @@ -0,0 +1,26 @@ +[ + { + "name": "influxdb_tcp", + "listen": "0.0.0.0:8086", + "upstream": "influxdb_tcp:8086", + "enabled": true + 
}, + { + "name": "influxdb_tls", + "listen": "0.0.0.0:8087", + "upstream": "influxdb_tls:8086", + "enabled": true + }, + { + "name": "mysql_tcp", + "listen": "0.0.0.0:3306", + "upstream": "mysql:3306", + "enabled": true + }, + { + "name": "mysql_tls", + "listen": "0.0.0.0:3307", + "upstream": "mysql-tls:3306", + "enabled": true + } +] diff --git a/.github/workflows/build_and_push_docker_images.yaml b/.github/workflows/build_and_push_docker_images.yaml index 3372fefc9..565fe5147 100644 --- a/.github/workflows/build_and_push_docker_images.yaml +++ b/.github/workflows/build_and_push_docker_images.yaml @@ -115,7 +115,9 @@ jobs: - 24.3.4.2-1 # update to latest elixir: - 1.13.4 # update to latest - + exclude: # TODO: publish enterprise to ecr too? + - registry: 'public.ecr.aws' + profile: emqx-enterprise steps: - uses: AutoModality/action-clean@v1 if: matrix.arch[1] == 'aws-arm64' @@ -261,6 +263,9 @@ jobs: registry: - 'docker.io' - 'public.ecr.aws' + exclude: + - registry: 'public.ecr.aws' + profile: emqx-enterprise steps: - uses: actions/download-artifact@v3 diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 5149b5d46..8f09fd7ec 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -86,14 +86,13 @@ jobs: windows: runs-on: windows-2019 + if: startsWith(github.ref_name, 'v') needs: prepare strategy: fail-fast: false matrix: profile: # for now only CE for windows - emqx - otp: - - 24.2.1 steps: - uses: actions/download-artifact@v3 with: @@ -104,7 +103,7 @@ jobs: - uses: ilammy/msvc-dev-cmd@v1.12.0 - uses: erlef/setup-beam@v1 with: - otp-version: ${{ matrix.otp }} + otp-version: 24.2.1 - name: build env: PYTHON: python @@ -129,7 +128,7 @@ jobs: echo "EMQX uninstalled" - uses: actions/upload-artifact@v3 with: - name: ${{ matrix.profile }}-windows + name: ${{ matrix.profile }} path: source/_packages/${{ matrix.profile }}/ mac: @@ -167,7 +166,7 @@ jobs: apple_developer_id_bundle_password: 
${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} - uses: actions/upload-artifact@v3 with: - name: ${{ matrix.profile }}-${{ matrix.otp }} + name: ${{ matrix.profile }} path: _packages/${{ matrix.profile }}/ linux: @@ -182,7 +181,7 @@ jobs: profile: - ${{ needs.prepare.outputs.BUILD_PROFILE }} otp: - - 24.3.4.2-1 # we test with OTP 23, but only build package on OTP 24 versions + - 24.3.4.2-1 elixir: - 1.13.4 # used to split elixir packages into a separate job, since the @@ -200,51 +199,31 @@ jobs: os: - ubuntu20.04 - ubuntu18.04 - - ubuntu16.04 - debian11 - debian10 - - debian9 - el8 - el7 - - raspbian10 build_machine: - aws-arm64 - ubuntu-20.04 exclude: - - arch: arm64 - build_machine: ubuntu-20.04 - - arch: amd64 - build_machine: aws-arm64 - - os: raspbian9 - arch: amd64 - - os: raspbian10 - arch: amd64 - - os: raspbian10 # we only have arm32 image - arch: arm64 - - os: raspbian9 - profile: emqx - - os: raspbian10 - profile: emqx - - os: raspbian9 - profile: emqx-enterprise - - os: raspbian10 - profile: emqx-enterprise - include: - - profile: emqx - otp: 24.3.4.2-1 - elixir: 1.13.4 - build_elixir: with_elixir - arch: amd64 - os: ubuntu20.04 + - arch: arm64 build_machine: ubuntu-20.04 - - profile: emqx - otp: 24.3.4.2-1 - elixir: 1.13.4 - build_elixir: with_elixir - arch: amd64 - os: el8 - build_machine: ubuntu-20.04 - + - arch: amd64 + build_machine: aws-arm64 + # elixir: only for opensource edition and only on ubuntu20.04 and el8 on amd64 + - build_elixir: with_elixir + profile: emqx-enterprise + - build_elixir: with_elixir + arch: arm64 + - build_elixir: with_elixir + os: ubuntu18.04 + - build_elixir: with_elixir + os: debian10 + - build_elixir: with_elixir + os: debian11 + - build_elixir: with_elixir + os: el7 defaults: run: shell: bash @@ -293,7 +272,7 @@ jobs: done - uses: actions/upload-artifact@v3 with: - name: ${{ matrix.profile }}-${{ matrix.otp }} + name: ${{ matrix.profile }} path: source/_packages/${{ matrix.profile }}/ publish_artifacts: @@ -305,15 
+284,10 @@ jobs: matrix: profile: - ${{ needs.prepare.outputs.BUILD_PROFILE }} - otp: - - 24.3.4.2-1 - include: - - profile: emqx - otp: windows # otp version on windows is rather fixed steps: - uses: actions/download-artifact@v3 with: - name: ${{ matrix.profile }}-${{ matrix.otp }} + name: ${{ matrix.profile }} path: packages/${{ matrix.profile }} - name: install dos2unix run: sudo apt-get update && sudo apt install -y dos2unix diff --git a/.github/workflows/elixir_release.yml b/.github/workflows/elixir_release.yml index cef1095a2..b93e6a675 100644 --- a/.github/workflows/elixir_release.yml +++ b/.github/workflows/elixir_release.yml @@ -12,8 +12,12 @@ on: jobs: elixir_release_build: runs-on: ubuntu-latest + strategy: + matrix: + profile: + - emqx + - emqx-enterprise container: ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04 - steps: - name: Checkout uses: actions/checkout@v3 @@ -23,15 +27,15 @@ jobs: run: | git config --global --add safe.directory "$GITHUB_WORKSPACE" - name: elixir release - run: make emqx-elixir + run: make ${{ matrix.profile }}-elixir - name: start release run: | - cd _build/emqx/rel/emqx + cd _build/${{ matrix.profile }}/rel/emqx bin/emqx start - name: check if started run: | sleep 10 nc -zv localhost 1883 - cd _build/emqx/rel/emqx + cd _build/${{ matrix.profile }}/rel/emqx bin/emqx ping bin/emqx ctl status diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index f35a7156e..896617d15 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -15,41 +15,74 @@ on: jobs: prepare: - runs-on: ubuntu-20.04 + runs-on: aws-amd64 # prepare source with any OTP version, no need for a matrix container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" outputs: - fast_ct_apps: ${{ steps.run_find_apps.outputs.fast_ct_apps }} - docker_ct_apps: ${{ steps.run_find_apps.outputs.docker_ct_apps }} + fast_ct_apps: ${{ steps.find_ct_apps.outputs.fast_ct_apps 
}} + docker_ct_apps: ${{ steps.find_ct_apps.outputs.docker_ct_apps }} steps: + - uses: AutoModality/action-clean@v1 - uses: actions/checkout@v3 with: path: source - fetch-depth: 0 - - name: find_ct_apps + - name: Find CT Apps working-directory: source - id: run_find_apps + id: find_ct_apps run: | - fast_ct_apps="$(./scripts/find-apps.sh --ct fast --json)" - docker_ct_apps="$(./scripts/find-apps.sh --ct docker --json)" - echo "fast-ct-apps: $fast_ct_apps" - echo "docer-ct-apps: $docker_ct_apps" + fast_ct_apps="$(./scripts/find-apps.sh --ci fast)" + docker_ct_apps="$(./scripts/find-apps.sh --ci docker)" + echo "fast: $fast_ct_apps" + echo "docker: $docker_ct_apps" echo "::set-output name=fast_ct_apps::$fast_ct_apps" echo "::set-output name=docker_ct_apps::$docker_ct_apps" - name: get_all_deps working-directory: source + env: + PROFILE: emqx + #DIAGNOSTIC: 1 run: | - make deps-all - ./rebar3 as test compile + make ensure-rebar3 + # fetch all deps and compile + make emqx + make test-compile cd .. zip -ryq source.zip source/* source/.[^.]* - uses: actions/upload-artifact@v3 with: - name: source + name: source-emqx + path: source.zip + + prepare_ee: + runs-on: aws-amd64 + # prepare source with any OTP version, no need for a matrix + container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/checkout@v3 + with: + path: source + - name: get_all_deps + working-directory: source + env: + PROFILE: emqx-enterprise + #DIAGNOSTIC: 1 + run: | + make ensure-rebar3 + # fetch all deps and compile + make emqx-enterprise + make test-compile + cd .. 
+ zip -ryq source.zip source/* source/.[^.]* + - uses: actions/upload-artifact@v3 + with: + name: source-emqx-enterprise path: source.zip eunit_and_proper: - needs: prepare + needs: + - prepare + - prepare_ee runs-on: aws-amd64 strategy: fail-fast: false @@ -66,7 +99,7 @@ jobs: - uses: AutoModality/action-clean@v1 - uses: actions/download-artifact@v3 with: - name: source + name: source-${{ matrix.profile }} path: . - name: unzip source code env: @@ -92,11 +125,13 @@ jobs: path: source/_build/test/cover ct_docker: - needs: prepare + needs: + - prepare + - prepare_ee strategy: fail-fast: false matrix: - app_name: ${{ fromJson(needs.prepare.outputs.docker_ct_apps) }} + app: ${{ fromJson(needs.prepare.outputs.docker_ct_apps) }} runs-on: aws-amd64 defaults: @@ -107,20 +142,24 @@ jobs: - uses: AutoModality/action-clean@v1 - uses: actions/download-artifact@v3 with: - name: source + name: source-${{ matrix.app[1] }} path: . - name: unzip source code run: unzip -q source.zip - - name: docker compose up + - name: run tests working-directory: source env: MONGO_TAG: 5 MYSQL_TAG: 8 PGSQL_TAG: 13 REDIS_TAG: 6 + INFLUXDB_TAG: 2.5.0 + WHICH_APP: ${{ matrix.app[0] }} + PROFILE: ${{ matrix.app[1] }} run: | + echo $PROFILE rm _build/default/lib/rocksdb/_build/cmake/CMakeCache.txt - ./scripts/ct/run.sh --app ${{ matrix.app_name }} + ./scripts/ct/run.sh --app $WHICH_APP - uses: actions/upload-artifact@v3 with: name: coverdata @@ -128,19 +167,17 @@ jobs: - uses: actions/upload-artifact@v3 if: failure() with: - name: logs-${{ matrix.profile }} + name: logs-${{ matrix.app[0] }}-${{ matrix.app[1] }} path: source/_build/test/logs ct: - needs: prepare + needs: + - prepare + - prepare_ee strategy: fail-fast: false matrix: - app_name: ${{ fromJson(needs.prepare.outputs.fast_ct_apps) }} - profile: - - emqx - - emqx-enterprise - + app: ${{ fromJson(needs.prepare.outputs.fast_ct_apps) }} runs-on: aws-amd64 container: "ghcr.io/emqx/emqx-builder/5.0-18:1.13.4-24.3.4.2-1-ubuntu20.04" defaults: @@ 
-151,37 +188,19 @@ jobs: - uses: AutoModality/action-clean@v1 - uses: actions/download-artifact@v3 with: - name: source + name: source-${{ matrix.app[1] }} path: . - name: unzip source code run: unzip -q source.zip - # produces .coverdata + # produces $PROFILE-.coverdata - name: run common test working-directory: source env: - PROFILE: ${{ matrix.profile }} - WHICH_APP: ${{ matrix.app_name }} + WHICH_APP: ${{ matrix.app[0] }} + PROFILE: ${{ matrix.app[1] }} run: | - if [ "$PROFILE" = 'emqx-enterprise' ]; then - COMPILE_FLAGS="$(grep -R "EMQX_RELEASE_EDITION" "$WHICH_APP" | wc -l || true)" - if [ "$COMPILE_FLAGS" -gt 0 ]; then - # need to clean first because the default profile was - make clean - make "${WHICH_APP}-ct" - else - echo "skip_common_test_run_for_app ${WHICH_APP}-ct" - fi - else - case "$WHICH_APP" in - lib-ee/*) - echo "skip_opensource_edition_test_for_lib-ee" - ;; - *) - make "${WHICH_APP}-ct" - ;; - esac - fi + make "${WHICH_APP}-ct" - uses: actions/upload-artifact@v3 with: name: coverdata @@ -190,7 +209,7 @@ jobs: - uses: actions/upload-artifact@v3 if: failure() with: - name: logs-${{ matrix.profile }} + name: logs-${{ matrix.app[0] }}-${{ matrix.app[1] }} path: source/_build/test/logs make_cover: @@ -204,7 +223,7 @@ jobs: - uses: AutoModality/action-clean@v1 - uses: actions/download-artifact@v3 with: - name: source + name: source-emqx-enterprise path: . 
- name: unzip source code run: unzip -q source.zip @@ -217,12 +236,15 @@ jobs: - name: make cover working-directory: source + env: + PROFILE: emqx-enterprise run: make cover - name: send to coveralls working-directory: source env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PROFILE: emqx-enterprise run: make coveralls - name: get coveralls logs @@ -242,17 +264,3 @@ jobs: curl -v -k https://coveralls.io/webhook \ --header "Content-Type: application/json" \ --data "{\"repo_name\":\"$GITHUB_REPOSITORY\",\"repo_token\":\"$GITHUB_TOKEN\",\"payload\":{\"build_num\":$GITHUB_RUN_ID,\"status\":\"done\"}}" || true - - allgood_functional_tests: - runs-on: ubuntu-20.04 - needs: - - eunit_and_proper - - ct_docker - - ct - steps: - - name: Check if all functional tests succeeded - uses: re-actors/alls-green@release/v1 - with: - #allowed-failures: - #allowed-skips: - jobs: ${{ toJSON(needs) }} diff --git a/Makefile b/Makefile index 02bcbf458..f9310e636 100644 --- a/Makefile +++ b/Makefile @@ -6,8 +6,8 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d export EMQX_DEFAULT_RUNNER = debian:11-slim export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh) export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh) -export EMQX_DASHBOARD_VERSION ?= v1.1.2 -export EMQX_EE_DASHBOARD_VERSION ?= e1.0.0 +export EMQX_DASHBOARD_VERSION ?= v1.1.3-sync-code +export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.5 export EMQX_REL_FORM ?= tgz export QUICER_DOWNLOAD_FROM_RELEASE = 1 ifeq ($(OS),Windows_NT) @@ -61,15 +61,19 @@ mix-deps-get: $(ELIXIR_COMMON_DEPS) @mix deps.get .PHONY: eunit -eunit: $(REBAR) conf-segs +eunit: $(REBAR) merge-config @ENABLE_COVER_COMPILE=1 $(REBAR) eunit -v -c --cover_export_name $(PROFILE)-eunit .PHONY: proper proper: $(REBAR) @ENABLE_COVER_COMPILE=1 $(REBAR) proper -d test/props -c +.PHONY: test-compile +test-compile: $(REBAR) merge-config + $(REBAR) as test compile + .PHONY: ct -ct: $(REBAR) conf-segs +ct: $(REBAR) merge-config 
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(PROFILE)-ct .PHONY: static_checks @@ -97,7 +101,11 @@ $(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app)))) .PHONY: ct-suite ct-suite: $(REBAR) ifneq ($(TESTCASE),) +ifneq ($(GROUP),) + $(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --case $(TESTCASE) --group $(GROUP) +else $(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --case $(TESTCASE) +endif else ifneq ($(GROUP),) $(REBAR) ct -v --readable=$(CT_READABLE) --name $(CT_NODE_NAME) --suite $(SUITE) --group $(GROUP) else @@ -114,8 +122,6 @@ coveralls: $(REBAR) COMMON_DEPS := $(REBAR) -ELIXIR_COMMON_DEPS := ensure-hex ensure-mix-rebar3 ensure-mix-rebar - .PHONY: $(REL_PROFILES) $(REL_PROFILES:%=%): $(COMMON_DEPS) @$(BUILD) $(@) rel @@ -218,19 +224,19 @@ ALL_DOCKERS = $(REL_PROFILES) $(REL_PROFILES:%=%-elixir) $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt)))) .PHONY: -conf-segs: +merge-config: @$(SCRIPTS)/merge-config.escript @$(SCRIPTS)/merge-i18n.escript ## elixir target is to create release packages using Elixir's Mix .PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir) -$(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir): $(COMMON_DEPS) $(ELIXIR_COMMON_DEPS) mix-deps-get +$(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir): $(COMMON_DEPS) @env IS_ELIXIR=yes $(BUILD) $(subst -elixir,,$(@)) elixir .PHONY: $(REL_PROFILES:%=%-elixir-pkg) define gen-elixir-pkg-target # the Elixir places the tar in a different path than Rebar3 -$1-elixir-pkg: $(COMMON_DEPS) $(ELIXIR_COMMON_DEPS) mix-deps-get +$1-elixir-pkg: $(COMMON_DEPS) @env TAR_PKG_DIR=_build/$1-pkg \ IS_ELIXIR=yes \ $(BUILD) $1-pkg pkg @@ -239,7 +245,7 @@ $(foreach pt,$(REL_PROFILES),$(eval $(call gen-elixir-pkg-target,$(pt)))) .PHONY: $(REL_PROFILES:%=%-elixir-tgz) define gen-elixir-tgz-target -$1-elixir-tgz: $(COMMON_DEPS) $(ELIXIR_COMMON_DEPS) mix-deps-get 
+$1-elixir-tgz: $(COMMON_DEPS) @env IS_ELIXIR=yes $(BUILD) $1 tgz endef ALL_ELIXIR_TGZS = $(REL_PROFILES) diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index 1dfb1bd61..f17a8d3f2 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -35,7 +35,7 @@ -define(EMQX_RELEASE_CE, "5.0.11"). %% Enterprise edition --define(EMQX_RELEASE_EE, "5.0.0-alpha.1"). +-define(EMQX_RELEASE_EE, "5.0.0-beta.5"). %% the HTTP API version -define(EMQX_API_VERSION, "5.0"). diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 156ca5c21..d13fda30a 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -22,14 +22,14 @@ %% This rebar.config is necessary because the app may be used as a %% `git_subdir` dependency in other projects. {deps, [ - {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.1"}}}, + {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}}, {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}}, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.6"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, - {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.30.0"}}}, + {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.31.2"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}} @@ -43,7 +43,7 @@ {meck, "0.9.2"}, {proper, "1.4.0"}, {bbmustache, "1.10.0"}, - {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.6.0"}}} + {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.7.0-rc.1"}}} ]}, {extra_src_dirs, [{"test", [recursive]}]} ]} diff --git 
a/apps/emqx/src/emqx_map_lib.erl b/apps/emqx/src/emqx_map_lib.erl index b01391c7b..6484d4269 100644 --- a/apps/emqx/src/emqx_map_lib.erl +++ b/apps/emqx/src/emqx_map_lib.erl @@ -133,7 +133,7 @@ deep_merge(BaseMap, NewMap) -> ), maps:merge(MergedBase, maps:with(NewKeys, NewMap)). --spec deep_convert(map(), convert_fun(), Args :: list()) -> map(). +-spec deep_convert(any(), convert_fun(), Args :: list()) -> any(). deep_convert(Map, ConvFun, Args) when is_map(Map) -> maps:fold( fun(K, V, Acc) -> diff --git a/apps/emqx/src/emqx_metrics_worker.erl b/apps/emqx/src/emqx_metrics_worker.erl index 21e73ff51..ab6a0b1a6 100644 --- a/apps/emqx/src/emqx_metrics_worker.erl +++ b/apps/emqx/src/emqx_metrics_worker.erl @@ -173,7 +173,7 @@ get_metrics(Name, Id) -> inc(Name, Id, Metric) -> inc(Name, Id, Metric, 1). --spec inc(handler_name(), metric_id(), atom(), pos_integer()) -> ok. +-spec inc(handler_name(), metric_id(), atom(), integer()) -> ok. inc(Name, Id, Metric, Val) -> counters:add(get_ref(Name, Id), idx_metric(Name, Id, Metric), Val). diff --git a/apps/emqx/src/emqx_release.erl b/apps/emqx/src/emqx_release.erl index 62dcd89dc..f6a3db5d0 100644 --- a/apps/emqx/src/emqx_release.erl +++ b/apps/emqx/src/emqx_release.erl @@ -18,6 +18,7 @@ -export([ edition/0, + edition_longstr/0, description/0, version/0 ]). @@ -44,8 +45,12 @@ description() -> -spec edition() -> ce | ee. -ifdef(EMQX_RELEASE_EDITION). edition() -> ?EMQX_RELEASE_EDITION. + +edition_longstr() -> <<"Enterprise">>. -else. edition() -> ce. + +edition_longstr() -> <<"Opensource">>. -endif. %% @doc Return the release version. 
diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index eb5238cfe..f2358aa32 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -1908,6 +1908,7 @@ common_ssl_opts_schema(Defaults) -> sensitive => true, required => false, example => <<"">>, + format => <<"password">>, desc => ?DESC(common_ssl_opts_schema_password) } )}, diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index 2d51f6f14..87d9c1368 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -16,7 +16,6 @@ -module(emqx_common_test_helpers). --define(THIS_APP, ?MODULE). -include_lib("common_test/include/ct.hrl"). -type special_config_handler() :: fun(). @@ -28,13 +27,14 @@ boot_modules/1, start_apps/1, start_apps/2, - start_app/4, stop_apps/1, reload/2, app_path/2, + proj_root/0, deps_path/2, flush/0, - flush/1 + flush/1, + render_and_load_app_config/1 ]). -export([ @@ -64,6 +64,15 @@ stop_slave/1 ]). +-export([clear_screen/0]). +-export([with_mock/4]). + +%% Toxiproxy API +-export([ + with_failure/5, + reset_proxy/2 +]). + -define(CERTS_PATH(CertName), filename:join(["etc", "certs", CertName])). -define(MQTT_SSL_TWOWAY, [ @@ -155,13 +164,13 @@ start_apps(Apps) -> start_apps(Apps, fun(_) -> ok end). -spec start_apps(Apps :: apps(), Handler :: special_config_handler()) -> ok. -start_apps(Apps, Handler) when is_function(Handler) -> +start_apps(Apps, SpecAppConfig) when is_function(SpecAppConfig) -> %% Load all application code to beam vm first %% Because, minirest, ekka etc.. application will scan these modules lists:foreach(fun load/1, [emqx | Apps]), ok = start_ekka(), ok = emqx_ratelimiter_SUITE:load_conf(), - lists:foreach(fun(App) -> start_app(App, Handler) end, [emqx | Apps]). + lists:foreach(fun(App) -> start_app(App, SpecAppConfig) end, [emqx | Apps]). 
load(App) -> case application:load(App) of @@ -170,13 +179,36 @@ load(App) -> {error, Reason} -> error({failed_to_load_app, App, Reason}) end. -start_app(App, Handler) -> - start_app( - App, - app_schema(App), - app_path(App, filename:join(["etc", app_conf_file(App)])), - Handler - ). +render_and_load_app_config(App) -> + load(App), + Schema = app_schema(App), + Conf = app_path(App, filename:join(["etc", app_conf_file(App)])), + try + do_render_app_config(App, Schema, Conf) + catch + throw:E:St -> + %% turn throw into error + error({Conf, E, St}) + end. + +do_render_app_config(App, Schema, ConfigFile) -> + Vars = mustache_vars(App), + RenderedConfigFile = render_config_file(ConfigFile, Vars), + read_schema_configs(Schema, RenderedConfigFile), + force_set_config_file_paths(App, [RenderedConfigFile]), + copy_certs(App, RenderedConfigFile), + ok. + +start_app(App, SpecAppConfig) -> + render_and_load_app_config(App), + SpecAppConfig(App), + case application:ensure_all_started(App) of + {ok, _} -> + ok = ensure_dashboard_listeners_started(App), + ok; + {error, Reason} -> + error({failed_to_start_app, App, Reason}) + end. app_conf_file(emqx_conf) -> "emqx.conf.all"; app_conf_file(App) -> atom_to_list(App) ++ ".conf". @@ -198,21 +230,6 @@ mustache_vars(App) -> {platform_log_dir, app_path(App, "log")} ]. -start_app(App, Schema, ConfigFile, SpecAppConfig) -> - Vars = mustache_vars(App), - RenderedConfigFile = render_config_file(ConfigFile, Vars), - read_schema_configs(Schema, RenderedConfigFile), - force_set_config_file_paths(App, [RenderedConfigFile]), - copy_certs(App, RenderedConfigFile), - SpecAppConfig(App), - case application:ensure_all_started(App) of - {ok, _} -> - ok = ensure_dashboard_listeners_started(App), - ok; - {error, Reason} -> - error({failed_to_start_app, App, Reason}) - end. 
- render_config_file(ConfigFile, Vars0) -> Temp = case file:read_file(ConfigFile) of @@ -245,47 +262,21 @@ stop_apps(Apps) -> [application:stop(App) || App <- Apps ++ [emqx, ekka, mria, mnesia]], ok. +proj_root() -> + filename:join( + lists:takewhile( + fun(X) -> iolist_to_binary(X) =/= <<"_build">> end, + filename:split(app_path(emqx, ".")) + ) + ). + %% backward compatible deps_path(App, RelativePath) -> app_path(App, RelativePath). app_path(App, RelativePath) -> - ok = ensure_app_loaded(App), Lib = code:lib_dir(App), safe_relative_path(filename:join([Lib, RelativePath])). -assert_app_loaded(App) -> - case code:lib_dir(App) of - {error, bad_name} -> error({not_loaded, ?THIS_APP}); - _ -> ok - end. - -ensure_app_loaded(?THIS_APP) -> - ok = assert_app_loaded(?THIS_APP); -ensure_app_loaded(App) -> - case code:lib_dir(App) of - {error, bad_name} -> - ok = assert_app_loaded(?THIS_APP), - Dir0 = code:lib_dir(?THIS_APP), - LibRoot = upper_level(Dir0), - Dir = filename:join([LibRoot, atom_to_list(App), "ebin"]), - case code:add_pathz(Dir) of - true -> ok; - {error, bad_directory} -> error({bad_directory, Dir}) - end, - case application:load(App) of - ok -> ok; - {error, Reason} -> error({failed_to_load, App, Reason}) - end, - ok = assert_app_loaded(App); - _ -> - ok - end. - -upper_level(Dir) -> - Split = filename:split(Dir), - UpperReverse = tl(lists:reverse(Split)), - filename:join(lists:reverse(UpperReverse)). - safe_relative_path(Path) -> case filename:split(Path) of ["/" | T] -> @@ -793,3 +784,139 @@ expand_node_specs(Specs, CommonOpts) -> end, Specs ). + +%% is useful when iterating on the tests in a loop, to get rid of all +%% the garbaged printed before the test itself beings. +clear_screen() -> + io:format(standard_io, "\033[H\033[2J", []), + io:format(standard_error, "\033[H\033[2J", []), + io:format(standard_io, "\033[H\033[3J", []), + io:format(standard_error, "\033[H\033[3J", []), + ok. 
+ +with_mock(Mod, FnName, MockedFn, Fun) -> + ok = meck:new(Mod, [non_strict, no_link, no_history, passthrough]), + ok = meck:expect(Mod, FnName, MockedFn), + try + Fun() + after + ok = meck:unload(Mod) + end. + +%%------------------------------------------------------------------------------- +%% Toxiproxy utils +%%------------------------------------------------------------------------------- + +reset_proxy(ProxyHost, ProxyPort) -> + Url = "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/reset", + Body = <<>>, + {ok, {{_, 204, _}, _, _}} = httpc:request( + post, + {Url, [], "application/json", Body}, + [], + [{body_format, binary}] + ). + +with_failure(FailureType, Name, ProxyHost, ProxyPort, Fun) -> + enable_failure(FailureType, Name, ProxyHost, ProxyPort), + try + Fun() + after + heal_failure(FailureType, Name, ProxyHost, ProxyPort) + end. + +enable_failure(FailureType, Name, ProxyHost, ProxyPort) -> + case FailureType of + down -> switch_proxy(off, Name, ProxyHost, ProxyPort); + timeout -> timeout_proxy(on, Name, ProxyHost, ProxyPort); + latency_up -> latency_up_proxy(on, Name, ProxyHost, ProxyPort) + end. + +heal_failure(FailureType, Name, ProxyHost, ProxyPort) -> + case FailureType of + down -> switch_proxy(on, Name, ProxyHost, ProxyPort); + timeout -> timeout_proxy(off, Name, ProxyHost, ProxyPort); + latency_up -> latency_up_proxy(off, Name, ProxyHost, ProxyPort) + end. + +switch_proxy(Switch, Name, ProxyHost, ProxyPort) -> + Url = "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name, + Body = + case Switch of + off -> #{<<"enabled">> => false}; + on -> #{<<"enabled">> => true} + end, + BodyBin = emqx_json:encode(Body), + {ok, {{_, 200, _}, _, _}} = httpc:request( + post, + {Url, [], "application/json", BodyBin}, + [], + [{body_format, binary}] + ). 
+
+timeout_proxy(on, Name, ProxyHost, ProxyPort) ->
+    Url =
+        "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name ++
+            "/toxics",
+    NameBin = list_to_binary(Name),
+    Body = #{
+        <<"name">> => <<NameBin/binary, "_timeout">>,
+        <<"type">> => <<"timeout">>,
+        <<"stream">> => <<"upstream">>,
+        <<"toxicity">> => 1.0,
+        <<"attributes">> => #{<<"timeout">> => 0}
+    },
+    BodyBin = emqx_json:encode(Body),
+    {ok, {{_, 200, _}, _, _}} = httpc:request(
+        post,
+        {Url, [], "application/json", BodyBin},
+        [],
+        [{body_format, binary}]
+    );
+timeout_proxy(off, Name, ProxyHost, ProxyPort) ->
+    ToxicName = Name ++ "_timeout",
+    Url =
+        "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name ++
+            "/toxics/" ++ ToxicName,
+    Body = <<>>,
+    {ok, {{_, 204, _}, _, _}} = httpc:request(
+        delete,
+        {Url, [], "application/json", Body},
+        [],
+        [{body_format, binary}]
+    ).
+
+latency_up_proxy(on, Name, ProxyHost, ProxyPort) ->
+    Url =
+        "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name ++
+            "/toxics",
+    NameBin = list_to_binary(Name),
+    Body = #{
+        <<"name">> => <<NameBin/binary, "_latency_up">>,
+        <<"type">> => <<"latency">>,
+        <<"stream">> => <<"upstream">>,
+        <<"toxicity">> => 1.0,
+        <<"attributes">> => #{
+            <<"latency">> => 20_000,
+            <<"jitter">> => 3_000
+        }
+    },
+    BodyBin = emqx_json:encode(Body),
+    {ok, {{_, 200, _}, _, _}} = httpc:request(
+        post,
+        {Url, [], "application/json", BodyBin},
+        [],
+        [{body_format, binary}]
+    );
+latency_up_proxy(off, Name, ProxyHost, ProxyPort) ->
+    ToxicName = Name ++ "_latency_up",
+    Url =
+        "http://" ++ ProxyHost ++ ":" ++ integer_to_list(ProxyPort) ++ "/proxies/" ++ Name ++
+            "/toxics/" ++ ToxicName,
+    Body = <<>>,
+    {ok, {{_, 204, _}, _, _}} = httpc:request(
+        delete,
+        {Url, [], "application/json", Body},
+        [],
+        [{body_format, binary}]
+    ).
diff --git a/apps/emqx/test/emqx_mqtt_SUITE.erl b/apps/emqx/test/emqx_mqtt_SUITE.erl index f43804991..7032e553c 100644 --- a/apps/emqx/test/emqx_mqtt_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_SUITE.erl @@ -115,7 +115,7 @@ message_expiry_interval_init() -> message_expiry_interval_exipred(CPublish, CControl, QoS) -> ct:pal("~p ~p", [?FUNCTION_NAME, QoS]), %% publish to t/a and waiting for the message expired - emqtt:publish( + _ = emqtt:publish( CPublish, <<"t/a">>, #{'Message-Expiry-Interval' => 1}, @@ -152,7 +152,7 @@ message_expiry_interval_exipred(CPublish, CControl, QoS) -> message_expiry_interval_not_exipred(CPublish, CControl, QoS) -> ct:pal("~p ~p", [?FUNCTION_NAME, QoS]), %% publish to t/a - emqtt:publish( + _ = emqtt:publish( CPublish, <<"t/a">>, #{'Message-Expiry-Interval' => 20}, diff --git a/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl b/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl index 88ccda452..cb71cef95 100644 --- a/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_protocol_v5_SUITE.erl @@ -529,8 +529,11 @@ t_connack_max_qos_allowed(Config) -> %% [MQTT-3.2.2-10] {ok, _, [2]} = emqtt:subscribe(Client1, Topic, 2), - {ok, _} = emqtt:publish(Client1, Topic, <<"Unsupported Qos 1">>, qos1), %% [MQTT-3.2.2-11] + ?assertMatch( + {error, {disconnected, 155, _}}, + emqtt:publish(Client1, Topic, <<"Unsupported Qos 1">>, qos1) + ), ?assertEqual(155, receive_disconnect_reasoncode()), waiting_client_process_exit(Client1), @@ -563,8 +566,11 @@ t_connack_max_qos_allowed(Config) -> %% [MQTT-3.2.2-10] {ok, _, [2]} = emqtt:subscribe(Client3, Topic, 2), - {ok, _} = emqtt:publish(Client3, Topic, <<"Unsupported Qos 2">>, qos2), %% [MQTT-3.2.2-11] + ?assertMatch( + {error, {disconnected, 155, _}}, + emqtt:publish(Client3, Topic, <<"Unsupported Qos 2">>, qos2) + ), ?assertEqual(155, receive_disconnect_reasoncode()), waiting_client_process_exit(Client3), diff --git a/apps/emqx_authn/src/emqx_authn.app.src 
b/apps/emqx_authn/src/emqx_authn.app.src index 5c27c4b3b..7f0305871 100644 --- a/apps/emqx_authn/src/emqx_authn.app.src +++ b/apps/emqx_authn/src/emqx_authn.app.src @@ -4,7 +4,7 @@ {vsn, "0.1.10"}, {modules, []}, {registered, [emqx_authn_sup, emqx_authn_registry]}, - {applications, [kernel, stdlib, emqx_resource, ehttpc, epgsql, mysql, jose]}, + {applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]}, {mod, {emqx_authn_app, []}}, {env, []}, {licenses, ["Apache-2.0"]}, diff --git a/apps/emqx_authn/src/emqx_authn_utils.erl b/apps/emqx_authn/src/emqx_authn_utils.erl index 099da0077..d920d2719 100644 --- a/apps/emqx_authn/src/emqx_authn_utils.erl +++ b/apps/emqx_authn/src/emqx_authn_utils.erl @@ -47,7 +47,6 @@ ]). -define(DEFAULT_RESOURCE_OPTS, #{ - auto_retry_interval => 6000, start_after_created => false }). diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl index 8f98e2f1e..480950143 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl @@ -22,15 +22,18 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2, connect/1 ]). -define(DEFAULT_POOL_SIZE, 8). +callback_mode() -> always_sync. + on_start(InstId, Opts) -> PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolOpts = [ @@ -45,7 +48,7 @@ on_start(InstId, Opts) -> on_stop(_InstId, #{pool_name := PoolName}) -> emqx_plugin_libs_pool:stop_pool(PoolName). 
-on_query(InstId, get_jwks, AfterQuery, #{pool_name := PoolName}) -> +on_query(InstId, get_jwks, #{pool_name := PoolName}) -> Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover), case Result of {error, Reason} -> @@ -54,20 +57,18 @@ on_query(InstId, get_jwks, AfterQuery, #{pool_name := PoolName}) -> connector => InstId, command => get_jwks, reason => Reason - }), - emqx_resource:query_failed(AfterQuery); + }); _ -> - emqx_resource:query_success(AfterQuery) + ok end, Result; -on_query(_InstId, {update, Opts}, AfterQuery, #{pool_name := PoolName}) -> +on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) -> lists:foreach( fun({_, Worker}) -> ok = ecpool_worker:exec(Worker, {emqx_authn_jwks_client, update, [Opts]}, infinity) end, ecpool:workers(PoolName) ), - emqx_resource:query_success(AfterQuery), ok. on_get_status(_InstId, #{pool_name := PoolName}) -> diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl index f7249ae57..1351ae0dd 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl @@ -164,7 +164,7 @@ authenticate( ) -> Filter = emqx_authn_utils:render_deep(FilterTemplate, Credential), case emqx_resource:query(ResourceId, {find_one, Collection, Filter, #{}}) of - undefined -> + {ok, undefined} -> ignore; {error, Reason} -> ?TRACE_AUTHN_PROVIDER(error, "mongodb_query_failed", #{ @@ -174,7 +174,7 @@ authenticate( reason => Reason }), ignore; - Doc -> + {ok, Doc} -> case check_password(Password, Doc, State) of ok -> {ok, is_superuser(Doc, State)}; diff --git a/apps/emqx_authn/test/emqx_authn_mongo_SUITE.erl b/apps/emqx_authn/test/emqx_authn_mongo_SUITE.erl index 2f7dd2391..0016274ea 100644 --- a/apps/emqx_authn/test/emqx_authn_mongo_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_mongo_SUITE.erl @@ -50,7 +50,7 @@ init_per_suite(Config) -> case 
emqx_common_test_helpers:is_tcp_server_available(?MONGO_HOST, ?MONGO_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_authn]), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), Config; false -> {skip, no_mongo} @@ -61,7 +61,7 @@ end_per_suite(_Config) -> [authentication], ?GLOBAL ), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). %%------------------------------------------------------------------------------ diff --git a/apps/emqx_authn/test/emqx_authn_mongo_tls_SUITE.erl b/apps/emqx_authn/test/emqx_authn_mongo_tls_SUITE.erl index ebece6c3e..1e9981d11 100644 --- a/apps/emqx_authn/test/emqx_authn_mongo_tls_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_mongo_tls_SUITE.erl @@ -46,7 +46,7 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?MONGO_HOST, ?MONGO_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_authn]), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), Config; false -> {skip, no_mongo} @@ -57,7 +57,7 @@ end_per_suite(_Config) -> [authentication], ?GLOBAL ), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). 
%%------------------------------------------------------------------------------ diff --git a/apps/emqx_authn/test/emqx_authn_mysql_SUITE.erl b/apps/emqx_authn/test/emqx_authn_mysql_SUITE.erl index bd6a0159a..2f84b7b90 100644 --- a/apps/emqx_authn/test/emqx_authn_mysql_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_mysql_SUITE.erl @@ -58,7 +58,7 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?MYSQL_HOST, ?MYSQL_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_authn]), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), {ok, _} = emqx_resource:create_local( ?MYSQL_RESOURCE, ?RESOURCE_GROUP, @@ -77,7 +77,7 @@ end_per_suite(_Config) -> ?GLOBAL ), ok = emqx_resource:remove_local(?MYSQL_RESOURCE), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). %%------------------------------------------------------------------------------ diff --git a/apps/emqx_authn/test/emqx_authn_mysql_tls_SUITE.erl b/apps/emqx_authn/test/emqx_authn_mysql_tls_SUITE.erl index 7d642c230..653be8daa 100644 --- a/apps/emqx_authn/test/emqx_authn_mysql_tls_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_mysql_tls_SUITE.erl @@ -49,7 +49,7 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?MYSQL_HOST, ?MYSQL_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_authn]), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), Config; false -> {skip, no_mysql_tls} @@ -60,7 +60,7 @@ end_per_suite(_Config) -> [authentication], ?GLOBAL ), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). 
%%------------------------------------------------------------------------------ diff --git a/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl b/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl index 41b07bfc3..a0fbefb01 100644 --- a/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl @@ -59,7 +59,7 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?PGSQL_HOST, ?PGSQL_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_authn]), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), {ok, _} = emqx_resource:create_local( ?PGSQL_RESOURCE, ?RESOURCE_GROUP, @@ -78,7 +78,7 @@ end_per_suite(_Config) -> ?GLOBAL ), ok = emqx_resource:remove_local(?PGSQL_RESOURCE), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). %%------------------------------------------------------------------------------ diff --git a/apps/emqx_authn/test/emqx_authn_pgsql_tls_SUITE.erl b/apps/emqx_authn/test/emqx_authn_pgsql_tls_SUITE.erl index e2b44b93d..59d37ba96 100644 --- a/apps/emqx_authn/test/emqx_authn_pgsql_tls_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_pgsql_tls_SUITE.erl @@ -49,7 +49,7 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?PGSQL_HOST, ?PGSQL_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_authn]), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), Config; false -> {skip, no_pgsql_tls} @@ -60,7 +60,7 @@ end_per_suite(_Config) -> [authentication], ?GLOBAL ), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). 
%%------------------------------------------------------------------------------ diff --git a/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl b/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl index cbf094549..7f4726dda 100644 --- a/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl @@ -58,7 +58,7 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?REDIS_HOST, ?REDIS_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_authn]), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), {ok, _} = emqx_resource:create_local( ?REDIS_RESOURCE, ?RESOURCE_GROUP, @@ -77,7 +77,7 @@ end_per_suite(_Config) -> ?GLOBAL ), ok = emqx_resource:remove_local(?REDIS_RESOURCE), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). %%------------------------------------------------------------------------------ diff --git a/apps/emqx_authn/test/emqx_authn_redis_tls_SUITE.erl b/apps/emqx_authn/test/emqx_authn_redis_tls_SUITE.erl index 781d84d98..601b58c3c 100644 --- a/apps/emqx_authn/test/emqx_authn_redis_tls_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_redis_tls_SUITE.erl @@ -49,7 +49,7 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?REDIS_HOST, ?REDIS_TLS_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_authn]), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), Config; false -> {skip, no_redis} @@ -60,7 +60,7 @@ end_per_suite(_Config) -> [authentication], ?GLOBAL ), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). 
%%------------------------------------------------------------------------------ diff --git a/apps/emqx_authz/src/emqx_authz.app.src b/apps/emqx_authz/src/emqx_authz.app.src index e98e8c6b3..3f0f96e72 100644 --- a/apps/emqx_authz/src/emqx_authz.app.src +++ b/apps/emqx_authz/src/emqx_authz.app.src @@ -1,13 +1,14 @@ %% -*- mode: erlang -*- {application, emqx_authz, [ {description, "An OTP application"}, - {vsn, "0.1.8"}, + {vsn, "0.1.9"}, {registered, []}, {mod, {emqx_authz_app, []}}, {applications, [ kernel, stdlib, crypto, + emqx_resource, emqx_connector ]}, {env, []}, diff --git a/apps/emqx_authz/src/emqx_authz_mongodb.erl b/apps/emqx_authz/src/emqx_authz_mongodb.erl index 931d83c07..753416ab9 100644 --- a/apps/emqx_authz/src/emqx_authz_mongodb.erl +++ b/apps/emqx_authz/src/emqx_authz_mongodb.erl @@ -94,9 +94,9 @@ authorize( resource_id => ResourceID }), nomatch; - [] -> + {ok, []} -> nomatch; - Rows -> + {ok, Rows} -> Rules = [ emqx_authz_rule:compile({Permission, all, Action, Topics}) || #{ diff --git a/apps/emqx_authz/src/emqx_authz_utils.erl b/apps/emqx_authz/src/emqx_authz_utils.erl index 1bf5d774c..d1302d84f 100644 --- a/apps/emqx_authz/src/emqx_authz_utils.erl +++ b/apps/emqx_authz/src/emqx_authz_utils.erl @@ -40,7 +40,6 @@ ]). -define(DEFAULT_RESOURCE_OPTS, #{ - auto_retry_interval => 6000, start_after_created => false }). diff --git a/apps/emqx_authz/test/emqx_authz_SUITE.erl b/apps/emqx_authz/test/emqx_authz_SUITE.erl index f602acedc..36841cb1a 100644 --- a/apps/emqx_authz/test/emqx_authz_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_SUITE.erl @@ -45,7 +45,7 @@ init_per_suite(Config) -> ), ok = emqx_common_test_helpers:start_apps( - [emqx_connector, emqx_conf, emqx_authz], + [emqx_conf, emqx_authz], fun set_special_configs/1 ), Config. 
@@ -59,8 +59,7 @@ end_per_suite(_Config) -> <<"sources">> => [] } ), - ok = stop_apps([emqx_resource]), - emqx_common_test_helpers:stop_apps([emqx_connector, emqx_authz, emqx_conf]), + emqx_common_test_helpers:stop_apps([emqx_authz, emqx_conf]), meck:unload(emqx_resource), ok. diff --git a/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl index 306fe3f13..7ddab7321 100644 --- a/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_cache_SUITE.erl @@ -23,6 +23,8 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). +suite() -> [{timetrap, {seconds, 60}}]. + all() -> emqx_common_test_helpers:all(?MODULE). @@ -45,7 +47,6 @@ end_per_suite(_Config) -> <<"sources">> => [] } ), - ok = stop_apps([emqx_resource, emqx_connector]), emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf, emqx_management]), ok. diff --git a/apps/emqx_authz/test/emqx_authz_api_settings_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_settings_SUITE.erl index b53b7aa1b..186f04740 100644 --- a/apps/emqx_authz/test/emqx_authz_api_settings_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_settings_SUITE.erl @@ -45,7 +45,7 @@ end_per_suite(_Config) -> <<"sources">> => [] } ), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf]), ok. diff --git a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl index 6ac67d81b..e26ad9839 100644 --- a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl @@ -103,7 +103,7 @@ groups() -> []. 
init_per_suite(Config) -> - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), meck:new(emqx_resource, [non_strict, passthrough, no_history, no_link]), meck:expect(emqx_resource, create_local, fun(_, _, _, _) -> {ok, meck_data} end), meck:expect(emqx_resource, health_check, fun(St) -> {ok, St} end), @@ -120,7 +120,7 @@ init_per_suite(Config) -> [emqx_conf, emqx_authz, emqx_dashboard], fun set_special_configs/1 ), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), Config. end_per_suite(_Config) -> @@ -134,7 +134,7 @@ end_per_suite(_Config) -> ), %% resource and connector should be stop first, %% or authz_[mysql|pgsql|redis..]_SUITE would be failed - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), emqx_common_test_helpers:stop_apps([emqx_dashboard, emqx_authz, emqx_conf]), meck:unload(emqx_resource), ok. diff --git a/apps/emqx_authz/test/emqx_authz_file_SUITE.erl b/apps/emqx_authz/test/emqx_authz_file_SUITE.erl index 059a350e2..164271c6d 100644 --- a/apps/emqx_authz/test/emqx_authz_file_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_file_SUITE.erl @@ -55,7 +55,6 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), - ok = stop_apps([emqx_resource, emqx_connector]), ok = emqx_common_test_helpers:stop_apps([emqx_authz]). init_per_testcase(_TestCase, Config) -> diff --git a/apps/emqx_authz/test/emqx_authz_http_SUITE.erl b/apps/emqx_authz/test/emqx_authz_http_SUITE.erl index 628e8dbfe..6ad966344 100644 --- a/apps/emqx_authz/test/emqx_authz_http_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_http_SUITE.erl @@ -40,17 +40,17 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Config) -> - ok = stop_apps([emqx_resource, emqx_connector, cowboy]), + ok = stop_apps([emqx_resource, cowboy]), ok = emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authz], fun set_special_configs/1 ), - ok = start_apps([emqx_resource, emqx_connector, cowboy]), + ok = start_apps([emqx_resource, cowboy]), Config. end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), - ok = stop_apps([emqx_resource, emqx_connector, cowboy]), + ok = stop_apps([emqx_resource, cowboy]), ok = emqx_common_test_helpers:stop_apps([emqx_authz]). set_special_configs(emqx_authz) -> diff --git a/apps/emqx_authz/test/emqx_authz_mongodb_SUITE.erl b/apps/emqx_authz/test/emqx_authz_mongodb_SUITE.erl index f080f7e72..61c37b8a1 100644 --- a/apps/emqx_authz/test/emqx_authz_mongodb_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_mongodb_SUITE.erl @@ -34,14 +34,14 @@ groups() -> []. init_per_suite(Config) -> - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), case emqx_common_test_helpers:is_tcp_server_available(?MONGO_HOST, ?MONGO_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authz], fun set_special_configs/1 ), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), Config; false -> {skip, no_mongo} @@ -49,7 +49,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authz]). set_special_configs(emqx_authz) -> diff --git a/apps/emqx_authz/test/emqx_authz_mysql_SUITE.erl b/apps/emqx_authz/test/emqx_authz_mysql_SUITE.erl index 930426318..e1acfd771 100644 --- a/apps/emqx_authz/test/emqx_authz_mysql_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_mysql_SUITE.erl @@ -33,14 +33,14 @@ groups() -> []. 
init_per_suite(Config) -> - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), case emqx_common_test_helpers:is_tcp_server_available(?MYSQL_HOST, ?MYSQL_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authz], fun set_special_configs/1 ), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), {ok, _} = emqx_resource:create_local( ?MYSQL_RESOURCE, ?RESOURCE_GROUP, @@ -56,7 +56,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), ok = emqx_resource:remove_local(?MYSQL_RESOURCE), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authz]). init_per_testcase(_TestCase, Config) -> diff --git a/apps/emqx_authz/test/emqx_authz_postgresql_SUITE.erl b/apps/emqx_authz/test/emqx_authz_postgresql_SUITE.erl index fa1672ba7..7ed19716f 100644 --- a/apps/emqx_authz/test/emqx_authz_postgresql_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_postgresql_SUITE.erl @@ -33,14 +33,14 @@ groups() -> []. init_per_suite(Config) -> - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), case emqx_common_test_helpers:is_tcp_server_available(?PGSQL_HOST, ?PGSQL_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authz], fun set_special_configs/1 ), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), {ok, _} = emqx_resource:create_local( ?PGSQL_RESOURCE, ?RESOURCE_GROUP, @@ -56,7 +56,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), ok = emqx_resource:remove_local(?PGSQL_RESOURCE), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authz]). 
init_per_testcase(_TestCase, Config) -> diff --git a/apps/emqx_authz/test/emqx_authz_redis_SUITE.erl b/apps/emqx_authz/test/emqx_authz_redis_SUITE.erl index 1b21936b4..ebf2b4d06 100644 --- a/apps/emqx_authz/test/emqx_authz_redis_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_redis_SUITE.erl @@ -34,14 +34,14 @@ groups() -> []. init_per_suite(Config) -> - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), case emqx_common_test_helpers:is_tcp_server_available(?REDIS_HOST, ?REDIS_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authz], fun set_special_configs/1 ), - ok = start_apps([emqx_resource, emqx_connector]), + ok = start_apps([emqx_resource]), {ok, _} = emqx_resource:create_local( ?REDIS_RESOURCE, ?RESOURCE_GROUP, @@ -57,7 +57,7 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_authz_test_lib:restore_authorizers(), ok = emqx_resource:remove_local(?REDIS_RESOURCE), - ok = stop_apps([emqx_resource, emqx_connector]), + ok = stop_apps([emqx_resource]), ok = emqx_common_test_helpers:stop_apps([emqx_authz]). 
init_per_testcase(_TestCase, Config) -> diff --git a/apps/emqx_bridge/i18n/emqx_bridge_mqtt_schema.conf b/apps/emqx_bridge/i18n/emqx_bridge_mqtt_schema.conf index c0f549db3..b935b360c 100644 --- a/apps/emqx_bridge/i18n/emqx_bridge_mqtt_schema.conf +++ b/apps/emqx_bridge/i18n/emqx_bridge_mqtt_schema.conf @@ -1,16 +1,14 @@ emqx_bridge_mqtt_schema { - - desc_rec { - desc { - en: """Configuration for MQTT bridge.""" - zh: """MQTT Bridge 配置""" - } - label: { - en: "MQTT Bridge Configuration" - zh: "MQTT Bridge 配置" - } - } - + config { + desc { + en: """The config for MQTT Bridges.""" + zh: """MQTT Bridge 的配置。""" + } + label: { + en: "Config" + zh: "配置" + } + } desc_type { desc { en: """The bridge type.""" diff --git a/apps/emqx_bridge/i18n/emqx_bridge_schema.conf b/apps/emqx_bridge/i18n/emqx_bridge_schema.conf index d64ca3b3a..c465ef242 100644 --- a/apps/emqx_bridge/i18n/emqx_bridge_schema.conf +++ b/apps/emqx_bridge/i18n/emqx_bridge_schema.conf @@ -11,24 +11,6 @@ emqx_bridge_schema { } } - desc_connector { - desc { - en: """ -The ID or the configs of the connector to be used for this bridge. Connector IDs must be of format: -{type}:{name}.
-In config files, you can find the corresponding config entry for a connector by such path: -'connectors.{type}.{name}'.
-""" - zh: """ -Bridge 使用的 Connector 的 ID 或者配置。Connector ID 的格式必须为:{type}:{name}
-在配置文件中,您可以通过以下路径找到 Connector 的相应配置条目:'connector.{type}.{name}'。
""" - } - label: { - en: "Connector ID" - zh: "Connector ID" - } - } - desc_metrics { desc { en: """The metrics of the bridge""" @@ -85,7 +67,7 @@ Bridge 使用的 Connector 的 ID 或者配置。Connector ID 的格式必须为 } - bridges_name { + bridges_mqtt { desc { en: """MQTT bridges to/from another MQTT broker""" zh: """桥接到另一个 MQTT Broker 的 MQTT Bridge""" @@ -96,36 +78,139 @@ Bridge 使用的 Connector 的 ID 或者配置。Connector ID 的格式必须为 } } + metric_batching { + desc { + en: """Count of messages that are currently accumulated in memory waiting for sending in one batch.""" + zh: """当前积压在内存里,等待批量发送的消息个数""" + } + label: { + en: "Batched" + zh: "等待批量发送" + } + } + + metric_dropped { + desc { + en: """Count of messages dropped.""" + zh: """被丢弃的消息个数。""" + } + label: { + en: "Dropped" + zh: "丢弃" + } + } + + metric_dropped_other { + desc { + en: """Count of messages dropped due to other reasons.""" + zh: """因为其他原因被丢弃的消息个数。""" + } + label: { + en: "Dropped Other" + zh: "其他丢弃" + } + } + metric_dropped_queue_full { + desc { + en: """Count of messages dropped due to the queue is full.""" + zh: """因为队列已满被丢弃的消息个数。""" + } + label: { + en: "Dropped Queue Full" + zh: "队列已满被丢弃" + } + } + metric_dropped_queue_not_enabled { + desc { + en: """Count of messages dropped due to the queue is not enabled.""" + zh: """因为队列未启用被丢弃的消息个数。""" + } + label: { + en: "Dropped Queue Disabled" + zh: "队列未启用被丢弃" + } + } + metric_dropped_resource_not_found { + desc { + en: """Count of messages dropped due to the resource is not found.""" + zh: """因为资源不存在被丢弃的消息个数。""" + } + label: { + en: "Dropped Resource NotFound" + zh: "资源不存在被丢弃" + } + } + metric_dropped_resource_stopped { + desc { + en: """Count of messages dropped due to the resource is stopped.""" + zh: """因为资源已停用被丢弃的消息个数。""" + } + label: { + en: "Dropped Resource Stopped" + zh: "资源停用被丢弃" + } + } metric_matched { desc { - en: """Count of this bridge is queried""" - zh: """Bridge 执行操作的次数""" + en: """Count of this bridge is matched and queried.""" + zh: """Bridge 被匹配到(被请求)的次数。""" } 
label: { - en: "Bridge Matched" - zh: "Bridge 执行操作的次数" + en: "Matched" + zh: "匹配次数" } } - metric_success { + metric_queuing { desc { - en: """Count of query success""" - zh: """Bridge 执行操作成功的次数""" + en: """Count of messages that are currently queuing.""" + zh: """当前被缓存到磁盘队列的消息个数。""" } label: { - en: "Bridge Success" - zh: "Bridge 执行操作成功的次数" + en: "Queued" + zh: "被缓存" + } + } + metric_retried { + desc { + en: """Times of retried.""" + zh: """重试的次数。""" + } + label: { + en: "Retried" + zh: "已重试" } } - metric_failed { + metric_sent_failed { desc { - en: """Count of query failed""" - zh: """Bridge 执行操作失败的次数""" + en: """Count of messages that sent failed.""" + zh: """发送失败的消息个数。""" } label: { - en: "Bridge Failed" - zh: "Bridge 执行操作失败的次数" + en: "Sent Failed" + zh: "发送失败" + } + } + + metric_sent_inflight { + desc { + en: """Count of messages that were sent asynchronously but ACKs are not received.""" + zh: """已异步地发送但没有收到 ACK 的消息个数。""" + } + label: { + en: "Sent Inflight" + zh: "已发送未确认" + } + } + metric_sent_success { + desc { + en: """Count of messages that sent successfully.""" + zh: """已经发送成功的消息个数。""" + } + label: { + en: "Sent Success" + zh: "发送成功" } } @@ -162,6 +247,17 @@ Bridge 使用的 Connector 的 ID 或者配置。Connector ID 的格式必须为 } } + metric_received { + desc { + en: """Count of messages that is received from the remote system.""" + zh: """从远程系统收到的消息个数。""" + } + label: { + en: "Received" + zh: "已接收" + } + } + desc_bridges { desc { en: """Configuration for MQTT bridges.""" diff --git a/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf b/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf index b26c6ffb6..d9d2d0c40 100644 --- a/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf +++ b/apps/emqx_bridge/i18n/emqx_bridge_webhook_schema.conf @@ -11,17 +11,6 @@ emqx_bridge_webhook_schema { } } - config_direction { - desc { - en: """The direction of this bridge, MUST be 'egress'""" - zh: """Bridge 的方向, 必须是 egress""" - } - label: { - en: "Bridge Direction" - zh: "Bridge 方向" - 
} - } - config_url { desc { en: """ diff --git a/apps/emqx_bridge/include/emqx_bridge.hrl b/apps/emqx_bridge/include/emqx_bridge.hrl new file mode 100644 index 000000000..6bc80f9cc --- /dev/null +++ b/apps/emqx_bridge/include/emqx_bridge.hrl @@ -0,0 +1,95 @@ +-define(EMPTY_METRICS, + ?METRICS( + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ) +). + +-define(METRICS( + Batched, + Dropped, + DroppedOther, + DroppedQueueFull, + DroppedQueueNotEnabled, + DroppedResourceNotFound, + DroppedResourceStopped, + Matched, + Queued, + Retried, + SentFailed, + SentInflight, + SentSucc, + RATE, + RATE_5, + RATE_MAX, + Rcvd +), + #{ + 'batching' => Batched, + 'dropped' => Dropped, + 'dropped.other' => DroppedOther, + 'dropped.queue_full' => DroppedQueueFull, + 'dropped.queue_not_enabled' => DroppedQueueNotEnabled, + 'dropped.resource_not_found' => DroppedResourceNotFound, + 'dropped.resource_stopped' => DroppedResourceStopped, + 'matched' => Matched, + 'queuing' => Queued, + 'retried' => Retried, + 'failed' => SentFailed, + 'inflight' => SentInflight, + 'success' => SentSucc, + rate => RATE, + rate_last5m => RATE_5, + rate_max => RATE_MAX, + received => Rcvd + } +). + +-define(metrics( + Batched, + Dropped, + DroppedOther, + DroppedQueueFull, + DroppedQueueNotEnabled, + DroppedResourceNotFound, + DroppedResourceStopped, + Matched, + Queued, + Retried, + SentFailed, + SentInflight, + SentSucc, + RATE, + RATE_5, + RATE_MAX, + Rcvd +), + #{ + 'batching' := Batched, + 'dropped' := Dropped, + 'dropped.other' := DroppedOther, + 'dropped.queue_full' := DroppedQueueFull, + 'dropped.queue_not_enabled' := DroppedQueueNotEnabled, + 'dropped.resource_not_found' := DroppedResourceNotFound, + 'dropped.resource_stopped' := DroppedResourceStopped, + 'matched' := Matched, + 'queuing' := Queued, + 'retried' := Retried, + 'failed' := SentFailed, + 'inflight' := SentInflight, + 'success' := SentSucc, + rate := RATE, + rate_last5m := RATE_5, + rate_max := RATE_MAX, + received := Rcvd + } +). 
+ +-define(METRICS_EXAMPLE, #{ + metrics => ?EMPTY_METRICS, + node_metrics => [ + #{ + node => node(), + metrics => ?EMPTY_METRICS + } + ] +}). diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index 6d01e004e..3cc858665 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ - {description, "An OTP application"}, - {vsn, "0.1.5"}, + {description, "EMQX bridges"}, + {vsn, "0.1.6"}, {registered, []}, {mod, {emqx_bridge_app, []}}, {applications, [ diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index ba6c64dbc..44028e900 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -37,8 +37,8 @@ create/3, disable_enable/3, remove/2, - list/0, - list_bridges_by_connector/1 + check_deps_and_remove/3, + list/0 ]). -export([send_message/2]). @@ -48,15 +48,23 @@ %% exported for `emqx_telemetry' -export([get_basic_usage_info/0]). +-define(EGRESS_DIR_BRIDGES(T), + T == webhook; + T == mysql; + T == influxdb_api_v1; + T == influxdb_api_v2 + %% T == influxdb_udp +). 
+ load() -> - %% set wait_for_resource_ready => 0 to start resources async - Opts = #{auto_retry_interval => 60000, wait_for_resource_ready => 0}, Bridges = emqx:get_config([bridges], #{}), lists:foreach( fun({Type, NamedConf}) -> lists:foreach( fun({Name, Conf}) -> - safe_load_bridge(Type, Name, Conf, Opts) + %% fetch opts for `emqx_resource_worker` + ResOpts = emqx_resource:fetch_creation_opts(Conf), + safe_load_bridge(Type, Name, Conf, ResOpts) end, maps:to_list(NamedConf) ) @@ -93,10 +101,10 @@ load_hook() -> load_hook(Bridges) -> lists:foreach( - fun({_Type, Bridge}) -> + fun({Type, Bridge}) -> lists:foreach( fun({_Name, BridgeConf}) -> - do_load_hook(BridgeConf) + do_load_hook(Type, BridgeConf) end, maps:to_list(Bridge) ) @@ -104,12 +112,13 @@ load_hook(Bridges) -> maps:to_list(Bridges) ). -do_load_hook(#{local_topic := _} = Conf) -> - case maps:get(direction, Conf, egress) of - egress -> emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE); - ingress -> ok - end; -do_load_hook(_Conf) -> +do_load_hook(Type, #{local_topic := _}) when ?EGRESS_DIR_BRIDGES(Type) -> + emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE); +do_load_hook(mqtt, #{egress := #{local := #{topic := _}}}) -> + emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE); +do_load_hook(kafka, #{producer := #{mqtt := #{topic := _}}}) -> + emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}, ?HP_BRIDGE); +do_load_hook(_Type, _Conf) -> ok. unload_hook() -> @@ -171,9 +180,9 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnv) -> diff_confs(NewConf, OldConf), %% The config update will be failed if any task in `perform_bridge_changes` failed. 
Result = perform_bridge_changes([ - {fun emqx_bridge_resource:remove/3, Removed}, - {fun emqx_bridge_resource:create/3, Added}, - {fun emqx_bridge_resource:update/3, Updated} + {fun emqx_bridge_resource:remove/4, Removed}, + {fun emqx_bridge_resource:create/4, Added}, + {fun emqx_bridge_resource:update/4, Updated} ]), ok = unload_hook(), ok = load_hook(NewConf), @@ -197,13 +206,6 @@ list() -> maps:to_list(emqx:get_raw_config([bridges], #{})) ). -list_bridges_by_connector(ConnectorId) -> - [ - B - || B = #{raw_config := #{<<"connector">> := Id}} <- list(), - ConnectorId =:= Id - ]. - lookup(Id) -> {Type, Name} = emqx_bridge_resource:parse_bridge_id(Id), lookup(Type, Name). @@ -211,6 +213,7 @@ lookup(Id) -> lookup(Type, Name) -> RawConf = emqx:get_raw_config([bridges, Type, Name], #{}), lookup(Type, Name, RawConf). + lookup(Type, Name, RawConf) -> case emqx_resource:get_instance(emqx_bridge_resource:resource_id(Type, Name)) of {error, not_found} -> @@ -220,10 +223,15 @@ lookup(Type, Name, RawConf) -> type => Type, name => Name, resource_data => Data, - raw_config => RawConf + raw_config => maybe_upgrade(Type, RawConf) }} end. +maybe_upgrade(mqtt, Config) -> + emqx_bridge_mqtt_config:maybe_upgrade(Config); +maybe_upgrade(_Other, Config) -> + Config. + disable_enable(Action, BridgeType, BridgeName) when Action =:= disable; Action =:= enable -> @@ -246,6 +254,24 @@ remove(BridgeType, BridgeName) -> #{override_to => cluster} ). +check_deps_and_remove(BridgeType, BridgeName, RemoveDeps) -> + BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName), + %% NOTE: This violates the design: Rule depends on data-bridge but not vice versa. 
+ case emqx_rule_engine:get_rule_ids_by_action(BridgeId) of + [] -> + remove(BridgeType, BridgeName); + RuleIds when RemoveDeps =:= false -> + {error, {rules_deps_on_this_bridge, RuleIds}}; + RuleIds when RemoveDeps =:= true -> + lists:foreach( + fun(R) -> + emqx_rule_engine:ensure_action_removed(R, BridgeId) + end, + RuleIds + ), + remove(BridgeType, BridgeName) + end. + %%======================================================================================== %% Helper functions %%======================================================================================== @@ -260,8 +286,16 @@ perform_bridge_changes([{Action, MapConfs} | Tasks], Result0) -> fun ({_Type, _Name}, _Conf, {error, Reason}) -> {error, Reason}; + %% for emqx_bridge_resource:update/4 + ({Type, Name}, {OldConf, Conf}, _) -> + ResOpts = emqx_resource:fetch_creation_opts(Conf), + case Action(Type, Name, {OldConf, Conf}, ResOpts) of + {error, Reason} -> {error, Reason}; + Return -> Return + end; ({Type, Name}, Conf, _) -> - case Action(Type, Name, Conf) of + ResOpts = emqx_resource:fetch_creation_opts(Conf), + case Action(Type, Name, Conf, ResOpts) of {error, Reason} -> {error, Reason}; Return -> Return end @@ -295,13 +329,8 @@ get_matched_bridges(Topic) -> maps:fold( fun(BType, Conf, Acc0) -> maps:fold( - fun - %% Confs for MQTT, Kafka bridges have the `direction` flag - (_BName, #{direction := ingress}, Acc1) -> - Acc1; - (BName, #{direction := egress} = Egress, Acc1) -> - %% WebHook, MySQL bridges only have egress direction - get_matched_bridge_id(Egress, Topic, BType, BName, Acc1) + fun(BName, BConf, Acc1) -> + get_matched_bridge_id(BType, BConf, Topic, BName, Acc1) end, Acc0, Conf @@ -311,9 +340,18 @@ get_matched_bridges(Topic) -> Bridges ). 
-get_matched_bridge_id(#{enable := false}, _Topic, _BType, _BName, Acc) -> +get_matched_bridge_id(_BType, #{enable := false}, _Topic, _BName, Acc) -> Acc; -get_matched_bridge_id(#{local_topic := Filter}, Topic, BType, BName, Acc) -> +get_matched_bridge_id(BType, #{local_topic := Filter}, Topic, BName, Acc) when + ?EGRESS_DIR_BRIDGES(BType) +-> + do_get_matched_bridge_id(Topic, Filter, BType, BName, Acc); +get_matched_bridge_id(mqtt, #{egress := #{local := #{topic := Filter}}}, Topic, BName, Acc) -> + do_get_matched_bridge_id(Topic, Filter, mqtt, BName, Acc); +get_matched_bridge_id(kafka, #{producer := #{mqtt := #{topic := Filter}}}, Topic, BName, Acc) -> + do_get_matched_bridge_id(Topic, Filter, kafka, BName, Acc). + +do_get_matched_bridge_id(Topic, Filter, BType, BName, Acc) -> case emqx_topic:match(Topic, Filter) of true -> [emqx_bridge_resource:bridge_id(BType, BName) | Acc]; false -> Acc diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index 66a9079f8..6b5e307d8 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -20,6 +20,7 @@ -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). -import(hoconsc, [mk/2, array/1, enum/1]). @@ -42,40 +43,20 @@ -export([lookup_from_local_node/2]). --define(CONN_TYPES, [mqtt]). - -define(TRY_PARSE_ID(ID, EXPR), try emqx_bridge_resource:parse_bridge_id(Id) of {BridgeType, BridgeName} -> EXPR catch - error:{invalid_bridge_id, Id0} -> + throw:{invalid_bridge_id, Reason} -> {400, error_msg( 'INVALID_ID', - <<"invalid_bridge_id: ", Id0/binary, - ". Bridge Ids must be of format {type}:{name}">> + <<"Invalid bride ID, ", Reason/binary>> )} end ). 
--define(METRICS(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), #{ - matched => MATCH, - success => SUCC, - failed => FAILED, - rate => RATE, - rate_last5m => RATE_5, - rate_max => RATE_MAX -}). --define(metrics(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), #{ - matched := MATCH, - success := SUCC, - failed := FAILED, - rate := RATE, - rate_last5m := RATE_5, - rate_max := RATE_MAX -}). - namespace() -> "bridge". api_spec() -> @@ -110,7 +91,7 @@ param_path_operation_cluster() -> #{ in => path, required => true, - example => <<"start">>, + example => <<"restart">>, desc => ?DESC("desc_param_path_operation_cluster") } )}. @@ -146,7 +127,7 @@ param_path_id() -> #{ in => path, required => true, - example => <<"webhook:my_webhook">>, + example => <<"webhook:webhook_example">>, desc => ?DESC("desc_param_path_id") } )}. @@ -155,70 +136,58 @@ bridge_info_array_example(Method) -> [Config || #{value := Config} <- maps:values(bridge_info_examples(Method))]. bridge_info_examples(Method) -> - maps:merge(conn_bridge_examples(Method), #{ - <<"my_webhook">> => #{ - summary => <<"WebHook">>, - value => info_example(webhook, awesome, Method) - } - }). - -conn_bridge_examples(Method) -> - lists:foldl( - fun(Type, Acc) -> - SType = atom_to_list(Type), - KeyIngress = bin(SType ++ "_ingress"), - KeyEgress = bin(SType ++ "_egress"), - maps:merge(Acc, #{ - KeyIngress => #{ - summary => bin(string:uppercase(SType) ++ " Ingress Bridge"), - value => info_example(Type, ingress, Method) - }, - KeyEgress => #{ - summary => bin(string:uppercase(SType) ++ " Egress Bridge"), - value => info_example(Type, egress, Method) - } - }) - end, - #{}, - ?CONN_TYPES - ). 
- -info_example(Type, Direction, Method) -> maps:merge( - info_example_basic(Type, Direction), - method_example(Type, Direction, Method) + #{ + <<"webhook_example">> => #{ + summary => <<"WebHook">>, + value => info_example(webhook, Method) + }, + <<"mqtt_example">> => #{ + summary => <<"MQTT Bridge">>, + value => info_example(mqtt, Method) + } + }, + ee_bridge_examples(Method) ). -method_example(Type, Direction, Method) when Method == get; Method == post -> +ee_bridge_examples(Method) -> + try + emqx_ee_bridge:examples(Method) + catch + _:_ -> #{} + end. + +info_example(Type, Method) -> + maps:merge( + info_example_basic(Type), + method_example(Type, Method) + ). + +method_example(Type, Method) when Method == get; Method == post -> SType = atom_to_list(Type), - SDir = atom_to_list(Direction), - SName = - case Type of - webhook -> "my_" ++ SType; - _ -> "my_" ++ SDir ++ "_" ++ SType ++ "_bridge" - end, - TypeNameExamp = #{ + SName = SType ++ "_example", + TypeNameExam = #{ type => bin(SType), name => bin(SName) }, - maybe_with_metrics_example(TypeNameExamp, Method); -method_example(_Type, _Direction, put) -> + maybe_with_metrics_example(TypeNameExam, Method); +method_example(_Type, put) -> #{}. -maybe_with_metrics_example(TypeNameExamp, get) -> - TypeNameExamp#{ - metrics => ?METRICS(0, 0, 0, 0, 0, 0), +maybe_with_metrics_example(TypeNameExam, get) -> + TypeNameExam#{ + metrics => ?EMPTY_METRICS, node_metrics => [ #{ node => node(), - metrics => ?METRICS(0, 0, 0, 0, 0, 0) + metrics => ?EMPTY_METRICS } ] }; -maybe_with_metrics_example(TypeNameExamp, _) -> - TypeNameExamp. +maybe_with_metrics_example(TypeNameExam, _) -> + TypeNameExam. 
-info_example_basic(webhook, _) -> +info_example_basic(webhook) -> #{ enable => true, url => <<"http://localhost:9901/messages/${topic}">>, @@ -231,30 +200,70 @@ info_example_basic(webhook, _) -> ssl => #{enable => false}, local_topic => <<"emqx_webhook/#">>, method => post, - body => <<"${payload}">> + body => <<"${payload}">>, + resource_opts => #{ + worker_pool_size => 1, + health_check_interval => 15000, + auto_restart_interval => 15000, + query_mode => async, + async_inflight_window => 100, + enable_queue => false, + max_queue_bytes => 100 * 1024 * 1024 + } }; -info_example_basic(mqtt, ingress) -> +info_example_basic(mqtt) -> + (mqtt_main_example())#{ + egress => mqtt_egress_example(), + ingress => mqtt_ingress_example() + }. + +mqtt_main_example() -> #{ enable => true, - connector => <<"mqtt:my_mqtt_connector">>, - direction => ingress, - remote_topic => <<"aws/#">>, - remote_qos => 1, - local_topic => <<"from_aws/${topic}">>, - local_qos => <<"${qos}">>, - payload => <<"${payload}">>, - retain => <<"${retain}">> - }; -info_example_basic(mqtt, egress) -> + mode => cluster_shareload, + server => <<"127.0.0.1:1883">>, + proto_ver => <<"v4">>, + username => <<"foo">>, + password => <<"bar">>, + clean_start => true, + keepalive => <<"300s">>, + retry_interval => <<"15s">>, + max_inflight => 100, + resource_opts => #{ + health_check_interval => <<"15s">>, + auto_restart_interval => <<"60s">>, + query_mode => sync, + enable_queue => false, + max_queue_bytes => 100 * 1024 * 1024 + }, + ssl => #{ + enable => false + } + }. +mqtt_egress_example() -> #{ - enable => true, - connector => <<"mqtt:my_mqtt_connector">>, - direction => egress, - local_topic => <<"emqx/#">>, - remote_topic => <<"from_emqx/${topic}">>, - remote_qos => <<"${qos}">>, - payload => <<"${payload}">>, - retain => false + local => #{ + topic => <<"emqx/#">> + }, + remote => #{ + topic => <<"from_emqx/${topic}">>, + qos => <<"${qos}">>, + payload => <<"${payload}">>, + retain => false + } + }. 
+mqtt_ingress_example() -> + #{ + remote => #{ + topic => <<"aws/#">>, + qos => 1 + }, + local => #{ + topic => <<"from_aws/${topic}">>, + qos => <<"${qos}">>, + payload => <<"${payload}">>, + retain => <<"${retain}">> + } }. schema("/bridges") -> @@ -321,6 +330,7 @@ schema("/bridges/:id") -> responses => #{ 204 => <<"Bridge deleted">>, 400 => error_schema(['INVALID_ID'], "Update bridge failed"), + 403 => error_schema('FORBIDDEN_REQUEST', "Forbidden operation"), 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable") } } @@ -414,13 +424,28 @@ schema("/nodes/:node/bridges/:id/operation/:operation") -> {404, error_msg('NOT_FOUND', <<"bridge not found">>)} end ); -'/bridges/:id'(delete, #{bindings := #{id := Id}}) -> +'/bridges/:id'(delete, #{bindings := #{id := Id}, query_string := Qs}) -> + AlsoDeleteActs = + case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of + <<"true">> -> true; + true -> true; + _ -> false + end, ?TRY_PARSE_ID( Id, - case emqx_bridge:remove(BridgeType, BridgeName) of - {ok, _} -> {204}; - {error, timeout} -> {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; - {error, Reason} -> {500, error_msg('INTERNAL_ERROR', Reason)} + case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActs) of + {ok, _} -> + 204; + {error, {rules_deps_on_this_bridge, RuleIds}} -> + {403, + error_msg( + 'FORBIDDEN_REQUEST', + {<<"There're some rules dependent on this bridge">>, RuleIds} + )}; + {error, timeout} -> + {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; + {error, Reason} -> + {500, error_msg('INTERNAL_ERROR', Reason)} end ). @@ -602,19 +627,36 @@ collect_metrics(Bridges) -> [maps:with([node, metrics], B) || B <- Bridges]. 
aggregate_metrics(AllMetrics) -> - InitMetrics = ?METRICS(0, 0, 0, 0, 0, 0), + InitMetrics = ?EMPTY_METRICS, lists:foldl( fun( - #{metrics := ?metrics(Match1, Succ1, Failed1, Rate1, Rate5m1, RateMax1)}, - ?metrics(Match0, Succ0, Failed0, Rate0, Rate5m0, RateMax0) + #{ + metrics := ?metrics( + M1, M2, M3, M4, M5, M6, M7, M8, M9, M10, M11, M12, M13, M14, M15, M16, M17 + ) + }, + ?metrics( + N1, N2, N3, N4, N5, N6, N7, N8, N9, N10, N11, N12, N13, N14, N15, N16, N17 + ) ) -> ?METRICS( - Match1 + Match0, - Succ1 + Succ0, - Failed1 + Failed0, - Rate1 + Rate0, - Rate5m1 + Rate5m0, - RateMax1 + RateMax0 + M1 + N1, + M2 + N2, + M3 + N3, + M4 + N4, + M5 + N5, + M6 + N6, + M7 + N7, + M8 + N8, + M9 + N9, + M10 + N10, + M11 + N11, + M12 + N12, + M13 + N13, + M14 + N14, + M15 + N15, + M16 + N16, + M17 + N17 ) end, InitMetrics, @@ -643,12 +685,45 @@ format_resp( }. format_metrics(#{ - counters := #{failed := Failed, exception := Ex, matched := Match, success := Succ}, + counters := #{ + 'batching' := Batched, + 'dropped' := Dropped, + 'dropped.other' := DroppedOther, + 'dropped.queue_full' := DroppedQueueFull, + 'dropped.queue_not_enabled' := DroppedQueueNotEnabled, + 'dropped.resource_not_found' := DroppedResourceNotFound, + 'dropped.resource_stopped' := DroppedResourceStopped, + 'matched' := Matched, + 'queuing' := Queued, + 'retried' := Retried, + 'failed' := SentFailed, + 'inflight' := SentInflight, + 'success' := SentSucc, + 'received' := Rcvd + }, rate := #{ matched := #{current := Rate, last5m := Rate5m, max := RateMax} } }) -> - ?METRICS(Match, Succ, Failed + Ex, Rate, Rate5m, RateMax). + ?METRICS( + Batched, + Dropped, + DroppedOther, + DroppedQueueFull, + DroppedQueueNotEnabled, + DroppedResourceNotFound, + DroppedResourceStopped, + Matched, + Queued, + Retried, + SentFailed, + SentInflight, + SentSucc, + Rate, + Rate5m, + RateMax, + Rcvd + ). 
fill_defaults(Type, RawConf) -> PackedConf = pack_bridge_conf(Type, RawConf), @@ -713,6 +788,17 @@ call_operation(Node, OperFunc, BridgeType, BridgeName) -> {200}; {error, timeout} -> {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; + {error, {start_pool_failed, Name, Reason}} -> + {503, + error_msg( + 'SERVICE_UNAVAILABLE', + bin( + io_lib:format( + "failed to start ~p pool for reason ~p", + [Name, Reason] + ) + ) + )}; {error, Reason} -> {500, error_msg('INTERNAL_ERROR', Reason)} end; diff --git a/apps/emqx_bridge/src/emqx_bridge_app.erl b/apps/emqx_bridge/src/emqx_bridge_app.erl index 958bbf288..077d37d4a 100644 --- a/apps/emqx_bridge/src/emqx_bridge_app.erl +++ b/apps/emqx_bridge/src/emqx_bridge_app.erl @@ -29,6 +29,7 @@ start(_StartType, _StartArgs) -> {ok, Sup} = emqx_bridge_sup:start_link(), + ok = start_ee_apps(), ok = emqx_bridge:load(), ok = emqx_bridge:load_hook(), ok = emqx_config_handler:add_handler(?LEAF_NODE_HDLR_PATH, ?MODULE), @@ -41,6 +42,15 @@ stop(_State) -> ok = emqx_bridge:unload_hook(), ok. +-if(?EMQX_RELEASE_EDITION == ee). +start_ee_apps() -> + {ok, _} = application:ensure_all_started(emqx_ee_bridge), + ok. +-else. +start_ee_apps() -> + ok. +-endif. + %% NOTE: We depends on the `emqx_bridge:pre_config_update/3` to restart/stop the %% underlying resources. pre_config_update(_, {_Oper, _, _}, undefined) -> diff --git a/apps/emqx_bridge/src/emqx_bridge_mqtt_schema.erl b/apps/emqx_bridge/src/emqx_bridge_mqtt_schema.erl deleted file mode 100644 index 9fc06ec0e..000000000 --- a/apps/emqx_bridge/src/emqx_bridge_mqtt_schema.erl +++ /dev/null @@ -1,68 +0,0 @@ --module(emqx_bridge_mqtt_schema). - --include_lib("typerefl/include/types.hrl"). --include_lib("hocon/include/hoconsc.hrl"). - --import(hoconsc, [mk/2]). - --export([roots/0, fields/1, desc/1]). - -%%====================================================================================== -%% Hocon Schema Definitions -roots() -> []. 
- -fields("ingress") -> - [emqx_bridge_schema:direction_field(ingress, emqx_connector_mqtt_schema:ingress_desc())] ++ - emqx_bridge_schema:common_bridge_fields(mqtt_connector_ref()) ++ - proplists:delete(hookpoint, emqx_connector_mqtt_schema:fields("ingress")); -fields("egress") -> - [emqx_bridge_schema:direction_field(egress, emqx_connector_mqtt_schema:egress_desc())] ++ - emqx_bridge_schema:common_bridge_fields(mqtt_connector_ref()) ++ - emqx_connector_mqtt_schema:fields("egress"); -fields("post_ingress") -> - [ - type_field(), - name_field() - ] ++ proplists:delete(enable, fields("ingress")); -fields("post_egress") -> - [ - type_field(), - name_field() - ] ++ proplists:delete(enable, fields("egress")); -fields("put_ingress") -> - proplists:delete(enable, fields("ingress")); -fields("put_egress") -> - proplists:delete(enable, fields("egress")); -fields("get_ingress") -> - emqx_bridge_schema:metrics_status_fields() ++ fields("post_ingress"); -fields("get_egress") -> - emqx_bridge_schema:metrics_status_fields() ++ fields("post_egress"). - -desc(Rec) when Rec =:= "ingress"; Rec =:= "egress" -> - ?DESC("desc_rec"); -desc(_) -> - undefined. - -%%====================================================================================== -type_field() -> - {type, - mk( - mqtt, - #{ - required => true, - desc => ?DESC("desc_type") - } - )}. - -name_field() -> - {name, - mk( - binary(), - #{ - required => true, - desc => ?DESC("desc_name") - } - )}. - -mqtt_connector_ref() -> - ?R_REF(emqx_connector_mqtt_schema, "connector"). diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index c64d16d19..ad35485ed 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -34,18 +34,30 @@ create_dry_run/2, remove/1, remove/2, - remove/3, + remove/4, update/2, update/3, + update/4, stop/2, restart/2, reset_metrics/1 ]). 
+%% bi-directional bridge with producer/consumer or ingress/egress configs
+-define(IS_BI_DIR_BRIDGE(TYPE), TYPE =:= <<"mqtt">>; TYPE =:= <<"kafka">>).
+
+-if(?EMQX_RELEASE_EDITION == ee).
+bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt;
+bridge_to_resource_type(mqtt) -> emqx_connector_mqtt;
+bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http;
+bridge_to_resource_type(webhook) -> emqx_connector_http;
+bridge_to_resource_type(BridgeType) -> emqx_ee_bridge:resource_type(BridgeType).
+-else.
 bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt;
 bridge_to_resource_type(mqtt) -> emqx_connector_mqtt;
 bridge_to_resource_type(<<"webhook">>) -> emqx_connector_http;
 bridge_to_resource_type(webhook) -> emqx_connector_http.
+-endif.
 
 resource_id(BridgeId) when is_binary(BridgeId) ->
     <<"bridge:", BridgeId/binary>>.
@@ -63,14 +75,44 @@ bridge_id(BridgeType, BridgeName) ->
 parse_bridge_id(BridgeId) ->
     case string:split(bin(BridgeId), ":", all) of
         [Type, Name] ->
-            case emqx_misc:safe_to_existing_atom(Type, utf8) of
-                {ok, Type1} ->
-                    {Type1, Name};
-                _ ->
-                    error({invalid_bridge_id, BridgeId})
-            end;
+            {to_type_atom(Type), validate_name(Name)};
         _ ->
-            error({invalid_bridge_id, BridgeId})
+            invalid_bridge_id(
+                <<"should be of format {type}:{name}, but got ", BridgeId/binary>>
+            )
+    end.
+
+validate_name(Name0) ->
+    Name = unicode:characters_to_list(Name0, utf8),
+    case is_list(Name) andalso Name =/= [] of
+        true ->
+            case lists:all(fun is_id_char/1, Name) of
+                true ->
+                    Name0;
+                false ->
+                    invalid_bridge_id(<<"bad name: ", Name0/binary>>)
+            end;
+        false ->
+            invalid_bridge_id(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>)
+    end.
+
+-spec invalid_bridge_id(binary()) -> no_return().
+invalid_bridge_id(Reason) -> throw({?FUNCTION_NAME, Reason}).
+ +is_id_char(C) when C >= $0 andalso C =< $9 -> true; +is_id_char(C) when C >= $a andalso C =< $z -> true; +is_id_char(C) when C >= $A andalso C =< $Z -> true; +is_id_char($_) -> true; +is_id_char($-) -> true; +is_id_char($.) -> true; +is_id_char(_) -> false. + +to_type_atom(Type) -> + try + erlang:binary_to_existing_atom(Type, utf8) + catch + _:_ -> + invalid_bridge_id(<<"unknown type: ", Type/binary>>) end. reset_metrics(ResourceId) -> @@ -88,7 +130,7 @@ create(BridgeId, Conf) -> create(BridgeType, BridgeName, Conf). create(Type, Name, Conf) -> - create(Type, Name, Conf, #{auto_retry_interval => 60000}). + create(Type, Name, Conf, #{}). create(Type, Name, Conf, Opts) -> ?SLOG(info, #{ @@ -101,7 +143,7 @@ create(Type, Name, Conf, Opts) -> resource_id(Type, Name), <<"emqx_bridge">>, bridge_to_resource_type(Type), - parse_confs(Type, Name, Conf), + parse_confs(bin(Type), Name, Conf), Opts ), maybe_disable_bridge(Type, Name, Conf). @@ -111,6 +153,9 @@ update(BridgeId, {OldConf, Conf}) -> update(BridgeType, BridgeName, {OldConf, Conf}). update(Type, Name, {OldConf, Conf}) -> + update(Type, Name, {OldConf, Conf}, #{}). + +update(Type, Name, {OldConf, Conf}, Opts) -> %% TODO: sometimes its not necessary to restart the bridge connection. %% %% - if the connection related configs like `servers` is updated, we should restart/start @@ -127,7 +172,7 @@ update(Type, Name, {OldConf, Conf}) -> name => Name, config => Conf }), - case recreate(Type, Name, Conf) of + case recreate(Type, Name, Conf, Opts) of {ok, _} -> maybe_disable_bridge(Type, Name, Conf); {error, not_found} -> @@ -137,7 +182,7 @@ update(Type, Name, {OldConf, Conf}) -> name => Name, config => Conf }), - create(Type, Name, Conf); + create(Type, Name, Conf, Opts); {error, Reason} -> {error, {update_bridge_failed, Reason}} end; @@ -158,41 +203,38 @@ recreate(Type, Name) -> recreate(Type, Name, emqx:get_config([bridges, Type, Name])). recreate(Type, Name, Conf) -> + recreate(Type, Name, Conf, #{}). 
+ +recreate(Type, Name, Conf, Opts) -> emqx_resource:recreate_local( resource_id(Type, Name), bridge_to_resource_type(Type), - parse_confs(Type, Name, Conf), - #{auto_retry_interval => 60000} + parse_confs(bin(Type), Name, Conf), + Opts ). create_dry_run(Type, Conf) -> - Conf0 = fill_dry_run_conf(Conf), - case emqx_resource:check_config(bridge_to_resource_type(Type), Conf0) of - {ok, Conf1} -> - TmpPath = iolist_to_binary(["bridges-create-dry-run:", emqx_misc:gen_id(8)]), - case emqx_connector_ssl:convert_certs(TmpPath, Conf1) of - {error, Reason} -> - {error, Reason}; - {ok, ConfNew} -> - Res = emqx_resource:create_dry_run_local( - bridge_to_resource_type(Type), ConfNew - ), - _ = maybe_clear_certs(TmpPath, ConfNew), - Res - end; - {error, _} = Error -> - Error + TmpPath = iolist_to_binary(["bridges-create-dry-run:", emqx_misc:gen_id(8)]), + case emqx_connector_ssl:convert_certs(TmpPath, Conf) of + {error, Reason} -> + {error, Reason}; + {ok, ConfNew} -> + Res = emqx_resource:create_dry_run_local( + bridge_to_resource_type(Type), ConfNew + ), + _ = maybe_clear_certs(TmpPath, ConfNew), + Res end. remove(BridgeId) -> {BridgeType, BridgeName} = parse_bridge_id(BridgeId), - remove(BridgeType, BridgeName, #{}). + remove(BridgeType, BridgeName, #{}, #{}). remove(Type, Name) -> - remove(Type, Name, undefined). + remove(Type, Name, #{}, #{}). %% just for perform_bridge_changes/1 -remove(Type, Name, _Conf) -> +remove(Type, Name, _Conf, _Opts) -> ?SLOG(info, #{msg => "remove_bridge", type => Type, name => Name}), case emqx_resource:remove_local(resource_id(Type, Name)) of ok -> ok; @@ -206,19 +248,6 @@ maybe_disable_bridge(Type, Name, Conf) -> true -> ok end. -fill_dry_run_conf(Conf) -> - Conf#{ - <<"egress">> => - #{ - <<"remote_topic">> => <<"t">>, - <<"remote_qos">> => 0, - <<"retain">> => true, - <<"payload">> => <<"val">> - }, - <<"ingress">> => - #{<<"remote_topic">> => <<"t">>} - }. 
- maybe_clear_certs(TmpPath, #{ssl := SslConf} = Conf) -> %% don't remove the cert files if they are in use case is_tmp_path_conf(TmpPath, SslConf) of @@ -238,8 +267,9 @@ is_tmp_path_conf(_TmpPath, _Conf) -> is_tmp_path(TmpPath, File) -> string:str(str(File), str(TmpPath)) > 0. +%% convert bridge configs to what the connector modules want parse_confs( - webhook, + <<"webhook">>, _Name, #{ url := Url, @@ -264,42 +294,14 @@ parse_confs( max_retries => Retry } }; -parse_confs(Type, Name, #{connector := ConnId, direction := Direction} = Conf) when - is_binary(ConnId) --> - case emqx_connector:parse_connector_id(ConnId) of - {Type, ConnName} -> - ConnectorConfs = emqx:get_config([connectors, Type, ConnName]), - make_resource_confs( - Direction, - ConnectorConfs, - maps:without([connector, direction], Conf), - Type, - Name - ); - {_ConnType, _ConnName} -> - error({cannot_use_connector_with_different_type, ConnId}) - end; -parse_confs(Type, Name, #{connector := ConnectorConfs, direction := Direction} = Conf) when - is_map(ConnectorConfs) --> - make_resource_confs( - Direction, - ConnectorConfs, - maps:without([connector, direction], Conf), - Type, - Name - ). - -make_resource_confs(ingress, ConnectorConfs, BridgeConf, Type, Name) -> +parse_confs(Type, Name, Conf) when ?IS_BI_DIR_BRIDGE(Type) -> + %% For some drivers that can be used as data-sources, we need to provide a + %% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it + %% receives a message from the external database. BName = bridge_id(Type, Name), - ConnectorConfs#{ - ingress => BridgeConf#{hookpoint => <<"$bridges/", BName/binary>>} - }; -make_resource_confs(egress, ConnectorConfs, BridgeConf, _Type, _Name) -> - ConnectorConfs#{ - egress => BridgeConf - }. + Conf#{hookpoint => <<"$bridges/", BName/binary>>, bridge_name => Name}; +parse_confs(_Type, _Name, Conf) -> + Conf. 
parse_url(Url) -> case string:split(Url, "//", leading) of diff --git a/apps/emqx_bridge/src/emqx_bridge_schema.erl b/apps/emqx_bridge/src/emqx_bridge_schema.erl deleted file mode 100644 index e4d0e3d2d..000000000 --- a/apps/emqx_bridge/src/emqx_bridge_schema.erl +++ /dev/null @@ -1,173 +0,0 @@ --module(emqx_bridge_schema). - --include_lib("typerefl/include/types.hrl"). --include_lib("hocon/include/hoconsc.hrl"). - --import(hoconsc, [mk/2, ref/2]). - --export([roots/0, fields/1, desc/1, namespace/0]). - --export([ - get_response/0, - put_request/0, - post_request/0 -]). - --export([ - common_bridge_fields/1, - metrics_status_fields/0, - direction_field/2 -]). - -%%====================================================================================== -%% Hocon Schema Definitions - --define(CONN_TYPES, [mqtt]). - -%%====================================================================================== -%% For HTTP APIs -get_response() -> - http_schema("get"). - -put_request() -> - http_schema("put"). - -post_request() -> - http_schema("post"). - -http_schema(Method) -> - Schemas = lists:flatmap( - fun(Type) -> - [ - ref(schema_mod(Type), Method ++ "_ingress"), - ref(schema_mod(Type), Method ++ "_egress") - ] - end, - ?CONN_TYPES - ), - hoconsc:union([ - ref(emqx_bridge_webhook_schema, Method) - | Schemas - ]). - -common_bridge_fields(ConnectorRef) -> - [ - {enable, - mk( - boolean(), - #{ - desc => ?DESC("desc_enable"), - default => true - } - )}, - {connector, - mk( - hoconsc:union([binary(), ConnectorRef]), - #{ - required => true, - example => <<"mqtt:my_mqtt_connector">>, - desc => ?DESC("desc_connector") - } - )} - ]. 
- -metrics_status_fields() -> - [ - {"metrics", mk(ref(?MODULE, "metrics"), #{desc => ?DESC("desc_metrics")})}, - {"node_metrics", - mk( - hoconsc:array(ref(?MODULE, "node_metrics")), - #{desc => ?DESC("desc_node_metrics")} - )}, - {"status", mk(status(), #{desc => ?DESC("desc_status")})}, - {"node_status", - mk( - hoconsc:array(ref(?MODULE, "node_status")), - #{desc => ?DESC("desc_node_status")} - )} - ]. - -direction_field(Dir, Desc) -> - {direction, - mk( - Dir, - #{ - required => true, - default => egress, - desc => "The direction of the bridge. Can be one of 'ingress' or 'egress'.
" ++ - Desc - } - )}. - -%%====================================================================================== -%% For config files - -namespace() -> "bridge". - -roots() -> [bridges]. - -fields(bridges) -> - [ - {webhook, - mk( - hoconsc:map(name, ref(emqx_bridge_webhook_schema, "config")), - #{desc => ?DESC("bridges_webhook")} - )} - ] ++ - [ - {T, - mk( - hoconsc:map( - name, - hoconsc:union([ - ref(schema_mod(T), "ingress"), - ref(schema_mod(T), "egress") - ]) - ), - #{desc => ?DESC("bridges_name")} - )} - || T <- ?CONN_TYPES - ]; -fields("metrics") -> - [ - {"matched", mk(integer(), #{desc => ?DESC("metric_matched")})}, - {"success", mk(integer(), #{desc => ?DESC("metric_success")})}, - {"failed", mk(integer(), #{desc => ?DESC("metric_failed")})}, - {"rate", mk(float(), #{desc => ?DESC("metric_rate")})}, - {"rate_max", mk(float(), #{desc => ?DESC("metric_rate_max")})}, - {"rate_last5m", - mk( - float(), - #{desc => ?DESC("metric_rate_last5m")} - )} - ]; -fields("node_metrics") -> - [ - node_name(), - {"metrics", mk(ref(?MODULE, "metrics"), #{})} - ]; -fields("node_status") -> - [ - node_name(), - {"status", mk(status(), #{})} - ]. - -desc(bridges) -> - ?DESC("desc_bridges"); -desc("metrics") -> - ?DESC("desc_metrics"); -desc("node_metrics") -> - ?DESC("desc_node_metrics"); -desc("node_status") -> - ?DESC("desc_node_status"); -desc(_) -> - undefined. - -status() -> - hoconsc:enum([connected, disconnected, connecting]). - -node_name() -> - {"node", mk(binary(), #{desc => ?DESC("desc_node_name"), example => "emqx@127.0.0.1"})}. - -schema_mod(Type) -> - list_to_atom(lists:concat(["emqx_bridge_", Type, "_schema"])). 
diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_config.erl b/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_config.erl new file mode 100644 index 000000000..997337c9d --- /dev/null +++ b/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_config.erl @@ -0,0 +1,118 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% @doc This module was created to convert old version (from v5.0.0 to v5.0.11) +%% mqtt connector configs to newer version (developed for enterprise edition). +-module(emqx_bridge_mqtt_config). + +-export([ + upgrade_pre_ee/1, + maybe_upgrade/1 +]). + +upgrade_pre_ee(undefined) -> + undefined; +upgrade_pre_ee(Conf0) when is_map(Conf0) -> + maps:from_list(upgrade_pre_ee(maps:to_list(Conf0))); +upgrade_pre_ee([]) -> + []; +upgrade_pre_ee([{Name, Config} | Bridges]) -> + [{Name, maybe_upgrade(Config)} | upgrade_pre_ee(Bridges)]. + +maybe_upgrade(#{<<"connector">> := _} = Config0) -> + Config1 = up(Config0), + Config = lists:map(fun binary_key/1, Config1), + maps:from_list(Config); +maybe_upgrade(NewVersion) -> + NewVersion. + +binary_key({K, V}) -> + {atom_to_binary(K, utf8), V}. 
+
+up(#{<<"connector">> := Connector} = Config) ->
+    Cn = fun(Key0, Default) ->
+        Key = atom_to_binary(Key0, utf8),
+        {Key0, maps:get(Key, Connector, Default)}
+    end,
+    Direction =
+        case maps:get(<<"direction">>, Config) of
+            <<"egress">> ->
+                {egress, egress(Config)};
+            <<"ingress">> ->
+                {ingress, ingress(Config)}
+        end,
+    Enable = maps:get(<<"enable">>, Config, true),
+    [
+        Cn(bridge_mode, false),
+        Cn(username, <<>>),
+        Cn(password, <<>>),
+        Cn(clean_start, true),
+        Cn(keepalive, <<"60s">>),
+        Cn(mode, <<"cluster_shareload">>),
+        Cn(proto_ver, <<"v4">>),
+        Cn(server, undefined),
+        Cn(retry_interval, <<"15s">>),
+        Cn(reconnect_interval, <<"15s">>),
+        Cn(ssl, default_ssl()),
+        {enable, Enable},
+        {resource_opts, default_resource_opts()},
+        Direction
+    ].
+
+default_ssl() ->
+    #{
+        <<"enable">> => false,
+        <<"verify">> => <<"verify_peer">>
+    }.
+
+default_resource_opts() ->
+    #{
+        <<"async_inflight_window">> => 100,
+        <<"auto_restart_interval">> => <<"60s">>,
+        <<"enable_queue">> => false,
+        <<"health_check_interval">> => <<"15s">>,
+        <<"max_queue_bytes">> => <<"1GB">>,
+        <<"query_mode">> => <<"sync">>,
+        <<"worker_pool_size">> => 16
+    }.
+
+egress(Config) ->
+    % <<"local">> % the old version has no 'local' config for egress
+    #{
+        <<"remote">> =>
+            #{
+                <<"topic">> => maps:get(<<"remote_topic">>, Config),
+                <<"qos">> => maps:get(<<"remote_qos">>, Config),
+                <<"retain">> => maps:get(<<"retain">>, Config),
+                <<"payload">> => maps:get(<<"payload">>, Config)
+            }
+    }.
+
+ingress(Config) ->
+    #{
+        <<"remote">> =>
+            #{
+                <<"qos">> => maps:get(<<"remote_qos">>, Config),
+                <<"topic">> => maps:get(<<"remote_topic">>, Config)
+            },
+        <<"local">> =>
+            #{
+                <<"payload">> => maps:get(<<"payload">>, Config),
+                <<"qos">> => maps:get(<<"local_qos">>, Config),
+                <<"retain">> => maps:get(<<"retain">>, Config, false)
+                %% <<"topic">> % the old version has no local topic for ingress
+            }
+    }.
diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_schema.erl new file mode 100644 index 000000000..6d2baaaa8 --- /dev/null +++ b/apps/emqx_bridge/src/schema/emqx_bridge_mqtt_schema.erl @@ -0,0 +1,57 @@ +-module(emqx_bridge_mqtt_schema). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-import(hoconsc, [mk/2, ref/2]). + +-export([roots/0, fields/1, desc/1, namespace/0]). + +%%====================================================================================== +%% Hocon Schema Definitions +namespace() -> "bridge_mqtt". + +roots() -> []. + +fields("config") -> + %% enable + emqx_bridge_schema:common_bridge_fields() ++ + [ + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ] ++ + emqx_connector_mqtt_schema:fields("config"); +fields("creation_opts") -> + Opts = emqx_resource_schema:fields("creation_opts"), + [O || {Field, _} = O <- Opts, not is_hidden_opts(Field)]; +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:metrics_status_fields() ++ fields("config"). + +desc("config") -> + ?DESC("config"); +desc("creation_opts" = Name) -> + emqx_resource_schema:desc(Name); +desc(_) -> + undefined. + +%%====================================================================================== +%% internal +is_hidden_opts(Field) -> + lists:member(Field, [enable_batch, batch_size, batch_time]). + +type_field() -> + {type, mk(mqtt, #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. 
diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl new file mode 100644 index 000000000..756a8347d --- /dev/null +++ b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl @@ -0,0 +1,181 @@ +-module(emqx_bridge_schema). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-import(hoconsc, [mk/2, ref/2]). + +-export([roots/0, fields/1, desc/1, namespace/0]). + +-export([ + get_response/0, + put_request/0, + post_request/0 +]). + +-export([ + common_bridge_fields/0, + metrics_status_fields/0 +]). + +%%====================================================================================== +%% Hocon Schema Definitions + +%%====================================================================================== +%% For HTTP APIs +get_response() -> + api_schema("get"). + +put_request() -> + api_schema("put"). + +post_request() -> + api_schema("post"). + +api_schema(Method) -> + Broker = [ + ref(Mod, Method) + || Mod <- [emqx_bridge_webhook_schema, emqx_bridge_mqtt_schema] + ], + EE = ee_api_schemas(Method), + hoconsc:union(Broker ++ EE). + +ee_api_schemas(Method) -> + %% must ensure the app is loaded before checking if fn is defined. + ensure_loaded(emqx_ee_bridge, emqx_ee_bridge), + case erlang:function_exported(emqx_ee_bridge, api_schemas, 1) of + true -> emqx_ee_bridge:api_schemas(Method); + false -> [] + end. + +ee_fields_bridges() -> + %% must ensure the app is loaded before checking if fn is defined. + ensure_loaded(emqx_ee_bridge, emqx_ee_bridge), + case erlang:function_exported(emqx_ee_bridge, fields, 1) of + true -> emqx_ee_bridge:fields(bridges); + false -> [] + end. + +common_bridge_fields() -> + [ + {enable, + mk( + boolean(), + #{ + desc => ?DESC("desc_enable"), + default => true + } + )} + ]. 
+ +metrics_status_fields() -> + [ + {"metrics", mk(ref(?MODULE, "metrics"), #{desc => ?DESC("desc_metrics")})}, + {"node_metrics", + mk( + hoconsc:array(ref(?MODULE, "node_metrics")), + #{desc => ?DESC("desc_node_metrics")} + )}, + {"status", mk(status(), #{desc => ?DESC("desc_status")})}, + {"node_status", + mk( + hoconsc:array(ref(?MODULE, "node_status")), + #{desc => ?DESC("desc_node_status")} + )} + ]. + +%%====================================================================================== +%% For config files + +namespace() -> "bridge". + +roots() -> [bridges]. + +fields(bridges) -> + [ + {webhook, + mk( + hoconsc:map(name, ref(emqx_bridge_webhook_schema, "config")), + #{ + desc => ?DESC("bridges_webhook"), + required => false + } + )}, + {mqtt, + mk( + hoconsc:map(name, ref(emqx_bridge_mqtt_schema, "config")), + #{ + desc => ?DESC("bridges_mqtt"), + required => false, + converter => fun emqx_bridge_mqtt_config:upgrade_pre_ee/1 + } + )} + ] ++ ee_fields_bridges(); +fields("metrics") -> + [ + {"batching", mk(integer(), #{desc => ?DESC("metric_batching")})}, + {"dropped", mk(integer(), #{desc => ?DESC("metric_dropped")})}, + {"dropped.other", mk(integer(), #{desc => ?DESC("metric_dropped_other")})}, + {"dropped.queue_full", mk(integer(), #{desc => ?DESC("metric_dropped_queue_full")})}, + {"dropped.queue_not_enabled", + mk(integer(), #{desc => ?DESC("metric_dropped_queue_not_enabled")})}, + {"dropped.resource_not_found", + mk(integer(), #{desc => ?DESC("metric_dropped_resource_not_found")})}, + {"dropped.resource_stopped", + mk(integer(), #{desc => ?DESC("metric_dropped_resource_stopped")})}, + {"matched", mk(integer(), #{desc => ?DESC("metric_matched")})}, + {"queuing", mk(integer(), #{desc => ?DESC("metric_queuing")})}, + {"retried", mk(integer(), #{desc => ?DESC("metric_retried")})}, + {"failed", mk(integer(), #{desc => ?DESC("metric_sent_failed")})}, + {"inflight", mk(integer(), #{desc => ?DESC("metric_sent_inflight")})}, + {"success", mk(integer(), #{desc 
=> ?DESC("metric_sent_success")})}, + {"rate", mk(float(), #{desc => ?DESC("metric_rate")})}, + {"rate_max", mk(float(), #{desc => ?DESC("metric_rate_max")})}, + {"rate_last5m", + mk( + float(), + #{desc => ?DESC("metric_rate_last5m")} + )}, + {"received", mk(float(), #{desc => ?DESC("metric_received")})} + ]; +fields("node_metrics") -> + [ + node_name(), + {"metrics", mk(ref(?MODULE, "metrics"), #{})} + ]; +fields("node_status") -> + [ + node_name(), + {"status", mk(status(), #{})} + ]. + +desc(bridges) -> + ?DESC("desc_bridges"); +desc("metrics") -> + ?DESC("desc_metrics"); +desc("node_metrics") -> + ?DESC("desc_node_metrics"); +desc("node_status") -> + ?DESC("desc_node_status"); +desc(_) -> + undefined. + +status() -> + hoconsc:enum([connected, disconnected, connecting]). + +node_name() -> + {"node", mk(binary(), #{desc => ?DESC("desc_node_name"), example => "emqx@127.0.0.1"})}. + +%%================================================================================================= +%% Internal fns +%%================================================================================================= + +ensure_loaded(App, Mod) -> + try + _ = application:load(App), + _ = Mod:module_info(), + ok + catch + _:_ -> + ok + end. diff --git a/apps/emqx_bridge/src/emqx_bridge_webhook_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl similarity index 79% rename from apps/emqx_bridge/src/emqx_bridge_webhook_schema.erl rename to apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl index 02dd0a76d..d270fc91e 100644 --- a/apps/emqx_bridge/src/emqx_bridge_webhook_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_webhook_schema.erl @@ -3,13 +3,13 @@ -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). --import(hoconsc, [mk/2, enum/1]). +-import(hoconsc, [mk/2, enum/1, ref/2]). -export([roots/0, fields/1, namespace/0, desc/1]). 
%%====================================================================================== %% Hocon Schema Definitions -namespace() -> "bridge". +namespace() -> "bridge_webhook". roots() -> []. @@ -23,10 +23,19 @@ fields("post") -> fields("put") -> fields("config"); fields("get") -> - emqx_bridge_schema:metrics_status_fields() ++ fields("post"). + emqx_bridge_schema:metrics_status_fields() ++ fields("post"); +fields("creation_opts") -> + lists:filter( + fun({K, _V}) -> + not lists:member(K, unsupported_opts()) + end, + emqx_resource_schema:fields("creation_opts") + ). desc("config") -> ?DESC("desc_config"); +desc("creation_opts") -> + ?DESC(emqx_resource_schema, "creation_opts"); desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> ["Configuration for WebHook using `", string:to_upper(Method), "` method."]; desc(_) -> @@ -41,16 +50,8 @@ basic_config() -> desc => ?DESC("config_enable"), default => true } - )}, - {direction, - mk( - egress, - #{ - desc => ?DESC("config_direction"), - default => egress - } )} - ] ++ + ] ++ webhook_creation_opts() ++ proplists:delete( max_retries, proplists:delete(base_url, emqx_connector_http:fields(config)) ). @@ -68,7 +69,10 @@ request_config() -> {local_topic, mk( binary(), - #{desc => ?DESC("config_local_topic")} + #{ + desc => ?DESC("config_local_topic"), + required => false + } )}, {method, mk( @@ -118,6 +122,26 @@ request_config() -> )} ]. +webhook_creation_opts() -> + [ + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ]. + +unsupported_opts() -> + [ + enable_batch, + batch_size, + batch_time + ]. 
+ %%====================================================================================== type_field() -> diff --git a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl index dca14b829..99d5af447 100644 --- a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl @@ -44,6 +44,9 @@ init_per_testcase(t_get_basic_usage_info_1, Config) -> {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000), setup_fake_telemetry_data(), Config; +init_per_testcase(t_update_ssl_conf, Config) -> + Path = [bridges, <<"mqtt">>, <<"ssl_update_test">>], + [{config_path, Path} | Config]; init_per_testcase(_TestCase, Config) -> {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000), Config. @@ -63,6 +66,9 @@ end_per_testcase(t_get_basic_usage_info_1, _Config) -> ok = emqx_config:put([bridges], #{}), ok = emqx_config:put_raw([bridges], #{}), ok; +end_per_testcase(t_update_ssl_conf, Config) -> + Path = proplists:get_value(config_path, Config), + emqx:remove_config(Path); end_per_testcase(_TestCase, _Config) -> ok. @@ -89,36 +95,29 @@ t_get_basic_usage_info_1(_Config) -> ). 
setup_fake_telemetry_data() -> - ConnectorConf = - #{ - <<"connectors">> => - #{ - <<"mqtt">> => #{ - <<"my_mqtt_connector">> => - #{server => "127.0.0.1:1883"}, - <<"my_mqtt_connector2">> => - #{server => "127.0.0.1:1884"} - } - } - }, MQTTConfig1 = #{ - connector => <<"mqtt:my_mqtt_connector">>, + server => "127.0.0.1:1883", enable => true, - direction => ingress, - remote_topic => <<"aws/#">>, - remote_qos => 1 + ingress => #{ + remote => #{ + topic => <<"aws/#">>, + qos => 1 + } + } }, MQTTConfig2 = #{ - connector => <<"mqtt:my_mqtt_connector2">>, + server => "127.0.0.1:1884", enable => true, - direction => ingress, - remote_topic => <<"$bridges/mqtt:some_bridge_in">>, - remote_qos => 1 + ingress => #{ + remote => #{ + topic => <<"$bridges/mqtt:some_bridge_in">>, + qos => 1 + } + } }, HTTPConfig = #{ url => <<"http://localhost:9901/messages/${topic}">>, enable => true, - direction => egress, local_topic => "emqx_webhook/#", method => post, body => <<"${payload}">>, @@ -143,7 +142,6 @@ setup_fake_telemetry_data() -> } }, Opts = #{raw_with_default => true}, - ok = emqx_common_test_helpers:load_config(emqx_connector_schema, ConnectorConf, Opts), ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts), ok = snabbkaffe:start_trace(), @@ -157,82 +155,30 @@ setup_fake_telemetry_data() -> ok = snabbkaffe:stop(), ok. 
-t_update_ssl_conf(_) -> - Path = [bridges, <<"mqtt">>, <<"ssl_update_test">>], +t_update_ssl_conf(Config) -> + Path = proplists:get_value(config_path, Config), EnableSSLConf = #{ - <<"connector">> => + <<"bridge_mode">> => false, + <<"clean_start">> => true, + <<"keepalive">> => <<"60s">>, + <<"mode">> => <<"cluster_shareload">>, + <<"proto_ver">> => <<"v4">>, + <<"server">> => <<"127.0.0.1:1883">>, + <<"ssl">> => #{ - <<"bridge_mode">> => false, - <<"clean_start">> => true, - <<"keepalive">> => <<"60s">>, - <<"mode">> => <<"cluster_shareload">>, - <<"proto_ver">> => <<"v4">>, - <<"server">> => <<"127.0.0.1:1883">>, - <<"ssl">> => - #{ - <<"cacertfile">> => cert_file("cafile"), - <<"certfile">> => cert_file("certfile"), - <<"enable">> => true, - <<"keyfile">> => cert_file("keyfile"), - <<"verify">> => <<"verify_peer">> - } - }, - <<"direction">> => <<"ingress">>, - <<"local_qos">> => 1, - <<"payload">> => <<"${payload}">>, - <<"remote_qos">> => 1, - <<"remote_topic">> => <<"t/#">>, - <<"retain">> => false + <<"cacertfile">> => cert_file("cafile"), + <<"certfile">> => cert_file("certfile"), + <<"enable">> => true, + <<"keyfile">> => cert_file("keyfile"), + <<"verify">> => <<"verify_peer">> + } }, - - emqx:update_config(Path, EnableSSLConf), - ?assertMatch({ok, [_, _, _]}, list_pem_dir(Path)), - NoSSLConf = #{ - <<"connector">> => - #{ - <<"bridge_mode">> => false, - <<"clean_start">> => true, - <<"keepalive">> => <<"60s">>, - <<"max_inflight">> => 32, - <<"mode">> => <<"cluster_shareload">>, - <<"password">> => <<>>, - <<"proto_ver">> => <<"v4">>, - <<"reconnect_interval">> => <<"15s">>, - <<"replayq">> => - #{<<"offload">> => false, <<"seg_bytes">> => <<"100MB">>}, - <<"retry_interval">> => <<"15s">>, - <<"server">> => <<"127.0.0.1:1883">>, - <<"ssl">> => - #{ - <<"ciphers">> => <<>>, - <<"depth">> => 10, - <<"enable">> => false, - <<"reuse_sessions">> => true, - <<"secure_renegotiate">> => true, - <<"user_lookup_fun">> => <<"emqx_tls_psk:lookup">>, - <<"verify">> 
=> <<"verify_peer">>, - <<"versions">> => - [ - <<"tlsv1.3">>, - <<"tlsv1.2">>, - <<"tlsv1.1">>, - <<"tlsv1">> - ] - }, - <<"username">> => <<>> - }, - <<"direction">> => <<"ingress">>, - <<"enable">> => true, - <<"local_qos">> => 1, - <<"payload">> => <<"${payload}">>, - <<"remote_qos">> => 1, - <<"remote_topic">> => <<"t/#">>, - <<"retain">> => false - }, - - emqx:update_config(Path, NoSSLConf), + {ok, _} = emqx:update_config(Path, EnableSSLConf), + {ok, Certs} = list_pem_dir(Path), + ?assertMatch([_, _, _], Certs), + NoSSLConf = EnableSSLConf#{<<"ssl">> := #{<<"enable">> => false}}, + {ok, _} = emqx:update_config(Path, NoSSLConf), ?assertMatch({error, not_dir}, list_pem_dir(Path)), - emqx:remove_config(Path), ok. list_pem_dir(Path) -> diff --git a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl index c048a13fe..c0a58abcc 100644 --- a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl @@ -24,7 +24,7 @@ -include_lib("common_test/include/ct.hrl"). -define(CONF_DEFAULT, <<"bridges: {}">>). -define(BRIDGE_TYPE, <<"webhook">>). --define(BRIDGE_NAME, <<"test_bridge">>). +-define(BRIDGE_NAME, (atom_to_binary(?FUNCTION_NAME))). -define(URL(PORT, PATH), list_to_binary( io_lib:format( @@ -61,14 +61,18 @@ init_per_suite(Config) -> _ = application:stop(emqx_resource), _ = application:stop(emqx_connector), ok = emqx_common_test_helpers:start_apps( - [emqx_bridge, emqx_dashboard], + [emqx_rule_engine, emqx_bridge, emqx_dashboard], fun set_special_configs/1 ), + ok = emqx_common_test_helpers:load_config( + emqx_rule_engine_schema, + <<"rule_engine {rules {}}">> + ), ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?CONF_DEFAULT), Config. end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_dashboard]), + emqx_common_test_helpers:stop_apps([emqx_rule_engine, emqx_bridge, emqx_dashboard]), ok. 
set_special_configs(emqx_dashboard) -> @@ -78,8 +82,12 @@ set_special_configs(_) -> init_per_testcase(_, Config) -> {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000), - Config. -end_per_testcase(_, _Config) -> + {Port, Sock, Acceptor} = start_http_server(fun handle_fun_200_ok/2), + [{port, Port}, {sock, Sock}, {acceptor, Acceptor} | Config]. +end_per_testcase(_, Config) -> + Sock = ?config(sock, Config), + Acceptor = ?config(acceptor, Config), + stop_http_server(Sock, Acceptor), clear_resources(), ok. @@ -95,31 +103,39 @@ clear_resources() -> %% HTTP server for testing %%------------------------------------------------------------------------------ start_http_server(HandleFun) -> + process_flag(trap_exit, true), Parent = self(), - spawn_link(fun() -> - {Port, Sock} = listen_on_random_port(), - Parent ! {port, Port}, - loop(Sock, HandleFun, Parent) + {Port, Sock} = listen_on_random_port(), + Acceptor = spawn_link(fun() -> + accept_loop(Sock, HandleFun, Parent) end), - receive - {port, Port} -> Port - after 2000 -> error({timeout, start_http_server}) - end. + timer:sleep(100), + {Port, Sock, Acceptor}. + +stop_http_server(Sock, Acceptor) -> + exit(Acceptor, kill), + gen_tcp:close(Sock). listen_on_random_port() -> Min = 1024, Max = 65000, + rand:seed(exsplus, erlang:timestamp()), Port = rand:uniform(Max - Min) + Min, - case gen_tcp:listen(Port, [{active, false}, {reuseaddr, true}, binary]) of + case + gen_tcp:listen(Port, [ + binary, {active, false}, {packet, raw}, {reuseaddr, true}, {backlog, 1000} + ]) + of {ok, Sock} -> {Port, Sock}; {error, eaddrinuse} -> listen_on_random_port() end. -loop(Sock, HandleFun, Parent) -> +accept_loop(Sock, HandleFun, Parent) -> + process_flag(trap_exit, true), {ok, Conn} = gen_tcp:accept(Sock), - Handler = spawn(fun() -> HandleFun(Conn, Parent) end), + Handler = spawn_link(fun() -> HandleFun(Conn, Parent) end), gen_tcp:controlling_process(Conn, Handler), - loop(Sock, HandleFun, Parent). 
+ accept_loop(Sock, HandleFun, Parent). make_response(CodeStr, Str) -> B = iolist_to_binary(Str), @@ -138,7 +154,9 @@ handle_fun_200_ok(Conn, Parent) -> Parent ! {http_server, received, Req}, gen_tcp:send(Conn, make_response("200 OK", "Request OK")), handle_fun_200_ok(Conn, Parent); - {error, closed} -> + {error, Reason} -> + ct:pal("the http handler recv error: ~p", [Reason]), + timer:sleep(100), gen_tcp:close(Conn) end. @@ -153,24 +171,25 @@ parse_http_request(ReqStr0) -> %% Testcases %%------------------------------------------------------------------------------ -t_http_crud_apis(_) -> - Port = start_http_server(fun handle_fun_200_ok/2), +t_http_crud_apis(Config) -> + Port = ?config(port, Config), %% assert we there's no bridges at first {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), %% then we add a webhook bridge, using POST %% POST /bridges/ will create a bridge URL1 = ?URL(Port, "path1"), + Name = ?BRIDGE_NAME, {ok, 201, Bridge} = request( post, uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) ), %ct:pal("---bridge: ~p", [Bridge]), #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], @@ -179,7 +198,7 @@ t_http_crud_apis(_) -> <<"url">> := URL1 } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), %% send an message to emqx and the message should be forwarded to the HTTP server Body = <<"my msg">>, emqx:publish(emqx_message:make(<<"emqx_webhook/1">>, Body)), @@ -203,12 +222,12 @@ t_http_crud_apis(_) -> {ok, 200, Bridge2} = request( put, uri(["bridges", BridgeID]), - ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, Name) ), ?assertMatch( #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> 
:= _, <<"node_status">> := [_ | _], @@ -225,7 +244,7 @@ t_http_crud_apis(_) -> [ #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], @@ -242,7 +261,7 @@ t_http_crud_apis(_) -> ?assertMatch( #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := _, <<"node_status">> := [_ | _], @@ -275,7 +294,7 @@ t_http_crud_apis(_) -> {ok, 404, ErrMsg2} = request( put, uri(["bridges", BridgeID]), - ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, Name) ), ?assertMatch( #{ @@ -286,29 +305,102 @@ t_http_crud_apis(_) -> ), ok. -t_start_stop_bridges(_) -> - lists:foreach( - fun(Type) -> - do_start_stop_bridges(Type) - end, - [node, cluster] - ). - -do_start_stop_bridges(Type) -> +t_check_dependent_actions_on_delete(Config) -> + Port = ?config(port, Config), %% assert we there's no bridges at first {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - Port = start_http_server(fun handle_fun_200_ok/2), + %% then we add a webhook bridge, using POST + %% POST /bridges/ will create a bridge + URL1 = ?URL(Port, "path1"), + Name = <<"t_http_crud_apis">>, + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), + {ok, 201, _} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) + ), + {ok, 201, Rule} = request( + post, + uri(["rules"]), + #{ + <<"name">> => <<"t_http_crud_apis">>, + <<"enable">> => true, + <<"actions">> => [BridgeID], + <<"sql">> => <<"SELECT * from \"t\"">> + } + ), + #{<<"id">> := RuleId} = jsx:decode(Rule), + %% delete the bridge should fail because there is a rule depenents on it + {ok, 403, _} = request(delete, uri(["bridges", BridgeID]), []), + %% delete the rule first + {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), + %% then delete the bridge is OK + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), 
[]), + {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + ok. + +t_cascade_delete_actions(Config) -> + Port = ?config(port, Config), + %% assert we there's no bridges at first + {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + + %% then we add a webhook bridge, using POST + %% POST /bridges/ will create a bridge + URL1 = ?URL(Port, "path1"), + Name = <<"t_http_crud_apis">>, + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), + {ok, 201, _} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) + ), + {ok, 201, Rule} = request( + post, + uri(["rules"]), + #{ + <<"name">> => <<"t_http_crud_apis">>, + <<"enable">> => true, + <<"actions">> => [BridgeID], + <<"sql">> => <<"SELECT * from \"t\"">> + } + ), + #{<<"id">> := RuleId} = jsx:decode(Rule), + %% delete the bridge will also delete the actions from the rules + {ok, 204, _} = request(delete, uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions", []), + {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []), + ?assertMatch( + #{ + <<"actions">> := [] + }, + jsx:decode(Rule1) + ), + {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), + ok. + +t_start_stop_bridges_node(Config) -> + do_start_stop_bridges(node, Config). + +t_start_stop_bridges_cluster(Config) -> + do_start_stop_bridges(cluster, Config). 
+ +do_start_stop_bridges(Type, Config) -> + %% assert we there's no bridges at first + {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + + Port = ?config(port, Config), URL1 = ?URL(Port, "abc"), + Name = atom_to_binary(Type), {ok, 201, Bridge} = request( post, uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) ), %ct:pal("the bridge ==== ~p", [Bridge]), #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := <<"connected">>, <<"node_status">> := [_ | _], @@ -316,11 +408,11 @@ do_start_stop_bridges(Type) -> <<"node_metrics">> := [_ | _], <<"url">> := URL1 } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), %% stop it {ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>), {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)), + ?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)), %% start again {ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), @@ -339,21 +431,22 @@ do_start_stop_bridges(Type) -> {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []). 
-t_enable_disable_bridges(_) -> +t_enable_disable_bridges(Config) -> %% assert we there's no bridges at first {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - Port = start_http_server(fun handle_fun_200_ok/2), + Name = ?BRIDGE_NAME, + Port = ?config(port, Config), URL1 = ?URL(Port, "abc"), {ok, 201, Bridge} = request( post, uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) ), %ct:pal("the bridge ==== ~p", [Bridge]), #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := <<"connected">>, <<"node_status">> := [_ | _], @@ -361,11 +454,11 @@ t_enable_disable_bridges(_) -> <<"node_metrics">> := [_ | _], <<"url">> := URL1 } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), %% disable it {ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>), {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)), + ?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)), %% enable again {ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), @@ -391,21 +484,22 @@ t_enable_disable_bridges(_) -> {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []). 
-t_reset_bridges(_) -> +t_reset_bridges(Config) -> %% assert we there's no bridges at first {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - Port = start_http_server(fun handle_fun_200_ok/2), + Name = ?BRIDGE_NAME, + Port = ?config(port, Config), URL1 = ?URL(Port, "abc"), {ok, 201, Bridge} = request( post, uri(["bridges"]), - ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME) + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) ), %ct:pal("the bridge ==== ~p", [Bridge]), #{ <<"type">> := ?BRIDGE_TYPE, - <<"name">> := ?BRIDGE_NAME, + <<"name">> := Name, <<"enable">> := true, <<"status">> := <<"connected">>, <<"node_status">> := [_ | _], @@ -413,7 +507,7 @@ t_reset_bridges(_) -> <<"node_metrics">> := [_ | _], <<"url">> := URL1 } = jsx:decode(Bridge), - BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME), + BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), {ok, 200, <<"Reset success">>} = request(put, uri(["bridges", BridgeID, "reset_metrics"]), []), %% delete the bridge diff --git a/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl new file mode 100644 index 000000000..c907205f1 --- /dev/null +++ b/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl @@ -0,0 +1,633 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. 
+%%-------------------------------------------------------------------- + +-module(emqx_bridge_mqtt_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-import(emqx_dashboard_api_test_helpers, [request/4, uri/1]). + +-include("emqx/include/emqx.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include("emqx_dashboard/include/emqx_dashboard.hrl"). + +%% output functions +-export([inspect/3]). + +-define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>). +-define(TYPE_MQTT, <<"mqtt">>). +-define(NAME_MQTT, <<"my_mqtt_bridge">>). +-define(BRIDGE_NAME_INGRESS, <<"ingress_mqtt_bridge">>). +-define(BRIDGE_NAME_EGRESS, <<"egress_mqtt_bridge">>). +-define(SERVER_CONF(Username), #{ + <<"server">> => <<"127.0.0.1:1883">>, + <<"username">> => Username, + <<"password">> => <<"">>, + <<"proto_ver">> => <<"v4">>, + <<"ssl">> => #{<<"enable">> => false} +}). + +-define(INGRESS_CONF, #{ + <<"remote">> => #{ + <<"topic">> => <<"remote_topic/#">>, + <<"qos">> => 2 + }, + <<"local">> => #{ + <<"topic">> => <<"local_topic/${topic}">>, + <<"qos">> => <<"${qos}">>, + <<"payload">> => <<"${payload}">>, + <<"retain">> => <<"${retain}">> + } +}). + +-define(EGRESS_CONF, #{ + <<"local">> => #{ + <<"topic">> => <<"local_topic/#">> + }, + <<"remote">> => #{ + <<"topic">> => <<"remote_topic/${topic}">>, + <<"payload">> => <<"${payload}">>, + <<"qos">> => <<"${qos}">>, + <<"retain">> => <<"${retain}">> + } +}). + +inspect(Selected, _Envs, _Args) -> + persistent_term:put(?MODULE, #{inspect => Selected}). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +groups() -> + []. + +suite() -> + [{timetrap, {seconds, 30}}]. 
+ +init_per_suite(Config) -> + _ = application:load(emqx_conf), + %% some testcases (may from other app) already get emqx_connector started + _ = application:stop(emqx_resource), + _ = application:stop(emqx_connector), + ok = emqx_common_test_helpers:start_apps( + [ + emqx_rule_engine, + emqx_bridge, + emqx_dashboard + ], + fun set_special_configs/1 + ), + ok = emqx_common_test_helpers:load_config( + emqx_rule_engine_schema, + <<"rule_engine {rules {}}">> + ), + ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?BRIDGE_CONF_DEFAULT), + Config. + +end_per_suite(_Config) -> + emqx_common_test_helpers:stop_apps([ + emqx_rule_engine, + emqx_bridge, + emqx_dashboard + ]), + ok. + +set_special_configs(emqx_dashboard) -> + emqx_dashboard_api_test_helpers:set_default_config(<<"connector_admin">>); +set_special_configs(_) -> + ok. + +init_per_testcase(_, Config) -> + {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000), + Config. +end_per_testcase(_, _Config) -> + clear_resources(), + ok. + +clear_resources() -> + lists:foreach( + fun(#{id := Id}) -> + ok = emqx_rule_engine:delete_rule(Id) + end, + emqx_rule_engine:get_rules() + ), + lists:foreach( + fun(#{type := Type, name := Name}) -> + {ok, _} = emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ +t_mqtt_conn_bridge_ingress(_) -> + User1 = <<"user1">>, + %% create an MQTT bridge, using POST + {ok, 201, Bridge} = request( + post, + uri(["bridges"]), + ?SERVER_CONF(User1)#{ + <<"type">> => ?TYPE_MQTT, + <<"name">> => ?BRIDGE_NAME_INGRESS, + <<"ingress">> => ?INGRESS_CONF + } + ), + #{ + <<"type">> := ?TYPE_MQTT, + <<"name">> := ?BRIDGE_NAME_INGRESS + } = jsx:decode(Bridge), + BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS), + + %% we now test if the bridge works as expected + RemoteTopic = <<"remote_topic/1">>, + LocalTopic = <<"local_topic/", RemoteTopic/binary>>, + Payload = <<"hello">>, + emqx:subscribe(LocalTopic), + timer:sleep(100), + %% PUBLISH a message to the 'remote' broker, as we have only one broker, + %% the remote broker is also the local one. + emqx:publish(emqx_message:make(RemoteTopic, Payload)), + %% we should receive a message on the local broker, with specified topic + ?assert( + receive + {deliver, LocalTopic, #message{payload = Payload}} -> + ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]), + true; + Msg -> + ct:pal("Msg: ~p", [Msg]), + false + after 100 -> + false + end + ), + + %% verify the metrics of the bridge + {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDIngress]), []), + ?assertMatch( + #{ + <<"metrics">> := #{<<"matched">> := 0, <<"received">> := 1}, + <<"node_metrics">> := + [ + #{ + <<"node">> := _, + <<"metrics">> := + #{<<"matched">> := 0, <<"received">> := 1} + } + ] + }, + jsx:decode(BridgeStr) + ), + + %% delete the bridge + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []), + {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + + ok. 
+ +t_mqtt_conn_bridge_egress(_) -> + %% then we add a mqtt connector, using POST + User1 = <<"user1">>, + + {ok, 201, Bridge} = request( + post, + uri(["bridges"]), + ?SERVER_CONF(User1)#{ + <<"type">> => ?TYPE_MQTT, + <<"name">> => ?BRIDGE_NAME_EGRESS, + <<"egress">> => ?EGRESS_CONF + } + ), + #{ + <<"type">> := ?TYPE_MQTT, + <<"name">> := ?BRIDGE_NAME_EGRESS + } = jsx:decode(Bridge), + BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS), + %% we now test if the bridge works as expected + LocalTopic = <<"local_topic/1">>, + RemoteTopic = <<"remote_topic/", LocalTopic/binary>>, + Payload = <<"hello">>, + emqx:subscribe(RemoteTopic), + timer:sleep(100), + %% PUBLISH a message to the 'local' broker, as we have only one broker, + %% the remote broker is also the local one. + emqx:publish(emqx_message:make(LocalTopic, Payload)), + + %% we should receive a message on the "remote" broker, with specified topic + ?assert( + receive + {deliver, RemoteTopic, #message{payload = Payload}} -> + ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]), + true; + Msg -> + ct:pal("Msg: ~p", [Msg]), + false + after 100 -> + false + end + ), + + %% verify the metrics of the bridge + {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), + ?assertMatch( + #{ + <<"metrics">> := #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, + <<"node_metrics">> := + [ + #{ + <<"node">> := _, + <<"metrics">> := + #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0} + } + ] + }, + jsx:decode(BridgeStr) + ), + + %% delete the bridge + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), + {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + ok. 
+ +t_ingress_mqtt_bridge_with_rules(_) -> + {ok, 201, _} = request( + post, + uri(["bridges"]), + ?SERVER_CONF(<<"user1">>)#{ + <<"type">> => ?TYPE_MQTT, + <<"name">> => ?BRIDGE_NAME_INGRESS, + <<"ingress">> => ?INGRESS_CONF + } + ), + BridgeIDIngress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_INGRESS), + + {ok, 201, Rule} = request( + post, + uri(["rules"]), + #{ + <<"name">> => <<"A_rule_get_messages_from_a_source_mqtt_bridge">>, + <<"enable">> => true, + <<"actions">> => [#{<<"function">> => "emqx_bridge_mqtt_SUITE:inspect"}], + <<"sql">> => <<"SELECT * from \"$bridges/", BridgeIDIngress/binary, "\"">> + } + ), + #{<<"id">> := RuleId} = jsx:decode(Rule), + + %% we now test if the bridge works as expected + + RemoteTopic = <<"remote_topic/1">>, + LocalTopic = <<"local_topic/", RemoteTopic/binary>>, + Payload = <<"hello">>, + emqx:subscribe(LocalTopic), + timer:sleep(100), + %% PUBLISH a message to the 'remote' broker, as we have only one broker, + %% the remote broker is also the local one. 
+ emqx:publish(emqx_message:make(RemoteTopic, Payload)), + %% we should receive a message on the local broker, with specified topic + ?assert( + receive + {deliver, LocalTopic, #message{payload = Payload}} -> + ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]), + true; + Msg -> + ct:pal("Msg: ~p", [Msg]), + false + after 100 -> + false + end + ), + %% and also the rule should be matched, with matched + 1: + {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []), + {ok, 200, Metrics} = request(get, uri(["rules", RuleId, "metrics"]), []), + ?assertMatch(#{<<"id">> := RuleId}, jsx:decode(Rule1)), + ?assertMatch( + #{ + <<"metrics">> := #{ + <<"matched">> := 1, + <<"passed">> := 1, + <<"failed">> := 0, + <<"failed.exception">> := 0, + <<"failed.no_result">> := 0, + <<"matched.rate">> := _, + <<"matched.rate.max">> := _, + <<"matched.rate.last5m">> := _, + <<"actions.total">> := 1, + <<"actions.success">> := 1, + <<"actions.failed">> := 0, + <<"actions.failed.out_of_service">> := 0, + <<"actions.failed.unknown">> := 0 + } + }, + jsx:decode(Metrics) + ), + + %% we also check if the actions of the rule is triggered + ?assertMatch( + #{ + inspect := #{ + event := <<"$bridges/mqtt", _/binary>>, + id := MsgId, + payload := Payload, + topic := RemoteTopic, + qos := 0, + dup := false, + retain := false, + pub_props := #{}, + timestamp := _ + } + } when is_binary(MsgId), + persistent_term:get(?MODULE) + ), + + %% verify the metrics of the bridge + {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDIngress]), []), + ?assertMatch( + #{ + <<"metrics">> := #{<<"matched">> := 0, <<"received">> := 1}, + <<"node_metrics">> := + [ + #{ + <<"node">> := _, + <<"metrics">> := + #{<<"matched">> := 0, <<"received">> := 1} + } + ] + }, + jsx:decode(BridgeStr) + ), + + {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []). 
+ +t_egress_mqtt_bridge_with_rules(_) -> + {ok, 201, Bridge} = request( + post, + uri(["bridges"]), + ?SERVER_CONF(<<"user1">>)#{ + <<"type">> => ?TYPE_MQTT, + <<"name">> => ?BRIDGE_NAME_EGRESS, + <<"egress">> => ?EGRESS_CONF + } + ), + #{<<"type">> := ?TYPE_MQTT, <<"name">> := ?BRIDGE_NAME_EGRESS} = jsx:decode(Bridge), + BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS), + + {ok, 201, Rule} = request( + post, + uri(["rules"]), + #{ + <<"name">> => <<"A_rule_send_messages_to_a_sink_mqtt_bridge">>, + <<"enable">> => true, + <<"actions">> => [BridgeIDEgress], + <<"sql">> => <<"SELECT * from \"t/1\"">> + } + ), + #{<<"id">> := RuleId} = jsx:decode(Rule), + + %% we now test if the bridge works as expected + LocalTopic = <<"local_topic/1">>, + RemoteTopic = <<"remote_topic/", LocalTopic/binary>>, + Payload = <<"hello">>, + emqx:subscribe(RemoteTopic), + timer:sleep(100), + %% PUBLISH a message to the 'local' broker, as we have only one broker, + %% the remote broker is also the local one. + emqx:publish(emqx_message:make(LocalTopic, Payload)), + %% we should receive a message on the "remote" broker, with specified topic + ?assert( + receive + {deliver, RemoteTopic, #message{payload = Payload}} -> + ct:pal("remote broker got message: ~p on topic ~p", [Payload, RemoteTopic]), + true; + Msg -> + ct:pal("Msg: ~p", [Msg]), + false + after 100 -> + false + end + ), + emqx:unsubscribe(RemoteTopic), + + %% PUBLISH a message to the rule. 
+ Payload2 = <<"hi">>, + RuleTopic = <<"t/1">>, + RemoteTopic2 = <<"remote_topic/", RuleTopic/binary>>, + emqx:subscribe(RemoteTopic2), + timer:sleep(100), + emqx:publish(emqx_message:make(RuleTopic, Payload2)), + {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []), + ?assertMatch(#{<<"id">> := RuleId, <<"name">> := _}, jsx:decode(Rule1)), + {ok, 200, Metrics} = request(get, uri(["rules", RuleId, "metrics"]), []), + ?assertMatch( + #{ + <<"metrics">> := #{ + <<"matched">> := 1, + <<"passed">> := 1, + <<"failed">> := 0, + <<"failed.exception">> := 0, + <<"failed.no_result">> := 0, + <<"matched.rate">> := _, + <<"matched.rate.max">> := _, + <<"matched.rate.last5m">> := _, + <<"actions.total">> := 1, + <<"actions.success">> := 1, + <<"actions.failed">> := 0, + <<"actions.failed.out_of_service">> := 0, + <<"actions.failed.unknown">> := 0 + } + }, + jsx:decode(Metrics) + ), + + %% we should receive a message on the "remote" broker, with specified topic + ?assert( + receive + {deliver, RemoteTopic2, #message{payload = Payload2}} -> + ct:pal("remote broker got message: ~p on topic ~p", [Payload2, RemoteTopic2]), + true; + Msg -> + ct:pal("Msg: ~p", [Msg]), + false + after 100 -> + false + end + ), + + %% verify the metrics of the bridge + {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), + ?assertMatch( + #{ + <<"metrics">> := #{<<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0}, + <<"node_metrics">> := + [ + #{ + <<"node">> := _, + <<"metrics">> := #{ + <<"matched">> := 2, <<"success">> := 2, <<"failed">> := 0 + } + } + ] + }, + jsx:decode(BridgeStr) + ), + + {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []). 
+ +t_mqtt_conn_bridge_egress_reconnect(_) -> + %% then we add a mqtt connector, using POST + User1 = <<"user1">>, + + {ok, 201, Bridge} = request( + post, + uri(["bridges"]), + ?SERVER_CONF(User1)#{ + <<"type">> => ?TYPE_MQTT, + <<"name">> => ?BRIDGE_NAME_EGRESS, + <<"egress">> => ?EGRESS_CONF, + %% to make it reconnect quickly + <<"reconnect_interval">> => <<"1s">>, + <<"resource_opts">> => #{ + <<"worker_pool_size">> => 2, + <<"enable_queue">> => true, + <<"query_mode">> => <<"sync">>, + %% to make it check the healthy quickly + <<"health_check_interval">> => <<"0.5s">> + } + } + ), + #{ + <<"type">> := ?TYPE_MQTT, + <<"name">> := ?BRIDGE_NAME_EGRESS + } = jsx:decode(Bridge), + BridgeIDEgress = emqx_bridge_resource:bridge_id(?TYPE_MQTT, ?BRIDGE_NAME_EGRESS), + %% we now test if the bridge works as expected + LocalTopic = <<"local_topic/1">>, + RemoteTopic = <<"remote_topic/", LocalTopic/binary>>, + Payload0 = <<"hello">>, + emqx:subscribe(RemoteTopic), + timer:sleep(100), + %% PUBLISH a message to the 'local' broker, as we have only one broker, + %% the remote broker is also the local one. 
+ emqx:publish(emqx_message:make(LocalTopic, Payload0)), + + %% we should receive a message on the "remote" broker, with specified topic + assert_mqtt_msg_received(RemoteTopic, Payload0), + + %% verify the metrics of the bridge + {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), + ?assertMatch( + #{ + <<"metrics">> := #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, + <<"node_metrics">> := + [ + #{ + <<"node">> := _, + <<"metrics">> := + #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0} + } + ] + }, + jsx:decode(BridgeStr) + ), + + %% stop the listener 1883 to make the bridge disconnected + ok = emqx_listeners:stop_listener('tcp:default'), + ct:sleep(1500), + + %% PUBLISH 2 messages to the 'local' broker, the message should + ok = snabbkaffe:start_trace(), + {ok, SRef} = + snabbkaffe:subscribe( + fun + ( + #{ + ?snk_kind := call_query_enter, + query := {query, _From, {send_message, #{}}, _Sent} + } + ) -> + true; + (_) -> + false + end, + _NEvents = 2, + _Timeout = 1_000 + ), + Payload1 = <<"hello2">>, + Payload2 = <<"hello3">>, + emqx:publish(emqx_message:make(LocalTopic, Payload1)), + emqx:publish(emqx_message:make(LocalTopic, Payload2)), + {ok, _} = snabbkaffe:receive_events(SRef), + ok = snabbkaffe:stop(), + + %% verify the metrics of the bridge, the message should be queued + {ok, 200, BridgeStr1} = request(get, uri(["bridges", BridgeIDEgress]), []), + %% matched >= 3 because of possible retries. 
+ ?assertMatch( + #{ + <<"status">> := Status, + <<"metrics">> := #{ + <<"matched">> := Matched, <<"success">> := 1, <<"failed">> := 0, <<"queuing">> := 2 + } + } when Matched >= 3 andalso (Status == <<"connected">> orelse Status == <<"connecting">>), + jsx:decode(BridgeStr1) + ), + + %% start the listener 1883 to make the bridge reconnected + ok = emqx_listeners:start_listener('tcp:default'), + timer:sleep(1500), + %% verify the metrics of the bridge, the 2 queued messages should have been sent + {ok, 200, BridgeStr2} = request(get, uri(["bridges", BridgeIDEgress]), []), + %% matched >= 3 because of possible retries. + ?assertMatch( + #{ + <<"status">> := <<"connected">>, + <<"metrics">> := #{ + <<"matched">> := Matched, + <<"success">> := 3, + <<"failed">> := 0, + <<"queuing">> := 0, + <<"retried">> := _ + } + } when Matched >= 3, + jsx:decode(BridgeStr2) + ), + %% also verify the 2 messages have been sent to the remote broker + assert_mqtt_msg_received(RemoteTopic, Payload1), + assert_mqtt_msg_received(RemoteTopic, Payload2), + %% delete the bridge + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), + {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + ok. + +assert_mqtt_msg_received(Topic, Payload) -> + ?assert( + receive + {deliver, Topic, #message{payload = Payload}} -> + ct:pal("Got mqtt message: ~p on topic ~p", [Payload, Topic]), + true; + Msg -> + ct:pal("Unexpected Msg: ~p", [Msg]), + false + after 100 -> + false + end + ). + +request(Method, Url, Body) -> + request(<<"connector_admin">>, Method, Url, Body). diff --git a/apps/emqx_bridge/test/emqx_bridge_mqtt_config_tests.erl b/apps/emqx_bridge/test/emqx_bridge_mqtt_config_tests.erl new file mode 100644 index 000000000..fa3fff7d9 --- /dev/null +++ b/apps/emqx_bridge/test/emqx_bridge_mqtt_config_tests.erl @@ -0,0 +1,229 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_mqtt_config_tests). + +-include_lib("eunit/include/eunit.hrl"). + +empty_config_test() -> + Conf1 = #{<<"bridges">> => #{}}, + Conf2 = #{<<"bridges">> => #{<<"webhook">> => #{}}}, + ?assertEqual(Conf1, check(Conf1)), + ?assertEqual(Conf2, check(Conf2)), + ok. + +%% ensure webhook config can be checked +webhook_config_test() -> + Conf = parse(webhook_v5011_hocon()), + ?assertMatch( + #{ + <<"bridges">> := + #{ + <<"webhook">> := #{ + <<"the_name">> := + #{ + <<"method">> := get, + <<"body">> := <<"${payload}">> + } + } + } + }, + check(Conf) + ), + ok. + +up(#{<<"bridges">> := Bridges0} = Conf0) -> + Bridges = up(Bridges0), + Conf0#{<<"bridges">> := Bridges}; +up(#{<<"mqtt">> := MqttBridges0} = Bridges) -> + MqttBridges = emqx_bridge_mqtt_config:upgrade_pre_ee(MqttBridges0), + Bridges#{<<"mqtt">> := MqttBridges}. + +parse(HOCON) -> + {ok, Conf} = hocon:binary(HOCON), + Conf. + +mqtt_config_test_() -> + Conf0 = mqtt_v5011_hocon(), + Conf1 = mqtt_v5011_full_hocon(), + [ + {Tag, fun() -> + Parsed = parse(Conf), + Upgraded = up(Parsed), + Checked = check(Upgraded), + assert_upgraded(Checked) + end} + || {Tag, Conf} <- [{"minimum", Conf0}, {"full", Conf1}] + ]. 
+ +assert_upgraded(#{<<"bridges">> := Bridges}) -> + assert_upgraded(Bridges); +assert_upgraded(#{<<"mqtt">> := Mqtt}) -> + assert_upgraded(Mqtt); +assert_upgraded(#{<<"bridge_one">> := Map}) -> + assert_upgraded1(Map); +assert_upgraded(#{<<"bridge_two">> := Map}) -> + assert_upgraded1(Map). + +assert_upgraded1(Map) -> + ?assertNot(maps:is_key(<<"connector">>, Map)), + ?assertNot(maps:is_key(<<"direction">>, Map)), + ?assert(maps:is_key(<<"server">>, Map)), + ?assert(maps:is_key(<<"ssl">>, Map)). + +check(Conf) when is_map(Conf) -> + hocon_tconf:check_plain(emqx_bridge_schema, Conf). + +%% erlfmt-ignore +%% this is config generated from v5.0.11 +webhook_v5011_hocon() -> +""" +bridges{ + webhook { + the_name{ + body = \"${payload}\" + connect_timeout = \"5s\" + enable_pipelining = 100 + headers {\"content-type\" = \"application/json\"} + max_retries = 3 + method = \"get\" + pool_size = 4 + request_timeout = \"5s\" + ssl {enable = false, verify = \"verify_peer\"} + url = \"http://localhost:8080\" + } + } +} +""". + +%% erlfmt-ignore +%% this is a generated from v5.0.11 +mqtt_v5011_hocon() -> +""" +bridges { + mqtt { + bridge_one { + connector { + bridge_mode = false + clean_start = true + keepalive = \"60s\" + mode = cluster_shareload + proto_ver = \"v4\" + server = \"localhost:1883\" + ssl {enable = false, verify = \"verify_peer\"} + } + direction = egress + enable = true + payload = \"${payload}\" + remote_qos = 1 + remote_topic = \"tttttttttt\" + retain = false + } + bridge_two { + connector { + bridge_mode = false + clean_start = true + keepalive = \"60s\" + mode = \"cluster_shareload\" + proto_ver = \"v4\" + server = \"localhost:1883\" + ssl {enable = false, verify = \"verify_peer\"} + } + direction = ingress + enable = true + local_qos = 1 + payload = \"${payload}\" + remote_qos = 1 + remote_topic = \"tttttttt/#\" + retain = false + } + } +} +""". 
+ +%% erlfmt-ignore +%% a more complete version +mqtt_v5011_full_hocon() -> +""" +bridges { + mqtt { + bridge_one { + connector { + bridge_mode = false + clean_start = true + keepalive = \"60s\" + max_inflight = 32 + mode = \"cluster_shareload\" + password = \"\" + proto_ver = \"v5\" + reconnect_interval = \"15s\" + replayq {offload = false, seg_bytes = \"100MB\"} + retry_interval = \"12s\" + server = \"localhost:1883\" + ssl { + ciphers = \"\" + depth = 10 + enable = false + reuse_sessions = true + secure_renegotiate = true + user_lookup_fun = \"emqx_tls_psk:lookup\" + verify = \"verify_peer\" + versions = [\"tlsv1.3\", \"tlsv1.2\", \"tlsv1.1\", \"tlsv1\"] + } + username = \"\" + } + direction = \"ingress\" + enable = true + local_qos = 1 + payload = \"${payload}\" + remote_qos = 1 + remote_topic = \"tttt/a\" + retain = false + } + bridge_two { + connector { + bridge_mode = false + clean_start = true + keepalive = \"60s\" + max_inflight = 32 + mode = \"cluster_shareload\" + password = \"\" + proto_ver = \"v4\" + reconnect_interval = \"15s\" + replayq {offload = false, seg_bytes = \"100MB\"} + retry_interval = \"44s\" + server = \"localhost:1883\" + ssl { + ciphers = \"\" + depth = 10 + enable = false + reuse_sessions = true + secure_renegotiate = true + user_lookup_fun = \"emqx_tls_psk:lookup\" + verify = verify_peer + versions = [\"tlsv1.3\", \"tlsv1.2\", \"tlsv1.1\", \"tlsv1\"] + } + username = \"\" + } + direction = egress + enable = true + payload = \"${payload.x}\" + remote_qos = 1 + remote_topic = \"remotetopic/1\" + retain = false + } + } +} +""". 
diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index bc8b52702..25aa82d76 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -165,7 +165,6 @@ gen_schema_json(Dir, I18nFile, SchemaModule) -> gen_api_schema_json(Dir, I18nFile, Lang) -> emqx_dashboard:init_i18n(I18nFile, Lang), gen_api_schema_json_hotconf(Dir, Lang), - gen_api_schema_json_connector(Dir, Lang), gen_api_schema_json_bridge(Dir, Lang), emqx_dashboard:clear_i18n(). @@ -174,11 +173,6 @@ gen_api_schema_json_hotconf(Dir, Lang) -> File = schema_filename(Dir, "hot-config-schema-", Lang), ok = do_gen_api_schema_json(File, emqx_mgmt_api_configs, SchemaInfo). -gen_api_schema_json_connector(Dir, Lang) -> - SchemaInfo = #{title => <<"EMQX Connector API Schema">>, version => <<"0.1.0">>}, - File = schema_filename(Dir, "connector-api-", Lang), - ok = do_gen_api_schema_json(File, emqx_connector_api, SchemaInfo). - gen_api_schema_json_bridge(Dir, Lang) -> SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>}, File = schema_filename(Dir, "bridge-api-", Lang), @@ -399,6 +393,10 @@ typename_to_spec("failure_strategy()", _Mod) -> #{type => enum, symbols => [force, drop, throw]}; typename_to_spec("initial()", _Mod) -> #{type => string}; +typename_to_spec("map()", _Mod) -> + #{type => object}; +typename_to_spec("#{" ++ _, Mod) -> + typename_to_spec("map()", Mod); typename_to_spec(Name, Mod) -> Spec = range(Name), Spec1 = remote_module_type(Spec, Name, Mod), diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index 2080d32cb..80bb676c8 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -60,7 +60,6 @@ emqx_exhook_schema, emqx_psk_schema, emqx_limiter_schema, - emqx_connector_schema, emqx_slow_subs_schema ]). 
diff --git a/apps/emqx_connector/i18n/emqx_connector_mqtt.conf b/apps/emqx_connector/i18n/emqx_connector_mqtt.conf index 1005d68dc..5ade54670 100644 --- a/apps/emqx_connector/i18n/emqx_connector_mqtt.conf +++ b/apps/emqx_connector/i18n/emqx_connector_mqtt.conf @@ -1,5 +1,4 @@ emqx_connector_mqtt { - num_of_bridges { desc { en: "The current number of bridges that are using this connector." diff --git a/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf b/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf index a1268c74b..d7e6cc033 100644 --- a/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf +++ b/apps/emqx_connector/i18n/emqx_connector_mqtt_schema.conf @@ -1,4 +1,85 @@ emqx_connector_mqtt_schema { + ingress_desc { + desc { + en: """The ingress config defines how this bridge receive messages from the remote MQTT broker, and then + send them to the local broker.
+ Template with variables is allowed in 'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'.
+ NOTE: if this bridge is used as the input of a rule, and also 'local.topic' is + configured, then messages got from the remote broker will be sent to both the 'local.topic' and + the rule.""" + zh: """入口配置定义了该桥接如何从远程 MQTT Broker 接收消息,然后将消息发送到本地 Broker。
+ 以下字段中允许使用带有变量的模板:'remote.qos', 'local.topic', 'local.qos', 'local.retain', 'local.payload'。
+ 注意:如果此桥接被用作规则的输入,并且配置了 'local.topic',则从远程代理获取的消息将同时被发送到 'local.topic' 和规则。 + """ + } + label: { + en: "Ingress Configs" + zh: "入方向配置" + } + } + + egress_desc { + desc { + en: """The egress config defines how this bridge forwards messages from the local broker to the remote broker.
+Template with variables is allowed in 'remote.topic', 'local.qos', 'local.retain', 'local.payload'.
+NOTE: if this bridge is used as the action of a rule, and also 'local.topic' +is configured, then both the data got from the rule and the MQTT messages that matches +'local.topic' will be forwarded.""" + zh: """出口配置定义了该桥接如何将消息从本地 Broker 转发到远程 Broker。 +以下字段中允许使用带有变量的模板:'remote.topic', 'local.qos', 'local.retain', 'local.payload'。
+注意:如果此桥接被用作规则的动作,并且配置了 'local.topic',则从规则输出的数据以及匹配到 'local.topic' 的 MQTT 消息都会被转发。 + """ + } + label: { + en: "Egress Configs" + zh: "出方向配置" + } + } + + ingress_remote { + desc { + en: """The configs about subscribing to the remote broker.""" + zh: """订阅远程 Broker 相关的配置。""" + } + label: { + en: "Remote Configs" + zh: "远程配置" + } + } + + ingress_local { + desc { + en: """The configs about sending message to the local broker.""" + zh: """发送消息到本地 Broker 相关的配置。""" + } + label: { + en: "Local Configs" + zh: "本地配置" + } + } + + egress_remote { + desc { + en: """The configs about sending message to the remote broker.""" + zh: """发送消息到远程 Broker 相关的配置。""" + } + label: { + en: "Remote Configs" + zh: "远程配置" + } + } + + egress_local { + desc { + en: """The configs about receiving messages from local broker.""" + zh: """如何从本地 Broker 接收消息相关的配置。""" + } + label: { + en: "Local Configs" + zh: "本地配置" + } + } + mode { desc { en: """ @@ -9,15 +90,15 @@ In 'cluster_shareload' mode, the incoming load from the remote broker is shared using shared subscription.
Note that the 'clientid' is suffixed by the node name, this is to avoid clientid conflicts between different nodes. And we can only use shared subscription -topic filters for remote_topic of ingress connections. +topic filters for remote.topic of ingress connections. """ zh: """ MQTT 桥的模式。
- cluster_shareload:在 emqx 集群的每个节点上创建一个 MQTT 连接。
在“cluster_shareload”模式下,来自远程代理的传入负载通过共享订阅的方式接收。
-请注意,clientid 以节点名称为后缀,这是为了避免不同节点之间的clientid冲突。 -而且对于入口连接的 remote_topic,我们只能使用共享订阅主题过滤器。 +请注意,clientid 以节点名称为后缀,这是为了避免不同节点之间的 clientid 冲突。 +而且对于入口连接的 remote.topic,我们只能使用共享订阅主题过滤器。 """ } label: { @@ -166,17 +247,6 @@ Template with variables is allowed. } } - ingress_hookpoint { - desc { - en: "The hook point will be triggered when there's any message received from the remote broker." - zh: "当从远程borker收到任何消息时,将触发钩子。" - } - label: { - en: "Hookpoint" - zh: "挂载点" - } - } - egress_local_topic { desc { en: "The local topic to be forwarded to the remote broker" @@ -222,59 +292,6 @@ Template with variables is allowed. } } - dir { - desc { - en: """ -The dir where the replayq file saved.
-Set to 'false' disables the replayq feature. -""" - zh: """ -replayq 文件保存的目录。
-设置为 'false' 会禁用 replayq 功能。 -""" - } - label: { - en: "Replyq file Save Dir" - zh: "Replyq 文件保存目录" - } - } - - seg_bytes { - desc { - en: """ -The size in bytes of a single segment.
-A segment is mapping to a file in the replayq dir. If the current segment is full, a new segment -(file) will be opened to write. -""" - zh: """ -单个段的大小(以字节为单位)。
-一个段映射到 replayq 目录中的一个文件。 如果当前段已满,则新段(文件)将被打开写入。 -""" - } - label: { - en: "Segment Size" - zh: "Segment 大小" - } - } - - offload { - desc { - en: """ -In offload mode, the disk queue is only used to offload queue tail segments.
-The messages are cached in the memory first, then it writes to the replayq files after the size of -the memory cache reaches 'seg_bytes'. -""" - zh: """ -在Offload模式下,磁盘队列仅用于卸载队列尾段。
-消息首先缓存在内存中,然后写入replayq文件。内存缓大小为“seg_bytes” 指定的值。 -""" - } - label: { - en: "Offload Mode" - zh: "Offload 模式" - } - } - retain { desc { en: """ @@ -309,66 +326,15 @@ Template with variables is allowed. } } - desc_connector { + server_configs { desc { - en: """Generic configuration for the connector.""" - zh: """连接器的通用配置。""" + en: """Configs related to the server.""" + zh: """服务器相关的配置。""" } label: { - en: "Connector Generic Configuration" - zh: "连接器通用配置。" + en: "Server Configs" + zh: "服务配置。" } } - desc_ingress { - desc { - en: """ -The ingress config defines how this bridge receive messages from the remote MQTT broker, and then send them to the local broker.
-Template with variables is allowed in 'local_topic', 'remote_qos', 'qos', 'retain', 'payload'.
-NOTE: if this bridge is used as the input of a rule (emqx rule engine), and also local_topic is configured, then messages got from the remote broker will be sent to both the 'local_topic' and the rule. -""" - zh: """ -Ingress 模式定义了这个 bridge 如何从远程 MQTT broker 接收消息,然后将它们发送到本地 broker 。
-允许带有的模板变量: 'local_topic'、'remote_qos'、'qos'、'retain'、'payload' 。
-注意:如果这个 bridge 被用作规则的输入(emqx 规则引擎),并且还配置了 local_topic,那么从远程 broker 获取的消息将同时被发送到 'local_topic' 和规则引擎。 -""" - } - label: { - en: "Ingress Config" - zh: "Ingress 模式配置" - } - } - - desc_egress { - desc { - en: """ -The egress config defines how this bridge forwards messages from the local broker to the remote broker.
-Template with variables is allowed in 'remote_topic', 'qos', 'retain', 'payload'.
-NOTE: if this bridge is used as the action of a rule (emqx rule engine), and also local_topic is configured, then both the data got from the rule and the MQTT messages that matches local_topic will be forwarded. -""" - zh: """ -Egress 模式定义了 bridge 如何将消息从本地 broker 转发到远程 broker。
-允许带有的模板变量: 'remote_topic'、'qos'、'retain'、'payload' 。
-注意:如果这个 bridge 作为规则(emqx 规则引擎)的输出,并且还配置了 local_topic,那么从规则引擎中获取的数据和匹配 local_topic 的 MQTT 消息都会被转发到远程 broker 。 -""" - } - label: { - en: "Egress Config" - zh: "Egress 模式配置" - } - } - - desc_replayq { - desc { - en: """Queue messages in disk files.""" - zh: """本地磁盘消息队列""" - } - label: { - en: "Replayq" - zh: "本地磁盘消息队列" - } - } - - - } diff --git a/apps/emqx_connector/i18n/emqx_connector_schema.conf b/apps/emqx_connector/i18n/emqx_connector_schema.conf deleted file mode 100644 index 0a94f5e88..000000000 --- a/apps/emqx_connector/i18n/emqx_connector_schema.conf +++ /dev/null @@ -1,31 +0,0 @@ -emqx_connector_schema { - - mqtt { - desc { - en: "MQTT bridges." - zh: "MQTT bridges。" - } - label: { - en: "MQTT bridges" - zh: "MQTT bridges" - } - } - - desc_connector { - desc { - en: """ -Configuration for EMQX connectors.
-A connector maintains the data related to the external resources, such as MySQL database. -""" - zh: """ -EMQX 连接器的配置。
-连接器维护与外部资源相关的数据,比如 MySQL 数据库。 -""" - } - label: { - en: "Connector" - zh: "连接器" - } - } - -} diff --git a/apps/emqx_connector/rebar.config b/apps/emqx_connector/rebar.config index 0ac2d0da8..4d0b53e9a 100644 --- a/apps/emqx_connector/rebar.config +++ b/apps/emqx_connector/rebar.config @@ -20,8 +20,7 @@ %% By accident, We have always been using the upstream fork due to %% eredis_cluster's dependency getting resolved earlier. %% Here we pin 1.5.2 to avoid surprises in the future. - {poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}}, - {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.6.0"}}} + {poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}} ]}. {shell, [ diff --git a/apps/emqx_connector/src/emqx_connector.erl b/apps/emqx_connector/src/emqx_connector.erl deleted file mode 100644 index 1ce4d982a..000000000 --- a/apps/emqx_connector/src/emqx_connector.erl +++ /dev/null @@ -1,166 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- --module(emqx_connector). - --export([ - config_key_path/0, - pre_config_update/3, - post_config_update/5 -]). - --export([ - parse_connector_id/1, - connector_id/2 -]). 
- --export([ - list_raw/0, - lookup_raw/1, - lookup_raw/2, - create_dry_run/2, - update/2, - update/3, - delete/1, - delete/2 -]). - -config_key_path() -> - [connectors]. - -pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) -> - emqx_connector_ssl:convert_certs(filename:join(Path), Conf). - --dialyzer([{nowarn_function, [post_config_update/5]}, error_handling]). -post_config_update([connectors, Type, Name] = Path, '$remove', _, OldConf, _AppEnvs) -> - ConnId = connector_id(Type, Name), - try - foreach_linked_bridges(ConnId, fun(#{type := BType, name := BName}) -> - throw({dependency_bridges_exist, emqx_bridge_resource:bridge_id(BType, BName)}) - end), - _ = emqx_connector_ssl:clear_certs(filename:join(Path), OldConf) - catch - throw:Error -> {error, Error} - end; -post_config_update([connectors, Type, Name], _Req, NewConf, OldConf, _AppEnvs) -> - ConnId = connector_id(Type, Name), - foreach_linked_bridges( - ConnId, - fun(#{type := BType, name := BName}) -> - BridgeConf = emqx:get_config([bridges, BType, BName]), - case - emqx_bridge_resource:update( - BType, - BName, - {BridgeConf#{connector => OldConf}, BridgeConf#{connector => NewConf}} - ) - of - ok -> ok; - {error, Reason} -> error({update_bridge_error, Reason}) - end - end - ). - -connector_id(Type0, Name0) -> - Type = bin(Type0), - Name = bin(Name0), - <>. - --spec parse_connector_id(binary() | list() | atom()) -> {atom(), binary()}. -parse_connector_id(ConnectorId) -> - case string:split(bin(ConnectorId), ":", all) of - [Type, Name] -> {binary_to_atom(Type, utf8), Name}; - _ -> error({invalid_connector_id, ConnectorId}) - end. - -list_raw() -> - case get_raw_connector_conf() of - not_found -> - []; - Config -> - lists:foldl( - fun({Type, NameAndConf}, Connectors) -> - lists:foldl( - fun({Name, RawConf}, Acc) -> - [RawConf#{<<"type">> => Type, <<"name">> => Name} | Acc] - end, - Connectors, - maps:to_list(NameAndConf) - ) - end, - [], - maps:to_list(Config) - ) - end. 
- -lookup_raw(Id) when is_binary(Id) -> - {Type, Name} = parse_connector_id(Id), - lookup_raw(Type, Name). - -lookup_raw(Type, Name) -> - Path = [bin(P) || P <- [Type, Name]], - case get_raw_connector_conf() of - not_found -> - {error, not_found}; - Conf -> - case emqx_map_lib:deep_get(Path, Conf, not_found) of - not_found -> {error, not_found}; - Conf1 -> {ok, Conf1#{<<"type">> => Type, <<"name">> => Name}} - end - end. - --spec create_dry_run(module(), binary() | #{binary() => term()} | [#{binary() => term()}]) -> - ok | {error, Reason :: term()}. -create_dry_run(Type, Conf) -> - emqx_bridge_resource:create_dry_run(Type, Conf). - -update(Id, Conf) when is_binary(Id) -> - {Type, Name} = parse_connector_id(Id), - update(Type, Name, Conf). - -update(Type, Name, Conf) -> - emqx_conf:update(config_key_path() ++ [Type, Name], Conf, #{override_to => cluster}). - -delete(Id) when is_binary(Id) -> - {Type, Name} = parse_connector_id(Id), - delete(Type, Name). - -delete(Type, Name) -> - emqx_conf:remove(config_key_path() ++ [Type, Name], #{override_to => cluster}). - -get_raw_connector_conf() -> - case emqx:get_raw_config(config_key_path(), not_found) of - not_found -> - not_found; - RawConf -> - #{<<"connectors">> := Conf} = - emqx_config:fill_defaults(#{<<"connectors">> => RawConf}), - Conf - end. - -bin(Bin) when is_binary(Bin) -> Bin; -bin(Str) when is_list(Str) -> list_to_binary(Str); -bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8). - -foreach_linked_bridges(ConnId, Do) -> - lists:foreach( - fun - (#{raw_config := #{<<"connector">> := ConnId0}} = Bridge) when ConnId0 == ConnId -> - Do(Bridge); - (_) -> - ok - end, - emqx_bridge:list() - ). 
diff --git a/apps/emqx_connector/src/emqx_connector_api.erl b/apps/emqx_connector/src/emqx_connector_api.erl deleted file mode 100644 index 18409fafd..000000000 --- a/apps/emqx_connector/src/emqx_connector_api.erl +++ /dev/null @@ -1,331 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - --module(emqx_connector_api). - --behaviour(minirest_api). - --include("emqx_connector.hrl"). - --include_lib("typerefl/include/types.hrl"). --include_lib("hocon/include/hoconsc.hrl"). - --import(hoconsc, [mk/2, ref/2, array/1, enum/1]). - -%% Swagger specs from hocon schema --export([api_spec/0, paths/0, schema/1, namespace/0]). - -%% API callbacks --export(['/connectors_test'/2, '/connectors'/2, '/connectors/:id'/2]). - --define(CONN_TYPES, [mqtt]). - --define(TRY_PARSE_ID(ID, EXPR), - try emqx_connector:parse_connector_id(Id) of - {ConnType, ConnName} -> - _ = ConnName, - EXPR - catch - error:{invalid_connector_id, Id0} -> - {400, #{ - code => 'INVALID_ID', - message => - <<"invalid_connector_id: ", Id0/binary, - ". Connector Ids must be of format {type}:{name}">> - }} - end -). - -namespace() -> "connector". - -api_spec() -> - emqx_dashboard_swagger:spec(?MODULE, #{check_schema => false}). 
- -paths() -> ["/connectors_test", "/connectors", "/connectors/:id"]. - -error_schema(Codes, Message) when is_list(Message) -> - error_schema(Codes, list_to_binary(Message)); -error_schema(Codes, Message) when is_binary(Message) -> - emqx_dashboard_swagger:error_codes(Codes, Message). - -put_request_body_schema() -> - emqx_dashboard_swagger:schema_with_examples( - emqx_connector_schema:put_request(), connector_info_examples(put) - ). - -post_request_body_schema() -> - emqx_dashboard_swagger:schema_with_examples( - emqx_connector_schema:post_request(), connector_info_examples(post) - ). - -get_response_body_schema() -> - emqx_dashboard_swagger:schema_with_examples( - emqx_connector_schema:get_response(), connector_info_examples(get) - ). - -connector_info_array_example(Method) -> - [Config || #{value := Config} <- maps:values(connector_info_examples(Method))]. - -connector_info_examples(Method) -> - lists:foldl( - fun(Type, Acc) -> - SType = atom_to_list(Type), - maps:merge(Acc, #{ - Type => #{ - summary => bin(string:uppercase(SType) ++ " Connector"), - value => info_example(Type, Method) - } - }) - end, - #{}, - ?CONN_TYPES - ). - -info_example(Type, Method) -> - maps:merge( - info_example_basic(Type), - method_example(Type, Method) - ). - -method_example(Type, Method) when Method == get; Method == post -> - SType = atom_to_list(Type), - SName = "my_" ++ SType ++ "_connector", - #{ - type => bin(SType), - name => bin(SName) - }; -method_example(_Type, put) -> - #{}. - -info_example_basic(mqtt) -> - #{ - mode => cluster_shareload, - server => <<"127.0.0.1:1883">>, - reconnect_interval => <<"15s">>, - proto_ver => <<"v4">>, - username => <<"foo">>, - password => <<"bar">>, - clientid => <<"foo">>, - clean_start => true, - keepalive => <<"300s">>, - retry_interval => <<"15s">>, - max_inflight => 100, - ssl => #{ - enable => false - } - }. 
- -param_path_id() -> - [ - {id, - mk( - binary(), - #{ - in => path, - example => <<"mqtt:my_mqtt_connector">>, - desc => ?DESC("id") - } - )} - ]. - -schema("/connectors_test") -> - #{ - 'operationId' => '/connectors_test', - post => #{ - tags => [<<"connectors">>], - desc => ?DESC("conn_test_post"), - summary => <<"Test creating connector">>, - 'requestBody' => post_request_body_schema(), - responses => #{ - 204 => <<"Test connector OK">>, - 400 => error_schema(['TEST_FAILED'], "connector test failed") - } - } - }; -schema("/connectors") -> - #{ - 'operationId' => '/connectors', - get => #{ - tags => [<<"connectors">>], - desc => ?DESC("conn_get"), - summary => <<"List connectors">>, - responses => #{ - 200 => emqx_dashboard_swagger:schema_with_example( - array(emqx_connector_schema:get_response()), - connector_info_array_example(get) - ) - } - }, - post => #{ - tags => [<<"connectors">>], - desc => ?DESC("conn_post"), - summary => <<"Create connector">>, - 'requestBody' => post_request_body_schema(), - responses => #{ - 201 => get_response_body_schema(), - 400 => error_schema(['ALREADY_EXISTS'], "connector already exists") - } - } - }; -schema("/connectors/:id") -> - #{ - 'operationId' => '/connectors/:id', - get => #{ - tags => [<<"connectors">>], - desc => ?DESC("conn_id_get"), - summary => <<"Get connector">>, - parameters => param_path_id(), - responses => #{ - 200 => get_response_body_schema(), - 404 => error_schema(['NOT_FOUND'], "Connector not found"), - 400 => error_schema(['INVALID_ID'], "Bad connector ID") - } - }, - put => #{ - tags => [<<"connectors">>], - desc => ?DESC("conn_id_put"), - summary => <<"Update connector">>, - parameters => param_path_id(), - 'requestBody' => put_request_body_schema(), - responses => #{ - 200 => get_response_body_schema(), - 404 => error_schema(['NOT_FOUND'], "Connector not found"), - 400 => error_schema(['INVALID_ID'], "Bad connector ID") - } - }, - delete => #{ - tags => [<<"connectors">>], - desc => 
?DESC("conn_id_delete"), - summary => <<"Delete connector">>, - parameters => param_path_id(), - responses => #{ - 204 => <<"Delete connector successfully">>, - 403 => error_schema(['DEPENDENCY_EXISTS'], "Cannot remove dependent connector"), - 404 => error_schema(['NOT_FOUND'], "Delete failed, not found"), - 400 => error_schema(['INVALID_ID'], "Bad connector ID") - } - } - }. - -'/connectors_test'(post, #{body := #{<<"type">> := ConnType} = Params}) -> - case emqx_connector:create_dry_run(ConnType, maps:remove(<<"type">>, Params)) of - ok -> - {204}; - {error, Error} -> - {400, error_msg(['TEST_FAILED'], Error)} - end. - -'/connectors'(get, _Request) -> - {200, [format_resp(Conn) || Conn <- emqx_connector:list_raw()]}; -'/connectors'(post, #{body := #{<<"type">> := ConnType, <<"name">> := ConnName} = Params}) -> - case emqx_connector:lookup_raw(ConnType, ConnName) of - {ok, _} -> - {400, error_msg('ALREADY_EXISTS', <<"connector already exists">>)}; - {error, not_found} -> - case - emqx_connector:update( - ConnType, - ConnName, - filter_out_request_body(Params) - ) - of - {ok, #{raw_config := RawConf}} -> - {201, - format_resp(RawConf#{ - <<"type">> => ConnType, - <<"name">> => ConnName - })}; - {error, Error} -> - {400, error_msg('BAD_REQUEST', Error)} - end - end; -'/connectors'(post, _) -> - {400, error_msg('BAD_REQUEST', <<"missing some required fields: [name, type]">>)}. 
- -'/connectors/:id'(get, #{bindings := #{id := Id}}) -> - ?TRY_PARSE_ID( - Id, - case emqx_connector:lookup_raw(ConnType, ConnName) of - {ok, Conf} -> - {200, format_resp(Conf)}; - {error, not_found} -> - {404, error_msg('NOT_FOUND', <<"connector not found">>)} - end - ); -'/connectors/:id'(put, #{bindings := #{id := Id}, body := Params0}) -> - Params = filter_out_request_body(Params0), - ?TRY_PARSE_ID( - Id, - case emqx_connector:lookup_raw(ConnType, ConnName) of - {ok, _} -> - case emqx_connector:update(ConnType, ConnName, Params) of - {ok, #{raw_config := RawConf}} -> - {200, - format_resp(RawConf#{ - <<"type">> => ConnType, - <<"name">> => ConnName - })}; - {error, Error} -> - {500, error_msg('INTERNAL_ERROR', Error)} - end; - {error, not_found} -> - {404, error_msg('NOT_FOUND', <<"connector not found">>)} - end - ); -'/connectors/:id'(delete, #{bindings := #{id := Id}}) -> - ?TRY_PARSE_ID( - Id, - case emqx_connector:lookup_raw(ConnType, ConnName) of - {ok, _} -> - case emqx_connector:delete(ConnType, ConnName) of - {ok, _} -> - {204}; - {error, {post_config_update, _, {dependency_bridges_exist, BridgeID}}} -> - {403, - error_msg( - 'DEPENDENCY_EXISTS', - <<"Cannot remove the connector as it's in use by a bridge: ", - BridgeID/binary>> - )}; - {error, Error} -> - {500, error_msg('INTERNAL_ERROR', Error)} - end; - {error, not_found} -> - {404, error_msg('NOT_FOUND', <<"connector not found">>)} - end - ). - -error_msg(Code, Msg) -> - #{code => Code, message => emqx_misc:readable_error_msg(Msg)}. - -format_resp(#{<<"type">> := ConnType, <<"name">> := ConnName} = RawConf) -> - NumOfBridges = length( - emqx_bridge:list_bridges_by_connector( - emqx_connector:connector_id(ConnType, ConnName) - ) - ), - RawConf#{ - <<"type">> => ConnType, - <<"name">> => ConnName, - <<"num_of_bridges">> => NumOfBridges - }. - -filter_out_request_body(Conf) -> - ExtraConfs = [<<"clientid">>, <<"num_of_bridges">>, <<"type">>, <<"name">>], - maps:without(ExtraConfs, Conf). 
- -bin(S) when is_list(S) -> - list_to_binary(S). diff --git a/apps/emqx_connector/src/emqx_connector_app.erl b/apps/emqx_connector/src/emqx_connector_app.erl index b6f5b8623..62167dc18 100644 --- a/apps/emqx_connector/src/emqx_connector_app.erl +++ b/apps/emqx_connector/src/emqx_connector_app.erl @@ -20,15 +20,10 @@ -export([start/2, stop/1]). --define(CONF_HDLR_PATH, (emqx_connector:config_key_path() ++ ['?', '?'])). - start(_StartType, _StartArgs) -> - ok = emqx_config_handler:add_handler(?CONF_HDLR_PATH, emqx_connector), - emqx_connector_mqtt_worker:register_metrics(), emqx_connector_sup:start_link(). stop(_State) -> - emqx_config_handler:remove_handler(?CONF_HDLR_PATH), ok. %% internal functions diff --git a/apps/emqx_connector/src/emqx_connector_http.erl b/apps/emqx_connector/src/emqx_connector_http.erl index b12b38838..ed5897a59 100644 --- a/apps/emqx_connector/src/emqx_connector_http.erl +++ b/apps/emqx_connector/src/emqx_connector_http.erl @@ -26,10 +26,13 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, - on_get_status/2 + on_query/3, + on_query_async/4, + on_get_status/2, + reply_delegator/2 ]). -type url() :: emqx_http_lib:uri_map(). @@ -44,7 +47,7 @@ namespace/0 ]). --export([check_ssl_opts/2]). +-export([check_ssl_opts/2, validate_method/1]). -type connect_timeout() :: emqx_schema:duration() | infinity. -type pool_type() :: random | hash. @@ -135,8 +138,10 @@ fields(config) -> fields("request") -> [ {method, - hoconsc:mk(hoconsc:enum([post, put, get, delete]), #{ - required => false, desc => ?DESC("method") + hoconsc:mk(binary(), #{ + required => false, + desc => ?DESC("method"), + validator => fun ?MODULE:validate_method/1 })}, {path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})}, {body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})}, @@ -169,11 +174,24 @@ desc(_) -> validations() -> [{check_ssl_opts, fun check_ssl_opts/1}]. 
+validate_method(M) when M =:= <<"post">>; M =:= <<"put">>; M =:= <<"get">>; M =:= <<"delete">> -> + ok; +validate_method(M) -> + case string:find(M, "${") of + nomatch -> + {error, + <<"Invalid method, should be one of 'post', 'put', 'get', 'delete' or variables in ${field} format.">>}; + _ -> + ok + end. + sc(Type, Meta) -> hoconsc:mk(Type, Meta). ref(Field) -> hoconsc:ref(?MODULE, Field). %% =================================================================== +callback_mode() -> async_if_possible. + on_start( InstId, #{ @@ -235,10 +253,11 @@ on_stop(InstId, #{pool_name := PoolName}) -> }), ehttpc_sup:stop_pool(PoolName). -on_query(InstId, {send_message, Msg}, AfterQuery, State) -> +on_query(InstId, {send_message, Msg}, State) -> case maps:get(request, State, undefined) of undefined -> - ?SLOG(error, #{msg => "request_not_found", connector => InstId}); + ?SLOG(error, #{msg => "arg_request_not_found", connector => InstId}), + {error, arg_request_not_found}; Request -> #{ method := Method, @@ -251,18 +270,16 @@ on_query(InstId, {send_message, Msg}, AfterQuery, State) -> on_query( InstId, {undefined, Method, {Path, Headers, Body}, Timeout, Retry}, - AfterQuery, State ) end; -on_query(InstId, {Method, Request}, AfterQuery, State) -> - on_query(InstId, {undefined, Method, Request, 5000, 2}, AfterQuery, State); -on_query(InstId, {Method, Request, Timeout}, AfterQuery, State) -> - on_query(InstId, {undefined, Method, Request, Timeout, 2}, AfterQuery, State); +on_query(InstId, {Method, Request}, State) -> + on_query(InstId, {undefined, Method, Request, 5000, 2}, State); +on_query(InstId, {Method, Request, Timeout}, State) -> + on_query(InstId, {undefined, Method, Request, Timeout, 2}, State); on_query( InstId, {KeyOrNum, Method, Request, Timeout, Retry}, - AfterQuery, #{pool_name := PoolName, base_path := BasePath} = State ) -> ?TRACE( @@ -272,7 +289,7 @@ on_query( ), NRequest = formalize_request(Method, BasePath, Request), case - Result = ehttpc:request( + 
ehttpc:request( case KeyOrNum of undefined -> PoolName; _ -> {PoolName, KeyOrNum} @@ -283,36 +300,87 @@ on_query( Retry ) of - {error, Reason} -> + {error, Reason} when Reason =:= econnrefused; Reason =:= timeout -> + ?SLOG(warning, #{ + msg => "http_connector_do_request_failed", + reason => Reason, + connector => InstId + }), + {error, {recoverable_error, Reason}}; + {error, Reason} = Result -> ?SLOG(error, #{ - msg => "http_connector_do_reqeust_failed", + msg => "http_connector_do_request_failed", request => NRequest, reason => Reason, connector => InstId }), - emqx_resource:query_failed(AfterQuery); - {ok, StatusCode, _} when StatusCode >= 200 andalso StatusCode < 300 -> - emqx_resource:query_success(AfterQuery); - {ok, StatusCode, _, _} when StatusCode >= 200 andalso StatusCode < 300 -> - emqx_resource:query_success(AfterQuery); - {ok, StatusCode, _} -> + Result; + {ok, StatusCode, _} = Result when StatusCode >= 200 andalso StatusCode < 300 -> + Result; + {ok, StatusCode, _, _} = Result when StatusCode >= 200 andalso StatusCode < 300 -> + Result; + {ok, StatusCode, Headers} -> ?SLOG(error, #{ msg => "http connector do request, received error response", request => NRequest, connector => InstId, status_code => StatusCode }), - emqx_resource:query_failed(AfterQuery); - {ok, StatusCode, _, _} -> + {error, #{status_code => StatusCode, headers => Headers}}; + {ok, StatusCode, Headers, Body} -> ?SLOG(error, #{ msg => "http connector do request, received error response", request => NRequest, connector => InstId, status_code => StatusCode }), - emqx_resource:query_failed(AfterQuery) - end, - Result. + {error, #{status_code => StatusCode, headers => Headers, body => Body}} + end. 
+ +on_query_async(InstId, {send_message, Msg}, ReplyFunAndArgs, State) -> + case maps:get(request, State, undefined) of + undefined -> + ?SLOG(error, #{msg => "arg_request_not_found", connector => InstId}), + {error, arg_request_not_found}; + Request -> + #{ + method := Method, + path := Path, + body := Body, + headers := Headers, + request_timeout := Timeout + } = process_request(Request, Msg), + on_query_async( + InstId, + {undefined, Method, {Path, Headers, Body}, Timeout}, + ReplyFunAndArgs, + State + ) + end; +on_query_async( + InstId, + {KeyOrNum, Method, Request, Timeout}, + ReplyFunAndArgs, + #{pool_name := PoolName, base_path := BasePath} = State +) -> + ?TRACE( + "QUERY_ASYNC", + "http_connector_received", + #{request => Request, connector => InstId, state => State} + ), + NRequest = formalize_request(Method, BasePath, Request), + Worker = + case KeyOrNum of + undefined -> ehttpc_pool:pick_worker(PoolName); + _ -> ehttpc_pool:pick_worker(PoolName, KeyOrNum) + end, + ok = ehttpc:request_async( + Worker, + Method, + NRequest, + Timeout, + {fun ?MODULE:reply_delegator/2, [ReplyFunAndArgs]} + ). on_get_status(_InstId, #{pool_name := PoolName, connect_timeout := Timeout} = State) -> case do_get_status(PoolName, Timeout) of @@ -355,7 +423,6 @@ do_get_status(PoolName, Timeout) -> %%-------------------------------------------------------------------- %% Internal functions %%-------------------------------------------------------------------- - preprocess_request(undefined) -> undefined; preprocess_request(Req) when map_size(Req) == 0 -> @@ -468,3 +535,12 @@ bin(Str) when is_list(Str) -> list_to_binary(Str); bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8). 
+ +reply_delegator(ReplyFunAndArgs, Result) -> + case Result of + {error, Reason} when Reason =:= econnrefused; Reason =:= timeout -> + Result1 = {error, {recoverable_error, Reason}}, + emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result1); + _ -> + emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result) + end. diff --git a/apps/emqx_connector/src/emqx_connector_ldap.erl b/apps/emqx_connector/src/emqx_connector_ldap.erl index 195aa89a9..d53c0e41b 100644 --- a/apps/emqx_connector/src/emqx_connector_ldap.erl +++ b/apps/emqx_connector/src/emqx_connector_ldap.erl @@ -25,9 +25,10 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). @@ -42,6 +43,8 @@ roots() -> fields(_) -> []. %% =================================================================== +callback_mode() -> always_sync. + on_start( InstId, #{ @@ -99,7 +102,7 @@ on_stop(InstId, #{poolname := PoolName}) -> }), emqx_plugin_libs_pool:stop_pool(PoolName). -on_query(InstId, {search, Base, Filter, Attributes}, AfterQuery, #{poolname := PoolName} = State) -> +on_query(InstId, {search, Base, Filter, Attributes}, #{poolname := PoolName} = State) -> Request = {Base, Filter, Attributes}, ?TRACE( "QUERY", @@ -119,10 +122,9 @@ on_query(InstId, {search, Base, Filter, Attributes}, AfterQuery, #{poolname := P request => Request, connector => InstId, reason => Reason - }), - emqx_resource:query_failed(AfterQuery); + }); _ -> - emqx_resource:query_success(AfterQuery) + ok end, Result. diff --git a/apps/emqx_connector/src/emqx_connector_mongo.erl b/apps/emqx_connector/src/emqx_connector_mongo.erl index a5e2be521..678a4f847 100644 --- a/apps/emqx_connector/src/emqx_connector_mongo.erl +++ b/apps/emqx_connector/src/emqx_connector_mongo.erl @@ -25,9 +25,10 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). 
@@ -36,7 +37,7 @@ -export([roots/0, fields/1, desc/1]). --export([mongo_query/5, check_worker_health/1]). +-export([mongo_query/5, mongo_insert/3, check_worker_health/1]). -define(HEALTH_CHECK_TIMEOUT, 30000). @@ -46,6 +47,10 @@ default_port => ?MONGO_DEFAULT_PORT }). +-ifdef(TEST). +-export([to_servers_raw/1]). +-endif. + %%===================================================================== roots() -> [ @@ -139,6 +144,8 @@ mongo_fields() -> %% =================================================================== +callback_mode() -> always_sync. + on_start( InstId, Config = #{ @@ -174,9 +181,16 @@ on_start( {worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)} ], PoolName = emqx_plugin_libs_pool:pool_name(InstId), + Collection = maps:get(collection, Config, <<"mqtt">>), case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of - ok -> {ok, #{poolname => PoolName, type => Type}}; - {error, Reason} -> {error, Reason} + ok -> + {ok, #{ + poolname => PoolName, + type => Type, + collection => Collection + }}; + {error, Reason} -> + {error, Reason} end. on_stop(InstId, #{poolname := PoolName}) -> @@ -186,10 +200,38 @@ on_stop(InstId, #{poolname := PoolName}) -> }), emqx_plugin_libs_pool:stop_pool(PoolName). 
+on_query( + InstId, + {send_message, Document}, + #{poolname := PoolName, collection := Collection} = State +) -> + Request = {insert, Collection, Document}, + ?TRACE( + "QUERY", + "mongodb_connector_received", + #{request => Request, connector => InstId, state => State} + ), + case + ecpool:pick_and_do( + PoolName, + {?MODULE, mongo_insert, [Collection, Document]}, + no_handover + ) + of + {{false, Reason}, _Document} -> + ?SLOG(error, #{ + msg => "mongodb_connector_do_query_failed", + request => Request, + reason => Reason, + connector => InstId + }), + {error, Reason}; + {{true, _Info}, _Document} -> + ok + end; on_query( InstId, {Action, Collection, Filter, Projector}, - AfterQuery, #{poolname := PoolName} = State ) -> Request = {Action, Collection, Filter, Projector}, @@ -212,14 +254,11 @@ on_query( reason => Reason, connector => InstId }), - emqx_resource:query_failed(AfterQuery), {error, Reason}; {ok, Cursor} when is_pid(Cursor) -> - emqx_resource:query_success(AfterQuery), - mc_cursor:foldl(fun(O, Acc2) -> [O | Acc2] end, [], Cursor, 1000); + {ok, mc_cursor:foldl(fun(O, Acc2) -> [O | Acc2] end, [], Cursor, 1000)}; Result -> - emqx_resource:query_success(AfterQuery), - Result + {ok, Result} end. -dialyzer({nowarn_function, [on_get_status/2]}). @@ -293,6 +332,9 @@ mongo_query(Conn, find_one, Collection, Filter, Projector) -> mongo_query(_Conn, _Action, _Collection, _Filter, _Projector) -> ok. +mongo_insert(Conn, Collection, Documents) -> + mongo_api:insert(Conn, Collection, Documents). 
+ init_type(#{mongo_type := rs, replica_set_name := ReplicaSetName}) -> {rs, ReplicaSetName}; init_type(#{mongo_type := Type}) -> @@ -409,7 +451,7 @@ may_parse_srv_and_txt_records_( true -> error({missing_parameter, replica_set_name}); false -> - Config#{hosts => servers_to_bin(Servers)} + Config#{hosts => servers_to_bin(lists:flatten(Servers))} end; may_parse_srv_and_txt_records_( #{ @@ -519,9 +561,33 @@ to_servers_raw(Servers) -> fun(Server) -> emqx_connector_schema_lib:parse_server(Server, ?MONGO_HOST_OPTIONS) end, - string:tokens(str(Servers), ", ") + split_servers(Servers) ). +split_servers(L) when is_list(L) -> + PossibleTypes = [ + list(binary()), + list(string()), + string() + ], + TypeChecks = lists:map(fun(T) -> typerefl:typecheck(T, L) end, PossibleTypes), + case TypeChecks of + [ok, _, _] -> + %% list(binary()) + lists:map(fun binary_to_list/1, L); + [_, ok, _] -> + %% list(string()) + L; + [_, _, ok] -> + %% string() + string:tokens(L, ", "); + [_, _, _] -> + %% invalid input + throw("List of servers must contain only strings") + end; +split_servers(B) when is_binary(B) -> + string:tokens(str(B), ", "). + str(A) when is_atom(A) -> atom_to_list(A); str(B) when is_binary(B) -> diff --git a/apps/emqx_connector/src/emqx_connector_mqtt.erl b/apps/emqx_connector/src/emqx_connector_mqtt.erl index d21d373a0..b063d7436 100644 --- a/apps/emqx_connector/src/emqx_connector_mqtt.erl +++ b/apps/emqx_connector/src/emqx_connector_mqtt.erl @@ -24,6 +24,7 @@ %% API and callbacks for supervisor -export([ + callback_mode/0, start_link/0, init/1, create_bridge/1, @@ -37,7 +38,8 @@ -export([ on_start/2, on_stop/2, - on_query/4, + on_query/3, + on_query_async/4, on_get_status/2 ]). 
@@ -66,7 +68,7 @@ fields("get") -> )} ] ++ fields("post"); fields("put") -> - emqx_connector_mqtt_schema:fields("connector"); + emqx_connector_mqtt_schema:fields("server_configs"); fields("post") -> [ {type, @@ -133,11 +135,13 @@ drop_bridge(Name) -> %% =================================================================== %% When use this bridge as a data source, ?MODULE:on_message_received will be called %% if the bridge received msgs from the remote broker. -on_message_received(Msg, HookPoint, InstId) -> - _ = emqx_resource:query(InstId, {message_received, Msg}), +on_message_received(Msg, HookPoint, ResId) -> + emqx_resource:inc_received(ResId), emqx:run_hook(HookPoint, [Msg]). %% =================================================================== +callback_mode() -> async_if_possible. + on_start(InstId, Conf) -> InstanceId = binary_to_atom(InstId, utf8), ?SLOG(info, #{ @@ -149,7 +153,7 @@ on_start(InstId, Conf) -> BridgeConf = BasicConf#{ name => InstanceId, clientid => clientid(InstId), - subscriptions => make_sub_confs(maps:get(ingress, Conf, undefined), InstId), + subscriptions => make_sub_confs(maps:get(ingress, Conf, undefined), Conf, InstId), forwards => make_forward_confs(maps:get(egress, Conf, undefined)) }, case ?MODULE:create_bridge(BridgeConf) of @@ -181,12 +185,18 @@ on_stop(_InstId, #{name := InstanceId}) -> }) end. -on_query(_InstId, {message_received, _Msg}, AfterQuery, _State) -> - emqx_resource:query_success(AfterQuery); -on_query(_InstId, {send_message, Msg}, AfterQuery, #{name := InstanceId}) -> +on_query(_InstId, {send_message, Msg}, #{name := InstanceId}) -> ?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => InstanceId}), - emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg), - emqx_resource:query_success(AfterQuery). + emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg). 
+ +on_query_async( + _InstId, + {send_message, Msg}, + {ReplayFun, Args}, + #{name := InstanceId} +) -> + ?TRACE("QUERY", "async_send_msg_to_remote_node", #{message => Msg, connector => InstanceId}), + emqx_connector_mqtt_worker:send_to_remote_async(InstanceId, Msg, {ReplayFun, Args}). on_get_status(_InstId, #{name := InstanceId, bridge_conf := Conf}) -> AutoReconn = maps:get(auto_reconnect, Conf, true), @@ -202,17 +212,18 @@ ensure_mqtt_worker_started(InstanceId, BridgeConf) -> {error, Reason} -> {error, Reason} end. -make_sub_confs(EmptyMap, _) when map_size(EmptyMap) == 0 -> +make_sub_confs(EmptyMap, _Conf, _) when map_size(EmptyMap) == 0 -> undefined; -make_sub_confs(undefined, _) -> +make_sub_confs(undefined, _Conf, _) -> undefined; -make_sub_confs(SubRemoteConf, InstId) -> - case maps:take(hookpoint, SubRemoteConf) of +make_sub_confs(SubRemoteConf, Conf, InstId) -> + ResId = emqx_resource_manager:manager_id_to_resource_id(InstId), + case maps:find(hookpoint, Conf) of error -> - SubRemoteConf; - {HookPoint, SubConf} -> - MFA = {?MODULE, on_message_received, [HookPoint, InstId]}, - SubConf#{on_message_received => MFA} + error({no_hookpoint_provided, Conf}); + {ok, HookPoint} -> + MFA = {?MODULE, on_message_received, [HookPoint, ResId]}, + SubRemoteConf#{on_message_received => MFA} end. make_forward_confs(EmptyMap) when map_size(EmptyMap) == 0 -> @@ -232,12 +243,10 @@ basic_config( keepalive := KeepAlive, retry_interval := RetryIntv, max_inflight := MaxInflight, - replayq := ReplayQ, ssl := #{enable := EnableSsl} = Ssl } = Conf ) -> - #{ - replayq => ReplayQ, + BaiscConf = #{ %% connection opts server => Server, %% 30s @@ -251,9 +260,6 @@ basic_config( %% non-standard mqtt connection packets will be filtered out by LB. %% So let's disable bridge_mode. 
bridge_mode => BridgeMode, - %% should be iolist for emqtt - username => maps:get(username, Conf, <<>>), - password => maps:get(password, Conf, <<>>), clean_start => CleanStart, keepalive => ms_to_s(KeepAlive), retry_interval => RetryIntv, @@ -261,7 +267,20 @@ basic_config( ssl => EnableSsl, ssl_opts => maps:to_list(maps:remove(enable, Ssl)), if_record_metrics => true - }. + }, + maybe_put_fields([username, password], Conf, BaiscConf). + +maybe_put_fields(Fields, Conf, Acc0) -> + lists:foldl( + fun(Key, Acc) -> + case maps:find(Key, Conf) of + error -> Acc; + {ok, Val} -> Acc#{Key => Val} + end + end, + Acc0, + Fields + ). ms_to_s(Ms) -> erlang:ceil(Ms / 1000). diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl index 79a306b05..fc3068c66 100644 --- a/apps/emqx_connector/src/emqx_connector_mysql.erl +++ b/apps/emqx_connector/src/emqx_connector_mysql.erl @@ -19,14 +19,17 @@ -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -behaviour(emqx_resource). %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, + on_batch_query/3, on_get_status/2 ]). @@ -44,6 +47,19 @@ default_port => ?MYSQL_DEFAULT_PORT }). +-type prepares() :: #{atom() => binary()}. +-type params_tokens() :: #{atom() => list()}. +-type sqls() :: #{atom() => binary()}. +-type state() :: + #{ + poolname := atom(), + auto_reconnect := boolean(), + prepare_statement := prepares(), + params_tokens := params_tokens(), + batch_inserts := sqls(), + batch_params_tokens := params_tokens() + }. + %%===================================================================== %% Hocon schema roots() -> @@ -63,6 +79,9 @@ server(desc) -> ?DESC("server"); server(_) -> undefined. 
%% =================================================================== +callback_mode() -> always_sync. + +-spec on_start(binary(), hoconsc:config()) -> {ok, state()} | {error, _}. on_start( InstId, #{ @@ -97,11 +116,17 @@ on_start( {pool_size, PoolSize} ], PoolName = emqx_plugin_libs_pool:pool_name(InstId), - Prepares = maps:get(prepare_statement, Config, #{}), - State = #{poolname => PoolName, prepare_statement => Prepares, auto_reconnect => AutoReconn}, + Prepares = parse_prepare_sql(Config), + State = maps:merge(#{poolname => PoolName, auto_reconnect => AutoReconn}, Prepares), case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of - ok -> {ok, init_prepare(State)}; - {error, Reason} -> {error, Reason} + ok -> + {ok, init_prepare(State)}; + {error, Reason} -> + ?tp( + mysql_connector_start_failed, + #{error => Reason} + ), + {error, Reason} end. on_stop(InstId, #{poolname := PoolName}) -> @@ -111,63 +136,62 @@ on_stop(InstId, #{poolname := PoolName}) -> }), emqx_plugin_libs_pool:stop_pool(PoolName). 
-on_query(InstId, {Type, SQLOrKey}, AfterQuery, State) -> - on_query(InstId, {Type, SQLOrKey, [], default_timeout}, AfterQuery, State); -on_query(InstId, {Type, SQLOrKey, Params}, AfterQuery, State) -> - on_query(InstId, {Type, SQLOrKey, Params, default_timeout}, AfterQuery, State); +on_query(InstId, {TypeOrKey, SQLOrKey}, State) -> + on_query(InstId, {TypeOrKey, SQLOrKey, [], default_timeout}, State); +on_query(InstId, {TypeOrKey, SQLOrKey, Params}, State) -> + on_query(InstId, {TypeOrKey, SQLOrKey, Params, default_timeout}, State); on_query( InstId, - {Type, SQLOrKey, Params, Timeout}, - AfterQuery, + {TypeOrKey, SQLOrKey, Params, Timeout}, #{poolname := PoolName, prepare_statement := Prepares} = State ) -> - LogMeta = #{connector => InstId, sql => SQLOrKey, state => State}, - ?TRACE("QUERY", "mysql_connector_received", LogMeta), - Worker = ecpool:get_client(PoolName), - {ok, Conn} = ecpool_worker:client(Worker), - MySqlFunction = mysql_function(Type), - Result = erlang:apply(mysql, MySqlFunction, [Conn, SQLOrKey, Params, Timeout]), - case Result of - {error, disconnected} -> - ?SLOG( - error, - LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => disconnected} - ), - %% kill the poll worker to trigger reconnection - _ = exit(Conn, restart), - emqx_resource:query_failed(AfterQuery), - Result; + MySqlFunction = mysql_function(TypeOrKey), + {SQLOrKey2, Data} = proc_sql_params(TypeOrKey, SQLOrKey, Params, State), + case on_sql_query(InstId, MySqlFunction, SQLOrKey2, Data, Timeout, State) of {error, not_prepared} -> - ?SLOG( - warning, - LogMeta#{msg => "mysql_connector_prepare_query_failed", reason => not_prepared} - ), case prepare_sql(Prepares, PoolName) of ok -> %% not return result, next loop will try again - on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery, State); + on_query(InstId, {TypeOrKey, SQLOrKey, Params, Timeout}, State); {error, Reason} -> + LogMeta = #{connector => InstId, sql => SQLOrKey, state => State}, ?SLOG( error, 
LogMeta#{msg => "mysql_connector_do_prepare_failed", reason => Reason} ), - emqx_resource:query_failed(AfterQuery), {error, Reason} end; - {error, Reason} -> - ?SLOG( - error, - LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason} - ), - emqx_resource:query_failed(AfterQuery), - Result; - _ -> - emqx_resource:query_success(AfterQuery), + Result -> Result end. -mysql_function(sql) -> query; -mysql_function(prepared_query) -> execute. +on_batch_query( + InstId, + BatchReq, + #{batch_inserts := Inserts, batch_params_tokens := ParamsTokens} = State +) -> + case hd(BatchReq) of + {Key, _} -> + case maps:get(Key, Inserts, undefined) of + undefined -> + {error, batch_select_not_implemented}; + InsertSQL -> + Tokens = maps:get(Key, ParamsTokens), + on_batch_insert(InstId, BatchReq, InsertSQL, Tokens, State) + end; + Request -> + LogMeta = #{connector => InstId, first_request => Request, state => State}, + ?SLOG(error, LogMeta#{msg => "invalid request"}), + {error, invald_request} + end. + +mysql_function(sql) -> + query; +mysql_function(prepared_query) -> + execute; +%% for bridge +mysql_function(_) -> + mysql_function(prepared_query). on_get_status(_InstId, #{poolname := Pool, auto_reconnect := AutoReconn} = State) -> case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of @@ -287,3 +311,143 @@ prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList]) when is_pid(Conn) -> unprepare_sql_to_conn(Conn, PrepareSqlKey) -> mysql:unprepare(Conn, PrepareSqlKey). + +parse_prepare_sql(Config) -> + SQL = + case maps:get(prepare_statement, Config, undefined) of + undefined -> + case maps:get(sql, Config, undefined) of + undefined -> #{}; + Template -> #{send_message => Template} + end; + Any -> + Any + end, + parse_prepare_sql(maps:to_list(SQL), #{}, #{}, #{}, #{}). 
+ +parse_prepare_sql([{Key, H} | _] = L, Prepares, Tokens, BatchInserts, BatchTks) -> + {PrepareSQL, ParamsTokens} = emqx_plugin_libs_rule:preproc_sql(H), + parse_batch_prepare_sql( + L, Prepares#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens}, BatchInserts, BatchTks + ); +parse_prepare_sql([], Prepares, Tokens, BatchInserts, BatchTks) -> + #{ + prepare_statement => Prepares, + params_tokens => Tokens, + batch_inserts => BatchInserts, + batch_params_tokens => BatchTks + }. + +parse_batch_prepare_sql([{Key, H} | T], Prepares, Tokens, BatchInserts, BatchTks) -> + case emqx_plugin_libs_rule:detect_sql_type(H) of + {ok, select} -> + parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks); + {ok, insert} -> + case emqx_plugin_libs_rule:split_insert_sql(H) of + {ok, {InsertSQL, Params}} -> + ParamsTks = emqx_plugin_libs_rule:preproc_tmpl(Params), + parse_prepare_sql( + T, + Prepares, + Tokens, + BatchInserts#{Key => InsertSQL}, + BatchTks#{Key => ParamsTks} + ); + {error, Reason} -> + ?SLOG(error, #{msg => "split sql failed", sql => H, reason => Reason}), + parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks) + end; + {error, Reason} -> + ?SLOG(error, #{msg => "detect sql type failed", sql => H, reason => Reason}), + parse_prepare_sql(T, Prepares, Tokens, BatchInserts, BatchTks) + end. + +proc_sql_params(query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(prepared_query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(TypeOrKey, SQLOrData, Params, #{params_tokens := ParamsTokens}) -> + case maps:get(TypeOrKey, ParamsTokens, undefined) of + undefined -> + {SQLOrData, Params}; + Tokens -> + {TypeOrKey, emqx_plugin_libs_rule:proc_sql(Tokens, SQLOrData)} + end. 
+ +on_batch_insert(InstId, BatchReqs, InsertPart, Tokens, State) -> + JoinFun = fun + ([Msg]) -> + emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg); + ([H | T]) -> + lists:foldl( + fun(Msg, Acc) -> + Value = emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg), + <> + end, + emqx_plugin_libs_rule:proc_sql_param_str(Tokens, H), + T + ) + end, + {_, Msgs} = lists:unzip(BatchReqs), + JoinPart = JoinFun(Msgs), + SQL = <>, + on_sql_query(InstId, query, SQL, [], default_timeout, State). + +on_sql_query( + InstId, + SQLFunc, + SQLOrKey, + Data, + Timeout, + #{poolname := PoolName} = State +) -> + LogMeta = #{connector => InstId, sql => SQLOrKey, state => State}, + ?TRACE("QUERY", "mysql_connector_received", LogMeta), + Worker = ecpool:get_client(PoolName), + {ok, Conn} = ecpool_worker:client(Worker), + ?tp( + mysql_connector_send_query, + #{sql_or_key => SQLOrKey, data => Data} + ), + try mysql:SQLFunc(Conn, SQLOrKey, Data, Timeout) of + {error, disconnected} = Result -> + ?SLOG( + error, + LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => disconnected} + ), + %% kill the poll worker to trigger reconnection + _ = exit(Conn, restart), + Result; + {error, not_prepared} = Error -> + ?SLOG( + warning, + LogMeta#{msg => "mysql_connector_prepare_query_failed", reason => not_prepared} + ), + Error; + {error, {1053, <<"08S01">>, Reason}} -> + %% mysql sql server shutdown in progress + ?SLOG( + error, + LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason} + ), + {error, {recoverable_error, Reason}}; + {error, Reason} = Result -> + ?SLOG( + error, + LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason} + ), + Result; + Result -> + ?tp( + mysql_connector_query_return, + #{result => Result} + ), + Result + catch + error:badarg -> + ?SLOG( + error, + LogMeta#{msg => "mysql_connector_invalid_params", params => Data} + ), + {error, {invalid_params, Data}} + end. 
diff --git a/apps/emqx_connector/src/emqx_connector_pgsql.erl b/apps/emqx_connector/src/emqx_connector_pgsql.erl index f642ba75c..71dd2bbeb 100644 --- a/apps/emqx_connector/src/emqx_connector_pgsql.erl +++ b/apps/emqx_connector/src/emqx_connector_pgsql.erl @@ -27,9 +27,10 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). @@ -66,6 +67,8 @@ server(desc) -> ?DESC("server"); server(_) -> undefined. %% =================================================================== +callback_mode() -> always_sync. + on_start( InstId, #{ @@ -116,9 +119,9 @@ on_stop(InstId, #{poolname := PoolName}) -> }), emqx_plugin_libs_pool:stop_pool(PoolName). -on_query(InstId, {Type, NameOrSQL}, AfterQuery, #{poolname := _PoolName} = State) -> - on_query(InstId, {Type, NameOrSQL, []}, AfterQuery, State); -on_query(InstId, {Type, NameOrSQL, Params}, AfterQuery, #{poolname := PoolName} = State) -> +on_query(InstId, {Type, NameOrSQL}, #{poolname := _PoolName} = State) -> + on_query(InstId, {Type, NameOrSQL, []}, State); +on_query(InstId, {Type, NameOrSQL, Params}, #{poolname := PoolName} = State) -> ?SLOG(debug, #{ msg => "postgresql connector received sql query", connector => InstId, @@ -132,10 +135,9 @@ on_query(InstId, {Type, NameOrSQL, Params}, AfterQuery, #{poolname := PoolName} connector => InstId, sql => NameOrSQL, reason => Reason - }), - emqx_resource:query_failed(AfterQuery); + }); _ -> - emqx_resource:query_success(AfterQuery) + ok end, Result. diff --git a/apps/emqx_connector/src/emqx_connector_redis.erl b/apps/emqx_connector/src/emqx_connector_redis.erl index 95677b766..a1e864f1d 100644 --- a/apps/emqx_connector/src/emqx_connector_redis.erl +++ b/apps/emqx_connector/src/emqx_connector_redis.erl @@ -26,9 +26,10 @@ %% callbacks of behaviour emqx_resource -export([ + callback_mode/0, on_start/2, on_stop/2, - on_query/4, + on_query/3, on_get_status/2 ]). 
@@ -112,6 +113,8 @@ servers(desc) -> ?DESC("servers"); servers(_) -> undefined. %% =================================================================== +callback_mode() -> always_sync. + on_start( InstId, #{ @@ -177,7 +180,7 @@ on_stop(InstId, #{poolname := PoolName, type := Type}) -> _ -> emqx_plugin_libs_pool:stop_pool(PoolName) end. -on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := Type} = State) -> +on_query(InstId, {cmd, Command}, #{poolname := PoolName, type := Type} = State) -> ?TRACE( "QUERY", "redis_connector_received", @@ -195,10 +198,9 @@ on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := T connector => InstId, sql => Command, reason => Reason - }), - emqx_resource:query_failed(AfterCommand); + }); _ -> - emqx_resource:query_success(AfterCommand) + ok end, Result. diff --git a/apps/emqx_connector/src/emqx_connector_schema.erl b/apps/emqx_connector/src/emqx_connector_schema.erl deleted file mode 100644 index b0f20924f..000000000 --- a/apps/emqx_connector/src/emqx_connector_schema.erl +++ /dev/null @@ -1,77 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- --module(emqx_connector_schema). - --behaviour(hocon_schema). - --include_lib("typerefl/include/types.hrl"). 
--include_lib("hocon/include/hoconsc.hrl"). - --export([namespace/0, roots/0, fields/1, desc/1]). - --export([ - get_response/0, - put_request/0, - post_request/0 -]). - -%% the config for webhook bridges do not need connectors --define(CONN_TYPES, [mqtt]). - -%%====================================================================================== -%% For HTTP APIs - -get_response() -> - http_schema("get"). - -put_request() -> - http_schema("put"). - -post_request() -> - http_schema("post"). - -http_schema(Method) -> - Schemas = [?R_REF(schema_mod(Type), Method) || Type <- ?CONN_TYPES], - ?UNION(Schemas). - -%%====================================================================================== -%% Hocon Schema Definitions - -namespace() -> connector. - -roots() -> ["connectors"]. - -fields(connectors) -> - fields("connectors"); -fields("connectors") -> - [ - {mqtt, - ?HOCON( - ?MAP(name, ?R_REF(emqx_connector_mqtt_schema, "connector")), - #{desc => ?DESC("mqtt")} - )} - ]. - -desc(Record) when - Record =:= connectors; - Record =:= "connectors" --> - ?DESC("desc_connector"); -desc(_) -> - undefined. - -schema_mod(Type) -> - list_to_atom(lists:concat(["emqx_connector_", Type])). diff --git a/apps/emqx_connector/src/emqx_connector_schema_lib.erl b/apps/emqx_connector/src/emqx_connector_schema_lib.erl index dd85566ed..53643c9f9 100644 --- a/apps/emqx_connector/src/emqx_connector_schema_lib.erl +++ b/apps/emqx_connector/src/emqx_connector_schema_lib.erl @@ -68,6 +68,8 @@ ssl_fields() -> relational_db_fields() -> [ {database, fun database/1}, + %% TODO: The `pool_size` for drivers will be deprecated. Ues `worker_pool_size` for emqx_resource + %% See emqx_resource.hrl {pool_size, fun pool_size/1}, {username, fun username/1}, {password, fun password/1}, @@ -102,6 +104,7 @@ username(_) -> undefined. password(type) -> binary(); password(desc) -> ?DESC("password"); password(required) -> false; +password(format) -> <<"password">>; password(_) -> undefined. 
auto_reconnect(type) -> boolean(); diff --git a/apps/emqx_connector/src/emqx_connector_ssl.erl b/apps/emqx_connector/src/emqx_connector_ssl.erl index 450206ced..9c0133ac9 100644 --- a/apps/emqx_connector/src/emqx_connector_ssl.erl +++ b/apps/emqx_connector/src/emqx_connector_ssl.erl @@ -24,20 +24,6 @@ try_clear_certs/3 ]). -%% TODO: rm `connector` case after `dev/ee5.0` merged into `master`. -%% The `connector` config layer will be removed. -%% for bridges with `connector` field. i.e. `mqtt_source` and `mqtt_sink` -convert_certs(RltvDir, #{<<"connector">> := Connector} = Config) when - is_map(Connector) --> - SSL = maps:get(<<"ssl">>, Connector, undefined), - new_ssl_config(RltvDir, Config, SSL); -convert_certs(RltvDir, #{connector := Connector} = Config) when - is_map(Connector) --> - SSL = maps:get(ssl, Connector, undefined), - new_ssl_config(RltvDir, Config, SSL); -%% for bridges without `connector` field. i.e. webhook convert_certs(RltvDir, #{<<"ssl">> := SSL} = Config) -> new_ssl_config(RltvDir, Config, SSL); convert_certs(RltvDir, #{ssl := SSL} = Config) -> @@ -49,14 +35,6 @@ convert_certs(_RltvDir, Config) -> clear_certs(RltvDir, Config) -> clear_certs2(RltvDir, normalize_key_to_bin(Config)). -clear_certs2(RltvDir, #{<<"connector">> := Connector} = _Config) when - is_map(Connector) --> - %% TODO remove the 'connector' clause after dev/ee5.0 is merged back to master - %% The `connector` config layer will be removed. - %% for bridges with `connector` field. i.e. `mqtt_source` and `mqtt_sink` - OldSSL = maps:get(<<"ssl">>, Connector, undefined), - ok = emqx_tls_lib:delete_ssl_files(RltvDir, undefined, OldSSL); clear_certs2(RltvDir, #{<<"ssl">> := OldSSL} = _Config) -> ok = emqx_tls_lib:delete_ssl_files(RltvDir, undefined, OldSSL); clear_certs2(_RltvDir, _) -> @@ -69,8 +47,6 @@ try_clear_certs(RltvDir, NewConf, OldConf) -> normalize_key_to_bin(OldConf) ). 
-try_clear_certs2(RltvDir, #{<<"connector">> := NewConnector}, #{<<"connector">> := OldConnector}) -> - try_clear_certs2(RltvDir, NewConnector, OldConnector); try_clear_certs2(RltvDir, NewConf, OldConf) -> NewSSL = try_map_get(<<"ssl">>, NewConf, undefined), OldSSL = try_map_get(<<"ssl">>, OldConf, undefined), @@ -95,7 +71,9 @@ new_ssl_config(#{<<"ssl">> := _} = Config, NewSSL) -> new_ssl_config(Config, _NewSSL) -> Config. -normalize_key_to_bin(Map) -> +normalize_key_to_bin(undefined) -> + undefined; +normalize_key_to_bin(Map) when is_map(Map) -> emqx_map_lib:binary_key_map(Map). try_map_get(Key, Map, Default) when is_map(Map) -> diff --git a/apps/emqx_connector/src/emqx_connector_utils.erl b/apps/emqx_connector/src/emqx_connector_utils.erl new file mode 100644 index 000000000..94b12921d --- /dev/null +++ b/apps/emqx_connector/src/emqx_connector_utils.erl @@ -0,0 +1,19 @@ +-module(emqx_connector_utils). + +-export([split_insert_sql/1]). + +%% SQL = <<"INSERT INTO \"abc\" (c1,c2,c3) VALUES (${1}, ${1}, ${1})">> +split_insert_sql(SQL) -> + case re:split(SQL, "((?i)values)", [{return, binary}]) of + [Part1, _, Part3] -> + case string:trim(Part1, leading) of + <<"insert", _/binary>> = InsertSQL -> + {ok, {InsertSQL, Part3}}; + <<"INSERT", _/binary>> = InsertSQL -> + {ok, {InsertSQL, Part3}}; + _ -> + {error, not_insert_sql} + end; + _ -> + {error, not_insert_sql} + end. diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_mod.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_mod.erl index d0251104b..f1ecbf68c 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_mod.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_mod.erl @@ -21,6 +21,7 @@ -export([ start/1, send/2, + send_async/3, stop/1, ping/1 ]). @@ -32,7 +33,6 @@ %% callbacks for emqtt -export([ - handle_puback/2, handle_publish/3, handle_disconnected/2 ]). @@ -134,44 +134,11 @@ safe_stop(Pid, StopF, Timeout) -> exit(Pid, kill) end. -send(Conn, Msgs) -> - send(Conn, Msgs, []). 
+send(#{client_pid := ClientPid}, Msg) -> + emqtt:publish(ClientPid, Msg). -send(_Conn, [], []) -> - %% all messages in the batch are QoS-0 - Ref = make_ref(), - %% QoS-0 messages do not have packet ID - %% the batch ack is simulated with a loop-back message - self() ! {batch_ack, Ref}, - {ok, Ref}; -send(_Conn, [], PktIds) -> - %% PktIds is not an empty list if there is any non-QoS-0 message in the batch, - %% And the worker should wait for all acks - {ok, PktIds}; -send(#{client_pid := ClientPid} = Conn, [Msg | Rest], PktIds) -> - case emqtt:publish(ClientPid, Msg) of - ok -> - send(Conn, Rest, PktIds); - {ok, PktId} -> - send(Conn, Rest, [PktId | PktIds]); - {error, Reason} -> - %% NOTE: There is no partial success of a batch and recover from the middle - %% only to retry all messages in one batch - {error, Reason} - end. - -handle_puback(#{packet_id := PktId, reason_code := RC}, Parent) when - RC =:= ?RC_SUCCESS; - RC =:= ?RC_NO_MATCHING_SUBSCRIBERS --> - Parent ! {batch_ack, PktId}, - ok; -handle_puback(#{packet_id := PktId, reason_code := RC}, _Parent) -> - ?SLOG(warning, #{ - msg => "publish_to_remote_node_falied", - packet_id => PktId, - reason_code => RC - }). +send_async(#{client_pid := ClientPid}, Msg, Callback) -> + emqtt:publish_async(ClientPid, Msg, infinity, Callback). handle_publish(Msg, undefined, _Opts) -> ?SLOG(error, #{ @@ -200,14 +167,13 @@ handle_disconnected(Reason, Parent) -> make_hdlr(Parent, Vars, Opts) -> #{ - puback => {fun ?MODULE:handle_puback/2, [Parent]}, publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]}, disconnected => {fun ?MODULE:handle_disconnected/2, [Parent]} }. 
sub_remote_topics(_ClientPid, undefined) -> ok; -sub_remote_topics(ClientPid, #{remote_topic := FromTopic, remote_qos := QoS}) -> +sub_remote_topics(ClientPid, #{remote := #{topic := FromTopic, qos := QoS}}) -> case emqtt:subscribe(ClientPid, FromTopic, QoS) of {ok, _, _} -> ok; Error -> throw(Error) @@ -217,12 +183,10 @@ process_config(Config) -> maps:without([conn_type, address, receive_mountpoint, subscriptions, name], Config). maybe_publish_to_local_broker(Msg, Vars, Props) -> - case maps:get(local_topic, Vars, undefined) of - undefined -> - %% local topic is not set, discard it - ok; - _ -> - _ = emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props)) + case emqx_map_lib:deep_get([local, topic], Vars, undefined) of + %% local topic is not set, discard it + undefined -> ok; + _ -> emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props)) end. format_msg_received( diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_msg.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_msg.erl index 469dd952b..0d03465d3 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_msg.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_msg.erl @@ -38,14 +38,16 @@ -type msg() :: emqx_types:message(). -type exp_msg() :: emqx_types:message() | #mqtt_msg{}. - --type variables() :: #{ - mountpoint := undefined | binary(), - remote_topic := binary(), - remote_qos := original | integer(), +-type remote_config() :: #{ + topic := binary(), + qos := original | integer(), retain := original | boolean(), payload := binary() }. +-type variables() :: #{ + mountpoint := undefined | binary(), + remote := remote_config() +}. 
make_pub_vars(_, undefined) -> undefined; @@ -67,10 +69,12 @@ to_remote_msg(#message{flags = Flags0} = Msg, Vars) -> MapMsg = maps:put(retain, Retain0, Columns), to_remote_msg(MapMsg, Vars); to_remote_msg(MapMsg, #{ - remote_topic := TopicToken, - payload := PayloadToken, - remote_qos := QoSToken, - retain := RetainToken, + remote := #{ + topic := TopicToken, + payload := PayloadToken, + qos := QoSToken, + retain := RetainToken + }, mountpoint := Mountpoint }) when is_map(MapMsg) -> Topic = replace_vars_in_str(TopicToken, MapMsg), @@ -94,10 +98,12 @@ to_broker_msg(Msg, Vars, undefined) -> to_broker_msg( #{dup := Dup} = MapMsg, #{ - local_topic := TopicToken, - payload := PayloadToken, - local_qos := QoSToken, - retain := RetainToken, + local := #{ + topic := TopicToken, + payload := PayloadToken, + qos := QoSToken, + retain := RetainToken + }, mountpoint := Mountpoint }, Props diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl index 018331c8a..93bd846e4 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl @@ -28,25 +28,39 @@ desc/1 ]). --export([ - ingress_desc/0, - egress_desc/0 -]). - -import(emqx_schema, [mk_duration/2]). +-import(hoconsc, [mk/2, ref/2]). + namespace() -> "connector-mqtt". roots() -> fields("config"). 
fields("config") -> - fields("connector") ++ - topic_mappings(); -fields("connector") -> + fields("server_configs") ++ + [ + {"ingress", + mk( + ref(?MODULE, "ingress"), + #{ + required => {false, recursively}, + desc => ?DESC("ingress_desc") + } + )}, + {"egress", + mk( + ref(?MODULE, "egress"), + #{ + required => {false, recursively}, + desc => ?DESC("egress_desc") + } + )} + ]; +fields("server_configs") -> [ {mode, - sc( + mk( hoconsc:enum([cluster_shareload]), #{ default => cluster_shareload, @@ -54,7 +68,7 @@ fields("connector") -> } )}, {server, - sc( + mk( emqx_schema:host_port(), #{ required => true, @@ -68,7 +82,7 @@ fields("connector") -> #{default => "15s"} )}, {proto_ver, - sc( + mk( hoconsc:enum([v3, v4, v5]), #{ default => v4, @@ -76,7 +90,7 @@ fields("connector") -> } )}, {bridge_mode, - sc( + mk( boolean(), #{ default => false, @@ -84,21 +98,23 @@ fields("connector") -> } )}, {username, - sc( + mk( binary(), #{ desc => ?DESC("username") } )}, {password, - sc( + mk( binary(), #{ + format => <<"password">>, + sensitive => true, desc => ?DESC("password") } )}, {clean_start, - sc( + mk( boolean(), #{ default => true, @@ -113,20 +129,34 @@ fields("connector") -> #{default => "15s"} )}, {max_inflight, - sc( + mk( non_neg_integer(), #{ default => 32, desc => ?DESC("max_inflight") } - )}, - {replayq, sc(ref("replayq"), #{})} + )} ] ++ emqx_connector_schema_lib:ssl_fields(); fields("ingress") -> - %% the message maybe subscribed by rules, in this case 'local_topic' is not necessary [ - {remote_topic, - sc( + {"remote", + mk( + ref(?MODULE, "ingress_remote"), + #{desc => ?DESC(emqx_connector_mqtt_schema, "ingress_remote")} + )}, + {"local", + mk( + ref(?MODULE, "ingress_local"), + #{ + desc => ?DESC(emqx_connector_mqtt_schema, "ingress_local"), + is_required => false + } + )} + ]; +fields("ingress_remote") -> + [ + {topic, + mk( binary(), #{ required => true, @@ -134,47 +164,44 @@ fields("ingress") -> desc => ?DESC("ingress_remote_topic") } )}, - {remote_qos, 
- sc( + {qos, + mk( qos(), #{ default => 1, desc => ?DESC("ingress_remote_qos") } - )}, - {local_topic, - sc( + )} + ]; +fields("ingress_local") -> + [ + {topic, + mk( binary(), #{ validator => fun emqx_schema:non_empty_string/1, - desc => ?DESC("ingress_local_topic") + desc => ?DESC("ingress_local_topic"), + required => false } )}, - {local_qos, - sc( + {qos, + mk( qos(), #{ default => <<"${qos}">>, desc => ?DESC("ingress_local_qos") } )}, - {hookpoint, - sc( - binary(), - #{desc => ?DESC("ingress_hookpoint")} - )}, - {retain, - sc( + mk( hoconsc:union([boolean(), binary()]), #{ default => <<"${retain}">>, desc => ?DESC("retain") } )}, - {payload, - sc( + mk( binary(), #{ default => undefined, @@ -183,18 +210,40 @@ fields("ingress") -> )} ]; fields("egress") -> - %% the message maybe sent from rules, in this case 'local_topic' is not necessary [ - {local_topic, - sc( + {"local", + mk( + ref(?MODULE, "egress_local"), + #{ + desc => ?DESC(emqx_connector_mqtt_schema, "egress_local"), + required => false + } + )}, + {"remote", + mk( + ref(?MODULE, "egress_remote"), + #{ + desc => ?DESC(emqx_connector_mqtt_schema, "egress_remote"), + required => true + } + )} + ]; +fields("egress_local") -> + [ + {topic, + mk( binary(), #{ desc => ?DESC("egress_local_topic"), + required => false, validator => fun emqx_schema:non_empty_string/1 } - )}, - {remote_topic, - sc( + )} + ]; +fields("egress_remote") -> + [ + {topic, + mk( binary(), #{ required => true, @@ -202,104 +251,48 @@ fields("egress") -> desc => ?DESC("egress_remote_topic") } )}, - {remote_qos, - sc( + {qos, + mk( qos(), #{ required => true, desc => ?DESC("egress_remote_qos") } )}, - {retain, - sc( + mk( hoconsc:union([boolean(), binary()]), #{ required => true, desc => ?DESC("retain") } )}, - {payload, - sc( + mk( binary(), #{ default => undefined, desc => ?DESC("payload") } )} - ]; -fields("replayq") -> - [ - {dir, - sc( - hoconsc:union([boolean(), string()]), - #{desc => ?DESC("dir")} - )}, - {seg_bytes, - sc( - 
emqx_schema:bytesize(), - #{ - default => "100MB", - desc => ?DESC("seg_bytes") - } - )}, - {offload, - sc( - boolean(), - #{ - default => false, - desc => ?DESC("offload") - } - )} ]. -desc("connector") -> - ?DESC("desc_connector"); +desc("server_configs") -> + ?DESC("server_configs"); desc("ingress") -> - ingress_desc(); + ?DESC("ingress_desc"); +desc("ingress_remote") -> + ?DESC("ingress_remote"); +desc("ingress_local") -> + ?DESC("ingress_local"); desc("egress") -> - egress_desc(); -desc("replayq") -> - ?DESC("desc_replayq"); + ?DESC("egress_desc"); +desc("egress_remote") -> + ?DESC("egress_remote"); +desc("egress_local") -> + ?DESC("egress_local"); desc(_) -> undefined. -topic_mappings() -> - [ - {ingress, - sc( - ref("ingress"), - #{default => #{}} - )}, - {egress, - sc( - ref("egress"), - #{default => #{}} - )} - ]. - -ingress_desc() -> - "\n" - "The ingress config defines how this bridge receive messages from the remote MQTT broker, and then\n" - "send them to the local broker.
" - "Template with variables is allowed in 'local_topic', 'remote_qos', 'qos', 'retain',\n" - "'payload'.
" - "NOTE: if this bridge is used as the input of a rule (emqx rule engine), and also local_topic is\n" - "configured, then messages got from the remote broker will be sent to both the 'local_topic' and\n" - "the rule.\n". - -egress_desc() -> - "\n" - "The egress config defines how this bridge forwards messages from the local broker to the remote\n" - "broker.
" - "Template with variables is allowed in 'remote_topic', 'qos', 'retain', 'payload'.
" - "NOTE: if this bridge is used as the action of a rule (emqx rule engine), and also local_topic\n" - "is configured, then both the data got from the rule and the MQTT messages that matches\n" - "local_topic will be forwarded.\n". - qos() -> hoconsc:union([emqx_schema:qos(), binary()]). - -sc(Type, Meta) -> hoconsc:mk(Type, Meta). -ref(Field) -> hoconsc:ref(?MODULE, Field). diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_worker.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_worker.erl index db795a4cf..fe359437c 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_worker.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_worker.erl @@ -68,7 +68,6 @@ %% APIs -export([ start_link/1, - register_metrics/0, stop/1 ]). @@ -92,16 +91,14 @@ ensure_stopped/1, status/1, ping/1, - send_to_remote/2 + send_to_remote/2, + send_to_remote_async/3 ]). -export([get_forwards/1]). -export([get_subscriptions/1]). -%% Internal --export([msg_marshaller/1]). - -export_type([ config/0, ack_ref/0 @@ -134,12 +131,6 @@ %% mountpoint: The topic mount point for messages sent to remote node/cluster %% `undefined', `<<>>' or `""' to disable %% forwards: Local topics to subscribe. -%% replayq.batch_bytes_limit: Max number of bytes to collect in a batch for each -%% send call towards emqx_bridge_connect -%% replayq.batch_count_limit: Max number of messages to collect in a batch for -%% each send call towards emqx_bridge_connect -%% replayq.dir: Directory where replayq should persist messages -%% replayq.seg_bytes: Size in bytes for each replayq segment file %% %% Find more connection specific configs in the callback modules %% of emqx_bridge_connect behaviour. @@ -174,9 +165,14 @@ ping(Name) -> gen_statem:call(name(Name), ping). send_to_remote(Pid, Msg) when is_pid(Pid) -> - gen_statem:cast(Pid, {send_to_remote, Msg}); + gen_statem:call(Pid, {send_to_remote, Msg}); send_to_remote(Name, Msg) -> - gen_statem:cast(name(Name), {send_to_remote, Msg}). 
+ gen_statem:call(name(Name), {send_to_remote, Msg}). + +send_to_remote_async(Pid, Msg, Callback) when is_pid(Pid) -> + gen_statem:cast(Pid, {send_to_remote_async, Msg, Callback}); +send_to_remote_async(Name, Msg, Callback) -> + gen_statem:cast(name(Name), {send_to_remote_async, Msg, Callback}). %% @doc Return all forwards (local subscriptions). -spec get_forwards(id()) -> [topic()]. @@ -195,12 +191,10 @@ init(#{name := Name} = ConnectOpts) -> name => Name }), erlang:process_flag(trap_exit, true), - Queue = open_replayq(Name, maps:get(replayq, ConnectOpts, #{})), State = init_state(ConnectOpts), self() ! idle, {ok, idle, State#{ - connect_opts => pre_process_opts(ConnectOpts), - replayq => Queue + connect_opts => pre_process_opts(ConnectOpts) }}. init_state(Opts) -> @@ -213,32 +207,11 @@ init_state(Opts) -> start_type => StartType, reconnect_interval => ReconnDelayMs, mountpoint => format_mountpoint(Mountpoint), - inflight => [], max_inflight => MaxInflightSize, connection => undefined, name => Name }. -open_replayq(Name, QCfg) -> - Dir = maps:get(dir, QCfg, undefined), - SegBytes = maps:get(seg_bytes, QCfg, ?DEFAULT_SEG_BYTES), - MaxTotalSize = maps:get(max_total_size, QCfg, ?DEFAULT_MAX_TOTAL_SIZE), - QueueConfig = - case Dir =:= undefined orelse Dir =:= "" of - true -> - #{mem_only => true}; - false -> - #{ - dir => filename:join([Dir, node(), Name]), - seg_bytes => SegBytes, - max_total_size => MaxTotalSize - } - end, - replayq:open(QueueConfig#{ - sizer => fun emqx_connector_mqtt_msg:estimate_size/1, - marshaller => fun ?MODULE:msg_marshaller/1 - }). 
- pre_process_opts(#{subscriptions := InConf, forwards := OutConf} = ConnectOpts) -> ConnectOpts#{ subscriptions => pre_process_in_out(in, InConf), @@ -247,18 +220,22 @@ pre_process_opts(#{subscriptions := InConf, forwards := OutConf} = ConnectOpts) pre_process_in_out(_, undefined) -> undefined; +pre_process_in_out(in, #{local := LC} = Conf) when is_map(Conf) -> + Conf#{local => pre_process_in_out_common(LC)}; pre_process_in_out(in, Conf) when is_map(Conf) -> - Conf1 = pre_process_conf(local_topic, Conf), - Conf2 = pre_process_conf(local_qos, Conf1), - pre_process_in_out_common(Conf2); + %% have no 'local' field in the config + undefined; +pre_process_in_out(out, #{remote := RC} = Conf) when is_map(Conf) -> + Conf#{remote => pre_process_in_out_common(RC)}; pre_process_in_out(out, Conf) when is_map(Conf) -> - Conf1 = pre_process_conf(remote_topic, Conf), - Conf2 = pre_process_conf(remote_qos, Conf1), - pre_process_in_out_common(Conf2). + %% have no 'remote' field in the config + undefined. -pre_process_in_out_common(Conf) -> - Conf1 = pre_process_conf(payload, Conf), - pre_process_conf(retain, Conf1). +pre_process_in_out_common(Conf0) -> + Conf1 = pre_process_conf(topic, Conf0), + Conf2 = pre_process_conf(qos, Conf1), + Conf3 = pre_process_conf(payload, Conf2), + pre_process_conf(retain, Conf3). pre_process_conf(Key, Conf) -> case maps:find(Key, Conf) of @@ -273,9 +250,8 @@ pre_process_conf(Key, Conf) -> code_change(_Vsn, State, Data, _Extra) -> {ok, State, Data}. -terminate(_Reason, _StateName, #{replayq := Q} = State) -> +terminate(_Reason, _StateName, State) -> _ = disconnect(State), - _ = replayq:close(Q), maybe_destroy_session(State). 
maybe_destroy_session(#{connect_opts := ConnectOpts = #{clean_start := false}} = State) -> @@ -300,6 +276,8 @@ idle({call, From}, ensure_started, State) -> {error, Reason, _State} -> {keep_state_and_data, [{reply, From, {error, Reason}}]} end; +idle({call, From}, {send_to_remote, _}, _State) -> + {keep_state_and_data, [{reply, From, {error, {recoverable_error, not_connected}}}]}; %% @doc Standing by for manual start. idle(info, idle, #{start_type := manual}) -> keep_state_and_data; @@ -319,16 +297,19 @@ connecting(#{reconnect_interval := ReconnectDelayMs} = State) -> {keep_state_and_data, {state_timeout, ReconnectDelayMs, reconnect}} end. -connected(state_timeout, connected, #{inflight := Inflight} = State) -> - case retry_inflight(State#{inflight := []}, Inflight) of - {ok, NewState} -> - {keep_state, NewState, {next_event, internal, maybe_send}}; - {error, NewState} -> - {keep_state, NewState} +connected(state_timeout, connected, State) -> + %% nothing to do + {keep_state, State}; +connected({call, From}, {send_to_remote, Msg}, State) -> + case do_send(State, Msg) of + {ok, NState} -> + {keep_state, NState, [{reply, From, ok}]}; + {error, Reason} -> + {keep_state_and_data, [[reply, From, {error, Reason}]]} end; -connected(internal, maybe_send, State) -> - {_, NewState} = pop_and_send(State), - {keep_state, NewState}; +connected(cast, {send_to_remote_async, Msg, Callback}, State) -> + _ = do_send_async(State, Msg, Callback), + {keep_state, State}; connected( info, {disconnected, Conn, Reason}, @@ -342,9 +323,6 @@ connected( false -> keep_state_and_data end; -connected(info, {batch_ack, Ref}, State) -> - NewState = handle_batch_ack(State, Ref), - {keep_state, NewState, {next_event, internal, maybe_send}}; connected(Type, Content, State) -> common(connected, Type, Content, State). 
@@ -363,13 +341,12 @@ common(_StateName, {call, From}, get_forwards, #{connect_opts := #{forwards := F {keep_state_and_data, [{reply, From, Forwards}]}; common(_StateName, {call, From}, get_subscriptions, #{connection := Connection}) -> {keep_state_and_data, [{reply, From, maps:get(subscriptions, Connection, #{})}]}; +common(_StateName, {call, From}, Req, _State) -> + {keep_state_and_data, [{reply, From, {error, {unsupported_request, Req}}}]}; common(_StateName, info, {'EXIT', _, _}, State) -> {keep_state, State}; -common(_StateName, cast, {send_to_remote, Msg}, #{replayq := Q} = State) -> - NewQ = replayq:append(Q, [Msg]), - {keep_state, State#{replayq => NewQ}, {next_event, internal, maybe_send}}; common(StateName, Type, Content, #{name := Name} = State) -> - ?SLOG(notice, #{ + ?SLOG(error, #{ msg => "bridge_discarded_event", name => Name, type => Type, @@ -381,13 +358,12 @@ common(StateName, Type, Content, #{name := Name} = State) -> do_connect( #{ connect_opts := ConnectOpts, - inflight := Inflight, name := Name } = State ) -> case emqx_connector_mqtt_mod:start(ConnectOpts) of {ok, Conn} -> - ?tp(info, connected, #{name => Name, inflight => length(Inflight)}), + ?tp(info, connected, #{name => Name}), {ok, State#{connection => Conn}}; {error, Reason} -> ConnectOpts1 = obfuscate(ConnectOpts), @@ -399,39 +375,7 @@ do_connect( {error, Reason, State} end. -%% Retry all inflight (previously sent but not acked) batches. -retry_inflight(State, []) -> - {ok, State}; -retry_inflight(State, [#{q_ack_ref := QAckRef, msg := Msg} | Rest] = OldInf) -> - case do_send(State, QAckRef, Msg) of - {ok, State1} -> - retry_inflight(State1, Rest); - {error, #{inflight := NewInf} = State1} -> - {error, State1#{inflight := NewInf ++ OldInf}} - end. - -pop_and_send(#{inflight := Inflight, max_inflight := Max} = State) -> - pop_and_send_loop(State, Max - length(Inflight)). 
- -pop_and_send_loop(State, 0) -> - ?tp(debug, inflight_full, #{}), - {ok, State}; -pop_and_send_loop(#{replayq := Q} = State, N) -> - case replayq:is_empty(Q) of - true -> - ?tp(debug, replayq_drained, #{}), - {ok, State}; - false -> - BatchSize = 1, - Opts = #{count_limit => BatchSize, bytes_limit => 999999999}, - {Q1, QAckRef, [Msg]} = replayq:pop(Q, Opts), - case do_send(State#{replayq := Q1}, QAckRef, Msg) of - {ok, NewState} -> pop_and_send_loop(NewState, N - 1); - {error, NewState} -> {error, NewState} - end - end. - -do_send(#{connect_opts := #{forwards := undefined}}, _QAckRef, Msg) -> +do_send(#{connect_opts := #{forwards := undefined}}, Msg) -> ?SLOG(error, #{ msg => "cannot_forward_messages_to_remote_broker" @@ -440,99 +384,68 @@ do_send(#{connect_opts := #{forwards := undefined}}, _QAckRef, Msg) -> }); do_send( #{ - inflight := Inflight, connection := Connection, mountpoint := Mountpoint, connect_opts := #{forwards := Forwards} } = State, - QAckRef, Msg ) -> Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards), - ExportMsg = fun(Message) -> - emqx_metrics:inc('bridge.mqtt.message_sent_to_remote'), - emqx_connector_mqtt_msg:to_remote_msg(Message, Vars) - end, + ExportMsg = emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars), ?SLOG(debug, #{ msg => "publish_to_remote_broker", message => Msg, vars => Vars }), - case emqx_connector_mqtt_mod:send(Connection, [ExportMsg(Msg)]) of - {ok, Refs} -> - {ok, State#{ - inflight := Inflight ++ - [ - #{ - q_ack_ref => QAckRef, - send_ack_ref => map_set(Refs), - msg => Msg - } - ] - }}; + case emqx_connector_mqtt_mod:send(Connection, ExportMsg) of + ok -> + {ok, State}; + {ok, #{reason_code := RC}} when + RC =:= ?RC_SUCCESS; + RC =:= ?RC_NO_MATCHING_SUBSCRIBERS + -> + {ok, State}; + {ok, #{reason_code := RC, reason_code_name := RCN}} -> + ?SLOG(warning, #{ + msg => "publish_to_remote_node_falied", + message => Msg, + reason_code => RC, + reason_code_name => RCN + }), + {error, RCN}; {error, Reason} -> 
?SLOG(info, #{ msg => "mqtt_bridge_produce_failed", reason => Reason }), - {error, State} + {error, Reason} end. -%% map as set, ack-reference -> 1 -map_set(Ref) when is_reference(Ref) -> - %% QoS-0 or RPC call returns a reference - map_set([Ref]); -map_set(List) -> - map_set(List, #{}). - -map_set([], Set) -> Set; -map_set([H | T], Set) -> map_set(T, Set#{H => 1}). - -handle_batch_ack(#{inflight := Inflight0, replayq := Q} = State, Ref) -> - Inflight1 = do_ack(Inflight0, Ref), - Inflight = drop_acked_batches(Q, Inflight1), - State#{inflight := Inflight}. - -do_ack([], Ref) -> - ?SLOG(debug, #{ - msg => "stale_batch_ack_reference", - ref => Ref - }), - []; -do_ack([#{send_ack_ref := Refs} = First | Rest], Ref) -> - case maps:is_key(Ref, Refs) of - true -> - NewRefs = maps:without([Ref], Refs), - [First#{send_ack_ref := NewRefs} | Rest]; - false -> - [First | do_ack(Rest, Ref)] - end. - -%% Drop the consecutive header of the inflight list having empty send_ack_ref -drop_acked_batches(_Q, []) -> - ?tp(debug, inflight_drained, #{}), - []; -drop_acked_batches( - Q, - [ - #{ - send_ack_ref := Refs, - q_ack_ref := QAckRef - } - | Rest - ] = All +do_send_async(#{connect_opts := #{forwards := undefined}}, Msg, _Callback) -> + %% TODO: eval callback with undefined error + ?SLOG(error, #{ + msg => + "cannot_forward_messages_to_remote_broker" + "_as_'egress'_is_not_configured", + messages => Msg + }); +do_send_async( + #{ + connection := Connection, + mountpoint := Mountpoint, + connect_opts := #{forwards := Forwards} + }, + Msg, + Callback ) -> - case maps:size(Refs) of - 0 -> - %% all messages are acked by bridge target - %% now it's safe to ack replayq (delete from disk) - ok = replayq:ack(Q, QAckRef), - %% continue to check more sent batches - drop_acked_batches(Q, Rest); - _ -> - %% the head (oldest) inflight batch is not acked, keep waiting - All - end. 
+ Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards), + ExportMsg = emqx_connector_mqtt_msg:to_remote_msg(Msg, Vars), + ?SLOG(debug, #{ + msg => "publish_to_remote_broker", + message => Msg, + vars => Vars + }), + emqx_connector_mqtt_mod:send_async(Connection, ExportMsg, Callback). disconnect(#{connection := Conn} = State) when Conn =/= undefined -> emqx_connector_mqtt_mod:stop(Conn), @@ -540,10 +453,6 @@ disconnect(#{connection := Conn} = State) when Conn =/= undefined -> disconnect(State) -> State. -%% Called only when replayq needs to dump it to disk. -msg_marshaller(Bin) when is_binary(Bin) -> emqx_connector_mqtt_msg:from_binary(Bin); -msg_marshaller(Msg) -> emqx_connector_mqtt_msg:to_binary(Msg). - format_mountpoint(undefined) -> undefined; format_mountpoint(Prefix) -> @@ -551,15 +460,6 @@ format_mountpoint(Prefix) -> name(Id) -> list_to_atom(str(Id)). -register_metrics() -> - lists:foreach( - fun emqx_metrics:ensure/1, - [ - 'bridge.mqtt.message_sent_to_remote', - 'bridge.mqtt.message_received_from_remote' - ] - ). - obfuscate(Map) -> maps:fold( fun(K, V, Acc) -> diff --git a/apps/emqx_connector/test/emqx_connector_SUITE.erl b/apps/emqx_connector/test/emqx_connector_SUITE.erl deleted file mode 100644 index c4a6418c2..000000000 --- a/apps/emqx_connector/test/emqx_connector_SUITE.erl +++ /dev/null @@ -1,94 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - --module(emqx_connector_SUITE). - --compile(nowarn_export_all). --compile(export_all). - --include("emqx/include/emqx.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). - --define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>). --define(MQTT_CONNECTOR(Username), #{ - <<"server">> => <<"127.0.0.1:1883">>, - <<"username">> => Username, - <<"password">> => <<"">>, - <<"proto_ver">> => <<"v4">>, - <<"ssl">> => #{<<"enable">> => false} -}). --define(CONNECTOR_TYPE, <<"mqtt">>). --define(CONNECTOR_NAME, <<"test_connector_42">>). - -all() -> - emqx_common_test_helpers:all(?MODULE). - -groups() -> - []. - -suite() -> - []. - -init_per_suite(Config) -> - _ = application:load(emqx_conf), - %% some testcases (may from other app) already get emqx_connector started - _ = application:stop(emqx_resource), - _ = application:stop(emqx_connector), - ok = emqx_common_test_helpers:start_apps( - [ - emqx_connector, - emqx_bridge - ] - ), - ok = emqx_common_test_helpers:load_config(emqx_connector_schema, <<"connectors: {}">>), - Config. - -end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([ - emqx_connector, - emqx_bridge - ]), - ok. - -init_per_testcase(_, Config) -> - {ok, _} = emqx_cluster_rpc:start_link(), - Config. -end_per_testcase(_, _Config) -> - ok. - -t_list_raw_empty(_) -> - ok = emqx_config:erase(hd(emqx_connector:config_key_path())), - Result = emqx_connector:list_raw(), - ?assertEqual([], Result). - -t_lookup_raw_error(_) -> - Result = emqx_connector:lookup_raw(<<"foo:bar">>), - ?assertEqual({error, not_found}, Result). - -t_parse_connector_id_error(_) -> - ?assertError( - {invalid_connector_id, <<"foobar">>}, emqx_connector:parse_connector_id(<<"foobar">>) - ). 
- -t_update_connector_does_not_exist(_) -> - Config = ?MQTT_CONNECTOR(<<"user1">>), - ?assertMatch({ok, _Config}, emqx_connector:update(?CONNECTOR_TYPE, ?CONNECTOR_NAME, Config)). - -t_delete_connector_does_not_exist(_) -> - ?assertEqual({ok, #{post_config_update => #{}}}, emqx_connector:delete(<<"foo:bar">>)). - -t_connector_id_using_list(_) -> - <<"foo:bar">> = emqx_connector:connector_id("foo", "bar"). diff --git a/apps/emqx_connector/test/emqx_connector_api_SUITE.erl b/apps/emqx_connector/test/emqx_connector_api_SUITE.erl deleted file mode 100644 index ef5db26f2..000000000 --- a/apps/emqx_connector/test/emqx_connector_api_SUITE.erl +++ /dev/null @@ -1,812 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - --module(emqx_connector_api_SUITE). - --compile(nowarn_export_all). --compile(export_all). - --import(emqx_dashboard_api_test_helpers, [request/4, uri/1]). - --include("emqx/include/emqx.hrl"). --include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). --include("emqx_dashboard/include/emqx_dashboard.hrl"). - -%% output functions --export([inspect/3]). - --define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>). --define(CONNECTR_TYPE, <<"mqtt">>). --define(CONNECTR_NAME, <<"test_connector">>). 
--define(BRIDGE_NAME_INGRESS, <<"ingress_test_bridge">>). --define(BRIDGE_NAME_EGRESS, <<"egress_test_bridge">>). --define(MQTT_CONNECTOR(Username), #{ - <<"server">> => <<"127.0.0.1:1883">>, - <<"username">> => Username, - <<"password">> => <<"">>, - <<"proto_ver">> => <<"v4">>, - <<"ssl">> => #{<<"enable">> => false} -}). --define(MQTT_CONNECTOR2(Server), ?MQTT_CONNECTOR(<<"user1">>)#{<<"server">> => Server}). - --define(MQTT_BRIDGE_INGRESS(ID), #{ - <<"connector">> => ID, - <<"direction">> => <<"ingress">>, - <<"remote_topic">> => <<"remote_topic/#">>, - <<"remote_qos">> => 2, - <<"local_topic">> => <<"local_topic/${topic}">>, - <<"local_qos">> => <<"${qos}">>, - <<"payload">> => <<"${payload}">>, - <<"retain">> => <<"${retain}">> -}). - --define(MQTT_BRIDGE_EGRESS(ID), #{ - <<"connector">> => ID, - <<"direction">> => <<"egress">>, - <<"local_topic">> => <<"local_topic/#">>, - <<"remote_topic">> => <<"remote_topic/${topic}">>, - <<"payload">> => <<"${payload}">>, - <<"remote_qos">> => <<"${qos}">>, - <<"retain">> => <<"${retain}">> -}). - --define(metrics(MATCH, SUCC, FAILED, SPEED, SPEED5M, SPEEDMAX), #{ - <<"matched">> := MATCH, - <<"success">> := SUCC, - <<"failed">> := FAILED, - <<"rate">> := SPEED, - <<"rate_last5m">> := SPEED5M, - <<"rate_max">> := SPEEDMAX -}). - -inspect(Selected, _Envs, _Args) -> - persistent_term:put(?MODULE, #{inspect => Selected}). - -all() -> - emqx_common_test_helpers:all(?MODULE). - -groups() -> - []. - -suite() -> - [{timetrap, {seconds, 30}}]. 
- -init_per_suite(Config) -> - _ = application:load(emqx_conf), - %% some testcases (may from other app) already get emqx_connector started - _ = application:stop(emqx_resource), - _ = application:stop(emqx_connector), - ok = emqx_common_test_helpers:start_apps( - [ - emqx_rule_engine, - emqx_connector, - emqx_bridge, - emqx_dashboard - ], - fun set_special_configs/1 - ), - ok = emqx_common_test_helpers:load_config(emqx_connector_schema, <<"connectors: {}">>), - ok = emqx_common_test_helpers:load_config( - emqx_rule_engine_schema, - <<"rule_engine {rules {}}">> - ), - ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?BRIDGE_CONF_DEFAULT), - Config. - -end_per_suite(_Config) -> - emqx_common_test_helpers:stop_apps([ - emqx_rule_engine, - emqx_connector, - emqx_bridge, - emqx_dashboard - ]), - ok. - -set_special_configs(emqx_dashboard) -> - emqx_dashboard_api_test_helpers:set_default_config(<<"connector_admin">>); -set_special_configs(_) -> - ok. - -init_per_testcase(_, Config) -> - {ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000), - Config. -end_per_testcase(_, _Config) -> - clear_resources(), - ok. - -clear_resources() -> - lists:foreach( - fun(#{id := Id}) -> - ok = emqx_rule_engine:delete_rule(Id) - end, - emqx_rule_engine:get_rules() - ), - lists:foreach( - fun(#{type := Type, name := Name}) -> - {ok, _} = emqx_bridge:remove(Type, Name) - end, - emqx_bridge:list() - ), - lists:foreach( - fun(#{<<"type">> := Type, <<"name">> := Name}) -> - {ok, _} = emqx_connector:delete(Type, Name) - end, - emqx_connector:list_raw() - ). 
- -%%------------------------------------------------------------------------------ -%% Testcases -%%------------------------------------------------------------------------------ - -t_mqtt_crud_apis(_) -> - %% assert we there's no connectors at first - {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []), - - %% then we add a mqtt connector, using POST - %% POST /connectors/ will create a connector - User1 = <<"user1">>, - {ok, 400, << - "{\"code\":\"BAD_REQUEST\",\"message\"" - ":\"missing some required fields: [name, type]\"}" - >>} = - request( - post, - uri(["connectors"]), - ?MQTT_CONNECTOR(User1)#{<<"type">> => ?CONNECTR_TYPE} - ), - {ok, 201, Connector} = request( - post, - uri(["connectors"]), - ?MQTT_CONNECTOR(User1)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?CONNECTR_NAME - } - ), - - #{ - <<"type">> := ?CONNECTR_TYPE, - <<"name">> := ?CONNECTR_NAME, - <<"server">> := <<"127.0.0.1:1883">>, - <<"username">> := User1, - <<"password">> := <<"">>, - <<"proto_ver">> := <<"v4">>, - <<"ssl">> := #{<<"enable">> := false} - } = jsx:decode(Connector), - ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), - %% update the request-path of the connector - User2 = <<"user2">>, - {ok, 200, Connector2} = request( - put, - uri(["connectors", ConnctorID]), - ?MQTT_CONNECTOR(User2) - ), - ?assertMatch( - #{ - <<"type">> := ?CONNECTR_TYPE, - <<"name">> := ?CONNECTR_NAME, - <<"server">> := <<"127.0.0.1:1883">>, - <<"username">> := User2, - <<"password">> := <<"">>, - <<"proto_ver">> := <<"v4">>, - <<"ssl">> := #{<<"enable">> := false} - }, - jsx:decode(Connector2) - ), - - %% list all connectors again, assert Connector2 is in it - {ok, 200, Connector2Str} = request(get, uri(["connectors"]), []), - ?assertMatch( - [ - #{ - <<"type">> := ?CONNECTR_TYPE, - <<"name">> := ?CONNECTR_NAME, - <<"server">> := <<"127.0.0.1:1883">>, - <<"username">> := User2, - <<"password">> := <<"">>, - <<"proto_ver">> := <<"v4">>, - <<"ssl">> := #{<<"enable">> := 
false} - } - ], - jsx:decode(Connector2Str) - ), - - %% get the connector by id - {ok, 200, Connector3Str} = request(get, uri(["connectors", ConnctorID]), []), - ?assertMatch( - #{ - <<"type">> := ?CONNECTR_TYPE, - <<"name">> := ?CONNECTR_NAME, - <<"server">> := <<"127.0.0.1:1883">>, - <<"username">> := User2, - <<"password">> := <<"">>, - <<"proto_ver">> := <<"v4">>, - <<"ssl">> := #{<<"enable">> := false} - }, - jsx:decode(Connector3Str) - ), - - %% delete the connector - {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []), - {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []), - - %% update a deleted connector returns an error - {ok, 404, ErrMsg2} = request( - put, - uri(["connectors", ConnctorID]), - ?MQTT_CONNECTOR(User2) - ), - ?assertMatch( - #{ - <<"code">> := _, - <<"message">> := <<"connector not found">> - }, - jsx:decode(ErrMsg2) - ), - ok. - -t_mqtt_conn_bridge_ingress(_) -> - %% then we add a mqtt connector, using POST - User1 = <<"user1">>, - {ok, 201, Connector} = request( - post, - uri(["connectors"]), - ?MQTT_CONNECTOR(User1)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?CONNECTR_NAME - } - ), - - #{ - <<"type">> := ?CONNECTR_TYPE, - <<"name">> := ?CONNECTR_NAME, - <<"server">> := <<"127.0.0.1:1883">>, - <<"num_of_bridges">> := 0, - <<"username">> := User1, - <<"password">> := <<"">>, - <<"proto_ver">> := <<"v4">>, - <<"ssl">> := #{<<"enable">> := false} - } = jsx:decode(Connector), - ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), - %% ... 
and a MQTT bridge, using POST - %% we bind this bridge to the connector created just now - timer:sleep(50), - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), - ?MQTT_BRIDGE_INGRESS(ConnctorID)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?BRIDGE_NAME_INGRESS - } - ), - #{ - <<"type">> := ?CONNECTR_TYPE, - <<"name">> := ?BRIDGE_NAME_INGRESS, - <<"connector">> := ConnctorID - } = jsx:decode(Bridge), - BridgeIDIngress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS), - wait_for_resource_ready(BridgeIDIngress, 5), - - %% we now test if the bridge works as expected - RemoteTopic = <<"remote_topic/1">>, - LocalTopic = <<"local_topic/", RemoteTopic/binary>>, - Payload = <<"hello">>, - emqx:subscribe(LocalTopic), - timer:sleep(100), - %% PUBLISH a message to the 'remote' broker, as we have only one broker, - %% the remote broker is also the local one. - emqx:publish(emqx_message:make(RemoteTopic, Payload)), - %% we should receive a message on the local broker, with specified topic - ?assert( - receive - {deliver, LocalTopic, #message{payload = Payload}} -> - ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]), - true; - Msg -> - ct:pal("Msg: ~p", [Msg]), - false - after 100 -> - false - end - ), - - %% get the connector by id, verify the num_of_bridges now is 1 - {ok, 200, Connector1Str} = request(get, uri(["connectors", ConnctorID]), []), - ?assertMatch(#{<<"num_of_bridges">> := 1}, jsx:decode(Connector1Str)), - - %% delete the bridge - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []), - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - - %% delete the connector - {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []), - {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []), - ok. 
- -t_mqtt_conn_bridge_egress(_) -> - %% then we add a mqtt connector, using POST - User1 = <<"user1">>, - {ok, 201, Connector} = request( - post, - uri(["connectors"]), - ?MQTT_CONNECTOR(User1)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?CONNECTR_NAME - } - ), - - %ct:pal("---connector: ~p", [Connector]), - #{ - <<"server">> := <<"127.0.0.1:1883">>, - <<"username">> := User1, - <<"password">> := <<"">>, - <<"proto_ver">> := <<"v4">>, - <<"ssl">> := #{<<"enable">> := false} - } = jsx:decode(Connector), - ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), - %% ... and a MQTT bridge, using POST - %% we bind this bridge to the connector created just now - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), - ?MQTT_BRIDGE_EGRESS(ConnctorID)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?BRIDGE_NAME_EGRESS - } - ), - #{ - <<"type">> := ?CONNECTR_TYPE, - <<"name">> := ?BRIDGE_NAME_EGRESS, - <<"connector">> := ConnctorID - } = jsx:decode(Bridge), - BridgeIDEgress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), - wait_for_resource_ready(BridgeIDEgress, 5), - - %% we now test if the bridge works as expected - LocalTopic = <<"local_topic/1">>, - RemoteTopic = <<"remote_topic/", LocalTopic/binary>>, - Payload = <<"hello">>, - emqx:subscribe(RemoteTopic), - timer:sleep(100), - %% PUBLISH a message to the 'local' broker, as we have only one broker, - %% the remote broker is also the local one. 
- emqx:publish(emqx_message:make(LocalTopic, Payload)), - - %% we should receive a message on the "remote" broker, with specified topic - ?assert( - receive - {deliver, RemoteTopic, #message{payload = Payload}} -> - ct:pal("local broker got message: ~p on topic ~p", [Payload, RemoteTopic]), - true; - Msg -> - ct:pal("Msg: ~p", [Msg]), - false - after 100 -> - false - end - ), - - %% verify the metrics of the bridge - {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), - ?assertMatch( - #{ - <<"metrics">> := ?metrics(1, 1, 0, _, _, _), - <<"node_metrics">> := - [#{<<"node">> := _, <<"metrics">> := ?metrics(1, 1, 0, _, _, _)}] - }, - jsx:decode(BridgeStr) - ), - - %% delete the bridge - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - - %% delete the connector - {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []), - {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []), - ok. - -%% t_mqtt_conn_update: -%% - update a connector should also update all of the the bridges -%% - cannot delete a connector that is used by at least one bridge -t_mqtt_conn_update(_) -> - %% then we add a mqtt connector, using POST - {ok, 201, Connector} = request( - post, - uri(["connectors"]), - ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?CONNECTR_NAME - } - ), - - %ct:pal("---connector: ~p", [Connector]), - #{<<"server">> := <<"127.0.0.1:1883">>} = jsx:decode(Connector), - ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), - %% ... 
and a MQTT bridge, using POST - %% we bind this bridge to the connector created just now - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), - ?MQTT_BRIDGE_EGRESS(ConnctorID)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?BRIDGE_NAME_EGRESS - } - ), - #{ - <<"type">> := ?CONNECTR_TYPE, - <<"name">> := ?BRIDGE_NAME_EGRESS, - <<"connector">> := ConnctorID - } = jsx:decode(Bridge), - BridgeIDEgress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), - wait_for_resource_ready(BridgeIDEgress, 5), - - %% Then we try to update 'server' of the connector, to an unavailable IP address - %% The update OK, we recreate the resource even if the resource is current connected, - %% and the target resource we're going to update is unavailable. - {ok, 200, _} = request( - put, - uri(["connectors", ConnctorID]), - ?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>) - ), - %% we fix the 'server' parameter to a normal one, it should work - {ok, 200, _} = request( - put, - uri(["connectors", ConnctorID]), - ?MQTT_CONNECTOR2(<<"127.0.0.1 : 1883">>) - ), - %% delete the bridge - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - - %% delete the connector - {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []), - {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []). - -t_mqtt_conn_update2(_) -> - %% then we add a mqtt connector, using POST - %% but this connector is point to a unreachable server "2603" - {ok, 201, Connector} = request( - post, - uri(["connectors"]), - ?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?CONNECTR_NAME - } - ), - - #{<<"server">> := <<"127.0.0.1:2603">>} = jsx:decode(Connector), - ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), - %% ... 
and a MQTT bridge, using POST - %% we bind this bridge to the connector created just now - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), - ?MQTT_BRIDGE_EGRESS(ConnctorID)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?BRIDGE_NAME_EGRESS - } - ), - #{ - <<"type">> := ?CONNECTR_TYPE, - <<"name">> := ?BRIDGE_NAME_EGRESS, - <<"status">> := <<"disconnected">>, - <<"connector">> := ConnctorID - } = jsx:decode(Bridge), - BridgeIDEgress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), - %% We try to fix the 'server' parameter, to another unavailable server.. - %% The update should success: we don't check the connectivity of the new config - %% if the resource is now disconnected. - {ok, 200, _} = request( - put, - uri(["connectors", ConnctorID]), - ?MQTT_CONNECTOR2(<<"127.0.0.1:2604">>) - ), - %% we fix the 'server' parameter to a normal one, it should work - {ok, 200, _} = request( - put, - uri(["connectors", ConnctorID]), - ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>) - ), - wait_for_resource_ready(BridgeIDEgress, 5), - {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(BridgeStr)), - %% delete the bridge - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), - {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), - - %% delete the connector - {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []), - {ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []). - -t_mqtt_conn_update3(_) -> - %% we add a mqtt connector, using POST - {ok, 201, _} = request( - post, - uri(["connectors"]), - ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?CONNECTR_NAME - } - ), - ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), - %% ... 
and a MQTT bridge, using POST - %% we bind this bridge to the connector created just now - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), - ?MQTT_BRIDGE_EGRESS(ConnctorID)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?BRIDGE_NAME_EGRESS - } - ), - #{<<"connector">> := ConnctorID} = jsx:decode(Bridge), - BridgeIDEgress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), - wait_for_resource_ready(BridgeIDEgress, 5), - - %% delete the connector should fail because it is in use by a bridge - {ok, 403, _} = request(delete, uri(["connectors", ConnctorID]), []), - %% delete the bridge - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), - %% the connector now can be deleted without problems - {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []). - -t_mqtt_conn_testing(_) -> - %% APIs for testing the connectivity - %% then we add a mqtt connector, using POST - {ok, 204, <<>>} = request( - post, - uri(["connectors_test"]), - ?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?BRIDGE_NAME_EGRESS - } - ), - {ok, 400, _} = request( - post, - uri(["connectors_test"]), - ?MQTT_CONNECTOR2(<<"127.0.0.1:2883">>)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?BRIDGE_NAME_EGRESS - } - ). 
- -t_ingress_mqtt_bridge_with_rules(_) -> - {ok, 201, _} = request( - post, - uri(["connectors"]), - ?MQTT_CONNECTOR(<<"user1">>)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?CONNECTR_NAME - } - ), - ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), - - {ok, 201, _} = request( - post, - uri(["bridges"]), - ?MQTT_BRIDGE_INGRESS(ConnctorID)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?BRIDGE_NAME_INGRESS - } - ), - BridgeIDIngress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS), - - {ok, 201, Rule} = request( - post, - uri(["rules"]), - #{ - <<"name">> => <<"A_rule_get_messages_from_a_source_mqtt_bridge">>, - <<"enable">> => true, - <<"actions">> => [#{<<"function">> => "emqx_connector_api_SUITE:inspect"}], - <<"sql">> => <<"SELECT * from \"$bridges/", BridgeIDIngress/binary, "\"">> - } - ), - #{<<"id">> := RuleId} = jsx:decode(Rule), - - %% we now test if the bridge works as expected - - RemoteTopic = <<"remote_topic/1">>, - LocalTopic = <<"local_topic/", RemoteTopic/binary>>, - Payload = <<"hello">>, - emqx:subscribe(LocalTopic), - timer:sleep(100), - %% PUBLISH a message to the 'remote' broker, as we have only one broker, - %% the remote broker is also the local one. 
- wait_for_resource_ready(BridgeIDIngress, 5), - emqx:publish(emqx_message:make(RemoteTopic, Payload)), - %% we should receive a message on the local broker, with specified topic - ?assert( - receive - {deliver, LocalTopic, #message{payload = Payload}} -> - ct:pal("local broker got message: ~p on topic ~p", [Payload, LocalTopic]), - true; - Msg -> - ct:pal("Msg: ~p", [Msg]), - false - after 100 -> - false - end - ), - %% and also the rule should be matched, with matched + 1: - {ok, 200, Rule1} = request(get, uri(["rules", RuleId, "metrics"]), []), - #{ - <<"id">> := RuleId, - <<"metrics">> := #{ - <<"matched">> := 1, - <<"passed">> := 1, - <<"failed">> := 0, - <<"failed.exception">> := 0, - <<"failed.no_result">> := 0, - <<"matched.rate">> := _, - <<"matched.rate.max">> := _, - <<"matched.rate.last5m">> := _, - <<"actions.total">> := 1, - <<"actions.success">> := 1, - <<"actions.failed">> := 0, - <<"actions.failed.out_of_service">> := 0, - <<"actions.failed.unknown">> := 0 - } - } = jsx:decode(Rule1), - %% we also check if the actions of the rule is triggered - ?assertMatch( - #{ - inspect := #{ - event := <<"$bridges/mqtt", _/binary>>, - id := MsgId, - payload := Payload, - topic := RemoteTopic, - qos := 0, - dup := false, - retain := false, - pub_props := #{}, - timestamp := _ - } - } when is_binary(MsgId), - persistent_term:get(?MODULE) - ), - - {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []), - {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []). 
- -t_egress_mqtt_bridge_with_rules(_) -> - {ok, 201, _} = request( - post, - uri(["connectors"]), - ?MQTT_CONNECTOR(<<"user1">>)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?CONNECTR_NAME - } - ), - ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME), - {ok, 201, Bridge} = request( - post, - uri(["bridges"]), - ?MQTT_BRIDGE_EGRESS(ConnctorID)#{ - <<"type">> => ?CONNECTR_TYPE, - <<"name">> => ?BRIDGE_NAME_EGRESS - } - ), - #{<<"type">> := ?CONNECTR_TYPE, <<"name">> := ?BRIDGE_NAME_EGRESS} = jsx:decode(Bridge), - BridgeIDEgress = emqx_bridge_resource:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS), - - {ok, 201, Rule} = request( - post, - uri(["rules"]), - #{ - <<"name">> => <<"A_rule_send_messages_to_a_sink_mqtt_bridge">>, - <<"enable">> => true, - <<"actions">> => [BridgeIDEgress], - <<"sql">> => <<"SELECT * from \"t/1\"">> - } - ), - #{<<"id">> := RuleId} = jsx:decode(Rule), - - %% we now test if the bridge works as expected - LocalTopic = <<"local_topic/1">>, - RemoteTopic = <<"remote_topic/", LocalTopic/binary>>, - Payload = <<"hello">>, - emqx:subscribe(RemoteTopic), - timer:sleep(100), - %% PUBLISH a message to the 'local' broker, as we have only one broker, - %% the remote broker is also the local one. - wait_for_resource_ready(BridgeIDEgress, 5), - emqx:publish(emqx_message:make(LocalTopic, Payload)), - %% we should receive a message on the "remote" broker, with specified topic - ?assert( - receive - {deliver, RemoteTopic, #message{payload = Payload}} -> - ct:pal("remote broker got message: ~p on topic ~p", [Payload, RemoteTopic]), - true; - Msg -> - ct:pal("Msg: ~p", [Msg]), - false - after 100 -> - false - end - ), - emqx:unsubscribe(RemoteTopic), - - %% PUBLISH a message to the rule. 
- Payload2 = <<"hi">>, - RuleTopic = <<"t/1">>, - RemoteTopic2 = <<"remote_topic/", RuleTopic/binary>>, - emqx:subscribe(RemoteTopic2), - timer:sleep(100), - wait_for_resource_ready(BridgeIDEgress, 5), - emqx:publish(emqx_message:make(RuleTopic, Payload2)), - {ok, 200, Rule1} = request(get, uri(["rules", RuleId, "metrics"]), []), - #{ - <<"id">> := RuleId, - <<"metrics">> := #{ - <<"matched">> := 1, - <<"passed">> := 1, - <<"failed">> := 0, - <<"failed.exception">> := 0, - <<"failed.no_result">> := 0, - <<"matched.rate">> := _, - <<"matched.rate.max">> := _, - <<"matched.rate.last5m">> := _, - <<"actions.total">> := 1, - <<"actions.success">> := 1, - <<"actions.failed">> := 0, - <<"actions.failed.out_of_service">> := 0, - <<"actions.failed.unknown">> := 0 - } - } = jsx:decode(Rule1), - %% we should receive a message on the "remote" broker, with specified topic - ?assert( - receive - {deliver, RemoteTopic2, #message{payload = Payload2}} -> - ct:pal("remote broker got message: ~p on topic ~p", [Payload2, RemoteTopic2]), - true; - Msg -> - ct:pal("Msg: ~p", [Msg]), - false - after 100 -> - false - end - ), - - %% verify the metrics of the bridge - {ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []), - ?assertMatch( - #{ - <<"metrics">> := ?metrics(2, 2, 0, _, _, _), - <<"node_metrics">> := - [#{<<"node">> := _, <<"metrics">> := ?metrics(2, 2, 0, _, _, _)}] - }, - jsx:decode(BridgeStr) - ), - - {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), - {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []), - {ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []). - -request(Method, Url, Body) -> - request(<<"connector_admin">>, Method, Url, Body). 
- -wait_for_resource_ready(InstId, 0) -> - ct:pal("--- bridge ~p: ~p", [InstId, emqx_bridge:lookup(InstId)]), - ct:fail(wait_resource_timeout); -wait_for_resource_ready(InstId, Retry) -> - case emqx_bridge:lookup(InstId) of - {ok, #{resource_data := #{status := connected}}} -> - ok; - _ -> - timer:sleep(100), - wait_for_resource_ready(InstId, Retry - 1) - end. diff --git a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl index d76b8420a..5473463ec 100644 --- a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl @@ -36,7 +36,8 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?MONGO_HOST, ?MONGO_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_conf]), - ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_connector]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), Config; false -> {skip, no_mongo} @@ -44,7 +45,8 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_common_test_helpers:stop_apps([emqx_conf]), - ok = emqx_connector_test_helpers:stop_apps([emqx_resource, emqx_connector]). + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector). init_per_testcase(_, Config) -> Config. 
@@ -85,8 +87,8 @@ perform_lifecycle_check(PoolName, InitialConfig) -> emqx_resource:get_instance(PoolName), ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), % % Perform query as further check that the resource is working as expected - ?assertMatch([], emqx_resource:query(PoolName, test_query_find())), - ?assertMatch(undefined, emqx_resource:query(PoolName, test_query_find_one())), + ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), ?assertEqual(ok, emqx_resource:stop(PoolName)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. @@ -95,7 +97,7 @@ perform_lifecycle_check(PoolName, InitialConfig) -> status := StoppedStatus }} = emqx_resource:get_instance(PoolName), - ?assertEqual(StoppedStatus, disconnected), + ?assertEqual(stopped, StoppedStatus), ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), @@ -108,8 +110,8 @@ perform_lifecycle_check(PoolName, InitialConfig) -> {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = emqx_resource:get_instance(PoolName), ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch([], emqx_resource:query(PoolName, test_query_find())), - ?assertMatch(undefined, emqx_resource:query(PoolName, test_query_find_one())), + ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), % Stop and remove the resource in one go. 
?assertEqual(ok, emqx_resource:remove_local(PoolName)), ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), diff --git a/apps/emqx_connector/test/emqx_connector_mongo_tests.erl b/apps/emqx_connector/test/emqx_connector_mongo_tests.erl new file mode 100644 index 000000000..7978ed289 --- /dev/null +++ b/apps/emqx_connector/test/emqx_connector_mongo_tests.erl @@ -0,0 +1,168 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_connector_mongo_tests). + +-include_lib("eunit/include/eunit.hrl"). + +-define(DEFAULT_MONGO_PORT, 27017). 
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +%%------------------------------------------------------------------------------ +%% Test cases +%%------------------------------------------------------------------------------ + +to_servers_raw_test_() -> + [ + {"single server, binary, no port", + ?_test( + ?assertEqual( + [{"localhost", ?DEFAULT_MONGO_PORT}], + emqx_connector_mongo:to_servers_raw(<<"localhost">>) + ) + )}, + {"single server, string, no port", + ?_test( + ?assertEqual( + [{"localhost", ?DEFAULT_MONGO_PORT}], + emqx_connector_mongo:to_servers_raw("localhost") + ) + )}, + {"single server, list(binary), no port", + ?_test( + ?assertEqual( + [{"localhost", ?DEFAULT_MONGO_PORT}], + emqx_connector_mongo:to_servers_raw([<<"localhost">>]) + ) + )}, + {"single server, list(string), no port", + ?_test( + ?assertEqual( + [{"localhost", ?DEFAULT_MONGO_PORT}], + emqx_connector_mongo:to_servers_raw(["localhost"]) + ) + )}, + %%%%%%%%% + {"single server, binary, with port", + ?_test( + ?assertEqual( + [{"localhost", 9999}], emqx_connector_mongo:to_servers_raw(<<"localhost:9999">>) + ) + )}, + {"single server, string, with port", + ?_test( + ?assertEqual( + [{"localhost", 9999}], emqx_connector_mongo:to_servers_raw("localhost:9999") + ) + )}, + {"single server, list(binary), with port", + ?_test( + ?assertEqual( + [{"localhost", 9999}], + emqx_connector_mongo:to_servers_raw([<<"localhost:9999">>]) + ) + )}, + {"single server, list(string), with port", + ?_test( + ?assertEqual( + [{"localhost", 9999}], emqx_connector_mongo:to_servers_raw(["localhost:9999"]) + ) + )}, + %%%%%%%%% + {"multiple servers, string, no port", + ?_test( + ?assertEqual( + [{"host1", ?DEFAULT_MONGO_PORT}, {"host2", ?DEFAULT_MONGO_PORT}], + emqx_connector_mongo:to_servers_raw("host1, host2") + ) + )}, + {"multiple servers, binary, no port", + ?_test( + 
?assertEqual( + [{"host1", ?DEFAULT_MONGO_PORT}, {"host2", ?DEFAULT_MONGO_PORT}], + emqx_connector_mongo:to_servers_raw(<<"host1, host2">>) + ) + )}, + {"multiple servers, list(string), no port", + ?_test( + ?assertEqual( + [{"host1", ?DEFAULT_MONGO_PORT}, {"host2", ?DEFAULT_MONGO_PORT}], + emqx_connector_mongo:to_servers_raw(["host1", "host2"]) + ) + )}, + {"multiple servers, list(binary), no port", + ?_test( + ?assertEqual( + [{"host1", ?DEFAULT_MONGO_PORT}, {"host2", ?DEFAULT_MONGO_PORT}], + emqx_connector_mongo:to_servers_raw([<<"host1">>, <<"host2">>]) + ) + )}, + %%%%%%%%% + {"multiple servers, string, with port", + ?_test( + ?assertEqual( + [{"host1", 1234}, {"host2", 2345}], + emqx_connector_mongo:to_servers_raw("host1:1234, host2:2345") + ) + )}, + {"multiple servers, binary, with port", + ?_test( + ?assertEqual( + [{"host1", 1234}, {"host2", 2345}], + emqx_connector_mongo:to_servers_raw(<<"host1:1234, host2:2345">>) + ) + )}, + {"multiple servers, list(string), with port", + ?_test( + ?assertEqual( + [{"host1", 1234}, {"host2", 2345}], + emqx_connector_mongo:to_servers_raw(["host1:1234", "host2:2345"]) + ) + )}, + {"multiple servers, list(binary), with port", + ?_test( + ?assertEqual( + [{"host1", 1234}, {"host2", 2345}], + emqx_connector_mongo:to_servers_raw([<<"host1:1234">>, <<"host2:2345">>]) + ) + )}, + %%%%%%%% + {"multiple servers, invalid list(string)", + ?_test( + ?assertThrow( + _, + emqx_connector_mongo:to_servers_raw(["host1, host2"]) + ) + )}, + {"multiple servers, invalid list(binary)", + ?_test( + ?assertThrow( + _, + emqx_connector_mongo:to_servers_raw([<<"host1, host2">>]) + ) + )}, + %% TODO: handle this case?? + {"multiple servers, mixed list(binary|string)", + ?_test( + ?assertThrow( + _, + emqx_connector_mongo:to_servers_raw([<<"host1">>, "host2"]) + ) + )} + ]. 
diff --git a/apps/emqx_connector/test/emqx_connector_mqtt_worker_tests.erl b/apps/emqx_connector/test/emqx_connector_mqtt_worker_tests.erl index aff1a92a6..3f0374d26 100644 --- a/apps/emqx_connector/test/emqx_connector_mqtt_worker_tests.erl +++ b/apps/emqx_connector/test/emqx_connector_mqtt_worker_tests.erl @@ -45,22 +45,6 @@ send(SendFun, Batch) when is_function(SendFun, 2) -> stop(_Pid) -> ok. -%% bridge worker should retry connecting remote node indefinitely -% reconnect_test() -> -% emqx_metrics:start_link(), -% emqx_connector_mqtt_worker:register_metrics(), -% Ref = make_ref(), -% Config = make_config(Ref, self(), {error, test}), -% {ok, Pid} = emqx_connector_mqtt_worker:start_link(?BRIDGE_NAME, Config), -% %% assert name registered -% ?assertEqual(Pid, whereis(?BRIDGE_REG_NAME)), -% ?WAIT({connection_start_attempt, Ref}, 1000), -% %% expect same message again -% ?WAIT({connection_start_attempt, Ref}, 1000), -% ok = emqx_connector_mqtt_worker:stop(?BRIDGE_REG_NAME), -% emqx_metrics:stop(), -% ok. - %% connect first, disconnect, then connect again disturbance_test() -> meck:new(emqx_connector_mqtt_mod, [passthrough, no_history]), @@ -69,7 +53,6 @@ disturbance_test() -> meck:expect(emqx_connector_mqtt_mod, stop, 1, fun(Pid) -> stop(Pid) end), try emqx_metrics:start_link(), - emqx_connector_mqtt_worker:register_metrics(), Ref = make_ref(), TestPid = self(), Config = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}), @@ -84,36 +67,6 @@ disturbance_test() -> meck:unload(emqx_connector_mqtt_mod) end. -% % %% buffer should continue taking in messages when disconnected -% buffer_when_disconnected_test_() -> -% {timeout, 10000, fun test_buffer_when_disconnected/0}. 
- -% test_buffer_when_disconnected() -> -% Ref = make_ref(), -% Nums = lists:seq(1, 100), -% Sender = spawn_link(fun() -> receive {bridge, Pid} -> sender_loop(Pid, Nums, _Interval = 5) end end), -% SenderMref = monitor(process, Sender), -% Receiver = spawn_link(fun() -> receive {bridge, Pid} -> receiver_loop(Pid, Nums, _Interval = 1) end end), -% ReceiverMref = monitor(process, Receiver), -% SendFun = fun(Batch) -> -% BatchRef = make_ref(), -% Receiver ! {batch, BatchRef, Batch}, -% {ok, BatchRef} -% end, -% Config0 = make_config(Ref, false, {ok, #{client_pid => undefined}}), -% Config = Config0#{reconnect_delay_ms => 100}, -% emqx_metrics:start_link(), -% emqx_connector_mqtt_worker:register_metrics(), -% {ok, Pid} = emqx_connector_mqtt_worker:start_link(?BRIDGE_NAME, Config), -% Sender ! {bridge, Pid}, -% Receiver ! {bridge, Pid}, -% ?assertEqual(Pid, whereis(?BRIDGE_REG_NAME)), -% Pid ! {disconnected, Ref, test}, -% ?WAIT({'DOWN', SenderMref, process, Sender, normal}, 5000), -% ?WAIT({'DOWN', ReceiverMref, process, Receiver, normal}, 1000), -% ok = emqx_connector_mqtt_worker:stop(?BRIDGE_REG_NAME), -% emqx_metrics:stop(). 
- manual_start_stop_test() -> meck:new(emqx_connector_mqtt_mod, [passthrough, no_history]), meck:expect(emqx_connector_mqtt_mod, start, 1, fun(Conf) -> start(Conf) end), @@ -121,7 +74,6 @@ manual_start_stop_test() -> meck:expect(emqx_connector_mqtt_mod, stop, 1, fun(Pid) -> stop(Pid) end), try emqx_metrics:start_link(), - emqx_connector_mqtt_worker:register_metrics(), Ref = make_ref(), TestPid = self(), BridgeName = manual_start_stop, diff --git a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl index d7f5cec63..3a41cc0b1 100644 --- a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl @@ -36,7 +36,8 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?MYSQL_HOST, ?MYSQL_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_conf]), - ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_connector]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), Config; false -> {skip, no_mysql} @@ -44,7 +45,8 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_common_test_helpers:stop_apps([emqx_conf]), - ok = emqx_connector_test_helpers:stop_apps([emqx_resource, emqx_connector]). + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector). init_per_testcase(_, Config) -> Config. @@ -101,7 +103,7 @@ perform_lifecycle_check(PoolName, InitialConfig) -> status := StoppedStatus }} = emqx_resource:get_instance(PoolName), - ?assertEqual(StoppedStatus, disconnected), + ?assertEqual(stopped, StoppedStatus), ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), diff --git a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl index d99d8ab6c..10293a241 100644 --- a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl @@ -36,7 +36,8 @@ init_per_suite(Config) -> case emqx_common_test_helpers:is_tcp_server_available(?PGSQL_HOST, ?PGSQL_DEFAULT_PORT) of true -> ok = emqx_common_test_helpers:start_apps([emqx_conf]), - ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_connector]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), Config; false -> {skip, no_pgsql} @@ -44,7 +45,8 @@ init_per_suite(Config) -> end_per_suite(_Config) -> ok = emqx_common_test_helpers:stop_apps([emqx_conf]), - ok = emqx_connector_test_helpers:stop_apps([emqx_resource, emqx_connector]). + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector). init_per_testcase(_, Config) -> Config. @@ -95,7 +97,7 @@ perform_lifecycle_check(PoolName, InitialConfig) -> status := StoppedStatus }} = emqx_resource:get_instance(PoolName), - ?assertEqual(StoppedStatus, disconnected), + ?assertEqual(stopped, StoppedStatus), ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), diff --git a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl index 4770bbeee..d9199d2d6 100644 --- a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl @@ -46,14 +46,16 @@ init_per_suite(Config) -> of true -> ok = emqx_common_test_helpers:start_apps([emqx_conf]), - ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_connector]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), Config; false -> {skip, no_redis} end. end_per_suite(_Config) -> - ok = emqx_common_test_helpers:stop_apps([emqx_resource, emqx_connector]). + ok = emqx_common_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector). init_per_testcase(_, Config) -> Config. @@ -117,7 +119,7 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) -> status := StoppedStatus }} = emqx_resource:get_instance(PoolName), - ?assertEqual(StoppedStatus, disconnected), + ?assertEqual(stopped, StoppedStatus), ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index 9e639bcf8..16c51342f 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,7 +2,7 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! 
- {vsn, "5.0.8"}, + {vsn, "5.0.9"}, {modules, []}, {registered, [emqx_dashboard_sup]}, {applications, [kernel, stdlib, mnesia, minirest, emqx]}, diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index 5af1aee89..5efe9114a 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -677,6 +677,13 @@ typename_to_spec("ip_port()", _Mod) -> #{type => string, example => <<"127.0.0.1:80">>}; typename_to_spec("host_port()", _Mod) -> #{type => string, example => <<"example.host.domain:80">>}; +typename_to_spec("write_syntax()", _Mod) -> + #{ + type => string, + example => + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", "bool=${payload.bool}">> + }; typename_to_spec("url()", _Mod) -> #{type => string, example => <<"http://127.0.0.1">>}; typename_to_spec("connect_timeout()", Mod) -> diff --git a/apps/emqx_dashboard/test/emqx_dashboard_api_test_helpers.erl b/apps/emqx_dashboard/test/emqx_dashboard_api_test_helpers.erl index 6b3891ef3..b74a118d2 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_api_test_helpers.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_api_test_helpers.erl @@ -92,7 +92,7 @@ request(Username, Method, Url, Body) -> uri() -> uri([]). uri(Parts) when is_list(Parts) -> NParts = [E || E <- Parts], - ?HOST ++ filename:join([?BASE_PATH, ?API_VERSION | NParts]). + ?HOST ++ to_list(filename:join([?BASE_PATH, ?API_VERSION | NParts])). 
auth_header(Username) -> Password = <<"public">>, diff --git a/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl index 171a0bde4..accbcd3ac 100644 --- a/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_authz_SUITE.erl @@ -79,7 +79,7 @@ end_per_suite(Config) -> emqx_gateway_auth_ct:stop(), ok = emqx_authz_test_lib:restore_authorizers(), emqx_config:erase(gateway), - emqx_mgmt_api_test_util:end_suite([cowboy, emqx_authz, emqx_authn, emqx_gateway]), + emqx_mgmt_api_test_util:end_suite([emqx_gateway, emqx_authn, emqx_authz, emqx_conf]), Config. init_per_testcase(_Case, Config) -> diff --git a/apps/emqx_machine/src/emqx_restricted_shell.erl b/apps/emqx_machine/src/emqx_restricted_shell.erl index cc475cb56..31ee16986 100644 --- a/apps/emqx_machine/src/emqx_restricted_shell.erl +++ b/apps/emqx_machine/src/emqx_restricted_shell.erl @@ -45,9 +45,10 @@ set_prompt_func() -> prompt_func(PropList) -> Line = proplists:get_value(history, PropList, 1), Version = emqx_release:version(), + Edition = emqx_release:edition(), case is_alive() of - true -> io_lib:format(<<"~ts(~s)~w> ">>, [Version, node(), Line]); - false -> io_lib:format(<<"~ts ~w> ">>, [Version, Line]) + true -> io_lib:format(<<"~ts-~ts(~s)~w> ">>, [Edition, Version, node(), Line]); + false -> io_lib:format(<<"~ts-~ts ~w> ">>, [Edition, Version, Line]) end. 
local_allowed(MF, Args, State) -> diff --git a/apps/emqx_machine/test/emqx_machine_SUITE.erl b/apps/emqx_machine/test/emqx_machine_SUITE.erl index f865b0f26..3e0274337 100644 --- a/apps/emqx_machine/test/emqx_machine_SUITE.erl +++ b/apps/emqx_machine/test/emqx_machine_SUITE.erl @@ -47,7 +47,6 @@ init_per_suite(Config) -> emqx_prometheus, emqx_modules, emqx_dashboard, - emqx_connector, emqx_gateway, emqx_statsd, emqx_resource, diff --git a/apps/emqx_management/src/emqx_mgmt.erl b/apps/emqx_management/src/emqx_mgmt.erl index 23804614b..38b26444c 100644 --- a/apps/emqx_management/src/emqx_mgmt.erl +++ b/apps/emqx_management/src/emqx_mgmt.erl @@ -143,6 +143,7 @@ node_info() -> node_status => 'running', uptime => proplists:get_value(uptime, BrokerInfo), version => iolist_to_binary(proplists:get_value(version, BrokerInfo)), + edition => emqx_release:edition_longstr(), role => mria_rlog:role() }. diff --git a/apps/emqx_management/src/emqx_mgmt_api_nodes.erl b/apps/emqx_management/src/emqx_mgmt_api_nodes.erl index e0f0912df..d0d2e4b8c 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_nodes.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_nodes.erl @@ -215,7 +215,12 @@ fields(node_info) -> {version, mk( string(), - #{desc => <<"Release version">>, example => "5.0.0-beat.3-00000000"} + #{desc => <<"Release version">>, example => "5.0.0"} + )}, + {edition, + mk( + enum(['Opensource', 'Enterprise']), + #{desc => <<"Release edition">>, example => "Opensource"} )}, {sys_path, mk( diff --git a/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl index 1ba9cdfda..73b796afe 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_nodes_SUITE.erl @@ -57,6 +57,8 @@ t_nodes_api(_) -> LocalNodeInfo = hd(NodesResponse), Node = binary_to_atom(maps:get(<<"node">>, LocalNodeInfo), utf8), ?assertEqual(Node, node()), + Edition = maps:get(<<"edition">>, LocalNodeInfo), + 
?assertEqual(emqx_release:edition_longstr(), Edition), NodePath = emqx_mgmt_api_test_util:api_path(["nodes", atom_to_list(node())]), {ok, NodeInfo} = emqx_mgmt_api_test_util:request_api(get, NodePath), diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src index 10df22d97..bcdcfe420 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_plugin_libs, [ {description, "EMQX Plugin utility libs"}, - {vsn, "4.3.3"}, + {vsn, "4.3.4"}, {modules, []}, {applications, [kernel, stdlib]}, {env, []} diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl b/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl index cce45fa4a..f0b1f0bb5 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl @@ -40,12 +40,13 @@ start_pool(Name, Mod, Options) -> stop_pool(Name), start_pool(Name, Mod, Options); {error, Reason} -> + NReason = parse_reason(Reason), ?SLOG(error, #{ msg => "start_ecpool_error", pool_name => Name, - reason => Reason + reason => NReason }), - {error, {start_pool_failed, Name, Reason}} + {error, {start_pool_failed, Name, NReason}} end. stop_pool(Name) -> @@ -86,3 +87,11 @@ health_check_ecpool_workers(PoolName, CheckFunc, Timeout) when is_function(Check exit:timeout -> false end. + +parse_reason({ + {shutdown, {failed_to_start_child, _, {shutdown, {failed_to_start_child, _, Reason}}}}, + _ +}) -> + Reason; +parse_reason(Reason) -> + Reason. 
diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl index 03304c209..e94d62b53 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl @@ -29,7 +29,9 @@ preproc_sql/2, proc_sql/2, proc_sql_param_str/2, - proc_cql_param_str/2 + proc_cql_param_str/2, + split_insert_sql/1, + detect_sql_type/1 ]). %% type converting @@ -123,6 +125,43 @@ proc_sql_param_str(Tokens, Data) -> proc_cql_param_str(Tokens, Data) -> emqx_placeholder:proc_cql_param_str(Tokens, Data). +%% SQL = <<"INSERT INTO \"abc\" (c1,c2,c3) VALUES (${1}, ${1}, ${1})">> +-spec split_insert_sql(binary()) -> {ok, {InsertSQL, Params}} | {error, atom()} when + InsertSQL :: binary(), + Params :: binary(). +split_insert_sql(SQL) -> + case re:split(SQL, "((?i)values)", [{return, binary}]) of + [Part1, _, Part3] -> + case string:trim(Part1, leading) of + <<"insert", _/binary>> = InsertSQL -> + {ok, {InsertSQL, Part3}}; + <<"INSERT", _/binary>> = InsertSQL -> + {ok, {InsertSQL, Part3}}; + _ -> + {error, not_insert_sql} + end; + _ -> + {error, not_insert_sql} + end. + +-spec detect_sql_type(binary()) -> {ok, Type} | {error, atom()} when + Type :: insert | select. +detect_sql_type(SQL) -> + case re:run(SQL, "^\\s*([a-zA-Z]+)", [{capture, all_but_first, list}]) of + {match, [First]} -> + Types = [select, insert], + PropTypes = [{erlang:atom_to_list(Type), Type} || Type <- Types], + LowFirst = string:lowercase(First), + case proplists:lookup(LowFirst, PropTypes) of + {LowFirst, Type} -> + {ok, Type}; + _ -> + {error, invalid_sql} + end; + _ -> + {error, invalid_sql} + end. 
+ unsafe_atom_key(Key) when is_atom(Key) -> Key; unsafe_atom_key(Key) when is_binary(Key) -> diff --git a/apps/emqx_resource/README.md b/apps/emqx_resource/README.md index 04f3c2205..0f61df7ff 100644 --- a/apps/emqx_resource/README.md +++ b/apps/emqx_resource/README.md @@ -14,5 +14,5 @@ the config operations (like config validation, config dump back to files), and t And we put all the `specific` codes to the callback modules. See -* `test/emqx_test_resource.erl` for a minimal `emqx_resource` implementation; +* `test/emqx_connector_demo.erl` for a minimal `emqx_resource` implementation; * `test/emqx_resource_SUITE.erl` for examples of `emqx_resource` usage. diff --git a/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf b/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf new file mode 100644 index 000000000..d7953ac3b --- /dev/null +++ b/apps/emqx_resource/i18n/emqx_resource_schema_i18n.conf @@ -0,0 +1,158 @@ +emqx_resource_schema { + + resource_opts { + desc { + en: """Resource options.""" + zh: """资源相关的选项。""" + } + label { + en: """Resource Options""" + zh: """资源选项""" + } + } + + creation_opts { + desc { + en: """Creation options.""" + zh: """资源启动相关的选项。""" + } + label { + en: """Creation Options""" + zh: """资源启动选项""" + } + } + + worker_pool_size { + desc { + en: """Resource worker pool size.""" + zh: """资源连接池大小。""" + } + label { + en: """Worker Pool Size""" + zh: """资源连接池大小""" + } + } + + health_check_interval { + desc { + en: """Health check interval, in milliseconds.""" + zh: """健康检查间隔,单位毫秒。""" + } + label { + en: """Health Check Interval""" + zh: """健康检查间隔""" + } + } + + start_after_created { + desc { + en: """Whether start the resource right after created.""" + zh: """是否在创建资源后立即启动资源。""" + } + label { + en: """Start After Created""" + zh: """创建后立即启动""" + } + } + + start_timeout { + desc { + en: """If 'start_after_created' enabled, how long time do we wait for the resource get started, in milliseconds.""" + zh: 
"""如果选择了创建后立即启动资源,此选项用来设置等待资源启动的超时时间,单位毫秒。""" + } + label { + en: """Start Timeout""" + zh: """启动超时时间""" + } + } + + auto_restart_interval { + desc { + en: """The auto restart interval after the resource is disconnected, in milliseconds.""" + zh: """资源断开以后,自动重连的时间间隔,单位毫秒。""" + } + label { + en: """Auto Restart Interval""" + zh: """自动重连间隔""" + } + } + + query_mode { + desc { + en: """Query mode. Optional 'sync/async', default 'sync'.""" + zh: """请求模式。可选 '同步/异步',默认为'同步'模式。""" + } + label { + en: """Query mode""" + zh: """请求模式""" + } + } + + enable_batch { + desc { + en: """Batch mode enabled.""" + zh: """启用批量模式。""" + } + label { + en: """Enable batch""" + zh: """启用批量模式""" + } + } + + enable_queue { + desc { + en: """Queue mode enabled.""" + zh: """启用队列模式。""" + } + label { + en: """Enable queue""" + zh: """启用队列模式""" + } + } + + async_inflight_window { + desc { + en: """Async query inflight window.""" + zh: """异步请求飞行队列窗口大小。""" + } + label { + en: """Async inflight window""" + zh: """异步请求飞行队列窗口""" + } + } + + batch_size { + desc { + en: """Maximum batch count.""" + zh: """批量请求大小。""" + } + label { + en: """Batch size""" + zh: """批量请求大小""" + } + } + + batch_time { + desc { + en: """Maximum batch waiting interval.""" + zh: """最大批量请求等待时间。""" + } + label { + en: """Batch time""" + zh: """批量等待间隔""" + } + } + + max_queue_bytes { + desc { + en: """Maximum queue storage.""" + zh: """消息队列的最大长度。""" + } + label { + en: """Queue max bytes""" + zh: """队列最大长度""" + } + } + + +} diff --git a/apps/emqx_resource/include/emqx_resource.hrl b/apps/emqx_resource/include/emqx_resource.hrl index dd384af7c..71300df72 100644 --- a/apps/emqx_resource/include/emqx_resource.hrl +++ b/apps/emqx_resource/include/emqx_resource.hrl @@ -21,36 +21,89 @@ -type resource_config() :: term(). -type resource_spec() :: map(). -type resource_state() :: term(). --type resource_status() :: connected | disconnected | connecting. +-type resource_status() :: connected | disconnected | connecting | stopped. 
+-type callback_mode() :: always_sync | async_if_possible. +-type query_mode() :: async | sync | dynamic. +-type result() :: term(). +-type reply_fun() :: {fun((result(), Args :: term()) -> any()), Args :: term()} | undefined. +-type query_opts() :: #{ + %% The key used for picking a resource worker + pick_key => term(), + async_reply_fun => reply_fun() +}. -type resource_data() :: #{ id := resource_id(), mod := module(), + callback_mode := callback_mode(), + query_mode := query_mode(), config := resource_config(), state := resource_state(), status := resource_status(), metrics := emqx_metrics_worker:metrics() }. -type resource_group() :: binary(). --type create_opts() :: #{ - health_check_interval => integer(), +-type creation_opts() :: #{ + %%======================================= Deprecated Opts: + %% use health_check_interval instead health_check_timeout => integer(), - %% We can choose to block the return of emqx_resource:start until - %% the resource connected, wait max to `wait_for_resource_ready` ms. + %% use start_timeout instead wait_for_resource_ready => integer(), + %% use auto_restart_interval instead + auto_retry_interval => integer(), + %%======================================= Deprecated Opts End + worker_pool_size => pos_integer(), + %% use `integer()` compatibility to release 5.0.0 bpapi + health_check_interval => integer(), + %% We can choose to block the return of emqx_resource:start until + %% the resource connected, wait max to `start_timeout` ms. + start_timeout => pos_integer(), %% If `start_after_created` is set to true, the resource is started right %% after it is created. But note that a `started` resource is not guaranteed %% to be `connected`. start_after_created => boolean(), %% If the resource disconnected, we can set to retry starting the resource %% periodically. 
- auto_retry_interval => integer() + auto_restart_interval => pos_integer(), + enable_batch => boolean(), + batch_size => pos_integer(), + batch_time => pos_integer(), + enable_queue => boolean(), + max_queue_bytes => pos_integer(), + query_mode => query_mode(), + resume_interval => pos_integer(), + async_inflight_window => pos_integer() }. --type after_query() :: - {[OnSuccess :: after_query_fun()], [OnFailed :: after_query_fun()]} - | undefined. +-type query_result() :: + ok + | {ok, term()} + | {error, {recoverable_error, term()}} + | {error, term()}. -%% the `after_query_fun()` is mainly for callbacks that increment counters or do some fallback -%% actions upon query failure --type after_query_fun() :: {fun((...) -> ok), Args :: [term()]}. +-define(WORKER_POOL_SIZE, 16). + +-define(DEFAULT_QUEUE_SEG_SIZE, 10 * 1024 * 1024). +-define(DEFAULT_QUEUE_SEG_SIZE_RAW, <<"10MB">>). + +-define(DEFAULT_QUEUE_SIZE, 100 * 1024 * 1024). +-define(DEFAULT_QUEUE_SIZE_RAW, <<"100MB">>). + +%% count +-define(DEFAULT_BATCH_SIZE, 100). + +%% milliseconds +-define(DEFAULT_BATCH_TIME, 20). +-define(DEFAULT_BATCH_TIME_RAW, <<"20ms">>). + +%% count +-define(DEFAULT_INFLIGHT, 100). + +%% milliseconds +-define(HEALTHCHECK_INTERVAL, 15000). +-define(HEALTHCHECK_INTERVAL_RAW, <<"15s">>). + +%% milliseconds +-define(AUTO_RESTART_INTERVAL, 60000). +-define(AUTO_RESTART_INTERVAL_RAW, <<"60s">>). -define(TEST_ID_PREFIX, "_test_:"). +-define(RES_METRICS, resource_metrics). diff --git a/apps/emqx_resource/include/emqx_resource_errors.hrl b/apps/emqx_resource/include/emqx_resource_errors.hrl new file mode 100644 index 000000000..6d1b3e92f --- /dev/null +++ b/apps/emqx_resource/include/emqx_resource_errors.hrl @@ -0,0 +1,20 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-define(RESOURCE_ERROR(Reason, Msg), + {error, {resource_error, #{reason => Reason, msg => Msg}}} +). +-define(RESOURCE_ERROR_M(Reason, Msg), {error, {resource_error, #{reason := Reason, msg := Msg}}}). diff --git a/apps/emqx_resource/include/emqx_resource_utils.hrl b/apps/emqx_resource/include/emqx_resource_utils.hrl index 8d94746eb..3df64b1e5 100644 --- a/apps/emqx_resource/include/emqx_resource_utils.hrl +++ b/apps/emqx_resource/include/emqx_resource_utils.hrl @@ -15,7 +15,7 @@ %%-------------------------------------------------------------------- -define(SAFE_CALL(_EXP_), - ?SAFE_CALL(_EXP_, ok) + ?SAFE_CALL(_EXP_, {error, {_EXCLASS_, _EXCPTION_, _ST_}}) ). -define(SAFE_CALL(_EXP_, _EXP_ON_FAIL_), @@ -24,8 +24,7 @@ (_EXP_) catch _EXCLASS_:_EXCPTION_:_ST_ -> - _EXP_ON_FAIL_, - {error, {_EXCLASS_, _EXCPTION_, _ST_}} + _EXP_ON_FAIL_ end end() ). 
diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 1bfd02323..38dac5449 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.1"}, + {vsn, "0.1.2"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ @@ -9,7 +9,8 @@ stdlib, gproc, jsx, - emqx + emqx, + telemetry ]}, {env, []}, {modules, []}, diff --git a/apps/emqx_resource/src/emqx_resource.erl b/apps/emqx_resource/src/emqx_resource.erl index 33f0d0a3d..8086dfa25 100644 --- a/apps/emqx_resource/src/emqx_resource.erl +++ b/apps/emqx_resource/src/emqx_resource.erl @@ -18,18 +18,12 @@ -include("emqx_resource.hrl"). -include("emqx_resource_utils.hrl"). +-include("emqx_resource_errors.hrl"). %% APIs for resource types -export([list_types/0]). -%% APIs for behaviour implementations - --export([ - query_success/1, - query_failed/1 -]). - %% APIs for instances -export([ @@ -83,19 +77,24 @@ stop/1, %% query the instance query/2, - %% query the instance with after_query() - query/3 + query/3, + %% query the instance without batching and queuing messages. + simple_sync_query/2, + simple_async_query/3 ]). %% Direct calls to the callback module -%% start the instance -export([ + %% get the callback mode of a specific module + get_callback_mode/1, + %% start the instance call_start/3, %% verify if the resource is working normally call_health_check/3, %% stop the instance - call_stop/3 + call_stop/3, + is_buffer_supported/1 ]). %% list all the instances, id only. @@ -105,15 +104,22 @@ list_instances_verbose/0, %% return the data of the instance get_instance/1, + fetch_creation_opts/1, %% return all the instances of the same resource type list_instances_by_type/1, generate_id/1, list_group_instances/1 ]). +-export([inc_received/1, apply_reply_fun/2]). 
+ -optional_callbacks([ - on_query/4, - on_get_status/2 + on_query/3, + on_batch_query/3, + on_query_async/4, + on_batch_query_async/4, + on_get_status/2, + is_buffer_supported/0 ]). %% when calling emqx_resource:start/1 @@ -124,7 +130,26 @@ -callback on_stop(resource_id(), resource_state()) -> term(). %% when calling emqx_resource:query/3 --callback on_query(resource_id(), Request :: term(), after_query(), resource_state()) -> term(). +-callback on_query(resource_id(), Request :: term(), resource_state()) -> query_result(). + +%% when calling emqx_resource:on_batch_query/3 +-callback on_batch_query(resource_id(), Request :: term(), resource_state()) -> query_result(). + +%% when calling emqx_resource:on_query_async/4 +-callback on_query_async( + resource_id(), + Request :: term(), + {ReplyFun :: function(), Args :: list()}, + resource_state() +) -> query_result(). + +%% when calling emqx_resource:on_batch_query_async/4 +-callback on_batch_query_async( + resource_id(), + Request :: term(), + {ReplyFun :: function(), Args :: list()}, + resource_state() +) -> query_result(). %% when calling emqx_resource:health_check/2 -callback on_get_status(resource_id(), resource_state()) -> @@ -132,6 +157,8 @@ | {resource_status(), resource_state()} | {resource_status(), resource_state(), term()}. +-callback is_buffer_supported() -> boolean(). + -spec list_types() -> [module()]. list_types() -> discover_resource_mods(). @@ -148,22 +175,6 @@ is_resource_mod(Module) -> proplists:get_value(behaviour, Info, []), lists:member(?MODULE, Behaviour). --spec query_success(after_query()) -> ok. -query_success(undefined) -> ok; -query_success({OnSucc, _}) -> apply_query_after_calls(OnSucc). - --spec query_failed(after_query()) -> ok. -query_failed(undefined) -> ok; -query_failed({_, OnFailed}) -> apply_query_after_calls(OnFailed). - -apply_query_after_calls(Funcs) -> - lists:foreach( - fun({Fun, Args}) -> - safe_apply(Fun, Args) - end, - Funcs - ). 
- %% ================================================================================= %% APIs for resource instances %% ================================================================================= @@ -172,7 +183,7 @@ apply_query_after_calls(Funcs) -> create(ResId, Group, ResourceType, Config) -> create(ResId, Group, ResourceType, Config, #{}). --spec create(resource_id(), resource_group(), resource_type(), resource_config(), create_opts()) -> +-spec create(resource_id(), resource_group(), resource_type(), resource_config(), creation_opts()) -> {ok, resource_data() | 'already_created'} | {error, Reason :: term()}. create(ResId, Group, ResourceType, Config, Opts) -> emqx_resource_proto_v1:create(ResId, Group, ResourceType, Config, Opts). @@ -188,7 +199,7 @@ create_local(ResId, Group, ResourceType, Config) -> resource_group(), resource_type(), resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()}. create_local(ResId, Group, ResourceType, Config, Opts) -> @@ -209,7 +220,7 @@ create_dry_run_local(ResourceType, Config) -> recreate(ResId, ResourceType, Config) -> recreate(ResId, ResourceType, Config, #{}). --spec recreate(resource_id(), resource_type(), resource_config(), create_opts()) -> +-spec recreate(resource_id(), resource_type(), resource_config(), creation_opts()) -> {ok, resource_data()} | {error, Reason :: term()}. recreate(ResId, ResourceType, Config, Opts) -> emqx_resource_proto_v1:recreate(ResId, ResourceType, Config, Opts). @@ -219,7 +230,7 @@ recreate(ResId, ResourceType, Config, Opts) -> recreate_local(ResId, ResourceType, Config) -> recreate_local(ResId, ResourceType, Config, #{}). --spec recreate_local(resource_id(), resource_type(), resource_config(), create_opts()) -> +-spec recreate_local(resource_id(), resource_type(), resource_config(), creation_opts()) -> {ok, resource_data()} | {error, Reason :: term()}. 
recreate_local(ResId, ResourceType, Config, Opts) -> emqx_resource_manager:recreate(ResId, ResourceType, Config, Opts). @@ -243,35 +254,39 @@ reset_metrics(ResId) -> %% ================================================================================= -spec query(resource_id(), Request :: term()) -> Result :: term(). query(ResId, Request) -> - query(ResId, Request, inc_metrics_funcs(ResId)). + query(ResId, Request, #{}). -%% same to above, also defines what to do when the Module:on_query success or failed -%% it is the duty of the Module to apply the `after_query()` functions. --spec query(resource_id(), Request :: term(), after_query()) -> Result :: term(). -query(ResId, Request, AfterQuery) -> +-spec query(resource_id(), Request :: term(), emqx_resource_worker:query_opts()) -> + Result :: term(). +query(ResId, Request, Opts) -> case emqx_resource_manager:ets_lookup(ResId) of - {ok, _Group, #{mod := Mod, state := ResourceState, status := connected}} -> - %% the resource state is readonly to Module:on_query/4 - %% and the `after_query()` functions should be thread safe - ok = emqx_metrics_worker:inc(resource_metrics, ResId, matched), - try - Mod:on_query(ResId, Request, AfterQuery, ResourceState) - catch - Err:Reason:ST -> - emqx_metrics_worker:inc(resource_metrics, ResId, exception), - erlang:raise(Err, Reason, ST) + {ok, _Group, #{query_mode := QM, mod := Module}} -> + IsBufferSupported = is_buffer_supported(Module), + case {IsBufferSupported, QM} of + {true, _} -> + emqx_resource_worker:simple_sync_query(ResId, Request); + {false, sync} -> + emqx_resource_worker:sync_query(ResId, Request, Opts); + {false, async} -> + emqx_resource_worker:async_query(ResId, Request, Opts) end; - {ok, _Group, _Data} -> - query_error(not_connected, <<"resource not connected">>); {error, not_found} -> - query_error(not_found, <<"resource not found">>) + ?RESOURCE_ERROR(not_found, "resource not found") end. 
+-spec simple_sync_query(resource_id(), Request :: term()) -> Result :: term(). +simple_sync_query(ResId, Request) -> + emqx_resource_worker:simple_sync_query(ResId, Request). + +-spec simple_async_query(resource_id(), Request :: term(), reply_fun()) -> Result :: term(). +simple_async_query(ResId, Request, ReplyFun) -> + emqx_resource_worker:simple_async_query(ResId, Request, ReplyFun). + -spec start(resource_id()) -> ok | {error, Reason :: term()}. start(ResId) -> start(ResId, #{}). --spec start(resource_id(), create_opts()) -> ok | {error, Reason :: term()}. +-spec start(resource_id(), creation_opts()) -> ok | {error, Reason :: term()}. start(ResId, Opts) -> emqx_resource_manager:start(ResId, Opts). @@ -279,7 +294,7 @@ start(ResId, Opts) -> restart(ResId) -> restart(ResId, #{}). --spec restart(resource_id(), create_opts()) -> ok | {error, Reason :: term()}. +-spec restart(resource_id(), creation_opts()) -> ok | {error, Reason :: term()}. restart(ResId, Opts) -> emqx_resource_manager:restart(ResId, Opts). @@ -299,6 +314,10 @@ set_resource_status_connecting(ResId) -> get_instance(ResId) -> emqx_resource_manager:lookup(ResId). +-spec fetch_creation_opts(map()) -> creation_opts(). +fetch_creation_opts(Opts) -> + maps:get(resource_opts, Opts, #{}). + -spec list_instances() -> [resource_id()]. list_instances() -> [Id || #{id := Id} <- list_instances_verbose()]. @@ -322,6 +341,19 @@ generate_id(Name) when is_binary(Name) -> -spec list_group_instances(resource_group()) -> [resource_id()]. list_group_instances(Group) -> emqx_resource_manager:list_group(Group). +-spec get_callback_mode(module()) -> callback_mode(). +get_callback_mode(Mod) -> + Mod:callback_mode(). + +-spec is_buffer_supported(module()) -> boolean(). +is_buffer_supported(Module) -> + try + Module:is_buffer_supported() + catch + _:_ -> + false + end. + -spec call_start(manager_id(), module(), resource_config()) -> {ok, resource_state()} | {error, Reason :: term()}. 
call_start(MgrId, Mod, Config) -> @@ -359,7 +391,7 @@ check_and_create(ResId, Group, ResourceType, RawConfig) -> resource_group(), resource_type(), raw_resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data() | 'already_created'} | {error, term()}. check_and_create(ResId, Group, ResourceType, RawConfig, Opts) -> @@ -384,7 +416,7 @@ check_and_create_local(ResId, Group, ResourceType, RawConfig) -> resource_group(), resource_type(), raw_resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()} | {error, term()}. check_and_create_local(ResId, Group, ResourceType, RawConfig, Opts) -> check_and_do( @@ -397,7 +429,7 @@ check_and_create_local(ResId, Group, ResourceType, RawConfig, Opts) -> resource_id(), resource_type(), raw_resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()} | {error, term()}. check_and_recreate(ResId, ResourceType, RawConfig, Opts) -> @@ -411,7 +443,7 @@ check_and_recreate(ResId, ResourceType, RawConfig, Opts) -> resource_id(), resource_type(), raw_resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()} | {error, term()}. check_and_recreate_local(ResId, ResourceType, RawConfig, Opts) -> @@ -427,18 +459,16 @@ check_and_do(ResourceType, RawConfig, Do) when is_function(Do) -> Error -> Error end. +apply_reply_fun({F, A}, Result) when is_function(F) -> + _ = erlang:apply(F, A ++ [Result]), + ok; +apply_reply_fun(From, Result) -> + gen_server:reply(From, Result). + %% ================================================================================= +inc_received(ResId) -> + emqx_metrics_worker:inc(?RES_METRICS, ResId, 'received'). + filter_instances(Filter) -> [Id || #{id := Id, mod := Mod} <- list_instances_verbose(), Filter(Id, Mod)]. - -inc_metrics_funcs(ResId) -> - OnFailed = [{fun emqx_metrics_worker:inc/3, [resource_metrics, ResId, failed]}], - OnSucc = [{fun emqx_metrics_worker:inc/3, [resource_metrics, ResId, success]}], - {OnSucc, OnFailed}. 
- -safe_apply(Func, Args) -> - ?SAFE_CALL(erlang:apply(Func, Args)). - -query_error(Reason, Msg) -> - {error, {?MODULE, #{reason => Reason, msg => Msg}}}. diff --git a/apps/emqx_resource/src/emqx_resource_app.erl b/apps/emqx_resource/src/emqx_resource_app.erl index 72838a8c1..51e7b2556 100644 --- a/apps/emqx_resource/src/emqx_resource_app.erl +++ b/apps/emqx_resource/src/emqx_resource_app.erl @@ -23,9 +23,18 @@ -export([start/2, stop/1]). start(_StartType, _StartArgs) -> + %% since the handler is generic and executed in the process + %% emitting the event, we need to install only a single handler + %% for the whole app. + TelemetryHandlerID = telemetry_handler_id(), + ok = emqx_resource_metrics:install_telemetry_handler(TelemetryHandlerID), emqx_resource_sup:start_link(). stop(_State) -> + TelemetryHandlerID = telemetry_handler_id(), + ok = emqx_resource_metrics:uninstall_telemetry_handler(TelemetryHandlerID), ok. %% internal functions +telemetry_handler_id() -> + <<"emqx-resource-app-telemetry-handler">>. diff --git a/apps/emqx_resource/src/emqx_resource_manager.erl b/apps/emqx_resource/src/emqx_resource_manager.erl index 3a1afd27c..10c501865 100644 --- a/apps/emqx_resource/src/emqx_resource_manager.erl +++ b/apps/emqx_resource/src/emqx_resource_manager.erl @@ -38,8 +38,12 @@ list_group/1, ets_lookup/1, get_metrics/1, - reset_metrics/1, - set_resource_status_connecting/1 + reset_metrics/1 +]). + +-export([ + set_resource_status_connecting/1, + manager_id_to_resource_id/1 ]). % Server @@ -49,11 +53,12 @@ -export([init/1, callback_mode/0, handle_event/4, terminate/3]). % State record --record(data, {id, manager_id, group, mod, config, opts, status, state, error}). +-record(data, { + id, manager_id, group, mod, callback_mode, query_mode, config, opts, status, state, error +}). +-type data() :: #data{}. --define(SHORT_HEALTHCHECK_INTERVAL, 1000). --define(HEALTHCHECK_INTERVAL, 15000). --define(ETS_TABLE, emqx_resource_manager). +-define(ETS_TABLE, ?MODULE). 
-define(WAIT_FOR_RESOURCE_DELAY, 100). -define(T_OPERATION, 5000). -define(T_LOOKUP, 1000). @@ -64,6 +69,13 @@ %% API %%------------------------------------------------------------------------------ +make_manager_id(ResId) -> + emqx_resource:generate_id(ResId). + +manager_id_to_resource_id(MgrId) -> + [ResId, _Index] = string:split(MgrId, ":", trailing), + ResId. + %% @doc Called from emqx_resource when starting a resource instance. %% %% Triggers the emqx_resource_manager_sup supervisor to actually create @@ -73,7 +85,7 @@ resource_group(), resource_type(), resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()}. ensure_resource(ResId, Group, ResourceType, Config, Opts) -> case lookup(ResId) of @@ -85,7 +97,7 @@ ensure_resource(ResId, Group, ResourceType, Config, Opts) -> end. %% @doc Called from emqx_resource when recreating a resource which may or may not exist --spec recreate(resource_id(), resource_type(), resource_config(), create_opts()) -> +-spec recreate(resource_id(), resource_type(), resource_config(), creation_opts()) -> {ok, resource_data()} | {error, not_found} | {error, updating_to_incorrect_resource_type}. recreate(ResId, ResourceType, NewConfig, Opts) -> case lookup(ResId) of @@ -104,21 +116,52 @@ create_and_return_data(MgrId, ResId, Group, ResourceType, Config, Opts) -> {ok, _Group, Data} = lookup(ResId), {ok, Data}. +%% internal configs +-define(START_AFTER_CREATED, true). +%% in milliseconds +-define(START_TIMEOUT, 5000). 
+ %% @doc Create a resource_manager and wait until it is running create(MgrId, ResId, Group, ResourceType, Config, Opts) -> % The state machine will make the actual call to the callback/resource module after init ok = emqx_resource_manager_sup:ensure_child(MgrId, ResId, Group, ResourceType, Config, Opts), ok = emqx_metrics_worker:create_metrics( - resource_metrics, + ?RES_METRICS, ResId, - [matched, success, failed, exception], + [ + 'matched', + 'retried', + 'retried.success', + 'retried.failed', + 'success', + 'failed', + 'dropped', + 'dropped.queue_not_enabled', + 'dropped.queue_full', + 'dropped.resource_not_found', + 'dropped.resource_stopped', + 'dropped.other', + 'queuing', + 'batching', + 'inflight', + 'received' + ], [matched] ), - case maps:get(start_after_created, Opts, true) of - true -> wait_for_resource_ready(ResId, maps:get(wait_for_resource_ready, Opts, 5000)); - false -> ok - end, - ok. + case emqx_resource:is_buffer_supported(ResourceType) of + true -> + %% the resource it self supports + %% buffer, so there is no need for resource workers + ok; + false -> + ok = emqx_resource_worker_sup:start_workers(ResId, Opts), + case maps:get(start_after_created, Opts, ?START_AFTER_CREATED) of + true -> + wait_for_ready(ResId, maps:get(start_timeout, Opts, ?START_TIMEOUT)); + false -> + ok + end + end. %% @doc Called from `emqx_resource` when doing a dry run for creating a resource instance. %% @@ -132,7 +175,7 @@ create_dry_run(ResourceType, Config) -> ok = emqx_resource_manager_sup:ensure_child( MgrId, ResId, <<"dry_run">>, ResourceType, Config, #{} ), - case wait_for_resource_ready(ResId, 15000) of + case wait_for_ready(ResId, 15000) of ok -> remove(ResId); timeout -> @@ -151,22 +194,22 @@ remove(ResId, ClearMetrics) when is_binary(ResId) -> safe_call(ResId, {remove, ClearMetrics}, ?T_OPERATION). %% @doc Stops and then starts an instance that was already running --spec restart(resource_id(), create_opts()) -> ok | {error, Reason :: term()}. 
+-spec restart(resource_id(), creation_opts()) -> ok | {error, Reason :: term()}. restart(ResId, Opts) when is_binary(ResId) -> case safe_call(ResId, restart, ?T_OPERATION) of ok -> - wait_for_resource_ready(ResId, maps:get(wait_for_resource_ready, Opts, 5000)), + wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)), ok; {error, _Reason} = Error -> Error end. %% @doc Start the resource --spec start(resource_id(), create_opts()) -> ok | {error, Reason :: term()}. +-spec start(resource_id(), creation_opts()) -> ok | {error, Reason :: term()}. start(ResId, Opts) -> case safe_call(ResId, start, ?T_OPERATION) of ok -> - wait_for_resource_ready(ResId, maps:get(wait_for_resource_ready, Opts, 5000)), + wait_for_ready(ResId, maps:get(start_timeout, Opts, 5000)), ok; {error, _Reason} = Error -> Error @@ -207,14 +250,14 @@ ets_lookup(ResId) -> %% @doc Get the metrics for the specified resource get_metrics(ResId) -> - emqx_metrics_worker:get_metrics(resource_metrics, ResId). + emqx_metrics_worker:get_metrics(?RES_METRICS, ResId). %% @doc Reset the metrics for the specified resource -spec reset_metrics(resource_id()) -> ok. reset_metrics(ResId) -> - emqx_metrics_worker:reset_metrics(resource_metrics, ResId). + emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId). -%% @doc Returns the data for all resorces +%% @doc Returns the data for all resources -spec list_all() -> [resource_data()] | []. 
list_all() -> try @@ -245,6 +288,12 @@ start_link(MgrId, ResId, Group, ResourceType, Config, Opts) -> manager_id = MgrId, group = Group, mod = ResourceType, + callback_mode = emqx_resource:get_callback_mode(ResourceType), + %% query_mode = dynamic | sync | async + %% TODO: + %% dynamic mode is async mode when things are going well, but becomes sync mode + %% if the resource worker is overloaded + query_mode = maps:get(query_mode, Opts, sync), config = Config, opts = Opts, status = connecting, @@ -298,8 +347,7 @@ handle_event({call, From}, stop, stopped, _Data) -> {keep_state_and_data, [{reply, From, ok}]}; handle_event({call, From}, stop, _State, Data) -> Result = stop_resource(Data), - UpdatedData = Data#data{status = disconnected}, - {next_state, stopped, UpdatedData, [{reply, From, Result}]}; + {next_state, stopped, Data, [{reply, From, Result}]}; % Called when a resource is to be stopped and removed. handle_event({call, From}, {remove, ClearMetrics}, _State, Data) -> handle_remove_event(From, ClearMetrics, Data); @@ -315,9 +363,10 @@ handle_event({call, From}, health_check, _State, Data) -> handle_manually_health_check(From, Data); % State: CONNECTING handle_event(enter, _OldState, connecting, Data) -> + UpdatedData = Data#data{status = connecting}, insert_cache(Data#data.id, Data#data.group, Data), Actions = [{state_timeout, 0, health_check}], - {keep_state_and_data, Actions}; + {keep_state, UpdatedData, Actions}; handle_event(internal, start_resource, connecting, Data) -> start_resource(Data, undefined); handle_event(state_timeout, health_check, connecting, Data) -> @@ -326,22 +375,24 @@ handle_event(state_timeout, health_check, connecting, Data) -> %% The connected state is entered after a successful on_start/2 of the callback mod %% and successful health_checks handle_event(enter, _OldState, connected, Data) -> - insert_cache(Data#data.id, Data#data.group, Data), + UpdatedData = Data#data{status = connected}, + insert_cache(Data#data.id, Data#data.group, 
UpdatedData), _ = emqx_alarm:deactivate(Data#data.id), - Actions = [{state_timeout, ?HEALTHCHECK_INTERVAL, health_check}], - {next_state, connected, Data, Actions}; + Actions = [{state_timeout, health_check_interval(Data#data.opts), health_check}], + {next_state, connected, UpdatedData, Actions}; handle_event(state_timeout, health_check, connected, Data) -> handle_connected_health_check(Data); %% State: DISCONNECTED handle_event(enter, _OldState, disconnected, Data) -> - insert_cache(Data#data.id, Data#data.group, Data), - handle_disconnected_state_enter(Data); + UpdatedData = Data#data{status = disconnected}, + insert_cache(Data#data.id, Data#data.group, UpdatedData), + handle_disconnected_state_enter(UpdatedData); handle_event(state_timeout, auto_retry, disconnected, Data) -> start_resource(Data, undefined); %% State: STOPPED %% The stopped state is entered after the resource has been explicitly stopped handle_event(enter, _OldState, stopped, Data) -> - UpdatedData = Data#data{status = disconnected}, + UpdatedData = Data#data{status = stopped}, insert_cache(Data#data.id, Data#data.group, UpdatedData), {next_state, stopped, UpdatedData}; % Ignore all other events @@ -363,9 +414,17 @@ handle_event(EventType, EventData, State, Data) -> %%------------------------------------------------------------------------------ insert_cache(ResId, Group, Data = #data{manager_id = MgrId}) -> case get_owner(ResId) of - not_found -> ets:insert(?ETS_TABLE, {ResId, Group, Data}); - MgrId -> ets:insert(?ETS_TABLE, {ResId, Group, Data}); - _ -> self() ! quit + not_found -> + ets:insert(?ETS_TABLE, {ResId, Group, Data}); + MgrId -> + ets:insert(?ETS_TABLE, {ResId, Group, Data}); + _ -> + ?SLOG(error, #{ + msg => get_resource_owner_failed, + resource_id => ResId, + action => quit_resource + }), + self() ! quit end. read_cache(ResId) -> @@ -404,18 +463,21 @@ get_owner(ResId) -> end. 
handle_disconnected_state_enter(Data) -> - case maps:get(auto_retry_interval, Data#data.opts, undefined) of + {next_state, disconnected, Data, retry_actions(Data)}. + +retry_actions(Data) -> + case maps:get(auto_restart_interval, Data#data.opts, ?AUTO_RESTART_INTERVAL) of undefined -> - {next_state, disconnected, Data}; + []; RetryInterval -> - Actions = [{state_timeout, RetryInterval, auto_retry}], - {next_state, disconnected, Data, Actions} + [{state_timeout, RetryInterval, auto_retry}] end. handle_remove_event(From, ClearMetrics, Data) -> stop_resource(Data), + ok = emqx_resource_worker_sup:stop_workers(Data#data.id, Data#data.opts), case ClearMetrics of - true -> ok = emqx_metrics_worker:clear_metrics(resource_metrics, Data#data.id); + true -> ok = emqx_metrics_worker:clear_metrics(?RES_METRICS, Data#data.id); false -> ok end, {stop_and_reply, normal, [{reply, From, ok}]}. @@ -430,28 +492,33 @@ start_resource(Data, From) -> Actions = maybe_reply([{state_timeout, 0, health_check}], From, ok), {next_state, connecting, UpdatedData, Actions}; {error, Reason} = Err -> + ?SLOG(error, #{ + msg => start_resource_failed, + id => Data#data.id, + reason => Reason + }), _ = maybe_alarm(disconnected, Data#data.id), %% Keep track of the error reason why the connection did not work %% so that the Reason can be returned when the verification call is made. - UpdatedData = Data#data{status = disconnected, error = Reason}, - Actions = maybe_reply([], From, Err), + UpdatedData = Data#data{error = Reason}, + Actions = maybe_reply(retry_actions(UpdatedData), From, Err), {next_state, disconnected, UpdatedData, Actions} end. stop_resource(#data{state = undefined, id = ResId} = _Data) -> _ = maybe_clear_alarm(ResId), + ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId), ok; stop_resource(Data) -> %% We don't care the return value of the Mod:on_stop/2. %% The callback mod should make sure the resource is stopped after on_stop/2 %% is returned. 
+ ResId = Data#data.id, _ = emqx_resource:call_stop(Data#data.manager_id, Data#data.mod, Data#data.state), - _ = maybe_clear_alarm(Data#data.id), + _ = maybe_clear_alarm(ResId), + ok = emqx_metrics_worker:reset_metrics(?RES_METRICS, ResId), ok. -make_manager_id(ResId) -> - emqx_resource:generate_id(ResId). - make_test_id() -> RandId = iolist_to_binary(emqx_misc:gen_id(16)), <>. @@ -469,7 +536,7 @@ handle_connecting_health_check(Data) -> (connected, UpdatedData) -> {next_state, connected, UpdatedData}; (connecting, UpdatedData) -> - Actions = [{state_timeout, ?SHORT_HEALTHCHECK_INTERVAL, health_check}], + Actions = [{state_timeout, health_check_interval(Data#data.opts), health_check}], {keep_state, UpdatedData, Actions}; (disconnected, UpdatedData) -> {next_state, disconnected, UpdatedData} @@ -481,7 +548,7 @@ handle_connected_health_check(Data) -> Data, fun (connected, UpdatedData) -> - Actions = [{state_timeout, ?HEALTHCHECK_INTERVAL, health_check}], + Actions = [{state_timeout, health_check_interval(Data#data.opts), health_check}], {keep_state, UpdatedData, Actions}; (Status, UpdatedData) -> ?SLOG(error, #{ @@ -498,12 +565,16 @@ with_health_check(Data, Func) -> HCRes = emqx_resource:call_health_check(Data#data.manager_id, Data#data.mod, Data#data.state), {Status, NewState, Err} = parse_health_check_result(HCRes, Data), _ = maybe_alarm(Status, ResId), + ok = maybe_resume_resource_workers(Status), UpdatedData = Data#data{ state = NewState, status = Status, error = Err }, insert_cache(ResId, UpdatedData#data.group, UpdatedData), Func(Status, UpdatedData). +health_check_interval(Opts) -> + maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL). + maybe_alarm(connected, _ResId) -> ok; maybe_alarm(_Status, <>) -> @@ -515,6 +586,16 @@ maybe_alarm(_Status, ResId) -> <<"resource down: ", ResId/binary>> ). 
+maybe_resume_resource_workers(connected) -> + lists:foreach( + fun({_, Pid, _, _}) -> + emqx_resource_worker:resume(Pid) + end, + supervisor:which_children(emqx_resource_worker_sup) + ); +maybe_resume_resource_workers(_) -> + ok. + maybe_clear_alarm(<>) -> ok; maybe_clear_alarm(ResId) -> @@ -542,29 +623,32 @@ maybe_reply(Actions, undefined, _Reply) -> maybe_reply(Actions, From, Reply) -> [{reply, From, Reply} | Actions]. +-spec data_record_to_external_map_with_metrics(data()) -> resource_data(). data_record_to_external_map_with_metrics(Data) -> #{ id => Data#data.id, mod => Data#data.mod, + callback_mode => Data#data.callback_mode, + query_mode => Data#data.query_mode, config => Data#data.config, status => Data#data.status, state => Data#data.state, metrics => get_metrics(Data#data.id) }. --spec wait_for_resource_ready(resource_id(), integer()) -> ok | timeout. -wait_for_resource_ready(ResId, WaitTime) -> - do_wait_for_resource_ready(ResId, WaitTime div ?WAIT_FOR_RESOURCE_DELAY). +-spec wait_for_ready(resource_id(), integer()) -> ok | timeout. +wait_for_ready(ResId, WaitTime) -> + do_wait_for_ready(ResId, WaitTime div ?WAIT_FOR_RESOURCE_DELAY). -do_wait_for_resource_ready(_ResId, 0) -> +do_wait_for_ready(_ResId, 0) -> timeout; -do_wait_for_resource_ready(ResId, Retry) -> +do_wait_for_ready(ResId, Retry) -> case ets_lookup(ResId) of {ok, _Group, #{status := connected}} -> ok; _ -> timer:sleep(?WAIT_FOR_RESOURCE_DELAY), - do_wait_for_resource_ready(ResId, Retry - 1) + do_wait_for_ready(ResId, Retry - 1) end. safe_call(ResId, Message, Timeout) -> diff --git a/apps/emqx_resource/src/emqx_resource_metrics.erl b/apps/emqx_resource/src/emqx_resource_metrics.erl new file mode 100644 index 000000000..e6637b68f --- /dev/null +++ b/apps/emqx_resource/src/emqx_resource_metrics.erl @@ -0,0 +1,319 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_resource_metrics). + +-export([ + events/0, + install_telemetry_handler/1, + uninstall_telemetry_handler/1, + handle_telemetry_event/4 +]). + +-export([ + batching_change/2, + batching_get/1, + inflight_change/2, + inflight_get/1, + queuing_change/2, + queuing_get/1, + dropped_inc/1, + dropped_inc/2, + dropped_get/1, + dropped_other_inc/1, + dropped_other_inc/2, + dropped_other_get/1, + dropped_queue_full_inc/1, + dropped_queue_full_inc/2, + dropped_queue_full_get/1, + dropped_queue_not_enabled_inc/1, + dropped_queue_not_enabled_inc/2, + dropped_queue_not_enabled_get/1, + dropped_resource_not_found_inc/1, + dropped_resource_not_found_inc/2, + dropped_resource_not_found_get/1, + dropped_resource_stopped_inc/1, + dropped_resource_stopped_inc/2, + dropped_resource_stopped_get/1, + failed_inc/1, + failed_inc/2, + failed_get/1, + matched_inc/1, + matched_inc/2, + matched_get/1, + retried_inc/1, + retried_inc/2, + retried_get/1, + retried_failed_inc/1, + retried_failed_inc/2, + retried_failed_get/1, + retried_success_inc/1, + retried_success_inc/2, + retried_success_get/1, + success_inc/1, + success_inc/2, + success_get/1 +]). + +-define(RES_METRICS, resource_metrics). +-define(TELEMETRY_PREFIX, emqx, resource). + +-spec events() -> [telemetry:event_name()]. 
+events() -> + [ + [?TELEMETRY_PREFIX, Event] + || Event <- [ + batching, + dropped_other, + dropped_queue_full, + dropped_queue_not_enabled, + dropped_resource_not_found, + dropped_resource_stopped, + failed, + inflight, + matched, + queuing, + retried_failed, + retried_success, + success + ] + ]. + +-spec install_telemetry_handler(binary()) -> ok. +install_telemetry_handler(HandlerID) -> + _ = telemetry:attach_many( + HandlerID, + events(), + fun ?MODULE:handle_telemetry_event/4, + _HandlerConfig = #{} + ), + ok. + +-spec uninstall_telemetry_handler(binary()) -> ok. +uninstall_telemetry_handler(HandlerID) -> + _ = telemetry:detach(HandlerID), + ok. + +handle_telemetry_event( + [?TELEMETRY_PREFIX, Event], + _Measurements = #{counter_inc := Val}, + _Metadata = #{resource_id := ID}, + _HandlerConfig +) -> + case Event of + batching -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'batching', Val); + dropped_other -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.other', Val); + dropped_queue_full -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.queue_full', Val); + dropped_queue_not_enabled -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.queue_not_enabled', Val); + dropped_resource_not_found -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.resource_not_found', Val); + dropped_resource_stopped -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped', Val), + emqx_metrics_worker:inc(?RES_METRICS, ID, 'dropped.resource_stopped', Val); + failed -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'failed', Val); + inflight -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'inflight', Val); + matched -> + emqx_metrics_worker:inc(?RES_METRICS, ID, 'matched', Val); + queuing -> + emqx_metrics_worker:inc(?RES_METRICS, 
ID, 'queuing', Val);
+        retried_failed ->
+            emqx_metrics_worker:inc(?RES_METRICS, ID, 'retried', Val),
+            emqx_metrics_worker:inc(?RES_METRICS, ID, 'failed', Val),
+            emqx_metrics_worker:inc(?RES_METRICS, ID, 'retried.failed', Val);
+        retried_success ->
+            emqx_metrics_worker:inc(?RES_METRICS, ID, 'retried', Val),
+            emqx_metrics_worker:inc(?RES_METRICS, ID, 'success', Val),
+            emqx_metrics_worker:inc(?RES_METRICS, ID, 'retried.success', Val);
+        success ->
+            emqx_metrics_worker:inc(?RES_METRICS, ID, 'success', Val);
+        _ ->
+            ok
+    end;
+handle_telemetry_event(_EventName, _Measurements, _Metadata, _HandlerConfig) ->
+    ok.
+
+%% Gauges (value can go both up and down):
+%% --------------------------------------
+
+%% @doc Count of messages that are currently accumulated in memory waiting for
+%% being sent in one batch
+batching_change(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, batching], #{counter_inc => Val}, #{resource_id => ID}).
+
+batching_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'batching').
+
+%% @doc Count of messages that are currently queuing. [Gauge]
+queuing_change(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, queuing], #{counter_inc => Val}, #{resource_id => ID}).
+
+queuing_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'queuing').
+
+%% @doc Count of messages that were sent asynchronously but ACKs are not
+%% received. [Gauge]
+inflight_change(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, inflight], #{counter_inc => Val}, #{resource_id => ID}).
+
+inflight_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'inflight').
+
+%% Counters (value can only go up):
+%% --------------------------------------
+
+%% @doc Count of messages dropped
+dropped_inc(ID) ->
+    dropped_inc(ID, 1).
+
+dropped_inc(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, dropped], #{counter_inc => Val}, #{resource_id => ID}).
+
+dropped_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped').
+ +%% @doc Count of messages dropped due to other reasons +dropped_other_inc(ID) -> + dropped_other_inc(ID, 1). + +dropped_other_inc(ID, Val) -> + telemetry:execute([?TELEMETRY_PREFIX, dropped_other], #{counter_inc => Val}, #{ + resource_id => ID + }). + +dropped_other_get(ID) -> + emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped.other'). + +%% @doc Count of messages dropped because the queue was full +dropped_queue_full_inc(ID) -> + dropped_queue_full_inc(ID, 1). + +dropped_queue_full_inc(ID, Val) -> + telemetry:execute([?TELEMETRY_PREFIX, dropped_queue_full], #{counter_inc => Val}, #{ + resource_id => ID + }). + +dropped_queue_full_get(ID) -> + emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped.queue_full'). + +%% @doc Count of messages dropped because the queue was not enabled +dropped_queue_not_enabled_inc(ID) -> + dropped_queue_not_enabled_inc(ID, 1). + +dropped_queue_not_enabled_inc(ID, Val) -> + telemetry:execute([?TELEMETRY_PREFIX, dropped_queue_not_enabled], #{counter_inc => Val}, #{ + resource_id => ID + }). + +dropped_queue_not_enabled_get(ID) -> + emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped.queue_not_enabled'). + +%% @doc Count of messages dropped because the resource was not found +dropped_resource_not_found_inc(ID) -> + dropped_resource_not_found_inc(ID, 1). + +dropped_resource_not_found_inc(ID, Val) -> + telemetry:execute([?TELEMETRY_PREFIX, dropped_resource_not_found], #{counter_inc => Val}, #{ + resource_id => ID + }). + +dropped_resource_not_found_get(ID) -> + emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped.resource_not_found'). + +%% @doc Count of messages dropped because the resource was stopped +dropped_resource_stopped_inc(ID) -> + dropped_resource_stopped_inc(ID, 1). + +dropped_resource_stopped_inc(ID, Val) -> + telemetry:execute([?TELEMETRY_PREFIX, dropped_resource_stopped], #{counter_inc => Val}, #{ + resource_id => ID + }). 
+
+dropped_resource_stopped_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'dropped.resource_stopped').
+
+%% @doc Count of how many times this bridge has been matched and queried
+matched_inc(ID) ->
+    matched_inc(ID, 1).
+
+matched_inc(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, matched], #{counter_inc => Val}, #{resource_id => ID}).
+
+matched_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'matched').
+
+%% @doc The number of times message sends have been retried
+retried_inc(ID) ->
+    retried_inc(ID, 1).
+
+retried_inc(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, retried], #{counter_inc => Val}, #{resource_id => ID}).
+
+retried_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'retried').
+
+%% @doc Count of message sends that have failed
+failed_inc(ID) ->
+    failed_inc(ID, 1).
+
+failed_inc(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, failed], #{counter_inc => Val}, #{resource_id => ID}).
+
+failed_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'failed').
+
+%% @doc Count of message sends that have failed after having been retried
+retried_failed_inc(ID) ->
+    retried_failed_inc(ID, 1).
+
+retried_failed_inc(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, retried_failed], #{counter_inc => Val}, #{
+        resource_id => ID
+    }).
+
+retried_failed_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'retried.failed').
+
+%% @doc Count messages that were successfully sent after at least one retry
+retried_success_inc(ID) ->
+    retried_success_inc(ID, 1).
+
+retried_success_inc(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, retried_success], #{counter_inc => Val}, #{
+        resource_id => ID
+    }).
+
+retried_success_get(ID) ->
+    emqx_metrics_worker:get(?RES_METRICS, ID, 'retried.success').
+
+%% @doc Count of messages that have been sent successfully
+success_inc(ID) ->
+    success_inc(ID, 1).
+
+success_inc(ID, Val) ->
+    telemetry:execute([?TELEMETRY_PREFIX, success], #{counter_inc => Val}, #{resource_id => ID}).
+ +success_get(ID) -> + emqx_metrics_worker:get(?RES_METRICS, ID, 'success'). diff --git a/apps/emqx_resource/src/emqx_resource_sup.erl b/apps/emqx_resource/src/emqx_resource_sup.erl index 1120723c3..920743101 100644 --- a/apps/emqx_resource/src/emqx_resource_sup.erl +++ b/apps/emqx_resource/src/emqx_resource_sup.erl @@ -15,22 +15,20 @@ %%-------------------------------------------------------------------- -module(emqx_resource_sup). +-include("emqx_resource.hrl"). + -behaviour(supervisor). -export([start_link/0]). -export([init/1]). -%% set a very large pool size in case all the workers busy --define(POOL_SIZE, 64). - start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> SupFlags = #{strategy => one_for_one, intensity => 10, period => 10}, - Metrics = emqx_metrics_worker:child_spec(resource_metrics), - + Metrics = emqx_metrics_worker:child_spec(?RES_METRICS), ResourceManager = #{ id => emqx_resource_manager_sup, @@ -40,4 +38,11 @@ init([]) -> type => supervisor, modules => [emqx_resource_manager_sup] }, - {ok, {SupFlags, [Metrics, ResourceManager]}}. + WorkerSup = #{ + id => emqx_resource_worker_sup, + start => {emqx_resource_worker_sup, start_link, []}, + restart => permanent, + shutdown => infinity, + type => supervisor + }, + {ok, {SupFlags, [Metrics, ResourceManager, WorkerSup]}}. diff --git a/apps/emqx_resource/src/emqx_resource_utils.erl b/apps/emqx_resource/src/emqx_resource_utils.erl new file mode 100644 index 000000000..715691d2a --- /dev/null +++ b/apps/emqx_resource/src/emqx_resource_utils.erl @@ -0,0 +1,17 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_resource_utils). diff --git a/apps/emqx_resource/src/emqx_resource_worker.erl b/apps/emqx_resource/src/emqx_resource_worker.erl new file mode 100644 index 000000000..a36cb15b7 --- /dev/null +++ b/apps/emqx_resource/src/emqx_resource_worker.erl @@ -0,0 +1,700 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% This module implements async message sending, disk message queuing, +%% and message batching using ReplayQ. + +-module(emqx_resource_worker). + +-include("emqx_resource.hrl"). +-include("emqx_resource_utils.hrl"). +-include("emqx_resource_errors.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-behaviour(gen_statem). 
+ +-export([ + start_link/3, + sync_query/3, + async_query/3, + block/1, + block/2, + resume/1 +]). + +-export([ + simple_sync_query/2, + simple_async_query/3 +]). + +-export([ + callback_mode/0, + init/1, + terminate/2, + code_change/3 +]). + +-export([running/3, blocked/3]). + +-export([queue_item_marshaller/1, estimate_size/1]). + +-export([reply_after_query/6, batch_reply_after_query/6]). + +-define(Q_ITEM(REQUEST), {q_item, REQUEST}). + +-define(QUERY(FROM, REQUEST, SENT), {query, FROM, REQUEST, SENT}). +-define(REPLY(FROM, REQUEST, SENT, RESULT), {reply, FROM, REQUEST, SENT, RESULT}). +-define(EXPAND(RESULT, BATCH), [ + ?REPLY(FROM, REQUEST, SENT, RESULT) + || ?QUERY(FROM, REQUEST, SENT) <- BATCH +]). + +-type id() :: binary(). +-type query() :: {query, from(), request()}. +-type request() :: term(). +-type from() :: pid() | reply_fun(). + +-callback batcher_flush(Acc :: [{from(), request()}], CbState :: term()) -> + {{from(), result()}, NewCbState :: term()}. + +callback_mode() -> [state_functions, state_enter]. + +start_link(Id, Index, Opts) -> + gen_statem:start_link({local, name(Id, Index)}, ?MODULE, {Id, Index, Opts}, []). + +-spec sync_query(id(), request(), query_opts()) -> Result :: term(). +sync_query(Id, Request, Opts) -> + PickKey = maps:get(pick_key, Opts, self()), + Timeout = maps:get(timeout, Opts, infinity), + pick_call(Id, PickKey, {query, Request, Opts}, Timeout). + +-spec async_query(id(), request(), query_opts()) -> Result :: term(). +async_query(Id, Request, Opts) -> + PickKey = maps:get(pick_key, Opts, self()), + pick_cast(Id, PickKey, {query, Request, Opts}). + +%% simple query the resource without batching and queuing messages. +-spec simple_sync_query(id(), request()) -> Result :: term(). +simple_sync_query(Id, Request) -> + Result = call_query(sync, Id, ?QUERY(self(), Request, false), #{}), + _ = handle_query_result(Id, Result, false, false), + Result. + +-spec simple_async_query(id(), request(), reply_fun()) -> Result :: term(). 
+simple_async_query(Id, Request, ReplyFun) -> + Result = call_query(async, Id, ?QUERY(ReplyFun, Request, false), #{}), + _ = handle_query_result(Id, Result, false, false), + Result. + +-spec block(pid() | atom()) -> ok. +block(ServerRef) -> + gen_statem:cast(ServerRef, block). + +-spec block(pid() | atom(), [query()]) -> ok. +block(ServerRef, Query) -> + gen_statem:cast(ServerRef, {block, Query}). + +-spec resume(pid() | atom()) -> ok. +resume(ServerRef) -> + gen_statem:cast(ServerRef, resume). + +init({Id, Index, Opts}) -> + process_flag(trap_exit, true), + true = gproc_pool:connect_worker(Id, {Id, Index}), + Name = name(Id, Index), + BatchSize = maps:get(batch_size, Opts, ?DEFAULT_BATCH_SIZE), + Queue = + case maps:get(enable_queue, Opts, false) of + true -> + replayq:open(#{ + dir => disk_queue_dir(Id, Index), + seg_bytes => maps:get(queue_seg_bytes, Opts, ?DEFAULT_QUEUE_SEG_SIZE), + max_total_bytes => maps:get(max_queue_bytes, Opts, ?DEFAULT_QUEUE_SIZE), + sizer => fun ?MODULE:estimate_size/1, + marshaller => fun ?MODULE:queue_item_marshaller/1 + }); + false -> + undefined + end, + emqx_resource_metrics:queuing_change(Id, queue_count(Queue)), + InfltWinSZ = maps:get(async_inflight_window, Opts, ?DEFAULT_INFLIGHT), + ok = inflight_new(Name, InfltWinSZ), + HCItvl = maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL), + St = #{ + id => Id, + index => Index, + name => Name, + enable_batch => maps:get(enable_batch, Opts, false), + batch_size => BatchSize, + batch_time => maps:get(batch_time, Opts, ?DEFAULT_BATCH_TIME), + queue => Queue, + resume_interval => maps:get(resume_interval, Opts, HCItvl), + acc => [], + acc_left => BatchSize, + tref => undefined + }, + {ok, blocked, St, {next_event, cast, resume}}. 
+ +running(enter, _, _St) -> + keep_state_and_data; +running(cast, resume, _St) -> + keep_state_and_data; +running(cast, block, St) -> + {next_state, blocked, St}; +running(cast, {block, [?QUERY(_, _, _) | _] = Batch}, #{id := Id, queue := Q} = St) when + is_list(Batch) +-> + Q1 = maybe_append_queue(Id, Q, [?Q_ITEM(Query) || Query <- Batch]), + {next_state, blocked, St#{queue := Q1}}; +running({call, From}, {query, Request, _Opts}, St) -> + query_or_acc(From, Request, St); +running(cast, {query, Request, Opts}, St) -> + ReplayFun = maps:get(async_reply_fun, Opts, undefined), + query_or_acc(ReplayFun, Request, St); +running(info, {flush, Ref}, St = #{tref := {_TRef, Ref}}) -> + flush(St#{tref := undefined}); +running(info, {flush, _Ref}, _St) -> + keep_state_and_data; +running(info, Info, _St) -> + ?SLOG(error, #{msg => unexpected_msg, info => Info}), + keep_state_and_data. + +blocked(enter, _, #{resume_interval := ResumeT} = _St) -> + {keep_state_and_data, {state_timeout, ResumeT, resume}}; +blocked(cast, block, _St) -> + keep_state_and_data; +blocked(cast, {block, [?QUERY(_, _, _) | _] = Batch}, #{id := Id, queue := Q} = St) when + is_list(Batch) +-> + Q1 = maybe_append_queue(Id, Q, [?Q_ITEM(Query) || Query <- Batch]), + {keep_state, St#{queue := Q1}}; +blocked(cast, resume, St) -> + do_resume(St); +blocked(state_timeout, resume, St) -> + do_resume(St); +blocked({call, From}, {query, Request, _Opts}, #{id := Id, queue := Q} = St) -> + Error = ?RESOURCE_ERROR(blocked, "resource is blocked"), + _ = reply_caller(Id, ?REPLY(From, Request, false, Error)), + {keep_state, St#{queue := maybe_append_queue(Id, Q, [?Q_ITEM(?QUERY(From, Request, false))])}}; +blocked(cast, {query, Request, Opts}, #{id := Id, queue := Q} = St) -> + ReplayFun = maps:get(async_reply_fun, Opts, undefined), + Error = ?RESOURCE_ERROR(blocked, "resource is blocked"), + _ = reply_caller(Id, ?REPLY(ReplayFun, Request, false, Error)), + {keep_state, St#{ + queue := maybe_append_queue(Id, Q, 
[?Q_ITEM(?QUERY(ReplayFun, Request, false))]) + }}. + +terminate(_Reason, #{id := Id, index := Index}) -> + gproc_pool:disconnect_worker(Id, {Id, Index}). + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%============================================================================== +-define(PICK(ID, KEY, EXPR), + try gproc_pool:pick_worker(ID, KEY) of + Pid when is_pid(Pid) -> + EXPR; + _ -> + ?RESOURCE_ERROR(worker_not_created, "resource not created") + catch + error:badarg -> + ?RESOURCE_ERROR(worker_not_created, "resource not created"); + exit:{timeout, _} -> + ?RESOURCE_ERROR(timeout, "call resource timeout") + end +). + +pick_call(Id, Key, Query, Timeout) -> + ?PICK(Id, Key, gen_statem:call(Pid, Query, {clean_timeout, Timeout})). + +pick_cast(Id, Key, Query) -> + ?PICK(Id, Key, gen_statem:cast(Pid, Query)). + +do_resume(#{id := Id, name := Name} = St) -> + case inflight_get_first(Name) of + empty -> + retry_queue(St); + {Ref, FirstQuery} -> + %% We retry msgs in inflight window sync, as if we send them + %% async, they will be appended to the end of inflight window again. + retry_inflight_sync(Id, Ref, FirstQuery, Name, St) + end. 
+ +retry_queue(#{queue := undefined} = St) -> + {next_state, running, St}; +retry_queue(#{queue := Q, id := Id, enable_batch := false, resume_interval := ResumeT} = St) -> + case get_first_n_from_queue(Q, 1) of + [] -> + {next_state, running, St}; + [?QUERY(_, Request, HasSent) = Query] -> + QueryOpts = #{inflight_name => maps:get(name, St)}, + Result = call_query(configured, Id, Query, QueryOpts), + case reply_caller(Id, ?REPLY(undefined, Request, HasSent, Result)) of + true -> + {keep_state, St, {state_timeout, ResumeT, resume}}; + false -> + retry_queue(St#{queue := drop_head(Q, Id)}) + end + end; +retry_queue( + #{ + queue := Q, + id := Id, + enable_batch := true, + batch_size := BatchSize, + resume_interval := ResumeT + } = St +) -> + case get_first_n_from_queue(Q, BatchSize) of + [] -> + {next_state, running, St}; + Batch0 -> + QueryOpts = #{inflight_name => maps:get(name, St)}, + Result = call_query(configured, Id, Batch0, QueryOpts), + %% The caller has been replied with ?RESOURCE_ERROR(blocked, _) before saving into the queue, + %% we now change the 'from' field to 'undefined' so it will not reply the caller again. + Batch = [?QUERY(undefined, Request, HasSent) || ?QUERY(_, Request, HasSent) <- Batch0], + case batch_reply_caller(Id, Result, Batch) of + true -> + {keep_state, St, {state_timeout, ResumeT, resume}}; + false -> + retry_queue(St#{queue := drop_first_n_from_queue(Q, length(Batch), Id)}) + end + end. + +retry_inflight_sync( + Id, Ref, ?QUERY(_, _, HasSent) = Query, Name, #{resume_interval := ResumeT} = St0 +) -> + Result = call_query(sync, Id, Query, #{}), + case handle_query_result(Id, Result, HasSent, false) of + %% Send failed because resource down + true -> + {keep_state, St0, {state_timeout, ResumeT, resume}}; + %% Send ok or failed but the resource is working + false -> + inflight_drop(Name, Ref), + do_resume(St0) + end. 
+ +query_or_acc(From, Request, #{enable_batch := true, acc := Acc, acc_left := Left, id := Id} = St0) -> + Acc1 = [?QUERY(From, Request, false) | Acc], + emqx_resource_metrics:batching_change(Id, 1), + St = St0#{acc := Acc1, acc_left := Left - 1}, + case Left =< 1 of + true -> flush(St); + false -> {keep_state, ensure_flush_timer(St)} + end; +query_or_acc(From, Request, #{enable_batch := false, queue := Q, id := Id} = St) -> + QueryOpts = #{ + inflight_name => maps:get(name, St) + }, + Result = call_query(configured, Id, ?QUERY(From, Request, false), QueryOpts), + case reply_caller(Id, ?REPLY(From, Request, false, Result)) of + true -> + Query = ?QUERY(From, Request, false), + {next_state, blocked, St#{queue := maybe_append_queue(Id, Q, [?Q_ITEM(Query)])}}; + false -> + {keep_state, St} + end. + +flush(#{acc := []} = St) -> + {keep_state, St}; +flush( + #{ + id := Id, + acc := Batch0, + batch_size := Size, + queue := Q0 + } = St +) -> + Batch = lists:reverse(Batch0), + QueryOpts = #{ + inflight_name => maps:get(name, St) + }, + emqx_resource_metrics:batching_change(Id, -length(Batch)), + Result = call_query(configured, Id, Batch, QueryOpts), + St1 = cancel_flush_timer(St#{acc_left := Size, acc := []}), + case batch_reply_caller(Id, Result, Batch) of + true -> + Q1 = maybe_append_queue(Id, Q0, [?Q_ITEM(Query) || Query <- Batch]), + {next_state, blocked, St1#{queue := Q1}}; + false -> + {keep_state, St1} + end. + +batch_reply_caller(Id, BatchResult, Batch) -> + lists:foldl( + fun(Reply, BlockWorker) -> + reply_caller(Id, Reply, BlockWorker) + end, + false, + %% the `Mod:on_batch_query/3` returns a single result for a batch, + %% so we need to expand + ?EXPAND(BatchResult, Batch) + ). + +reply_caller(Id, Reply) -> + reply_caller(Id, Reply, false). 
+ +reply_caller(Id, ?REPLY(undefined, _, HasSent, Result), BlockWorker) -> + handle_query_result(Id, Result, HasSent, BlockWorker); +reply_caller(Id, ?REPLY({ReplyFun, Args}, _, HasSent, Result), BlockWorker) when + is_function(ReplyFun) +-> + _ = + case Result of + {async_return, _} -> no_reply_for_now; + _ -> apply(ReplyFun, Args ++ [Result]) + end, + handle_query_result(Id, Result, HasSent, BlockWorker); +reply_caller(Id, ?REPLY(From, _, HasSent, Result), BlockWorker) -> + gen_statem:reply(From, Result), + handle_query_result(Id, Result, HasSent, BlockWorker). + +handle_query_result(Id, ?RESOURCE_ERROR_M(exception, Msg), HasSent, BlockWorker) -> + ?SLOG(error, #{msg => resource_exception, info => Msg}), + inc_sent_failed(Id, HasSent), + BlockWorker; +handle_query_result(_Id, ?RESOURCE_ERROR_M(NotWorking, _), _HasSent, _) when + NotWorking == not_connected; NotWorking == blocked +-> + true; +handle_query_result(Id, ?RESOURCE_ERROR_M(not_found, Msg), _HasSent, BlockWorker) -> + ?SLOG(error, #{id => Id, msg => resource_not_found, info => Msg}), + emqx_resource_metrics:dropped_resource_not_found_inc(Id), + BlockWorker; +handle_query_result(Id, ?RESOURCE_ERROR_M(stopped, Msg), _HasSent, BlockWorker) -> + ?SLOG(error, #{id => Id, msg => resource_stopped, info => Msg}), + emqx_resource_metrics:dropped_resource_stopped_inc(Id), + BlockWorker; +handle_query_result(Id, ?RESOURCE_ERROR_M(Reason, _), _HasSent, BlockWorker) -> + ?SLOG(error, #{id => Id, msg => other_resource_error, reason => Reason}), + emqx_resource_metrics:dropped_other_inc(Id), + BlockWorker; +handle_query_result(Id, {error, {recoverable_error, Reason}}, _HasSent, _BlockWorker) -> + %% the message will be queued in replayq or inflight window, + %% i.e. the counter 'queuing' or 'dropped' will increase, so we pretend that we have not + %% sent this message. 
+ ?SLOG(warning, #{id => Id, msg => recoverable_error, reason => Reason}), + true; +handle_query_result(Id, {error, Reason}, HasSent, BlockWorker) -> + ?SLOG(error, #{id => Id, msg => send_error, reason => Reason}), + inc_sent_failed(Id, HasSent), + BlockWorker; +handle_query_result(_Id, {async_return, inflight_full}, _HasSent, _BlockWorker) -> + true; +handle_query_result(Id, {async_return, {error, Msg}}, HasSent, BlockWorker) -> + ?SLOG(error, #{id => Id, msg => async_send_error, info => Msg}), + inc_sent_failed(Id, HasSent), + BlockWorker; +handle_query_result(_Id, {async_return, ok}, _HasSent, BlockWorker) -> + BlockWorker; +handle_query_result(Id, Result, HasSent, BlockWorker) -> + assert_ok_result(Result), + inc_sent_success(Id, HasSent), + BlockWorker. + +call_query(QM0, Id, Query, QueryOpts) -> + ?tp(call_query_enter, #{id => Id, query => Query}), + case emqx_resource_manager:ets_lookup(Id) of + {ok, _Group, #{mod := Mod, state := ResSt, status := connected} = Data} -> + QM = + case QM0 of + configured -> maps:get(query_mode, Data); + _ -> QM0 + end, + CM = maps:get(callback_mode, Data), + emqx_resource_metrics:matched_inc(Id), + apply_query_fun(call_mode(QM, CM), Mod, Id, Query, ResSt, QueryOpts); + {ok, _Group, #{status := stopped}} -> + emqx_resource_metrics:matched_inc(Id), + ?RESOURCE_ERROR(stopped, "resource stopped or disabled"); + {ok, _Group, #{status := S}} when S == connecting; S == disconnected -> + emqx_resource_metrics:matched_inc(Id), + ?RESOURCE_ERROR(not_connected, "resource not connected"); + {error, not_found} -> + ?RESOURCE_ERROR(not_found, "resource not found") + end. 
+ +-define(APPLY_RESOURCE(NAME, EXPR, REQ), + try + %% if the callback module (connector) wants to return an error that + %% makes the current resource goes into the `blocked` state, it should + %% return `{error, {recoverable_error, Reason}}` + EXPR + catch + ERR:REASON:STACKTRACE -> + ?RESOURCE_ERROR(exception, #{ + name => NAME, + id => Id, + request => REQ, + error => {ERR, REASON}, + stacktrace => STACKTRACE + }) + end +). + +apply_query_fun(sync, Mod, Id, ?QUERY(_, Request, _) = _Query, ResSt, _QueryOpts) -> + ?tp(call_query, #{id => Id, mod => Mod, query => _Query, res_st => ResSt}), + ?APPLY_RESOURCE(call_query, Mod:on_query(Id, Request, ResSt), Request); +apply_query_fun(async, Mod, Id, ?QUERY(_, Request, _) = Query, ResSt, QueryOpts) -> + ?tp(call_query_async, #{id => Id, mod => Mod, query => Query, res_st => ResSt}), + Name = maps:get(inflight_name, QueryOpts, undefined), + ?APPLY_RESOURCE( + call_query_async, + case inflight_is_full(Name) of + true -> + {async_return, inflight_full}; + false -> + ok = emqx_resource_metrics:inflight_change(Id, 1), + ReplyFun = fun ?MODULE:reply_after_query/6, + Ref = make_message_ref(), + Args = [self(), Id, Name, Ref, Query], + ok = inflight_append(Name, Ref, Query), + Result = Mod:on_query_async(Id, Request, {ReplyFun, Args}, ResSt), + {async_return, Result} + end, + Request + ); +apply_query_fun(sync, Mod, Id, [?QUERY(_, _, _) | _] = Batch, ResSt, _QueryOpts) -> + ?tp(call_batch_query, #{id => Id, mod => Mod, batch => Batch, res_st => ResSt}), + Requests = [Request || ?QUERY(_From, Request, _) <- Batch], + ?APPLY_RESOURCE(call_batch_query, Mod:on_batch_query(Id, Requests, ResSt), Batch); +apply_query_fun(async, Mod, Id, [?QUERY(_, _, _) | _] = Batch, ResSt, QueryOpts) -> + ?tp(call_batch_query_async, #{id => Id, mod => Mod, batch => Batch, res_st => ResSt}), + Name = maps:get(inflight_name, QueryOpts, undefined), + ?APPLY_RESOURCE( + call_batch_query_async, + case inflight_is_full(Name) of + true -> + {async_return, 
inflight_full};
+            false ->
+                BatchLen = length(Batch),
+                ok = emqx_resource_metrics:inflight_change(Id, BatchLen),
+                ReplyFun = fun ?MODULE:batch_reply_after_query/6,
+                Ref = make_message_ref(),
+                Args = {ReplyFun, [self(), Id, Name, Ref, Batch]},
+                Requests = [Request || ?QUERY(_From, Request, _) <- Batch],
+                ok = inflight_append(Name, Ref, Batch),
+                Result = Mod:on_batch_query_async(Id, Requests, Args, ResSt),
+                {async_return, Result}
+        end,
+        Batch
+    ).
+
+reply_after_query(Pid, Id, Name, Ref, ?QUERY(From, Request, HasSent), Result) ->
+    %% NOTE: 'inflight' is message count that sent async but no ACK received,
+    %% NOT the message number queued in the inflight window.
+    emqx_resource_metrics:inflight_change(Id, -1),
+    case reply_caller(Id, ?REPLY(From, Request, HasSent, Result)) of
+        true ->
+            %% we marked these messages as 'queuing' although they are actually
+            %% kept in inflight window, not replayq
+            emqx_resource_metrics:queuing_change(Id, 1),
+            ?MODULE:block(Pid);
+        false ->
+            drop_inflight_and_resume(Pid, Name, Ref)
+    end.
+
+batch_reply_after_query(Pid, Id, Name, Ref, Batch, Result) ->
+    %% NOTE: 'inflight' is message count that sent async but no ACK received,
+    %% NOT the message number queued in the inflight window.
+    BatchLen = length(Batch),
+    emqx_resource_metrics:inflight_change(Id, -BatchLen),
+    case batch_reply_caller(Id, Result, Batch) of
+        true ->
+            %% we marked these messages as 'queuing' although they are actually
+            %% kept in inflight window, not replayq
+            emqx_resource_metrics:queuing_change(Id, BatchLen),
+            ?MODULE:block(Pid);
+        false ->
+            drop_inflight_and_resume(Pid, Name, Ref)
+    end.
+
+drop_inflight_and_resume(Pid, Name, Ref) ->
+    case inflight_is_full(Name) of
+        true ->
+            inflight_drop(Name, Ref),
+            ?MODULE:resume(Pid);
+        false ->
+            inflight_drop(Name, Ref)
+    end.
+ +%%============================================================================== +%% operations for queue +queue_item_marshaller(?Q_ITEM(_) = I) -> + term_to_binary(I); +queue_item_marshaller(Bin) when is_binary(Bin) -> + binary_to_term(Bin). + +estimate_size(QItem) -> + size(queue_item_marshaller(QItem)). + +maybe_append_queue(Id, undefined, _Items) -> + emqx_resource_metrics:dropped_queue_not_enabled_inc(Id), + undefined; +maybe_append_queue(Id, Q, Items) -> + Q2 = + case replayq:overflow(Q) of + Overflow when Overflow =< 0 -> + Q; + Overflow -> + PopOpts = #{bytes_limit => Overflow, count_limit => 999999999}, + {Q1, QAckRef, Items2} = replayq:pop(Q, PopOpts), + ok = replayq:ack(Q1, QAckRef), + Dropped = length(Items2), + emqx_resource_metrics:queuing_change(Id, -Dropped), + emqx_resource_metrics:dropped_queue_full_inc(Id), + ?SLOG(error, #{msg => drop_query, reason => queue_full, dropped => Dropped}), + Q1 + end, + emqx_resource_metrics:queuing_change(Id, 1), + replayq:append(Q2, Items). + +get_first_n_from_queue(Q, N) -> + get_first_n_from_queue(Q, N, []). + +get_first_n_from_queue(_Q, 0, Acc) -> + lists:reverse(Acc); +get_first_n_from_queue(Q, N, Acc) when N > 0 -> + case replayq:peek(Q) of + empty -> Acc; + ?Q_ITEM(Query) -> get_first_n_from_queue(Q, N - 1, [Query | Acc]) + end. + +drop_first_n_from_queue(Q, 0, _Id) -> + Q; +drop_first_n_from_queue(Q, N, Id) when N > 0 -> + drop_first_n_from_queue(drop_head(Q, Id), N - 1, Id). + +drop_head(Q, Id) -> + {Q1, AckRef, _} = replayq:pop(Q, #{count_limit => 1}), + ok = replayq:ack(Q1, AckRef), + emqx_resource_metrics:queuing_change(Id, -1), + Q1. + +%%============================================================================== +%% the inflight queue for async query +-define(SIZE_REF, -1). +inflight_new(Name, InfltWinSZ) -> + _ = ets:new(Name, [named_table, ordered_set, public, {write_concurrency, true}]), + inflight_append(Name, ?SIZE_REF, {max_size, InfltWinSZ}), + ok. 
+ +inflight_get_first(Name) -> + case ets:next(Name, ?SIZE_REF) of + '$end_of_table' -> + empty; + Ref -> + case ets:lookup(Name, Ref) of + [Object] -> + Object; + [] -> + %% it might have been dropped + inflight_get_first(Name) + end + end. + +inflight_is_full(undefined) -> + false; +inflight_is_full(Name) -> + [{_, {max_size, MaxSize}}] = ets:lookup(Name, ?SIZE_REF), + case ets:info(Name, size) of + Size when Size > MaxSize -> true; + _ -> false + end. + +inflight_append(undefined, _Ref, _Query) -> + ok; +inflight_append(Name, Ref, [?QUERY(_, _, _) | _] = Batch) -> + ets:insert(Name, {Ref, [?QUERY(From, Req, true) || ?QUERY(From, Req, _) <- Batch]}), + ok; +inflight_append(Name, Ref, ?QUERY(From, Req, _)) -> + ets:insert(Name, {Ref, ?QUERY(From, Req, true)}), + ok; +inflight_append(Name, Ref, Data) -> + ets:insert(Name, {Ref, Data}), + ok. + +inflight_drop(undefined, _) -> + ok; +inflight_drop(Name, Ref) -> + ets:delete(Name, Ref), + ok. + +%%============================================================================== + +inc_sent_failed(Id, _HasSent = true) -> + emqx_resource_metrics:retried_failed_inc(Id); +inc_sent_failed(Id, _HasSent) -> + emqx_resource_metrics:failed_inc(Id). + +inc_sent_success(Id, _HasSent = true) -> + emqx_resource_metrics:retried_success_inc(Id); +inc_sent_success(Id, _HasSent) -> + emqx_resource_metrics:success_inc(Id). + +call_mode(sync, _) -> sync; +call_mode(async, always_sync) -> sync; +call_mode(async, async_if_possible) -> async. + +assert_ok_result(ok) -> + true; +assert_ok_result({async_return, R}) -> + assert_ok_result(R); +assert_ok_result(R) when is_tuple(R) -> + try + ok = erlang:element(1, R) + catch + error:{badmatch, _} -> + error({not_ok_result, R}) + end; +assert_ok_result(R) -> + error({not_ok_result, R}). + +queue_count(undefined) -> + 0; +queue_count(Q) -> + replayq:count(Q). + +-spec name(id(), integer()) -> atom(). 
+name(Id, Index) -> + Mod = atom_to_list(?MODULE), + Id1 = binary_to_list(Id), + Index1 = integer_to_list(Index), + list_to_atom(lists:concat([Mod, ":", Id1, ":", Index1])). + +disk_queue_dir(Id, Index) -> + QDir = binary_to_list(Id) ++ ":" ++ integer_to_list(Index), + filename:join([emqx:data_dir(), "resource_worker", node(), QDir]). + +ensure_flush_timer(St = #{tref := undefined, batch_time := T}) -> + Ref = make_ref(), + TRef = erlang:send_after(T, self(), {flush, Ref}), + St#{tref => {TRef, Ref}}; +ensure_flush_timer(St) -> + St. + +cancel_flush_timer(St = #{tref := undefined}) -> + St; +cancel_flush_timer(St = #{tref := {TRef, _Ref}}) -> + _ = erlang:cancel_timer(TRef), + St#{tref => undefined}. + +make_message_ref() -> + erlang:unique_integer([monotonic, positive]). diff --git a/apps/emqx_resource/src/emqx_resource_worker_sup.erl b/apps/emqx_resource/src/emqx_resource_worker_sup.erl new file mode 100644 index 000000000..2db7b5c4c --- /dev/null +++ b/apps/emqx_resource/src/emqx_resource_worker_sup.erl @@ -0,0 +1,136 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_resource_worker_sup). +-behaviour(supervisor). 
+ +%%%============================================================================= +%%% Exports and Definitions +%%%============================================================================= + +%% External API +-export([start_link/0]). + +-export([start_workers/2, stop_workers/2]). + +%% Callbacks +-export([init/1]). + +-define(SERVER, ?MODULE). + +%%%============================================================================= +%%% API +%%%============================================================================= + +-spec start_link() -> supervisor:startlink_ret(). +start_link() -> + supervisor:start_link({local, ?SERVER}, ?MODULE, []). + +%%%============================================================================= +%%% Callbacks +%%%============================================================================= + +-spec init(list()) -> {ok, {supervisor:sup_flags(), [supervisor:child_spec()]}} | ignore. +init([]) -> + SupFlags = #{ + strategy => one_for_one, + intensity => 100, + period => 30 + }, + ChildSpecs = [], + {ok, {SupFlags, ChildSpecs}}. + +start_workers(ResId, Opts) -> + WorkerPoolSize = worker_pool_size(Opts), + _ = ensure_worker_pool(ResId, hash, [{size, WorkerPoolSize}]), + lists:foreach( + fun(Idx) -> + _ = ensure_worker_added(ResId, Idx), + ok = ensure_worker_started(ResId, Idx, Opts) + end, + lists:seq(1, WorkerPoolSize) + ). + +stop_workers(ResId, Opts) -> + WorkerPoolSize = worker_pool_size(Opts), + lists:foreach( + fun(Idx) -> + ensure_worker_removed(ResId, Idx) + end, + lists:seq(1, WorkerPoolSize) + ), + ensure_worker_pool_removed(ResId), + ok. + +%%%============================================================================= +%%% Internal +%%%============================================================================= +worker_pool_size(Opts) -> + maps:get(worker_pool_size, Opts, erlang:system_info(schedulers_online)). 
+ +ensure_worker_pool(ResId, Type, Opts) -> + try + gproc_pool:new(ResId, Type, Opts) + catch + error:exists -> ok + end, + ok. + +ensure_worker_added(ResId, Idx) -> + try + gproc_pool:add_worker(ResId, {ResId, Idx}, Idx) + catch + error:exists -> ok + end, + ok. + +-define(CHILD_ID(MOD, RESID, INDEX), {MOD, RESID, INDEX}). +ensure_worker_started(ResId, Idx, Opts) -> + Mod = emqx_resource_worker, + Spec = #{ + id => ?CHILD_ID(Mod, ResId, Idx), + start => {Mod, start_link, [ResId, Idx, Opts]}, + restart => transient, + shutdown => 5000, + type => worker, + modules => [Mod] + }, + case supervisor:start_child(?SERVER, Spec) of + {ok, _Pid} -> ok; + {error, {already_started, _}} -> ok; + {error, already_present} -> ok; + {error, _} = Err -> Err + end. + +ensure_worker_removed(ResId, Idx) -> + ChildId = ?CHILD_ID(emqx_resource_worker, ResId, Idx), + case supervisor:terminate_child(?SERVER, ChildId) of + ok -> + Res = supervisor:delete_child(?SERVER, ChildId), + _ = gproc_pool:remove_worker(ResId, {ResId, Idx}), + Res; + {error, not_found} -> + ok; + {error, Reason} -> + {error, Reason} + end. + +ensure_worker_pool_removed(ResId) -> + try + gproc_pool:delete(ResId) + catch + error:badarg -> ok + end, + ok. diff --git a/apps/emqx_resource/src/proto/emqx_resource_proto_v1.erl b/apps/emqx_resource/src/proto/emqx_resource_proto_v1.erl index cdd2592d9..11af1a62c 100644 --- a/apps/emqx_resource/src/proto/emqx_resource_proto_v1.erl +++ b/apps/emqx_resource/src/proto/emqx_resource_proto_v1.erl @@ -38,7 +38,7 @@ introduced_in() -> resource_group(), resource_type(), resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data() | 'already_created'} | {error, Reason :: term()}. create(ResId, Group, ResourceType, Config, Opts) -> @@ -58,7 +58,7 @@ create_dry_run(ResourceType, Config) -> resource_id(), resource_type(), resource_config(), - create_opts() + creation_opts() ) -> {ok, resource_data()} | {error, Reason :: term()}. 
recreate(ResId, ResourceType, Config, Opts) -> diff --git a/apps/emqx_resource/src/schema/emqx_resource_schema.erl b/apps/emqx_resource/src/schema/emqx_resource_schema.erl new file mode 100644 index 000000000..c666974b1 --- /dev/null +++ b/apps/emqx_resource/src/schema/emqx_resource_schema.erl @@ -0,0 +1,120 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_resource_schema). + +-include("emqx_resource.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([namespace/0, roots/0, fields/1, desc/1]). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions + +namespace() -> "resource_schema". + +roots() -> []. 
+ +fields("resource_opts") -> + [ + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(<<"resource_opts">>) + } + )} + ]; +fields("creation_opts") -> + [ + {worker_pool_size, fun worker_pool_size/1}, + {health_check_interval, fun health_check_interval/1}, + {auto_restart_interval, fun auto_restart_interval/1}, + {query_mode, fun query_mode/1}, + {async_inflight_window, fun async_inflight_window/1}, + {enable_batch, fun enable_batch/1}, + {batch_size, fun batch_size/1}, + {batch_time, fun batch_time/1}, + {enable_queue, fun enable_queue/1}, + {max_queue_bytes, fun max_queue_bytes/1} + ]. + +worker_pool_size(type) -> pos_integer(); +worker_pool_size(desc) -> ?DESC("worker_pool_size"); +worker_pool_size(default) -> ?WORKER_POOL_SIZE; +worker_pool_size(required) -> false; +worker_pool_size(_) -> undefined. + +health_check_interval(type) -> emqx_schema:duration_ms(); +health_check_interval(desc) -> ?DESC("health_check_interval"); +health_check_interval(default) -> ?HEALTHCHECK_INTERVAL_RAW; +health_check_interval(required) -> false; +health_check_interval(_) -> undefined. + +auto_restart_interval(type) -> hoconsc:union([infinity, emqx_schema:duration_ms()]); +auto_restart_interval(desc) -> ?DESC("auto_restart_interval"); +auto_restart_interval(default) -> ?AUTO_RESTART_INTERVAL_RAW; +auto_restart_interval(required) -> false; +auto_restart_interval(_) -> undefined. + +query_mode(type) -> enum([sync, async]); +query_mode(desc) -> ?DESC("query_mode"); +query_mode(default) -> async; +query_mode(required) -> false; +query_mode(_) -> undefined. + +enable_batch(type) -> boolean(); +enable_batch(required) -> false; +enable_batch(default) -> true; +enable_batch(desc) -> ?DESC("enable_batch"); +enable_batch(_) -> undefined. + +enable_queue(type) -> boolean(); +enable_queue(required) -> false; +enable_queue(default) -> false; +enable_queue(desc) -> ?DESC("enable_queue"); +enable_queue(_) -> undefined. 
+ +async_inflight_window(type) -> pos_integer(); +async_inflight_window(desc) -> ?DESC("async_inflight_window"); +async_inflight_window(default) -> ?DEFAULT_INFLIGHT; +async_inflight_window(required) -> false; +async_inflight_window(_) -> undefined. + +batch_size(type) -> pos_integer(); +batch_size(desc) -> ?DESC("batch_size"); +batch_size(default) -> ?DEFAULT_BATCH_SIZE; +batch_size(required) -> false; +batch_size(_) -> undefined. + +batch_time(type) -> emqx_schema:duration_ms(); +batch_time(desc) -> ?DESC("batch_time"); +batch_time(default) -> ?DEFAULT_BATCH_TIME_RAW; +batch_time(required) -> false; +batch_time(_) -> undefined. + +max_queue_bytes(type) -> emqx_schema:bytesize(); +max_queue_bytes(desc) -> ?DESC("max_queue_bytes"); +max_queue_bytes(default) -> ?DEFAULT_QUEUE_SIZE_RAW; +max_queue_bytes(required) -> false; +max_queue_bytes(_) -> undefined. + +desc("creation_opts") -> + ?DESC("creation_opts"). diff --git a/apps/emqx_resource/test/emqx_connector_demo.erl b/apps/emqx_resource/test/emqx_connector_demo.erl new file mode 100644 index 000000000..105bcad77 --- /dev/null +++ b/apps/emqx_resource/test/emqx_connector_demo.erl @@ -0,0 +1,218 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2021-2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_connector_demo). 
+ +-include_lib("typerefl/include/types.hrl"). + +-behaviour(emqx_resource). + +%% callbacks of behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_query_async/4, + on_batch_query/3, + on_get_status/2 +]). + +-export([counter_loop/0, set_callback_mode/1]). + +%% callbacks for emqx_resource config schema +-export([roots/0]). + +roots() -> + [ + {name, fun name/1}, + {register, fun register/1} + ]. + +name(type) -> atom(); +name(required) -> true; +name(_) -> undefined. + +register(type) -> boolean(); +register(required) -> true; +register(default) -> false; +register(_) -> undefined. + +-define(CM_KEY, {?MODULE, callback_mode}). +callback_mode() -> + persistent_term:get(?CM_KEY). + +set_callback_mode(Mode) -> + persistent_term:put(?CM_KEY, Mode). + +on_start(_InstId, #{create_error := true}) -> + error("some error"); +on_start(InstId, #{name := Name, stop_error := true} = Opts) -> + Register = maps:get(register, Opts, false), + {ok, Opts#{ + id => InstId, + stop_error => true, + pid => spawn_counter_process(Name, Register) + }}; +on_start(InstId, #{name := Name} = Opts) -> + Register = maps:get(register, Opts, false), + {ok, Opts#{ + id => InstId, + pid => spawn_counter_process(Name, Register) + }}. + +on_stop(_InstId, #{stop_error := true}) -> + {error, stop_error}; +on_stop(_InstId, #{pid := Pid}) -> + erlang:exit(Pid, shutdown), + ok. + +on_query(_InstId, get_state, State) -> + {ok, State}; +on_query(_InstId, get_state_failed, State) -> + {error, State}; +on_query(_InstId, block, #{pid := Pid}) -> + Pid ! block, + ok; +on_query(_InstId, resume, #{pid := Pid}) -> + Pid ! resume, + ok; +on_query(_InstId, {inc_counter, N}, #{pid := Pid}) -> + ReqRef = make_ref(), + From = {self(), ReqRef}, + Pid ! 
{From, {inc, N}}, + receive + {ReqRef, ok} -> ok; + {ReqRef, incorrect_status} -> {error, {recoverable_error, incorrect_status}} + after 1000 -> + {error, timeout} + end; +on_query(_InstId, get_incorrect_status_count, #{pid := Pid}) -> + ReqRef = make_ref(), + From = {self(), ReqRef}, + Pid ! {From, get_incorrect_status_count}, + receive + {ReqRef, Count} -> {ok, Count} + after 1000 -> + {error, timeout} + end; +on_query(_InstId, get_counter, #{pid := Pid}) -> + ReqRef = make_ref(), + From = {self(), ReqRef}, + Pid ! {From, get}, + receive + {ReqRef, Num} -> {ok, Num} + after 1000 -> + {error, timeout} + end. + +on_query_async(_InstId, {inc_counter, N}, ReplyFun, #{pid := Pid}) -> + Pid ! {inc, N, ReplyFun}, + ok; +on_query_async(_InstId, get_counter, ReplyFun, #{pid := Pid}) -> + Pid ! {get, ReplyFun}, + ok. + +on_batch_query(InstId, BatchReq, State) -> + %% Requests can be either 'get_counter' or 'inc_counter', but cannot be mixed. + case hd(BatchReq) of + {inc_counter, _} -> + batch_inc_counter(InstId, BatchReq, State); + get_counter -> + batch_get_counter(InstId, State) + end. + +batch_inc_counter(InstId, BatchReq, State) -> + TotalN = lists:foldl( + fun + ({inc_counter, N}, Total) -> + Total + N; + (Req, _Total) -> + error({mixed_requests_not_allowed, {inc_counter, Req}}) + end, + 0, + BatchReq + ), + on_query(InstId, {inc_counter, TotalN}, State). + +batch_get_counter(InstId, State) -> + on_query(InstId, get_counter, State). + +on_get_status(_InstId, #{health_check_error := true}) -> + disconnected; +on_get_status(_InstId, #{pid := Pid}) -> + timer:sleep(300), + case is_process_alive(Pid) of + true -> connected; + false -> disconnected + end. + +spawn_counter_process(Name, Register) -> + Pid = spawn_link(?MODULE, counter_loop, []), + true = maybe_register(Name, Pid, Register), + Pid. + +counter_loop() -> + counter_loop(#{counter => 0, status => running, incorrect_status_count => 0}). 
+ +counter_loop( + #{ + counter := Num, + status := Status, + incorrect_status_count := IncorrectCount + } = State +) -> + NewState = + receive + block -> + ct:pal("counter recv: ~p", [block]), + State#{status => blocked}; + resume -> + {messages, Msgs} = erlang:process_info(self(), messages), + ct:pal("counter recv: ~p, buffered msgs: ~p", [resume, length(Msgs)]), + State#{status => running}; + {inc, N, ReplyFun} when Status == running -> + %ct:pal("async counter recv: ~p", [{inc, N}]), + apply_reply(ReplyFun, ok), + State#{counter => Num + N}; + {{FromPid, ReqRef}, {inc, N}} when Status == running -> + %ct:pal("sync counter recv: ~p", [{inc, N}]), + FromPid ! {ReqRef, ok}, + State#{counter => Num + N}; + {{FromPid, ReqRef}, {inc, _N}} when Status == blocked -> + FromPid ! {ReqRef, incorrect_status}, + State#{incorrect_status_count := IncorrectCount + 1}; + {get, ReplyFun} -> + apply_reply(ReplyFun, Num), + State; + {{FromPid, ReqRef}, get_incorrect_status_count} -> + FromPid ! {ReqRef, IncorrectCount}, + State; + {{FromPid, ReqRef}, get} -> + FromPid ! {ReqRef, Num}, + State + end, + counter_loop(NewState). + +maybe_register(Name, Pid, true) -> + ct:pal("---- Register Name: ~p", [Name]), + ct:pal("---- whereis(): ~p", [whereis(Name)]), + erlang:register(Name, Pid); +maybe_register(_Name, _Pid, false) -> + true. + +apply_reply({ReplyFun, Args}, Result) when is_function(ReplyFun) -> + apply(ReplyFun, Args ++ [Result]). diff --git a/apps/emqx_resource/test/emqx_resource_SUITE.erl b/apps/emqx_resource/test/emqx_resource_SUITE.erl index 51e6bac43..107ca2a93 100644 --- a/apps/emqx_resource/test/emqx_resource_SUITE.erl +++ b/apps/emqx_resource/test/emqx_resource_SUITE.erl @@ -22,10 +22,13 @@ -include_lib("common_test/include/ct.hrl"). -include("emqx_resource.hrl"). -include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). --define(TEST_RESOURCE, emqx_test_resource). +-define(TEST_RESOURCE, emqx_connector_demo). 
-define(ID, <<"id">>). -define(DEFAULT_RESOURCE_GROUP, <<"default">>). +-define(RESOURCE_ERROR(REASON), {error, {resource_error, #{reason := REASON}}}). +-define(TRACE_OPTS, #{timetrap => 10000, timeout => 1000}). all() -> emqx_common_test_helpers:all(?MODULE). @@ -34,7 +37,10 @@ groups() -> []. init_per_testcase(_, Config) -> + emqx_connector_demo:set_callback_mode(always_sync), Config. +end_per_testcase(_, _Config) -> + _ = emqx_resource:remove(?ID). init_per_suite(Config) -> code:ensure_loaded(?TEST_RESOURCE), @@ -80,7 +86,7 @@ t_create_remove(_) -> #{name => test_resource}, #{} ), - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid)), @@ -110,7 +116,7 @@ t_create_remove_local(_) -> #{name => test_resource}, #{} ), - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid)), @@ -127,7 +133,7 @@ t_create_remove_local(_) -> {error, _} = emqx_resource:remove_local(?ID), ?assertMatch( - {error, {emqx_resource, #{reason := not_found}}}, + ?RESOURCE_ERROR(not_found), emqx_resource:query(?ID, get_state) ), ?assertNot(is_process_alive(Pid)). @@ -143,23 +149,23 @@ t_do_not_start_after_created(_) -> %% the resource should remain `disconnected` after created timer:sleep(200), ?assertMatch( - {error, {emqx_resource, #{reason := not_connected}}}, + ?RESOURCE_ERROR(stopped), emqx_resource:query(?ID, get_state) ), ?assertMatch( - {ok, _, #{status := disconnected}}, + {ok, _, #{status := stopped}}, emqx_resource:get_instance(?ID) ), %% start the resource manually.. 
ok = emqx_resource:start(?ID), - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid)), %% restart the resource ok = emqx_resource:restart(?ID), ?assertNot(is_process_alive(Pid)), - #{pid := Pid2} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid2}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid2)), ok = emqx_resource:remove_local(?ID), @@ -174,38 +180,283 @@ t_query(_) -> #{name => test_resource} ), - Pid = self(), - Success = fun() -> Pid ! success end, - Failure = fun() -> Pid ! failure end, - - #{pid := _} = emqx_resource:query(?ID, get_state), - #{pid := _} = emqx_resource:query(?ID, get_state, {[{Success, []}], [{Failure, []}]}), - #{pid := _} = emqx_resource:query(?ID, get_state, undefined), - #{pid := _} = emqx_resource:query(?ID, get_state_failed, undefined), - - receive - Message -> ?assertEqual(success, Message) - after 100 -> - ?assert(false) - end, + {ok, #{pid := _}} = emqx_resource:query(?ID, get_state), ?assertMatch( - {error, {emqx_resource, #{reason := not_found}}}, + ?RESOURCE_ERROR(not_found), emqx_resource:query(<<"unknown">>, get_state) ), ok = emqx_resource:remove_local(?ID). +t_query_counter(_) -> + {ok, _} = emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource, register => true} + ), + + {ok, 0} = emqx_resource:query(?ID, get_counter), + ok = emqx_resource:query(?ID, {inc_counter, 1}), + {ok, 1} = emqx_resource:query(?ID, get_counter), + ok = emqx_resource:query(?ID, {inc_counter, 5}), + {ok, 6} = emqx_resource:query(?ID, get_counter), + + ok = emqx_resource:remove_local(?ID). 
+ +t_batch_query_counter(_) -> + {ok, _} = emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource, register => true}, + #{enable_batch => true, query_mode => sync} + ), + + ?check_trace( + ?TRACE_OPTS, + emqx_resource:query(?ID, get_counter), + fun(Result, Trace) -> + ?assertMatch({ok, 0}, Result), + QueryTrace = ?of_kind(call_batch_query, Trace), + ?assertMatch([#{batch := [{query, _, get_counter, _}]}], QueryTrace) + end + ), + + ?check_trace( + ?TRACE_OPTS, + inc_counter_in_parallel(1000), + fun(Trace) -> + QueryTrace = ?of_kind(call_batch_query, Trace), + ?assertMatch([#{batch := BatchReq} | _] when length(BatchReq) > 1, QueryTrace) + end + ), + {ok, 1000} = emqx_resource:query(?ID, get_counter), + + ok = emqx_resource:remove_local(?ID). + +t_query_counter_async_query(_) -> + {ok, _} = emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource, register => true}, + #{query_mode => async, enable_batch => false} + ), + ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)), + ?check_trace( + ?TRACE_OPTS, + inc_counter_in_parallel(1000), + fun(Trace) -> + %% the callback_mode if 'emqx_connector_demo' is 'always_sync'. + QueryTrace = ?of_kind(call_query, Trace), + ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + end + ), + %% wait for 1s to make sure all the aysnc query is sent to the resource. + timer:sleep(1000), + %% simple query ignores the query_mode and batching settings in the resource_worker + ?check_trace( + ?TRACE_OPTS, + emqx_resource:simple_sync_query(?ID, get_counter), + fun(Result, Trace) -> + ?assertMatch({ok, 1000}, Result), + %% the callback_mode if 'emqx_connector_demo' is 'always_sync'. 
+ QueryTrace = ?of_kind(call_query, Trace), + ?assertMatch([#{query := {query, _, get_counter, _}}], QueryTrace) + end + ), + {ok, _, #{metrics := #{counters := C}}} = emqx_resource:get_instance(?ID), + ?assertMatch(#{matched := 1002, 'success' := 1002, 'failed' := 0}, C), + ok = emqx_resource:remove_local(?ID). + +t_query_counter_async_callback(_) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + + Tab0 = ets:new(?FUNCTION_NAME, [bag, public]), + Insert = fun(Tab, Result) -> + ets:insert(Tab, {make_ref(), Result}) + end, + ReqOpts = #{async_reply_fun => {Insert, [Tab0]}}, + {ok, _} = emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource, register => true}, + #{query_mode => async, enable_batch => false, async_inflight_window => 1000000} + ), + ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)), + ?check_trace( + ?TRACE_OPTS, + inc_counter_in_parallel(1000, ReqOpts), + fun(Trace) -> + QueryTrace = ?of_kind(call_query_async, Trace), + ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + end + ), + + %% wait for 1s to make sure all the aysnc query is sent to the resource. + timer:sleep(1000), + %% simple query ignores the query_mode and batching settings in the resource_worker + ?check_trace( + ?TRACE_OPTS, + emqx_resource:simple_sync_query(?ID, get_counter), + fun(Result, Trace) -> + ?assertMatch({ok, 1000}, Result), + QueryTrace = ?of_kind(call_query, Trace), + ?assertMatch([#{query := {query, _, get_counter, _}}], QueryTrace) + end + ), + {ok, _, #{metrics := #{counters := C}}} = emqx_resource:get_instance(?ID), + ?assertMatch(#{matched := 1002, 'success' := 1002, 'failed' := 0}, C), + ?assertMatch(1000, ets:info(Tab0, size)), + ?assert( + lists:all( + fun + ({_, ok}) -> true; + (_) -> false + end, + ets:tab2list(Tab0) + ) + ), + ok = emqx_resource:remove_local(?ID). 
+ +t_query_counter_async_inflight(_) -> + emqx_connector_demo:set_callback_mode(async_if_possible), + + Tab0 = ets:new(?FUNCTION_NAME, [bag, public]), + Insert0 = fun(Tab, Result) -> + ets:insert(Tab, {make_ref(), Result}) + end, + ReqOpts = #{async_reply_fun => {Insert0, [Tab0]}}, + WindowSize = 15, + {ok, _} = emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + #{name => test_resource, register => true}, + #{ + query_mode => async, + enable_batch => false, + async_inflight_window => WindowSize, + worker_pool_size => 1, + resume_interval => 300, + enable_queue => false + } + ), + ?assertMatch({ok, 0}, emqx_resource:simple_sync_query(?ID, get_counter)), + + %% block the resource + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, block)), + + %% send async query to make the inflight window full + ?check_trace( + ?TRACE_OPTS, + inc_counter_in_parallel(WindowSize, ReqOpts), + fun(Trace) -> + QueryTrace = ?of_kind(call_query_async, Trace), + ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + end + ), + + %% this will block the resource_worker as the inflight window is full now + ok = emqx_resource:query(?ID, {inc_counter, 1}), + ?assertMatch(0, ets:info(Tab0, size)), + %% sleep to make the resource_worker resume some times + timer:sleep(2000), + + %% send query now will fail because the resource is blocked. + Insert = fun(Tab, Ref, Result) -> + ets:insert(Tab, {Ref, Result}) + end, + ok = emqx_resource:query(?ID, {inc_counter, 1}, #{ + async_reply_fun => {Insert, [Tab0, tmp_query]} + }), + timer:sleep(100), + ?assertMatch([{_, {error, {resource_error, #{reason := blocked}}}}], ets:take(Tab0, tmp_query)), + + %% all response should be received after the resource is resumed. + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)), + timer:sleep(1000), + ?assertEqual(WindowSize, ets:info(Tab0, size)), + + %% send async query, this time everything should be ok. 
+ Num = 10, + ?check_trace( + ?TRACE_OPTS, + inc_counter_in_parallel(Num, ReqOpts), + fun(Trace) -> + QueryTrace = ?of_kind(call_query_async, Trace), + ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + end + ), + timer:sleep(1000), + ?assertEqual(WindowSize + Num, ets:info(Tab0, size)), + + %% block the resource + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, block)), + %% again, send async query to make the inflight window full + ?check_trace( + ?TRACE_OPTS, + inc_counter_in_parallel(WindowSize, ReqOpts), + fun(Trace) -> + QueryTrace = ?of_kind(call_query_async, Trace), + ?assertMatch([#{query := {query, _, {inc_counter, 1}, _}} | _], QueryTrace) + end + ), + + %% this will block the resource_worker + ok = emqx_resource:query(?ID, {inc_counter, 1}), + + Sent = WindowSize + Num + WindowSize, + ?assertMatch(ok, emqx_resource:simple_sync_query(?ID, resume)), + timer:sleep(1000), + ?assertEqual(Sent, ets:info(Tab0, size)), + + {ok, Counter} = emqx_resource:simple_sync_query(?ID, get_counter), + ct:pal("get_counter: ~p, sent: ~p", [Counter, Sent]), + ?assert(Sent =< Counter), + + {ok, _, #{metrics := #{counters := C}}} = emqx_resource:get_instance(?ID), + ct:pal("metrics: ~p", [C]), + {ok, IncorrectStatusCount} = emqx_resource:simple_sync_query(?ID, get_incorrect_status_count), + %% The `simple_sync_query' we just did also increases the matched + %% count, hence the + 1. + ExtraSimpleCallCount = IncorrectStatusCount + 1, + ?assertMatch( + #{matched := M, success := Ss, dropped := Dp, 'retried.success' := Rs} when + M == Ss + Dp - Rs + ExtraSimpleCallCount, + C, + #{ + metrics => C, + extra_simple_call_count => ExtraSimpleCallCount + } + ), + ?assert( + lists:all( + fun + ({_, ok}) -> true; + (_) -> false + end, + ets:tab2list(Tab0) + ) + ), + ok = emqx_resource:remove_local(?ID). 
+ t_healthy_timeout(_) -> {ok, _} = emqx_resource:create_local( ?ID, ?DEFAULT_RESOURCE_GROUP, ?TEST_RESOURCE, - #{name => <<"test_resource">>}, - #{health_check_timeout => 200} + #{name => <<"bad_not_atom_name">>, register => true}, + %% the ?TEST_RESOURCE always returns the `Mod:on_get_status/2` 300ms later. + #{health_check_interval => 200} + ), + ?assertMatch( + ?RESOURCE_ERROR(not_connected), + emqx_resource:query(?ID, get_state) ), - timer:sleep(500), - ok = emqx_resource:remove_local(?ID). t_healthy(_) -> @@ -213,11 +464,9 @@ t_healthy(_) -> ?ID, ?DEFAULT_RESOURCE_GROUP, ?TEST_RESOURCE, - #{name => <<"test_resource">>} + #{name => test_resource} ), - timer:sleep(400), - - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), timer:sleep(300), emqx_resource:set_resource_status_connecting(?ID), @@ -229,10 +478,10 @@ t_healthy(_) -> erlang:exit(Pid, shutdown), - ?assertEqual({ok, connecting}, emqx_resource:health_check(?ID)), + ?assertEqual({ok, disconnected}, emqx_resource:health_check(?ID)), ?assertMatch( - [#{status := connecting}], + [#{status := disconnected}], emqx_resource:list_instances_verbose() ), @@ -253,6 +502,10 @@ t_stop_start(_) -> #{<<"name">> => <<"test_resource">>} ), + %% add some metrics to test their persistence + emqx_resource_metrics:batching_change(?ID, 5), + ?assertEqual(5, emqx_resource_metrics:batching_get(?ID)), + {ok, _} = emqx_resource:check_and_recreate( ?ID, ?TEST_RESOURCE, @@ -260,25 +513,36 @@ t_stop_start(_) -> #{} ), - #{pid := Pid0} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid0}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid0)), + %% metrics are reset when recreating + ?assertEqual(0, emqx_resource_metrics:batching_get(?ID)), + ok = emqx_resource:stop(?ID), ?assertNot(is_process_alive(Pid0)), ?assertMatch( - {error, {emqx_resource, #{reason := not_connected}}}, + ?RESOURCE_ERROR(stopped), emqx_resource:query(?ID, get_state) ), ok = 
emqx_resource:restart(?ID), timer:sleep(300), - #{pid := Pid1} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid1}} = emqx_resource:query(?ID, get_state), - ?assert(is_process_alive(Pid1)). + ?assert(is_process_alive(Pid1)), + + %% now stop while resetting the metrics + emqx_resource_metrics:batching_change(?ID, 5), + ?assertEqual(5, emqx_resource_metrics:batching_get(?ID)), + ok = emqx_resource:stop(?ID), + ?assertEqual(0, emqx_resource_metrics:batching_get(?ID)), + + ok. t_stop_start_local(_) -> {error, _} = emqx_resource:check_and_create_local( @@ -302,7 +566,7 @@ t_stop_start_local(_) -> #{} ), - #{pid := Pid0} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid0}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid0)), @@ -311,13 +575,13 @@ t_stop_start_local(_) -> ?assertNot(is_process_alive(Pid0)), ?assertMatch( - {error, {emqx_resource, #{reason := not_connected}}}, + ?RESOURCE_ERROR(stopped), emqx_resource:query(?ID, get_state) ), ok = emqx_resource:restart(?ID), - #{pid := Pid1} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid1}} = emqx_resource:query(?ID, get_state), ?assert(is_process_alive(Pid1)). @@ -358,6 +622,10 @@ t_create_dry_run_local(_) -> [] = ets:match(emqx_resource_manager, {{owner, '$1'}, '_'}). create_dry_run_local_succ() -> + case whereis(test_resource) of + undefined -> ok; + Pid -> exit(Pid, kill) + end, ?assertEqual( ok, emqx_resource:create_dry_run_local( @@ -368,17 +636,17 @@ create_dry_run_local_succ() -> ?assertEqual(undefined, whereis(test_resource)). 
t_create_dry_run_local_failed(_) -> - {Res1, _} = emqx_resource:create_dry_run_local( + Res1 = emqx_resource:create_dry_run_local( ?TEST_RESOURCE, - #{cteate_error => true} + #{create_error => true} ), - ?assertEqual(error, Res1), + ?assertMatch({error, _}, Res1), - {Res2, _} = emqx_resource:create_dry_run_local( + Res2 = emqx_resource:create_dry_run_local( ?TEST_RESOURCE, #{name => test_resource, health_check_error => true} ), - ?assertEqual(error, Res2), + ?assertMatch({error, _}, Res2), Res3 = emqx_resource:create_dry_run_local( ?TEST_RESOURCE, @@ -400,7 +668,7 @@ t_reset_metrics(_) -> #{name => test_resource} ), - #{pid := Pid} = emqx_resource:query(?ID, get_state), + {ok, #{pid := Pid}} = emqx_resource:query(?ID, get_state), emqx_resource:reset_metrics(?ID), ?assert(is_process_alive(Pid)), ok = emqx_resource:remove(?ID), @@ -419,6 +687,26 @@ t_auto_retry(_) -> %%------------------------------------------------------------------------------ %% Helpers %%------------------------------------------------------------------------------ +inc_counter_in_parallel(N) -> + inc_counter_in_parallel(N, #{}). + +inc_counter_in_parallel(N, Opts) -> + Parent = self(), + Pids = [ + erlang:spawn(fun() -> + emqx_resource:query(?ID, {inc_counter, 1}, Opts), + Parent ! {complete, self()} + end) + || _ <- lists:seq(1, N) + ], + [ + receive + {complete, Pid} -> ok + after 1000 -> + ct:fail({wait_for_query_timeout, Pid}) + end + || Pid <- Pids + ]. bin_config() -> <<"\"name\": \"test_resource\"">>. diff --git a/apps/emqx_resource/test/emqx_test_resource.erl b/apps/emqx_resource/test/emqx_test_resource.erl deleted file mode 100644 index c23f87d50..000000000 --- a/apps/emqx_resource/test/emqx_test_resource.erl +++ /dev/null @@ -1,110 +0,0 @@ -%%-------------------------------------------------------------------- -%% Copyright (c) 2021-2022 EMQ Technologies Co., Ltd. All Rights Reserved. 
-%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. -%%-------------------------------------------------------------------- - --module(emqx_test_resource). - --include_lib("typerefl/include/types.hrl"). - --behaviour(emqx_resource). - -%% callbacks of behaviour emqx_resource --export([ - on_start/2, - on_stop/2, - on_query/4, - on_get_status/2 -]). - -%% callbacks for emqx_resource config schema --export([roots/0]). - -roots() -> - [ - {name, fun name/1}, - {register, fun register/1} - ]. - -name(type) -> atom(); -name(required) -> true; -name(_) -> undefined. - -register(type) -> boolean(); -register(required) -> true; -register(default) -> false; -register(_) -> undefined. - -on_start(_InstId, #{create_error := true}) -> - error("some error"); -on_start(InstId, #{name := Name, stop_error := true} = Opts) -> - Register = maps:get(register, Opts, false), - {ok, #{ - name => Name, - id => InstId, - stop_error => true, - pid => spawn_dummy_process(Name, Register) - }}; -on_start(InstId, #{name := Name, health_check_error := true} = Opts) -> - Register = maps:get(register, Opts, false), - {ok, #{ - name => Name, - id => InstId, - health_check_error => true, - pid => spawn_dummy_process(Name, Register) - }}; -on_start(InstId, #{name := Name} = Opts) -> - Register = maps:get(register, Opts, false), - {ok, #{ - name => Name, - id => InstId, - pid => spawn_dummy_process(Name, Register) - }}. 
- -on_stop(_InstId, #{stop_error := true}) -> - {error, stop_error}; -on_stop(_InstId, #{pid := Pid}) -> - erlang:exit(Pid, shutdown), - ok. - -on_query(_InstId, get_state, AfterQuery, State) -> - emqx_resource:query_success(AfterQuery), - State; -on_query(_InstId, get_state_failed, AfterQuery, State) -> - emqx_resource:query_failed(AfterQuery), - State. - -on_get_status(_InstId, #{health_check_error := true}) -> - disconnected; -on_get_status(_InstId, #{pid := Pid}) -> - timer:sleep(300), - case is_process_alive(Pid) of - true -> connected; - false -> connecting - end. - -spawn_dummy_process(Name, Register) -> - spawn( - fun() -> - true = - case Register of - true -> register(Name, self()); - _ -> true - end, - Ref = make_ref(), - receive - Ref -> ok - end - end - ). diff --git a/apps/emqx_rule_engine/include/rule_engine.hrl b/apps/emqx_rule_engine/include/rule_engine.hrl index 77d371711..d15db24be 100644 --- a/apps/emqx_rule_engine/include/rule_engine.hrl +++ b/apps/emqx_rule_engine/include/rule_engine.hrl @@ -88,18 +88,18 @@ %% Logical operators -define(is_logical(Op), (Op =:= 'and' orelse Op =:= 'or')). --define(RAISE(_EXP_, _ERROR_), - ?RAISE(_EXP_, _ = do_nothing, _ERROR_) +-define(RAISE(EXP, ERROR), + ?RAISE(EXP, _ = do_nothing, ERROR) ). --define(RAISE(_EXP_, _EXP_ON_FAIL_, _ERROR_), +-define(RAISE(EXP, EXP_ON_FAIL, ERROR), fun() -> try - (_EXP_) + (EXP) catch - _EXCLASS_:_EXCPTION_:_ST_ -> - _EXP_ON_FAIL_, - throw(_ERROR_) + EXCLASS:EXCPTION:ST -> + EXP_ON_FAIL, + throw(ERROR) end end() ). diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine.erl b/apps/emqx_rule_engine/src/emqx_rule_engine.erl index 236e4b9aa..85fcc63a6 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_engine.erl @@ -46,6 +46,8 @@ get_rules/0, get_rules_for_topic/1, get_rules_with_same_event/1, + get_rule_ids_by_action/1, + ensure_action_removed/2, get_rules_ordered_by_ts/0 ]). @@ -99,6 +101,8 @@ -define(RATE_METRICS, ['matched']). 
+-type action_name() :: binary() | #{function := binary()}. + config_key_path() -> [rule_engine, rules]. @@ -208,6 +212,46 @@ get_rules_with_same_event(Topic) -> lists:any(fun(T) -> is_of_event_name(EventName, T) end, From) ]. +-spec get_rule_ids_by_action(action_name()) -> [rule_id()]. +get_rule_ids_by_action(ActionName) when is_binary(ActionName) -> + [ + Id + || #{actions := Acts, id := Id} <- get_rules(), + lists:any(fun(A) -> A =:= ActionName end, Acts) + ]; +get_rule_ids_by_action(#{function := FuncName}) when is_binary(FuncName) -> + {Mod, Fun} = + case string:split(FuncName, ":", leading) of + [M, F] -> {binary_to_module(M), F}; + [F] -> {emqx_rule_actions, F} + end, + [ + Id + || #{actions := Acts, id := Id} <- get_rules(), + contains_actions(Acts, Mod, Fun) + ]. + +-spec ensure_action_removed(rule_id(), action_name()) -> ok. +ensure_action_removed(RuleId, ActionName) -> + FilterFunc = + fun + (Func, Func) -> false; + (#{<<"function">> := Func}, #{function := Func}) -> false; + (_, _) -> true + end, + case emqx:get_raw_config([rule_engine, rules, RuleId], not_found) of + not_found -> + ok; + #{<<"actions">> := Acts} -> + NewActs = [AName || AName <- Acts, FilterFunc(AName, ActionName)], + {ok, _} = emqx_conf:update( + emqx_rule_engine:config_key_path() ++ [RuleId, actions], + NewActs, + #{override_to => cluster} + ), + ok + end. + is_of_event_name(EventName, Topic) -> EventName =:= emqx_rule_events:event_name(Topic). @@ -417,3 +461,20 @@ now_ms() -> bin(A) when is_atom(A) -> atom_to_binary(A, utf8); bin(B) when is_binary(B) -> B. + +binary_to_module(ModName) -> + try + binary_to_existing_atom(ModName, utf8) + catch + error:badarg -> + not_exist_mod + end. + +contains_actions(Actions, Mod0, Func0) -> + lists:any( + fun + (#{mod := Mod, func := Func}) when Mod =:= Mod0; Func =:= Func0 -> true; + (_) -> false + end, + Actions + ). 
diff --git a/apps/emqx_rule_engine/src/emqx_rule_runtime.erl b/apps/emqx_rule_engine/src/emqx_rule_runtime.erl index a0d1c464a..29f2c6bf6 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_runtime.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_runtime.erl @@ -19,6 +19,7 @@ -include("rule_engine.hrl"). -include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/logger.hrl"). +-include_lib("emqx_resource/include/emqx_resource_errors.hrl"). -export([ apply_rule/3, @@ -41,6 +42,10 @@ -type alias() :: atom(). -type collection() :: {alias(), [term()]}. +-elvis([ + {elvis_style, invalid_dynamic_call, #{ignore => [emqx_rule_runtime]}} +]). + -define(ephemeral_alias(TYPE, NAME), iolist_to_binary(io_lib:format("_v_~ts_~p_~p", [TYPE, NAME, erlang:system_time()])) ). @@ -129,13 +134,13 @@ do_apply_rule( ) -> {Selected, Collection} = ?RAISE( select_and_collect(Fields, Columns), - {select_and_collect_error, {_EXCLASS_, _EXCPTION_, _ST_}} + {select_and_collect_error, {EXCLASS, EXCPTION, ST}} ), ColumnsAndSelected = maps:merge(Columns, Selected), case ?RAISE( match_conditions(Conditions, ColumnsAndSelected), - {match_conditions_error, {_EXCLASS_, _EXCPTION_, _ST_}} + {match_conditions_error, {EXCLASS, EXCPTION, ST}} ) of true -> @@ -165,12 +170,12 @@ do_apply_rule( ) -> Selected = ?RAISE( select_and_transform(Fields, Columns), - {select_and_transform_error, {_EXCLASS_, _EXCPTION_, _ST_}} + {select_and_transform_error, {EXCLASS, EXCPTION, ST}} ), case ?RAISE( match_conditions(Conditions, maps:merge(Columns, Selected)), - {match_conditions_error, {_EXCLASS_, _EXCPTION_, _ST_}} + {match_conditions_error, {EXCLASS, EXCPTION, ST}} ) of true -> @@ -244,7 +249,7 @@ filter_collection(Columns, InCase, DoEach, {CollKey, CollVal}) -> case ?RAISE( match_conditions(InCase, ColumnsAndItem), - {match_incase_error, {_EXCLASS_, _EXCPTION_, _ST_}} + {match_incase_error, {EXCLASS, EXCPTION, ST}} ) of true when DoEach == [] -> {true, ColumnsAndItem}; @@ -252,7 +257,7 @@ filter_collection(Columns, 
InCase, DoEach, {CollKey, CollVal}) -> {true, ?RAISE( select_and_transform(DoEach, ColumnsAndItem), - {doeach_error, {_EXCLASS_, _EXCPTION_, _ST_}} + {doeach_error, {EXCLASS, EXCPTION, ST}} )}; false -> false @@ -270,7 +275,7 @@ match_conditions({'not', Var}, Data) -> case eval(Var, Data) of Bool when is_boolean(Bool) -> not Bool; - _other -> + _Other -> false end; match_conditions({in, Var, {list, Vals}}, Data) -> @@ -322,7 +327,7 @@ handle_action(RuleId, ActId, Selected, Envs) -> ok = emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.total'), try Result = do_handle_action(ActId, Selected, Envs), - ok = emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.success'), + inc_action_metrics(Result, RuleId), Result catch throw:out_of_service -> @@ -501,3 +506,26 @@ ensure_list(_NotList) -> []. nested_put(Alias, Val, Columns0) -> Columns = handle_alias(Alias, Columns0), emqx_rule_maps:nested_put(Alias, Val, Columns). + +-define(IS_RES_DOWN(R), R == stopped; R == not_connected; R == not_found). +inc_action_metrics(ok, RuleId) -> + emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.success'); +inc_action_metrics({error, {recoverable_error, _}}, RuleId) -> + emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed.out_of_service'); +inc_action_metrics(?RESOURCE_ERROR_M(R, _), RuleId) when ?IS_RES_DOWN(R) -> + emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed.out_of_service'); +inc_action_metrics(R, RuleId) -> + case is_ok_result(R) of + false -> + emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed'), + emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.failed.unknown'); + true -> + emqx_metrics_worker:inc(rule_metrics, RuleId, 'actions.success') + end. + +is_ok_result(ok) -> + true; +is_ok_result(R) when is_tuple(R) -> + ok == erlang:element(1, R); +is_ok_result(_) -> + false. 
diff --git a/apps/emqx_rule_engine/src/emqx_rule_sqltester.erl b/apps/emqx_rule_engine/src/emqx_rule_sqltester.erl index c333bb80e..4de63e94f 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_sqltester.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_sqltester.erl @@ -19,7 +19,6 @@ -export([ test/1, - echo_action/2, get_selected_data/3 ]). @@ -62,14 +61,17 @@ test_rule(Sql, Select, Context, EventTopics) -> }, FullContext = fill_default_values(hd(EventTopics), emqx_rule_maps:atom_key_map(Context)), try emqx_rule_runtime:apply_rule(Rule, FullContext, #{}) of - {ok, Data} -> {ok, flatten(Data)}; - {error, Reason} -> {error, Reason} + {ok, Data} -> + {ok, flatten(Data)}; + {error, Reason} -> + {error, Reason} after ok = emqx_rule_engine:clear_metrics_for_rule(RuleId) end. -get_selected_data(Selected, _Envs, _Args) -> - Selected. +get_selected_data(Selected, Envs, Args) -> + ?TRACE("RULE", "testing_rule_sql_ok", #{selected => Selected, envs => Envs, args => Args}), + {ok, Selected}. is_publish_topic(<<"$events/", _/binary>>) -> false; is_publish_topic(<<"$bridges/", _/binary>>) -> false; @@ -77,14 +79,10 @@ is_publish_topic(_Topic) -> true. flatten([]) -> []; -flatten([D1]) -> - D1; -flatten([D1 | L]) when is_list(D1) -> - D1 ++ flatten(L). - -echo_action(Data, Envs) -> - ?TRACE("RULE", "testing_rule_sql_ok", #{data => Data, envs => Envs}), - Data. +flatten([{ok, D}]) -> + D; +flatten([D | L]) when is_list(D) -> + [D0 || {ok, D0} <- D] ++ flatten(L). fill_default_values(Event, Context) -> maps:merge(envs_examp(Event), Context). 
diff --git a/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl index 50bb55fe1..8f68ae576 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_engine_SUITE.erl @@ -52,7 +52,9 @@ groups() -> t_create_existing_rule, t_get_rules_for_topic, t_get_rules_for_topic_2, - t_get_rules_with_same_event + t_get_rules_with_same_event, + t_get_rule_ids_by_action, + t_ensure_action_removed ]}, {runtime, [], [ t_match_atom_and_binary, @@ -440,6 +442,105 @@ t_get_rules_with_same_event(_Config) -> ]), ok. +t_get_rule_ids_by_action(_) -> + ID = <<"t_get_rule_ids_by_action">>, + Rule1 = #{ + enable => false, + id => ID, + sql => <<"SELECT * FROM \"t\"">>, + from => [<<"t">>], + fields => [<<"*">>], + is_foreach => false, + conditions => {}, + actions => [ + #{mod => emqx_rule_actions, func => console, args => #{}}, + #{mod => emqx_rule_actions, func => republish, args => #{}}, + <<"mqtt:my_mqtt_bridge">>, + <<"mysql:foo">> + ], + description => ID, + created_at => erlang:system_time(millisecond) + }, + ok = insert_rules([Rule1]), + ?assertMatch( + [ID], + emqx_rule_engine:get_rule_ids_by_action(#{function => <<"emqx_rule_actions:console">>}) + ), + ?assertMatch( + [ID], + emqx_rule_engine:get_rule_ids_by_action(#{function => <<"emqx_rule_actions:republish">>}) + ), + ?assertEqual([], emqx_rule_engine:get_rule_ids_by_action(#{function => <<"some_mod:fun">>})), + ?assertMatch([ID], emqx_rule_engine:get_rule_ids_by_action(<<"mysql:foo">>)), + ?assertEqual([], emqx_rule_engine:get_rule_ids_by_action(<<"mysql:not_exists">>)), + ok = delete_rules_by_ids([<<"t_get_rule_ids_by_action">>]). 
+ +t_ensure_action_removed(_) -> + Id = <<"t_ensure_action_removed">>, + GetSelectedData = <<"emqx_rule_sqltester:get_selected_data">>, + emqx:update_config( + [rule_engine, rules], + #{ + Id => #{ + <<"actions">> => [ + #{<<"function">> => GetSelectedData}, + #{<<"function">> => <<"console">>}, + #{<<"function">> => <<"republish">>}, + <<"mysql:foo">>, + <<"mqtt:bar">> + ], + <<"description">> => <<"">>, + <<"sql">> => <<"SELECT * FROM \"t/#\"">> + } + } + ), + ?assertMatch( + #{ + <<"actions">> := [ + #{<<"function">> := GetSelectedData}, + #{<<"function">> := <<"console">>}, + #{<<"function">> := <<"republish">>}, + <<"mysql:foo">>, + <<"mqtt:bar">> + ] + }, + emqx:get_raw_config([rule_engine, rules, Id]) + ), + ok = emqx_rule_engine:ensure_action_removed(Id, #{function => <<"console">>}), + ?assertMatch( + #{ + <<"actions">> := [ + #{<<"function">> := GetSelectedData}, + #{<<"function">> := <<"republish">>}, + <<"mysql:foo">>, + <<"mqtt:bar">> + ] + }, + emqx:get_raw_config([rule_engine, rules, Id]) + ), + ok = emqx_rule_engine:ensure_action_removed(Id, <<"mysql:foo">>), + ?assertMatch( + #{ + <<"actions">> := [ + #{<<"function">> := GetSelectedData}, + #{<<"function">> := <<"republish">>}, + <<"mqtt:bar">> + ] + }, + emqx:get_raw_config([rule_engine, rules, Id]) + ), + ok = emqx_rule_engine:ensure_action_removed(Id, #{function => GetSelectedData}), + ?assertMatch( + #{ + <<"actions">> := [ + #{<<"function">> := <<"republish">>}, + <<"mqtt:bar">> + ] + }, + emqx:get_raw_config([rule_engine, rules, Id]) + ), + emqx:remove_config([rule_engine, rules, Id]). 
+ %%------------------------------------------------------------------------------ %% Test cases for rule runtime %%------------------------------------------------------------------------------ diff --git a/build b/build index bd5307014..7e5f65b59 100755 --- a/build +++ b/build @@ -112,7 +112,7 @@ make_docs() { fi case "$(is_enterprise "$PROFILE")" in 'yes') - SCHEMA_MODULE='emqx_enterprise_conf_schema' + SCHEMA_MODULE='emqx_ee_conf_schema' ;; 'no') SCHEMA_MODULE='emqx_conf_schema' @@ -147,6 +147,12 @@ make_rel() { make_elixir_rel() { ./scripts/pre-compile.sh "$PROFILE" export_release_vars "$PROFILE" + # for some reason, this has to be run outside "do"... + mix local.rebar --if-missing --force + # shellcheck disable=SC1010 + mix do local.hex --if-missing --force, \ + local.rebar rebar3 "${PWD}/rebar3" --if-missing --force, \ + deps.get mix release --overwrite assert_no_compile_time_only_deps } diff --git a/deploy/charts/emqx/README.md b/deploy/charts/emqx/README.md index 84b5ab4c9..a579af70d 100644 --- a/deploy/charts/emqx/README.md +++ b/deploy/charts/emqx/README.md @@ -74,14 +74,14 @@ The following table lists the configurable parameters of the emqx chart and thei | `service.externalIPs` | ExternalIPs for the service | [] | | `service.annotations` | Service annotations | {}(evaluated as a template) | | `ingress.dashboard.enabled` | Enable ingress for EMQX Dashboard | false | -| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | | +| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | | | `ingress.dashboard.path` | Ingress path for EMQX Dashboard | / | | `ingress.dashboard.pathType` | Ingress pathType for EMQX Dashboard | `ImplementationSpecific` | | `ingress.dashboard.hosts` | Ingress hosts for EMQX Mgmt API | dashboard.emqx.local | | `ingress.dashboard.tls` | Ingress tls for EMQX Mgmt API | [] | | `ingress.dashboard.annotations` | Ingress annotations for EMQX Mgmt API | {} | | `ingress.mgmt.enabled` | 
Enable ingress for EMQX Mgmt API | false | -| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Mgmt API | | +| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Mgmt API | | | `ingress.mgmt.path` | Ingress path for EMQX Mgmt API | / | | `ingress.mgmt.hosts` | Ingress hosts for EMQX Mgmt API | api.emqx.local | | `ingress.mgmt.tls` | Ingress tls for EMQX Mgmt API | [] | diff --git a/git-blame-ignore-revs b/git-blame-ignore-revs index b21b6a552..41c6e5e49 100644 --- a/git-blame-ignore-revs +++ b/git-blame-ignore-revs @@ -13,7 +13,7 @@ acb3544d4b112121b5d9414237d2af7860ccc2a3 # reformat lib-ee/emqx_license 4f396cceb84d79d5ef540e91c1a8420e8de74a56 4e3fd9febd0df11f3fe5f221cd2c4362be57c886 -# reformat lib-ee/emqx_enterprise_conf +# reformat lib-ee/emqx_ee_conf 1aa82992616ad848539a533a5cd20ba6f9071e5a # reformat apps/emqx_gateway 3f6d78dda03fd0d8e968a352e134f11a7f16bfe8 diff --git a/lib-ee/emqx_enterprise_conf/.gitignore b/lib-ee/emqx_ee_bridge/.gitignore similarity index 100% rename from lib-ee/emqx_enterprise_conf/.gitignore rename to lib-ee/emqx_ee_bridge/.gitignore diff --git a/lib-ee/emqx_ee_bridge/README.md b/lib-ee/emqx_ee_bridge/README.md new file mode 100644 index 000000000..5cb4d8694 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/README.md @@ -0,0 +1,9 @@ +emqx_ee_bridge +===== + +An OTP application + +Build +----- + + $ rebar3 compile diff --git a/lib-ee/emqx_ee_bridge/docker-ct b/lib-ee/emqx_ee_bridge/docker-ct new file mode 100644 index 000000000..94f9379df --- /dev/null +++ b/lib-ee/emqx_ee_bridge/docker-ct @@ -0,0 +1,6 @@ +toxiproxy +influxdb +kafka +mongo +mongo_rs_sharded +mysql diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_hstreamdb.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_hstreamdb.conf new file mode 100644 index 000000000..dd3346579 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_hstreamdb.conf @@ -0,0 +1,94 @@ +emqx_ee_bridge_hstreamdb { + local_topic { + desc { + en: """ +The MQTT 
topic filter to be forwarded to the HStreamDB. All MQTT 'PUBLISH' messages with the topic +matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +configured, then both the data got from the rule and the MQTT messages that match local_topic +will be forwarded. +""" + zh: """ +发送到 'local_topic' 的消息都会转发到 HStreamDB。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 HStreamDB。 +""" + } + label { + en: "Local Topic" + zh: "本地 Topic" + } + } + payload { + desc { + en: """The payload to be forwarded to the HStreamDB. Placeholders supported.""" + zh: """要转发到 HStreamDB 的数据内容,支持占位符""" + } + label { + en: "Payload" + zh: "消息内容" + } + } + config_enable { + desc { + en: """Enable or disable this bridge""" + zh: """启用/禁用桥接""" + } + label { + en: "Enable Or Disable Bridge" + zh: "启用/禁用桥接" + } + } + config_direction { + desc { + en: """The direction of this bridge, MUST be 'egress'""" + zh: """桥接的方向, 必须是 egress""" + } + label { + en: "Bridge Direction" + zh: "桥接方向" + } + } + + desc_config { + desc { + en: """Configuration for an HStreamDB bridge.""" + zh: """HStreamDB 桥接配置""" + } + label: { + en: "HStreamDB Bridge Configuration" + zh: "HStreamDB 桥接配置" + } + } + + desc_type { + desc { + en: """The Bridge Type""" + zh: """Bridge 类型""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + + desc_name { + desc { + en: """Bridge name, used as a human-readable description of the bridge.""" + zh: """桥接名字,可读描述""" + } + label { + en: "Bridge Name" + zh: "桥接名字" + } + } + desc_connector { + desc { + en: """Generic configuration for the connector.""" + zh: """连接器的通用配置。""" + } + label: { + en: "Connector Generic Configuration" + zh: "连接器通用配置。" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_influxdb.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_influxdb.conf new file mode 100644 index 000000000..b2c3c5a73 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_influxdb.conf @@ -0,0 +1,87 @@ +emqx_ee_bridge_influxdb { + local_topic { + desc { + en: """The MQTT topic filter to be forwarded to the InfluxDB. All MQTT 'PUBLISH' messages with the topic +matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +configured, then both the data got from the rule and the MQTT messages that match local_topic +will be forwarded. +""" + zh: """发送到 'local_topic' 的消息都会转发到 InfluxDB。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发到 InfluxDB。 +""" + } + label { + en: "Local Topic" + zh: "本地 Topic" + } + } + write_syntax { + desc { + en: """Conf of InfluxDB line protocol to write data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point, and placeholder supported. +See also [InfluxDB 2.3 Line Protocol](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/) and +[InfluxDB 1.8 Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/)
+TLDR:
+``` +[,=[,=]] =[,=] [] +``` +""" + zh: """使用 InfluxDB API Line Protocol 写入 InfluxDB 的数据,支持占位符
+参考 [InfluxDB 2.3 Line Protocol](https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/) 及 +[InfluxDB 1.8 Line Protocol](https://docs.influxdata.com/influxdb/v1.8/write_protocols/line_protocol_tutorial/)
+TLDR:
+``` +[,=[,=]] =[,=] [] +``` +""" + } + label { + en: "Write Syntax" + zh: "写语句" + } + } + config_enable { + desc { + en: """Enable or disable this bridge.""" + zh: """启用/禁用桥接。""" + } + label { + en: "Enable Or Disable Bridge" + zh: "启用/禁用桥接" + } + } + + desc_config { + desc { + en: """Configuration for an InfluxDB bridge.""" + zh: """InfluxDB 桥接配置。""" + } + label: { + en: "InfluxDB Bridge Configuration" + zh: "InfluxDB 桥接配置" + } + } + + desc_type { + desc { + en: """The Bridge Type.""" + zh: """桥接类型。""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + + desc_name { + desc { + en: """Bridge name.""" + zh: """桥接名称。""" + } + label { + en: "Bridge Name" + zh: "桥接名称" + } + } + +} diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf new file mode 100644 index 000000000..1fdbfedc4 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_kafka.conf @@ -0,0 +1,471 @@ +emqx_ee_bridge_kafka { + config_enable { + desc { + en: "Enable (true) or disable (false) this Kafka bridge." + zh: "启用(true)或停用该(false)Kafka 数据桥接。" + } + label { + en: "Enable or Disable" + zh: "启用或停用" + } + } + desc_config { + desc { + en: """Configuration for a Kafka bridge.""" + zh: """Kafka 桥接配置""" + } + label { + en: "Kafka Bridge Configuration" + zh: "Kafka 桥接配置" + } + } + desc_type { + desc { + en: """The Bridge Type""" + zh: """桥接类型""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + desc_name { + desc { + en: """Bridge name, used as a human-readable description of the bridge.""" + zh: """桥接名字,可读描述""" + } + label { + en: "Bridge Name" + zh: "桥接名字" + } + } + producer_opts { + desc { + en: "Local MQTT data source and Kafka bridge configs." + zh: "本地 MQTT 数据源和 Kafka 桥接的配置。" + } + label { + en: "MQTT to Kafka" + zh: "MQTT 到 Kafka" + } + } + producer_mqtt_opts { + desc { + en: "MQTT data source. Optional when used as a rule-engine action." 
+ zh: "需要桥接到 MQTT 源主题。" + } + label { + en: "MQTT Source Topic" + zh: "MQTT 源主题" + } + } + mqtt_topic { + desc { + en: "MQTT topic or topic as data source (bridge input)." + zh: "指定 MQTT 主题作为桥接的数据源" + } + label { + en: "Source MQTT Topic" + zh: "源 MQTT 主题" + } + } + producer_kafka_opts { + desc { + en: "Kafka producer configs." + zh: "Kafka 生产者参数。" + } + label { + en: "Kafka Producer" + zh: "生产者参数" + } + } + bootstrap_hosts { + desc { + en: "A comma separated list of Kafka host:port endpoints to bootstrap the client." + zh: "用逗号分隔的 host:port 主机列表。" + } + label { + en: "Bootstrap Hosts" + zh: "主机列表" + } + } + connect_timeout { + desc { + en: "Maximum wait time for TCP connection establishment (including authentication time if enabled)." + zh: "建立 TCP 连接时的最大等待时长(若启用认证,这个等待时长也包含完成认证所需时间)。" + } + label { + en: "Connect Timeout" + zh: "连接超时" + } + } + min_metadata_refresh_interval { + desc { + en: "Minimum time interval the client has to wait before refreshing Kafka broker and topic metadata. " + "Setting too small value may add extra load on Kafka." + zh: "刷新 Kafka broker 和 Kafka 主题元数据段最短时间间隔。设置太小可能会增加 Kafka 压力。" + } + label { + en: "Min Metadata Refresh Interval" + zh: "元数据刷新最小间隔" + } + } + metadata_request_timeout { + desc { + en: "Maximum wait time when fetching metadata from Kafka." + zh: "刷新元数据时最大等待时长。" + } + label { + en: "Metadata Request Timeout" + zh: "元数据请求超时" + } + } + authentication { + desc { + en: "Authentication configs." + zh: "认证参数。" + } + label { + en: "Authentication" + zh: "认证" + } + } + socket_opts { + desc { + en: "Extra socket options." + zh: "更多 Socket 参数设置。" + } + label { + en: "Socket Options" + zh: "Socket 参数" + } + } + auth_sasl_mechanism { + desc { + en: "SASL authentication mechanism." + zh: "SASL 认证方法名称。" + } + label { + en: "Mechanism" + zh: "认证方法" + } + } + auth_sasl_username { + desc { + en: "SASL authentication username." 
+ zh: "SASL 认证的用户名。" + } + label { + en: "Username" + zh: "用户名" + } + } + auth_sasl_password { + desc { + en: "SASL authentication password." + zh: "SASL 认证的密码。" + } + label { + en: "Password" + zh: "密码" + } + } + auth_kerberos_principal { + desc { + en: "SASL GSSAPI authentication Kerberos principal. " + "For example client_name@MY.KERBEROS.REALM.MYDOMAIN.COM, " + "NOTE: The realm in use has to be configured in /etc/krb5.conf in EMQX nodes." + zh: "SASL GSSAPI 认证方法的 Kerberos principal," + "例如 client_name@MY.KERBEROS.REALM.MYDOMAIN.COM" + "注意:这里使用的 realm 需要配置在 EMQX 服务器的 /etc/krb5.conf 中" + } + label { + en: "Kerberos Principal" + zh: "Kerberos Principal" + } + } + auth_kerberos_keytab_file { + desc { + en: "SASL GSSAPI authentication Kerberos keytab file path. " + "NOTE: This file has to be placed in EMQX nodes, and the EMQX service runner user requires read permission." + zh: "SASL GSSAPI 认证方法的 Kerberos keytab 文件。" + "注意:该文件需要上传到 EMQX 服务器中,且运行 EMQX 服务的系统账户需要有读取权限。" + } + label { + en: "Kerberos keytab file" + zh: "Kerberos keytab 文件" + } + } + socket_send_buffer { + desc { + en: "Fine tune the socket send buffer. The default value is tuned for high throughput." + zh: "TCP socket 的发送缓存调优。默认值是针对高吞吐量的一个推荐值。" + } + label { + en: "Socket Send Buffer Size" + zh: "Socket 发送缓存大小" + } + } + socket_receive_buffer { + desc { + en: "Fine tune the socket receive buffer. The default value is tuned for high throughput." + zh: "TCP socket 的收包缓存调优。默认值是针对高吞吐量的一个推荐值。" + } + label { + en: "Socket Receive Buffer Size" + zh: "Socket 收包缓存大小" + } + } + socket_nodelay { + desc { + en: "When set to 'true', TCP buffer sent as soon as possible. " + "Otherwise, the OS kernel may buffer small TCP packets for a while (40 ms by default)." 
+ zh: "设置 ‘true' 让系统内核立即发送。否则当需要发送当内容很少时,可能会有一定延迟(默认 40 毫秒)。" + } + label { + en: "No Delay" + zh: "是否延迟发送" + } + } + kafka_topic { + desc { + en: "Kafka topic name" + zh: "Kafka 主题名称" + } + label { + en: "Kafka Topic Name" + zh: "Kafka 主题名称" + } + } + kafka_message { + desc { + en: "Template to render a Kafka message." + zh: "用于生成 Kafka 消息的模版。" + } + label { + en: "Kafka Message Template" + zh: "Kafka 消息模版" + } + } + kafka_message_key { + desc { + en: "Template to render Kafka message key. " + "If the desired variable for this template is not found in the input data " + "NULL is used." + zh: "生成 Kafka 消息 Key 的模版。当所需要的输入没有时,会使用 NULL。" + } + label { + en: "Message Key" + zh: "消息的 Key" + } + } + kafka_message_value { + desc { + en: "Template to render Kafka message value. " + "If the desired variable for this template is not found in the input data " + "NULL is used." + zh: "生成 Kafka 消息 Value 的模版。当所需要的输入没有时,会使用 NULL。" + } + label { + en: "Message Value" + zh: "消息的 Value" + } + } + kafka_message_timestamp { + desc { + en: "Which timestamp to use. " + "The timestamp is expected to be a millisecond precision Unix epoch " + "which can be in string format, e.g. 1661326462115 or " + "'1661326462115'. " + "When the desired data field for this template is not found, " + "or if the found data is not a valid integer, " + "the current system timestamp will be used." + zh: "生成 Kafka 消息时间戳的模版。" + "该时间必需是一个整型数值(可以是字符串格式)例如 1661326462115 " + "或 '1661326462115'。" + "当所需的输入字段不存在,或不是一个整型时," + "则会使用当前系统时间。" + } + label { + en: "Message Timestamp" + zh: "消息的时间戳" + } + } + max_batch_bytes { + desc { + en: "Maximum bytes to collect in a Kafka message batch. " + "Most of the Kafka brokers default to a limit of 1 MB batch size. " + "EMQX's default value is less than 1 MB in order to compensate " + "Kafka message encoding overheads (especially when each individual message is very small). " + "When a single message is over the limit, it is still sent (as a single element batch)." 
+ zh: "最大消息批量字节数。" + "大多数 Kafka 环境的默认最低值是 1 MB,EMQX 的默认值比 1 MB 更小是因为需要" + "补偿 Kafka 消息编码索需要的额外字节(尤其是当每条消息都很小的情况下)。" + "当单个消息的大小超过该限制时,它仍然会被发送,(相当于该批量中只有单个消息)。" + } + label { + en: "Max Batch Bytes" + zh: "最大批量字节数" + } + } + compression { + desc { + en: "Compression method." + zh: "压缩方法。" + } + label { + en: "Compression" + zh: "压缩" + } + } + partition_strategy { + desc { + en: "Partition strategy is to tell the producer how to dispatch messages to Kafka partitions.\n\n" + "random: Randomly pick a partition for each message\n" + "key_dispatch: Hash Kafka message key to a partition number\n" + zh: "设置消息发布时应该如何选择 Kafka 分区。\n\n" + "random: 为每个消息随机选择一个分区。\n" + "key_dispatch: Hash Kafka message key to a partition number\n" + } + label { + en: "Partition Strategy" + zh: "分区选择策略" + } + } + required_acks { + desc { + en: "Required acknowledgements for Kafka partition leader to wait for its followers " + "before it sends back the acknowledgement to EMQX Kafka producer\n\n" + "all_isr: Require all in-sync replicas to acknowledge.\n" + "leader_only: Require only the partition-leader's acknowledgement.\n" + "none: No need for Kafka to acknowledge at all.\n" + zh: "设置 Kafka leader 在返回给 EMQX 确认之前需要等待多少个 follower 的确认。\n\n" + "all_isr: 需要所有的在线复制者都确认。\n" + "leader_only: 仅需要分区 leader 确认。\n" + "none: 无需 Kafka 回复任何确认。\n" + } + label { + en: "Required Acks" + zh: "Kafka 确认数量" + } + } + partition_count_refresh_interval { + desc { + en: "The time interval for Kafka producer to discover increased number of partitions.\n" + "After the number of partitions is increased in Kafka, EMQX will start taking the \n" + "discovered partitions into account when dispatching messages per partition_strategy." 
+ zh: "配置 Kafka 刷新分区数量的时间间隔。\n" + "EMQX 发现 Kafka 分区数量增加后,会开始按 partition_strategy 配置,把消息发送到新的分区中。" + } + label { + en: "Partition Count Refresh Interval" + zh: "分区数量刷新间隔" + } + } + max_inflight { + desc { + en: "Maximum number of batches allowed for Kafka producer (per-partition) to send before receiving acknowledgement from Kafka. " + "Greater value typically means better throughput. However, there can be a risk of message reordering when this " + "value is greater than 1." + zh: "设置 Kafka 生产者(每个分区一个)在收到 Kafka 的确认前最多发送多少个请求(批量)。" + "调大这个值通常可以增加吞吐量,但是,当该值设置大于 1 是存在消息乱序的风险。" + } + label { + en: "Max Inflight" + zh: "飞行窗口" + } + } + producer_buffer { + desc { + en: "Configure producer message buffer.\n\n" + "Tell Kafka producer how to buffer messages when EMQX has more messages to send than " + "Kafka can keep up, or when Kafka is down.\n\n" + zh: "配置消息缓存的相关参数。\n\n" + "当 EMQX 需要发送的消息超过 Kafka 处理能力,或者当 Kafka 临时下线时,EMQX 内部会将消息缓存起来。" + } + label { + en: "Message Buffer" + zh: "消息缓存" + } + } + buffer_mode { + desc { + en: "Message buffer mode.\n\n" + "memory: Buffer all messages in memory. The messages will be lost in case of EMQX node restart\n" + "disc: Buffer all messages on disk. The messages on disk are able to survive EMQX node restart.\n" + "hybrid: Buffer message in memory first, when up to certain limit " + "(see segment_bytes config for more information), then start offloading " + "messages to disk, Like memory mode, the messages will be lost in case of " + "EMQX node restart." + zh: "消息缓存模式。\n" + "memory: 所有的消息都缓存在内存里。如果 EMQX 服务重启,缓存的消息会丢失。\n" + "disc: 缓存到磁盘上。EMQX 重启后会继续发送重启前未发送完成的消息。\n" + "hybrid: 先将消息缓存在内存中,当内存中的消息堆积超过一定限制" + "(配置项 segment_bytes 描述了该限制)后,后续的消息会缓存到磁盘上。" + "与 memory 模式一样,如果 EMQX 服务重启,缓存的消息会丢失。" + } + label { + en: "Buffer Mode" + zh: "缓存模式" + } + } + buffer_per_partition_limit { + desc { + en: "Number of bytes allowed to buffer for each Kafka partition. 
" + "When this limit is exceeded, old messages will be dropped in a trade for credits " + "for new messages to be buffered." + zh: "为每个 Kafka 分区设置的最大缓存字节数。当超过这个上限之后,老的消息会被丢弃," + "为新的消息腾出空间。" + } + label { + en: "Per-partition Buffer Limit" + zh: "Kafka 分区缓存上限" + } + } + buffer_segment_bytes { + desc { + en: "Applicable when buffer mode is set to disk or hybrid.\n" + "This value is to specify the size of each on-disk buffer file." + zh: "当缓存模式是 diskhybrid 时适用。" + "该配置用于指定缓存到磁盘上的文件的大小。" + } + label { + en: "Segment File Bytes" + zh: "缓存文件大小" + } + } + buffer_memory_overload_protection { + desc { + en: "Applicable when buffer mode is set to memory or hybrid.\n" + "EMQX will drop old cached messages under high memory pressure. " + "The high memory threshold is defined in config sysmon.os.sysmem_high_watermark." + zh: "缓存模式是 memoryhybrid 时适用。" + "当系统处于高内存压力时,从队列中丢弃旧的消息以减缓内存增长。" + "内存压力值由配置项 sysmon.os.sysmem_high_watermark 决定。" + } + label { + en: "Memory Overload Protection" + zh: "内存过载保护" + } + } + auth_username_password { + desc { + en: "Username/password based authentication." + zh: "基于用户名密码的认证。" + } + label { + en: "Username/password Auth" + zh: "用户名密码认证" + } + } + auth_gssapi_kerberos { + desc { + en: "Use GSSAPI/Kerberos authentication." 
+ zh: "使用 GSSAPI/Kerberos 认证。" + } + label { + en: "GSSAPI/Kerberos" + zh: "GSSAPI/Kerberos" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mongodb.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mongodb.conf new file mode 100644 index 000000000..fef3663ef --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mongodb.conf @@ -0,0 +1,67 @@ +emqx_ee_bridge_mongodb { + desc_config { + desc { + en: "Configuration for MongoDB Bridge" + zh: "为MongoDB桥配置" + } + label { + en: "MongoDB Bridge Configuration" + zh: "MongoDB桥配置" + } + } + + enable { + desc { + en: "Enable or disable this MongoDB Bridge" + zh: "启用或停用该MongoDB桥" + } + label { + en: "Enable or disable" + zh: "启用或禁用" + } + } + + collection { + desc { + en: "The collection where data will be stored into" + zh: "数据将被存储到的集合" + } + label { + en: "Collection to be used" + zh: "将要使用的藏品" + } + } + + mongodb_rs_conf { + desc { + en: "MongoDB (Replica Set) configuration" + zh: "MongoDB(Replica Set)配置" + } + label { + en: "MongoDB (Replica Set) Configuration" + zh: "MongoDB(Replica Set)配置" + } + } + + mongodb_sharded_conf { + desc { + en: "MongoDB (Sharded) configuration" + zh: "MongoDB (Sharded)配置" + } + label { + en: "MongoDB (Sharded) Configuration" + zh: "MongoDB (Sharded)配置" + } + } + + mongodb_single_conf { + desc { + en: "MongoDB (Standalone) configuration" + zh: "MongoDB(独立)配置" + } + label { + en: "MongoDB (Standalone) Configuration" + zh: "MongoDB(独立)配置" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mysql.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mysql.conf new file mode 100644 index 000000000..0c56b1976 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_mysql.conf @@ -0,0 +1,74 @@ +emqx_ee_bridge_mysql { + + local_topic { + desc { + en: """The MQTT topic filter to be forwarded to MySQL. All MQTT 'PUBLISH' messages with the topic +matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +configured, then both the data got from the rule and the MQTT messages that match local_topic +will be forwarded. +""" + zh: """发送到 'local_topic' 的消息都会转发到 MySQL。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。 +""" + } + label { + en: "Local Topic" + zh: "本地 Topic" + } + } + + sql_template { + desc { + en: """SQL Template""" + zh: """SQL 模板""" + } + label { + en: "SQL Template" + zh: "SQL 模板" + } + } + config_enable { + desc { + en: """Enable or disable this bridge""" + zh: """启用/禁用桥接""" + } + label { + en: "Enable Or Disable Bridge" + zh: "启用/禁用桥接" + } + } + + desc_config { + desc { + en: """Configuration for a MySQL bridge.""" + zh: """MySQL 桥接配置""" + } + label { + en: "MySQL Bridge Configuration" + zh: "MySQL 桥接配置" + } + } + + desc_type { + desc { + en: """The Bridge Type""" + zh: """Bridge 类型""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + + desc_name { + desc { + en: """Bridge name, used as a human-readable description of the bridge.""" + zh: """桥接名字,可读描述""" + } + label { + en: "Bridge Name" + zh: "桥接名字" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/include/emqx_ee_bridge.hrl b/lib-ee/emqx_ee_bridge/include/emqx_ee_bridge.hrl new file mode 100644 index 000000000..e69de29bb diff --git a/lib-ee/emqx_ee_bridge/rebar.config b/lib-ee/emqx_ee_bridge/rebar.config new file mode 100644 index 000000000..9119b052d --- /dev/null +++ b/lib-ee/emqx_ee_bridge/rebar.config @@ -0,0 +1,14 @@ +{erl_opts, [debug_info]}. +{deps, [ {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.31.2"}}} + , {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.0"}}} + , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.0"}}} + , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0-rc1"}}} + , {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.4"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_ee_bridge]} +]}. 
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src new file mode 100644 index 000000000..2748c27a7 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src @@ -0,0 +1,15 @@ +{application, emqx_ee_bridge, [ + {description, "EMQX Enterprise data bridges"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + emqx_ee_connector, + telemetry + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl new file mode 100644 index 000000000..e0d362f5e --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -0,0 +1,116 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + api_schemas/1, + examples/1, + resource_type/1, + fields/1 +]). + +api_schemas(Method) -> + [ + ref(emqx_ee_bridge_kafka, Method), + ref(emqx_ee_bridge_mysql, Method), + ref(emqx_ee_bridge_mongodb, Method ++ "_rs"), + ref(emqx_ee_bridge_mongodb, Method ++ "_sharded"), + ref(emqx_ee_bridge_mongodb, Method ++ "_single"), + ref(emqx_ee_bridge_hstreamdb, Method), + %% ref(emqx_ee_bridge_influxdb, Method ++ "_udp"), + ref(emqx_ee_bridge_influxdb, Method ++ "_api_v1"), + ref(emqx_ee_bridge_influxdb, Method ++ "_api_v2") + ]. + +schema_modules() -> + [ + emqx_ee_bridge_kafka, + emqx_ee_bridge_hstreamdb, + emqx_ee_bridge_influxdb, + emqx_ee_bridge_mongodb, + emqx_ee_bridge_mysql + ]. 
+ +examples(Method) -> + MergeFun = + fun(Example, Examples) -> + maps:merge(Examples, Example) + end, + Fun = + fun(Module, Examples) -> + ConnectorExamples = erlang:apply(Module, conn_bridge_examples, [Method]), + lists:foldl(MergeFun, Examples, ConnectorExamples) + end, + lists:foldl(Fun, #{}, schema_modules()). + +resource_type(Type) when is_binary(Type) -> resource_type(binary_to_atom(Type, utf8)); +resource_type(kafka) -> emqx_bridge_impl_kafka; +resource_type(hstreamdb) -> emqx_ee_connector_hstreamdb; +resource_type(mongodb_rs) -> emqx_connector_mongo; +resource_type(mongodb_sharded) -> emqx_connector_mongo; +resource_type(mongodb_single) -> emqx_connector_mongo; +resource_type(mysql) -> emqx_connector_mysql; +resource_type(influxdb_udp) -> emqx_ee_connector_influxdb; +resource_type(influxdb_api_v1) -> emqx_ee_connector_influxdb; +resource_type(influxdb_api_v2) -> emqx_ee_connector_influxdb. + +fields(bridges) -> + [ + {kafka, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_kafka, "config")), + #{ + desc => <<"Kafka Bridge Config">>, + required => false + } + )}, + {hstreamdb, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_hstreamdb, "config")), + #{ + desc => <<"HStreamDB Bridge Config">>, + required => false + } + )}, + {mysql, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_mysql, "config")), + #{ + desc => <<"MySQL Bridge Config">>, + required => false + } + )} + ] ++ mongodb_structs() ++ influxdb_structs(). + +mongodb_structs() -> + [ + {Type, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_mongodb, Type)), + #{ + desc => <<"MongoDB Bridge Config">>, + required => false + } + )} + || Type <- [mongodb_rs, mongodb_sharded, mongodb_single] + ]. + +influxdb_structs() -> + [ + {Protocol, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_influxdb, Protocol)), + #{ + desc => <<"InfluxDB Bridge Config">>, + required => false + } + )} + || Protocol <- [ + %% influxdb_udp, + influxdb_api_v1, + influxdb_api_v2 + ] + ]. 
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_hstreamdb.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_hstreamdb.erl new file mode 100644 index 000000000..dfae764c8 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_hstreamdb.erl @@ -0,0 +1,95 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_hstreamdb). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"hstreamdb">> => #{ + summary => <<"HStreamDB Bridge">>, + value => values(Method) + } + } + ]. + +values(get) -> + maps:merge(values(post), ?METRICS_EXAMPLE); +values(post) -> + #{ + type => hstreamdb, + name => <<"demo">>, + connector => <<"hstreamdb:connector">>, + enable => true, + direction => egress, + local_topic => <<"local/topic/#">>, + payload => <<"${payload}">> + }; +values(put) -> + values(post). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_hstreamdb". + +roots() -> []. 
+ +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {direction, mk(egress, #{desc => ?DESC("config_direction"), default => egress})}, + {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})}, + {payload, mk(binary(), #{default => <<"${payload}">>, desc => ?DESC("payload")})}, + {connector, field(connector)} + ]; +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:metrics_status_fields() ++ fields("post"). + +field(connector) -> + mk( + hoconsc:union([binary(), ref(emqx_ee_connector_hstreamdb, config)]), + #{ + required => true, + example => <<"hstreamdb:demo">>, + desc => ?DESC("desc_connector") + } + ). + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for HStream using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal +type_field() -> + {type, mk(enum([hstreamdb]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl new file mode 100644 index 000000000..a2f125722 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_influxdb.erl @@ -0,0 +1,254 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_influxdb). + +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). 
+-include_lib("hocon/include/hoconsc.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-type write_syntax() :: list(). +-reflect_type([write_syntax/0]). +-typerefl_from_string({write_syntax/0, ?MODULE, to_influx_lines}). +-export([to_influx_lines/1]). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"influxdb_udp">> => #{ + summary => <<"InfluxDB UDP Bridge">>, + value => values("influxdb_udp", Method) + } + }, + #{ + <<"influxdb_api_v1">> => #{ + summary => <<"InfluxDB HTTP API V1 Bridge">>, + value => values("influxdb_api_v1", Method) + } + }, + #{ + <<"influxdb_api_v2">> => #{ + summary => <<"InfluxDB HTTP API V2 Bridge">>, + value => values("influxdb_api_v2", Method) + } + } + ]. + +values(Protocol, get) -> + maps:merge(values(Protocol, post), ?METRICS_EXAMPLE); +values("influxdb_api_v2", post) -> + SupportUint = <<"uint_value=${payload.uint_key}u,">>, + TypeOpts = #{ + bucket => <<"example_bucket">>, + org => <<"examlpe_org">>, + token => <<"example_token">>, + server => <<"127.0.0.1:8086">> + }, + values(common, "influxdb_api_v2", SupportUint, TypeOpts); +values("influxdb_api_v1", post) -> + SupportUint = <<>>, + TypeOpts = #{ + database => <<"example_database">>, + username => <<"example_username">>, + password => <<"examlpe_password">>, + server => <<"127.0.0.1:8086">> + }, + values(common, "influxdb_api_v1", SupportUint, TypeOpts); +values("influxdb_udp", post) -> + SupportUint = <<>>, + TypeOpts = #{ + server => <<"127.0.0.1:8089">> + }, + values(common, "influxdb_udp", SupportUint, TypeOpts); +values(Protocol, put) -> + values(Protocol, post). 
+ +values(common, Protocol, SupportUint, TypeOpts) -> + CommonConfigs = #{ + type => list_to_atom(Protocol), + name => <<"demo">>, + enable => true, + local_topic => <<"local/topic/#">>, + write_syntax => + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", SupportUint/binary, + "bool=${payload.bool}">>, + precision => ms, + resource_opts => #{ + enable_batch => false, + batch_size => 100, + batch_time => <<"20ms">> + }, + server => <<"127.0.0.1:8086">>, + ssl => #{enable => false} + }, + maps:merge(TypeOpts, CommonConfigs). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_influxdb". + +roots() -> []. + +fields("post_udp") -> + method_fileds(post, influxdb_udp); +fields("post_api_v1") -> + method_fileds(post, influxdb_api_v1); +fields("post_api_v2") -> + method_fileds(post, influxdb_api_v2); +fields("put_udp") -> + method_fileds(put, influxdb_udp); +fields("put_api_v1") -> + method_fileds(put, influxdb_api_v1); +fields("put_api_v2") -> + method_fileds(put, influxdb_api_v2); +fields("get_udp") -> + method_fileds(get, influxdb_udp); +fields("get_api_v1") -> + method_fileds(get, influxdb_api_v1); +fields("get_api_v2") -> + method_fileds(get, influxdb_api_v2); +fields(Type) when + Type == influxdb_udp orelse Type == influxdb_api_v1 orelse Type == influxdb_api_v2 +-> + influxdb_bridge_common_fields() ++ + connector_fields(Type). + +method_fileds(post, ConnectorType) -> + influxdb_bridge_common_fields() ++ + connector_fields(ConnectorType) ++ + type_name_fields(ConnectorType); +method_fileds(get, ConnectorType) -> + influxdb_bridge_common_fields() ++ + connector_fields(ConnectorType) ++ + type_name_fields(ConnectorType) ++ + emqx_bridge_schema:metrics_status_fields(); +method_fileds(put, ConnectorType) -> + influxdb_bridge_common_fields() ++ + connector_fields(ConnectorType). 
+ +influxdb_bridge_common_fields() -> + emqx_bridge_schema:common_bridge_fields() ++ + [ + {local_topic, mk(binary(), #{desc => ?DESC("local_topic")})}, + {write_syntax, fun write_syntax/1} + ] ++ + emqx_resource_schema:fields("resource_opts"). + +connector_fields(Type) -> + emqx_ee_connector_influxdb:fields(Type). + +type_name_fields(Type) -> + [ + {type, mk(Type, #{required => true, desc => ?DESC("desc_type")})}, + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})} + ]. + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for InfluxDB using `", string:to_upper(Method), "` method."]; +desc(influxdb_udp) -> + ?DESC(emqx_ee_connector_influxdb, "influxdb_udp"); +desc(influxdb_api_v1) -> + ?DESC(emqx_ee_connector_influxdb, "influxdb_api_v1"); +desc(influxdb_api_v2) -> + ?DESC(emqx_ee_connector_influxdb, "influxdb_api_v2"); +desc(_) -> + undefined. + +write_syntax(type) -> + ?MODULE:write_syntax(); +write_syntax(required) -> + true; +write_syntax(validator) -> + [?NOT_EMPTY("the value of the field 'write_syntax' cannot be empty")]; +write_syntax(converter) -> + fun to_influx_lines/1; +write_syntax(desc) -> + ?DESC("write_syntax"); +write_syntax(format) -> + <<"sql">>; +write_syntax(_) -> + undefined. + +to_influx_lines(RawLines) -> + Lines = string:tokens(str(RawLines), "\n"), + lists:reverse(lists:foldl(fun converter_influx_line/2, [], Lines)). + +converter_influx_line(Line, AccIn) -> + case string:tokens(str(Line), " ") of + [MeasurementAndTags, Fields, Timestamp] -> + {Measurement, Tags} = split_measurement_and_tags(MeasurementAndTags), + [ + #{ + measurement => Measurement, + tags => kv_pairs(Tags), + fields => kv_pairs(string:tokens(Fields, ",")), + timestamp => Timestamp + } + | AccIn + ]; + [MeasurementAndTags, Fields] -> + {Measurement, Tags} = split_measurement_and_tags(MeasurementAndTags), + %% TODO: fix here both here and influxdb driver. 
+ %% Default value should be evaluated by InfluxDB. + [ + #{ + measurement => Measurement, + tags => kv_pairs(Tags), + fields => kv_pairs(string:tokens(Fields, ",")), + timestamp => "${timestamp}" + } + | AccIn + ]; + _ -> + throw("Bad InfluxDB Line Protocol schema") + end. + +split_measurement_and_tags(Subject) -> + case string:tokens(Subject, ",") of + [] -> + throw("Bad Measurement schema"); + [Measurement] -> + {Measurement, []}; + [Measurement | Tags] -> + {Measurement, Tags} + end. + +kv_pairs(Pairs) -> + kv_pairs(Pairs, []). +kv_pairs([], Acc) -> + lists:reverse(Acc); +kv_pairs([Pair | Rest], Acc) -> + case string:tokens(Pair, "=") of + [K, V] -> + %% Reduplicated keys will be overwritten. Follows InfluxDB Line Protocol. + kv_pairs(Rest, [{K, V} | Acc]); + _ -> + throw(io_lib:format("Bad InfluxDB Line Protocol Key Value pair: ~p", [Pair])) + end. + +str(A) when is_atom(A) -> + atom_to_list(A); +str(B) when is_binary(B) -> + binary_to_list(B); +str(S) when is_list(S) -> + S. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl new file mode 100644 index 000000000..2540b987c --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_kafka.erl @@ -0,0 +1,273 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_kafka). + +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +%% allow atoms like scram_sha_256 and scram_sha_512 +%% i.e. the _256 part does not start with a-z +-elvis([ + {elvis_style, atom_naming_convention, #{ + regex => "^([a-z][a-z0-9]*_?)([a-z0-9]*_?)*$", + enclosed_atoms => ".*" + }} +]). +-import(hoconsc, [mk/2, enum/1, ref/2]). 
+ +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"kafka">> => #{ + summary => <<"Kafka Bridge">>, + value => values(Method) + } + } + ]. + +values(get) -> + maps:merge(values(post), ?METRICS_EXAMPLE); +values(post) -> + #{ + bootstrap_hosts => <<"localhost:9092">> + }; +values(put) -> + values(post). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions + +namespace() -> "bridge_kafka". + +roots() -> ["config"]. + +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:metrics_status_fields() ++ fields("post"); +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {bootstrap_hosts, mk(binary(), #{required => true, desc => ?DESC(bootstrap_hosts)})}, + {connect_timeout, + mk(emqx_schema:duration_ms(), #{ + default => "5s", + desc => ?DESC(connect_timeout) + })}, + {min_metadata_refresh_interval, + mk( + emqx_schema:duration_ms(), + #{ + default => "3s", + desc => ?DESC(min_metadata_refresh_interval) + } + )}, + {metadata_request_timeout, + mk(emqx_schema:duration_ms(), #{ + default => "5s", + desc => ?DESC(metadata_request_timeout) + })}, + {authentication, + mk(hoconsc:union([none, ref(auth_username_password), ref(auth_gssapi_kerberos)]), #{ + default => none, desc => ?DESC("authentication") + })}, + {producer, mk(hoconsc:union([none, ref(producer_opts)]), #{desc => ?DESC(producer_opts)})}, + %{consumer, mk(hoconsc:union([none, ref(consumer_opts)]), #{desc => ?DESC(consumer_opts)})}, + {socket_opts, mk(ref(socket_opts), #{required => false, desc => ?DESC(socket_opts)})} + ] ++ emqx_connector_schema_lib:ssl_fields(); 
+fields(auth_username_password) -> + [ + {mechanism, + mk(enum([plain, scram_sha_256, scram_sha_512]), #{ + required => true, desc => ?DESC(auth_sasl_mechanism) + })}, + {username, mk(binary(), #{required => true, desc => ?DESC(auth_sasl_username)})}, + {password, + mk(binary(), #{required => true, sensitive => true, desc => ?DESC(auth_sasl_password)})} + ]; +fields(auth_gssapi_kerberos) -> + [ + {kerberos_principal, + mk(binary(), #{ + required => true, + desc => ?DESC(auth_kerberos_principal) + })}, + {kerberos_keytab_file, + mk(binary(), #{ + required => true, + desc => ?DESC(auth_kerberos_keytab_file) + })} + ]; +fields(socket_opts) -> + [ + {sndbuf, + mk( + emqx_schema:bytesize(), + #{default => "1024KB", desc => ?DESC(socket_send_buffer)} + )}, + {recbuf, + mk( + emqx_schema:bytesize(), + #{default => "1024KB", desc => ?DESC(socket_receive_buffer)} + )}, + {nodelay, + mk( + boolean(), + #{default => true, desc => ?DESC(socket_nodelay)} + )} + ]; +fields(producer_opts) -> + [ + {mqtt, mk(ref(producer_mqtt_opts), #{desc => ?DESC(producer_mqtt_opts)})}, + {kafka, + mk(ref(producer_kafka_opts), #{ + required => true, + desc => ?DESC(producer_kafka_opts) + })} + ]; +fields(producer_mqtt_opts) -> + [{topic, mk(binary(), #{desc => ?DESC(mqtt_topic)})}]; +fields(producer_kafka_opts) -> + [ + {topic, mk(string(), #{required => true, desc => ?DESC(kafka_topic)})}, + {message, mk(ref(kafka_message), #{required => false, desc => ?DESC(kafka_message)})}, + {max_batch_bytes, + mk(emqx_schema:bytesize(), #{default => "896KB", desc => ?DESC(max_batch_bytes)})}, + {compression, + mk(enum([no_compression, snappy, gzip]), #{ + default => no_compression, desc => ?DESC(compression) + })}, + {partition_strategy, + mk( + enum([random, key_dispatch]), + #{default => random, desc => ?DESC(partition_strategy)} + )}, + {required_acks, + mk( + enum([all_isr, leader_only, none]), + #{ + default => all_isr, + desc => ?DESC(required_acks) + } + )}, + {partition_count_refresh_interval, + 
mk( + emqx_schema:duration_s(), + #{ + default => "60s", + desc => ?DESC(partition_count_refresh_interval) + } + )}, + {max_inflight, + mk( + pos_integer(), + #{ + default => 10, + desc => ?DESC(max_inflight) + } + )}, + {buffer, + mk(ref(producer_buffer), #{ + required => false, + desc => ?DESC(producer_buffer) + })} + ]; +fields(kafka_message) -> + [ + {key, mk(string(), #{default => "${clientid}", desc => ?DESC(kafka_message_key)})}, + {value, mk(string(), #{default => "${payload}", desc => ?DESC(kafka_message_value)})}, + {timestamp, + mk(string(), #{ + default => "${timestamp}", desc => ?DESC(kafka_message_timestamp) + })} + ]; +fields(producer_buffer) -> + [ + {mode, + mk( + enum([memory, disk, hybrid]), + #{default => memory, desc => ?DESC(buffer_mode)} + )}, + {per_partition_limit, + mk( + emqx_schema:bytesize(), + #{default => "2GB", desc => ?DESC(buffer_per_partition_limit)} + )}, + {segment_bytes, + mk( + emqx_schema:bytesize(), + #{default => "100MB", desc => ?DESC(buffer_segment_bytes)} + )}, + {memory_overload_protection, + mk(boolean(), #{ + %% different from 4.x + default => true, + desc => ?DESC(buffer_memory_overload_protection) + })} + ]. + +% fields(consumer_opts) -> +% [ +% {kafka, mk(ref(consumer_kafka_opts), #{required => true, desc => ?DESC(consumer_kafka_opts)})}, +% {mqtt, mk(ref(consumer_mqtt_opts), #{required => true, desc => ?DESC(consumer_mqtt_opts)})} +% ]; +% fields(consumer_mqtt_opts) -> +% [ {topic, mk(string(), #{desc => ?DESC(consumer_mqtt_topic)})} +% ]; + +% fields(consumer_mqtt_opts) -> +% [ {topic, mk(string(), #{desc => ?DESC(consumer_mqtt_topic)})} +% ]; +% fields(consumer_kafka_opts) -> +% [ {topic, mk(string(), #{desc => ?DESC(consumer_kafka_topic)})} +% ]. 
+ +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for Kafka using `", string:to_upper(Method), "` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +struct_names() -> + [ + auth_gssapi_kerberos, + auth_username_password, + kafka_message, + producer_buffer, + producer_kafka_opts, + producer_mqtt_opts, + socket_opts, + producer_opts + ]. + +%% ------------------------------------------------------------------------------------------------- +%% internal +type_field() -> + {type, mk(enum([kafka]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. + +ref(Name) -> + hoconsc:ref(?MODULE, Name). diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mongodb.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mongodb.erl new file mode 100644 index 000000000..9d9a5e4d0 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mongodb.erl @@ -0,0 +1,154 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_mongodb). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-behaviour(hocon_schema). + +%% emqx_ee_bridge "callbacks" +-export([ + conn_bridge_examples/1 +]). + +%% hocon_schema callbacks +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%%================================================================================================= +%% hocon_schema API +%%================================================================================================= + +namespace() -> + "bridge_mongodb". 
+ +roots() -> + []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("enable"), default => true})}, + {collection, mk(binary(), #{desc => ?DESC("collection"), default => <<"mqtt">>})} + ]; +fields(mongodb_rs) -> + emqx_connector_mongo:fields(rs) ++ fields("config"); +fields(mongodb_sharded) -> + emqx_connector_mongo:fields(sharded) ++ fields("config"); +fields(mongodb_single) -> + emqx_connector_mongo:fields(single) ++ fields("config"); +fields("post_rs") -> + fields(mongodb_rs); +fields("post_sharded") -> + fields(mongodb_sharded); +fields("post_single") -> + fields(mongodb_single); +fields("put_rs") -> + fields(mongodb_rs); +fields("put_sharded") -> + fields(mongodb_sharded); +fields("put_single") -> + fields(mongodb_single); +fields("get_rs") -> + emqx_bridge_schema:metrics_status_fields() ++ fields(mongodb_rs); +fields("get_sharded") -> + emqx_bridge_schema:metrics_status_fields() ++ fields(mongodb_sharded); +fields("get_single") -> + emqx_bridge_schema:metrics_status_fields() ++ fields(mongodb_single). + +conn_bridge_examples(Method) -> + [ + #{ + <<"mongodb_rs">> => #{ + summary => <<"MongoDB (Replica Set) Bridge">>, + value => values(mongodb_rs, Method) + } + }, + #{ + <<"mongodb_sharded">> => #{ + summary => <<"MongoDB (Sharded) Bridge">>, + value => values(mongodb_sharded, Method) + } + }, + #{ + <<"mongodb_single">> => #{ + summary => <<"MongoDB (Standalone) Bridge">>, + value => values(mongodb_single, Method) + } + } + ]. + +desc("config") -> + ?DESC("desc_config"); +desc(mongodb_rs) -> + ?DESC(mongodb_rs_conf); +desc(mongodb_sharded) -> + ?DESC(mongodb_sharded_conf); +desc(mongodb_single) -> + ?DESC(mongodb_single_conf); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for MongoDB using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. 
+ +%%================================================================================================= +%% Internal fns +%%================================================================================================= + +values(mongodb_rs = MongoType, Method) -> + TypeOpts = #{ + servers => <<"localhost:27017, localhost:27018">>, + w_mode => <<"safe">>, + r_mode => <<"safe">>, + replica_set_name => <<"rs">> + }, + values(common, MongoType, Method, TypeOpts); +values(mongodb_sharded = MongoType, Method) -> + TypeOpts = #{ + servers => <<"localhost:27017, localhost:27018">>, + w_mode => <<"safe">> + }, + values(common, MongoType, Method, TypeOpts); +values(mongodb_single = MongoType, Method) -> + TypeOpts = #{ + server => <<"localhost:27017">>, + w_mode => <<"safe">> + }, + values(common, MongoType, Method, TypeOpts). + +values(common, MongoType, Method, TypeOpts) -> + MongoTypeBin = atom_to_binary(MongoType), + Common = #{ + name => <<MongoTypeBin/binary, "_demo">>, + type => MongoTypeBin, + enable => true, + collection => <<"mycol">>, + database => <<"mqtt">>, + srv_record => false, + pool_size => 8, + username => <<"myuser">>, + password => <<"mypass">> + }, + MethodVals = method_values(MongoType, Method), + Vals0 = maps:merge(MethodVals, Common), + maps:merge(Vals0, TypeOpts). + +method_values(MongoType, get) -> + Vals = method_values(MongoType, post), + maps:merge(?METRICS_EXAMPLE, Vals); +method_values(MongoType, _) -> + ConnectorType = + case MongoType of + mongodb_rs -> <<"rs">>; + mongodb_sharded -> <<"sharded">>; + mongodb_single -> <<"single">> + end, + #{mongo_type => ConnectorType}. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl new file mode 100644 index 000000000..bdbf96424 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_mysql.erl @@ -0,0 +1,132 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_mysql). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_SQL, << + "insert into t_mqtt_msg(msgid, topic, qos, payload, arrived) " + "values (${id}, ${topic}, ${qos}, ${payload}, FROM_UNIXTIME(${timestamp}/1000))" +>>). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"mysql">> => #{ + summary => <<"MySQL Bridge">>, + value => values(Method) + } + } + ]. + +values(get) -> + maps:merge(values(post), ?METRICS_EXAMPLE); +values(post) -> + #{ + enable => true, + type => mysql, + name => <<"foo">>, + server => <<"127.0.0.1:3306">>, + database => <<"test">>, + pool_size => 8, + username => <<"root">>, + password => <<"">>, + auto_reconnect => true, + sql => ?DEFAULT_SQL, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 1, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + enable_batch => true, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + enable_queue => false, + max_queue_bytes => ?DEFAULT_QUEUE_SIZE + } + }; +values(put) -> + values(post). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_mysql". + +roots() -> []. 
+ +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {sql, + mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}, + {local_topic, + mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )}, + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ] ++ + emqx_connector_mysql:fields(config) -- emqx_connector_schema_lib:prepare_statement_fields(); +fields("creation_opts") -> + Opts = emqx_resource_schema:fields("creation_opts"), + [O || {Field, _} = O <- Opts, not is_hidden_opts(Field)]; +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:metrics_status_fields() ++ fields("post"). + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for MySQL using `", string:to_upper(Method), "` method."]; +desc("creation_opts" = Name) -> + emqx_resource_schema:desc(Name); +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal +is_hidden_opts(Field) -> + lists:member(Field, [ + async_inflight_window + ]). + +type_field() -> + {type, mk(enum([mysql]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka.erl b/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka.erl new file mode 100644 index 000000000..d1fad4765 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka.erl @@ -0,0 +1,33 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. 
All Rights Reserved. +%%-------------------------------------------------------------------- + +%% Kafka connection configuration +-module(emqx_bridge_impl_kafka). +-behaviour(emqx_resource). + +%% callbacks of behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_get_status/2, + is_buffer_supported/0 +]). + +is_buffer_supported() -> true. + +callback_mode() -> async_if_possible. + +on_start(InstId, Config) -> + emqx_bridge_impl_kafka_producer:on_start(InstId, Config). + +on_stop(InstId, State) -> + emqx_bridge_impl_kafka_producer:on_stop(InstId, State). + +on_query(InstId, Msg, State) -> + emqx_bridge_impl_kafka_producer:on_query(InstId, Msg, State). + +on_get_status(InstId, State) -> + emqx_bridge_impl_kafka_producer:on_get_status(InstId, State). diff --git a/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl b/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl new file mode 100644 index 000000000..0eeb9db0c --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/kafka/emqx_bridge_impl_kafka_producer.erl @@ -0,0 +1,380 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_impl_kafka_producer). + +%% callbacks of behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_get_status/2 +]). + +-export([ + on_kafka_ack/3, + handle_telemetry_event/4 +]). + +-include_lib("emqx/include/logger.hrl"). + +callback_mode() -> async_if_possible. + +%% @doc Config schema is defined in emqx_ee_bridge_kafka. 
+on_start(InstId, Config) -> + #{ + bridge_name := BridgeName, + bootstrap_hosts := Hosts0, + connect_timeout := ConnTimeout, + metadata_request_timeout := MetaReqTimeout, + min_metadata_refresh_interval := MinMetaRefreshInterval, + socket_opts := SocketOpts, + authentication := Auth, + ssl := SSL + } = Config, + _ = maybe_install_wolff_telemetry_handlers(InstId), + %% it's a bug if producer config is not found + %% the caller should not try to start a producer if + %% there is no producer config + ProducerConfigWrapper = get_required(producer, Config, no_kafka_producer_config), + ProducerConfig = get_required(kafka, ProducerConfigWrapper, no_kafka_producer_parameters), + MessageTemplate = get_required(message, ProducerConfig, no_kafka_message_template), + Hosts = hosts(Hosts0), + ClientId = make_client_id(BridgeName), + ClientConfig = #{ + min_metadata_refresh_interval => MinMetaRefreshInterval, + connect_timeout => ConnTimeout, + client_id => ClientId, + request_timeout => MetaReqTimeout, + extra_sock_opts => socket_opts(SocketOpts), + sasl => sasl(Auth), + ssl => ssl(SSL) + }, + #{ + topic := KafkaTopic + } = ProducerConfig, + case wolff:ensure_supervised_client(ClientId, Hosts, ClientConfig) of + {ok, _} -> + ?SLOG(info, #{ + msg => "kafka_client_started", + instance_id => InstId, + kafka_hosts => Hosts + }); + {error, Reason} -> + ?SLOG(error, #{ + msg => "failed_to_start_kafka_client", + instance_id => InstId, + kafka_hosts => Hosts, + reason => Reason + }), + throw(failed_to_start_kafka_client) + end, + WolffProducerConfig = producers_config(BridgeName, ClientId, ProducerConfig), + case wolff:ensure_supervised_producers(ClientId, KafkaTopic, WolffProducerConfig) of + {ok, Producers} -> + {ok, #{ + message_template => compile_message_template(MessageTemplate), + client_id => ClientId, + producers => Producers + }}; + {error, Reason2} -> + ?SLOG(error, #{ + msg => "failed_to_start_kafka_producer", + instance_id => InstId, + kafka_hosts => Hosts, + kafka_topic 
 => KafkaTopic,
+                reason => Reason2
+            }),
+            throw(failed_to_start_kafka_producer)
+    end.
+
+on_stop(InstanceID, #{client_id := ClientID, producers := Producers}) ->
+    _ = with_log_at_error(
+        fun() -> wolff:stop_and_delete_supervised_producers(Producers) end,
+        #{
+            msg => "failed_to_delete_kafka_producer",
+            client_id => ClientID
+        }
+    ),
+    _ = with_log_at_error(
+        fun() -> wolff:stop_and_delete_supervised_client(ClientID) end,
+        #{
+            msg => "failed_to_delete_kafka_client",
+            client_id => ClientID
+        }
+    ),
+    with_log_at_error(
+        fun() -> uninstall_telemetry_handlers(InstanceID) end,
+        #{
+            msg => "failed_to_uninstall_telemetry_handlers",
+            client_id => ClientID
+        }
+    ).
+
+%% @doc The callback API for rule-engine (or bridge without rules)
+%% The input argument `Message' is an enriched format (as a map())
+%% of the original #message{} record.
+%% The enrichment is done by rule-engine or by the data bridge framework.
+%% E.g. the output of rule-engine process chain
+%% or the direct mapping from an MQTT message.
+on_query(_InstId, {send_message, Message}, #{message_template := Template, producers := Producers}) ->
+    KafkaMessage = render_message(Template, Message),
+    %% The returned information is discarded here.
+    %% If the producer process is down when sending, this function would
+    %% raise an error exception which is to be caught by the caller of this callback
+    {_Partition, _Pid} = wolff:send(Producers, [KafkaMessage], {fun ?MODULE:on_kafka_ack/3, [#{}]}),
+    ok.
+
+compile_message_template(#{
+    key := KeyTemplate, value := ValueTemplate, timestamp := TimestampTemplate
+}) ->
+    #{
+        key => emqx_plugin_libs_rule:preproc_tmpl(KeyTemplate),
+        value => emqx_plugin_libs_rule:preproc_tmpl(ValueTemplate),
+        timestamp => emqx_plugin_libs_rule:preproc_tmpl(TimestampTemplate)
+    }.
+ +render_message( + #{key := KeyTemplate, value := ValueTemplate, timestamp := TimestampTemplate}, Message +) -> + #{ + key => render(KeyTemplate, Message), + value => render(ValueTemplate, Message), + ts => render_timestamp(TimestampTemplate, Message) + }. + +render(Template, Message) -> + emqx_plugin_libs_rule:proc_tmpl(Template, Message). + +render_timestamp(Template, Message) -> + try + binary_to_integer(render(Template, Message)) + catch + _:_ -> + erlang:system_time(millisecond) + end. + +on_kafka_ack(_Partition, _Offset, _Extra) -> + %% Do nothing so far. + %% Maybe need to bump some counters? + ok. + +on_get_status(_InstId, _State) -> + connected. + +%% Parse comma separated host:port list into a [{Host,Port}] list +hosts(Hosts) when is_binary(Hosts) -> + hosts(binary_to_list(Hosts)); +hosts(Hosts) when is_list(Hosts) -> + kpro:parse_endpoints(Hosts). + +%% Extra socket options, such as sndbuf size etc. +socket_opts(Opts) when is_map(Opts) -> + socket_opts(maps:to_list(Opts)); +socket_opts(Opts) when is_list(Opts) -> + socket_opts_loop(Opts, []). + +socket_opts_loop([], Acc) -> + lists:reverse(Acc); +socket_opts_loop([{T, Bytes} | Rest], Acc) when + T =:= sndbuf orelse T =:= recbuf orelse T =:= buffer +-> + Acc1 = [{T, Bytes} | adjust_socket_buffer(Bytes, Acc)], + socket_opts_loop(Rest, Acc1); +socket_opts_loop([Other | Rest], Acc) -> + socket_opts_loop(Rest, [Other | Acc]). + +%% https://www.erlang.org/doc/man/inet.html +%% For TCP it is recommended to have val(buffer) >= val(recbuf) +%% to avoid performance issues because of unnecessary copying. +adjust_socket_buffer(Bytes, Opts) -> + case lists:keytake(buffer, 1, Opts) of + false -> + [{buffer, Bytes} | Opts]; + {value, {buffer, Bytes1}, Acc1} -> + [{buffer, max(Bytes1, Bytes)} | Acc1] + end. 
+ +sasl(none) -> + undefined; +sasl(#{mechanism := Mechanism, username := Username, password := Password}) -> + {Mechanism, Username, emqx_secret:wrap(Password)}; +sasl(#{ + kerberos_principal := Principal, + kerberos_keytab_file := KeyTabFile +}) -> + {callback, brod_gssapi, {gssapi, KeyTabFile, Principal}}. + +ssl(#{enable := true} = SSL) -> + emqx_tls_lib:to_client_opts(SSL); +ssl(_) -> + []. + +producers_config(BridgeName, ClientId, Input) -> + #{ + max_batch_bytes := MaxBatchBytes, + compression := Compression, + partition_strategy := PartitionStrategy, + required_acks := RequiredAcks, + partition_count_refresh_interval := PCntRefreshInterval, + max_inflight := MaxInflight, + buffer := #{ + mode := BufferMode, + per_partition_limit := PerPartitionLimit, + segment_bytes := SegmentBytes, + memory_overload_protection := MemOLP + } + } = Input, + + {OffloadMode, ReplayqDir} = + case BufferMode of + memory -> {false, false}; + disk -> {false, replayq_dir(ClientId)}; + hybrid -> {true, replayq_dir(ClientId)} + end, + %% TODO: change this once we add kafka source + BridgeType = kafka, + ResourceID = emqx_bridge_resource:resource_id(BridgeType, BridgeName), + #{ + name => make_producer_name(BridgeName), + partitioner => PartitionStrategy, + partition_count_refresh_interval_seconds => PCntRefreshInterval, + replayq_dir => ReplayqDir, + replayq_offload_mode => OffloadMode, + replayq_max_total_bytes => PerPartitionLimit, + replayq_seg_bytes => SegmentBytes, + drop_if_highmem => MemOLP, + required_acks => RequiredAcks, + max_batch_bytes => MaxBatchBytes, + max_send_ahead => MaxInflight - 1, + compression => Compression, + telemetry_meta_data => #{bridge_id => ResourceID} + }. + +replayq_dir(ClientId) -> + filename:join([emqx:data_dir(), "kafka", ClientId]). + +%% Client ID is better to be unique to make it easier for Kafka side trouble shooting. 
+make_client_id(BridgeName) when is_atom(BridgeName) -> + make_client_id(atom_to_list(BridgeName)); +make_client_id(BridgeName) -> + iolist_to_binary([BridgeName, ":", atom_to_list(node())]). + +%% Producer name must be an atom which will be used as a ETS table name for +%% partition worker lookup. +make_producer_name(BridgeName) when is_atom(BridgeName) -> + make_producer_name(atom_to_list(BridgeName)); +make_producer_name(BridgeName) -> + %% Woff needs atom for ets table name registration + %% The assumption here is bridge is not often re-created + binary_to_atom(iolist_to_binary(["kafka_producer_", BridgeName])). + +with_log_at_error(Fun, Log) -> + try + Fun() + catch + C:E -> + ?SLOG(error, Log#{ + exception => C, + reason => E + }) + end. + +get_required(Field, Config, Throw) -> + Value = maps:get(Field, Config, none), + Value =:= none andalso throw(Throw), + Value. + +handle_telemetry_event( + [wolff, dropped], + #{counter_inc := Val}, + #{bridge_id := ID}, + _HandlerConfig +) when is_integer(Val) -> + emqx_resource_metrics:dropped_inc(ID, Val); +handle_telemetry_event( + [wolff, dropped_queue_full], + #{counter_inc := Val}, + #{bridge_id := ID}, + _HandlerConfig +) when is_integer(Val) -> + emqx_resource_metrics:dropped_queue_full_inc(ID, Val); +handle_telemetry_event( + [wolff, queuing], + #{counter_inc := Val}, + #{bridge_id := ID}, + _HandlerConfig +) when is_integer(Val) -> + emqx_resource_metrics:queuing_change(ID, Val); +handle_telemetry_event( + [wolff, retried], + #{counter_inc := Val}, + #{bridge_id := ID}, + _HandlerConfig +) when is_integer(Val) -> + emqx_resource_metrics:retried_inc(ID, Val); +handle_telemetry_event( + [wolff, failed], + #{counter_inc := Val}, + #{bridge_id := ID}, + _HandlerConfig +) when is_integer(Val) -> + emqx_resource_metrics:failed_inc(ID, Val); +handle_telemetry_event( + [wolff, inflight], + #{counter_inc := Val}, + #{bridge_id := ID}, + _HandlerConfig +) when is_integer(Val) -> + emqx_resource_metrics:inflight_change(ID, 
Val); +handle_telemetry_event( + [wolff, retried_failed], + #{counter_inc := Val}, + #{bridge_id := ID}, + _HandlerConfig +) when is_integer(Val) -> + emqx_resource_metrics:retried_failed_inc(ID, Val); +handle_telemetry_event( + [wolff, retried_success], + #{counter_inc := Val}, + #{bridge_id := ID}, + _HandlerConfig +) when is_integer(Val) -> + emqx_resource_metrics:retried_success_inc(ID, Val); +handle_telemetry_event(_EventId, _Metrics, _MetaData, _HandlerConfig) -> + %% Event that we do not handle + ok. + +-spec telemetry_handler_id(emqx_resource:resource_id()) -> binary(). +telemetry_handler_id(InstanceID) -> + <<"emqx-bridge-kafka-producer-", InstanceID/binary>>. + +uninstall_telemetry_handlers(InstanceID) -> + HandlerID = telemetry_handler_id(InstanceID), + telemetry:detach(HandlerID). + +maybe_install_wolff_telemetry_handlers(InstanceID) -> + %% Attach event handlers for Kafka telemetry events. If a handler with the + %% handler id already exists, the attach_many function does nothing + telemetry:attach_many( + %% unique handler id + telemetry_handler_id(InstanceID), + %% Note: we don't handle `[wolff, success]' because, + %% currently, we already increment the success counter for + %% this resource at `emqx_rule_runtime:handle_action' when + %% the response is `ok' and we would double increment it + %% here. + [ + [wolff, dropped], + [wolff, dropped_queue_full], + [wolff, queuing], + [wolff, retried], + [wolff, failed], + [wolff, inflight], + [wolff, retried_failed], + [wolff, retried_success] + ], + fun ?MODULE:handle_telemetry_event/4, + [] + ). diff --git a/lib-ee/emqx_ee_bridge/test/ee_bridge_hstreamdb_SUITE.erl b/lib-ee/emqx_ee_bridge/test/ee_bridge_hstreamdb_SUITE.erl new file mode 100644 index 000000000..429323ad7 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/test/ee_bridge_hstreamdb_SUITE.erl @@ -0,0 +1,16 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- + +-module(ee_bridge_hstreamdb_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +%% TODO: diff --git a/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl new file mode 100644 index 000000000..0a26e5d26 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/test/emqx_bridge_impl_kafka_producer_SUITE.erl @@ -0,0 +1,631 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_impl_kafka_producer_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("brod/include/brod.hrl"). + +-define(PRODUCER, emqx_bridge_impl_kafka). + +%%------------------------------------------------------------------------------ +%% Things for REST API tests +%%------------------------------------------------------------------------------ + +-import( + emqx_common_test_http, + [ + request_api/3, + request_api/5, + get_http_data/1 + ] +). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("emqx/include/emqx.hrl"). +-include("emqx_dashboard.hrl"). + +-define(CONTENT_TYPE, "application/x-www-form-urlencoded"). + +-define(HOST, "http://127.0.0.1:18083"). + +%% -define(API_VERSION, "v5"). + +-define(BASE_PATH, "/api/v5"). + +-define(APP_DASHBOARD, emqx_dashboard). +-define(APP_MANAGEMENT, emqx_management). 
+ +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + emqx_common_test_helpers:all(?MODULE). + +wait_until_kafka_is_up() -> + wait_until_kafka_is_up(0). + +wait_until_kafka_is_up(300) -> + ct:fail("Kafka is not up even though we have waited for a while"); +wait_until_kafka_is_up(Attempts) -> + KafkaTopic = "test-topic-one-partition", + case resolve_kafka_offset(kafka_hosts(), KafkaTopic, 0) of + {ok, _} -> + ok; + _ -> + timer:sleep(1000), + wait_until_kafka_is_up(Attempts + 1) + end. + +init_per_suite(Config) -> + %% Need to unload emqx_authz. See emqx_machine_SUITE:init_per_suite for + %% more info. + application:unload(emqx_authz), + %% some configs in emqx_conf app are mandatory + emqx_common_test_helpers:render_and_load_app_config(emqx_conf), + emqx_common_test_helpers:start_apps( + [emqx_conf, emqx_rule_engine, emqx_bridge, emqx_management, emqx_dashboard], + fun set_special_configs/1 + ), + application:set_env(emqx_machine, applications, [ + emqx_prometheus, + emqx_modules, + emqx_dashboard, + emqx_gateway, + emqx_statsd, + emqx_resource, + emqx_rule_engine, + emqx_bridge, + emqx_ee_bridge, + emqx_plugin_libs, + emqx_management, + emqx_retainer, + emqx_exhook, + emqx_authn, + emqx_authz, + emqx_plugin + ]), + {ok, _} = application:ensure_all_started(emqx_machine), + wait_until_kafka_is_up(), + %% Wait until bridges API is up + (fun WaitUntilRestApiUp() -> + case show(http_get(["bridges"])) of + {ok, 200, _Res} -> + ok; + Val -> + ct:pal("REST API for bridges not up. Wait and try again. Response: ~p", [Val]), + timer:sleep(1000), + WaitUntilRestApiUp() + end + end)(), + Config. 
+ +end_per_suite(Config) -> + emqx_common_test_helpers:stop_apps([ + emqx_prometheus, + emqx_modules, + emqx_dashboard, + emqx_gateway, + emqx_statsd, + emqx_resource, + emqx_rule_engine, + emqx_bridge, + emqx_ee_bridge, + emqx_plugin_libs, + emqx_management, + emqx_retainer, + emqx_exhook, + emqx_authn, + emqx_authz, + emqx_plugin, + emqx_conf, + emqx_bridge, + emqx_management, + emqx_dashboard, + emqx_machine + ]), + mria:stop(), + Config. + +set_special_configs(emqx_management) -> + Listeners = #{http => #{port => 8081}}, + Config = #{ + listeners => Listeners, + applications => [#{id => "admin", secret => "public"}] + }, + emqx_config:put([emqx_management], Config), + ok; +set_special_configs(emqx_dashboard) -> + emqx_dashboard_api_test_helpers:set_default_config(), + ok; +set_special_configs(_) -> + ok. +%%------------------------------------------------------------------------------ +%% Test cases for all combinations of SSL, no SSL and authentication types +%%------------------------------------------------------------------------------ + +t_publish_no_auth(_CtConfig) -> + publish_with_and_without_ssl("none"). + +t_publish_sasl_plain(_CtConfig) -> + publish_with_and_without_ssl(valid_sasl_plain_settings()). + +t_publish_sasl_scram256(_CtConfig) -> + publish_with_and_without_ssl(valid_sasl_scram256_settings()). + +t_publish_sasl_scram512(_CtConfig) -> + publish_with_and_without_ssl(valid_sasl_scram512_settings()). + +t_publish_sasl_kerberos(_CtConfig) -> + publish_with_and_without_ssl(valid_sasl_kerberos_settings()). + +%%------------------------------------------------------------------------------ +%% Test cases for REST api +%%------------------------------------------------------------------------------ + +show(X) -> + % erlang:display('______________ SHOW ______________:'), + % erlang:display(X), + X. + +t_kafka_bridge_rest_api_plain_text(_CtConfig) -> + kafka_bridge_rest_api_all_auth_methods(false). 
+ +t_kafka_bridge_rest_api_ssl(_CtConfig) -> + kafka_bridge_rest_api_all_auth_methods(true). + +kafka_bridge_rest_api_all_auth_methods(UseSSL) -> + NormalHostsString = + case UseSSL of + true -> kafka_hosts_string_ssl(); + false -> kafka_hosts_string() + end, + SASLHostsString = + case UseSSL of + true -> kafka_hosts_string_ssl_sasl(); + false -> kafka_hosts_string_sasl() + end, + BinifyMap = fun(Map) -> + maps:from_list([ + {erlang:iolist_to_binary(K), erlang:iolist_to_binary(V)} + || {K, V} <- maps:to_list(Map) + ]) + end, + SSLSettings = + case UseSSL of + true -> #{<<"ssl">> => BinifyMap(valid_ssl_settings())}; + false -> #{} + end, + kafka_bridge_rest_api_helper( + maps:merge( + #{ + <<"bootstrap_hosts">> => NormalHostsString, + <<"authentication">> => <<"none">> + }, + SSLSettings + ) + ), + kafka_bridge_rest_api_helper( + maps:merge( + #{ + <<"bootstrap_hosts">> => SASLHostsString, + <<"authentication">> => BinifyMap(valid_sasl_plain_settings()) + }, + SSLSettings + ) + ), + kafka_bridge_rest_api_helper( + maps:merge( + #{ + <<"bootstrap_hosts">> => SASLHostsString, + <<"authentication">> => BinifyMap(valid_sasl_scram256_settings()) + }, + SSLSettings + ) + ), + kafka_bridge_rest_api_helper( + maps:merge( + #{ + <<"bootstrap_hosts">> => SASLHostsString, + <<"authentication">> => BinifyMap(valid_sasl_scram512_settings()) + }, + SSLSettings + ) + ), + kafka_bridge_rest_api_helper( + maps:merge( + #{ + <<"bootstrap_hosts">> => SASLHostsString, + <<"authentication">> => BinifyMap(valid_sasl_kerberos_settings()) + }, + SSLSettings + ) + ), + ok. 
+ +kafka_bridge_rest_api_helper(Config) -> + BridgeType = "kafka", + BridgeName = "my_kafka_bridge", + BridgeID = emqx_bridge_resource:bridge_id( + erlang:list_to_binary(BridgeType), + erlang:list_to_binary(BridgeName) + ), + ResourceId = emqx_bridge_resource:resource_id( + erlang:list_to_binary(BridgeType), + erlang:list_to_binary(BridgeName) + ), + UrlEscColon = "%3A", + BridgeIdUrlEnc = BridgeType ++ UrlEscColon ++ BridgeName, + BridgesParts = ["bridges"], + BridgesPartsIdDeleteAlsoActions = ["bridges", BridgeIdUrlEnc ++ "?also_delete_dep_actions"], + OpUrlFun = fun(OpName) -> ["bridges", BridgeIdUrlEnc, "operation", OpName] end, + BridgesPartsOpDisable = OpUrlFun("disable"), + BridgesPartsOpEnable = OpUrlFun("enable"), + BridgesPartsOpRestart = OpUrlFun("restart"), + BridgesPartsOpStop = OpUrlFun("stop"), + %% List bridges + MyKafkaBridgeExists = fun() -> + {ok, _Code, BridgesData} = show(http_get(BridgesParts)), + Bridges = show(json(BridgesData)), + lists:any( + fun + (#{<<"name">> := <<"my_kafka_bridge">>}) -> true; + (_) -> false + end, + Bridges + ) + end, + %% Delete if my_kafka_bridge exists + case MyKafkaBridgeExists() of + true -> + %% Delete the bridge my_kafka_bridge + {ok, 204, <<>>} = show(http_delete(BridgesPartsIdDeleteAlsoActions)); + false -> + ok + end, + false = MyKafkaBridgeExists(), + %% Create new Kafka bridge + KafkaTopic = "test-topic-one-partition", + CreateBodyTmp = #{ + <<"type">> => <<"kafka">>, + <<"name">> => <<"my_kafka_bridge">>, + <<"bootstrap_hosts">> => maps:get(<<"bootstrap_hosts">>, Config), + <<"enable">> => true, + <<"authentication">> => maps:get(<<"authentication">>, Config), + <<"producer">> => #{ + <<"mqtt">> => #{ + topic => <<"t/#">> + }, + <<"kafka">> => #{ + <<"topic">> => erlang:list_to_binary(KafkaTopic) + } + } + }, + CreateBody = + case maps:is_key(<<"ssl">>, Config) of + true -> CreateBodyTmp#{<<"ssl">> => maps:get(<<"ssl">>, Config)}; + false -> CreateBodyTmp + end, + {ok, 201, _Data} = 
show(http_post(BridgesParts, show(CreateBody))), + %% Check that the new bridge is in the list of bridges + true = MyKafkaBridgeExists(), + %% Create a rule that uses the bridge + {ok, 201, _Rule} = http_post( + ["rules"], + #{ + <<"name">> => <<"kafka_bridge_rest_api_helper_rule">>, + <<"enable">> => true, + <<"actions">> => [BridgeID], + <<"sql">> => <<"SELECT * from \"kafka_bridge_topic/#\"">> + } + ), + %% counters should be empty before + ?assertEqual(0, emqx_resource_metrics:matched_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:success_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:failed_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:inflight_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:batching_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:queuing_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_other_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_queue_full_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_queue_not_enabled_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_resource_not_found_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_resource_stopped_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:retried_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:retried_failed_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:retried_success_get(ResourceId)), + %% Get offset before sending message + {ok, Offset} = resolve_kafka_offset(kafka_hosts(), KafkaTopic, 0), + %% Send message to topic and check that it got forwarded to Kafka + Body = <<"message from EMQX">>, + emqx:publish(emqx_message:make(<<"kafka_bridge_topic/1">>, Body)), + %% Give Kafka some time to get message + timer:sleep(100), + %% Check that Kafka got message + BrodOut = brod:fetch(kafka_hosts(), KafkaTopic, 0, Offset), + {ok, {_, [KafkaMsg]}} = show(BrodOut), 
+ Body = KafkaMsg#kafka_message.value, + %% Check crucial counters and gauges + ?assertEqual(1, emqx_resource_metrics:matched_get(ResourceId)), + ?assertEqual(1, emqx_resource_metrics:success_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:failed_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:inflight_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:batching_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:queuing_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_other_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_queue_full_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_queue_not_enabled_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_resource_not_found_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:dropped_resource_stopped_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:retried_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:retried_failed_get(ResourceId)), + ?assertEqual(0, emqx_resource_metrics:retried_success_get(ResourceId)), + %% Perform operations + {ok, 200, _} = show(http_post(show(BridgesPartsOpDisable), #{})), + {ok, 200, _} = show(http_post(show(BridgesPartsOpDisable), #{})), + {ok, 200, _} = show(http_post(show(BridgesPartsOpEnable), #{})), + {ok, 200, _} = show(http_post(show(BridgesPartsOpEnable), #{})), + {ok, 200, _} = show(http_post(show(BridgesPartsOpStop), #{})), + {ok, 200, _} = show(http_post(show(BridgesPartsOpStop), #{})), + {ok, 200, _} = show(http_post(show(BridgesPartsOpRestart), #{})), + %% Cleanup + {ok, 204, _} = show(http_delete(BridgesPartsIdDeleteAlsoActions)), + false = MyKafkaBridgeExists(), + ok. 
+ +%%------------------------------------------------------------------------------ +%% Helper functions +%%------------------------------------------------------------------------------ + +publish_with_and_without_ssl(AuthSettings) -> + publish_helper(#{ + auth_settings => AuthSettings, + ssl_settings => #{} + }), + publish_helper(#{ + auth_settings => AuthSettings, + ssl_settings => valid_ssl_settings() + }), + ok. + +publish_helper(#{ + auth_settings := AuthSettings, + ssl_settings := SSLSettings +}) -> + HostsString = + case {AuthSettings, SSLSettings} of + {"none", Map} when map_size(Map) =:= 0 -> + kafka_hosts_string(); + {"none", Map} when map_size(Map) =/= 0 -> + kafka_hosts_string_ssl(); + {_, Map} when map_size(Map) =:= 0 -> + kafka_hosts_string_sasl(); + {_, _} -> + kafka_hosts_string_ssl_sasl() + end, + Hash = erlang:phash2([HostsString, AuthSettings, SSLSettings]), + Name = "kafka_bridge_name_" ++ erlang:integer_to_list(Hash), + InstId = emqx_bridge_resource:resource_id("kafka", Name), + BridgeId = emqx_bridge_resource:bridge_id("kafka", Name), + KafkaTopic = "test-topic-one-partition", + Conf = config(#{ + "authentication" => AuthSettings, + "kafka_hosts_string" => HostsString, + "kafka_topic" => KafkaTopic, + "instance_id" => InstId, + "ssl" => SSLSettings + }), + emqx_bridge_resource:create(kafka, erlang:list_to_atom(Name), Conf, #{}), + %% To make sure we get unique value + timer:sleep(1), + Time = erlang:monotonic_time(), + BinTime = integer_to_binary(Time), + Msg = #{ + clientid => BinTime, + payload => <<"payload">>, + timestamp => Time + }, + {ok, Offset} = resolve_kafka_offset(kafka_hosts(), KafkaTopic, 0), + ct:pal("base offset before testing ~p", [Offset]), + StartRes = ?PRODUCER:on_start(InstId, Conf), + {ok, State} = StartRes, + OnQueryRes = ?PRODUCER:on_query(InstId, {send_message, Msg}, State), + ok = OnQueryRes, + {ok, {_, [KafkaMsg]}} = brod:fetch(kafka_hosts(), KafkaTopic, 0, Offset), + ?assertMatch(#kafka_message{key = BinTime}, 
KafkaMsg), + ok = ?PRODUCER:on_stop(InstId, State), + ok = emqx_bridge_resource:remove(BridgeId), + ok. + +config(Args) -> + ConfText = hocon_config(Args), + ct:pal("Running tests with conf:\n~s", [ConfText]), + {ok, Conf} = hocon:binary(ConfText), + #{config := Parsed} = hocon_tconf:check_plain( + emqx_ee_bridge_kafka, + #{<<"config">> => Conf}, + #{atom_key => true} + ), + InstId = maps:get("instance_id", Args), + <<"bridge:", BridgeId/binary>> = InstId, + Parsed#{bridge_name => erlang:element(2, emqx_bridge_resource:parse_bridge_id(BridgeId))}. + +hocon_config(Args) -> + AuthConf = maps:get("authentication", Args), + AuthTemplate = iolist_to_binary(hocon_config_template_authentication(AuthConf)), + AuthConfRendered = bbmustache:render(AuthTemplate, AuthConf), + SSLConf = maps:get("ssl", Args, #{}), + SSLTemplate = iolist_to_binary(hocon_config_template_ssl(SSLConf)), + SSLConfRendered = bbmustache:render(SSLTemplate, SSLConf), + Hocon = bbmustache:render( + iolist_to_binary(hocon_config_template()), + Args#{ + "authentication" => AuthConfRendered, + "ssl" => SSLConfRendered + } + ), + Hocon. + +%% erlfmt-ignore +hocon_config_template() -> +""" +bootstrap_hosts = \"{{ kafka_hosts_string }}\" +enable = true +authentication = {{{ authentication }}} +ssl = {{{ ssl }}} +producer = { + mqtt { + topic = \"t/#\" + } + kafka = { + topic = \"{{ kafka_topic }}\" + } +} +""". + +%% erlfmt-ignore +hocon_config_template_authentication("none") -> + "none"; +hocon_config_template_authentication(#{"mechanism" := _}) -> +""" +{ + mechanism = {{ mechanism }} + password = {{ password }} + username = {{ username }} +} +"""; +hocon_config_template_authentication(#{"kerberos_principal" := _}) -> +""" +{ + kerberos_principal = \"{{ kerberos_principal }}\" + kerberos_keytab_file = \"{{ kerberos_keytab_file }}\" +} +""". 
+ +%% erlfmt-ignore +hocon_config_template_ssl(Map) when map_size(Map) =:= 0 -> +""" +{ + enable = false +} +"""; +hocon_config_template_ssl(_) -> +""" +{ + enable = true + cacertfile = \"{{{cacertfile}}}\" + certfile = \"{{{certfile}}}\" + keyfile = \"{{{keyfile}}}\" +} +""". + +kafka_hosts_string() -> + "kafka-1.emqx.net:9092,". + +kafka_hosts_string_sasl() -> + "kafka-1.emqx.net:9093,". + +kafka_hosts_string_ssl() -> + "kafka-1.emqx.net:9094,". + +kafka_hosts_string_ssl_sasl() -> + "kafka-1.emqx.net:9095,". + +valid_ssl_settings() -> + #{ + "cacertfile" => <<"/var/lib/secret/ca.crt">>, + "certfile" => <<"/var/lib/secret/client.crt">>, + "keyfile" => <<"/var/lib/secret/client.key">>, + "enable" => <<"true">> + }. + +valid_sasl_plain_settings() -> + #{ + "mechanism" => "plain", + "username" => "emqxuser", + "password" => "password" + }. + +valid_sasl_scram256_settings() -> + (valid_sasl_plain_settings())#{ + "mechanism" => "scram_sha_256" + }. + +valid_sasl_scram512_settings() -> + (valid_sasl_plain_settings())#{ + "mechanism" => "scram_sha_512" + }. + +valid_sasl_kerberos_settings() -> + #{ + "kerberos_principal" => "rig@KDC.EMQX.NET", + "kerberos_keytab_file" => "/var/lib/secret/rig.keytab" + }. + +kafka_hosts() -> + kpro:parse_endpoints(kafka_hosts_string()). + +resolve_kafka_offset(Hosts, Topic, Partition) -> + brod:resolve_offset(Hosts, Topic, Partition, latest). + +%%------------------------------------------------------------------------------ +%% Internal functions rest API helpers +%%------------------------------------------------------------------------------ + +bin(X) -> iolist_to_binary(X). + +random_num() -> + erlang:system_time(nanosecond). + +http_get(Parts) -> + request_api(get, api_path(Parts), auth_header_()). + +http_delete(Parts) -> + request_api(delete, api_path(Parts), auth_header_()). + +http_post(Parts, Body) -> + request_api(post, api_path(Parts), [], auth_header_(), Body). 
+ +http_put(Parts, Body) -> + request_api(put, api_path(Parts), [], auth_header_(), Body). + +request_dashboard(Method, Url, Auth) -> + Request = {Url, [Auth]}, + do_request_dashboard(Method, Request). +request_dashboard(Method, Url, QueryParams, Auth) -> + Request = {Url ++ "?" ++ QueryParams, [Auth]}, + do_request_dashboard(Method, Request). +do_request_dashboard(Method, Request) -> + ct:pal("Method: ~p, Request: ~p", [Method, Request]), + case httpc:request(Method, Request, [], []) of + {error, socket_closed_remotely} -> + {error, socket_closed_remotely}; + {ok, {{"HTTP/1.1", Code, _}, _Headers, Return}} when + Code >= 200 andalso Code =< 299 + -> + {ok, Return}; + {ok, {Reason, _, _}} -> + {error, Reason} + end. + +auth_header_() -> + auth_header_(<<"admin">>, <<"public">>). + +auth_header_(Username, Password) -> + {ok, Token} = emqx_dashboard_admin:sign_token(Username, Password), + {"Authorization", "Bearer " ++ binary_to_list(Token)}. + +api_path(Parts) -> + ?HOST ++ filename:join([?BASE_PATH | Parts]). + +json(Data) -> + {ok, Jsx} = emqx_json:safe_decode(Data, [return_maps]), + Jsx. diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl new file mode 100644 index 000000000..c2ac45551 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl @@ -0,0 +1,863 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_influxdb_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). 
+ +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, with_batch}, + {group, without_batch} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + [ + {with_batch, [ + {group, sync_query}, + {group, async_query} + ]}, + {without_batch, [ + {group, sync_query}, + {group, async_query} + ]}, + {sync_query, [ + {group, apiv1_tcp}, + {group, apiv1_tls}, + {group, apiv2_tcp}, + {group, apiv2_tls} + ]}, + {async_query, [ + {group, apiv1_tcp}, + {group, apiv1_tls}, + {group, apiv2_tcp}, + {group, apiv2_tls} + ]}, + {apiv1_tcp, TCs}, + {apiv1_tls, TCs}, + {apiv2_tcp, TCs}, + {apiv2_tls, TCs} + ]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource]), + _ = application:stop(emqx_connector), + ok. 
+ +init_per_group(InfluxDBType, Config0) when + InfluxDBType =:= apiv1_tcp; + InfluxDBType =:= apiv1_tls +-> + #{ + host := InfluxDBHost, + port := InfluxDBPort, + use_tls := UseTLS, + proxy_name := ProxyName + } = + case InfluxDBType of + apiv1_tcp -> + #{ + host => os:getenv("INFLUXDB_APIV1_TCP_HOST", "toxiproxy"), + port => list_to_integer(os:getenv("INFLUXDB_APIV1_TCP_PORT", "8086")), + use_tls => false, + proxy_name => "influxdb_tcp" + }; + apiv1_tls -> + #{ + host => os:getenv("INFLUXDB_APIV1_TLS_HOST", "toxiproxy"), + port => list_to_integer(os:getenv("INFLUXDB_APIV1_TLS_PORT", "8087")), + use_tls => true, + proxy_name => "influxdb_tls" + } + end, + case emqx_common_test_helpers:is_tcp_server_available(InfluxDBHost, InfluxDBPort) of + true -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = start_apps(), + {ok, _} = application:ensure_all_started(emqx_connector), + Config = [{use_tls, UseTLS} | Config0], + {Name, ConfigString, InfluxDBConfig} = influxdb_config( + apiv1, InfluxDBHost, InfluxDBPort, Config + ), + EHttpcPoolNameBin = <<(atom_to_binary(?MODULE))/binary, "_apiv1">>, + EHttpcPoolName = binary_to_atom(EHttpcPoolNameBin), + {EHttpcTransport, EHttpcTransportOpts} = + case UseTLS of + true -> {tls, [{verify, verify_none}]}; + false -> {tcp, []} + end, + EHttpcPoolOpts = [ + {host, InfluxDBHost}, + {port, InfluxDBPort}, + {pool_size, 1}, + {transport, EHttpcTransport}, + {transport_opts, EHttpcTransportOpts} + ], + {ok, _} = ehttpc_sup:start_pool(EHttpcPoolName, EHttpcPoolOpts), + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {proxy_name, ProxyName}, + {influxdb_host, InfluxDBHost}, + {influxdb_port, InfluxDBPort}, + {influxdb_type, apiv1}, + {influxdb_config, InfluxDBConfig}, + {influxdb_config_string, ConfigString}, + {ehttpc_pool_name, EHttpcPoolName}, + {influxdb_name, Name} + | Config + ]; + false -> + 
{skip, no_influxdb} + end; +init_per_group(InfluxDBType, Config0) when + InfluxDBType =:= apiv2_tcp; + InfluxDBType =:= apiv2_tls +-> + #{ + host := InfluxDBHost, + port := InfluxDBPort, + use_tls := UseTLS, + proxy_name := ProxyName + } = + case InfluxDBType of + apiv2_tcp -> + #{ + host => os:getenv("INFLUXDB_APIV2_TCP_HOST", "toxiproxy"), + port => list_to_integer(os:getenv("INFLUXDB_APIV2_TCP_PORT", "8086")), + use_tls => false, + proxy_name => "influxdb_tcp" + }; + apiv2_tls -> + #{ + host => os:getenv("INFLUXDB_APIV2_TLS_HOST", "toxiproxy"), + port => list_to_integer(os:getenv("INFLUXDB_APIV2_TLS_PORT", "8087")), + use_tls => true, + proxy_name => "influxdb_tls" + } + end, + case emqx_common_test_helpers:is_tcp_server_available(InfluxDBHost, InfluxDBPort) of + true -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = start_apps(), + {ok, _} = application:ensure_all_started(emqx_connector), + Config = [{use_tls, UseTLS} | Config0], + {Name, ConfigString, InfluxDBConfig} = influxdb_config( + apiv2, InfluxDBHost, InfluxDBPort, Config + ), + EHttpcPoolNameBin = <<(atom_to_binary(?MODULE))/binary, "_apiv2">>, + EHttpcPoolName = binary_to_atom(EHttpcPoolNameBin), + {EHttpcTransport, EHttpcTransportOpts} = + case UseTLS of + true -> {tls, [{verify, verify_none}]}; + false -> {tcp, []} + end, + EHttpcPoolOpts = [ + {host, InfluxDBHost}, + {port, InfluxDBPort}, + {pool_size, 1}, + {transport, EHttpcTransport}, + {transport_opts, EHttpcTransportOpts} + ], + {ok, _} = ehttpc_sup:start_pool(EHttpcPoolName, EHttpcPoolOpts), + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {proxy_name, ProxyName}, + {influxdb_host, InfluxDBHost}, + {influxdb_port, InfluxDBPort}, + {influxdb_type, apiv2}, + {influxdb_config, InfluxDBConfig}, + {influxdb_config_string, ConfigString}, + {ehttpc_pool_name, EHttpcPoolName}, + {influxdb_name, Name} + | 
Config + ]; + false -> + {skip, no_influxdb} + end; +init_per_group(sync_query, Config) -> + [{query_mode, sync} | Config]; +init_per_group(async_query, Config) -> + [{query_mode, async} | Config]; +init_per_group(with_batch, Config) -> + [{enable_batch, true} | Config]; +init_per_group(without_batch, Config) -> + [{enable_batch, false} | Config]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when + Group =:= apiv1_tcp; + Group =:= apiv1_tls; + Group =:= apiv2_tcp; + Group =:= apiv2_tls +-> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + EHttpcPoolName = ?config(ehttpc_pool_name, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ehttpc_sup:stop_pool(EHttpcPoolName), + delete_bridge(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_Testcase, Config) -> + %% catch clear_db(Config), + %% delete_bridge(Config), + delete_all_bridges(), + Config. + +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + ok = snabbkaffe:stop(), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + %% catch clear_db(Config), + %% delete_bridge(Config), + delete_all_bridges(), + ok. + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +example_write_syntax() -> + %% N.B.: this single space character is relevant + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", + "uint_value=${payload.uint_key}u," + "float_value=${payload.float_key},", "undef_value=${payload.undef},", + "${undef_key}=\"hard-coded-value\",", "bool=${payload.bool}">>. 
+ +influxdb_config(apiv1 = Type, InfluxDBHost, InfluxDBPort, Config) -> + EnableBatch = proplists:get_value(enable_batch, Config, true), + QueryMode = proplists:get_value(query_mode, Config, sync), + UseTLS = proplists:get_value(use_tls, Config, false), + Name = atom_to_binary(?MODULE), + WriteSyntax = example_write_syntax(), + ConfigString = + io_lib:format( + "bridges.influxdb_api_v1.~s {\n" + " enable = true\n" + " server = \"~p:~b\"\n" + " database = mqtt\n" + " username = root\n" + " password = emqx@123\n" + " precision = ns\n" + " write_syntax = \"~s\"\n" + " resource_opts = {\n" + " enable_batch = ~p\n" + " query_mode = ~s\n" + " }\n" + " ssl {\n" + " enable = ~p\n" + " verify = verify_none\n" + " }\n" + "}\n", + [Name, InfluxDBHost, InfluxDBPort, WriteSyntax, EnableBatch, QueryMode, UseTLS] + ), + {Name, ConfigString, parse_and_check(ConfigString, Type, Name)}; +influxdb_config(apiv2 = Type, InfluxDBHost, InfluxDBPort, Config) -> + EnableBatch = proplists:get_value(enable_batch, Config, true), + QueryMode = proplists:get_value(query_mode, Config, sync), + UseTLS = proplists:get_value(use_tls, Config, false), + Name = atom_to_binary(?MODULE), + WriteSyntax = example_write_syntax(), + ConfigString = + io_lib:format( + "bridges.influxdb_api_v2.~s {\n" + " enable = true\n" + " server = \"~p:~b\"\n" + " bucket = mqtt\n" + " org = emqx\n" + " token = abcdefg\n" + " precision = ns\n" + " write_syntax = \"~s\"\n" + " resource_opts = {\n" + " enable_batch = ~p\n" + " query_mode = ~s\n" + " }\n" + " ssl {\n" + " enable = ~p\n" + " verify = verify_none\n" + " }\n" + "}\n", + [Name, InfluxDBHost, InfluxDBPort, WriteSyntax, EnableBatch, QueryMode, UseTLS] + ), + {Name, ConfigString, parse_and_check(ConfigString, Type, Name)}. 
+ +parse_and_check(ConfigString, Type, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = influxdb_type_bin(Type), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +influxdb_type_bin(apiv1) -> + <<"influxdb_api_v1">>; +influxdb_type_bin(apiv2) -> + <<"influxdb_api_v2">>. + +create_bridge(Config) -> + Type = influxdb_type_bin(?config(influxdb_type, Config)), + Name = ?config(influxdb_name, Config), + InfluxDBConfig = ?config(influxdb_config, Config), + emqx_bridge:create(Type, Name, InfluxDBConfig). + +delete_bridge(Config) -> + Type = influxdb_type_bin(?config(influxdb_type, Config)), + Name = ?config(influxdb_name, Config), + emqx_bridge:remove(Type, Name). + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +send_message(Config, Payload) -> + Name = ?config(influxdb_name, Config), + Type = influxdb_type_bin(?config(influxdb_type, Config)), + BridgeId = emqx_bridge_resource:bridge_id(Type, Name), + emqx_bridge:send_message(BridgeId, Payload). 
+ +query_by_clientid(ClientId, Config) -> + InfluxDBHost = ?config(influxdb_host, Config), + InfluxDBPort = ?config(influxdb_port, Config), + EHttpcPoolName = ?config(ehttpc_pool_name, Config), + UseTLS = ?config(use_tls, Config), + Path = <<"/api/v2/query?org=emqx">>, + Scheme = + case UseTLS of + true -> <<"https://">>; + false -> <<"http://">> + end, + URI = iolist_to_binary([ + Scheme, + list_to_binary(InfluxDBHost), + ":", + integer_to_binary(InfluxDBPort), + Path + ]), + Query = + << + "from(bucket: \"mqtt\")\n" + " |> range(start: -12h)\n" + " |> filter(fn: (r) => r.clientid == \"", + ClientId/binary, + "\")" + >>, + Headers = [ + {"Authorization", "Token abcdefg"}, + {"Content-Type", "application/json"} + ], + Body = + emqx_json:encode(#{ + query => Query, + dialect => #{ + header => true, + delimiter => <<";">> + } + }), + {ok, 200, _Headers, RawBody0} = + ehttpc:request( + EHttpcPoolName, + post, + {URI, Headers, Body}, + _Timeout = 10_000, + _Retry = 0 + ), + RawBody1 = iolist_to_binary(string:replace(RawBody0, <<"\r\n">>, <<"\n">>, all)), + {ok, DecodedCSV0} = erl_csv:decode(RawBody1, #{separator => <<$;>>}), + DecodedCSV1 = [ + [Field || Field <- Line, Field =/= <<>>] + || Line <- DecodedCSV0, + Line =/= [<<>>] + ], + DecodedCSV2 = csv_lines_to_maps(DecodedCSV1, []), + index_by_field(DecodedCSV2). + +decode_csv(RawBody) -> + Lines = + [ + binary:split(Line, [<<";">>], [global, trim_all]) + || Line <- binary:split(RawBody, [<<"\r\n">>], [global, trim_all]) + ], + csv_lines_to_maps(Lines, []). + +csv_lines_to_maps([Fields, Data | Rest], Acc) -> + Map = maps:from_list(lists:zip(Fields, Data)), + csv_lines_to_maps(Rest, [Map | Acc]); +csv_lines_to_maps(_Data, Acc) -> + lists:reverse(Acc). + +index_by_field(DecodedCSV) -> + maps:from_list([{Field, Data} || Data = #{<<"_field">> := Field} <- DecodedCSV]). 
+
+assert_persisted_data(ClientId, Expected, PersistedData) ->
+    ClientIdIntKey = <<ClientId/binary, "_int_value">>,
+    maps:foreach(
+        fun
+            (int_value, ExpectedValue) ->
+                ?assertMatch(
+                    #{<<"_value">> := ExpectedValue},
+                    maps:get(ClientIdIntKey, PersistedData)
+                );
+            (Key, ExpectedValue) ->
+                ?assertMatch(
+                    #{<<"_value">> := ExpectedValue},
+                    maps:get(atom_to_binary(Key), PersistedData),
+                    #{expected => ExpectedValue}
+                )
+        end,
+        Expected
+    ),
+    ok.
+
+resource_id(Config) ->
+    Type = influxdb_type_bin(?config(influxdb_type, Config)),
+    Name = ?config(influxdb_name, Config),
+    emqx_bridge_resource:resource_id(Type, Name).
+
+instance_id(Config) ->
+    ResourceId = resource_id(Config),
+    [{_, InstanceId}] = ets:lookup(emqx_resource_manager, {owner, ResourceId}),
+    InstanceId.
+
+%%------------------------------------------------------------------------------
+%% Testcases
+%%------------------------------------------------------------------------------
+
+t_start_ok(Config) ->
+    QueryMode = ?config(query_mode, Config),
+    ?assertMatch(
+        {ok, _},
+        create_bridge(Config)
+    ),
+    ClientId = emqx_guid:to_hexstr(emqx_guid:gen()),
+    Payload = #{
+        int_key => -123,
+        bool => true,
+        float_key => 24.5,
+        uint_key => 123
+    },
+    SentData = #{
+        <<"clientid">> => ClientId,
+        <<"topic">> => atom_to_binary(?FUNCTION_NAME),
+        <<"timestamp">> => erlang:system_time(nanosecond),
+        <<"payload">> => Payload
+    },
+    ?check_trace(
+        begin
+            ?assertEqual(ok, send_message(Config, SentData)),
+            case QueryMode of
+                async -> ct:sleep(500);
+                sync -> ok
+            end,
+            PersistedData = query_by_clientid(ClientId, Config),
+            Expected = #{
+                bool => <<"true">>,
+                int_value => <<"-123">>,
+                uint_value => <<"123">>,
+                float_value => <<"24.5">>,
+                payload => emqx_json:encode(Payload)
+            },
+            assert_persisted_data(ClientId, Expected, PersistedData),
+            ok
+        end,
+        fun(Trace0) ->
+            Trace = ?of_kind(influxdb_connector_send_query, Trace0),
+            ?assertMatch([#{points := [_]}], Trace),
+            [#{points := [Point]}] = Trace,
+            ct:pal("sent point: 
~p", [Point]), + ?assertMatch( + #{ + fields := #{}, + measurement := <<_/binary>>, + tags := #{}, + timestamp := TS + } when is_integer(TS), + Point + ), + #{fields := Fields} = Point, + ?assert(lists:all(fun is_binary/1, maps:keys(Fields))), + ?assertNot(maps:is_key(<<"undefined">>, Fields)), + ?assertNot(maps:is_key(<<"undef_value">>, Fields)), + ok + end + ), + ok. + +t_start_already_started(Config) -> + Type = influxdb_type_bin(?config(influxdb_type, Config)), + Name = ?config(influxdb_name, Config), + InfluxDBConfigString = ?config(influxdb_config_string, Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + InstanceId = instance_id(Config), + TypeAtom = binary_to_atom(Type), + NameAtom = binary_to_atom(Name), + {ok, #{bridges := #{TypeAtom := #{NameAtom := InfluxDBConfigMap}}}} = emqx_hocon:check( + emqx_bridge_schema, InfluxDBConfigString + ), + ?check_trace( + emqx_ee_connector_influxdb:on_start(InstanceId, InfluxDBConfigMap), + fun(Result, Trace) -> + ?assertMatch({ok, _}, Result), + ?assertMatch([_], ?of_kind(influxdb_connector_start_already_started, Trace)), + ok + end + ), + ok. 
+ +t_start_ok_timestamp_write_syntax(Config) -> + InfluxDBType = ?config(influxdb_type, Config), + InfluxDBName = ?config(influxdb_name, Config), + InfluxDBConfigString0 = ?config(influxdb_config_string, Config), + InfluxDBTypeCfg = + case InfluxDBType of + apiv1 -> "influxdb_api_v1"; + apiv2 -> "influxdb_api_v2" + end, + WriteSyntax = + %% N.B.: this single space characters are relevant + <<"${topic},clientid=${clientid}", " ", "payload=${payload},", + "${clientid}_int_value=${payload.int_key}i,", + "uint_value=${payload.uint_key}u," + "bool=${payload.bool}", " ", "${timestamp}">>, + %% append this to override the config + InfluxDBConfigString1 = + io_lib:format( + "bridges.~s.~s {\n" + " write_syntax = \"~s\"\n" + "}\n", + [InfluxDBTypeCfg, InfluxDBName, WriteSyntax] + ), + InfluxDBConfig1 = parse_and_check( + InfluxDBConfigString0 ++ InfluxDBConfigString1, + InfluxDBType, + InfluxDBName + ), + Config1 = [{influxdb_config, InfluxDBConfig1} | Config], + ?assertMatch( + {ok, _}, + create_bridge(Config1) + ), + ok. 
+ +t_start_ok_no_subject_tags_write_syntax(Config) -> + InfluxDBType = ?config(influxdb_type, Config), + InfluxDBName = ?config(influxdb_name, Config), + InfluxDBConfigString0 = ?config(influxdb_config_string, Config), + InfluxDBTypeCfg = + case InfluxDBType of + apiv1 -> "influxdb_api_v1"; + apiv2 -> "influxdb_api_v2" + end, + WriteSyntax = + %% N.B.: this single space characters are relevant + <<"${topic}", " ", "payload=${payload},", "${clientid}_int_value=${payload.int_key}i,", + "uint_value=${payload.uint_key}u," + "bool=${payload.bool}", " ", "${timestamp}">>, + %% append this to override the config + InfluxDBConfigString1 = + io_lib:format( + "bridges.~s.~s {\n" + " write_syntax = \"~s\"\n" + "}\n", + [InfluxDBTypeCfg, InfluxDBName, WriteSyntax] + ), + InfluxDBConfig1 = parse_and_check( + InfluxDBConfigString0 ++ InfluxDBConfigString1, + InfluxDBType, + InfluxDBName + ), + Config1 = [{influxdb_config, InfluxDBConfig1} | Config], + ?assertMatch( + {ok, _}, + create_bridge(Config1) + ), + ok. 
+ +t_boolean_variants(Config) -> + QueryMode = ?config(query_mode, Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + BoolVariants = #{ + true => true, + false => false, + <<"t">> => true, + <<"f">> => false, + <<"T">> => true, + <<"F">> => false, + <<"TRUE">> => true, + <<"FALSE">> => false, + <<"True">> => true, + <<"False">> => false + }, + maps:foreach( + fun(BoolVariant, Translation) -> + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{ + int_key => -123, + bool => BoolVariant, + uint_key => 123 + }, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"timestamp">> => erlang:system_time(nanosecond), + <<"payload">> => Payload + }, + ?assertEqual(ok, send_message(Config, SentData)), + case QueryMode of + async -> ct:sleep(500); + sync -> ok + end, + PersistedData = query_by_clientid(ClientId, Config), + Expected = #{ + bool => atom_to_binary(Translation), + int_value => <<"-123">>, + uint_value => <<"123">>, + payload => emqx_json:encode(Payload) + }, + assert_persisted_data(ClientId, Expected, PersistedData), + ok + end, + BoolVariants + ), + ok. 
+ +t_bad_timestamp(Config) -> + InfluxDBType = ?config(influxdb_type, Config), + InfluxDBName = ?config(influxdb_name, Config), + QueryMode = ?config(query_mode, Config), + EnableBatch = ?config(enable_batch, Config), + InfluxDBConfigString0 = ?config(influxdb_config_string, Config), + InfluxDBTypeCfg = + case InfluxDBType of + apiv1 -> "influxdb_api_v1"; + apiv2 -> "influxdb_api_v2" + end, + WriteSyntax = + %% N.B.: this single space characters are relevant + <<"${topic}", " ", "payload=${payload},", "${clientid}_int_value=${payload.int_key}i,", + "uint_value=${payload.uint_key}u," + "bool=${payload.bool}", " ", "bad_timestamp">>, + %% append this to override the config + InfluxDBConfigString1 = + io_lib:format( + "bridges.~s.~s {\n" + " write_syntax = \"~s\"\n" + "}\n", + [InfluxDBTypeCfg, InfluxDBName, WriteSyntax] + ), + InfluxDBConfig1 = parse_and_check( + InfluxDBConfigString0 ++ InfluxDBConfigString1, + InfluxDBType, + InfluxDBName + ), + Config1 = [{influxdb_config, InfluxDBConfig1} | Config], + ?assertMatch( + {ok, _}, + create_bridge(Config1) + ), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{ + int_key => -123, + bool => false, + uint_key => 123 + }, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"timestamp">> => erlang:system_time(nanosecond), + <<"payload">> => Payload + }, + ?check_trace( + ?wait_async_action( + send_message(Config1, SentData), + #{?snk_kind := influxdb_connector_send_query_error}, + 10_000 + ), + fun(Result, Trace) -> + ?assertMatch({_, {ok, _}}, Result), + {Return, {ok, _}} = Result, + case {QueryMode, EnableBatch} of + {async, true} -> + ?assertEqual(ok, Return), + ?assertMatch( + [#{error := points_trans_failed}], + ?of_kind(influxdb_connector_send_query_error, Trace) + ); + {async, false} -> + ?assertEqual(ok, Return), + ?assertMatch( + [#{error := [{error, {bad_timestamp, [<<"bad_timestamp">>]}}]}], + ?of_kind(influxdb_connector_send_query_error, Trace) + ); 
+ {sync, false} -> + ?assertEqual( + {error, [{error, {bad_timestamp, [<<"bad_timestamp">>]}}]}, Return + ); + {sync, true} -> + ?assertEqual({error, points_trans_failed}, Return) + end, + ok + end + ), + ok. + +t_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + {ok, _} = create_bridge(Config), + ResourceId = resource_id(Config), + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + ok. + +t_create_disconnected(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch({ok, _}, create_bridge(Config)) + end), + fun(Trace) -> + ?assertMatch( + [#{error := influxdb_client_not_alive}], + ?of_kind(influxdb_connector_start_failed, Trace) + ), + ok + end + ), + ok. + +t_start_error(Config) -> + %% simulate client start error + ?check_trace( + emqx_common_test_helpers:with_mock( + influxdb, + start_client, + fun(_Config) -> {error, some_error} end, + fun() -> + ?wait_async_action( + ?assertMatch({ok, _}, create_bridge(Config)), + #{?snk_kind := influxdb_connector_start_failed}, + 10_000 + ) + end + ), + fun(Trace) -> + ?assertMatch( + [#{error := some_error}], + ?of_kind(influxdb_connector_start_failed, Trace) + ), + ok + end + ), + ok. 
+ +t_start_exception(Config) -> + %% simulate client start exception + ?check_trace( + emqx_common_test_helpers:with_mock( + influxdb, + start_client, + fun(_Config) -> error(boom) end, + fun() -> + ?wait_async_action( + ?assertMatch({ok, _}, create_bridge(Config)), + #{?snk_kind := influxdb_connector_start_exception}, + 10_000 + ) + end + ), + fun(Trace) -> + ?assertMatch( + [#{error := {error, boom}}], + ?of_kind(influxdb_connector_start_exception, Trace) + ), + ok + end + ), + ok. + +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + QueryMode = ?config(query_mode, Config), + {ok, _} = create_bridge(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = #{ + int_key => -123, + bool => true, + float_key => 24.5, + uint_key => 123 + }, + SentData = #{ + <<"clientid">> => ClientId, + <<"topic">> => atom_to_binary(?FUNCTION_NAME), + <<"timestamp">> => erlang:system_time(nanosecond), + <<"payload">> => Payload + }, + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + send_message(Config, SentData) + end), + fun(Result, _Trace) -> + case QueryMode of + sync -> + ?assert( + {error, {error, {closed, "The connection was lost."}}} =:= Result orelse + {error, {error, closed}} =:= Result orelse + {error, {error, econnrefused}} =:= Result, + #{got => Result} + ); + async -> + ?assertEqual(ok, Result) + end, + ok + end + ), + ok. + +start_apps() -> + %% some configs in emqx_conf app are mandatory + %% we want to make sure they are loaded before + %% ekka start in emqx_common_test_helpers:start_apps/1 + emqx_common_test_helpers:render_and_load_app_config(emqx_conf), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge]). 
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl new file mode 100644 index 000000000..35698f812 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mongodb_SUITE.erl @@ -0,0 +1,277 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_ee_bridge_mongodb_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, rs}, + {group, sharded}, + {group, single} + | (emqx_common_test_helpers:all(?MODULE) -- group_tests()) + ]. + +group_tests() -> + [ + t_setup_via_config_and_publish, + t_setup_via_http_api_and_publish + ]. + +groups() -> + [ + {rs, group_tests()}, + {sharded, group_tests()}, + {single, group_tests()} + ]. 
+ +init_per_group(Type = rs, Config) -> + MongoHost = os:getenv("MONGO_RS_HOST", "mongo1"), + MongoPort = list_to_integer(os:getenv("MONGO_RS_PORT", "27017")), + case emqx_common_test_helpers:is_tcp_server_available(MongoHost, MongoPort) of + true -> + ok = start_apps(), + emqx_mgmt_api_test_util:init_suite(), + {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type), + [ + {mongo_host, MongoHost}, + {mongo_port, MongoPort}, + {mongo_config, MongoConfig}, + {mongo_type, Type}, + {mongo_name, Name} + | Config + ]; + false -> + {skip, no_mongo} + end; +init_per_group(Type = sharded, Config) -> + MongoHost = os:getenv("MONGO_SHARDED_HOST", "mongosharded3"), + MongoPort = list_to_integer(os:getenv("MONGO_SHARDED_PORT", "27017")), + case emqx_common_test_helpers:is_tcp_server_available(MongoHost, MongoPort) of + true -> + ok = start_apps(), + emqx_mgmt_api_test_util:init_suite(), + {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type), + [ + {mongo_host, MongoHost}, + {mongo_port, MongoPort}, + {mongo_config, MongoConfig}, + {mongo_type, Type}, + {mongo_name, Name} + | Config + ]; + false -> + {skip, no_mongo} + end; +init_per_group(Type = single, Config) -> + MongoHost = os:getenv("MONGO_SINGLE_HOST", "mongo"), + MongoPort = list_to_integer(os:getenv("MONGO_SINGLE_PORT", "27017")), + case emqx_common_test_helpers:is_tcp_server_available(MongoHost, MongoPort) of + true -> + ok = start_apps(), + emqx_mgmt_api_test_util:init_suite(), + {Name, MongoConfig} = mongo_config(MongoHost, MongoPort, Type), + [ + {mongo_host, MongoHost}, + {mongo_port, MongoPort}, + {mongo_config, MongoConfig}, + {mongo_type, Type}, + {mongo_name, Name} + | Config + ]; + false -> + {skip, no_mongo} + end. + +end_per_group(_Type, _Config) -> + ok. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok. 
+ +init_per_testcase(_Testcase, Config) -> + catch clear_db(Config), + delete_bridge(Config), + Config. + +end_per_testcase(_Testcase, Config) -> + catch clear_db(Config), + delete_bridge(Config), + ok. + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +start_apps() -> + ensure_loaded(), + %% some configs in emqx_conf app are mandatory, + %% we want to make sure they are loaded before + %% ekka start in emqx_common_test_helpers:start_apps/1 + emqx_common_test_helpers:render_and_load_app_config(emqx_conf), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]). + +ensure_loaded() -> + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok. + +mongo_type_bin(rs) -> + <<"mongodb_rs">>; +mongo_type_bin(sharded) -> + <<"mongodb_sharded">>; +mongo_type_bin(single) -> + <<"mongodb_single">>. + +mongo_config(MongoHost, MongoPort0, rs = Type) -> + MongoPort = integer_to_list(MongoPort0), + Servers = MongoHost ++ ":" ++ MongoPort, + Name = atom_to_binary(?MODULE), + ConfigString = + io_lib:format( + "bridges.mongodb_rs.~s {\n" + " enable = true\n" + " collection = mycol\n" + " replica_set_name = rs0\n" + " servers = [~p]\n" + " w_mode = safe\n" + " database = mqtt\n" + "}", + [Name, Servers] + ), + {Name, parse_and_check(ConfigString, Type, Name)}; +mongo_config(MongoHost, MongoPort0, sharded = Type) -> + MongoPort = integer_to_list(MongoPort0), + Servers = MongoHost ++ ":" ++ MongoPort, + Name = atom_to_binary(?MODULE), + ConfigString = + io_lib:format( + "bridges.mongodb_sharded.~s {\n" + " enable = true\n" + " collection = mycol\n" + " servers = [~p]\n" + " w_mode = safe\n" + " database = mqtt\n" + "}", + [Name, Servers] + ), + {Name, parse_and_check(ConfigString, Type, Name)}; +mongo_config(MongoHost, MongoPort0, single = Type) -> + MongoPort = integer_to_list(MongoPort0), + Server = MongoHost ++ ":" ++ 
MongoPort, + Name = atom_to_binary(?MODULE), + ConfigString = + io_lib:format( + "bridges.mongodb_single.~s {\n" + " enable = true\n" + " collection = mycol\n" + " server = ~p\n" + " w_mode = safe\n" + " database = mqtt\n" + "}", + [Name, Server] + ), + {Name, parse_and_check(ConfigString, Type, Name)}. + +parse_and_check(ConfigString, Type, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = mongo_type_bin(Type), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + Type = mongo_type_bin(?config(mongo_type, Config)), + Name = ?config(mongo_name, Config), + MongoConfig = ?config(mongo_config, Config), + emqx_bridge:create(Type, Name, MongoConfig). + +delete_bridge(Config) -> + Type = mongo_type_bin(?config(mongo_type, Config)), + Name = ?config(mongo_name, Config), + emqx_bridge:remove(Type, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + Error -> Error + end. + +clear_db(Config) -> + Type = mongo_type_bin(?config(mongo_type, Config)), + Name = ?config(mongo_name, Config), + #{<<"collection">> := Collection} = ?config(mongo_config, Config), + ResourceID = emqx_bridge_resource:resource_id(Type, Name), + {ok, _, #{state := #{poolname := PoolName}}} = emqx_resource:get_instance(ResourceID), + Selector = #{}, + {true, _} = ecpool:pick_and_do( + PoolName, {mongo_api, delete, [Collection, Selector]}, no_handover + ), + ok. 
+ +find_all(Config) -> + Type = mongo_type_bin(?config(mongo_type, Config)), + Name = ?config(mongo_name, Config), + #{<<"collection">> := Collection} = ?config(mongo_config, Config), + ResourceID = emqx_bridge_resource:resource_id(Type, Name), + emqx_resource:query(ResourceID, {find, Collection, #{}, #{}}). + +send_message(Config, Payload) -> + Name = ?config(mongo_name, Config), + Type = mongo_type_bin(?config(mongo_type, Config)), + BridgeID = emqx_bridge_resource:bridge_id(Type, Name), + emqx_bridge:send_message(BridgeID, Payload). + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Val = erlang:unique_integer(), + ok = send_message(Config, #{key => Val}), + ?assertMatch( + {ok, [#{<<"key">> := Val}]}, + find_all(Config) + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + Type = mongo_type_bin(?config(mongo_type, Config)), + Name = ?config(mongo_name, Config), + MongoConfig0 = ?config(mongo_config, Config), + MongoConfig = MongoConfig0#{ + <<"name">> => Name, + <<"type">> => Type + }, + ?assertMatch( + {ok, _}, + create_bridge_http(MongoConfig) + ), + Val = erlang:unique_integer(), + ok = send_message(Config, #{key => Val}), + ?assertMatch( + {ok, [#{<<"key">> := Val}]}, + find_all(Config) + ), + ok. diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl new file mode 100644 index 000000000..292c02580 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl @@ -0,0 +1,394 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_ee_bridge_mysql_SUITE). 
+ +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +% SQL definitions +-define(SQL_BRIDGE, + "INSERT INTO mqtt_test(payload, arrived) " + "VALUES (${payload}, FROM_UNIXTIME(${timestamp}/1000))" +). +-define(SQL_CREATE_TABLE, + "CREATE TABLE IF NOT EXISTS mqtt_test (payload blob, arrived datetime NOT NULL) " + "DEFAULT CHARSET=utf8MB4;" +). +-define(SQL_DROP_TABLE, "DROP TABLE mqtt_test"). +-define(SQL_DELETE, "DELETE from mqtt_test"). +-define(SQL_SELECT, "SELECT payload FROM mqtt_test"). + +% DB defaults +-define(MYSQL_DATABASE, "mqtt"). +-define(MYSQL_USERNAME, "root"). +-define(MYSQL_PASSWORD, "public"). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, with_batch}, + {group, without_batch} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + [ + {with_batch, [ + {group, sync_query} + ]}, + {without_batch, [ + {group, sync_query} + ]}, + {sync_query, [ + {group, tcp}, + {group, tls} + ]}, + {tcp, TCs}, + {tls, TCs} + ]. 
+ +init_per_group(tcp, Config0) -> + MysqlHost = os:getenv("MYSQL_TCP_HOST", "toxiproxy"), + MysqlPort = list_to_integer(os:getenv("MYSQL_TCP_PORT", "3306")), + Config = [ + {mysql_host, MysqlHost}, + {mysql_port, MysqlPort}, + {enable_tls, false}, + {proxy_name, "mysql_tcp"} + | Config0 + ], + common_init(Config); +init_per_group(tls, Config0) -> + MysqlHost = os:getenv("MYSQL_TLS_HOST", "toxiproxy"), + MysqlPort = list_to_integer(os:getenv("MYSQL_TLS_PORT", "3307")), + Config = [ + {mysql_host, MysqlHost}, + {mysql_port, MysqlPort}, + {enable_tls, true}, + {proxy_name, "mysql_tls"} + | Config0 + ], + common_init(Config); +init_per_group(sync_query, Config) -> + [{query_mode, sync} | Config]; +init_per_group(with_batch, Config) -> + [{enable_batch, true} | Config]; +init_per_group(without_batch, Config) -> + [{enable_batch, false} | Config]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when Group =:= tcp; Group =:= tls -> + connect_and_drop_table(Config), + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok. + +init_per_testcase(_Testcase, Config) -> + connect_and_clear_table(Config), + delete_bridge(Config), + Config. + +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + connect_and_clear_table(Config), + ok = snabbkaffe:stop(), + delete_bridge(Config), + ok. 
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(Config0) -> + BridgeType = <<"mysql">>, + MysqlHost = ?config(mysql_host, Config0), + MysqlPort = ?config(mysql_port, Config0), + case emqx_common_test_helpers:is_tcp_server_available(MysqlHost, MysqlPort) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + % Connect to mysql directly and create the table + connect_and_create_table(Config0), + {Name, MysqlConfig} = mysql_config(BridgeType, Config0), + Config = + [ + {mysql_config, MysqlConfig}, + {mysql_bridge_type, BridgeType}, + {mysql_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + {skip, no_mysql} + end. 
+ +mysql_config(BridgeType, Config) -> + MysqlPort = integer_to_list(?config(mysql_port, Config)), + Server = ?config(mysql_host, Config) ++ ":" ++ MysqlPort, + Name = atom_to_binary(?MODULE), + EnableBatch = ?config(enable_batch, Config), + QueryMode = ?config(query_mode, Config), + TlsEnabled = ?config(enable_tls, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " server = ~p\n" + " database = ~p\n" + " username = ~p\n" + " password = ~p\n" + " sql = ~p\n" + " resource_opts = {\n" + " enable_batch = ~p\n" + " query_mode = ~s\n" + " }\n" + " ssl = {\n" + " enable = ~w\n" + " }\n" + "}", + [ + BridgeType, + Name, + Server, + ?MYSQL_DATABASE, + ?MYSQL_USERNAME, + ?MYSQL_PASSWORD, + ?SQL_BRIDGE, + EnableBatch, + QueryMode, + TlsEnabled + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. + +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + BridgeType = ?config(mysql_bridge_type, Config), + Name = ?config(mysql_name, Config), + MysqlConfig = ?config(mysql_config, Config), + emqx_bridge:create(BridgeType, Name, MysqlConfig). + +delete_bridge(Config) -> + BridgeType = ?config(mysql_bridge_type, Config), + Name = ?config(mysql_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + Error -> Error + end. 
+ +send_message(Config, Payload) -> + Name = ?config(mysql_name, Config), + BridgeType = ?config(mysql_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). + +% We need to create and drop the test table outside of using bridges +% since a bridge expects the table to exist when enabling it. We +% therefore call the mysql module directly, in addition to using it +% for querying the DB directly. +connect_direct_mysql(Config) -> + Opts = [ + {host, ?config(mysql_host, Config)}, + {port, ?config(mysql_port, Config)}, + {user, ?MYSQL_USERNAME}, + {password, ?MYSQL_PASSWORD}, + {database, ?MYSQL_DATABASE} + ], + SslOpts = + case ?config(enable_tls, Config) of + true -> + [{ssl, emqx_tls_lib:to_client_opts(#{enable => true})}]; + false -> + [] + end, + {ok, Pid} = mysql:start_link(Opts ++ SslOpts), + Pid. + +% These funs connect and then stop the mysql connection +connect_and_create_table(Config) -> + DirectPid = connect_direct_mysql(Config), + ok = mysql:query(DirectPid, ?SQL_CREATE_TABLE), + mysql:stop(DirectPid). + +connect_and_drop_table(Config) -> + DirectPid = connect_direct_mysql(Config), + ok = mysql:query(DirectPid, ?SQL_DROP_TABLE), + mysql:stop(DirectPid). + +connect_and_clear_table(Config) -> + DirectPid = connect_direct_mysql(Config), + ok = mysql:query(DirectPid, ?SQL_DELETE), + mysql:stop(DirectPid). + +connect_and_get_payload(Config) -> + DirectPid = connect_direct_mysql(Config), + Result = mysql:query(DirectPid, ?SQL_SELECT), + mysql:stop(DirectPid), + Result. 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{payload => Val, timestamp => 1668602148000}, + ?check_trace( + begin + ?wait_async_action( + ?assertEqual(ok, send_message(Config, SentData)), + #{?snk_kind := mysql_connector_query_return}, + 10_000 + ), + ?assertMatch( + {ok, [<<"payload">>], [[Val]]}, + connect_and_get_payload(Config) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(mysql_connector_query_return, Trace0), + ?assertMatch([#{result := ok}], Trace), + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?config(mysql_bridge_type, Config), + Name = ?config(mysql_name, Config), + MysqlConfig0 = ?config(mysql_config, Config), + MysqlConfig = MysqlConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(MysqlConfig) + ), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{payload => Val, timestamp => 1668602148000}, + ?check_trace( + begin + ?wait_async_action( + ?assertEqual(ok, send_message(Config, SentData)), + #{?snk_kind := mysql_connector_query_return}, + 10_000 + ), + ?assertMatch( + {ok, [<<"payload">>], [[Val]]}, + connect_and_get_payload(Config) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(mysql_connector_query_return, Trace0), + ?assertMatch([#{result := ok}], Trace), + ok + end + ), + ok. 
+ +t_get_status(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + + Name = ?config(mysql_name, Config), + BridgeType = ?config(mysql_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {ok, Status} when Status =:= disconnected orelse Status =:= connecting, + emqx_resource_manager:health_check(ResourceID) + ) + end), + ok. + +t_create_disconnected(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch({ok, _}, create_bridge(Config)) + end), + fun(Trace) -> + ?assertMatch( + [#{error := {start_pool_failed, _, _}}], + ?of_kind(mysql_connector_start_failed, Trace) + ), + ok + end + ), + ok. + +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {ok, _} = create_bridge(Config), + Val = integer_to_binary(erlang:unique_integer()), + SentData = #{payload => Val, timestamp => 1668602148000}, + ?check_trace( + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + send_message(Config, SentData) + end), + fun(Result, _Trace) -> + ?assertMatch({error, {resource_error, _}}, Result), + ok + end + ), + ok. 
diff --git a/lib-ee/emqx_ee_conf/.gitignore b/lib-ee/emqx_ee_conf/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/lib-ee/emqx_ee_conf/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/lib-ee/emqx_enterprise_conf/README.md b/lib-ee/emqx_ee_conf/README.md similarity index 62% rename from lib-ee/emqx_enterprise_conf/README.md rename to lib-ee/emqx_ee_conf/README.md index b5b28dfdb..701d285cc 100644 --- a/lib-ee/emqx_enterprise_conf/README.md +++ b/lib-ee/emqx_ee_conf/README.md @@ -1,3 +1,3 @@ -# emqx_enterprise_conf +# emqx_ee_conf EMQX Enterprise configuration schema diff --git a/lib-ee/emqx_enterprise_conf/rebar.config b/lib-ee/emqx_ee_conf/rebar.config similarity index 100% rename from lib-ee/emqx_enterprise_conf/rebar.config rename to lib-ee/emqx_ee_conf/rebar.config diff --git a/lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf.app.src b/lib-ee/emqx_ee_conf/src/emqx_ee_conf.app.src similarity index 54% rename from lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf.app.src rename to lib-ee/emqx_ee_conf/src/emqx_ee_conf.app.src index 37cb78b54..324e7e308 100644 --- a/lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf.app.src +++ b/lib-ee/emqx_ee_conf/src/emqx_ee_conf.app.src @@ -1,5 +1,5 @@ -{application, emqx_enterprise_conf, [ - {description, "EMQX Enterprise configuration schema"}, +{application, emqx_ee_conf, [ + {description, "EMQX Enterprise Edition configuration schema"}, {vsn, "0.1.0"}, {registered, []}, {applications, [ @@ -9,6 +9,5 @@ {env, []}, {modules, []}, - {licenses, ["Apache 2.0"]}, {links, []} ]}. 
diff --git a/lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf_schema.erl b/lib-ee/emqx_ee_conf/src/emqx_ee_conf_schema.erl similarity index 95% rename from lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf_schema.erl rename to lib-ee/emqx_ee_conf/src/emqx_ee_conf_schema.erl index 60aeb1f81..38f6689c5 100644 --- a/lib-ee/emqx_enterprise_conf/src/emqx_enterprise_conf_schema.erl +++ b/lib-ee/emqx_ee_conf/src/emqx_ee_conf_schema.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_enterprise_conf_schema). +-module(emqx_ee_conf_schema). -behaviour(hocon_schema). diff --git a/lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_SUITE.erl b/lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_SUITE.erl similarity index 78% rename from lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_SUITE.erl rename to lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_SUITE.erl index 396faa4f5..0d6d4f061 100644 --- a/lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_SUITE.erl +++ b/lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_SUITE.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_enterprise_conf_schema_SUITE). +-module(emqx_ee_conf_schema_SUITE). -compile(nowarn_export_all). -compile(export_all). @@ -20,12 +20,12 @@ all() -> t_namespace(_Config) -> ?assertEqual( emqx_conf_schema:namespace(), - emqx_enterprise_conf_schema:namespace() + emqx_ee_conf_schema:namespace() ). 
t_roots(_Config) -> BaseRoots = emqx_conf_schema:roots(), - EnterpriseRoots = emqx_enterprise_conf_schema:roots(), + EnterpriseRoots = emqx_ee_conf_schema:roots(), ?assertEqual([], BaseRoots -- EnterpriseRoots), @@ -42,12 +42,12 @@ t_roots(_Config) -> t_fields(_Config) -> ?assertEqual( emqx_conf_schema:fields("node"), - emqx_enterprise_conf_schema:fields("node") + emqx_ee_conf_schema:fields("node") ). t_translations(_Config) -> - [Root | _] = emqx_enterprise_conf_schema:translations(), + [Root | _] = emqx_ee_conf_schema:translations(), ?assertEqual( emqx_conf_schema:translation(Root), - emqx_enterprise_conf_schema:translation(Root) + emqx_ee_conf_schema:translation(Root) ). diff --git a/lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_tests.erl b/lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_tests.erl similarity index 84% rename from lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_tests.erl rename to lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_tests.erl index d7c4e35dd..b4bf0de3d 100644 --- a/lib-ee/emqx_enterprise_conf/test/emqx_enterprise_conf_schema_tests.erl +++ b/lib-ee/emqx_ee_conf/test/emqx_ee_conf_schema_tests.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_enterprise_conf_schema_tests). +-module(emqx_ee_conf_schema_tests). -include_lib("eunit/include/eunit.hrl"). @@ -22,7 +22,7 @@ doc_gen_test() -> "priv", "i18n.conf" ]), - _ = emqx_conf:dump_schema(Dir, emqx_enterprise_conf_schema, I18nFile), + _ = emqx_conf:dump_schema(Dir, emqx_ee_conf_schema, I18nFile), ok end }. 
diff --git a/lib-ee/emqx_ee_connector/.gitignore b/lib-ee/emqx_ee_connector/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/lib-ee/emqx_ee_connector/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/lib-ee/emqx_ee_connector/README.md b/lib-ee/emqx_ee_connector/README.md new file mode 100644 index 000000000..e665af458 --- /dev/null +++ b/lib-ee/emqx_ee_connector/README.md @@ -0,0 +1,9 @@ +emqx_ee_connector +===== + +An OTP application + +Build +----- + + $ rebar3 compile diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_hstreamdb.conf b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_hstreamdb.conf new file mode 100644 index 000000000..0826c8f0c --- /dev/null +++ b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_hstreamdb.conf @@ -0,0 +1,74 @@ +emqx_ee_connector_hstreamdb { + config { + desc { + en: "HStreamDB connection config" + zh: "HStreamDB 连接配置。" + } + label: { + en: "Connection config" + zh: "连接配置" + } + } + + type { + desc { + en: "The Connector Type." + zh: "连接器类型。" + } + label: { + en: "Connector Type" + zh: "连接器类型" + } + } + + name { + desc { + en: "Connector name, used as a human-readable description of the connector." 
+ zh: "连接器名称,人类可读的连接器描述。" + } + label: { + en: "Connector Name" + zh: "连接器名称" + } + } + url { + desc { + en: """HStreamDB Server URL""" + zh: """HStreamDB 服务器 URL""" + } + label { + en: """HStreamDB Server URL""" + zh: """HStreamDB 服务器 URL""" + } + } + stream_name { + desc { + en: """HStreamDB Stream Name""" + zh: """HStreamDB 流名称""" + } + label { + en: """HStreamDB Stream Name""" + zh: """HStreamDB 流名称""" + } + } + ordering_key { + desc { + en: """HStreamDB Ordering Key""" + zh: """HStreamDB 分区键""" + } + label { + en: """HStreamDB Ordering Key""" + zh: """HStreamDB 分区键""" + } + } + pool_size { + desc { + en: """HStreamDB Pool Size""" + zh: """HStreamDB 连接池大小""" + } + label { + en: """HStreamDB Pool Size""" + zh: """HStreamDB 连接池大小""" + } + } +} diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_influxdb.conf b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_influxdb.conf new file mode 100644 index 000000000..81ea39d49 --- /dev/null +++ b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_influxdb.conf @@ -0,0 +1,130 @@ +emqx_ee_connector_influxdb { + + server { + desc { + en: """The IPv4 or IPv6 address or the hostname to connect to.
+A host entry has the following form: `Host[:Port]`.
+The InfluxDB default port 8086 is used if `[:Port]` is not specified. +""" + zh: """将要连接的 IPv4 或 IPv6 地址,或者主机名。
+主机名具有以下形式:`Host[:Port]`。
+如果未指定 `[:Port]`,则使用 InfluxDB 默认端口 8086。 +""" + } + label { + en: "Server Host" + zh: "服务器地址" + } + } + precision { + desc { + en: """InfluxDB time precision.""" + zh: """InfluxDB 时间精度。""" + } + label { + en: """Time Precision""" + zh: """时间精度""" + } + } + protocol { + desc { + en: """InfluxDB's protocol. UDP or HTTP API or HTTP API V2.""" + zh: """InfluxDB 协议。UDP 或 HTTP API 或 HTTP API V2。""" + } + label { + en: """Protocol""" + zh: """协议""" + } + } + influxdb_udp { + desc { + en: """InfluxDB's UDP protocol.""" + zh: """InfluxDB UDP 协议。""" + } + label { + en: """UDP Protocol""" + zh: """UDP 协议""" + } + } + influxdb_api_v1 { + desc { + en: """InfluxDB's protocol. Support InfluxDB v1.8 and before.""" + zh: """InfluxDB HTTP API 协议。支持 Influxdb v1.8 以及之前的版本。""" + } + label { + en: """HTTP API Protocol""" + zh: """HTTP API 协议""" + } + } + influxdb_api_v2 { + desc { + en: """InfluxDB's protocol. Support InfluxDB v2.0 and after.""" + zh: """InfluxDB HTTP API V2 协议。支持 Influxdb v2.0 以及之后的版本。""" + } + label { + en: """HTTP API V2 Protocol""" + zh: """HTTP API V2 协议""" + } + } + database { + desc { + en: """InfluxDB database.""" + zh: """InfluxDB 数据库。""" + } + label { + en: "Database" + zh: "数据库" + } + } + username { + desc { + en: "InfluxDB username." + zh: "InfluxDB 用户名。" + } + label { + en: "Username" + zh: "用户名" + } + } + password { + desc { + en: "InfluxDB password." + zh: "InfluxDB 密码。" + } + label { + en: "Password" + zh: "密码" + } + } + bucket { + desc { + en: "InfluxDB bucket name." 
+ zh: "InfluxDB bucket 名称。" + } + label { + en: "Bucket" + zh: "Bucket" + } + } + org { + desc { + en: """Organization name of InfluxDB.""" + zh: """InfluxDB 组织名称。""" + } + label { + en: """Organization""" + zh: """组织""" + } + } + token { + desc { + en: """InfluxDB token.""" + zh: """InfluxDB token。""" + } + label { + en: """Token""" + zh: """Token""" + } + } + +} diff --git a/lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl b/lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl new file mode 100644 index 000000000..73807d13a --- /dev/null +++ b/lib-ee/emqx_ee_connector/include/emqx_ee_connector.hrl @@ -0,0 +1,5 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%------------------------------------------------------------------- + +-define(INFLUXDB_DEFAULT_PORT, 8086). diff --git a/lib-ee/emqx_ee_connector/rebar.config b/lib-ee/emqx_ee_connector/rebar.config new file mode 100644 index 000000000..1419c2070 --- /dev/null +++ b/lib-ee/emqx_ee_connector/rebar.config @@ -0,0 +1,10 @@ +{erl_opts, [debug_info]}. +{deps, [ + {hstreamdb_erl, {git, "https://github.com/hstreamdb/hstreamdb_erl.git", {tag, "0.2.5"}}}, + {influxdb, {git, "https://github.com/emqx/influxdb-client-erl", {tag, "1.1.4"}}}, + {emqx, {path, "../../apps/emqx"}} +]}. + +{shell, [ + {apps, [emqx_ee_connector]} +]}. diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src new file mode 100644 index 000000000..1163e391c --- /dev/null +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src @@ -0,0 +1,17 @@ +{application, emqx_ee_connector, [ + {description, "EMQX Enterprise connectors"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + hstreamdb_erl, + influxdb, + wolff, + brod + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. 
diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_hstreamdb.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_hstreamdb.erl new file mode 100644 index 000000000..e4bbe8425 --- /dev/null +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_hstreamdb.erl @@ -0,0 +1,311 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_connector_hstreamdb). + +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). + +-import(hoconsc, [mk/2, enum/1]). + +-behaviour(emqx_resource). + +%% callbacks of behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_get_status/2 +]). + +-export([ + on_flush_result/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1, + connector_examples/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% resource callback +callback_mode() -> always_sync. + +on_start(InstId, Config) -> + start_client(InstId, Config). + +on_stop(InstId, #{client := Client, producer := Producer}) -> + StopClientRes = hstreamdb:stop_client(Client), + StopProducerRes = hstreamdb:stop_producer(Producer), + ?SLOG(info, #{ + msg => "stop hstreamdb connector", + connector => InstId, + client => Client, + producer => Producer, + stop_client => StopClientRes, + stop_producer => StopProducerRes + }). + +on_query( + _InstId, + {send_message, Data}, + #{producer := Producer, ordering_key := OrderingKey, payload := Payload} +) -> + Record = to_record(OrderingKey, Payload, Data), + do_append(Producer, Record). + +on_get_status(_InstId, #{client := Client}) -> + case is_alive(Client) of + true -> + connected; + false -> + disconnected + end. 
+ +%% ------------------------------------------------------------------------------------------------- +%% hstreamdb batch callback +%% TODO: maybe remove it after disk cache is ready + +on_flush_result({{flush, _Stream, _Records}, {ok, _Resp}}) -> + ok; +on_flush_result({{flush, _Stream, _Records}, {error, _Reason}}) -> + ok. + +%% ------------------------------------------------------------------------------------------------- +%% schema +namespace() -> connector_hstreamdb. + +roots() -> + fields(config). + +fields(config) -> + [ + {url, mk(binary(), #{required => true, desc => ?DESC("url")})}, + {stream, mk(binary(), #{required => true, desc => ?DESC("stream_name")})}, + {ordering_key, mk(binary(), #{required => false, desc => ?DESC("ordering_key")})}, + {pool_size, mk(pos_integer(), #{required => true, desc => ?DESC("pool_size")})} + ]; +fields("get") -> + fields("post"); +fields("put") -> + fields(config); +fields("post") -> + [ + {type, mk(hstreamdb, #{required => true, desc => ?DESC("type")})}, + {name, mk(binary(), #{required => true, desc => ?DESC("name")})} + ] ++ fields("put"). + +connector_examples(Method) -> + [ + #{ + <<"hstreamdb">> => #{ + summary => <<"HStreamDB Connector">>, + value => values(Method) + } + } + ]. + +values(post) -> + maps:merge(values(put), #{name => <<"connector">>}); +values(get) -> + values(post); +values(put) -> + #{ + type => hstreamdb, + url => <<"http://127.0.0.1:6570">>, + stream => <<"stream1">>, + ordering_key => <<"some_key">>, + pool_size => 8 + }; +values(_) -> + #{}. + +desc(config) -> + ?DESC("config"). + +%% ------------------------------------------------------------------------------------------------- +%% internal functions +start_client(InstId, Config) -> + try + do_start_client(InstId, Config) + catch + E:R:S -> + Error = #{ + msg => "start hstreamdb connector error", + connector => InstId, + error => E, + reason => R, + stack => S + }, + ?SLOG(error, Error), + {error, Error} + end. 
+ +do_start_client(InstId, Config = #{url := Server, pool_size := PoolSize}) -> + ?SLOG(info, #{ + msg => "starting hstreamdb connector: client", + connector => InstId, + config => Config + }), + ClientName = client_name(InstId), + ClientOptions = [ + {url, binary_to_list(Server)}, + {rpc_options, #{pool_size => PoolSize}} + ], + case hstreamdb:start_client(ClientName, ClientOptions) of + {ok, Client} -> + case is_alive(Client) of + true -> + ?SLOG(info, #{ + msg => "hstreamdb connector: client started", + connector => InstId, + client => Client + }), + start_producer(InstId, Client, Config); + _ -> + ?SLOG(error, #{ + msg => "hstreamdb connector: client not alive", + connector => InstId + }), + {error, connect_failed} + end; + {error, {already_started, Pid}} -> + ?SLOG(info, #{ + msg => "starting hstreamdb connector: client, find old client. restart client", + old_client_pid => Pid, + old_client_name => ClientName + }), + _ = hstreamdb:stop_client(ClientName), + start_client(InstId, Config); + {error, Error} -> + ?SLOG(error, #{ + msg => "hstreamdb connector: client failed", + connector => InstId, + reason => Error + }), + {error, Error} + end. + +is_alive(Client) -> + case hstreamdb:echo(Client) of + {ok, _Echo} -> + true; + _ErrorEcho -> + false + end. + +start_producer( + InstId, + Client, + Options = #{stream := Stream, pool_size := PoolSize, egress := #{payload := PayloadBin}} +) -> + %% TODO: change these batch options after we have better disk cache. 
+ BatchSize = maps:get(batch_size, Options, 100), + Interval = maps:get(batch_interval, Options, 1000), + ProducerOptions = [ + {stream, Stream}, + {callback, {?MODULE, on_flush_result, []}}, + {max_records, BatchSize}, + {interval, Interval}, + {pool_size, PoolSize} + ], + Name = produce_name(InstId), + ?SLOG(info, #{ + msg => "starting hstreamdb connector: producer", + connector => InstId + }), + case hstreamdb:start_producer(Client, Name, ProducerOptions) of + {ok, Producer} -> + ?SLOG(info, #{ + msg => "hstreamdb connector: producer started" + }), + EnableBatch = maps:get(enable_batch, Options, false), + Payload = emqx_plugin_libs_rule:preproc_tmpl(PayloadBin), + OrderingKeyBin = maps:get(ordering_key, Options, <<"">>), + OrderingKey = emqx_plugin_libs_rule:preproc_tmpl(OrderingKeyBin), + State = #{ + client => Client, + producer => Producer, + enable_batch => EnableBatch, + ordering_key => OrderingKey, + payload => Payload + }, + {ok, State}; + {error, {already_started, Pid}} -> + ?SLOG(info, #{ + msg => + "starting hstreamdb connector: producer, find old producer. restart producer", + old_producer_pid => Pid, + old_producer_name => Name + }), + _ = hstreamdb:stop_producer(Name), + start_producer(InstId, Client, Options); + {error, Reason} -> + ?SLOG(error, #{ + msg => "starting hstreamdb connector: producer, failed", + reason => Reason + }), + {error, Reason} + end. + +to_record(OrderingKeyTmpl, PayloadTmpl, Data) -> + OrderingKey = emqx_plugin_libs_rule:proc_tmpl(OrderingKeyTmpl, Data), + Payload = emqx_plugin_libs_rule:proc_tmpl(PayloadTmpl, Data), + to_record(OrderingKey, Payload). + +to_record(OrderingKey, Payload) when is_binary(OrderingKey) -> + to_record(binary_to_list(OrderingKey), Payload); +to_record(OrderingKey, Payload) -> + hstreamdb:to_record(OrderingKey, raw, Payload). + +do_append(Producer, Record) -> + do_append(false, Producer, Record). + +%% TODO: this append is async, remove or change it after we have better disk cache. 
+% do_append(true, Producer, Record) ->
+%     case hstreamdb:append(Producer, Record) of
+%         ok ->
+%             ?SLOG(debug, #{
+%                 msg => "hstreamdb producer async append success",
+%                 record => Record
+%             });
+%         {error, Reason} = Err ->
+%             ?SLOG(error, #{
+%                 msg => "hstreamdb producer async append failed",
+%                 reason => Reason,
+%                 record => Record
+%             }),
+%             Err
+%     end;
+do_append(false, Producer, Record) ->
+    %% TODO: this append is sync, but it does not support [Record], can only append one Record.
+    %% Change it after we have better disk cache.
+    case hstreamdb:append_flush(Producer, Record) of
+        {ok, _} ->
+            ?SLOG(debug, #{
+                msg => "hstreamdb producer sync append success",
+                record => Record
+            });
+        {error, Reason} = Err ->
+            ?SLOG(error, #{
+                msg => "hstreamdb producer sync append failed",
+                reason => Reason,
+                record => Record
+            }),
+            Err
+    end.
+
+client_name(InstId) ->
+    "client:" ++ to_string(InstId).
+
+produce_name(ActionId) ->
+    list_to_atom("producer:" ++ to_string(ActionId)).
+
+to_string(List) when is_list(List) -> List;
+to_string(Bin) when is_binary(Bin) -> binary_to_list(Bin);
+to_string(Atom) when is_atom(Atom) -> atom_to_list(Atom).
diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl
new file mode 100644
index 000000000..36b2ec44d
--- /dev/null
+++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl
@@ -0,0 +1,607 @@
+%%--------------------------------------------------------------------
+%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
+%%--------------------------------------------------------------------
+-module(emqx_ee_connector_influxdb).
+
+-include("emqx_ee_connector.hrl").
+-include_lib("emqx_connector/include/emqx_connector.hrl").
+
+-include_lib("hocon/include/hoconsc.hrl").
+-include_lib("typerefl/include/types.hrl").
+-include_lib("emqx/include/logger.hrl").
+-include_lib("snabbkaffe/include/snabbkaffe.hrl").
+ +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-behaviour(emqx_resource). + +%% callbacks of behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_query_async/4, + on_batch_query_async/4, + on_get_status/2 +]). + +-export([ + namespace/0, + fields/1, + desc/1 +]). + +%% influxdb servers don't need parse +-define(INFLUXDB_HOST_OPTIONS, #{ + host_type => hostname, + default_port => ?INFLUXDB_DEFAULT_PORT +}). + +%% ------------------------------------------------------------------------------------------------- +%% resource callback +callback_mode() -> async_if_possible. + +on_start(InstId, Config) -> + start_client(InstId, Config). + +on_stop(_InstId, #{client := Client}) -> + influxdb:stop_client(Client). + +on_query(InstId, {send_message, Data}, _State = #{write_syntax := SyntaxLines, client := Client}) -> + case data_to_points(Data, SyntaxLines) of + {ok, Points} -> + ?tp( + influxdb_connector_send_query, + #{points => Points, batch => false, mode => sync} + ), + do_query(InstId, Client, Points); + {error, ErrorPoints} = Err -> + ?tp( + influxdb_connector_send_query_error, + #{batch => false, mode => sync, error => ErrorPoints} + ), + log_error_points(InstId, ErrorPoints), + Err + end. + +%% Once a Batched Data trans to points failed. +%% This batch query failed +on_batch_query(InstId, BatchData, _State = #{write_syntax := SyntaxLines, client := Client}) -> + case parse_batch_data(InstId, BatchData, SyntaxLines) of + {ok, Points} -> + ?tp( + influxdb_connector_send_query, + #{points => Points, batch => true, mode => sync} + ), + do_query(InstId, Client, Points); + {error, Reason} -> + ?tp( + influxdb_connector_send_query_error, + #{batch => true, mode => sync, error => Reason} + ), + {error, Reason} + end. 
+ +on_query_async( + InstId, + {send_message, Data}, + {ReplayFun, Args}, + _State = #{write_syntax := SyntaxLines, client := Client} +) -> + case data_to_points(Data, SyntaxLines) of + {ok, Points} -> + ?tp( + influxdb_connector_send_query, + #{points => Points, batch => false, mode => async} + ), + do_async_query(InstId, Client, Points, {ReplayFun, Args}); + {error, ErrorPoints} = Err -> + ?tp( + influxdb_connector_send_query_error, + #{batch => false, mode => async, error => ErrorPoints} + ), + log_error_points(InstId, ErrorPoints), + Err + end. + +on_batch_query_async( + InstId, + BatchData, + {ReplayFun, Args}, + #{write_syntax := SyntaxLines, client := Client} +) -> + case parse_batch_data(InstId, BatchData, SyntaxLines) of + {ok, Points} -> + ?tp( + influxdb_connector_send_query, + #{points => Points, batch => true, mode => async} + ), + do_async_query(InstId, Client, Points, {ReplayFun, Args}); + {error, Reason} -> + ?tp( + influxdb_connector_send_query_error, + #{batch => true, mode => async, error => Reason} + ), + {error, Reason} + end. + +on_get_status(_InstId, #{client := Client}) -> + case influxdb:is_alive(Client) of + true -> + connected; + false -> + disconnected + end. + +%% ------------------------------------------------------------------------------------------------- +%% schema +namespace() -> connector_influxdb. 
+ +fields(common) -> + [ + {server, fun server/1}, + {precision, + mk(enum([ns, us, ms, s, m, h]), #{ + required => false, default => ms, desc => ?DESC("precision") + })} + ]; +fields(influxdb_udp) -> + fields(common); +fields(influxdb_api_v1) -> + fields(common) ++ + [ + {database, mk(binary(), #{required => true, desc => ?DESC("database")})}, + {username, mk(binary(), #{desc => ?DESC("username")})}, + {password, mk(binary(), #{desc => ?DESC("password"), format => <<"password">>})} + ] ++ emqx_connector_schema_lib:ssl_fields(); +fields(influxdb_api_v2) -> + fields(common) ++ + [ + {bucket, mk(binary(), #{required => true, desc => ?DESC("bucket")})}, + {org, mk(binary(), #{required => true, desc => ?DESC("org")})}, + {token, mk(binary(), #{required => true, desc => ?DESC("token")})} + ] ++ emqx_connector_schema_lib:ssl_fields(). + +server(type) -> emqx_schema:ip_port(); +server(required) -> true; +server(validator) -> [?NOT_EMPTY("the value of the field 'server' cannot be empty")]; +server(converter) -> fun to_server_raw/1; +server(default) -> <<"127.0.0.1:8086">>; +server(desc) -> ?DESC("server"); +server(_) -> undefined. + +desc(common) -> + ?DESC("common"); +desc(influxdb_udp) -> + ?DESC("influxdb_udp"); +desc(influxdb_api_v1) -> + ?DESC("influxdb_api_v1"); +desc(influxdb_api_v2) -> + ?DESC("influxdb_api_v2"). + +%% ------------------------------------------------------------------------------------------------- +%% internal functions + +start_client(InstId, Config) -> + ClientConfig = client_config(InstId, Config), + ?SLOG(info, #{ + msg => "starting influxdb connector", + connector => InstId, + config => Config, + client_config => ClientConfig + }), + try + do_start_client(InstId, ClientConfig, Config) + catch + E:R:S -> + ?tp(influxdb_connector_start_exception, #{error => {E, R}}), + ?SLOG(error, #{ + msg => "start influxdb connector error", + connector => InstId, + error => E, + reason => R, + stack => S + }), + {error, R} + end. 
+ +do_start_client( + InstId, + ClientConfig, + Config = #{ + write_syntax := Lines + } +) -> + case influxdb:start_client(ClientConfig) of + {ok, Client} -> + case influxdb:is_alive(Client) of + true -> + State = #{ + client => Client, + write_syntax => to_config(Lines) + }, + ?SLOG(info, #{ + msg => "starting influxdb connector success", + connector => InstId, + client => Client, + state => State + }), + {ok, State}; + false -> + ?tp(influxdb_connector_start_failed, #{error => influxdb_client_not_alive}), + ?SLOG(error, #{ + msg => "starting influxdb connector failed", + connector => InstId, + client => Client, + reason => "client is not alive" + }), + {error, influxdb_client_not_alive} + end; + {error, {already_started, Client0}} -> + ?tp(influxdb_connector_start_already_started, #{}), + ?SLOG(info, #{ + msg => "restarting influxdb connector, found already started client", + connector => InstId, + old_client => Client0 + }), + _ = influxdb:stop_client(Client0), + do_start_client(InstId, ClientConfig, Config); + {error, Reason} -> + ?tp(influxdb_connector_start_failed, #{error => Reason}), + ?SLOG(error, #{ + msg => "starting influxdb connector failed", + connector => InstId, + reason => Reason + }), + {error, Reason} + end. + +client_config( + InstId, + Config = #{ + server := {Host, Port} + } +) -> + [ + {host, str(Host)}, + {port, Port}, + {pool_size, erlang:system_info(schedulers)}, + {pool, binary_to_atom(InstId, utf8)}, + {precision, atom_to_binary(maps:get(precision, Config, ms), utf8)} + ] ++ protocol_config(Config). 
+ +%% api v1 config +protocol_config(#{ + username := Username, + password := Password, + database := DB, + ssl := SSL +}) -> + [ + {protocol, http}, + {version, v1}, + {username, str(Username)}, + {password, str(Password)}, + {database, str(DB)} + ] ++ ssl_config(SSL); +%% api v2 config +protocol_config(#{ + bucket := Bucket, + org := Org, + token := Token, + ssl := SSL +}) -> + [ + {protocol, http}, + {version, v2}, + {bucket, str(Bucket)}, + {org, str(Org)}, + {token, Token} + ] ++ ssl_config(SSL); +%% udp config +protocol_config(_) -> + [ + {protocol, udp} + ]. + +ssl_config(#{enable := false}) -> + [ + {https_enabled, false} + ]; +ssl_config(SSL = #{enable := true}) -> + [ + {https_enabled, true}, + {transport, ssl} + ] ++ maps:to_list(maps:remove(enable, SSL)). + +%% ------------------------------------------------------------------------------------------------- +%% Query +do_query(InstId, Client, Points) -> + case influxdb:write(Client, Points) of + ok -> + ?SLOG(debug, #{ + msg => "influxdb write point success", + connector => InstId, + points => Points + }); + {error, Reason} = Err -> + ?tp(influxdb_connector_do_query_failure, #{error => Reason}), + ?SLOG(error, #{ + msg => "influxdb write point failed", + connector => InstId, + reason => Reason + }), + Err + end. + +do_async_query(InstId, Client, Points, ReplayFunAndArgs) -> + ?SLOG(info, #{ + msg => "influxdb write point async", + connector => InstId, + points => Points + }), + ok = influxdb:write_async(Client, Points, ReplayFunAndArgs). + +%% ------------------------------------------------------------------------------------------------- +%% Tags & Fields Config Trans + +to_config(Lines) -> + to_config(Lines, []). 
+ +to_config([], Acc) -> + lists:reverse(Acc); +to_config( + [ + #{ + measurement := Measurement, + timestamp := Timestamp, + tags := Tags, + fields := Fields + } + | Rest + ], + Acc +) -> + Res = #{ + measurement => emqx_plugin_libs_rule:preproc_tmpl(Measurement), + timestamp => emqx_plugin_libs_rule:preproc_tmpl(Timestamp), + tags => to_kv_config(Tags), + fields => to_kv_config(Fields) + }, + to_config(Rest, [Res | Acc]). + +to_kv_config(KVfields) -> + maps:fold(fun to_maps_config/3, #{}, proplists:to_map(KVfields)). + +to_maps_config(K, V, Res) -> + NK = emqx_plugin_libs_rule:preproc_tmpl(bin(K)), + NV = emqx_plugin_libs_rule:preproc_tmpl(bin(V)), + Res#{NK => NV}. + +%% ------------------------------------------------------------------------------------------------- +%% Tags & Fields Data Trans +parse_batch_data(InstId, BatchData, SyntaxLines) -> + {Points, Errors} = lists:foldl( + fun({send_message, Data}, {ListOfPoints, ErrAccIn}) -> + case data_to_points(Data, SyntaxLines) of + {ok, Points} -> + {[Points | ListOfPoints], ErrAccIn}; + {error, ErrorPoints} -> + log_error_points(InstId, ErrorPoints), + {ListOfPoints, ErrAccIn + 1} + end + end, + {[], 0}, + BatchData + ), + case Errors of + 0 -> + {ok, lists:flatten(Points)}; + _ -> + ?SLOG(error, #{ + msg => io_lib:format("InfluxDB trans point failed, count: ~p", [Errors]), + connector => InstId, + reason => points_trans_failed + }), + {error, points_trans_failed} + end. + +-spec data_to_points(map(), [ + #{ + fields := [{binary(), binary()}], + measurement := binary(), + tags := [{binary(), binary()}], + timestamp := binary() + } +]) -> {ok, [map()]} | {error, term()}. +data_to_points(Data, SyntaxLines) -> + lines_to_points(Data, SyntaxLines, [], []). + +%% When converting multiple rows data into InfluxDB Line Protocol, they are considered to be strongly correlated. +%% And once a row fails to convert, all of them are considered to have failed. 
+lines_to_points(_, [], Points, ErrorPoints) -> + case ErrorPoints of + [] -> + {ok, Points}; + _ -> + %% ignore trans succeeded points + {error, ErrorPoints} + end; +lines_to_points( + Data, + [ + #{ + measurement := Measurement, + timestamp := Timestamp, + tags := Tags, + fields := Fields + } + | Rest + ], + ResultPointsAcc, + ErrorPointsAcc +) -> + TransOptions = #{return => rawlist, var_trans => fun data_filter/1}, + case emqx_plugin_libs_rule:proc_tmpl(Timestamp, Data, TransOptions) of + [TimestampInt] when is_integer(TimestampInt) -> + {_, EncodeTags} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Tags), + {_, EncodeFields} = maps:fold(fun maps_config_to_data/3, {Data, #{}}, Fields), + Point = #{ + measurement => emqx_plugin_libs_rule:proc_tmpl(Measurement, Data), + timestamp => TimestampInt, + tags => EncodeTags, + fields => EncodeFields + }, + lines_to_points(Data, Rest, [Point | ResultPointsAcc], ErrorPointsAcc); + BadTimestamp -> + lines_to_points(Data, Rest, ResultPointsAcc, [ + {error, {bad_timestamp, BadTimestamp}} | ErrorPointsAcc + ]) + end. + +maps_config_to_data(K, V, {Data, Res}) -> + KTransOptions = #{return => rawlist, var_trans => fun key_filter/1}, + VTransOptions = #{return => rawlist, var_trans => fun data_filter/1}, + NK0 = emqx_plugin_libs_rule:proc_tmpl(K, Data, KTransOptions), + NV = emqx_plugin_libs_rule:proc_tmpl(V, Data, VTransOptions), + case {NK0, NV} of + {[undefined], _} -> + {Data, Res}; + %% undefined value in normal format [undefined] or int/uint format [undefined, <<"i">>] + {_, [undefined | _]} -> + {Data, Res}; + _ -> + NK = list_to_binary(NK0), + {Data, Res#{NK => value_type(NV)}} + end. 
+ +value_type([Int, <<"i">>]) when + is_integer(Int) +-> + {int, Int}; +value_type([UInt, <<"u">>]) when + is_integer(UInt) +-> + {uint, UInt}; +value_type([Float]) when is_float(Float) -> + Float; +value_type([<<"t">>]) -> + 't'; +value_type([<<"T">>]) -> + 'T'; +value_type([true]) -> + 'true'; +value_type([<<"TRUE">>]) -> + 'TRUE'; +value_type([<<"True">>]) -> + 'True'; +value_type([<<"f">>]) -> + 'f'; +value_type([<<"F">>]) -> + 'F'; +value_type([false]) -> + 'false'; +value_type([<<"FALSE">>]) -> + 'FALSE'; +value_type([<<"False">>]) -> + 'False'; +value_type(Val) -> + Val. + +key_filter(undefined) -> undefined; +key_filter(Value) -> emqx_plugin_libs_rule:bin(Value). + +data_filter(undefined) -> undefined; +data_filter(Int) when is_integer(Int) -> Int; +data_filter(Number) when is_number(Number) -> Number; +data_filter(Bool) when is_boolean(Bool) -> Bool; +data_filter(Data) -> bin(Data). + +bin(Data) -> emqx_plugin_libs_rule:bin(Data). + +%% helper funcs +log_error_points(InstId, Errs) -> + lists:foreach( + fun({error, Reason}) -> + ?SLOG(error, #{ + msg => "influxdb trans point failed", + connector => InstId, + reason => Reason + }) + end, + Errs + ). + +%% =================================================================== +%% typereflt funcs + +-spec to_server_raw(string() | binary()) -> + {string(), pos_integer()}. +to_server_raw(<<"http://", Server/binary>>) -> + emqx_connector_schema_lib:parse_server(Server, ?INFLUXDB_HOST_OPTIONS); +to_server_raw(<<"https://", Server/binary>>) -> + emqx_connector_schema_lib:parse_server(Server, ?INFLUXDB_HOST_OPTIONS); +to_server_raw(Server) -> + emqx_connector_schema_lib:parse_server(Server, ?INFLUXDB_HOST_OPTIONS). + +str(A) when is_atom(A) -> + atom_to_list(A); +str(B) when is_binary(B) -> + binary_to_list(B); +str(S) when is_list(S) -> + S. + +%%=================================================================== +%% eunit tests +%%=================================================================== + +-ifdef(TEST). 
+-include_lib("eunit/include/eunit.hrl"). + +to_server_raw_test_() -> + [ + ?_assertEqual( + {"foobar", 1234}, + to_server_raw(<<"http://foobar:1234">>) + ), + ?_assertEqual( + {"foobar", 1234}, + to_server_raw(<<"https://foobar:1234">>) + ), + ?_assertEqual( + {"foobar", 1234}, + to_server_raw(<<"foobar:1234">>) + ) + ]. + +%% for coverage +desc_test_() -> + [ + ?_assertMatch( + {desc, _, _}, + desc(common) + ), + ?_assertMatch( + {desc, _, _}, + desc(influxdb_udp) + ), + ?_assertMatch( + {desc, _, _}, + desc(influxdb_api_v1) + ), + ?_assertMatch( + {desc, _, _}, + desc(influxdb_api_v2) + ), + ?_assertMatch( + {desc, _, _}, + server(desc) + ), + ?_assertMatch( + connector_influxdb, + namespace() + ) + ]. +-endif. diff --git a/lib-ee/emqx_ee_connector/test/ee_connector_hstreamdb_SUITE.erl b/lib-ee/emqx_ee_connector/test/ee_connector_hstreamdb_SUITE.erl new file mode 100644 index 000000000..4de456b2b --- /dev/null +++ b/lib-ee/emqx_ee_connector/test/ee_connector_hstreamdb_SUITE.erl @@ -0,0 +1,16 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(ee_connector_hstreamdb_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +%% TODO: diff --git a/lib-ee/emqx_license/etc/emqx_license.conf b/lib-ee/emqx_license/etc/emqx_license.conf new file mode 100644 index 000000000..e69de29bb diff --git a/lib-ee/emqx_license/test/emqx_license_SUITE.erl b/lib-ee/emqx_license/test/emqx_license_SUITE.erl index f1cc7dbf8..b403d763d 100644 --- a/lib-ee/emqx_license/test/emqx_license_SUITE.erl +++ b/lib-ee/emqx_license/test/emqx_license_SUITE.erl @@ -42,6 +42,86 @@ set_override_paths(_TestCase) -> clean_overrides(_TestCase, _Config) -> ok. 
+setup_test(TestCase, Config) when + TestCase =:= t_update_file_cluster_backup +-> + DataDir = ?config(data_dir, Config), + {LicenseKey, _License} = mk_license( + [ + %% license format version + "220111", + %% license type + "0", + %% customer type + "10", + %% customer name + "Foo", + %% customer email + "contact@foo.com", + %% deplayment name + "bar-deployment", + %% start date + "20220111", + %% days + "100000", + %% max connections + "19" + ] + ), + Cluster = emqx_common_test_helpers:emqx_cluster( + [core, core], + [ + {apps, [emqx_conf, emqx_license]}, + {load_schema, false}, + {schema_mod, emqx_ee_conf_schema}, + {env_handler, fun + (emqx) -> + emqx_config:save_schema_mod_and_names(emqx_ee_conf_schema), + %% emqx_config:save_schema_mod_and_names(emqx_license_schema), + application:set_env(emqx, boot_modules, []), + application:set_env( + emqx, + data_dir, + filename:join([ + DataDir, + TestCase, + node() + ]) + ), + ok; + (emqx_conf) -> + emqx_config:save_schema_mod_and_names(emqx_ee_conf_schema), + %% emqx_config:save_schema_mod_and_names(emqx_license_schema), + application:set_env( + emqx, + data_dir, + filename:join([ + DataDir, + TestCase, + node() + ]) + ), + ok; + (emqx_license) -> + LicensePath = filename:join(emqx_license:license_dir(), "emqx.lic"), + filelib:ensure_dir(LicensePath), + ok = file:write_file(LicensePath, LicenseKey), + LicConfig = #{type => file, file => LicensePath}, + emqx_config:put([license], LicConfig), + RawConfig = #{<<"type">> => file, <<"file">> => LicensePath}, + emqx_config:put_raw([<<"license">>], RawConfig), + ok = persistent_term:put( + emqx_license_test_pubkey, + emqx_license_test_lib:public_key_pem() + ), + ok; + (_) -> + ok + end} + ] + ), + Nodes = [emqx_common_test_helpers:start_slave(Name, Opts) || {Name, Opts} <- Cluster], + [{nodes, Nodes}, {cluster, Cluster}, {old_license, LicenseKey}]; setup_test(_TestCase, _Config) -> []. 
diff --git a/mix.exs b/mix.exs index 0f0d67803..f43ca7119 100644 --- a/mix.exs +++ b/mix.exs @@ -44,10 +44,10 @@ defmodule EMQXUmbrella.MixProject do # we need several overrides here because dependencies specify # other exact versions, and not ranges. [ - {:lc, github: "emqx/lc", tag: "0.3.1"}, + {:lc, github: "emqx/lc", tag: "0.3.2", override: true}, {:redbug, "2.0.7"}, {:typerefl, github: "ieQu1/typerefl", tag: "0.9.1", override: true}, - {:ehttpc, github: "emqx/ehttpc", tag: "0.3.0"}, + {:ehttpc, github: "emqx/ehttpc", tag: "0.4.0", override: true}, {:gproc, github: "uwiger/gproc", tag: "0.8.0", override: true}, {:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true}, {:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true}, @@ -56,17 +56,18 @@ defmodule EMQXUmbrella.MixProject do {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.7", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.7", override: true}, - {:ecpool, github: "emqx/ecpool", tag: "0.5.2"}, - {:replayq, "0.3.4", override: true}, + {:ecpool, github: "emqx/ecpool", tag: "0.5.2", override: true}, + {:replayq, github: "emqx/replayq", tag: "0.3.5", override: true}, {:pbkdf2, github: "emqx/erlang-pbkdf2", tag: "2.0.4", override: true}, - {:emqtt, github: "emqx/emqtt", tag: "1.6.0", override: true}, + {:emqtt, github: "emqx/emqtt", tag: "1.7.0-rc.2", override: true}, {:rulesql, github: "emqx/rulesql", tag: "0.1.4"}, {:observer_cli, "1.7.1"}, {:system_monitor, github: "ieQu1/system_monitor", tag: "3.0.3"}, + {:telemetry, "1.1.0"}, # in conflict by emqtt and hocon {:getopt, "1.0.2", override: true}, {:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.0", override: true}, - {:hocon, github: "emqx/hocon", tag: "0.30.0", override: true}, + {:hocon, github: "emqx/hocon", tag: "0.31.2", override: true}, {:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.1", override: true}, {:esasl, github: "emqx/esasl", tag: "0.2.0"}, 
{:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"}, @@ -90,7 +91,9 @@ defmodule EMQXUmbrella.MixProject do # in conflict by grpc and eetcd {:gpb, "4.19.5", override: true, runtime: false} ] ++ - umbrella_apps() ++ enterprise_apps(profile_info) ++ bcrypt_dep() ++ jq_dep() ++ quicer_dep() + umbrella_apps() ++ + enterprise_apps(profile_info) ++ + enterprise_deps(profile_info) ++ bcrypt_dep() ++ jq_dep() ++ quicer_dep() end defp umbrella_apps() do @@ -124,6 +127,23 @@ defmodule EMQXUmbrella.MixProject do [] end + defp enterprise_deps(_profile_info = %{edition_type: :enterprise}) do + [ + {:hstreamdb_erl, github: "hstreamdb/hstreamdb_erl", tag: "0.2.5"}, + {:influxdb, github: "emqx/influxdb-client-erl", tag: "1.1.4", override: true}, + {:wolff, github: "kafka4beam/wolff", tag: "1.7.0"}, + {:kafka_protocol, github: "kafka4beam/kafka_protocol", tag: "4.1.0", override: true}, + {:brod_gssapi, github: "kafka4beam/brod_gssapi", tag: "v0.1.0-rc1"}, + {:brod, github: "kafka4beam/brod", tag: "3.16.4"}, + {:snappyer, "1.2.8", override: true}, + {:supervisor3, "1.1.11", override: true} + ] + end + + defp enterprise_deps(_profile_info) do + [] + end + defp releases() do [ emqx: fn -> @@ -188,6 +208,7 @@ defmodule EMQXUmbrella.MixProject do redbug: :permanent, xmerl: :permanent, hocon: :load, + telemetry: :permanent, emqx: :load, emqx_conf: :load, emqx_machine: :permanent @@ -234,7 +255,9 @@ defmodule EMQXUmbrella.MixProject do if(edition_type == :enterprise, do: [ emqx_license: :permanent, - emqx_enterprise_conf: :load + emqx_ee_conf: :load, + emqx_ee_connector: :permanent, + emqx_ee_bridge: :permanent ], else: [] ) @@ -362,8 +385,8 @@ defmodule EMQXUmbrella.MixProject do assigns = template_vars(release, release_type, package_type, edition_type) - # This is generated by `scripts/merge-config.escript` or `make - # conf-segs`. So, this should be run before the release. 
+ # This is generated by `scripts/merge-config.escript` or `make merge-config` + # So, this should be run before the release. # TODO: run as a "compiler" step??? render_template( "apps/emqx_conf/etc/emqx.conf.all", @@ -373,9 +396,9 @@ defmodule EMQXUmbrella.MixProject do if edition_type == :enterprise do render_template( - "apps/emqx_conf/etc/emqx_enterprise.conf.all", + "apps/emqx_conf/etc/emqx-enterprise.conf.all", assigns, - Path.join(etc, "emqx_enterprise.conf") + Path.join(etc, "emqx-enterprise.conf") ) end @@ -605,7 +628,7 @@ defmodule EMQXUmbrella.MixProject do end end - defp emqx_schema_mod(:enterprise), do: :emqx_enterprise_conf_schema + defp emqx_schema_mod(:enterprise), do: :emqx_ee_conf_schema defp emqx_schema_mod(:community), do: :emqx_conf_schema defp bcrypt_dep() do diff --git a/rebar.config b/rebar.config index 80ae099ac..687f49cea 100644 --- a/rebar.config +++ b/rebar.config @@ -44,12 +44,12 @@ {post_hooks,[]}. {deps, - [ {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.1"}}} + [ {lc, {git, "https://github.com/emqx/lc.git", {tag, "0.3.2"}}} , {redbug, "2.0.7"} , {gpb, "4.19.5"} %% gpb only used to build, but not for release, pin it here to avoid fetching a wrong version due to rebar plugins scattered in all the deps , {typerefl, {git, "https://github.com/ieQu1/typerefl", {tag, "0.9.1"}}} , {gun, {git, "https://github.com/emqx/gun", {tag, "1.3.9"}}} - , {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.3.0"}}} + , {ehttpc, {git, "https://github.com/emqx/ehttpc", {tag, "0.4.0"}}} , {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}} , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}} @@ -59,18 +59,19 @@ , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.7"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.7"}}} , {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.2"}}} - , {replayq, "0.3.4"} + , 
{replayq, {git, "https://github.com/emqx/replayq.git", {tag, "0.3.5"}}} , {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}} - , {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.6.0"}}} + , {emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.7.0-rc.2"}}} , {rulesql, {git, "https://github.com/emqx/rulesql", {tag, "0.1.4"}}} , {observer_cli, "1.7.1"} % NOTE: depends on recon 2.5.x , {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}} , {getopt, "1.0.2"} , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}} - , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.30.0"}}} + , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.31.2"}}} , {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.1"}}} , {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}} , {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}} + , {telemetry, "1.1.0"} ]}. {xref_ignores, diff --git a/rebar.config.erl b/rebar.config.erl index 85130fb45..8c8f87fa8 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -150,7 +150,8 @@ test_deps() -> {bbmustache, "1.10.0"}, {meck, "0.9.2"}, {proper, "1.4.0"}, - {er_coap_client, {git, "https://github.com/emqx/er_coap_client", {tag, "v1.0.5"}}} + {er_coap_client, {git, "https://github.com/emqx/er_coap_client", {tag, "v1.0.5"}}}, + {erl_csv, "0.2.0"} ]. common_compile_opts(Vsn) -> @@ -312,7 +313,7 @@ overlay_vars_edition(ce) -> ]; overlay_vars_edition(ee) -> [ - {emqx_schema_mod, emqx_enterprise_conf_schema}, + {emqx_schema_mod, emqx_ee_conf_schema}, {is_enterprise, "yes"} ]. 
@@ -360,6 +361,7 @@ relx_apps(ReleaseType, Edition) -> redbug, xmerl, {hocon, load}, + telemetry, % started by emqx_machine {emqx, load}, {emqx_conf, load}, @@ -410,7 +412,9 @@ is_app(Name) -> relx_apps_per_edition(ee) -> [ emqx_license, - {emqx_enterprise_conf, load} + {emqx_ee_conf, load}, + emqx_ee_connector, + emqx_ee_bridge ]; relx_apps_per_edition(ce) -> []. @@ -477,7 +481,7 @@ emqx_etc_overlay_per_edition(ce) -> ]; emqx_etc_overlay_per_edition(ee) -> [ - {"{{base_dir}}/lib/emqx_conf/etc/emqx_enterprise.conf.all", "etc/emqx_enterprise.conf"}, + {"{{base_dir}}/lib/emqx_conf/etc/emqx-enterprise.conf.all", "etc/emqx-enterprise.conf"}, {"{{base_dir}}/lib/emqx_conf/etc/emqx.conf.all", "etc/emqx.conf"} ]. diff --git a/scripts/apps-version-check.sh b/scripts/apps-version-check.sh index f760c18b3..95e71f128 100755 --- a/scripts/apps-version-check.sh +++ b/scripts/apps-version-check.sh @@ -20,9 +20,17 @@ for app in ${APPS}; do app_path="." fi src_file="$app_path/src/$(basename "$app").app.src" - old_app_version="$(git show "$latest_release":"$src_file" | grep vsn | grep -oE '"[0-9]+\.[0-9]+\.[0-9]+"' | tr -d '"')" + if git show "$latest_release":"$src_file" >/dev/null 2>&1; then + old_app_version="$(git show "$latest_release":"$src_file" | grep vsn | grep -oE '"[0-9]+\.[0-9]+\.[0-9]+"' | tr -d '"')" + else + old_app_version='not_found' + fi now_app_version=$(grep -E 'vsn' "$src_file" | grep -oE '"[0-9]+\.[0-9]+\.[0-9]+"' | tr -d '"') - if [ "$old_app_version" = "$now_app_version" ]; then + + if [ "$old_app_version" = 'not_found' ]; then + echo "IGNORE: $src_file is newly added" + true + elif [ "$old_app_version" = "$now_app_version" ]; then changed_lines="$(git diff "$latest_release"...HEAD --ignore-blank-lines -G "$no_comment_re" \ -- "$app_path/src" \ -- "$app_path/include" \ @@ -30,7 +38,7 @@ for app in ${APPS}; do -- "$app_path/priv" \ -- "$app_path/c_src" | wc -l ) " if [ "$changed_lines" -gt 0 ]; then - echo "$src_file needs a vsn bump" + echo "ERROR: $src_file 
needs a vsn bump" bad_app_count=$(( bad_app_count + 1)) fi else diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index aa6f55022..18dfb2525 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -10,12 +10,20 @@ cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")/../.." help() { echo echo "-h|--help: To display this usage info" - echo "--app lib_dir/app_name: Print apps in json" + echo "--app lib_dir/app_name: For which app to run start docker-compose, and run common tests" + echo "--suites SUITE1,SUITE2: Comma separated SUITE names to run. e.g. apps/emqx/test/emqx_SUITE.erl" echo "--console: Start EMQX in console mode" + echo "--attach: Attach to the Erlang docker container without running any test case" + echo "--only-up: Only start the testbed but do not run CT" + echo "--keep-up: Keep the testbed running after CT" } WHICH_APP='novalue' CONSOLE='no' +KEEP_UP='no' +ONLY_UP='no' +SUITES='' +ATTACH='no' while [ "$#" -gt 0 ]; do case $1 in -h|--help) @@ -26,10 +34,26 @@ while [ "$#" -gt 0 ]; do WHICH_APP="$2" shift 2 ;; + --only-up) + ONLY_UP='yes' + shift 1 + ;; + --keep-up) + KEEP_UP='yes' + shift 1 + ;; + --attach) + ATTACH='yes' + shift 1 + ;; --console) CONSOLE='yes' shift 1 ;; + --suites) + SUITES="$2" + shift 2 + ;; *) echo "unknown option $1" exit 1 @@ -45,11 +69,21 @@ fi ERLANG_CONTAINER='erlang24' DOCKER_CT_ENVS_FILE="${WHICH_APP}/docker-ct" +case "${WHICH_APP}" in + lib-ee*) + ## ensure enterprise profile when testing lib-ee applications + export PROFILE='emqx-enterprise' + ;; + *) + export PROFILE="${PROFILE:-emqx}" + ;; +esac + if [ -f "$DOCKER_CT_ENVS_FILE" ]; then # shellcheck disable=SC2002 CT_DEPS="$(cat "$DOCKER_CT_ENVS_FILE" | xargs)" fi -CT_DEPS="${ERLANG_CONTAINER} ${CT_DEPS}" +CT_DEPS="${ERLANG_CONTAINER} ${CT_DEPS:-}" FILES=( ) @@ -58,10 +92,21 @@ for dep in ${CT_DEPS}; do erlang24) FILES+=( '.ci/docker-compose-file/docker-compose.yaml' ) ;; + toxiproxy) + FILES+=( '.ci/docker-compose-file/docker-compose-toxiproxy.yaml' ) + ;; + influxdb) + 
FILES+=( '.ci/docker-compose-file/docker-compose-influxdb-tcp.yaml' + '.ci/docker-compose-file/docker-compose-influxdb-tls.yaml' ) + ;; mongo) FILES+=( '.ci/docker-compose-file/docker-compose-mongo-single-tcp.yaml' '.ci/docker-compose-file/docker-compose-mongo-single-tls.yaml' ) ;; + mongo_rs_sharded) + FILES+=( '.ci/docker-compose-file/docker-compose-mongo-replicaset-tcp.yaml' + '.ci/docker-compose-file/docker-compose-mongo-sharded-tcp.yaml' ) + ;; redis) FILES+=( '.ci/docker-compose-file/docker-compose-redis-single-tcp.yaml' '.ci/docker-compose-file/docker-compose-redis-single-tls.yaml' @@ -76,6 +121,12 @@ for dep in ${CT_DEPS}; do FILES+=( '.ci/docker-compose-file/docker-compose-pgsql-tcp.yaml' '.ci/docker-compose-file/docker-compose-pgsql-tls.yaml' ) ;; + kafka) + # Kafka container generates root owned ssl files + # the files are shared with EMQX (with a docker volume) + NEED_ROOT=yes + FILES+=( '.ci/docker-compose-file/docker-compose-kafka.yaml' ) + ;; *) echo "unknown_ct_dependency $dep" exit 1 @@ -89,13 +140,19 @@ for file in "${FILES[@]}"; do F_OPTIONS="$F_OPTIONS -f $file" done -# Passing $UID to docker-compose to be used in erlang container -# as owner of the main process to avoid git repo permissions issue. -# Permissions issue happens because we are mounting local filesystem -# where files are owned by $UID to docker container where it's using -# root (UID=0) by default, and git is not happy about it. +if [[ "${NEED_ROOT:-}" == 'yes' ]]; then + export UID_GID='root:root' +else + # Passing $UID to docker-compose to be used in erlang container + # as owner of the main process to avoid git repo permissions issue. + # Permissions issue happens because we are mounting local filesystem + # where files are owned by $UID to docker container where it's using + # root (UID=0) by default, and git is not happy about it. 
+ export UID_GID="$UID:$UID" +fi + # shellcheck disable=2086 # no quotes for F_OPTIONS -UID_GID="$UID:$UID" docker-compose $F_OPTIONS up -d --build +docker-compose $F_OPTIONS up -d --build --remove-orphans # /emqx is where the source dir is mounted to the Erlang container # in .ci/docker-compose-file/docker-compose.yaml @@ -104,17 +161,29 @@ if [[ -t 1 ]]; then TTY='-t' fi +echo "Fixing file owners and permissions for $UID_GID" # rebar and hex cache directory need to be writable by $UID -docker exec -i $TTY -u root:root "$ERLANG_CONTAINER" bash -c "mkdir /.cache && chown $UID:$UID /.cache" +docker exec -i $TTY -u root:root "$ERLANG_CONTAINER" bash -c "mkdir -p /.cache && chown $UID_GID /.cache && chown -R $UID_GID /emqx" # need to initialize .erlang.cookie manually here because / is not writable by $UID -docker exec -i $TTY -u root:root "$ERLANG_CONTAINER" bash -c "openssl rand -base64 16 > /.erlang.cookie && chown $UID:$UID /.erlang.cookie && chmod 0400 /.erlang.cookie" -if [ "$CONSOLE" = 'yes' ]; then - docker exec -i $TTY "$ERLANG_CONTAINER" bash -c "make run" +docker exec -i $TTY -u root:root "$ERLANG_CONTAINER" bash -c "openssl rand -base64 16 > /.erlang.cookie && chown $UID_GID /.erlang.cookie && chmod 0400 /.erlang.cookie" + +if [ "$ONLY_UP" = 'yes' ]; then + exit 0 +fi + +if [ "$ATTACH" = 'yes' ]; then + docker exec -it "$ERLANG_CONTAINER" bash +elif [ "$CONSOLE" = 'yes' ]; then + docker exec -e PROFILE="$PROFILE" -i $TTY "$ERLANG_CONTAINER" bash -c "make run" else set +e - docker exec -i $TTY "$ERLANG_CONTAINER" bash -c "make ${WHICH_APP}-ct" + docker exec -e PROFILE="$PROFILE" -i $TTY -e EMQX_CT_SUITES="$SUITES" "$ERLANG_CONTAINER" bash -c "BUILD_WITHOUT_QUIC=1 make ${WHICH_APP}-ct" RESULT=$? 
- # shellcheck disable=2086 # no quotes for F_OPTIONS - UID_GID="$UID:$UID" docker-compose $F_OPTIONS down - exit $RESULT + if [ "$KEEP_UP" = 'yes' ]; then + exit $RESULT + else + # shellcheck disable=2086 # no quotes for F_OPTIONS + docker-compose $F_OPTIONS down + exit $RESULT + fi fi diff --git a/scripts/find-apps.sh b/scripts/find-apps.sh index 8c41fecbf..fefb73568 100755 --- a/scripts/find-apps.sh +++ b/scripts/find-apps.sh @@ -8,25 +8,18 @@ cd -P -- "$(dirname -- "$0")/.." help() { echo echo "-h|--help: To display this usage info" - echo "--ct fast|docker: Print apps which needs docker-compose to run ct" - echo "--json: Print apps in json" + echo "--ci fast|docker: Print apps in json format for github ci mtrix" } -WANT_JSON='no' -CT='novalue' +CI='novalue' while [ "$#" -gt 0 ]; do case $1 in -h|--help) help exit 0 ;; - --json) - WANT_JSON='yes' - shift 1 - ;; - - --ct) - CT="$2" + --ci) + CI="$2" shift 2 ;; *) @@ -52,27 +45,58 @@ CE="$(find_app 'apps')" EE="$(find_app 'lib-ee')" APPS_ALL="$(echo -e "${CE}\n${EE}")" -if [ "$CT" = 'novalue' ]; then - RESULT="${APPS_ALL}" -else - APPS_NORMAL_CT=( ) - APPS_DOCKER_CT=( ) - for app in ${APPS_ALL}; do - if [ -f "${app}/docker-ct" ]; then - APPS_DOCKER_CT+=("$app") - else - APPS_NORMAL_CT+=("$app") - fi - done - if [ "$CT" = 'docker' ]; then - RESULT="${APPS_DOCKER_CT[*]}" - else - RESULT="${APPS_NORMAL_CT[*]}" - fi +if [ "$CI" = 'novalue' ]; then + echo "${APPS_ALL}" + exit 0 fi -if [ "$WANT_JSON" = 'yes' ]; then - echo "${RESULT}" | xargs | tr -d '\n' | jq -R -s -c 'split(" ")' -else - echo "${RESULT}" | xargs -fi +################################################## +###### now deal with the github action's matrix. 
+################################################## + +dimensions() { + app="$1" + if [ -f "${app}/docker-ct" ]; then + if [[ "$CI" != 'docker' ]]; then + return + fi + else + if [[ "$CI" != 'fast' ]]; then + return + fi + fi + case "${app}" in + apps/*) + profile='emqx' + ;; + lib-ee/*) + profile='emqx-enterprise' + ;; + *) + echo "unknown app: $app" + exit 1 + ;; + esac + ## poor-man's json formatter + echo -n -e "[\"$app\", \"$profile\"]" +} + +matrix() { + first_row='yes' + for app in ${APPS_ALL}; do + row="$(dimensions "$app")" + if [ -z "$row" ]; then + continue + fi + if [ "$first_row" = 'yes' ]; then + first_row='no' + echo -n "$row" + else + echo -n ",${row}" + fi + done +} + +echo -n '[' +matrix +echo ']' diff --git a/scripts/find-suites.sh b/scripts/find-suites.sh index 4d2fd3bee..e7c1b422e 100755 --- a/scripts/find-suites.sh +++ b/scripts/find-suites.sh @@ -8,5 +8,9 @@ set -euo pipefail # ensure dir cd -P -- "$(dirname -- "$0")/.." -TESTDIR="$1/test" -find "${TESTDIR}" -name "*_SUITE.erl" -print0 2>/dev/null | xargs -0 | tr ' ' ',' +if [ -z "${EMQX_CT_SUITES:-}" ]; then + TESTDIR="$1/test" + find "${TESTDIR}" -name "*_SUITE.erl" -print0 2>/dev/null | xargs -0 | tr ' ' ',' +else + echo "${EMQX_CT_SUITES}" +fi diff --git a/scripts/merge-config.escript b/scripts/merge-config.escript index 67551bfbe..1b30dbd1d 100755 --- a/scripts/merge-config.escript +++ b/scripts/merge-config.escript @@ -13,21 +13,32 @@ main(_) -> {ok, BaseConf} = file:read_file("apps/emqx_conf/etc/emqx_conf.conf"), - Cfgs = get_all_cfgs("apps/"), + IsEnterprise = is_enterprise(), + Enterprise = + case IsEnterprise of + false -> []; + true -> [io_lib:nl(), "include emqx-enterprise.conf", io_lib:nl()] + end, Conf = [ merge(BaseConf, Cfgs), io_lib:nl(), - io_lib:nl(), - "include emqx_enterprise.conf", - io_lib:nl() + Enterprise ], ok = file:write_file("apps/emqx_conf/etc/emqx.conf.all", Conf), - EnterpriseCfgs = get_all_cfgs("lib-ee/"), - EnterpriseConf = merge("", EnterpriseCfgs), + case 
IsEnterprise of + true -> + EnterpriseCfgs = get_all_cfgs("lib-ee"), + EnterpriseConf = merge("", EnterpriseCfgs), + ok = file:write_file("apps/emqx_conf/etc/emqx-enterprise.conf.all", EnterpriseConf); + false -> + ok + end. - ok = file:write_file("apps/emqx_conf/etc/emqx_enterprise.conf.all", EnterpriseConf). +is_enterprise() -> + Profile = os:getenv("PROFILE", "emqx"), + nomatch =/= string:find(Profile, "enterprise"). merge(BaseConf, Cfgs) -> lists:foldl(