diff --git a/.ci/docker-compose-file/.env b/.ci/docker-compose-file/.env index d33637ea0..3b00b454f 100644 --- a/.ci/docker-compose-file/.env +++ b/.ci/docker-compose-file/.env @@ -7,6 +7,7 @@ INFLUXDB_TAG=2.5.0 TDENGINE_TAG=3.0.2.4 DYNAMO_TAG=1.21.0 CASSANDRA_TAG=3.11.6 +OPENTS_TAG=9aa7f88 MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server SQLSERVER_TAG=2019-CU19-ubuntu-20.04 diff --git a/.ci/docker-compose-file/docker-compose-iotdb.yaml b/.ci/docker-compose-file/docker-compose-iotdb.yaml new file mode 100644 index 000000000..2e1ea881e --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-iotdb.yaml @@ -0,0 +1,31 @@ +version: '3.9' + +services: + iotdb: + container_name: iotdb + hostname: iotdb + image: apache/iotdb:1.1.0-standalone + restart: always + environment: + - enable_rest_service=true + - cn_internal_address=iotdb + - cn_internal_port=10710 + - cn_consensus_port=10720 + - cn_target_config_node_list=iotdb:10710 + - dn_rpc_address=iotdb + - dn_internal_address=iotdb + - dn_rpc_port=6667 + - dn_mpp_data_exchange_port=10740 + - dn_schema_region_consensus_port=10750 + - dn_data_region_consensus_port=10760 + - dn_target_config_node_list=iotdb:10710 + # volumes: + # - ./data:/iotdb/data + # - ./logs:/iotdb/logs + expose: + - "18080" + # IoTDB's REST interface, uncomment for local testing + # ports: + # - "18080:18080" + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-opents.yaml b/.ci/docker-compose-file/docker-compose-opents.yaml new file mode 100644 index 000000000..545aeb015 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-opents.yaml @@ -0,0 +1,9 @@ +version: '3.9' + +services: + opents_server: + container_name: opents + image: petergrace/opentsdb-docker:${OPENTS_TAG} + restart: always + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-oracle.yaml b/.ci/docker-compose-file/docker-compose-oracle.yaml new file mode 100644 index 000000000..ea8965846 --- /dev/null +++ 
b/.ci/docker-compose-file/docker-compose-oracle.yaml @@ -0,0 +1,11 @@ +version: '3.9' + +services: + oracle_server: + container_name: oracle + image: oracleinanutshell/oracle-xe-11g:1.0.0 + restart: always + environment: + ORACLE_DISABLE_ASYNCH_IO: true + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-pulsar.yaml b/.ci/docker-compose-file/docker-compose-pulsar.yaml new file mode 100644 index 000000000..926000ae4 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-pulsar.yaml @@ -0,0 +1,32 @@ +version: '3' + +services: + pulsar: + container_name: pulsar + image: apachepulsar/pulsar:2.11.0 + # ports: + # - 6650:6650 + # - 8080:8080 + networks: + emqx_bridge: + volumes: + - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem + - ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem + - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem + restart: always + command: + - bash + - "-c" + - | + sed -i 's/^advertisedAddress=/#advertisedAddress=/' conf/standalone.conf + sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf + sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf + sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf + sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf + sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf + sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf + sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf + sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf + sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' 
conf/standalone.conf + echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf + bin/pulsar standalone -nfw -nss diff --git a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml index ba5e831a5..d91118406 100644 --- a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml +++ b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml @@ -26,6 +26,8 @@ services: - 19876:9876 - 19042:9042 - 19142:9142 + - 14242:4242 + - 28080:18080 command: - "-host=0.0.0.0" - "-config=/config/toxiproxy.json" diff --git a/.ci/docker-compose-file/scripts/run-emqx.sh b/.ci/docker-compose-file/scripts/run-emqx.sh index 8f124aa63..fc61c9da1 100755 --- a/.ci/docker-compose-file/scripts/run-emqx.sh +++ b/.ci/docker-compose-file/scripts/run-emqx.sh @@ -20,8 +20,8 @@ esac { echo "HOCON_ENV_OVERRIDE_PREFIX=EMQX_" - echo "EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s" - echo "EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10" + echo "EMQX_MQTT__RETRY_INTERVAL=2s" + echo "EMQX_MQTT__MAX_TOPIC_ALIAS=10" echo "EMQX_AUTHORIZATION__SOURCES=[]" echo "EMQX_AUTHORIZATION__NO_MATCH=allow" } >> .ci/docker-compose-file/conf.cluster.env diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index da2dff763..dee3134f5 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -101,5 +101,35 @@ "listen": "0.0.0.0:1433", "upstream": "sqlserver:1433", "enabled": true + }, + { + "name": "opents", + "listen": "0.0.0.0:4242", + "upstream": "opents:4242", + "enabled": true + }, + { + "name": "pulsar_plain", + "listen": "0.0.0.0:6652", + "upstream": "pulsar:6652", + "enabled": true + }, + { + "name": "pulsar_tls", + "listen": "0.0.0.0:6653", + "upstream": "pulsar:6653", + "enabled": true + }, + { + "name": "oracle", + "listen": "0.0.0.0:1521", + "upstream": 
"oracle:1521", + "enabled": true + }, + { + "name": "iotdb", + "listen": "0.0.0.0:18080", + "upstream": "iotdb:18080", + "enabled": true } ] diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 63fcc2c84..eab9dc115 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -35,10 +35,7 @@ jobs: - name: Get profile to build id: get_profile run: | - set -e - THISDIR="$(pwd)" - echo "Adding $THISDIR as safe dir for git" - git config --global --add safe.directory "${THISDIR}" + git config --global --add safe.directory "$GITHUB_WORKSPACE" tag=${{ github.ref }} if git describe --tags --match "[v|e]*" --exact; then echo "WARN: This is an exact git tag, will publish release" @@ -233,7 +230,7 @@ jobs: ARCH: ${{ matrix.arch }} run: | set -eu - git config --global --add safe.directory "/__w/emqx/emqx" + git config --global --add safe.directory "$GITHUB_WORKSPACE" # Align path for CMake caches if [ ! "$PWD" = "/emqx" ]; then ln -s $PWD /emqx diff --git a/.github/workflows/build_packages_cron.yaml b/.github/workflows/build_packages_cron.yaml new file mode 100644 index 000000000..7f6773f4a --- /dev/null +++ b/.github/workflows/build_packages_cron.yaml @@ -0,0 +1,130 @@ +name: Scheduled build packages + +concurrency: + group: build-${{ github.event_name }}-${{ github.ref }} + cancel-in-progress: true + +on: + schedule: + - cron: '0 */6 * * *' + workflow_dispatch: + +jobs: + linux: + if: github.repository_owner == 'emqx' + runs-on: aws-${{ matrix.arch }} + # always run in builder container because the host might have the wrong OTP version etc. + # otherwise buildx.sh does not run docker if arch and os matches the target arch and os. 
+ container: + image: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}" + + strategy: + fail-fast: false + matrix: + profile: + - ['emqx', 'master'] + - ['emqx-enterprise', 'release-50'] + branch: + - master + - release-50 + otp: + - 24.3.4.2-3 + arch: + - amd64 + os: + - debian10 + - amzn2 + builder: + - 5.0-34 + elixir: + - 1.13.4 + + defaults: + run: + shell: bash + + steps: + - uses: emqx/self-hosted-cleanup-action@v1.0.3 + - uses: actions/checkout@v3 + with: + ref: ${{ matrix.profile[1] }} + fetch-depth: 0 + + - name: build emqx packages + env: + ELIXIR: ${{ matrix.elixir }} + PROFILE: ${{ matrix.profile[0] }} + ARCH: ${{ matrix.arch }} + run: | + set -eu + git config --global --add safe.directory "$GITHUB_WORKSPACE" + PKGTYPES="tgz pkg" + IS_ELIXIR="no" + for PKGTYPE in ${PKGTYPES}; + do + ./scripts/buildx.sh \ + --profile "${PROFILE}" \ + --pkgtype "${PKGTYPE}" \ + --arch "${ARCH}" \ + --elixir "${IS_ELIXIR}" \ + --builder "force_host" + done + - uses: actions/upload-artifact@v3 + if: success() + with: + name: ${{ matrix.profile[0] }} + path: _packages/${{ matrix.profile[0] }}/ + - name: Send notification to Slack + uses: slackapi/slack-github-action@v1.23.0 + if: failure() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "Scheduled build of ${{ matrix.profile[0] }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} + + mac: + runs-on: ${{ matrix.os }} + if: github.repository_owner == 'emqx' + + strategy: + fail-fast: false + matrix: + profile: + - emqx + branch: + - master + otp: + - 24.3.4.2-3 + os: + - macos-12 + - macos-12-arm64 + + steps: + - uses: emqx/self-hosted-cleanup-action@v1.0.3 + - uses: actions/checkout@v3 + with: + ref: ${{ matrix.branch }} + fetch-depth: 0 + - uses: ./.github/actions/package-macos + with: + profile: ${{ matrix.profile }} + otp: ${{ matrix.otp }} + os: ${{ 
matrix.os }} + apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }} + apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} + apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} + apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} + - uses: actions/upload-artifact@v3 + if: success() + with: + name: ${{ matrix.profile }} + path: _packages/${{ matrix.profile }}/ + - name: Send notification to Slack + uses: slackapi/slack-github-action@v1.23.0 + if: failure() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index 9ae5ba944..06bcb98a2 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -194,15 +194,12 @@ jobs: run: | CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG) HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID) - export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='yes' ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT docker stop $CID - name: test two nodes cluster with proto_dist=inet_tls in docker run: | ./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy) - # versions before 5.0.22 have hidden fields included in the API spec - export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='no' ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT # cleanup ./scripts/test/start-two-nodes-in-docker.sh -c diff --git a/.github/workflows/geen_master.yaml b/.github/workflows/green_master.yaml similarity index 100% rename from .github/workflows/geen_master.yaml rename to 
.github/workflows/green_master.yaml diff --git a/.github/workflows/performance_test.yaml b/.github/workflows/performance_test.yaml new file mode 100644 index 000000000..1d474f7b2 --- /dev/null +++ b/.github/workflows/performance_test.yaml @@ -0,0 +1,128 @@ +name: Performance Test Suite + +on: + push: + branches: + - 'perf/**' + schedule: + - cron: '0 1 * * *' + workflow_dispatch: + inputs: + ref: + required: false + +jobs: + prepare: + runs-on: ubuntu-latest + if: github.repository_owner == 'emqx' + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04 + outputs: + BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }} + PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }} + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.event.inputs.ref }} + - name: Work around https://github.com/actions/checkout/issues/766 + run: | + git config --global --add safe.directory "$GITHUB_WORKSPACE" + - id: prepare + run: | + echo "EMQX_NAME=emqx" >> $GITHUB_ENV + echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV + echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT + - name: Build deb package + run: | + make ${EMQX_NAME}-pkg + ./scripts/pkg-tests.sh ${EMQX_NAME}-pkg + - name: Get package file name + id: package_file + run: | + echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT + - uses: actions/upload-artifact@v3 + with: + name: emqx-ubuntu20.04 + path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }} + + tf_emqx_perf_test: + runs-on: ubuntu-latest + needs: + - prepare + env: + TF_VAR_bench_id: ${{ needs.prepare.outputs.BENCH_ID }} + TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }} + TF_VAR_test_duration: 300 + TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }} + TF_AWS_REGION: eu-north-1 + + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + 
aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} + aws-region: eu-north-1 + - name: Checkout tf-emqx-performance-test + uses: actions/checkout@v3 + with: + repository: emqx/tf-emqx-performance-test + path: tf-emqx-performance-test + - uses: actions/download-artifact@v3 + with: + name: emqx-ubuntu20.04 + path: tf-emqx-performance-test/ + - name: Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_wrapper: false + - name: terraform init + working-directory: ./tf-emqx-performance-test + run: | + terraform init + - name: terraform apply + working-directory: ./tf-emqx-performance-test + run: | + terraform apply -auto-approve + - name: Wait for test results + timeout-minutes: 30 + working-directory: ./tf-emqx-performance-test + id: test-results + run: | + sleep $TF_VAR_test_duration + until aws s3api head-object --bucket tf-emqx-performance-test --key "$TF_VAR_bench_id/DONE" > /dev/null 2>&1 + do + printf '.' 
+ sleep 10 + done + echo + aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/metrics.json" ./ + aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/stats.json" ./ + echo MESSAGES_DELIVERED=$(cat metrics.json | jq '[.[]."messages.delivered"] | add') >> $GITHUB_OUTPUT + echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT + - name: Send notification to Slack + if: success() + uses: slackapi/slack-github-action@v1.23.0 + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "EMQX performance test completed.\nMessages delivered: ${{ steps.test-results.outputs.MESSAGES_DELIVERED }}.\nMessages dropped: ${{ steps.test-results.outputs.MESSAGES_DROPPED }}.\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} + - name: terraform destroy + if: always() + working-directory: ./tf-emqx-performance-test + run: | + terraform destroy -auto-approve + - uses: actions/upload-artifact@v3 + if: success() + with: + name: test-results + path: "./tf-emqx-performance-test/*.json" + - uses: actions/upload-artifact@v3 + if: always() + with: + name: terraform + path: | + ./tf-emqx-performance-test/.terraform + ./tf-emqx-performance-test/*.tfstate diff --git a/.github/workflows/run_fvt_tests.yaml b/.github/workflows/run_fvt_tests.yaml index 1bb0486f1..185c76be1 100644 --- a/.github/workflows/run_fvt_tests.yaml +++ b/.github/workflows/run_fvt_tests.yaml @@ -167,8 +167,8 @@ jobs: --set image.pullPolicy=Never \ --set image.tag=$EMQX_TAG \ --set emqxAclConfig="" \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \ + --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \ + --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \ --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \ --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \ deploy/charts/${{ matrix.profile }} \ @@ -185,8 +185,8 @@ jobs: --set 
image.pullPolicy=Never \ --set image.tag=$EMQX_TAG \ --set emqxAclConfig="" \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \ + --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \ + --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \ --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \ --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \ deploy/charts/${{ matrix.profile }} \ diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index fb4f264e7..b82b545df 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -14,6 +14,9 @@ on: - e* pull_request: +env: + IS_CI: "yes" + jobs: build-matrix: runs-on: ubuntu-22.04 @@ -69,21 +72,14 @@ jobs: - uses: actions/checkout@v3 with: path: source - - uses: actions/cache@v3 - id: cache - with: - path: "$HOME/.cache/rebar3/rebar3_${{ matrix.otp }}_plt" - key: rebar3-dialyzer-plt-${{ matrix.otp }} - name: get_all_deps working-directory: source env: PROFILE: ${{ matrix.profile }} - #DIAGNOSTIC: 1 run: | make ensure-rebar3 # fetch all deps and compile - make ${{ matrix.profile }} - make static_checks + make ${{ matrix.profile }}-compile make test-compile cd .. zip -ryq source.zip source/* source/.[^.]* @@ -92,6 +88,34 @@ jobs: name: source-${{ matrix.profile }}-${{ matrix.otp }} path: source.zip + static_checks: + needs: + - build-matrix + - prepare + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} + container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04" + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: . 
+ - name: unzip source code + run: unzip -o -q source.zip + - uses: actions/cache@v3 + with: + path: "source/emqx_dialyzer_${{ matrix.otp }}_plt" + key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }} + - name: run static checks + env: + PROFILE: ${{ matrix.profile }} + working-directory: source + run: make static_checks + eunit_and_proper: needs: - build-matrix @@ -168,6 +192,7 @@ jobs: REDIS_TAG: "7.0" INFLUXDB_TAG: "2.5.0" TDENGINE_TAG: "3.0.2.4" + OPENTS_TAG: "9aa7f88" PROFILE: ${{ matrix.profile }} CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }} diff --git a/APL.txt b/APL.txt index 8dada3eda..dcb926a55 100644 --- a/APL.txt +++ b/APL.txt @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright (c) 2016-2023 EMQ Technologies Co., Ltd. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/LICENSE b/LICENSE index 2a081b135..8ff0a9060 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,7 @@ Source code in this repository is variously licensed under below licenses. -For EMQX: Apache License 2.0, see APL.txt, -which applies to all source files except for lib-ee sub-directory. +For Default: Apache License 2.0, see APL.txt, +which applies to all source files except for folders applied with Business Source License. For EMQX Enterprise (since version 5.0): Business Source License 1.1, -see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory. +see apps/emqx_bridge_kafka/BSL.txt as an example, please check license files under sub directory of apps. 
diff --git a/Makefile b/Makefile index 128f4f31d..6741317ee 100644 --- a/Makefile +++ b/Makefile @@ -4,10 +4,6 @@ SCRIPTS = $(CURDIR)/scripts export EMQX_RELUP ?= true export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11 export EMQX_DEFAULT_RUNNER = debian:11-slim -export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh) -export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh) -export EMQX_DASHBOARD_VERSION ?= v1.2.1 -export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6 export EMQX_REL_FORM ?= tgz export QUICER_DOWNLOAD_FROM_RELEASE = 1 ifeq ($(OS),Windows_NT) @@ -17,6 +13,22 @@ else FIND=find endif +# Dashbord version +# from https://github.com/emqx/emqx-dashboard5 +export EMQX_DASHBOARD_VERSION ?= v1.2.4 +export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6 + +# `:=` should be used here, otherwise the `$(shell ...)` will be executed every time when the variable is used +# In make 4.4+, for backward-compatibility the value from the original environment is used. +# so the shell script will be executed tons of times. 
+# https://github.com/emqx/emqx/pull/10627 +ifeq ($(strip $(OTP_VSN)),) + export OTP_VSN := $(shell $(SCRIPTS)/get-otp-vsn.sh) +endif +ifeq ($(strip $(ELIXIR_VSN)),) + export ELIXIR_VSN := $(shell $(SCRIPTS)/get-elixir-vsn.sh) +endif + PROFILE ?= emqx REL_PROFILES := emqx emqx-enterprise PKG_PROFILES := emqx-pkg emqx-enterprise-pkg @@ -73,6 +85,10 @@ proper: $(REBAR) test-compile: $(REBAR) merge-config $(REBAR) as test compile +.PHONY: $(REL_PROFILES:%=%-compile) +$(REL_PROFILES:%=%-compile): $(REBAR) merge-config + $(REBAR) as $(@:%-compile=%) compile + .PHONY: ct ct: $(REBAR) merge-config @ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct @@ -88,13 +104,17 @@ APPS=$(shell $(SCRIPTS)/find-apps.sh) .PHONY: $(APPS:%=%-ct) define gen-app-ct-target -$1-ct: $(REBAR) - @$(SCRIPTS)/pre-compile.sh $(PROFILE) - @ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \ - --readable=$(CT_READABLE) \ - --name $(CT_NODE_NAME) \ - --cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \ - --suite $(shell $(SCRIPTS)/find-suites.sh $1) +$1-ct: $(REBAR) merge-config + $(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1)) +ifneq ($(SUITES),) + @ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \ + --readable=$(CT_READABLE) \ + --name $(CT_NODE_NAME) \ + --cover_export_name $(CT_COVER_EXPORT_PREFIX)-$(subst /,-,$1) \ + --suite $(SUITES) +else + @echo 'No suites found for $1' +endif endef $(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app)))) @@ -134,6 +154,11 @@ COMMON_DEPS := $(REBAR) $(REL_PROFILES:%=%): $(COMMON_DEPS) @$(BUILD) $(@) rel +.PHONY: compile $(PROFILES:%=compile-%) +compile: $(PROFILES:%=compile-%) +$(PROFILES:%=compile-%): + @$(BUILD) $(@:compile-%=%) apps + ## Not calling rebar3 clean because ## 1. rebar3 clean relies on rebar3, meaning it reads config, fetches dependencies etc. ## 2. 
it's slow @@ -217,11 +242,11 @@ endef $(foreach pt,$(PKG_PROFILES),$(eval $(call gen-pkg-target,$(pt)))) .PHONY: run -run: $(PROFILE) quickrun +run: compile-$(PROFILE) quickrun .PHONY: quickrun quickrun: - ./_build/$(PROFILE)/rel/emqx/bin/emqx console + ./dev -p $(PROFILE) ## Take the currently set PROFILE docker: @@ -239,7 +264,6 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt)))) .PHONY: merge-config: @$(SCRIPTS)/merge-config.escript - @$(SCRIPTS)/merge-i18n.escript ## elixir target is to create release packages using Elixir's Mix .PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir) diff --git a/README-CN.md b/README-CN.md index 3eccc267c..314b34b9a 100644 --- a/README-CN.md +++ b/README-CN.md @@ -73,7 +73,7 @@ EMQX Cloud 文档:[docs.emqx.com/zh/cloud/latest/](https://docs.emqx.com/zh/cl 我们选取了各个编程语言中热门的 MQTT 客户端 SDK,并提供代码示例,帮助您快速掌握 MQTT 客户端库的使用。 -- [MQTT X](https://mqttx.app/zh) +- [MQTTX](https://mqttx.app/zh) 优雅的跨平台 MQTT 5.0 客户端工具,提供了桌面端、命令行、Web 三种版本,帮助您更快的开发和调试 MQTT 服务和应用。 diff --git a/README-RU.md b/README-RU.md index b8be19e80..6baf38e2c 100644 --- a/README-RU.md +++ b/README-RU.md @@ -71,7 +71,7 @@ docker run -d --name emqx -p 1883:1883 -p 8083:8083 -p 8883:8883 -p 8084:8084 -p Мы выбрали популярные SDK клиентов MQTT на различных языках программирования и предоставили примеры кода, которые помогут вам быстро понять, как использовать клиенты MQTT. -- [MQTT X](https://mqttx.app/) +- [MQTTX](https://mqttx.app/) Элегантный кроссплатформенный клиент MQTT 5.0, в виде десктопного приложения, приложения для командной строки и веб-приложения, чтобы помочь вам быстрее разрабатывать и отлаживать службы и приложения MQTT. 
diff --git a/README.md b/README.md index 280371a41..28b8cbf43 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ For more organised improvement proposals, you can send pull requests to [EIP](ht We have selected popular MQTT client SDKs in various programming languages and provided code examples to help you quickly understand the use of MQTT clients. -- [MQTT X](https://mqttx.app/) +- [MQTTX](https://mqttx.app/) An elegant cross-platform MQTT 5.0 client tool that provides desktop, command line, and web to help you develop and debug MQTT services and applications faster. diff --git a/apps/emqx/etc/ssl_dist.conf b/apps/emqx/etc/ssl_dist.conf index af1c7506d..b4c16e2cc 100644 --- a/apps/emqx/etc/ssl_dist.conf +++ b/apps/emqx/etc/ssl_dist.conf @@ -1,4 +1,4 @@ -%% This additional config file is used when the config 'cluster.proto_dis' in emqx.conf is set to 'inet_tls'. +%% This additional config file is used when the config 'cluster.proto_dist' in emqx.conf is set to 'inet_tls'. %% Which means the EMQX nodes will connect to each other over TLS. %% For more information about inter-broker security, see: https://docs.emqx.com/en/enterprise/v5.0/deploy/cluster/security.html diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index b0e987c76..2bb5877f1 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -32,10 +32,10 @@ %% `apps/emqx/src/bpapi/README.md' %% Community edition --define(EMQX_RELEASE_CE, "5.0.22"). +-define(EMQX_RELEASE_CE, "5.0.25-rc.1"). %% Enterprise edition --define(EMQX_RELEASE_EE, "5.0.3"). +-define(EMQX_RELEASE_EE, "5.0.4-alpha.1"). %% the HTTP API version -define(EMQX_API_VERSION, "5.0"). 
diff --git a/apps/emqx/include/http_api.hrl b/apps/emqx/include/http_api.hrl index 08dd08362..ba1438374 100644 --- a/apps/emqx/include/http_api.hrl +++ b/apps/emqx/include/http_api.hrl @@ -57,16 +57,16 @@ -define(ERROR_CODES, [ {?BAD_USERNAME_OR_PWD, <<"Bad username or password">>}, {?BAD_API_KEY_OR_SECRET, <<"Bad API key or secret">>}, - {'BAD_REQUEST', <<"Request parameters are not legal">>}, + {'BAD_REQUEST', <<"Request parameters are invalid">>}, {'NOT_MATCH', <<"Conditions are not matched">>}, {'ALREADY_EXISTS', <<"Resource already existed">>}, - {'BAD_CONFIG_SCHEMA', <<"Configuration data is not legal">>}, + {'BAD_CONFIG_SCHEMA', <<"Configuration data is invalid">>}, {'BAD_LISTENER_ID', <<"Bad listener ID">>}, {'BAD_NODE_NAME', <<"Bad Node Name">>}, {'BAD_RPC', <<"RPC Failed. Check the cluster status and the requested node status">>}, {'BAD_TOPIC', <<"Topic syntax error, Topic needs to comply with the MQTT protocol standard">>}, {'EXCEED_LIMIT', <<"Create resources that exceed the maximum limit or minimum limit">>}, - {'INVALID_PARAMETER', <<"Request parameters is not legal and exceeds the boundary value">>}, + {'INVALID_PARAMETER', <<"Request parameters is invalid and exceeds the boundary value">>}, {'CONFLICT', <<"Conflicting request resources">>}, {'NO_DEFAULT_VALUE', <<"Request parameters do not use default values">>}, {'DEPENDENCY_EXISTS', <<"Resource is dependent by another resource">>}, diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index f7e861f6b..18119607e 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -27,9 +27,9 @@ {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, - 
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.38.2"}}}, + {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.4"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index b2dfca9e1..5ca8fc797 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -3,7 +3,7 @@ {id, "emqx"}, {description, "EMQX Core"}, % strict semver, bump manually! - {vsn, "5.0.23"}, + {vsn, "5.0.25"}, {modules, []}, {registered, []}, {applications, [ diff --git a/apps/emqx/src/emqx.erl b/apps/emqx/src/emqx.erl index ef870685a..ffee5fba7 100644 --- a/apps/emqx/src/emqx.erl +++ b/apps/emqx/src/emqx.erl @@ -30,6 +30,12 @@ stop/0 ]). +%% Cluster API +-export([ + cluster_nodes/1, + running_nodes/0 +]). + %% PubSub API -export([ subscribe/1, @@ -102,6 +108,18 @@ is_running() -> _ -> true end. +%%-------------------------------------------------------------------- +%% Cluster API +%%-------------------------------------------------------------------- + +-spec running_nodes() -> [node()]. +running_nodes() -> + mria:running_nodes(). + +-spec cluster_nodes(all | running | cores | stopped) -> [node()]. +cluster_nodes(Type) -> + mria:cluster_nodes(Type). + %%-------------------------------------------------------------------- %% PubSub API %%-------------------------------------------------------------------- diff --git a/apps/emqx/src/emqx_alarm.erl b/apps/emqx/src/emqx_alarm.erl index 6aa3cb95d..056f36050 100644 --- a/apps/emqx/src/emqx_alarm.erl +++ b/apps/emqx/src/emqx_alarm.erl @@ -42,7 +42,9 @@ get_alarms/0, get_alarms/1, format/1, - format/2 + format/2, + safe_activate/3, + safe_deactivate/1 ]). 
%% gen_server callbacks @@ -57,7 +59,6 @@ %% Internal exports (RPC) -export([ - create_activate_alarm/3, do_get_alarms/0 ]). @@ -123,6 +124,9 @@ activate(Name, Details) -> activate(Name, Details, Message) -> gen_server:call(?MODULE, {activate_alarm, Name, Details, Message}). +safe_activate(Name, Details, Message) -> + safe_call({activate_alarm, Name, Details, Message}). + -spec ensure_deactivated(binary() | atom()) -> ok. ensure_deactivated(Name) -> ensure_deactivated(Name, no_details). @@ -155,6 +159,9 @@ deactivate(Name, Details) -> deactivate(Name, Details, Message) -> gen_server:call(?MODULE, {deactivate_alarm, Name, Details, Message}). +safe_deactivate(Name) -> + safe_call({deactivate_alarm, Name, no_details, <<"">>}). + -spec delete_all_deactivated_alarms() -> ok. delete_all_deactivated_alarms() -> gen_server:call(?MODULE, delete_all_deactivated_alarms). @@ -218,17 +225,12 @@ init([]) -> {ok, #{}, get_validity_period()}. handle_call({activate_alarm, Name, Details, Message}, _From, State) -> - Res = mria:transaction( - mria:local_content_shard(), - fun ?MODULE:create_activate_alarm/3, - [Name, Details, Message] - ), - case Res of - {atomic, Alarm} -> + case create_activate_alarm(Name, Details, Message) of + {ok, Alarm} -> do_actions(activate, Alarm, emqx:get_config([alarm, actions])), {reply, ok, State, get_validity_period()}; - {aborted, Reason} -> - {reply, Reason, State, get_validity_period()} + Err -> + {reply, Err, State, get_validity_period()} end; handle_call({deactivate_alarm, Name, Details, Message}, _From, State) -> case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of @@ -283,9 +285,9 @@ get_validity_period() -> emqx:get_config([alarm, validity_period]). 
create_activate_alarm(Name, Details, Message) -> - case mnesia:read(?ACTIVATED_ALARM, Name) of + case mnesia:dirty_read(?ACTIVATED_ALARM, Name) of [#activated_alarm{name = Name}] -> - mnesia:abort({error, already_existed}); + {error, already_existed}; [] -> Alarm = #activated_alarm{ name = Name, @@ -293,8 +295,8 @@ create_activate_alarm(Name, Details, Message) -> message = normalize_message(Name, iolist_to_binary(Message)), activate_at = erlang:system_time(microsecond) }, - ok = mnesia:write(?ACTIVATED_ALARM, Alarm, write), - Alarm + ok = mria:dirty_write(?ACTIVATED_ALARM, Alarm), + {ok, Alarm} end. do_get_alarms() -> @@ -474,3 +476,19 @@ normalize_message(Name, <<"">>) -> list_to_binary(io_lib:format("~p", [Name])); normalize_message(_Name, Message) -> Message. + +safe_call(Req) -> + try + gen_server:call(?MODULE, Req) + catch + _:{timeout, _} = Reason -> + ?SLOG(warning, #{msg => "emqx_alarm_safe_call_timeout", reason => Reason}), + {error, timeout}; + _:Reason:St -> + ?SLOG(error, #{ + msg => "emqx_alarm_safe_call_exception", + reason => Reason, + stacktrace => St + }), + {error, Reason} + end. diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index 8a936067e..862b72c06 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -89,7 +89,7 @@ %% Authentication Data Cache auth_cache :: maybe(map()), %% Quota checkers - quota :: maybe(emqx_limiter_container:limiter()), + quota :: emqx_limiter_container:limiter(), %% Timers timers :: #{atom() => disabled | maybe(reference())}, %% Conn State @@ -760,7 +760,7 @@ do_publish( handle_out(disconnect, RC, Channel) end. 
-ensure_quota(_, Channel = #channel{quota = undefined}) -> +ensure_quota(_, Channel = #channel{quota = infinity}) -> Channel; ensure_quota(PubRes, Channel = #channel{quota = Limiter}) -> Cnt = lists:foldl( diff --git a/apps/emqx/src/emqx_config.erl b/apps/emqx/src/emqx_config.erl index 54648fca6..630952166 100644 --- a/apps/emqx/src/emqx_config.erl +++ b/apps/emqx/src/emqx_config.erl @@ -18,11 +18,11 @@ -compile({no_auto_import, [get/0, get/1, put/2, erase/1]}). -elvis([{elvis_style, god_modules, disable}]). -include("logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). -export([ init_load/1, init_load/2, - init_load/3, read_override_conf/1, has_deprecated_file/0, delete_override_conf_files/0, @@ -102,6 +102,8 @@ -define(ZONE_CONF_PATH(ZONE, PATH), [zones, ZONE | PATH]). -define(LISTENER_CONF_PATH(TYPE, LISTENER, PATH), [listeners, TYPE, LISTENER | PATH]). +-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound'). + -export_type([ update_request/0, raw_config/0, @@ -150,7 +152,7 @@ get_root([RootName | _]) -> %% @doc For the given path, get raw root value enclosed in a single-key map. %% key is ensured to be binary. get_root_raw([RootName | _]) -> - #{bin(RootName) => do_get_raw([RootName], #{})}. + #{bin(RootName) => get_raw([RootName], #{})}. %% @doc Get a config value for the given path. %% The path should at least include root config name. @@ -163,9 +165,8 @@ get(KeyPath, Default) -> do_get(?CONF, KeyPath, Default). -spec find(emqx_utils_maps:config_key_path()) -> {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}. find([]) -> - Ref = make_ref(), - case do_get(?CONF, [], Ref) of - Ref -> {not_found, []}; + case do_get(?CONF, [], ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []}; Res -> {ok, Res} end; find(KeyPath) -> @@ -178,9 +179,8 @@ find(KeyPath) -> -spec find_raw(emqx_utils_maps:config_key_path()) -> {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}. 
find_raw([]) -> - Ref = make_ref(), - case do_get_raw([], Ref) of - Ref -> {not_found, []}; + case do_get_raw([], ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []}; Res -> {ok, Res} end; find_raw(KeyPath) -> @@ -231,14 +231,14 @@ find_listener_conf(Type, Listener, KeyPath) -> put(Config) -> maps:fold( fun(RootName, RootValue, _) -> - ?MODULE:put([RootName], RootValue) + ?MODULE:put([atom(RootName)], RootValue) end, ok, Config ). erase(RootName) -> - persistent_term:erase(?PERSIS_KEY(?CONF, bin(RootName))), + persistent_term:erase(?PERSIS_KEY(?CONF, atom(RootName))), persistent_term:erase(?PERSIS_KEY(?RAW_CONF, bin(RootName))), ok. @@ -287,9 +287,11 @@ get_default_value([RootName | _] = KeyPath) -> end. -spec get_raw(emqx_utils_maps:config_key_path()) -> term(). +get_raw([Root | T]) when is_atom(Root) -> get_raw([bin(Root) | T]); get_raw(KeyPath) -> do_get_raw(KeyPath). -spec get_raw(emqx_utils_maps:config_key_path(), term()) -> term(). +get_raw([Root | T], Default) when is_atom(Root) -> get_raw([bin(Root) | T], Default); get_raw(KeyPath, Default) -> do_get_raw(KeyPath, Default). -spec put_raw(map()) -> ok. @@ -314,44 +316,39 @@ put_raw(KeyPath, Config) -> %%============================================================================ init_load(SchemaMod) -> ConfFiles = application:get_env(emqx, config_files, []), - init_load(SchemaMod, ConfFiles, #{raw_with_default => true}). - -init_load(SchemaMod, Opts) when is_map(Opts) -> - ConfFiles = application:get_env(emqx, config_files, []), - init_load(SchemaMod, ConfFiles, Opts); -init_load(SchemaMod, ConfFiles) -> - init_load(SchemaMod, ConfFiles, #{raw_with_default => false}). + init_load(SchemaMod, ConfFiles). %% @doc Initial load of the given config files. %% NOTE: The order of the files is significant, configs from files ordered %% in the rear of the list overrides prior values. -spec init_load(module(), [string()] | binary() | hocon:config()) -> ok. 
-init_load(SchemaMod, Conf, Opts) when is_list(Conf) orelse is_binary(Conf) -> - HasDeprecatedFile = has_deprecated_file(), - RawConf = load_config_files(HasDeprecatedFile, Conf), - init_load(HasDeprecatedFile, SchemaMod, RawConf, Opts). - -init_load(true, SchemaMod, RawConf, Opts) when is_map(RawConf) -> +init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) -> ok = save_schema_mod_and_names(SchemaMod), - %% deprecated conf will be removed in 5.1 - %% Merge environment variable overrides on top + HasDeprecatedFile = has_deprecated_file(), + RawConf0 = load_config_files(HasDeprecatedFile, Conf), + warning_deprecated_root_key(RawConf0), + RawConf1 = + case HasDeprecatedFile of + true -> + overlay_v0(SchemaMod, RawConf0); + false -> + overlay_v1(SchemaMod, RawConf0) + end, + RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1), + %% check configs against the schema + {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}), + save_to_app_env(AppEnvs), + ok = save_to_config_map(CheckedConf, RawConf). + +%% Merge environment variable overrides on top, then merge with overrides. 
+overlay_v0(SchemaMod, RawConf) when is_map(RawConf) -> RawConfWithEnvs = merge_envs(SchemaMod, RawConf), Overrides = read_override_confs(), - RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides), - RawConfAll = maybe_fill_defaults(SchemaMod, RawConfWithOverrides, Opts), - %% check configs against the schema - {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}), - save_to_app_env(AppEnvs), - ok = save_to_config_map(CheckedConf, RawConfAll); -init_load(false, SchemaMod, RawConf, Opts) when is_map(RawConf) -> - ok = save_schema_mod_and_names(SchemaMod), - %% Merge environment variable overrides on top - RawConfWithEnvs = merge_envs(SchemaMod, RawConf), - RawConfAll = maybe_fill_defaults(SchemaMod, RawConfWithEnvs, Opts), - %% check configs against the schema - {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}), - save_to_app_env(AppEnvs), - ok = save_to_config_map(CheckedConf, RawConfAll). + hocon:deep_merge(RawConfWithEnvs, Overrides). + +%% Merge environment variable overrides on top. +overlay_v1(SchemaMod, RawConf) when is_map(RawConf) -> + merge_envs(SchemaMod, RawConf). %% @doc Read merged cluster + local overrides. read_override_confs() -> @@ -360,8 +357,7 @@ read_override_confs() -> hocon:deep_merge(ClusterOverrides, LocalOverrides). %% keep the raw and non-raw conf has the same keys to make update raw conf easier. -%% TODO: remove raw_with_default as it's now always true. -maybe_fill_defaults(SchemaMod, RawConf0, #{raw_with_default := true}) -> +fill_defaults_for_all_roots(SchemaMod, RawConf0) -> RootSchemas = hocon_schema:roots(SchemaMod), %% the roots which are missing from the loaded configs MissingRoots = lists:filtermap( @@ -380,9 +376,7 @@ maybe_fill_defaults(SchemaMod, RawConf0, #{raw_with_default := true}) -> RawConf0, MissingRoots ), - fill_defaults(RawConf); -maybe_fill_defaults(_SchemaMod, RawConf, _Opts) -> - RawConf. + fill_defaults(RawConf). %% So far, this can only return true when testing. %% e.g. 
when testing an app, we need to load its config first @@ -679,11 +673,9 @@ do_get_raw(Path, Default) -> do_get(?RAW_CONF, Path, Default). do_get(Type, KeyPath) -> - Ref = make_ref(), - Res = do_get(Type, KeyPath, Ref), - case Res =:= Ref of - true -> error({config_not_found, KeyPath}); - false -> Res + case do_get(Type, KeyPath, ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, KeyPath}); + Res -> Res end. do_get(Type, [], Default) -> @@ -702,9 +694,9 @@ do_get(Type, [], Default) -> false -> AllConf end; do_get(Type, [RootName], Default) -> - persistent_term:get(?PERSIS_KEY(Type, bin(RootName)), Default); + persistent_term:get(?PERSIS_KEY(Type, RootName), Default); do_get(Type, [RootName | KeyPath], Default) -> - RootV = persistent_term:get(?PERSIS_KEY(Type, bin(RootName)), #{}), + RootV = persistent_term:get(?PERSIS_KEY(Type, RootName), #{}), do_deep_get(Type, KeyPath, RootV, Default). do_put(Type, Putter, [], DeepValue) -> @@ -718,7 +710,7 @@ do_put(Type, Putter, [], DeepValue) -> do_put(Type, Putter, [RootName | KeyPath], DeepValue) -> OldValue = do_get(Type, [RootName], #{}), NewValue = do_deep_put(Type, Putter, KeyPath, OldValue, DeepValue), - persistent_term:put(?PERSIS_KEY(Type, bin(RootName)), NewValue). + persistent_term:put(?PERSIS_KEY(Type, RootName), NewValue). do_deep_get(?CONF, KeyPath, Map, Default) -> atom_conf_path( @@ -760,6 +752,22 @@ bin(Bin) when is_binary(Bin) -> Bin; bin(Str) when is_list(Str) -> list_to_binary(Str); bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8). +warning_deprecated_root_key(RawConf) -> + case maps:keys(RawConf) -- get_root_names() of + [] -> + ok; + Keys -> + Unknowns = string:join([binary_to_list(K) || K <- Keys], ","), + ?tp(unknown_config_keys, #{unknown_config_keys => Unknowns}), + ?SLOG( + warning, + #{ + msg => "config_key_not_recognized", + unknown_config_keys => Unknowns + } + ) + end. 
+ conf_key(?CONF, RootName) -> atom(RootName); conf_key(?RAW_CONF, RootName) -> diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl index 8d47f033c..79654e510 100644 --- a/apps/emqx/src/emqx_connection.erl +++ b/apps/emqx/src/emqx_connection.erl @@ -111,7 +111,7 @@ listener :: {Type :: atom(), Name :: atom()}, %% Limiter - limiter :: maybe(limiter()), + limiter :: limiter(), %% limiter buffer for overload use limiter_buffer :: queue:queue(pending_req()), @@ -182,10 +182,8 @@ -define(ALARM_SOCK_STATS_KEYS, [send_pend, recv_cnt, recv_oct, send_cnt, send_oct]). -define(ALARM_SOCK_OPTS_KEYS, [high_watermark, high_msgq_watermark, sndbuf, recbuf, buffer]). -%% use macro to do compile time limiter's type check --define(LIMITER_BYTES_IN, bytes_in). --define(LIMITER_MESSAGE_IN, message_in). --define(EMPTY_QUEUE, {[], []}). +-define(LIMITER_BYTES_IN, bytes). +-define(LIMITER_MESSAGE_IN, messages). -dialyzer({no_match, [info/2]}). -dialyzer( @@ -976,55 +974,61 @@ handle_cast(Req, State) -> list(any()), state() ) -> _. 
+ +check_limiter( + _Needs, + Data, + WhenOk, + Msgs, + #state{limiter = infinity} = State +) -> + WhenOk(Data, Msgs, State); check_limiter( Needs, Data, WhenOk, Msgs, - #state{ - limiter = Limiter, - limiter_timer = LimiterTimer, - limiter_buffer = Cache - } = State -) when Limiter =/= undefined -> - case LimiterTimer of - undefined -> - case emqx_limiter_container:check_list(Needs, Limiter) of - {ok, Limiter2} -> - WhenOk(Data, Msgs, State#state{limiter = Limiter2}); - {pause, Time, Limiter2} -> - ?SLOG(debug, #{ - msg => "pause_time_dueto_rate_limit", - needs => Needs, - time_in_ms => Time - }), + #state{limiter_timer = undefined, limiter = Limiter} = State +) -> + case emqx_limiter_container:check_list(Needs, Limiter) of + {ok, Limiter2} -> + WhenOk(Data, Msgs, State#state{limiter = Limiter2}); + {pause, Time, Limiter2} -> + ?SLOG(debug, #{ + msg => "pause_time_dueto_rate_limit", + needs => Needs, + time_in_ms => Time + }), - Retry = #retry{ - types = [Type || {_, Type} <- Needs], - data = Data, - next = WhenOk - }, + Retry = #retry{ + types = [Type || {_, Type} <- Needs], + data = Data, + next = WhenOk + }, - Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), + Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), - TRef = start_timer(Time, limit_timeout), + TRef = start_timer(Time, limit_timeout), - {ok, State#state{ - limiter = Limiter3, - limiter_timer = TRef - }}; - {drop, Limiter2} -> - {ok, State#state{limiter = Limiter2}} - end; - _ -> - %% if there has a retry timer, - %% cache the operation and execute it after the retry is over - %% the maximum length of the cache queue is equal to the active_n - New = #pending_req{need = Needs, data = Data, next = WhenOk}, - {ok, State#state{limiter_buffer = queue:in(New, Cache)}} + {ok, State#state{ + limiter = Limiter3, + limiter_timer = TRef + }}; + {drop, Limiter2} -> + {ok, State#state{limiter = Limiter2}} end; -check_limiter(_, Data, WhenOk, Msgs, State) -> - WhenOk(Data, 
Msgs, State). +check_limiter( + Needs, + Data, + WhenOk, + _Msgs, + #state{limiter_buffer = Cache} = State +) -> + %% if there has a retry timer, + %% cache the operation and execute it after the retry is over + %% the maximum length of the cache queue is equal to the active_n + New = #pending_req{need = Needs, data = Data, next = WhenOk}, + {ok, State#state{limiter_buffer = queue:in(New, Cache)}}. %% try to perform a retry -spec retry_limiter(state()) -> _. diff --git a/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl b/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl index bbebd9460..bcd4166af 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl @@ -22,7 +22,7 @@ %% API -export([ - make_token_bucket_limiter/2, + make_local_limiter/2, make_ref_limiter/2, check/2, consume/2, @@ -32,12 +32,11 @@ make_future/1, available/1 ]). --export_type([token_bucket_limiter/0]). +-export_type([local_limiter/0]). -%% a token bucket limiter with a limiter server's bucket reference - -%% the number of tokens currently available --type token_bucket_limiter() :: #{ +%% a token bucket limiter which may or not contains a reference to another limiter, +%% and can be used in a client alone +-type local_limiter() :: #{ tokens := non_neg_integer(), rate := decimal(), capacity := decimal(), @@ -58,12 +57,12 @@ retry_ctx => undefined %% the retry context - | retry_context(token_bucket_limiter()), + | retry_context(local_limiter()), %% allow to add other keys atom => any() }. -%% a limiter server's bucket reference +%% a limiter instance which only contains a reference to another limiter(bucket) -type ref_limiter() :: #{ max_retry_time := non_neg_integer(), failure_strategy := failure_strategy(), @@ -88,7 +87,7 @@ }. -type bucket() :: emqx_limiter_bucket_ref:bucket_ref(). --type limiter() :: token_bucket_limiter() | ref_limiter() | infinity. +-type limiter() :: local_limiter() | ref_limiter() | infinity. 
-type millisecond() :: non_neg_integer(). -type pause_type() :: pause | partial. @@ -116,7 +115,7 @@ rate := decimal(), initial := non_neg_integer(), low_watermark := non_neg_integer(), - capacity := decimal(), + burst := decimal(), divisible := boolean(), max_retry_time := non_neg_integer(), failure_strategy := failure_strategy() @@ -134,12 +133,13 @@ %% API %%-------------------------------------------------------------------- %%@doc create a limiter --spec make_token_bucket_limiter(limiter_bucket_cfg(), bucket()) -> _. -make_token_bucket_limiter(Cfg, Bucket) -> +-spec make_local_limiter(limiter_bucket_cfg(), bucket()) -> _. +make_local_limiter(Cfg, Bucket) -> Cfg#{ tokens => emqx_limiter_server:get_initial_val(Cfg), lasttime => ?NOW, - bucket => Bucket + bucket => Bucket, + capacity => emqx_limiter_schema:calc_capacity(Cfg) }. %%@doc create a limiter server's reference @@ -311,8 +311,8 @@ on_failure(throw, Limiter) -> Message = io_lib:format("limiter consume failed, limiter:~p~n", [Limiter]), erlang:throw({rate_check_fail, Message}). --spec do_check_with_parent_limiter(pos_integer(), token_bucket_limiter()) -> - inner_check_result(token_bucket_limiter()). +-spec do_check_with_parent_limiter(pos_integer(), local_limiter()) -> + inner_check_result(local_limiter()). do_check_with_parent_limiter( Need, #{ @@ -335,7 +335,7 @@ do_check_with_parent_limiter( ) end. --spec do_reset(pos_integer(), token_bucket_limiter()) -> inner_check_result(token_bucket_limiter()). +-spec do_reset(pos_integer(), local_limiter()) -> inner_check_result(local_limiter()). 
do_reset( Need, #{ diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl index fe30e41e9..139564df7 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_bucket_ref.erl @@ -23,6 +23,7 @@ %% API -export([ new/3, + infinity_bucket/0, check/3, try_restore/2, available/1 @@ -58,6 +59,10 @@ new(Counter, Index, Rate) -> rate => Rate }. +-spec infinity_bucket() -> bucket_ref(). +infinity_bucket() -> + infinity. + %% @doc check tokens -spec check(pos_integer(), bucket_ref(), Disivisble :: boolean()) -> HasToken :: diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl index ea02152a9..6a9101a0f 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl @@ -34,16 +34,18 @@ -export_type([container/0, check_result/0]). --type container() :: #{ - limiter_type() => undefined | limiter(), - %% the retry context of the limiter - retry_key() => - undefined - | retry_context() - | future(), - %% the retry context of the container - retry_ctx := undefined | any() -}. +-type container() :: + infinity + | #{ + limiter_type() => undefined | limiter(), + %% the retry context of the limiter + retry_key() => + undefined + | retry_context() + | future(), + %% the retry context of the container + retry_ctx := undefined | any() + }. -type future() :: pos_integer(). -type limiter_id() :: emqx_limiter_schema:limiter_id(). @@ -78,7 +80,20 @@ get_limiter_by_types(Id, Types, BucketCfgs) -> {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs), add_new(Type, Limiter, Acc) end, - lists:foldl(Init, #{retry_ctx => undefined}, Types). 
+ Container = lists:foldl(Init, #{retry_ctx => undefined}, Types), + case + lists:all( + fun(Type) -> + maps:get(Type, Container) =:= infinity + end, + Types + ) + of + true -> + infinity; + _ -> + Container + end. -spec add_new(limiter_type(), limiter(), container()) -> container(). add_new(Type, Limiter, Container) -> @@ -89,11 +104,15 @@ add_new(Type, Limiter, Container) -> %% @doc check the specified limiter -spec check(pos_integer(), limiter_type(), container()) -> check_result(). +check(_Need, _Type, infinity) -> + {ok, infinity}; check(Need, Type, Container) -> check_list([{Need, Type}], Container). %% @doc check multiple limiters -spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result(). +check_list(_Need, infinity) -> + {ok, infinity}; check_list([{Need, Type} | T], Container) -> Limiter = maps:get(Type, Container), case emqx_htb_limiter:check(Need, Limiter) of @@ -121,11 +140,15 @@ check_list([], Container) -> %% @doc retry the specified limiter -spec retry(limiter_type(), container()) -> check_result(). +retry(_Type, infinity) -> + {ok, infinity}; retry(Type, Container) -> retry_list([Type], Container). %% @doc retry multiple limiters -spec retry_list(list(limiter_type()), container()) -> check_result(). +retry_list(_Types, infinity) -> + {ok, infinity}; retry_list([Type | T], Container) -> Key = ?RETRY_KEY(Type), case Container of diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl index 297bdffb0..40061e0b9 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl @@ -30,6 +30,12 @@ post_config_update/5 ]). +-export([ + find_root/1, + insert_root/2, + delete_root/1 +]). + -export([ start_server/1, start_server/2, @@ -62,6 +68,7 @@ -define(UID(Id, Type), {Id, Type}). -define(TAB, emqx_limiter_counters). +-define(ROOT_ID, root). 
%%-------------------------------------------------------------------- %% API @@ -104,9 +111,25 @@ insert_bucket(Id, Type, Bucket) -> ). -spec delete_bucket(limiter_id(), limiter_type()) -> true. -delete_bucket(Type, Id) -> +delete_bucket(Id, Type) -> ets:delete(?TAB, ?UID(Id, Type)). +-spec find_root(limiter_type()) -> + {ok, bucket_ref()} | undefined. +find_root(Type) -> + find_bucket(?ROOT_ID, Type). + +-spec insert_root( + limiter_type(), + bucket_ref() +) -> boolean(). +insert_root(Type, Bucket) -> + insert_bucket(?ROOT_ID, Type, Bucket). + +-spec delete_root(limiter_type()) -> true. +delete_root(Type) -> + delete_bucket(?ROOT_ID, Type). + post_config_update([limiter], _Config, NewConf, _OldConf, _AppEnvs) -> Types = lists:delete(client, maps:keys(NewConf)), _ = [on_post_config_update(Type, NewConf) || Type <- Types], diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl index f45fc55b6..40b23415c 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl @@ -24,6 +24,7 @@ fields/1, to_rate/1, to_capacity/1, + to_burst/1, default_period/0, to_burst_rate/1, to_initial/1, @@ -31,20 +32,22 @@ get_bucket_cfg_path/2, desc/1, types/0, - infinity_value/0 + calc_capacity/1, + extract_with_type/2, + default_client_config/0 ]). -define(KILOBYTE, 1024). --define(BUCKET_KEYS, [ - {bytes_in, bucket_infinity}, - {message_in, bucket_infinity}, - {connection, bucket_limit}, - {message_routing, bucket_infinity} +-define(LISTENER_BUCKET_KEYS, [ + bytes, + messages, + connection, + message_routing ]). -type limiter_type() :: - bytes_in - | message_in + bytes + | messages | connection | message_routing %% internal limiter for unclassified resources @@ -54,8 +57,10 @@ -type bucket_name() :: atom(). -type rate() :: infinity | float(). -type burst_rate() :: 0 | float(). +%% this is a compatible type for the deprecated field and type `capacity`. 
+-type burst() :: burst_rate(). %% the capacity of the token bucket --type capacity() :: non_neg_integer(). +%%-type capacity() :: non_neg_integer(). %% initial capacity of the token bucket -type initial() :: non_neg_integer(). -type bucket_path() :: list(atom()). @@ -72,13 +77,13 @@ -typerefl_from_string({rate/0, ?MODULE, to_rate}). -typerefl_from_string({burst_rate/0, ?MODULE, to_burst_rate}). --typerefl_from_string({capacity/0, ?MODULE, to_capacity}). +-typerefl_from_string({burst/0, ?MODULE, to_burst}). -typerefl_from_string({initial/0, ?MODULE, to_initial}). -reflect_type([ rate/0, burst_rate/0, - capacity/0, + burst/0, initial/0, failure_strategy/0, bucket_name/0 @@ -90,27 +95,34 @@ namespace() -> limiter. -roots() -> [limiter]. +roots() -> + [ + {limiter, + hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{ + importance => ?IMPORTANCE_HIDDEN + })} + ]. fields(limiter) -> [ {Type, ?HOCON(?R_REF(node_opts), #{ desc => ?DESC(Type), - default => #{} + importance => ?IMPORTANCE_HIDDEN, + aliases => alias_of_type(Type) })} || Type <- types() ] ++ [ + %% This is an undocumented feature, and it won't be support anymore {client, ?HOCON( ?R_REF(client_fields), #{ desc => ?DESC(client), - default => maps:from_list([ - {erlang:atom_to_binary(Type), #{}} - || Type <- types() - ]) + importance => ?IMPORTANCE_HIDDEN, + required => {false, recursively}, + deprecated => {since, "5.0.25"} } )} ]; @@ -124,30 +136,18 @@ fields(node_opts) -> })} ]; fields(client_fields) -> - [ - {Type, - ?HOCON(?R_REF(client_opts), #{ - desc => ?DESC(Type), - default => #{} - })} - || Type <- types() - ]; -fields(bucket_infinity) -> - [ - {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"infinity">>})}, - {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"infinity">>})}, - {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})} - ]; -fields(bucket_limit) -> - [ - {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => <<"1000/s">>})}, - {capacity, 
?HOCON(capacity(), #{desc => ?DESC(capacity), default => <<"1000">>})}, - {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})} - ]; + client_fields(types()); +fields(bucket_opts) -> + fields_of_bucket(<<"infinity">>); fields(client_opts) -> [ {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})}, - {initial, ?HOCON(initial(), #{default => <<"0">>, desc => ?DESC(initial)})}, + {initial, + ?HOCON(initial(), #{ + default => <<"0">>, + desc => ?DESC(initial), + importance => ?IMPORTANCE_HIDDEN + })}, %% low_watermark add for emqx_channel and emqx_session %% both modules consume first and then check %% so we need to use this value to prevent excessive consumption @@ -157,20 +157,24 @@ fields(client_opts) -> initial(), #{ desc => ?DESC(low_watermark), - default => <<"0">> + default => <<"0">>, + importance => ?IMPORTANCE_HIDDEN } )}, - {capacity, - ?HOCON(capacity(), #{ - desc => ?DESC(client_bucket_capacity), - default => <<"infinity">> + {burst, + ?HOCON(burst(), #{ + desc => ?DESC(burst), + default => <<"0">>, + importance => ?IMPORTANCE_HIDDEN, + aliases => [capacity] })}, {divisible, ?HOCON( boolean(), #{ desc => ?DESC(divisible), - default => false + default => false, + importance => ?IMPORTANCE_HIDDEN } )}, {max_retry_time, @@ -178,7 +182,8 @@ fields(client_opts) -> emqx_schema:duration(), #{ desc => ?DESC(max_retry_time), - default => <<"10s">> + default => <<"10s">>, + importance => ?IMPORTANCE_HIDDEN } )}, {failure_strategy, @@ -186,25 +191,24 @@ fields(client_opts) -> failure_strategy(), #{ desc => ?DESC(failure_strategy), - default => force + default => force, + importance => ?IMPORTANCE_HIDDEN } )} ]; fields(listener_fields) -> - bucket_fields(?BUCKET_KEYS, listener_client_fields); + composite_bucket_fields(?LISTENER_BUCKET_KEYS, listener_client_fields); fields(listener_client_fields) -> - client_fields(?BUCKET_KEYS); + client_fields(?LISTENER_BUCKET_KEYS); fields(Type) -> - bucket_field(Type). 
+ simple_bucket_field(Type). desc(limiter) -> "Settings for the rate limiter."; desc(node_opts) -> "Settings for the limiter of the node level."; -desc(bucket_infinity) -> +desc(bucket_opts) -> "Settings for the bucket."; -desc(bucket_limit) -> - desc(bucket_infinity); desc(client_opts) -> "Settings for the client in bucket level."; desc(client_fields) -> @@ -230,19 +234,37 @@ get_bucket_cfg_path(Type, BucketName) -> [limiter, Type, bucket, BucketName]. types() -> - [bytes_in, message_in, connection, message_routing, internal]. + [bytes, messages, connection, message_routing, internal]. -%%-------------------------------------------------------------------- -%% Internal functions -%%-------------------------------------------------------------------- +calc_capacity(#{rate := infinity}) -> + infinity; +calc_capacity(#{rate := Rate, burst := Burst}) -> + erlang:floor(1000 * Rate / default_period()) + Burst. -%% `infinity` to `infinity_value` rules: -%% 1. all infinity capacity will change to infinity_value -%% 2. if the rate of global and bucket both are `infinity`, -%% use `infinity_value` as bucket rate. see `emqx_limiter_server:get_counter_rate/2` -infinity_value() -> - %% 1 TB - 1099511627776. +extract_with_type(_Type, undefined) -> + undefined; +extract_with_type(Type, #{client := ClientCfg} = BucketCfg) -> + BucketVal = maps:find(Type, BucketCfg), + ClientVal = maps:find(Type, ClientCfg), + merge_client_bucket(Type, ClientVal, BucketVal); +extract_with_type(Type, BucketCfg) -> + BucketVal = maps:find(Type, BucketCfg), + merge_client_bucket(Type, undefined, BucketVal). + +%% Since the client configuration can be absent and be a undefined value, +%% but we must need some basic settings to control the behaviour of the limiter, +%% so here add this helper function to generate a default setting. +%% This is a temporary workaround until we found a better way to simplify. 
+default_client_config() -> + #{ + rate => infinity, + initial => 0, + low_watermark => 0, + burst => 0, + divisible => false, + max_retry_time => timer:seconds(10), + failure_strategy => force + }. %%-------------------------------------------------------------------- %% Internal functions @@ -251,6 +273,17 @@ infinity_value() -> to_burst_rate(Str) -> to_rate(Str, false, true). +%% The default value of `capacity` is `infinity`, +%% but we have changed `capacity` to `burst` which should not be `infinity` +%% and its default value is 0, so we should convert `infinity` to 0 +to_burst(Str) -> + case to_rate(Str, true, true) of + {ok, infinity} -> + {ok, 0}; + Any -> + Any + end. + %% rate can be: 10 10MB 10MB/s 10MB/2s infinity %% e.g. the bytes_in regex tree is: %% @@ -335,7 +368,7 @@ to_quota(Str, Regex) -> {match, [Quota, ""]} -> {ok, erlang:list_to_integer(Quota)}; {match, ""} -> - {ok, infinity_value()}; + {ok, infinity}; _ -> {error, Str} end @@ -350,26 +383,33 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE; apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE; apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit). -bucket_field(Type) when is_atom(Type) -> - fields(bucket_infinity) ++ +%% A bucket with only one type +simple_bucket_field(Type) when is_atom(Type) -> + fields(bucket_opts) ++ [ {client, ?HOCON( ?R_REF(?MODULE, client_opts), #{ desc => ?DESC(client), - required => false + required => {false, recursively}, + importance => importance_of_type(Type), + aliases => alias_of_type(Type) } )} ]. 
-bucket_fields(Types, ClientRef) -> + +%% A bucket with multi types +composite_bucket_fields(Types, ClientRef) -> [ {Type, - ?HOCON(?R_REF(?MODULE, Opts), #{ + ?HOCON(?R_REF(?MODULE, bucket_opts), #{ desc => ?DESC(?MODULE, Type), - required => false + required => {false, recursively}, + importance => importance_of_type(Type), + aliases => alias_of_type(Type) })} - || {Type, Opts} <- Types + || Type <- Types ] ++ [ {client, @@ -377,17 +417,62 @@ bucket_fields(Types, ClientRef) -> ?R_REF(?MODULE, ClientRef), #{ desc => ?DESC(client), - required => false + required => {false, recursively} } )} ]. +fields_of_bucket(Default) -> + [ + {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => Default})}, + {burst, + ?HOCON(burst(), #{ + desc => ?DESC(burst), + default => <<"0">>, + importance => ?IMPORTANCE_HIDDEN, + aliases => [capacity] + })}, + {initial, + ?HOCON(initial(), #{ + default => <<"0">>, + desc => ?DESC(initial), + importance => ?IMPORTANCE_HIDDEN + })} + ]. + client_fields(Types) -> [ {Type, ?HOCON(?R_REF(client_opts), #{ desc => ?DESC(Type), - required => false + required => false, + importance => importance_of_type(Type), + aliases => alias_of_type(Type) })} - || {Type, _} <- Types + || Type <- Types ]. + +importance_of_type(interval) -> + ?IMPORTANCE_HIDDEN; +importance_of_type(message_routing) -> + ?IMPORTANCE_HIDDEN; +importance_of_type(connection) -> + ?IMPORTANCE_HIDDEN; +importance_of_type(_) -> + ?DEFAULT_IMPORTANCE. + +alias_of_type(messages) -> + [message_in]; +alias_of_type(bytes) -> + [bytes_in]; +alias_of_type(_) -> + []. + +merge_client_bucket(Type, {ok, ClientVal}, {ok, BucketVal}) -> + #{Type => BucketVal, client => #{Type => ClientVal}}; +merge_client_bucket(Type, {ok, ClientVal}, _) -> + #{client => #{Type => ClientVal}}; +merge_client_bucket(Type, _, {ok, BucketVal}) -> + #{Type => BucketVal}; +merge_client_bucket(_, _, _) -> + undefined. 
diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl index f1daeaaeb..2867283d6 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl @@ -59,7 +59,8 @@ burst := rate(), %% token generation interval(second) period := pos_integer(), - produced := float() + produced := float(), + correction := emqx_limiter_decimal:zero_or_float() }. -type bucket() :: #{ @@ -98,6 +99,7 @@ %% minimum coefficient for overloaded limiter -define(OVERLOAD_MIN_ALLOC, 0.3). -define(COUNTER_SIZE, 8). +-define(ROOT_COUNTER_IDX, 1). -export_type([index/0]). -import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]). @@ -110,40 +112,24 @@ -spec connect( limiter_id(), limiter_type(), - bucket_name() | #{limiter_type() => bucket_name() | undefined} + hocons:config() | undefined ) -> {ok, emqx_htb_limiter:limiter()} | {error, _}. -%% If no bucket path is set in config, there will be no limit -connect(_Id, _Type, undefined) -> - {ok, emqx_htb_limiter:make_infinity_limiter()}; +%% undefined is the default situation, no limiter setting by default +connect(Id, Type, undefined) -> + create_limiter(Id, Type, undefined, undefined); +connect(Id, Type, #{rate := _} = Cfg) -> + create_limiter(Id, Type, maps:get(client, Cfg, undefined), Cfg); connect(Id, Type, Cfg) -> - case find_limiter_cfg(Type, Cfg) of - {undefined, _} -> - {ok, emqx_htb_limiter:make_infinity_limiter()}; - { - #{ - rate := BucketRate, - capacity := BucketSize - }, - #{rate := CliRate, capacity := CliSize} = ClientCfg - } -> - case emqx_limiter_manager:find_bucket(Id, Type) of - {ok, Bucket} -> - {ok, - if - CliRate < BucketRate orelse CliSize < BucketSize -> - emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket); - true -> - emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket) - end}; - undefined -> - ?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}), - 
{error, invalid_bucket} - end - end. + create_limiter( + Id, + Type, + emqx_utils_maps:deep_get([client, Type], Cfg, undefined), + maps:get(Type, Cfg, undefined) + ). -spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok. -add_bucket(_Id, _Type, undefine) -> +add_bucket(_Id, _Type, undefined) -> ok; add_bucket(Id, Type, Cfg) -> ?CALL(Type, {add_bucket, Id, Cfg}). @@ -281,7 +267,8 @@ handle_info(Info, State) -> Reason :: normal | shutdown | {shutdown, term()} | term(), State :: term() ) -> any(). -terminate(_Reason, _State) -> +terminate(_Reason, #{type := Type}) -> + emqx_limiter_manager:delete_root(Type), ok. %%-------------------------------------------------------------------- @@ -336,10 +323,14 @@ oscillation( oscillate(Interval), Ordereds = get_ordered_buckets(Buckets), {Alloced, Buckets2} = transverse(Ordereds, Flow, 0.0, Buckets), - maybe_burst(State#{ - buckets := Buckets2, - root := Root#{produced := Produced + Alloced} - }). + State2 = maybe_adjust_root_tokens( + State#{ + buckets := Buckets2, + root := Root#{produced := Produced + Alloced} + }, + Alloced + ), + maybe_burst(State2). %% @doc horizontal spread -spec transverse( @@ -412,6 +403,24 @@ get_ordered_buckets(Buckets) -> Buckets ). +-spec maybe_adjust_root_tokens(state(), float()) -> state(). +maybe_adjust_root_tokens(#{root := #{rate := infinity}} = State, _Alloced) -> + State; +maybe_adjust_root_tokens(#{root := #{rate := Rate}} = State, Alloced) when Alloced >= Rate -> + State; +maybe_adjust_root_tokens(#{root := #{rate := Rate} = Root, counter := Counter} = State, Alloced) -> + InFlow = Rate - Alloced, + Token = counters:get(Counter, ?ROOT_COUNTER_IDX), + case Token >= Rate of + true -> + State; + _ -> + Available = erlang:min(Rate - Token, InFlow), + {Inc, Root2} = emqx_limiter_correction:add(Available, Root), + counters:add(Counter, ?ROOT_COUNTER_IDX, Inc), + State#{root := Root2} + end. + -spec maybe_burst(state()) -> state(). 
maybe_burst( #{ @@ -475,12 +484,16 @@ init_tree(Type) when is_atom(Type) -> Cfg = emqx:get_config([limiter, Type]), init_tree(Type, Cfg). -init_tree(Type, Cfg) -> +init_tree(Type, #{rate := Rate} = Cfg) -> + Counter = counters:new(?COUNTER_SIZE, [write_concurrency]), + RootBucket = emqx_limiter_bucket_ref:new(Counter, ?ROOT_COUNTER_IDX, Rate), + emqx_limiter_manager:insert_root(Type, RootBucket), #{ type => Type, root => make_root(Cfg), - counter => counters:new(?COUNTER_SIZE, [write_concurrency]), - index => 0, + counter => Counter, + %% The first slot is reserved for the root + index => ?ROOT_COUNTER_IDX, buckets => #{} }. @@ -490,15 +503,18 @@ make_root(#{rate := Rate, burst := Burst}) -> rate => Rate, burst => Burst, period => emqx_limiter_schema:default_period(), - produced => 0.0 + produced => 0.0, + correction => 0 }. -do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) -> +do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) -> + State; +do_add_bucket(Id, #{rate := Rate} = Cfg, #{buckets := Buckets} = State) -> case maps:get(Id, Buckets, undefined) of undefined -> make_bucket(Id, Cfg, State); Bucket -> - Bucket2 = Bucket#{rate := Rate, capacity := Capacity}, + Bucket2 = Bucket#{rate := Rate, capacity := emqx_limiter_schema:calc_capacity(Cfg)}, State#{buckets := Buckets#{Id := Bucket2}} end. 
@@ -509,7 +525,7 @@ make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) -> }); make_bucket( Id, - #{rate := Rate, capacity := Capacity} = Cfg, + #{rate := Rate} = Cfg, #{type := Type, counter := Counter, index := Index, buckets := Buckets} = State ) -> NewIndex = Index + 1, @@ -519,7 +535,7 @@ make_bucket( rate => Rate, obtained => Initial, correction => 0, - capacity => Capacity, + capacity => emqx_limiter_schema:calc_capacity(Cfg), counter => Counter, index => NewIndex }, @@ -541,19 +557,14 @@ do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) -> get_initial_val( #{ initial := Initial, - rate := Rate, - capacity := Capacity + rate := Rate } ) -> - %% initial will nevner be infinity(see the emqx_limiter_schema) - InfVal = emqx_limiter_schema:infinity_value(), if Initial > 0 -> Initial; Rate =/= infinity -> - erlang:min(Rate, Capacity); - Capacity =/= infinity andalso Capacity =/= InfVal -> - Capacity; + Rate; true -> 0 end. @@ -567,21 +578,61 @@ call(Type, Msg) -> gen_server:call(Pid, Msg) end. -find_limiter_cfg(Type, #{rate := _} = Cfg) -> - {Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))}; -find_limiter_cfg(Type, Cfg) -> - { - maps:get(Type, Cfg, undefined), - find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)) - }. +create_limiter(Id, Type, #{rate := Rate} = ClientCfg, BucketCfg) when Rate =/= infinity -> + create_limiter_with_client(Id, Type, ClientCfg, BucketCfg); +create_limiter(Id, Type, _, BucketCfg) -> + create_limiter_without_client(Id, Type, BucketCfg). -find_client_cfg(Type, BucketCfg) -> - NodeCfg = emqx:get_config([limiter, client, Type], undefined), - merge_client_cfg(NodeCfg, BucketCfg). 
+%% create a limiter with the client-level configuration +create_limiter_with_client(Id, Type, ClientCfg, BucketCfg) -> + case find_referenced_bucket(Id, Type, BucketCfg) of + false -> + {ok, emqx_htb_limiter:make_local_limiter(ClientCfg, infinity)}; + {ok, Bucket, RefCfg} -> + create_limiter_with_ref(Bucket, ClientCfg, RefCfg); + Error -> + Error + end. -merge_client_cfg(undefined, BucketCfg) -> - BucketCfg; -merge_client_cfg(NodeCfg, undefined) -> - NodeCfg; -merge_client_cfg(NodeCfg, BucketCfg) -> - maps:merge(NodeCfg, BucketCfg). +%% create a limiter only with the referenced configuration +create_limiter_without_client(Id, Type, BucketCfg) -> + case find_referenced_bucket(Id, Type, BucketCfg) of + false -> + {ok, emqx_htb_limiter:make_infinity_limiter()}; + {ok, Bucket, RefCfg} -> + ClientCfg = emqx_limiter_schema:default_client_config(), + create_limiter_with_ref(Bucket, ClientCfg, RefCfg); + Error -> + Error + end. + +create_limiter_with_ref( + Bucket, + #{rate := CliRate} = ClientCfg, + #{rate := RefRate} +) when CliRate < RefRate -> + {ok, emqx_htb_limiter:make_local_limiter(ClientCfg, Bucket)}; +create_limiter_with_ref(Bucket, ClientCfg, _) -> + {ok, emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)}. + +%% this is a listener(server)-level reference +find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity -> + case emqx_limiter_manager:find_bucket(Id, Type) of + {ok, Bucket} -> + {ok, Bucket, Cfg}; + _ -> + ?SLOG(error, #{msg => "bucket not found", type => Type, id => Id}), + {error, invalid_bucket} + end; +%% this is a node-level reference +find_referenced_bucket(Id, Type, _) -> + case emqx:get_config([limiter, Type], undefined) of + #{rate := infinity} -> + false; + undefined -> + ?SLOG(error, #{msg => "invalid limiter type", type => Type, id => Id}), + {error, invalid_bucket}; + NodeCfg -> + {ok, Bucket} = emqx_limiter_manager:find_root(Type), + {ok, Bucket, NodeCfg} + end. 
diff --git a/apps/emqx/src/emqx_listeners.erl b/apps/emqx/src/emqx_listeners.erl index b3043effc..7476b2718 100644 --- a/apps/emqx/src/emqx_listeners.erl +++ b/apps/emqx/src/emqx_listeners.erl @@ -35,7 +35,8 @@ current_conns/2, max_conns/2, id_example/0, - default_max_conn/0 + default_max_conn/0, + shutdown_count/2 ]). -export([ @@ -195,6 +196,17 @@ max_conns(Type, Name, _ListenOn) when Type =:= ws; Type =:= wss -> max_conns(_, _, _) -> {error, not_support}. +shutdown_count(ID, ListenOn) -> + {ok, #{type := Type, name := Name}} = parse_listener_id(ID), + shutdown_count(Type, Name, ListenOn). + +shutdown_count(Type, Name, ListenOn) when Type == tcp; Type == ssl -> + esockd:get_shutdown_count({listener_id(Type, Name), ListenOn}); +shutdown_count(Type, _Name, _ListenOn) when Type =:= ws; Type =:= wss -> + []; +shutdown_count(_, _, _) -> + {error, not_support}. + %% @doc Start all listeners. -spec start() -> ok. start() -> @@ -494,7 +506,7 @@ esockd_opts(ListenerId, Type, Opts0) -> Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0), Limiter = limiter(Opts0), Opts2 = - case maps:get(connection, Limiter, undefined) of + case emqx_limiter_schema:extract_with_type(connection, Limiter) of undefined -> Opts1; BucketCfg -> @@ -639,7 +651,7 @@ zone(Opts) -> maps:get(zone, Opts, undefined). limiter(Opts) -> - maps:get(limiter, Opts, #{}). + maps:get(limiter, Opts, undefined). add_limiter_bucket(Id, #{limiter := Limiter}) -> maps:fold( diff --git a/apps/emqx/src/emqx_logger.erl b/apps/emqx/src/emqx_logger.erl index 114a1af49..6087acd8a 100644 --- a/apps/emqx/src/emqx_logger.erl +++ b/apps/emqx/src/emqx_logger.erl @@ -237,7 +237,7 @@ set_log_handler_level(HandlerId, Level) -> end. %% @doc Set both the primary and all handlers level in one command --spec set_log_level(logger:handler_id()) -> ok | {error, term()}. +-spec set_log_level(logger:level()) -> ok | {error, term()}. 
set_log_level(Level) -> case set_primary_log_level(Level) of ok -> set_all_log_handlers_level(Level); diff --git a/apps/emqx/src/emqx_mqtt_caps.erl b/apps/emqx/src/emqx_mqtt_caps.erl index 1806ede1d..897bb93c4 100644 --- a/apps/emqx/src/emqx_mqtt_caps.erl +++ b/apps/emqx/src/emqx_mqtt_caps.erl @@ -37,7 +37,6 @@ max_qos_allowed => emqx_types:qos(), retain_available => boolean(), wildcard_subscription => boolean(), - subscription_identifiers => boolean(), shared_subscription => boolean(), exclusive_subscription => boolean() }. @@ -58,18 +57,17 @@ exclusive_subscription ]). --define(DEFAULT_CAPS, #{ - max_packet_size => ?MAX_PACKET_SIZE, - max_clientid_len => ?MAX_CLIENTID_LEN, - max_topic_alias => ?MAX_TOPIC_AlIAS, - max_topic_levels => ?MAX_TOPIC_LEVELS, - max_qos_allowed => ?QOS_2, - retain_available => true, - wildcard_subscription => true, - subscription_identifiers => true, - shared_subscription => true, - exclusive_subscription => false -}). +-define(DEFAULT_CAPS_KEYS, [ + max_packet_size, + max_clientid_len, + max_topic_alias, + max_topic_levels, + max_qos_allowed, + retain_available, + wildcard_subscription, + shared_subscription, + exclusive_subscription +]). -spec check_pub( emqx_types:zone(), @@ -88,7 +86,7 @@ check_pub(Zone, Flags) when is_map(Flags) -> error -> Flags end, - maps:with(?PUBCAP_KEYS, get_caps(Zone)) + get_caps(?PUBCAP_KEYS, Zone) ). do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when @@ -111,7 +109,7 @@ do_check_pub(_Flags, _Caps) -> ) -> ok_or_error(emqx_types:reason_code()). check_sub(ClientInfo = #{zone := Zone}, Topic, SubOpts) -> - Caps = maps:with(?SUBCAP_KEYS, get_caps(Zone)), + Caps = get_caps(?SUBCAP_KEYS, Zone), Flags = lists:foldl( fun (max_topic_levels, Map) -> @@ -152,10 +150,12 @@ do_check_sub(_Flags, _Caps, _, _) -> ok. 
get_caps(Zone) -> - lists:foldl( - fun({K, V}, Acc) -> - Acc#{K => emqx_config:get_zone_conf(Zone, [mqtt, K], V)} - end, - #{}, - maps:to_list(?DEFAULT_CAPS) + get_caps(?DEFAULT_CAPS_KEYS, Zone). +get_caps(Keys, Zone) -> + maps:with( + Keys, + maps:merge( + emqx_config:get([mqtt]), + emqx_config:get_zone_conf(Zone, [mqtt]) + ) ). diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index cf0f05abd..309a2a022 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -43,7 +43,12 @@ -type ip_port() :: tuple() | integer(). -type cipher() :: map(). -type port_number() :: 1..65536. --type server_parse_option() :: #{default_port => port_number(), no_port => boolean()}. +-type server_parse_option() :: #{ + default_port => port_number(), + no_port => boolean(), + supported_schemes => [string()], + default_scheme => string() +}. -type url() :: binary(). -type json_binary() :: binary(). @@ -62,6 +67,12 @@ -typerefl_from_string({url/0, emqx_schema, to_url}). -typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}). +-type parsed_server() :: #{ + hostname := string(), + port => port_number(), + scheme => string() +}. + -export([ validate_heap_size/1, user_lookup_fun_tr/2, @@ -172,7 +183,7 @@ roots(high) -> } )}, {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, authentication(global)}, - %% NOTE: authorization schema here is only to keep emqx app prue + %% NOTE: authorization schema here is only to keep emqx app pure %% the full schema for EMQX node is injected in emqx_conf_schema. 
{?EMQX_AUTHORIZATION_CONFIG_ROOT_NAME, sc( @@ -676,12 +687,13 @@ fields("force_shutdown") -> desc => ?DESC(force_shutdown_enable) } )}, - {"max_message_queue_len", + {"max_mailbox_size", sc( range(0, inf), #{ default => 1000, - desc => ?DESC(force_shutdown_max_message_queue_len) + aliases => [max_message_queue_len], + desc => ?DESC(force_shutdown_max_mailbox_size) } )}, {"max_heap_size", @@ -924,15 +936,17 @@ fields("mqtt_quic_listener") -> string(), #{ %% TODO: deprecated => {since, "5.1.0"} - desc => ?DESC(fields_mqtt_quic_listener_certfile) + desc => ?DESC(fields_mqtt_quic_listener_certfile), + importance => ?IMPORTANCE_HIDDEN } )}, {"keyfile", sc( string(), - %% TODO: deprecated => {since, "5.1.0"} #{ - desc => ?DESC(fields_mqtt_quic_listener_keyfile) + %% TODO: deprecated => {since, "5.1.0"} + desc => ?DESC(fields_mqtt_quic_listener_keyfile), + importance => ?IMPORTANCE_HIDDEN } )}, {"ciphers", ciphers_schema(quic)}, @@ -1008,7 +1022,10 @@ fields("mqtt_quic_listener") -> duration_ms(), #{ default => 0, - desc => ?DESC(fields_mqtt_quic_listener_idle_timeout) + desc => ?DESC(fields_mqtt_quic_listener_idle_timeout), + %% TODO: deprecated => {since, "5.1.0"} + %% deprecated, use idle_timeout_ms instead + importance => ?IMPORTANCE_HIDDEN } )}, {"idle_timeout_ms", @@ -1022,7 +1039,10 @@ fields("mqtt_quic_listener") -> duration_ms(), #{ default => <<"10s">>, - desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout) + desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout), + %% TODO: deprecated => {since, "5.1.0"} + %% use handshake_idle_timeout_ms + importance => ?IMPORTANCE_HIDDEN } )}, {"handshake_idle_timeout_ms", @@ -1036,7 +1056,10 @@ fields("mqtt_quic_listener") -> duration_ms(), #{ default => 0, - desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval) + desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval), + %% TODO: deprecated => {since, "5.1.0"} + %% use keep_alive_interval_ms instead + importance => ?IMPORTANCE_HIDDEN } )}, 
{"keep_alive_interval_ms", @@ -1504,10 +1527,8 @@ fields("broker") -> sc( boolean(), #{ - %% TODO: deprecated => {since, "5.1.0"} - %% in favor of session message re-dispatch at termination - %% we will stop supporting dispatch acks for shared - %% subscriptions. + deprecated => {since, "5.1.0"}, + importance => ?IMPORTANCE_HIDDEN, default => false, desc => ?DESC(broker_shared_dispatch_ack_enabled) } @@ -2171,7 +2192,7 @@ common_ssl_opts_schema(Defaults) -> D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end, Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end, Collection = maps:get(versions, Defaults, tls_all_available), - AvailableVersions = default_tls_vsns(Collection), + DefaultVersions = default_tls_vsns(Collection), [ {"cacertfile", sc( @@ -2233,6 +2254,7 @@ common_ssl_opts_schema(Defaults) -> example => <<"">>, format => <<"password">>, desc => ?DESC(common_ssl_opts_schema_password), + importance => ?IMPORTANCE_LOW, converter => fun password_converter/2 } )}, @@ -2240,9 +2262,10 @@ common_ssl_opts_schema(Defaults) -> sc( hoconsc:array(typerefl:atom()), #{ - default => AvailableVersions, + default => DefaultVersions, desc => ?DESC(common_ssl_opts_schema_versions), - validator => fun(Inputs) -> validate_tls_versions(AvailableVersions, Inputs) end + importance => ?IMPORTANCE_HIGH, + validator => fun(Input) -> validate_tls_versions(Collection, Input) end } )}, {"ciphers", ciphers_schema(D("ciphers"))}, @@ -2428,10 +2451,14 @@ client_ssl_opts_schema(Defaults) -> )} ]. -default_tls_vsns(dtls_all_available) -> - emqx_tls_lib:available_versions(dtls); -default_tls_vsns(tls_all_available) -> - emqx_tls_lib:available_versions(tls). +available_tls_vsns(dtls_all_available) -> emqx_tls_lib:available_versions(dtls); +available_tls_vsns(tls_all_available) -> emqx_tls_lib:available_versions(tls). + +outdated_tls_vsn(dtls_all_available) -> [dtlsv1]; +outdated_tls_vsn(tls_all_available) -> ['tlsv1.1', tlsv1]. 
+ +default_tls_vsns(Key) -> + available_tls_vsns(Key) -- outdated_tls_vsn(Key). -spec ciphers_schema(quic | dtls_all_available | tls_all_available | undefined) -> hocon_schema:field_schema(). @@ -2740,7 +2767,8 @@ validate_ciphers(Ciphers) -> Bad -> {error, {bad_ciphers, Bad}} end. -validate_tls_versions(AvailableVersions, Versions) -> +validate_tls_versions(Collection, Versions) -> + AvailableVersions = available_tls_vsns(Collection), case lists:filter(fun(V) -> not lists:member(V, AvailableVersions) end, Versions) of [] -> ok; Vs -> {error, {unsupported_tls_versions, Vs}} @@ -2913,7 +2941,7 @@ servers_validator(Opts, Required) -> %% `no_port': by default it's `false', when set to `true', %% a `throw' exception is raised if the port is found. -spec parse_server(undefined | string() | binary(), server_parse_option()) -> - {string(), port_number()}. + undefined | parsed_server(). parse_server(Str, Opts) -> case parse_servers(Str, Opts) of undefined -> @@ -2927,7 +2955,7 @@ parse_server(Str, Opts) -> %% @doc Parse comma separated `host[:port][,host[:port]]' endpoints %% into a list of `{Host, Port}' tuples or just `Host' string. -spec parse_servers(undefined | string() | binary(), server_parse_option()) -> - [{string(), port_number()}]. + undefined | [parsed_server()]. 
parse_servers(undefined, _Opts) -> %% should not parse 'undefined' as string, %% not to throw exception either, @@ -2973,6 +3001,9 @@ split_host_port(Str) -> do_parse_server(Str, Opts) -> DefaultPort = maps:get(default_port, Opts, undefined), NotExpectingPort = maps:get(no_port, Opts, false), + DefaultScheme = maps:get(default_scheme, Opts, undefined), + SupportedSchemes = maps:get(supported_schemes, Opts, []), + NotExpectingScheme = (not is_list(DefaultScheme)) andalso length(SupportedSchemes) =:= 0, case is_integer(DefaultPort) andalso NotExpectingPort of true -> %% either provide a default port from schema, @@ -2981,22 +3012,129 @@ do_parse_server(Str, Opts) -> false -> ok end, + case is_list(DefaultScheme) andalso (not lists:member(DefaultScheme, SupportedSchemes)) of + true -> + %% inconsistent schema + error("bad_schema"); + false -> + ok + end, %% do not split with space, there should be no space allowed between host and port - case string:tokens(Str, ":") of - [Hostname, Port] -> - NotExpectingPort andalso throw("not_expecting_port_number"), - {check_hostname(Hostname), parse_port(Port)}; - [Hostname] -> - case is_integer(DefaultPort) of - true -> - {check_hostname(Hostname), DefaultPort}; - false when NotExpectingPort -> - check_hostname(Hostname); - false -> - throw("missing_port_number") - end; - _ -> - throw("bad_host_port") + Tokens = string:tokens(Str, ":"), + Context = #{ + not_expecting_port => NotExpectingPort, + not_expecting_scheme => NotExpectingScheme, + default_port => DefaultPort, + default_scheme => DefaultScheme, + opts => Opts + }, + check_server_parts(Tokens, Context). 
+ +check_server_parts([Scheme, "//" ++ Hostname, Port], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + opts := Opts + } = Context, + NotExpectingPort andalso throw("not_expecting_port_number"), + NotExpectingScheme andalso throw("not_expecting_scheme"), + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname), + port => parse_port(Port) + }; +check_server_parts([Scheme, "//" ++ Hostname], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + default_port := DefaultPort, + opts := Opts + } = Context, + NotExpectingScheme andalso throw("not_expecting_scheme"), + case is_integer(DefaultPort) of + true -> + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname), + port => DefaultPort + }; + false when NotExpectingPort -> + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname) + }; + false -> + throw("missing_port_number") + end; +check_server_parts([Hostname, Port], Context) -> + #{ + not_expecting_port := NotExpectingPort, + default_scheme := DefaultScheme + } = Context, + NotExpectingPort andalso throw("not_expecting_port_number"), + case is_list(DefaultScheme) of + false -> + #{ + hostname => check_hostname(Hostname), + port => parse_port(Port) + }; + true -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname), + port => parse_port(Port) + } + end; +check_server_parts([Hostname], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + default_port := DefaultPort, + default_scheme := DefaultScheme + } = Context, + case is_integer(DefaultPort) orelse NotExpectingPort of + true -> + ok; + false -> + throw("missing_port_number") + end, + case is_list(DefaultScheme) orelse NotExpectingScheme of + true -> + ok; + false -> + throw("missing_scheme") + end, + case {is_integer(DefaultPort), is_list(DefaultScheme)} 
of + {true, true} -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname), + port => DefaultPort + }; + {true, false} -> + #{ + hostname => check_hostname(Hostname), + port => DefaultPort + }; + {false, true} -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname) + }; + {false, false} -> + #{hostname => check_hostname(Hostname)} + end; +check_server_parts(_Tokens, _Context) -> + throw("bad_host_port"). + +check_scheme(Str, Opts) -> + SupportedSchemes = maps:get(supported_schemes, Opts, []), + IsSupported = lists:member(Str, SupportedSchemes), + case IsSupported of + true -> + Str; + false -> + throw("unsupported_scheme") end. check_hostname(Str) -> diff --git a/apps/emqx/src/emqx_shared_sub.erl b/apps/emqx/src/emqx_shared_sub.erl index d7dc8c5a6..997364898 100644 --- a/apps/emqx/src/emqx_shared_sub.erl +++ b/apps/emqx/src/emqx_shared_sub.erl @@ -165,7 +165,7 @@ strategy(Group) -> -spec ack_enabled() -> boolean(). ack_enabled() -> - emqx:get_config([broker, shared_dispatch_ack_enabled]). + emqx:get_config([broker, shared_dispatch_ack_enabled], false). 
do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() -> %% Deadlock otherwise @@ -181,7 +181,7 @@ do_dispatch(SubPid, _Group, Topic, Msg, retry) -> do_dispatch(SubPid, Group, Topic, Msg, fresh) -> case ack_enabled() of true -> - %% FIXME: replace with `emqx_shared_sub_proto:dispatch_with_ack' in 5.2 + %% TODO: delete this clase after 5.1.0 do_dispatch_with_ack(SubPid, Group, Topic, Msg); false -> send(SubPid, Topic, {deliver, Topic, Msg}) diff --git a/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl b/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl index c31bc0355..a44237bd0 100644 --- a/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl +++ b/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl @@ -27,7 +27,7 @@ format( #{level := debug, meta := Meta = #{trace_tag := Tag}, msg := Msg}, #{payload_encode := PEncode} ) -> - Time = calendar:system_time_to_rfc3339(erlang:system_time(second)), + Time = calendar:system_time_to_rfc3339(erlang:system_time(microsecond), [{unit, microsecond}]), ClientId = to_iolist(maps:get(clientid, Meta, "")), Peername = maps:get(peername, Meta, ""), MetaBin = format_meta(Meta, PEncode), diff --git a/apps/emqx/src/emqx_types.erl b/apps/emqx/src/emqx_types.erl index 7223da245..96d75daba 100644 --- a/apps/emqx/src/emqx_types.erl +++ b/apps/emqx/src/emqx_types.erl @@ -238,7 +238,7 @@ -type stats() :: [{atom(), term()}]. -type oom_policy() :: #{ - max_message_queue_len => non_neg_integer(), + max_mailbox_size => non_neg_integer(), max_heap_size => non_neg_integer(), enable => boolean() }. 
diff --git a/apps/emqx/src/emqx_ws_connection.erl b/apps/emqx/src/emqx_ws_connection.erl index 20962809f..00fe545eb 100644 --- a/apps/emqx/src/emqx_ws_connection.erl +++ b/apps/emqx/src/emqx_ws_connection.erl @@ -90,7 +90,7 @@ listener :: {Type :: atom(), Name :: atom()}, %% Limiter - limiter :: maybe(container()), + limiter :: container(), %% cache operation when overload limiter_cache :: queue:queue(cache()), @@ -121,8 +121,8 @@ -define(SOCK_STATS, [recv_oct, recv_cnt, send_oct, send_cnt]). -define(ENABLED(X), (X =/= undefined)). --define(LIMITER_BYTES_IN, bytes_in). --define(LIMITER_MESSAGE_IN, message_in). +-define(LIMITER_BYTES_IN, bytes). +-define(LIMITER_MESSAGE_IN, messages). -dialyzer({no_match, [info/2]}). -dialyzer({nowarn_function, [websocket_init/1]}). @@ -579,54 +579,61 @@ handle_timeout(TRef, TMsg, State) -> list(any()), state() ) -> state(). +check_limiter( + _Needs, + Data, + WhenOk, + Msgs, + #state{limiter = infinity} = State +) -> + WhenOk(Data, Msgs, State); check_limiter( Needs, Data, WhenOk, Msgs, - #state{ - limiter = Limiter, - limiter_timer = LimiterTimer, - limiter_cache = Cache - } = State + #state{limiter_timer = undefined, limiter = Limiter} = State ) -> - case LimiterTimer of - undefined -> - case emqx_limiter_container:check_list(Needs, Limiter) of - {ok, Limiter2} -> - WhenOk(Data, Msgs, State#state{limiter = Limiter2}); - {pause, Time, Limiter2} -> - ?SLOG(debug, #{ - msg => "pause_time_due_to_rate_limit", - needs => Needs, - time_in_ms => Time - }), + case emqx_limiter_container:check_list(Needs, Limiter) of + {ok, Limiter2} -> + WhenOk(Data, Msgs, State#state{limiter = Limiter2}); + {pause, Time, Limiter2} -> + ?SLOG(debug, #{ + msg => "pause_time_due_to_rate_limit", + needs => Needs, + time_in_ms => Time + }), - Retry = #retry{ - types = [Type || {_, Type} <- Needs], - data = Data, - next = WhenOk - }, + Retry = #retry{ + types = [Type || {_, Type} <- Needs], + data = Data, + next = WhenOk + }, - Limiter3 = 
emqx_limiter_container:set_retry_context(Retry, Limiter2), + Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), - TRef = start_timer(Time, limit_timeout), + TRef = start_timer(Time, limit_timeout), - enqueue( - {active, false}, - State#state{ - sockstate = blocked, - limiter = Limiter3, - limiter_timer = TRef - } - ); - {drop, Limiter2} -> - {ok, State#state{limiter = Limiter2}} - end; - _ -> - New = #cache{need = Needs, data = Data, next = WhenOk}, - State#state{limiter_cache = queue:in(New, Cache)} - end. + enqueue( + {active, false}, + State#state{ + sockstate = blocked, + limiter = Limiter3, + limiter_timer = TRef + } + ); + {drop, Limiter2} -> + {ok, State#state{limiter = Limiter2}} + end; +check_limiter( + Needs, + Data, + WhenOk, + _Msgs, + #state{limiter_cache = Cache} = State +) -> + New = #cache{need = Needs, data = Data, next = WhenOk}, + State#state{limiter_cache = queue:in(New, Cache)}. -spec retry_limiter(state()) -> state(). retry_limiter(#state{limiter = Limiter} = State) -> diff --git a/apps/emqx/test/emqx_SUITE.erl b/apps/emqx/test/emqx_SUITE.erl index 09d5d8017..64ed2ea19 100644 --- a/apps/emqx/test/emqx_SUITE.erl +++ b/apps/emqx/test/emqx_SUITE.erl @@ -148,6 +148,14 @@ t_run_hook(_) -> ?assertEqual(3, emqx:run_fold_hook(foldl_filter2_hook, [arg], 1)), ?assertEqual(2, emqx:run_fold_hook(foldl_filter2_hook, [arg1], 1)). +t_cluster_nodes(_) -> + Expected = [node()], + ?assertEqual(Expected, emqx:running_nodes()), + ?assertEqual(Expected, emqx:cluster_nodes(running)), + ?assertEqual(Expected, emqx:cluster_nodes(all)), + ?assertEqual(Expected, emqx:cluster_nodes(cores)), + ?assertEqual([], emqx:cluster_nodes(stopped)). 
+ %%-------------------------------------------------------------------- %% Hook fun %%-------------------------------------------------------------------- diff --git a/apps/emqx/test/emqx_banned_SUITE.erl b/apps/emqx/test/emqx_banned_SUITE.erl index 0c14f64c9..9419ba4c3 100644 --- a/apps/emqx/test/emqx_banned_SUITE.erl +++ b/apps/emqx/test/emqx_banned_SUITE.erl @@ -186,7 +186,7 @@ t_session_taken(_) -> false end end, - 6000 + 15_000 ), Publish(), diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index 29f8b1503..2b7280b32 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -31,7 +31,7 @@ force_gc_conf() -> #{bytes => 16777216, count => 16000, enable => true}. force_shutdown_conf() -> - #{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}. + #{enable => true, max_heap_size => 4194304, max_mailbox_size => 1000}. rpc_conf() -> #{ @@ -162,8 +162,7 @@ limiter_conf() -> Make = fun() -> #{ burst => 0, - rate => infinity, - capacity => infinity + rate => infinity } end, @@ -172,7 +171,7 @@ limiter_conf() -> Acc#{Name => Make()} end, #{}, - [bytes_in, message_in, message_routing, connection, internal] + [bytes, messages, message_routing, connection, internal] ). stats_conf() -> @@ -268,13 +267,14 @@ t_chan_info(_) -> t_chan_caps(_) -> ?assertMatch( #{ + exclusive_subscription := false, + max_packet_size := 1048576, max_clientid_len := 65535, max_qos_allowed := 2, max_topic_alias := 65535, max_topic_levels := Level, retain_available := true, shared_subscription := true, - subscription_identifiers := true, wildcard_subscription := true } when is_integer(Level), emqx_channel:caps(channel()) @@ -1258,7 +1258,7 @@ limiter_cfg() -> Client = #{ rate => 5, initial => 0, - capacity => 5, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), @@ -1270,7 +1270,7 @@ limiter_cfg() -> }. 
bucket_cfg() -> - #{rate => 10, initial => 0, capacity => 10}. + #{rate => 10, initial => 0, burst => 0}. add_bucket() -> emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()). diff --git a/apps/emqx/test/emqx_client_SUITE.erl b/apps/emqx/test/emqx_client_SUITE.erl index 82d4038da..ca5f53070 100644 --- a/apps/emqx/test/emqx_client_SUITE.erl +++ b/apps/emqx/test/emqx_client_SUITE.erl @@ -67,7 +67,8 @@ groups() -> %% t_keepalive, %% t_redelivery_on_reconnect, %% subscribe_failure_test, - t_dollar_topics + t_dollar_topics, + t_sub_non_utf8_topic ]}, {mqttv5, [non_parallel_tests], [t_basic_with_props_v5]}, {others, [non_parallel_tests], [ @@ -297,6 +298,36 @@ t_dollar_topics(_) -> ok = emqtt:disconnect(C), ct:pal("$ topics test succeeded"). +t_sub_non_utf8_topic(_) -> + {ok, Socket} = gen_tcp:connect({127, 0, 0, 1}, 1883, [{active, true}, binary]), + ConnPacket = emqx_frame:serialize(#mqtt_packet{ + header = #mqtt_packet_header{type = 1}, + variable = #mqtt_packet_connect{ + clientid = <<"abcdefg">> + } + }), + ok = gen_tcp:send(Socket, ConnPacket), + receive + {tcp, _, _ConnAck = <<32, 2, 0, 0>>} -> ok + after 3000 -> ct:fail({connect_ack_not_recv, process_info(self(), messages)}) + end, + SubHeader = <<130, 18, 25, 178>>, + SubTopicLen = <<0, 13>>, + %% this is not a valid utf8 topic + SubTopic = <<128, 10, 10, 12, 178, 159, 162, 47, 115, 1, 1, 1, 1>>, + SubQoS = <<1>>, + SubPacket = <>, + ok = gen_tcp:send(Socket, SubPacket), + receive + {tcp_closed, _} -> ok + after 3000 -> ct:fail({should_get_disconnected, process_info(self(), messages)}) + end, + timer:sleep(1000), + ListenerCounts = emqx_listeners:shutdown_count('tcp:default', {{0, 0, 0, 0}, 1883}), + TopicInvalidCount = proplists:get_value(topic_filter_invalid, ListenerCounts), + ?assert(is_integer(TopicInvalidCount) andalso TopicInvalidCount > 0), + ok. 
+ %%-------------------------------------------------------------------- %% Test cases for MQTT v5 %%-------------------------------------------------------------------- diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index 26d908c5e..61373f638 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -55,12 +55,12 @@ is_tcp_server_available/2, is_tcp_server_available/3, load_config/2, - load_config/3, not_wait_mqtt_payload/1, read_schema_configs/2, render_config_file/2, wait_for/4, - wait_mqtt_payload/1 + wait_mqtt_payload/1, + select_free_port/1 ]). -export([ @@ -280,6 +280,7 @@ app_schema(App) -> mustache_vars(App, Opts) -> ExtraMustacheVars = maps:get(extra_mustache_vars, Opts, #{}), Defaults = #{ + node_cookie => atom_to_list(erlang:get_cookie()), platform_data_dir => app_path(App, "data"), platform_etc_dir => app_path(App, "etc") }, @@ -497,18 +498,14 @@ copy_certs(emqx_conf, Dest0) -> copy_certs(_, _) -> ok. -load_config(SchemaModule, Config, Opts) -> +load_config(SchemaModule, Config) -> ConfigBin = case is_map(Config) of true -> emqx_utils_json:encode(Config); false -> Config end, ok = emqx_config:delete_override_conf_files(), - ok = emqx_config:init_load(SchemaModule, ConfigBin, Opts), - ok. - -load_config(SchemaModule, Config) -> - load_config(SchemaModule, Config, #{raw_with_default => true}). + ok = emqx_config:init_load(SchemaModule, ConfigBin). 
-spec is_all_tcp_servers_available(Servers) -> Result when Servers :: [{Host, Port}], @@ -684,6 +681,7 @@ start_slave(Name, Opts) when is_map(Opts) -> SlaveMod = maps:get(peer_mod, Opts, ct_slave), Node = node_name(Name), put_peer_mod(Node, SlaveMod), + Cookie = atom_to_list(erlang:get_cookie()), DoStart = fun() -> case SlaveMod of @@ -695,7 +693,11 @@ start_slave(Name, Opts) when is_map(Opts) -> {monitor_master, true}, {init_timeout, 20_000}, {startup_timeout, 20_000}, - {erl_flags, erl_flags()} + {erl_flags, erl_flags()}, + {env, [ + {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"}, + {"EMQX_NODE__COOKIE", Cookie} + ]} ] ); slave -> @@ -782,6 +784,7 @@ setup_node(Node, Opts) when is_map(Opts) -> load_apps => LoadApps, apps => Apps, env => Env, + join_to => JoinTo, start_apps => StartApps } ] @@ -1259,3 +1262,34 @@ get_or_spawn_janitor() -> on_exit(Fun) -> Janitor = get_or_spawn_janitor(), ok = emqx_test_janitor:push_on_exit_callback(Janitor, Fun). + +%%------------------------------------------------------------------------------- +%% Select a free transport port from the OS +%%------------------------------------------------------------------------------- +%% @doc get unused port from OS +-spec select_free_port(tcp | udp | ssl | quic) -> inets:port_number(). +select_free_port(tcp) -> + select_free_port(gen_tcp, listen); +select_free_port(udp) -> + select_free_port(gen_udp, open); +select_free_port(ssl) -> + select_free_port(tcp); +select_free_port(quic) -> + select_free_port(udp). + +select_free_port(GenModule, Fun) when + GenModule == gen_tcp orelse + GenModule == gen_udp +-> + {ok, S} = GenModule:Fun(0, [{reuseaddr, true}]), + {ok, Port} = inet:port(S), + ok = GenModule:close(S), + case os:type() of + {unix, darwin} -> + %% in MacOS, still get address_in_use after close port + timer:sleep(500); + _ -> + skip + end, + ct:pal("Select free OS port: ~p", [Port]), + Port. 
diff --git a/apps/emqx/test/emqx_config_SUITE.erl b/apps/emqx/test/emqx_config_SUITE.erl index 959c07dda..a55531c2d 100644 --- a/apps/emqx/test/emqx_config_SUITE.erl +++ b/apps/emqx/test/emqx_config_SUITE.erl @@ -19,6 +19,7 @@ -compile(export_all). -compile(nowarn_export_all). -include_lib("eunit/include/eunit.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). all() -> emqx_common_test_helpers:all(?MODULE). @@ -50,7 +51,6 @@ t_fill_default_values(_) -> }, <<"route_batch_clean">> := false, <<"session_locking_strategy">> := quorum, - <<"shared_dispatch_ack_enabled">> := false, <<"shared_subscription_strategy">> := round_robin } }, @@ -78,3 +78,21 @@ t_init_load(_Config) -> ?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())), ?assertMatch({ok, #{raw_config := 128}}, emqx:update_config([mqtt, max_topic_levels], 128)), ok = file:delete(DeprecatedFile). + +t_unknown_rook_keys(_) -> + ?check_trace( + #{timetrap => 1000}, + begin + ok = emqx_config:init_load( + emqx_schema, <<"test_1 {}\n test_2 {sub = 100}\n listeners {}">> + ), + ?block_until(#{?snk_kind := unknown_config_keys}) + end, + fun(Trace) -> + ?assertMatch( + [#{unknown_config_keys := "test_1,test_2"}], + ?of_kind(unknown_config_keys, Trace) + ) + end + ), + ok. 
diff --git a/apps/emqx/test/emqx_config_handler_SUITE.erl b/apps/emqx/test/emqx_config_handler_SUITE.erl index deeee8d62..e21c1867f 100644 --- a/apps/emqx/test/emqx_config_handler_SUITE.erl +++ b/apps/emqx/test/emqx_config_handler_SUITE.erl @@ -177,7 +177,9 @@ t_sub_key_update_remove(_Config) -> {ok, #{post_config_update => #{emqx_config_handler_SUITE => ok}}}, emqx:remove_config(KeyPath) ), - ?assertError({config_not_found, KeyPath}, emqx:get_raw_config(KeyPath)), + ?assertError( + {config_not_found, [<<"sysmon">>, os, cpu_check_interval]}, emqx:get_raw_config(KeyPath) + ), OSKey = maps:keys(emqx:get_raw_config([sysmon, os])), ?assertEqual(false, lists:member(<<"cpu_check_interval">>, OSKey)), ?assert(length(OSKey) > 0), diff --git a/apps/emqx/test/emqx_connection_SUITE.erl b/apps/emqx/test/emqx_connection_SUITE.erl index 21ed45119..0692ec8f5 100644 --- a/apps/emqx/test/emqx_connection_SUITE.erl +++ b/apps/emqx/test/emqx_connection_SUITE.erl @@ -38,8 +38,6 @@ init_per_suite(Config) -> ok = meck:new(emqx_cm, [passthrough, no_history, no_link]), ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end), - %% Meck Limiter - ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), %% Meck Pd ok = meck:new(emqx_pd, [passthrough, no_history, no_link]), %% Meck Metrics @@ -67,7 +65,6 @@ end_per_suite(_Config) -> ok = meck:unload(emqx_transport), catch meck:unload(emqx_channel), ok = meck:unload(emqx_cm), - ok = meck:unload(emqx_htb_limiter), ok = meck:unload(emqx_pd), ok = meck:unload(emqx_metrics), ok = meck:unload(emqx_hooks), @@ -421,20 +418,28 @@ t_ensure_rate_limit(_) -> {ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})), ?assertEqual(Limiter, emqx_connection:info(limiter, State1)), + ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), + + ok = meck:expect( + emqx_htb_limiter, + make_infinity_limiter, + fun() -> 
non_infinity end + ), + ok = meck:expect( emqx_htb_limiter, check, fun(_, Client) -> {pause, 3000, undefined, Client} end ), {ok, State2} = emqx_connection:check_limiter( - [{1000, bytes_in}], + [{1000, bytes}], [], WhenOk, [], - st(#{limiter => Limiter}) + st(#{limiter => init_limiter()}) ), meck:unload(emqx_htb_limiter), - ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), + ?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)). t_activate_socket(_) -> @@ -495,6 +500,7 @@ t_get_conn_info(_) -> end). t_oom_shutdown(init, Config) -> + ok = snabbkaffe:stop(), ok = snabbkaffe:start_trace(), ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]), meck:expect( @@ -703,31 +709,32 @@ handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St). -define(LIMITER_ID, 'tcp:default'). init_limiter() -> - emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()). + emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], limiter_cfg()). limiter_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), Cfg = bucket_cfg(), - Client = #{ - rate => Infinity, + Client = client_cfg(), + #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. + +bucket_cfg() -> + #{rate => infinity, initial => 0, burst => 0}. + +client_cfg() -> + #{ + rate => infinity, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force - }, - #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}. - -bucket_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), - #{rate => Infinity, initial => 0, capacity => Infinity}. + }. add_bucket() -> Cfg = bucket_cfg(), - emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), - emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). 
+ emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg), + emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg). del_bucket() -> - emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), - emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). + emqx_limiter_server:del_bucket(?LIMITER_ID, bytes), + emqx_limiter_server:del_bucket(?LIMITER_ID, messages). diff --git a/apps/emqx/test/emqx_crl_cache_SUITE.erl b/apps/emqx/test/emqx_crl_cache_SUITE.erl index dd3eb29e7..1b8abb9c3 100644 --- a/apps/emqx/test/emqx_crl_cache_SUITE.erl +++ b/apps/emqx/test/emqx_crl_cache_SUITE.erl @@ -35,6 +35,7 @@ all() -> init_per_suite(Config) -> application:load(emqx), + {ok, _} = application:ensure_all_started(ssl), emqx_config:save_schema_mod_and_names(emqx_schema), emqx_common_test_helpers:boot_modules(all), Config. @@ -328,7 +329,15 @@ drain_msgs() -> clear_crl_cache() -> %% reset the CRL cache + Ref = monitor(process, whereis(ssl_manager)), exit(whereis(ssl_manager), kill), + receive + {'DOWN', Ref, process, _, _} -> + ok + after 1_000 -> + ct:fail("ssl_manager didn't die") + end, + ensure_ssl_manager_alive(), ok. force_cacertfile(Cacertfile) -> @@ -382,7 +391,6 @@ setup_crl_options(Config, #{is_cached := IsCached} = Opts) -> false -> %% ensure cache is empty clear_crl_cache(), - ct:sleep(200), ok end, drain_msgs(), @@ -459,6 +467,13 @@ of_kinds(Trace0, Kinds0) -> Trace0 ). +ensure_ssl_manager_alive() -> + ?retry( + _Sleep0 = 200, + _Attempts0 = 50, + true = is_pid(whereis(ssl_manager)) + ). 
+ %%-------------------------------------------------------------------- %% Test cases %%-------------------------------------------------------------------- diff --git a/apps/emqx/test/emqx_listeners_SUITE.erl b/apps/emqx/test/emqx_listeners_SUITE.erl index 8d965e8dd..fa0713cf0 100644 --- a/apps/emqx/test/emqx_listeners_SUITE.erl +++ b/apps/emqx/test/emqx_listeners_SUITE.erl @@ -47,13 +47,14 @@ init_per_testcase(Case, Config) when Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp -> catch emqx_config_handler:stop(), + Port = emqx_common_test_helpers:select_free_port(tcp), {ok, _} = emqx_config_handler:start_link(), PrevListeners = emqx_config:get([listeners], #{}), PureListeners = remove_default_limiter(PrevListeners), PureListeners2 = PureListeners#{ tcp => #{ listener_test => #{ - bind => {"127.0.0.1", 9999}, + bind => {"127.0.0.1", Port}, max_connections => 4321, limiter => #{} } @@ -63,19 +64,20 @@ init_per_testcase(Case, Config) when ok = emqx_listeners:start(), [ - {prev_listener_conf, PrevListeners} + {prev_listener_conf, PrevListeners}, + {tcp_port, Port} | Config ]; init_per_testcase(t_wss_conn, Config) -> catch emqx_config_handler:stop(), + Port = emqx_common_test_helpers:select_free_port(ssl), {ok, _} = emqx_config_handler:start_link(), - PrevListeners = emqx_config:get([listeners], #{}), PureListeners = remove_default_limiter(PrevListeners), PureListeners2 = PureListeners#{ wss => #{ listener_test => #{ - bind => {{127, 0, 0, 1}, 9998}, + bind => {{127, 0, 0, 1}, Port}, limiter => #{}, ssl_options => #{ cacertfile => ?CERTS_PATH("cacert.pem"), @@ -89,7 +91,8 @@ init_per_testcase(t_wss_conn, Config) -> ok = emqx_listeners:start(), [ - {prev_listener_conf, PrevListeners} + {prev_listener_conf, PrevListeners}, + {wss_port, Port} | Config ]; init_per_testcase(_, Config) -> @@ -171,20 +174,30 @@ t_restart_listeners_with_hibernate_after_disabled(_Config) -> ok = emqx_listeners:stop(), emqx_config:put([listeners], OldLConf). 
-t_max_conns_tcp(_) -> +t_max_conns_tcp(Config) -> %% Note: Using a string representation for the bind address like %% "127.0.0.1" does not work - ?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). + ?assertEqual( + 4321, + emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, ?config(tcp_port, Config)}) + ). -t_current_conns_tcp(_) -> - ?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). +t_current_conns_tcp(Config) -> + ?assertEqual( + 0, + emqx_listeners:current_conns('tcp:listener_test', { + {127, 0, 0, 1}, ?config(tcp_port, Config) + }) + ). -t_wss_conn(_) -> - {ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000), +t_wss_conn(Config) -> + {ok, Socket} = ssl:connect( + {127, 0, 0, 1}, ?config(wss_port, Config), [{verify, verify_none}], 1000 + ), ok = ssl:close(Socket). t_quic_conn(Config) -> - Port = 24568, + Port = emqx_common_test_helpers:select_free_port(quic), DataDir = ?config(data_dir, Config), SSLOpts = #{ password => ?SERVER_KEY_PASSWORD, @@ -207,7 +220,7 @@ t_quic_conn(Config) -> emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}). t_ssl_password_cert(Config) -> - Port = 24568, + Port = emqx_common_test_helpers:select_free_port(ssl), DataDir = ?config(data_dir, Config), SSLOptsPWD = #{ password => ?SERVER_KEY_PASSWORD, diff --git a/apps/emqx/test/emqx_mqtt_caps_SUITE.erl b/apps/emqx/test/emqx_mqtt_caps_SUITE.erl index 2ee4b5ffd..297ee7f7d 100644 --- a/apps/emqx/test/emqx_mqtt_caps_SUITE.erl +++ b/apps/emqx/test/emqx_mqtt_caps_SUITE.erl @@ -22,7 +22,16 @@ -include_lib("emqx/include/emqx_mqtt.hrl"). -include_lib("eunit/include/eunit.hrl"). -all() -> emqx_common_test_helpers:all(?MODULE). +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_common_test_helpers:start_apps([]), + Config. + +end_per_suite(_Config) -> + emqx_common_test_helpers:stop_apps([]), + ok. 
t_check_pub(_) -> OldConf = emqx:get_config([zones], #{}), diff --git a/apps/emqx/test/emqx_quic_multistreams_SUITE.erl b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl index 4afd965bd..b55a28206 100644 --- a/apps/emqx/test/emqx_quic_multistreams_SUITE.erl +++ b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl @@ -2026,18 +2026,7 @@ stop_emqx() -> %% select a random port picked by OS -spec select_port() -> inet:port_number(). select_port() -> - {ok, S} = gen_udp:open(0, [{reuseaddr, true}]), - {ok, {_, Port}} = inet:sockname(S), - gen_udp:close(S), - case os:type() of - {unix, darwin} -> - %% in MacOS, still get address_in_use after close port - timer:sleep(500); - _ -> - skip - end, - ct:pal("select port: ~p", [Port]), - Port. + emqx_common_test_helpers:select_free_port(quic). -spec via_stream({quic, quicer:connection_handle(), quicer:stream_handle()}) -> quicer:stream_handle(). diff --git a/apps/emqx/test/emqx_ratelimiter_SUITE.erl b/apps/emqx/test/emqx_ratelimiter_SUITE.erl index f3b97d517..67ed8e6bc 100644 --- a/apps/emqx/test/emqx_ratelimiter_SUITE.erl +++ b/apps/emqx/test/emqx_ratelimiter_SUITE.erl @@ -38,6 +38,7 @@ -define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)). -define(RATE(Rate), to_rate(Rate)). -define(NOW, erlang:system_time(millisecond)). +-define(ROOT_COUNTER_IDX, 1). 
%%-------------------------------------------------------------------- %% Setups @@ -72,7 +73,7 @@ t_consume(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 100, - capacity := 100, + burst := 0, initial := 100, max_retry_time := 1000, failure_strategy := force @@ -89,7 +90,7 @@ t_retry(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 50, - capacity := 200, + burst := 150, initial := 0, max_retry_time := 1000, failure_strategy := force @@ -109,7 +110,7 @@ t_restore(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 1, - capacity := 200, + burst := 199, initial := 50, max_retry_time := 100, failure_strategy := force @@ -129,7 +130,7 @@ t_max_retry_time(_) -> Cfg = fun(Cfg) -> Cfg#{ rate := 1, - capacity := 1, + burst := 0, max_retry_time := 500, failure_strategy := drop } @@ -139,8 +140,12 @@ t_max_retry_time(_) -> Begin = ?NOW, Result = emqx_htb_limiter:consume(101, Client), ?assertMatch({drop, _}, Result), - Time = ?NOW - Begin, - ?assert(Time >= 500 andalso Time < 550) + End = ?NOW, + Time = End - Begin, + ?assert( + Time >= 500 andalso Time < 550, + lists:flatten(io_lib:format("Begin:~p, End:~p, Time:~p~n", [Begin, End, Time])) + ) end, with_per_client(Cfg, Case). @@ -150,7 +155,7 @@ t_divisible(_) -> divisible := true, rate := ?RATE("1000/1s"), initial := 600, - capacity := 600 + burst := 0 } end, Case = fun(BucketCfg) -> @@ -176,7 +181,7 @@ t_low_watermark(_) -> low_watermark := 400, rate := ?RATE("1000/1s"), initial := 1000, - capacity := 1000 + burst := 0 } end, Case = fun(BucketCfg) -> @@ -201,23 +206,22 @@ t_infinity_client(_) -> Fun = fun(Cfg) -> Cfg end, Case = fun(Cfg) -> Client = connect(Cfg), - InfVal = emqx_limiter_schema:infinity_value(), - ?assertMatch(#{bucket := #{rate := InfVal}}, Client), + ?assertMatch(infinity, Client), Result = emqx_htb_limiter:check(100000, Client), ?assertEqual({ok, Client}, Result) end, with_per_client(Fun, Case). 
-t_try_restore_agg(_) -> +t_try_restore_with_bucket(_) -> Fun = fun(#{client := Cli} = Bucket) -> Bucket2 = Bucket#{ - rate := 1, - capacity := 200, + rate := 100, + burst := 100, initial := 50 }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := 0, divisible := true, max_retry_time := 100, failure_strategy := force @@ -239,11 +243,11 @@ t_short_board(_) -> Bucket2 = Bucket#{ rate := ?RATE("100/1s"), initial := 0, - capacity := 100 + burst := 0 }, Cli2 = Cli#{ rate := ?RATE("600/1s"), - capacity := 600, + burst := 0, initial := 600 }, Bucket2#{client := Cli2} @@ -261,46 +265,45 @@ t_rate(_) -> Bucket2 = Bucket#{ rate := ?RATE("100/100ms"), initial := 0, - capacity := infinity + burst := 0 }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := 0, initial := 0 }, Bucket2#{client := Cli2} end, Case = fun(Cfg) -> + Time = 1000, Client = connect(Cfg), - Ts1 = erlang:system_time(millisecond), C1 = emqx_htb_limiter:available(Client), - timer:sleep(1000), - Ts2 = erlang:system_time(millisecond), + timer:sleep(1100), C2 = emqx_htb_limiter:available(Client), - ShouldInc = floor((Ts2 - Ts1) / 100) * 100, + ShouldInc = floor(Time / 100) * 100, Inc = C2 - C1, ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate") end, with_bucket(Fun, Case). 
t_capacity(_) -> - Capacity = 600, + Capacity = 1200, Fun = fun(#{client := Cli} = Bucket) -> Bucket2 = Bucket#{ rate := ?RATE("100/100ms"), initial := 0, - capacity := 600 + burst := 200 }, Cli2 = Cli#{ rate := infinity, - capacity := infinity, + burst := 0, initial := 0 }, Bucket2#{client := Cli2} end, Case = fun(Cfg) -> Client = connect(Cfg), - timer:sleep(1000), + timer:sleep(1500), C1 = emqx_htb_limiter:available(Client), ?assertEqual(Capacity, C1, "test bucket capacity") end, @@ -318,11 +321,11 @@ t_collaborative_alloc(_) -> Bucket2 = Bucket#{ rate := ?RATE("400/1s"), initial := 0, - capacity := 600 + burst := 200 }, Cli2 = Cli#{ rate := ?RATE("50"), - capacity := 100, + burst := 50, initial := 100 }, Bucket2#{client := Cli2} @@ -363,11 +366,11 @@ t_burst(_) -> Bucket2 = Bucket#{ rate := ?RATE("200/1s"), initial := 0, - capacity := 200 + burst := 0 }, Cli2 = Cli#{ rate := ?RATE("50/1s"), - capacity := 200, + burst := 150, divisible := true }, Bucket2#{client := Cli2} @@ -392,38 +395,6 @@ t_burst(_) -> Case ). -t_limit_global_with_unlimit_other(_) -> - GlobalMod = fun(#{message_routing := MR} = Cfg) -> - Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}} - end, - - Bucket = fun(#{client := Cli} = Bucket) -> - Bucket2 = Bucket#{ - rate := infinity, - initial := 0, - capacity := infinity - }, - Cli2 = Cli#{ - rate := infinity, - capacity := infinity, - initial := 0 - }, - Bucket2#{client := Cli2} - end, - - Case = fun() -> - C1 = counters:new(1, []), - start_client({b1, Bucket}, ?NOW + 2000, C1, 20), - timer:sleep(2100), - check_average_rate(C1, 2, 600) - end, - - with_global( - GlobalMod, - [{b1, Bucket}], - Case - ). 
- %%-------------------------------------------------------------------- %% Test Cases container %%-------------------------------------------------------------------- @@ -432,7 +403,7 @@ t_check_container(_) -> Cfg#{ rate := ?RATE("1000/1s"), initial := 1000, - capacity := 1000 + burst := 0 } end, Case = fun(#{client := Client} = BucketCfg) -> @@ -452,38 +423,6 @@ t_check_container(_) -> end, with_per_client(Cfg, Case). -%%-------------------------------------------------------------------- -%% Test Override -%%-------------------------------------------------------------------- -t_bucket_no_client(_) -> - Rate = ?RATE("1/s"), - GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) -> - Cfg#{client := Client#{message_routing := MR#{rate := Rate}}} - end, - BucketMod = fun(Bucket) -> - maps:remove(client, Bucket) - end, - Case = fun() -> - Limiter = connect(BucketMod(make_limiter_cfg())), - ?assertMatch(#{rate := Rate}, Limiter) - end, - with_global(GlobalMod, [BucketMod], Case). - -t_bucket_client(_) -> - GlobalRate = ?RATE("1/s"), - BucketRate = ?RATE("10/s"), - GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) -> - Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}} - end, - BucketMod = fun(#{client := Client} = Bucket) -> - Bucket#{client := Client#{rate := BucketRate}} - end, - Case = fun() -> - Limiter = connect(BucketMod(make_limiter_cfg())), - ?assertMatch(#{rate := BucketRate}, Limiter) - end, - with_global(GlobalMod, [BucketMod], Case). 
- %%-------------------------------------------------------------------- %% Test Cases misc %%-------------------------------------------------------------------- @@ -565,13 +504,241 @@ t_schema_unit(_) -> ?assertMatch({error, _}, M:to_rate("100MB/1")), ?assertMatch({error, _}, M:to_rate("100/10x")), - ?assertEqual({ok, emqx_limiter_schema:infinity_value()}, M:to_capacity("infinity")), + ?assertEqual({ok, infinity}, M:to_capacity("infinity")), ?assertEqual({ok, 100}, M:to_capacity("100")), ?assertEqual({ok, 100 * 1024}, M:to_capacity("100KB")), ?assertEqual({ok, 100 * 1024 * 1024}, M:to_capacity("100MB")), ?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")), ok. +t_compatibility_for_capacity(_) -> + CfgStr = << + "" + "\n" + "listeners.tcp.default {\n" + " bind = \"0.0.0.0:1883\"\n" + " max_connections = 1024000\n" + " limiter.messages.capacity = infinity\n" + " limiter.client.messages.capacity = infinity\n" + "}\n" + "" + >>, + ?assertMatch( + #{ + messages := #{burst := 0}, + client := #{messages := #{burst := 0}} + }, + parse_and_check(CfgStr) + ). + +t_compatibility_for_message_in(_) -> + CfgStr = << + "" + "\n" + "listeners.tcp.default {\n" + " bind = \"0.0.0.0:1883\"\n" + " max_connections = 1024000\n" + " limiter.message_in.rate = infinity\n" + " limiter.client.message_in.rate = infinity\n" + "}\n" + "" + >>, + ?assertMatch( + #{ + messages := #{rate := infinity}, + client := #{messages := #{rate := infinity}} + }, + parse_and_check(CfgStr) + ). + +t_compatibility_for_bytes_in(_) -> + CfgStr = << + "" + "\n" + "listeners.tcp.default {\n" + " bind = \"0.0.0.0:1883\"\n" + " max_connections = 1024000\n" + " limiter.bytes_in.rate = infinity\n" + " limiter.client.bytes_in.rate = infinity\n" + "}\n" + "" + >>, + ?assertMatch( + #{ + bytes := #{rate := infinity}, + client := #{bytes := #{rate := infinity}} + }, + parse_and_check(CfgStr) + ). 
+ +t_extract_with_type(_) -> + IsOnly = fun + (_Key, Cfg) when map_size(Cfg) =/= 1 -> + false; + (Key, Cfg) -> + maps:is_key(Key, Cfg) + end, + Checker = fun + (Type, #{client := Client} = Cfg) -> + Cfg2 = maps:remove(client, Cfg), + IsOnly(Type, Client) andalso + (IsOnly(Type, Cfg2) orelse + map_size(Cfg2) =:= 0); + (Type, Cfg) -> + IsOnly(Type, Cfg) + end, + ?assertEqual(undefined, emqx_limiter_schema:extract_with_type(messages, undefined)), + ?assert( + Checker( + messages, + emqx_limiter_schema:extract_with_type(messages, #{ + messages => #{rate => 1}, bytes => #{rate => 1} + }) + ) + ), + ?assert( + Checker( + messages, + emqx_limiter_schema:extract_with_type(messages, #{ + messages => #{rate => 1}, + bytes => #{rate => 1}, + client => #{messages => #{rate => 2}} + }) + ) + ), + ?assert( + Checker( + messages, + emqx_limiter_schema:extract_with_type(messages, #{ + client => #{messages => #{rate => 2}, bytes => #{rate => 1}} + }) + ) + ). + +%%-------------------------------------------------------------------- +%% Test Cases Create Instance +%%-------------------------------------------------------------------- +t_create_instance_with_infinity_node(_) -> + emqx_limiter_manager:insert_bucket(?FUNCTION_NAME, bytes, ?FUNCTION_NAME), + Cases = make_create_test_data_with_infinity_node(?FUNCTION_NAME), + lists:foreach( + fun({Cfg, Expected}) -> + {ok, Result} = emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg), + IsMatched = + case is_atom(Expected) of + true -> + Result =:= Expected; + _ -> + Expected(Result) + end, + ?assert( + IsMatched, + lists:flatten( + io_lib:format("Got unexpected:~p~n, Cfg:~p~n", [ + Result, Cfg + ]) + ) + ) + end, + Cases + ), + emqx_limiter_manager:delete_bucket(?FUNCTION_NAME, bytes), + ok. 
+ +t_not_exists_instance(_) -> + Cfg = #{bytes => #{rate => 100, burst => 0, initial => 0}}, + ?assertEqual( + {error, invalid_bucket}, + emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg) + ), + + ?assertEqual( + {error, invalid_bucket}, + emqx_limiter_server:connect(?FUNCTION_NAME, not_exists, Cfg) + ), + ok. + +t_create_instance_with_node(_) -> + GlobalMod = fun(#{message_routing := MR} = Cfg) -> + Cfg#{ + message_routing := MR#{rate := ?RATE("200/1s")}, + messages := MR#{rate := ?RATE("200/1s")} + } + end, + + B1 = fun(Bucket) -> + Bucket#{rate := ?RATE("400/1s")} + end, + + B2 = fun(Bucket) -> + Bucket#{rate := infinity} + end, + + IsRefLimiter = fun + ({ok, #{tokens := _}}, _IsRoot) -> + false; + ({ok, #{bucket := #{index := ?ROOT_COUNTER_IDX}}}, true) -> + true; + ({ok, #{bucket := #{index := Index}}}, false) when Index =/= ?ROOT_COUNTER_IDX -> + true; + (Result, _IsRoot) -> + ct:pal("The result is:~p~n", [Result]), + false + end, + + Case = fun() -> + BucketCfg = make_limiter_cfg(), + + ?assert( + IsRefLimiter(emqx_limiter_server:connect(b1, message_routing, B1(BucketCfg)), false) + ), + ?assert( + IsRefLimiter(emqx_limiter_server:connect(b2, message_routing, B2(BucketCfg)), true) + ), + ?assert(IsRefLimiter(emqx_limiter_server:connect(x, messages, undefined), true)), + ?assertNot(IsRefLimiter(emqx_limiter_server:connect(x, bytes, undefined), false)) + end, + + with_global( + GlobalMod, + [{b1, B1}, {b2, B2}], + Case + ), + ok. 
+ +%%-------------------------------------------------------------------- +%% Test Cases emqx_esockd_htb_limiter +%%-------------------------------------------------------------------- +t_create_esockd_htb_limiter(_) -> + Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, undefined), + ?assertMatch( + #{module := _, id := ?FUNCTION_NAME, type := bytes, bucket := undefined}, + Opts + ), + + Limiter = emqx_esockd_htb_limiter:create(Opts), + ?assertMatch( + #{module := _, name := bytes, limiter := infinity}, + Limiter + ), + + ?assertEqual(ok, emqx_esockd_htb_limiter:delete(Limiter)), + ok. + +t_esockd_htb_consume(_) -> + ClientCfg = emqx_limiter_schema:default_client_config(), + Cfg = #{client => #{bytes => ClientCfg#{rate := 50, max_retry_time := 0}}}, + Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, Cfg), + Limiter = emqx_esockd_htb_limiter:create(Opts), + + C1R = emqx_esockd_htb_limiter:consume(51, Limiter), + ?assertMatch({pause, _Ms, _Limiter2}, C1R), + + timer:sleep(300), + C2R = emqx_esockd_htb_limiter:consume(50, Limiter), + ?assertMatch({ok, _}, C2R), + ok. + %%-------------------------------------------------------------------- %%% Internal functions %%-------------------------------------------------------------------- @@ -748,17 +915,16 @@ connect(Name, Cfg) -> Limiter. make_limiter_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), Client = #{ - rate => Infinity, + rate => infinity, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 0, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force }, - #{client => Client, rate => Infinity, initial => 0, capacity => Infinity}. + #{client => Client, rate => infinity, initial => 0, burst => 0}. add_bucket(Cfg) -> add_bucket(?MODULE, Cfg). @@ -812,3 +978,68 @@ apply_modifier(Pairs, #{default := Template}) -> Acc#{N => M(Template)} end, lists:foldl(Fun, #{}, Pairs). 
+ +parse_and_check(ConfigString) -> + ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString), + emqx:get_config([listeners, tcp, default, limiter]). + +make_create_test_data_with_infinity_node(FakeInstnace) -> + Infinity = emqx_htb_limiter:make_infinity_limiter(), + ClientCfg = emqx_limiter_schema:default_client_config(), + InfinityRef = emqx_limiter_bucket_ref:infinity_bucket(), + MkC = fun(Rate) -> + #{client => #{bytes => ClientCfg#{rate := Rate}}} + end, + MkB = fun(Rate) -> + #{bytes => #{rate => Rate, burst => 0, initial => 0}} + end, + + MkA = fun(Client, Bucket) -> + maps:merge(MkC(Client), MkB(Bucket)) + end, + IsRefLimiter = fun(Expected) -> + fun + (#{tokens := _}) -> false; + (#{bucket := Bucket}) -> Bucket =:= Expected; + (_) -> false + end + end, + + IsTokenLimiter = fun(Expected) -> + fun + (#{tokens := _, bucket := Bucket}) -> Bucket =:= Expected; + (_) -> false + end + end, + + [ + %% default situation, no limiter setting + {undefined, Infinity}, + + %% client = undefined bucket = undefined + {#{}, Infinity}, + %% client = undefined bucket = infinity + {MkB(infinity), Infinity}, + %% client = undefined bucket = other + {MkB(100), IsRefLimiter(FakeInstnace)}, + + %% client = infinity bucket = undefined + {MkC(infinity), Infinity}, + %% client = infinity bucket = infinity + {MkA(infinity, infinity), Infinity}, + + %% client = infinity bucket = other + {MkA(infinity, 100), IsRefLimiter(FakeInstnace)}, + + %% client = other bucket = undefined + {MkC(100), IsTokenLimiter(InfinityRef)}, + + %% client = other bucket = infinity + {MkC(100), IsTokenLimiter(InfinityRef)}, + + %% client = C bucket = B C < B + {MkA(100, 1000), IsTokenLimiter(FakeInstnace)}, + + %% client = C bucket = B C > B + {MkA(1000, 100), IsRefLimiter(FakeInstnace)} + ]. 
diff --git a/apps/emqx/test/emqx_schema_tests.erl b/apps/emqx/test/emqx_schema_tests.erl index b4bb85a8a..81991f26e 100644 --- a/apps/emqx/test/emqx_schema_tests.erl +++ b/apps/emqx/test/emqx_schema_tests.erl @@ -219,112 +219,124 @@ parse_server_test_() -> ?T( "single server, binary, no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse(<<"localhost">>) ) ), ?T( "single server, string, no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse("localhost") ) ), ?T( "single server, list(string), no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse(["localhost"]) ) ), ?T( "single server, list(binary), no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse([<<"localhost">>]) ) ), ?T( "single server, binary, with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse(<<"localhost:9999">>) ) ), ?T( "single server, list(string), with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse(["localhost:9999"]) ) ), ?T( "single server, string, with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse("localhost:9999") ) ), ?T( "single server, list(binary), with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse([<<"localhost:9999">>]) ) ), ?T( "multiple servers, string, no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse("host1, host2") ) ), ?T( "multiple servers, binary, no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => 
DefaultPort} + ], Parse(<<"host1, host2,,,">>) ) ), ?T( "multiple servers, list(string), no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse(["host1", "host2"]) ) ), ?T( "multiple servers, list(binary), no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse([<<"host1">>, <<"host2">>]) ) ), ?T( "multiple servers, string, with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse("host1:1234, host2:2345") ) ), ?T( "multiple servers, binary, with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse(<<"host1:1234, host2:2345, ">>) ) ), ?T( "multiple servers, list(string), with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse([" host1:1234 ", "host2:2345"]) ) ), ?T( "multiple servers, list(binary), with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse([<<"host1:1234">>, <<"host2:2345">>]) ) ), @@ -350,9 +362,9 @@ parse_server_test_() -> ) ), ?T( - "multiple servers wihtout port, mixed list(binary|string)", + "multiple servers without port, mixed list(binary|string)", ?assertEqual( - ["host1", "host2"], + [#{hostname => "host1"}, #{hostname => "host2"}], Parse2([<<"host1">>, "host2"], #{no_port => true}) ) ), @@ -394,14 +406,18 @@ parse_server_test_() -> ?T( "single server map", ?assertEqual( - [{"host1.domain", 1234}], + [#{hostname => "host1.domain", port => 1234}], HoconParse("host1.domain:1234") ) ), ?T( "multiple servers map", ?assertEqual( - 
[{"host1.domain", 1234}, {"host2.domain", 2345}, {"host3.domain", 3456}], + [ + #{hostname => "host1.domain", port => 1234}, + #{hostname => "host2.domain", port => 2345}, + #{hostname => "host3.domain", port => 3456} + ], HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456") ) ), @@ -447,6 +463,171 @@ parse_server_test_() -> "bad_schema", emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true}) ) + ), + ?T( + "scheme, hostname and port", + ?assertEqual( + #{scheme => "pulsar+ssl", hostname => "host", port => 6651}, + emqx_schema:parse_server( + "pulsar+ssl://host:6651", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, default port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6650}, + emqx_schema:parse_server( + "pulsar://host", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, no port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host"}, + emqx_schema:parse_server( + "pulsar://host", + #{ + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, missing port", + ?assertThrow( + "missing_port_number", + emqx_schema:parse_server( + "pulsar://host", + #{ + no_port => false, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, no default port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host"}, + emqx_schema:parse_server( + "host", + #{ + default_scheme => "pulsar", + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, default port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6650}, + emqx_schema:parse_server( + "host", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "just 
hostname, expecting missing scheme", + ?assertThrow( + "missing_scheme", + emqx_schema:parse_server( + "host", + #{ + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, defined port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6651}, + emqx_schema:parse_server( + "host:6651", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "inconsistent scheme opts", + ?assertError( + "bad_schema", + emqx_schema:parse_server( + "pulsar+ssl://host:6651", + #{ + default_port => 6650, + default_scheme => "something", + supported_schemes => ["not", "supported"] + } + ) + ) + ), + ?T( + "hostname, default scheme, defined port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6651}, + emqx_schema:parse_server( + "host:6651", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "unsupported scheme", + ?assertThrow( + "unsupported_scheme", + emqx_schema:parse_server( + "pulsar+quic://host:6651", + #{ + default_port => 6650, + supported_schemes => ["pulsar"] + } + ) + ) + ), + ?T( + "multiple hostnames with schemes (1)", + ?assertEqual( + [ + #{scheme => "pulsar", hostname => "host", port => 6649}, + #{scheme => "pulsar+ssl", hostname => "other.host", port => 6651}, + #{scheme => "pulsar", hostname => "yet.another", port => 6650} + ], + emqx_schema:parse_servers( + "pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) ) ]. diff --git a/apps/emqx/test/emqx_test_janitor.erl b/apps/emqx/test/emqx_test_janitor.erl index c3f82a3e1..041b03fa7 100644 --- a/apps/emqx/test/emqx_test_janitor.erl +++ b/apps/emqx/test/emqx_test_janitor.erl @@ -60,12 +60,12 @@ init(Parent) -> {ok, #{callbacks => [], owner => Parent}}. 
terminate(_Reason, #{callbacks := Callbacks}) -> - lists:foreach(fun(Fun) -> catch Fun() end, Callbacks). + do_terminate(Callbacks). handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) -> {reply, ok, State#{callbacks := [Callback | Callbacks]}}; handle_call(terminate, _From, State = #{callbacks := Callbacks}) -> - lists:foreach(fun(Fun) -> catch Fun() end, Callbacks), + do_terminate(Callbacks), {stop, normal, ok, State}; handle_call(_Req, _From, State) -> {reply, error, State}. @@ -77,3 +77,23 @@ handle_info({'EXIT', Parent, _Reason}, State = #{owner := Parent}) -> {stop, normal, State}; handle_info(_Msg, State) -> {noreply, State}. + +%%---------------------------------------------------------------------------------- +%% Internal fns +%%---------------------------------------------------------------------------------- + +do_terminate(Callbacks) -> + lists:foreach( + fun(Fun) -> + try + Fun() + catch + K:E:S -> + ct:pal("error executing callback ~p: ~p", [Fun, {K, E}]), + ct:pal("stacktrace: ~p", [S]), + ok + end + end, + Callbacks + ), + ok. diff --git a/apps/emqx/test/emqx_tls_lib_tests.erl b/apps/emqx/test/emqx_tls_lib_tests.erl index ad9598107..0f5883b10 100644 --- a/apps/emqx/test/emqx_tls_lib_tests.erl +++ b/apps/emqx/test/emqx_tls_lib_tests.erl @@ -229,7 +229,8 @@ ssl_files_handle_non_generated_file_test() -> ok = emqx_tls_lib:delete_ssl_files(Dir, undefined, SSL2), %% verify the file is not delete and not changed, because it is not generated by %% emqx_tls_lib - ?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)). + ?assertEqual({ok, KeyFileContent}, file:read_file(TmpKeyFile)), + ok = file:delete(TmpKeyFile). 
ssl_file_replace_test() -> Key1 = bin(test_key()), diff --git a/apps/emqx/test/emqx_ws_connection_SUITE.erl b/apps/emqx/test/emqx_ws_connection_SUITE.erl index e62844547..60abe3d3c 100644 --- a/apps/emqx/test/emqx_ws_connection_SUITE.erl +++ b/apps/emqx/test/emqx_ws_connection_SUITE.erl @@ -447,7 +447,12 @@ t_websocket_info_deliver(_) -> t_websocket_info_timeout_limiter(_) -> Ref = make_ref(), - LimiterT = init_limiter(), + {ok, Rate} = emqx_limiter_schema:to_rate("50MB"), + LimiterT = init_limiter(#{ + bytes => bucket_cfg(), + messages => bucket_cfg(), + client => #{bytes => client_cfg(Rate)} + }), Next = fun emqx_ws_connection:when_msg_in/3, Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT), Event = {timeout, Ref, limit_timeout}, @@ -513,16 +518,16 @@ t_handle_timeout_emit_stats(_) -> t_ensure_rate_limit(_) -> {ok, Rate} = emqx_limiter_schema:to_rate("50MB"), Limiter = init_limiter(#{ - bytes_in => bucket_cfg(), - message_in => bucket_cfg(), - client => #{bytes_in => client_cfg(Rate)} + bytes => bucket_cfg(), + messages => bucket_cfg(), + client => #{bytes => client_cfg(Rate)} }), St = st(#{limiter => Limiter}), %% must bigger than value in emqx_ratelimit_SUITE {ok, Need} = emqx_limiter_schema:to_capacity("1GB"), St1 = ?ws_conn:check_limiter( - [{Need, bytes_in}], + [{Need, bytes}], [], fun(_, _, S) -> S end, [], @@ -703,23 +708,21 @@ init_limiter() -> init_limiter(limiter_cfg()). init_limiter(LimiterCfg) -> - emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg). + emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes, messages], LimiterCfg). limiter_cfg() -> Cfg = bucket_cfg(), Client = client_cfg(), - #{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}. + #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. client_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), - client_cfg(Infinity). 
+ client_cfg(infinity). client_cfg(Rate) -> - Infinity = emqx_limiter_schema:infinity_value(), #{ rate => Rate, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), @@ -727,14 +730,13 @@ client_cfg(Rate) -> }. bucket_cfg() -> - Infinity = emqx_limiter_schema:infinity_value(), - #{rate => Infinity, initial => 0, capacity => Infinity}. + #{rate => infinity, initial => 0, burst => 0}. add_bucket() -> Cfg = bucket_cfg(), - emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg), - emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg). + emqx_limiter_server:add_bucket(?LIMITER_ID, bytes, Cfg), + emqx_limiter_server:add_bucket(?LIMITER_ID, messages, Cfg). del_bucket() -> - emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in), - emqx_limiter_server:del_bucket(?LIMITER_ID, message_in). + emqx_limiter_server:del_bucket(?LIMITER_ID, bytes), + emqx_limiter_server:del_bucket(?LIMITER_ID, messages). diff --git a/apps/emqx_authn/src/emqx_authn.app.src b/apps/emqx_authn/src/emqx_authn.app.src index 063771e24..c1d48909c 100644 --- a/apps/emqx_authn/src/emqx_authn.app.src +++ b/apps/emqx_authn/src/emqx_authn.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authn, [ {description, "EMQX Authentication"}, - {vsn, "0.1.17"}, + {vsn, "0.1.18"}, {modules, []}, {registered, [emqx_authn_sup, emqx_authn_registry]}, {applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]}, diff --git a/apps/emqx_authn/src/emqx_authn_utils.erl b/apps/emqx_authn/src/emqx_authn_utils.erl index 1352e3daf..12520251e 100644 --- a/apps/emqx_authn/src/emqx_authn_utils.erl +++ b/apps/emqx_authn/src/emqx_authn_utils.erl @@ -28,6 +28,7 @@ parse_sql/2, render_deep/2, render_str/2, + render_urlencoded_str/2, render_sql_params/2, is_superuser/1, bin/1, @@ -129,6 +130,13 @@ render_str(Template, Credential) -> #{return => full_binary, var_trans => fun handle_var/2} ). 
+render_urlencoded_str(Template, Credential) -> + emqx_placeholder:proc_tmpl( + Template, + mapping_credential(Credential), + #{return => full_binary, var_trans => fun urlencode_var/2} + ). + render_sql_params(ParamList, Credential) -> emqx_placeholder:proc_tmpl( ParamList, @@ -217,6 +225,11 @@ without_password(Credential, [Name | Rest]) -> without_password(Credential, Rest) end. +urlencode_var({var, _} = Var, Value) -> + emqx_http_lib:uri_encode(handle_var(Var, Value)); +urlencode_var(Var, Value) -> + handle_var(Var, Value). + handle_var({var, _Name}, undefined) -> <<>>; handle_var({var, <<"peerhost">>}, PeerHost) -> diff --git a/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl b/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl index 84f2c9525..b11b89081 100644 --- a/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl +++ b/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl @@ -105,14 +105,16 @@ mnesia(boot) -> %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-scram-builtin_db". +namespace() -> "authn". tags() -> [<<"Authentication">>]. -roots() -> [?CONF_NS]. +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, scram))}]. -fields(?CONF_NS) -> +fields(scram) -> [ {mechanism, emqx_authn_schema:mechanism(scram)}, {backend, emqx_authn_schema:backend(built_in_database)}, @@ -120,7 +122,7 @@ fields(?CONF_NS) -> {iteration_count, fun iteration_count/1} ] ++ emqx_authn_schema:common_fields(). -desc(?CONF_NS) -> +desc(scram) -> "Settings for Salted Challenge Response Authentication Mechanism\n" "(SCRAM) authentication."; desc(_) -> @@ -141,7 +143,7 @@ iteration_count(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, scram)]. 
create( AuthenticatorID, diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl index 43701cbc7..eddad92a3 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_http.erl @@ -53,34 +53,35 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-http". +namespace() -> "authn". tags() -> [<<"Authentication">>]. +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(fun union_member_selector/1), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields(get) -> +fields(http_get) -> [ {method, #{type => get, required => true, desc => ?DESC(method)}}, {headers, fun headers_no_content_type/1} ] ++ common_fields(); -fields(post) -> +fields(http_post) -> [ {method, #{type => post, required => true, desc => ?DESC(method)}}, {headers, fun headers/1} ] ++ common_fields(). -desc(get) -> +desc(http_get) -> ?DESC(get); -desc(post) -> +desc(http_post) -> ?DESC(post); desc(_) -> undefined. @@ -158,8 +159,8 @@ request_timeout(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, get), - hoconsc:ref(?MODULE, post) + hoconsc:ref(?MODULE, http_get), + hoconsc:ref(?MODULE, http_post) ]. union_member_selector(all_union_members) -> @@ -168,9 +169,9 @@ union_member_selector({value, Value}) -> refs(Value). 
refs(#{<<"method">> := <<"get">>}) -> - [hoconsc:ref(?MODULE, get)]; + [hoconsc:ref(?MODULE, http_get)]; refs(#{<<"method">> := <<"post">>}) -> - [hoconsc:ref(?MODULE, post)]; + [hoconsc:ref(?MODULE, http_post)]; refs(_) -> throw(#{ field_name => method, @@ -313,9 +314,9 @@ parse_url(Url) -> BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), case string:split(Remaining, "?", leading) of [Path, QueryString] -> - {BaseUrl, Path, QueryString}; + {BaseUrl, <<"/", Path/binary>>, QueryString}; [Path] -> - {BaseUrl, Path, <<>>} + {BaseUrl, <<"/", Path/binary>>, <<>>} end; [HostPort] -> {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} @@ -356,7 +357,7 @@ generate_request(Credential, #{ body_template := BodyTemplate }) -> Headers = maps:to_list(Headers0), - Path = emqx_authn_utils:render_str(BasePathTemplate, Credential), + Path = emqx_authn_utils:render_urlencoded_str(BasePathTemplate, Credential), Query = emqx_authn_utils:render_deep(BaseQueryTemplate, Credential), Body = emqx_authn_utils:render_deep(BodyTemplate, Credential), case Method of @@ -371,9 +372,9 @@ generate_request(Credential, #{ end. append_query(Path, []) -> - encode_path(Path); + Path; append_query(Path, Query) -> - encode_path(Path) ++ "?" ++ binary_to_list(qs(Query)). + Path ++ "?" ++ binary_to_list(qs(Query)). qs(KVs) -> qs(KVs, []). @@ -435,10 +436,6 @@ parse_body(ContentType, _) -> uri_encode(T) -> emqx_http_lib:uri_encode(to_list(T)). -encode_path(Path) -> - Parts = string:split(Path, "/", all), - lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]). 
- request_for_log(Credential, #{url := Url} = State) -> SafeCredential = emqx_authn_utils:without_password(Credential), case generate_request(SafeCredential, State) of diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl index 1fdd7cef7..fe0349b4a 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwks_connector.erl @@ -35,18 +35,17 @@ callback_mode() -> always_sync. on_start(InstId, Opts) -> - PoolName = emqx_plugin_libs_pool:pool_name(InstId), PoolOpts = [ {pool_size, maps:get(pool_size, Opts, ?DEFAULT_POOL_SIZE)}, {connector_opts, Opts} ], - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, PoolOpts) of - ok -> {ok, #{pool_name => PoolName}}; + case emqx_resource_pool:start(InstId, ?MODULE, PoolOpts) of + ok -> {ok, #{pool_name => InstId}}; {error, Reason} -> {error, Reason} end. on_stop(_InstId, #{pool_name := PoolName}) -> - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). on_query(InstId, get_jwks, #{pool_name := PoolName}) -> Result = ecpool:pick_and_do(PoolName, {emqx_authn_jwks_client, get_jwks, []}, no_handover), @@ -72,18 +71,17 @@ on_query(_InstId, {update, Opts}, #{pool_name := PoolName}) -> ok. on_get_status(_InstId, #{pool_name := PoolName}) -> - Func = - fun(Conn) -> - case emqx_authn_jwks_client:get_jwks(Conn) of - {ok, _} -> true; - _ -> false - end - end, - case emqx_plugin_libs_pool:health_check_ecpool_workers(PoolName, Func) of + case emqx_resource_pool:health_check_workers(PoolName, fun health_check/1) of true -> connected; false -> disconnected end. +health_check(Conn) -> + case emqx_authn_jwks_client:get_jwks(Conn) of + {ok, _} -> true; + _ -> false + end. + connect(Opts) -> ConnectorOpts = proplists:get_value(connector_opts, Opts), emqx_authn_jwks_client:start_link(ConnectorOpts). 
diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl index a891a55e2..0df9014b8 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_jwt.erl @@ -43,36 +43,57 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-jwt". +namespace() -> "authn". tags() -> [<<"Authentication">>]. +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(fun union_member_selector/1), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields('hmac-based') -> +fields(jwt_hmac) -> [ - {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, + %% for hmac, it's the 'algorithm' field which selects this type + %% use_jwks field can be ignored (kept for backward compatibility) + {use_jwks, + sc( + hoconsc:enum([false]), + #{ + required => false, + desc => ?DESC(use_jwks), + importance => ?IMPORTANCE_HIDDEN + } + )}, {algorithm, sc(hoconsc:enum(['hmac-based']), #{required => true, desc => ?DESC(algorithm)})}, {secret, fun secret/1}, {secret_base64_encoded, fun secret_base64_encoded/1} ] ++ common_fields(); -fields('public-key') -> +fields(jwt_public_key) -> [ - {use_jwks, sc(hoconsc:enum([false]), #{required => true, desc => ?DESC(use_jwks)})}, + %% for public-key, it's the 'algorithm' field which selects this type + %% use_jwks field can be ignored (kept for backward compatibility) + {use_jwks, + sc( + hoconsc:enum([false]), + #{ + required => false, + desc => ?DESC(use_jwks), + importance => ?IMPORTANCE_HIDDEN + } + )}, {algorithm, sc(hoconsc:enum(['public-key']), #{required => true, desc => ?DESC(algorithm)})}, {public_key, fun public_key/1} ] ++ common_fields(); -fields('jwks') -> +fields(jwt_jwks) -> [ {use_jwks, sc(hoconsc:enum([true]), #{required => true, desc => ?DESC(use_jwks)})}, {endpoint, fun 
endpoint/1}, @@ -85,12 +106,12 @@ fields('jwks') -> }} ] ++ common_fields(). -desc('hmac-based') -> - ?DESC('hmac-based'); -desc('public-key') -> - ?DESC('public-key'); -desc('jwks') -> - ?DESC('jwks'); +desc(jwt_hmac) -> + ?DESC(jwt_hmac); +desc(jwt_public_key) -> + ?DESC(jwt_public_key); +desc(jwt_jwks) -> + ?DESC(jwt_jwks); desc(undefined) -> undefined. @@ -160,9 +181,9 @@ from(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, 'hmac-based'), - hoconsc:ref(?MODULE, 'public-key'), - hoconsc:ref(?MODULE, 'jwks') + hoconsc:ref(?MODULE, jwt_hmac), + hoconsc:ref(?MODULE, jwt_public_key), + hoconsc:ref(?MODULE, jwt_jwks) ]. union_member_selector(all_union_members) -> @@ -179,11 +200,11 @@ boolean(<<"false">>) -> false; boolean(Other) -> Other. select_ref(true, _) -> - [hoconsc:ref(?MODULE, 'jwks')]; + [hoconsc:ref(?MODULE, 'jwt_jwks')]; select_ref(false, #{<<"public_key">> := _}) -> - [hoconsc:ref(?MODULE, 'public-key')]; + [hoconsc:ref(?MODULE, jwt_public_key)]; select_ref(false, _) -> - [hoconsc:ref(?MODULE, 'hmac-based')]; + [hoconsc:ref(?MODULE, jwt_hmac)]; select_ref(_, _) -> throw(#{ field_name => use_jwks, diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl index 20b604e70..d57e9e00e 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mnesia.erl @@ -107,14 +107,16 @@ mnesia(boot) -> %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-builtin_db". +namespace() -> "authn". tags() -> [<<"Authentication">>]. -roots() -> [?CONF_NS]. +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, builtin_db))}]. 
-fields(?CONF_NS) -> +fields(builtin_db) -> [ {mechanism, emqx_authn_schema:mechanism(password_based)}, {backend, emqx_authn_schema:backend(built_in_database)}, @@ -122,8 +124,8 @@ fields(?CONF_NS) -> {password_hash_algorithm, fun emqx_authn_password_hashing:type_rw/1} ] ++ emqx_authn_schema:common_fields(). -desc(?CONF_NS) -> - ?DESC(?CONF_NS); +desc(builtin_db) -> + ?DESC(builtin_db); desc(_) -> undefined. @@ -138,7 +140,7 @@ user_id_type(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, builtin_db)]. create(_AuthenticatorID, Config) -> create(Config). diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl index 22b930485..1a766b975 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mongodb.erl @@ -44,32 +44,33 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-mongodb". +namespace() -> "authn". tags() -> [<<"Authentication">>]. +%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(fun union_member_selector/1), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields(standalone) -> +fields(mongo_single) -> common_fields() ++ emqx_connector_mongo:fields(single); -fields('replica-set') -> +fields(mongo_rs) -> common_fields() ++ emqx_connector_mongo:fields(rs); -fields('sharded-cluster') -> +fields(mongo_sharded) -> common_fields() ++ emqx_connector_mongo:fields(sharded). -desc(standalone) -> - ?DESC(standalone); -desc('replica-set') -> +desc(mongo_single) -> + ?DESC(single); +desc(mongo_rs) -> ?DESC('replica-set'); -desc('sharded-cluster') -> +desc(mongo_sharded) -> ?DESC('sharded-cluster'); desc(_) -> undefined. @@ -126,9 +127,9 @@ is_superuser_field(_) -> undefined. 
refs() -> [ - hoconsc:ref(?MODULE, standalone), - hoconsc:ref(?MODULE, 'replica-set'), - hoconsc:ref(?MODULE, 'sharded-cluster') + hoconsc:ref(?MODULE, mongo_single), + hoconsc:ref(?MODULE, mongo_rs), + hoconsc:ref(?MODULE, mongo_sharded) ]. create(_AuthenticatorID, Config) -> @@ -254,11 +255,11 @@ union_member_selector({value, Value}) -> refs(Value). refs(#{<<"mongo_type">> := <<"single">>}) -> - [hoconsc:ref(?MODULE, standalone)]; + [hoconsc:ref(?MODULE, mongo_single)]; refs(#{<<"mongo_type">> := <<"rs">>}) -> - [hoconsc:ref(?MODULE, 'replica-set')]; + [hoconsc:ref(?MODULE, mongo_rs)]; refs(#{<<"mongo_type">> := <<"sharded">>}) -> - [hoconsc:ref(?MODULE, 'sharded-cluster')]; + [hoconsc:ref(?MODULE, mongo_sharded)]; refs(_) -> throw(#{ field_name => mongo_type, diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl index bedd169e2..d8e631885 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_mysql.erl @@ -45,14 +45,16 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-mysql". +namespace() -> "authn". tags() -> [<<"Authentication">>]. -roots() -> [?CONF_NS]. +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, mysql))}]. -fields(?CONF_NS) -> +fields(mysql) -> [ {mechanism, emqx_authn_schema:mechanism(password_based)}, {backend, emqx_authn_schema:backend(mysql)}, @@ -62,8 +64,8 @@ fields(?CONF_NS) -> ] ++ emqx_authn_schema:common_fields() ++ proplists:delete(prepare_statement, emqx_connector_mysql:fields(config)). -desc(?CONF_NS) -> - ?DESC(?CONF_NS); +desc(mysql) -> + ?DESC(mysql); desc(_) -> undefined. @@ -82,7 +84,7 @@ query_timeout(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. 
+ [hoconsc:ref(?MODULE, mysql)]. create(_AuthenticatorID, Config) -> create(Config). diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl index 3d9e1e08f..d9526cc7b 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_pgsql.erl @@ -49,14 +49,16 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-postgresql". +namespace() -> "authn". tags() -> [<<"Authentication">>]. -roots() -> [?CONF_NS]. +%% used for config check when the schema module is resolved +roots() -> + [{?CONF_NS, hoconsc:mk(hoconsc:ref(?MODULE, postgresql))}]. -fields(?CONF_NS) -> +fields(postgresql) -> [ {mechanism, emqx_authn_schema:mechanism(password_based)}, {backend, emqx_authn_schema:backend(postgresql)}, @@ -66,8 +68,8 @@ fields(?CONF_NS) -> emqx_authn_schema:common_fields() ++ proplists:delete(prepare_statement, emqx_connector_pgsql:fields(config)). -desc(?CONF_NS) -> - ?DESC(?CONF_NS); +desc(postgresql) -> + ?DESC(postgresql); desc(_) -> undefined. @@ -81,7 +83,7 @@ query(_) -> undefined. %%------------------------------------------------------------------------------ refs() -> - [hoconsc:ref(?MODULE, ?CONF_NS)]. + [hoconsc:ref(?MODULE, postgresql)]. create(_AuthenticatorID, Config) -> create(Config). diff --git a/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl b/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl index ff81fd4ca..27d8c540a 100644 --- a/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl +++ b/apps/emqx_authn/src/simple_authn/emqx_authn_redis.erl @@ -44,32 +44,33 @@ %% Hocon Schema %%------------------------------------------------------------------------------ -namespace() -> "authn-redis". +namespace() -> "authn". tags() -> [<<"Authentication">>]. 
+%% used for config check when the schema module is resolved roots() -> [ {?CONF_NS, hoconsc:mk( - hoconsc:union(fun union_member_selector/1), + hoconsc:union(fun ?MODULE:union_member_selector/1), #{} )} ]. -fields(standalone) -> +fields(redis_single) -> common_fields() ++ emqx_connector_redis:fields(single); -fields(cluster) -> +fields(redis_cluster) -> common_fields() ++ emqx_connector_redis:fields(cluster); -fields(sentinel) -> +fields(redis_sentinel) -> common_fields() ++ emqx_connector_redis:fields(sentinel). -desc(standalone) -> - ?DESC(standalone); -desc(cluster) -> +desc(redis_single) -> + ?DESC(single); +desc(redis_cluster) -> ?DESC(cluster); -desc(sentinel) -> +desc(redis_sentinel) -> ?DESC(sentinel); desc(_) -> "". @@ -93,9 +94,9 @@ cmd(_) -> undefined. refs() -> [ - hoconsc:ref(?MODULE, standalone), - hoconsc:ref(?MODULE, cluster), - hoconsc:ref(?MODULE, sentinel) + hoconsc:ref(?MODULE, redis_single), + hoconsc:ref(?MODULE, redis_cluster), + hoconsc:ref(?MODULE, redis_sentinel) ]. union_member_selector(all_union_members) -> @@ -104,11 +105,11 @@ union_member_selector({value, Value}) -> refs(Value). refs(#{<<"redis_type">> := <<"single">>}) -> - [hoconsc:ref(?MODULE, standalone)]; + [hoconsc:ref(?MODULE, redis_single)]; refs(#{<<"redis_type">> := <<"cluster">>}) -> - [hoconsc:ref(?MODULE, cluster)]; + [hoconsc:ref(?MODULE, redis_cluster)]; refs(#{<<"redis_type">> := <<"sentinel">>}) -> - [hoconsc:ref(?MODULE, sentinel)]; + [hoconsc:ref(?MODULE, redis_sentinel)]; refs(_) -> throw(#{ field_name => redis_type, diff --git a/apps/emqx_authn/test/emqx_authn_http_SUITE.erl b/apps/emqx_authn/test/emqx_authn_http_SUITE.erl index 851e80f6d..b08167a5b 100644 --- a/apps/emqx_authn/test/emqx_authn_http_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_http_SUITE.erl @@ -47,7 +47,6 @@ }) ). --define(SERVER_RESPONSE_URLENCODE(Result), ?SERVER_RESPONSE_URLENCODE(Result, false)). 
-define(SERVER_RESPONSE_URLENCODE(Result, IsSuperuser), list_to_binary( "result=" ++ @@ -166,6 +165,54 @@ test_user_auth(#{ ?GLOBAL ). +t_authenticate_path_placeholders(_Config) -> + ok = emqx_authn_http_test_server:stop(), + {ok, _} = emqx_authn_http_test_server:start_link(?HTTP_PORT, <<"/[...]">>), + ok = emqx_authn_http_test_server:set_handler( + fun(Req0, State) -> + Req = + case cowboy_req:path(Req0) of + <<"/my/p%20ath//us%20er/auth//">> -> + cowboy_req:reply( + 200, + #{<<"content-type">> => <<"application/json">>}, + emqx_utils_json:encode(#{result => allow, is_superuser => false}), + Req0 + ); + Path -> + ct:pal("Unexpected path: ~p", [Path]), + cowboy_req:reply(403, Req0) + end, + {ok, Req, State} + end + ), + + Credentials = ?CREDENTIALS#{ + username => <<"us er">> + }, + + AuthConfig = maps:merge( + raw_http_auth_config(), + #{ + <<"url">> => <<"http://127.0.0.1:32333/my/p%20ath//${username}/auth//">>, + <<"body">> => #{} + } + ), + {ok, _} = emqx:update_config( + ?PATH, + {create_authenticator, ?GLOBAL, AuthConfig} + ), + + ?assertMatch( + {ok, #{is_superuser := false}}, + emqx_access_control:authenticate(Credentials) + ), + + _ = emqx_authn_test_lib:delete_authenticators( + [authentication], + ?GLOBAL + ). 
+ t_no_value_for_placeholder(_Config) -> Handler = fun(Req0, State) -> {ok, RawBody, Req1} = cowboy_req:read_body(Req0), diff --git a/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl b/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl index 65c783298..075ae5cb7 100644 --- a/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_pgsql_SUITE.erl @@ -107,7 +107,7 @@ t_update_with_invalid_config(_Config) -> ?assertMatch( {error, #{ kind := validation_error, - matched_type := "authn-postgresql:authentication", + matched_type := "authn:postgresql", path := "authentication.1.server", reason := required_field }}, diff --git a/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl b/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl index 4f95bef93..1354e06cc 100644 --- a/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_redis_SUITE.erl @@ -162,7 +162,7 @@ t_create_invalid_config(_Config) -> ?assertMatch( {error, #{ kind := validation_error, - matched_type := "authn-redis:standalone", + matched_type := "authn:redis_single", path := "authentication.1.server", reason := required_field }}, diff --git a/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl b/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl index cd1c38d06..3afb8e973 100644 --- a/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl @@ -53,7 +53,7 @@ t_check_schema(_Config) -> ?assertThrow( #{ path := "authentication.1.password_hash_algorithm.name", - matched_type := "authn-builtin_db:authentication/authn-hash:simple", + matched_type := "authn:builtin_db/authn-hash:simple", reason := unable_to_convert_to_enum_symbol }, Check(ConfigNotOk) @@ -72,7 +72,7 @@ t_check_schema(_Config) -> #{ path := "authentication.1.password_hash_algorithm", reason := "algorithm_name_missing", - matched_type := "authn-builtin_db:authentication" + matched_type := "authn:builtin_db" }, Check(ConfigMissingAlgoName) ). 
diff --git a/apps/emqx_authn/test/emqx_authn_schema_tests.erl b/apps/emqx_authn/test/emqx_authn_schema_tests.erl index 25fcd28e4..622655b2d 100644 --- a/apps/emqx_authn/test/emqx_authn_schema_tests.erl +++ b/apps/emqx_authn/test/emqx_authn_schema_tests.erl @@ -32,19 +32,19 @@ union_member_selector_mongo_test_() -> end}, {"single", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-mongodb:standalone"}), + ?ERR(#{matched_type := "authn:mongo_single"}), Check("{mongo_type: single}") ) end}, {"replica-set", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-mongodb:replica-set"}), + ?ERR(#{matched_type := "authn:mongo_rs"}), Check("{mongo_type: rs}") ) end}, {"sharded", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-mongodb:sharded-cluster"}), + ?ERR(#{matched_type := "authn:mongo_sharded"}), Check("{mongo_type: sharded}") ) end} @@ -61,19 +61,19 @@ union_member_selector_jwt_test_() -> end}, {"jwks", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-jwt:jwks"}), + ?ERR(#{matched_type := "authn:jwt_jwks"}), Check("{use_jwks = true}") ) end}, {"publick-key", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-jwt:public-key"}), + ?ERR(#{matched_type := "authn:jwt_public_key"}), Check("{use_jwks = false, public_key = 1}") ) end}, {"hmac-based", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-jwt:hmac-based"}), + ?ERR(#{matched_type := "authn:jwt_hmac"}), Check("{use_jwks = false}") ) end} @@ -90,19 +90,19 @@ union_member_selector_redis_test_() -> end}, {"single", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-redis:standalone"}), + ?ERR(#{matched_type := "authn:redis_single"}), Check("{redis_type = single}") ) end}, {"cluster", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-redis:cluster"}), + ?ERR(#{matched_type := "authn:redis_cluster"}), Check("{redis_type = cluster}") ) end}, {"sentinel", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-redis:sentinel"}), + ?ERR(#{matched_type := "authn:redis_sentinel"}), 
Check("{redis_type = sentinel}") ) end} @@ -119,13 +119,13 @@ union_member_selector_http_test_() -> end}, {"get", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-http:get"}), + ?ERR(#{matched_type := "authn:http_get"}), Check("{method = get}") ) end}, {"post", fun() -> ?assertMatch( - ?ERR(#{matched_type := "authn-http:post"}), + ?ERR(#{matched_type := "authn:http_post"}), Check("{method = post}") ) end} diff --git a/apps/emqx_authz/src/emqx_authz.app.src b/apps/emqx_authz/src/emqx_authz.app.src index a17ce5dea..dd0325694 100644 --- a/apps/emqx_authz/src/emqx_authz.app.src +++ b/apps/emqx_authz/src/emqx_authz.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authz, [ {description, "An OTP application"}, - {vsn, "0.1.17"}, + {vsn, "0.1.19"}, {registered, []}, {mod, {emqx_authz_app, []}}, {applications, [ diff --git a/apps/emqx_authz/src/emqx_authz_http.erl b/apps/emqx_authz/src/emqx_authz_http.erl index 53378d9c2..5747e6eeb 100644 --- a/apps/emqx_authz/src/emqx_authz_http.erl +++ b/apps/emqx_authz/src/emqx_authz_http.erl @@ -161,9 +161,9 @@ parse_url(Url) -> BaseUrl = iolist_to_binary([Scheme, "//", HostPort]), case string:split(Remaining, "?", leading) of [Path, QueryString] -> - {BaseUrl, Path, QueryString}; + {BaseUrl, <<"/", Path/binary>>, QueryString}; [Path] -> - {BaseUrl, Path, <<>>} + {BaseUrl, <<"/", Path/binary>>, <<>>} end; [HostPort] -> {iolist_to_binary([Scheme, "//", HostPort]), <<>>, <<>>} @@ -185,7 +185,7 @@ generate_request( } ) -> Values = client_vars(Client, PubSub, Topic), - Path = emqx_authz_utils:render_str(BasePathTemplate, Values), + Path = emqx_authz_utils:render_urlencoded_str(BasePathTemplate, Values), Query = emqx_authz_utils:render_deep(BaseQueryTemplate, Values), Body = emqx_authz_utils:render_deep(BodyTemplate, Values), case Method of @@ -202,9 +202,9 @@ generate_request( end. append_query(Path, []) -> - encode_path(Path); + to_list(Path); append_query(Path, Query) -> - encode_path(Path) ++ "?" 
++ to_list(query_string(Query)). + to_list(Path) ++ "?" ++ to_list(query_string(Query)). query_string(Body) -> query_string(Body, []). @@ -222,10 +222,6 @@ query_string([{K, V} | More], Acc) -> uri_encode(T) -> emqx_http_lib:uri_encode(to_list(T)). -encode_path(Path) -> - Parts = string:split(Path, "/", all), - lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]). - serialize_body(<<"application/json">>, Body) -> emqx_utils_json:encode(Body); serialize_body(<<"application/x-www-form-urlencoded">>, Body) -> diff --git a/apps/emqx_authz/src/emqx_authz_rule.erl b/apps/emqx_authz/src/emqx_authz_rule.erl index 306ca9433..bdd0904f7 100644 --- a/apps/emqx_authz/src/emqx_authz_rule.erl +++ b/apps/emqx_authz/src/emqx_authz_rule.erl @@ -185,7 +185,7 @@ match_who(#{peerhost := IpAddress}, {ipaddrs, CIDRs}) -> match_who(ClientInfo, {'and', Principals}) when is_list(Principals) -> lists:foldl( fun(Principal, Permission) -> - match_who(ClientInfo, Principal) andalso Permission + Permission andalso match_who(ClientInfo, Principal) end, true, Principals @@ -193,7 +193,7 @@ match_who(ClientInfo, {'and', Principals}) when is_list(Principals) -> match_who(ClientInfo, {'or', Principals}) when is_list(Principals) -> lists:foldl( fun(Principal, Permission) -> - match_who(ClientInfo, Principal) orelse Permission + Permission orelse match_who(ClientInfo, Principal) end, false, Principals diff --git a/apps/emqx_authz/src/emqx_authz_schema.erl b/apps/emqx_authz/src/emqx_authz_schema.erl index 280b9b16c..a2a7c6b52 100644 --- a/apps/emqx_authz/src/emqx_authz_schema.erl +++ b/apps/emqx_authz/src/emqx_authz_schema.erl @@ -54,7 +54,7 @@ type_names() -> file, http_get, http_post, - mnesia, + builtin_db, mongo_single, mongo_rs, mongo_sharded, @@ -93,7 +93,7 @@ fields(http_post) -> {method, method(post)}, {headers, fun headers/1} ]; -fields(mnesia) -> +fields(builtin_db) -> authz_common_fields(built_in_database); fields(mongo_single) -> authz_common_fields(mongodb) ++ @@ 
-191,8 +191,8 @@ desc(http_get) -> ?DESC(http_get); desc(http_post) -> ?DESC(http_post); -desc(mnesia) -> - ?DESC(mnesia); +desc(builtin_db) -> + ?DESC(builtin_db); desc(mongo_single) -> ?DESC(mongo_single); desc(mongo_rs) -> @@ -459,7 +459,7 @@ select_union_member(#{<<"type">> := <<"http">>} = Value) -> }) end; select_union_member(#{<<"type">> := <<"built_in_database">>}) -> - ?R_REF(mnesia); + ?R_REF(builtin_db); select_union_member(#{<<"type">> := Type}) -> select_union_member_loop(Type, type_names()); select_union_member(_) -> diff --git a/apps/emqx_authz/src/emqx_authz_utils.erl b/apps/emqx_authz/src/emqx_authz_utils.erl index 560141d0a..c01505680 100644 --- a/apps/emqx_authz/src/emqx_authz_utils.erl +++ b/apps/emqx_authz/src/emqx_authz_utils.erl @@ -16,7 +16,6 @@ -module(emqx_authz_utils). --include_lib("emqx/include/emqx_placeholder.hrl"). -include_lib("emqx_authz.hrl"). -export([ @@ -28,6 +27,7 @@ update_config/2, parse_deep/2, parse_str/2, + render_urlencoded_str/2, parse_sql/3, render_deep/2, render_str/2, @@ -128,6 +128,13 @@ render_str(Template, Values) -> #{return => full_binary, var_trans => fun handle_var/2} ). +render_urlencoded_str(Template, Values) -> + emqx_placeholder:proc_tmpl( + Template, + client_vars(Values), + #{return => full_binary, var_trans => fun urlencode_var/2} + ). + render_sql_params(ParamList, Values) -> emqx_placeholder:proc_tmpl( ParamList, @@ -181,6 +188,11 @@ convert_client_var({dn, DN}) -> {cert_subject, DN}; convert_client_var({protocol, Proto}) -> {proto_name, Proto}; convert_client_var(Other) -> Other. +urlencode_var({var, _} = Var, Value) -> + emqx_http_lib:uri_encode(handle_var(Var, Value)); +urlencode_var(Var, Value) -> + handle_var(Var, Value). 
+ handle_var({var, _Name}, undefined) -> <<>>; handle_var({var, <<"peerhost">>}, IpAddr) -> diff --git a/apps/emqx_authz/test/emqx_authz_http_SUITE.erl b/apps/emqx_authz/test/emqx_authz_http_SUITE.erl index 9ff84b805..702bf2756 100644 --- a/apps/emqx_authz/test/emqx_authz_http_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_http_SUITE.erl @@ -199,7 +199,7 @@ t_query_params(_Config) -> peerhost := <<"127.0.0.1">>, proto_name := <<"MQTT">>, mountpoint := <<"MOUNTPOINT">>, - topic := <<"t">>, + topic := <<"t/1">>, action := <<"publish">> } = cowboy_req:match_qs( [ @@ -241,7 +241,7 @@ t_query_params(_Config) -> ?assertEqual( allow, - emqx_access_control:authorize(ClientInfo, publish, <<"t">>) + emqx_access_control:authorize(ClientInfo, publish, <<"t/1">>) ). t_path(_Config) -> @@ -249,13 +249,13 @@ t_path(_Config) -> fun(Req0, State) -> ?assertEqual( << - "/authz/users/" + "/authz/use%20rs/" "user%20name/" "client%20id/" "127.0.0.1/" "MQTT/" "MOUNTPOINT/" - "t/1/" + "t%2F1/" "publish" >>, cowboy_req:path(Req0) @@ -264,7 +264,7 @@ t_path(_Config) -> end, #{ <<"url">> => << - "http://127.0.0.1:33333/authz/users/" + "http://127.0.0.1:33333/authz/use%20rs/" "${username}/" "${clientid}/" "${peerhost}/" diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index 04f2b58a7..e408250be 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ {description, "EMQX bridges"}, - {vsn, "0.1.16"}, + {vsn, "0.1.18"}, {registered, [emqx_bridge_sup]}, {mod, {emqx_bridge_app, []}}, {applications, [ diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 08b8222f2..3aade0369 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -70,7 +70,10 @@ T == dynamo; T == rocketmq; T == cassandra; - T == sqlserver + T == sqlserver; + T == pulsar_producer; + T == 
oracle; + T == iotdb ). load() -> diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index c87ea8ea6..09d1159bd 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -64,7 +64,7 @@ {BridgeType, BridgeName} -> EXPR catch - throw:{invalid_bridge_id, Reason} -> + throw:#{reason := Reason} -> ?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>) end ). @@ -546,6 +546,8 @@ schema("/bridges_probe") -> case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of ok -> ?NO_CONTENT; + {error, #{kind := validation_error} = Reason} -> + ?BAD_REQUEST('TEST_FAILED', map_to_json(Reason)); {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> ?BAD_REQUEST('TEST_FAILED', Reason) end; diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index 347f9d973..a8dd76214 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -56,6 +56,11 @@ (TYPE) =:= <<"kafka_consumer">> orelse ?IS_BI_DIR_BRIDGE(TYPE) ). +%% [FIXME] this has no place here, it's used in parse_confs/3, which should +%% rather delegate to a behavior callback than implementing domain knowledge +%% here (reversed dependency) +-define(INSERT_TABLET_PATH, "/rest/v2/insertTablet"). + -if(?EMQX_RELEASE_EDITION == ee). bridge_to_resource_type(<<"mqtt">>) -> emqx_connector_mqtt; bridge_to_resource_type(mqtt) -> emqx_connector_mqtt; @@ -87,7 +92,7 @@ parse_bridge_id(BridgeId) -> [Type, Name] -> {to_type_atom(Type), validate_name(Name)}; _ -> - invalid_bridge_id( + invalid_data( <<"should be of pattern {type}:{name}, but got ", BridgeId/binary>> ) end. @@ -108,14 +113,14 @@ validate_name(Name0) -> true -> Name0; false -> - invalid_bridge_id(<<"bad name: ", Name0/binary>>) + invalid_data(<<"bad name: ", Name0/binary>>) end; false -> - invalid_bridge_id(<<"only 0-9a-zA-Z_-. 
is allowed in name: ", Name0/binary>>) + invalid_data(<<"only 0-9a-zA-Z_-. is allowed in name: ", Name0/binary>>) end. --spec invalid_bridge_id(binary()) -> no_return(). -invalid_bridge_id(Reason) -> throw({?FUNCTION_NAME, Reason}). +-spec invalid_data(binary()) -> no_return(). +invalid_data(Reason) -> throw(#{kind => validation_error, reason => Reason}). is_id_char(C) when C >= $0 andalso C =< $9 -> true; is_id_char(C) when C >= $a andalso C =< $z -> true; @@ -130,7 +135,7 @@ to_type_atom(Type) -> erlang:binary_to_existing_atom(Type, utf8) catch _:_ -> - invalid_bridge_id(<<"unknown type: ", Type/binary>>) + invalid_data(<<"unknown bridge type: ", Type/binary>>) end. reset_metrics(ResourceId) -> @@ -243,12 +248,19 @@ create_dry_run(Type, Conf0) -> {error, Reason} -> {error, Reason}; {ok, ConfNew} -> - ParseConf = parse_confs(bin(Type), TmpPath, ConfNew), - Res = emqx_resource:create_dry_run_local( - bridge_to_resource_type(Type), ParseConf - ), - _ = maybe_clear_certs(TmpPath, ConfNew), - Res + try + ParseConf = parse_confs(bin(Type), TmpPath, ConfNew), + Res = emqx_resource:create_dry_run_local( + bridge_to_resource_type(Type), ParseConf + ), + Res + catch + %% validation errors + throw:Reason -> + {error, Reason} + after + _ = maybe_clear_certs(TmpPath, ConfNew) + end end. 
remove(BridgeId) -> @@ -300,10 +312,18 @@ parse_confs( max_retries := Retry } = Conf ) -> - {BaseUrl, Path} = parse_url(Url), - {ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl), + Url1 = bin(Url), + {BaseUrl, Path} = parse_url(Url1), + BaseUrl1 = + case emqx_http_lib:uri_parse(BaseUrl) of + {ok, BUrl} -> + BUrl; + {error, Reason} -> + Reason1 = emqx_utils:readable_error_msg(Reason), + invalid_data(<<"Invalid URL: ", Url1/binary, ", details: ", Reason1/binary>>) + end, Conf#{ - base_url => BaseUrl2, + base_url => BaseUrl1, request => #{ path => Path, @@ -314,6 +334,30 @@ parse_confs( max_retries => Retry } }; +parse_confs(<<"iotdb">>, Name, Conf) -> + #{ + base_url := BaseURL, + authentication := + #{ + username := Username, + password := Password + } + } = Conf, + BasicToken = base64:encode(<>), + WebhookConfig = + Conf#{ + method => <<"post">>, + url => <>, + headers => [ + {<<"Content-type">>, <<"application/json">>}, + {<<"Authorization">>, BasicToken} + ] + }, + parse_confs( + <<"webhook">>, + Name, + WebhookConfig + ); parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) -> %% For some drivers that can be used as data-sources, we need to provide a %% hookpoint. The underlying driver will run `emqx_hooks:run/3` when it @@ -325,6 +369,8 @@ parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) -> %% to hocon; keeping this as just `kafka' for backwards compatibility. parse_confs(<<"kafka">> = _Type, Name, Conf) -> Conf#{bridge_name => Name}; +parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) -> + Conf#{bridge_name => Name}; parse_confs(_Type, _Name, Conf) -> Conf. @@ -338,7 +384,7 @@ parse_url(Url) -> {iolist_to_binary([Scheme, "//", HostPort]), <<>>} end; [Url] -> - error({invalid_url, Url}) + invalid_data(<<"Missing scheme in URL: ", Url/binary>>) end. 
str(Bin) when is_binary(Bin) -> binary_to_list(Bin); diff --git a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl index ed4807d12..a8864bf00 100644 --- a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl @@ -141,8 +141,7 @@ setup_fake_telemetry_data() -> } } }, - Opts = #{raw_with_default => true}, - ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts), + ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf), ok = snabbkaffe:start_trace(), Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_loaded end, diff --git a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl index 3afe17080..d55b92138 100644 --- a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl @@ -414,6 +414,18 @@ t_http_crud_apis(Config) -> }, json(maps:get(<<"message">>, PutFail2)) ), + {ok, 400, _} = request_json( + put, + uri(["bridges", BridgeID]), + ?HTTP_BRIDGE(<<"localhost:1234/foo">>, Name), + Config + ), + {ok, 400, _} = request_json( + put, + uri(["bridges", BridgeID]), + ?HTTP_BRIDGE(<<"htpp://localhost:12341234/foo">>, Name), + Config + ), %% delete the bridge {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config), @@ -498,6 +510,22 @@ t_http_crud_apis(Config) -> %% Try create bridge with bad characters as name {ok, 400, _} = request(post, uri(["bridges"]), ?HTTP_BRIDGE(URL1, <<"隋达"/utf8>>), Config), + %% Missing scheme in URL + {ok, 400, _} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(<<"localhost:1234/foo">>, <<"missing_url_scheme">>), + Config + ), + + %% Invalid port + {ok, 400, _} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(<<"http://localhost:12341234/foo">>, <<"invalid_port">>), + Config + ), + {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), Config). 
t_http_bridges_local_topic(Config) -> @@ -1016,6 +1044,34 @@ t_bridges_probe(Config) -> ) ), + %% Missing scheme in URL + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := _ + }}, + request_json( + post, + uri(["bridges_probe"]), + ?HTTP_BRIDGE(<<"203.0.113.3:1234/foo">>), + Config + ) + ), + + %% Invalid port + ?assertMatch( + {ok, 400, #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := _ + }}, + request_json( + post, + uri(["bridges_probe"]), + ?HTTP_BRIDGE(<<"http://203.0.113.3:12341234/foo">>), + Config + ) + ), + {ok, 204, _} = request( post, uri(["bridges_probe"]), diff --git a/apps/emqx_bridge/test/emqx_bridge_testlib.erl b/apps/emqx_bridge/test/emqx_bridge_testlib.erl new file mode 100644 index 000000000..47f29aa36 --- /dev/null +++ b/apps/emqx_bridge/test/emqx_bridge_testlib.erl @@ -0,0 +1,350 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_testlib). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% ct setup helpers + +init_per_suite(Config, Apps) -> + [{start_apps, Apps} | Config]. + +end_per_suite(Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?config(start_apps, Config))), + _ = application:stop(emqx_connector), + ok. 
+ +init_per_group(TestGroup, BridgeType, Config) -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?config(start_apps, Config)), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer([positive])), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic}, + {test_group, TestGroup}, + {bridge_type, BridgeType} + | Config + ]. + +end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config0, BridgeConfigCb) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + BridgeTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + TestGroup = ?config(test_group, Config0), + Config = [{bridge_topic, BridgeTopic} | Config0], + {Name, ConfigString, BridgeConfig} = BridgeConfigCb( + TestCase, TestGroup, Config + ), + ok = snabbkaffe:start_trace(), + [ + {bridge_name, Name}, + {bridge_config_string, ConfigString}, + {bridge_config, BridgeConfig} + | Config + ]. + +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... 
+ emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%% test helpers +parse_and_check(Config, ConfigString, Name) -> + BridgeType = ?config(bridge_type, Config), + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := BridgeConfig}}} = RawConf, + BridgeConfig. + +resource_id(Config) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + emqx_bridge_resource:resource_id(BridgeType, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + BridgeConfig0 = ?config(bridge_config, Config), + BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides), + emqx_bridge:create(BridgeType, Name, BridgeConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). 
+ +create_bridge_api(Config, Overrides) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + BridgeConfig0 = ?config(bridge_config, Config), + BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides), + Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). + +update_bridge_api(Config, Overrides) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + BridgeConfig0 = ?config(bridge_config, Config), + BridgeConfig = emqx_utils_maps:deep_merge(BridgeConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(BridgeType, Name), + Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). 
+ +probe_bridge_api(Config, _Overrides) -> + BridgeType = ?config(bridge_type, Config), + Name = ?config(bridge_name, Config), + BridgeConfig = ?config(bridge_config, Config), + Params = BridgeConfig#{<<"type">> => BridgeType, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. + +create_rule_and_action_http(BridgeType, RuleTopic, Config) -> + BridgeName = ?config(bridge_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(BridgeType, BridgeName), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", RuleTopic/binary, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_sync_query(Config, MakeMessageFun, IsSuccessCheck) -> + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + Message = {send_message, MakeMessageFun()}, + IsSuccessCheck(emqx_resource:simple_sync_query(ResourceId, Message)), + ok + end, + [] + ), + ok. 
+ +t_async_query(Config, MakeMessageFun, IsSuccessCheck) -> + ResourceId = resource_id(Config), + ReplyFun = + fun(Pid, Result) -> + Pid ! {result, Result} + end, + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + Message = {send_message, MakeMessageFun()}, + emqx_resource:query(ResourceId, Message, #{async_reply_fun => {ReplyFun, [self()]}}), + ok + end, + [] + ), + receive + {result, Result} -> IsSuccessCheck(Result) + after 5_000 -> + throw(timeout) + end, + ok. + +t_create_via_http(Config) -> + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + + %% lightweight matrix testing some configs + ?assertMatch( + {ok, _}, + update_bridge_api( + Config + ) + ), + ?assertMatch( + {ok, _}, + update_bridge_api( + Config + ) + ), + ok + end, + [] + ), + ok. + +t_start_stop(Config, StopTracePoint) -> + BridgeType = ?config(bridge_type, Config), + BridgeName = ?config(bridge_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + %% Check that the bridge probe API doesn't leak atoms. + ProbeRes0 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. 
+ ProbeRes1 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + %% Now stop the bridge. + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_bridge:disable_enable(disable, BridgeType, BridgeName), + #{?snk_kind := StopTracePoint}, + 5_000 + ) + ), + + ok + end, + fun(Trace) -> + %% one for each probe, one for real + ?assertMatch([_, _, _], ?of_kind(StopTracePoint, Trace)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. diff --git a/apps/emqx_bridge_cassandra/BSL.txt b/apps/emqx_bridge_cassandra/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_cassandra/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. 
+Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. 
+ +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. 
To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_cassandra/README.md b/apps/emqx_bridge_cassandra/README.md new file mode 100644 index 000000000..c5a2609a5 --- /dev/null +++ b/apps/emqx_bridge_cassandra/README.md @@ -0,0 +1,41 @@ +# EMQX Cassandra Bridge + +[Apache Cassandra](https://github.com/apache/cassandra) is an open-source, distributed +NoSQL database management system that is designed to manage large amounts of structured +and semi-structured data across many commodity servers, providing high availability +with no single point of failure. +It is commonly used in web and mobile applications, IoT, and other systems that +require storing, querying, and analyzing large amounts of data. + +The application is used to connect EMQX and Cassandra. User can create a rule +and easily ingest IoT data into Cassandra by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). 
+ diff --git a/apps/emqx_bridge_cassandra/docker-ct b/apps/emqx_bridge_cassandra/docker-ct new file mode 100644 index 000000000..2626b4068 --- /dev/null +++ b/apps/emqx_bridge_cassandra/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +cassandra diff --git a/apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl b/apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl new file mode 100644 index 000000000..eef7c5d2b --- /dev/null +++ b/apps/emqx_bridge_cassandra/include/emqx_bridge_cassandra.hrl @@ -0,0 +1,5 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-define(CASSANDRA_DEFAULT_PORT, 9042). diff --git a/apps/emqx_bridge_cassandra/rebar.config b/apps/emqx_bridge_cassandra/rebar.config new file mode 100644 index 000000000..b8bfc7dd6 --- /dev/null +++ b/apps/emqx_bridge_cassandra/rebar.config @@ -0,0 +1,11 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {ecql, {git, "https://github.com/emqx/ecql.git", {tag, "v0.5.1"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_cassandra]} +]}. diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src new file mode 100644 index 000000000..1bde274f3 --- /dev/null +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_cassandra, [ + {description, "EMQX Enterprise Cassandra Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [kernel, stdlib, ecql]}, + {env, []}, + {modules, []}, + {links, []} +]}. 
diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl similarity index 97% rename from lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl rename to apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl index 26c6de04d..e8f7d50ce 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_cassa.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.erl @@ -1,7 +1,7 @@ %%-------------------------------------------------------------------- %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_cassa). +-module(emqx_bridge_cassandra). -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). @@ -88,7 +88,7 @@ fields("config") -> #{desc => ?DESC("local_topic"), default => undefined} )} ] ++ emqx_resource_schema:fields("resource_opts") ++ - (emqx_ee_connector_cassa:fields(config) -- + (emqx_bridge_cassandra_connector:fields(config) -- emqx_connector_schema_lib:prepare_statement_fields()); fields("post") -> fields("post", cassandra); diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl similarity index 91% rename from lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl rename to apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl index c4f3e9b87..a3032a9df 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_cassa.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl @@ -2,12 +2,12 @@ %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_connector_cassa). +-module(emqx_bridge_cassandra_connector). -behaviour(emqx_resource). -include_lib("emqx_connector/include/emqx_connector.hrl"). 
--include_lib("emqx_ee_connector/include/emqx_ee_connector.hrl"). +-include("emqx_bridge_cassandra.hrl"). -include_lib("typerefl/include/types.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("hocon/include/hoconsc.hrl"). @@ -44,7 +44,7 @@ -type state() :: #{ - poolname := atom(), + pool_name := binary(), prepare_cql := prepares(), params_tokens := params_tokens(), %% returned by ecql:prepare/2 @@ -92,7 +92,7 @@ callback_mode() -> async_if_possible. on_start( InstId, #{ - servers := Servers, + servers := Servers0, keyspace := Keyspace, username := Username, pool_size := PoolSize, @@ -104,9 +104,16 @@ on_start( connector => InstId, config => emqx_utils:redact(Config) }), + Servers = + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION) + ), Options = [ - {nodes, emqx_schema:parse_servers(Servers, ?DEFAULT_SERVER_OPTION)}, + {nodes, Servers}, {username, Username}, {password, emqx_secret:wrap(maps:get(password, Config, ""))}, {keyspace, Keyspace}, @@ -124,14 +131,10 @@ on_start( false -> [] end, - %% use InstaId of binary type as Pool name, which is supported in ecpool. - PoolName = InstId, - Prepares = parse_prepare_cql(Config), - InitState = #{poolname => PoolName, prepare_statement => #{}}, - State = maps:merge(InitState, Prepares), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of + State = parse_prepare_cql(Config), + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> - {ok, init_prepare(State)}; + {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})}; {error, Reason} -> ?tp( cassandra_connector_start_failed, @@ -140,12 +143,12 @@ on_start( {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_cassandra_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). 
+ emqx_resource_pool:stop(PoolName). -type request() :: % emqx_bridge.erl @@ -184,7 +187,7 @@ do_single_query( InstId, Request, Async, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> {Type, PreparedKeyOrSQL, Params} = parse_request_to_cql(Request), ?tp( @@ -232,7 +235,7 @@ do_batch_query( InstId, Requests, Async, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> CQLs = lists:map( @@ -312,8 +315,8 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) -> exec(PoolName, Query) -> ecpool:pick_and_do(PoolName, Query, no_handover). -on_get_status(_InstId, #{poolname := Pool} = State) -> - case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of +on_get_status(_InstId, #{pool_name := PoolName} = State) -> + case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of true -> case do_check_prepares(State) of ok -> @@ -334,7 +337,7 @@ do_get_status(Conn) -> do_check_prepares(#{prepare_cql := Prepares}) when is_map(Prepares) -> ok; -do_check_prepares(State = #{poolname := PoolName, prepare_cql := {error, Prepares}}) -> +do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepares}}) -> %% retry to prepare case prepare_cql(Prepares, PoolName) of {ok, Sts} -> @@ -410,7 +413,7 @@ parse_prepare_cql([], Prepares, Tokens) -> params_tokens => Tokens }. -init_prepare(State = #{prepare_cql := Prepares, poolname := PoolName}) -> +init_prepare(State = #{prepare_cql := Prepares, pool_name := PoolName}) -> case maps:size(Prepares) of 0 -> State; @@ -442,17 +445,17 @@ prepare_cql(Prepares, PoolName) -> end. do_prepare_cql(Prepares, PoolName) -> - do_prepare_cql(ecpool:workers(PoolName), Prepares, PoolName, #{}). + do_prepare_cql(ecpool:workers(PoolName), Prepares, #{}). 
-do_prepare_cql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) -> +do_prepare_cql([{_Name, Worker} | T], Prepares, _LastSts) -> {ok, Conn} = ecpool_worker:client(Worker), case prepare_cql_to_conn(Conn, Prepares) of {ok, Sts} -> - do_prepare_cql(T, Prepares, PoolName, Sts); + do_prepare_cql(T, Prepares, Sts); Error -> Error end; -do_prepare_cql([], _Prepares, _PoolName, LastSts) -> +do_prepare_cql([], _Prepares, LastSts) -> {ok, LastSts}. prepare_cql_to_conn(Conn, Prepares) -> diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl similarity index 97% rename from lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl rename to apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl index fa696c185..79220321e 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_cassa_SUITE). +-module(emqx_bridge_cassandra_SUITE). -compile(nowarn_export_all). -compile(export_all). 
@@ -57,7 +57,7 @@ %% CASSA_TCP_HOST=127.0.0.1 CASSA_TCP_PORT=19042 \ %% CASSA_TLS_HOST=127.0.0.1 CASSA_TLS_PORT=19142 \ %% PROXY_HOST=127.0.0.1 ./rebar3 as test ct -c -v --name ct@127.0.0.1 \ -%% --suite lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_cassa_SUITE.erl +%% --suite apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl %% %%------------------------------------------------------------------------------ @@ -530,15 +530,16 @@ t_write_failure(Config) -> fun(Trace0) -> ct:pal("trace: ~p", [Trace0]), Trace = ?of_kind(buffer_worker_flush_nack, Trace0), - ?assertMatch([#{result := {async_return, {error, _}}} | _], Trace), - [#{result := {async_return, {error, Error}}} | _] = Trace, - case Error of - {resource_error, _} -> + [#{result := Result} | _] = Trace, + case Result of + {async_return, {error, {resource_error, _}}} -> ok; - {recoverable_error, disconnected} -> + {async_return, {error, {recoverable_error, disconnected}}} -> + ok; + {error, {resource_error, _}} -> ok; _ -> - ct:fail("unexpected error: ~p", [Error]) + ct:fail("unexpected error: ~p", [Result]) end end ), @@ -589,7 +590,7 @@ t_missing_data(Config) -> {ok, _}, create_bridge(Config) ), - %% emqx_ee_connector_cassa will send missed data as a `null` atom + %% emqx_bridge_cassandra_connector will send missed data as a `null` atom %% to ecql driver ?check_trace( begin diff --git a/lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl similarity index 78% rename from lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl rename to apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl index 52ed03a62..452db33a7 100644 --- a/lib-ee/emqx_ee_connector/test/emqx_ee_connector_cassa_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl @@ -2,13 +2,13 @@ %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
%%-------------------------------------------------------------------- --module(emqx_ee_connector_cassa_SUITE). +-module(emqx_bridge_cassandra_connector_SUITE). -compile(nowarn_export_all). -compile(export_all). --include("emqx_connector.hrl"). --include("emqx_ee_connector.hrl"). +-include("emqx_bridge_cassandra.hrl"). +-include("emqx_connector/include/emqx_connector.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("emqx/include/emqx.hrl"). -include_lib("stdlib/include/assert.hrl"). @@ -16,7 +16,7 @@ %% Cassandra server defined at `.ci/docker-compose-file/docker-compose-cassandra-tcp.yaml` %% You can change it to `127.0.0.1`, if you run this SUITE locally -define(CASSANDRA_HOST, "cassandra"). --define(CASSANDRA_RESOURCE_MOD, emqx_ee_connector_cassa). +-define(CASSANDRA_RESOURCE_MOD, emqx_bridge_cassandra_connector). %% This test SUITE requires a running cassandra instance. If you don't want to %% bring up the whole CI infrastuctucture with the `scripts/ct/run.sh` script @@ -38,9 +38,14 @@ groups() -> []. cassandra_servers() -> - emqx_schema:parse_servers( - iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]), - #{default_port => ?CASSANDRA_DEFAULT_PORT} + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers( + iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]), + #{default_port => ?CASSANDRA_DEFAULT_PORT} + ) ). init_per_suite(Config) -> @@ -101,15 +106,15 @@ show(Label, What) -> erlang:display({Label, What}), What. 
-perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?CASSANDRA_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?CASSANDRA_RESOURCE_MOD, CheckedConfig, @@ -121,45 +126,45 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected (fun() -> - erlang:display({pool_name, PoolName}), - QueryNoParamsResWrapper = emqx_resource:query(PoolName, test_query_no_params()), + erlang:display({pool_name, ResourceId}), + QueryNoParamsResWrapper = emqx_resource:query(ResourceId, test_query_no_params()), ?assertMatch({ok, _}, QueryNoParamsResWrapper) end)(), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
- ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), (fun() -> QueryNoParamsResWrapper = - emqx_resource:query(PoolName, test_query_no_params()), + emqx_resource:query(ResourceId, test_query_no_params()), ?assertMatch({ok, _}, QueryNoParamsResWrapper) end)(), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). %%-------------------------------------------------------------------- %% utils diff --git a/apps/emqx_bridge_clickhouse/BSL.txt b/apps/emqx_bridge_clickhouse/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_clickhouse/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. 
+Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. 
This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. 
To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_clickhouse/README.md b/apps/emqx_bridge_clickhouse/README.md new file mode 100644 index 000000000..ff870e87d --- /dev/null +++ b/apps/emqx_bridge_clickhouse/README.md @@ -0,0 +1,37 @@ +# EMQX ClickHouse Bridge + +[ClickHouse](https://github.com/ClickHouse/ClickHouse) is an open-source, column-based +database management system. It is designed for real-time processing of large volumes of +data and is known for its high performance and scalability. + +The application is used to connect EMQX and ClickHouse. +User can create a rule and easily ingest IoT data into ClickHouse by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into ClickHouse](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-clickhouse.html) + for how to use EMQX dashboard to ingest IoT data into ClickHouse. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). 
diff --git a/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src new file mode 100644 index 000000000..a0b409d5b --- /dev/null +++ b/apps/emqx_bridge_clickhouse/src/emqx_bridge_clickhouse.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_clickhouse, [ + {description, "EMQX Enterprise ClickHouse Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_dynamo/BSL.txt b/apps/emqx_bridge_dynamo/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_dynamo/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
+ +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_dynamo/README.md b/apps/emqx_bridge_dynamo/README.md new file mode 100644 index 000000000..48dcb781d --- /dev/null +++ b/apps/emqx_bridge_dynamo/README.md @@ -0,0 +1,40 @@ +# EMQX DynamoDB Bridge + +[Dynamodb](https://aws.amazon.com/dynamodb/) is a high-performance NoSQL database +service provided by Amazon that's designed for scalability and low-latency access +to structured data. + +It's often used in applications that require fast and reliable access to data, +such as mobile, ad tech, and IoT. + +The application is used to connect EMQX and DynamoDB. +User can create a rule and easily ingest IoT data into DynamoDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ + +# Documentation + +- Refer to [Ingest data into DynamoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-dynamo.html) + for how to use EMQX dashboard to ingest IoT data into DynamoDB. + +- Refer to [Rules engine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src new file mode 100644 index 000000000..51c717220 --- /dev/null +++ b/apps/emqx_bridge_dynamo/src/emqx_bridge_dynamo.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_dynamo, [ + {description, "EMQX Enterprise Dynamo Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_gcp_pubsub/BSL.txt b/apps/emqx_bridge_gcp_pubsub/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. 
+Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. 
If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. 
Not to modify this License in any other way. diff --git a/apps/emqx_bridge_gcp_pubsub/README.md b/apps/emqx_bridge_gcp_pubsub/README.md new file mode 100644 index 000000000..e33c5ab15 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/README.md @@ -0,0 +1,36 @@ +# EMQX GCP Pub/Sub Bridge + +[Google Cloud Pub/Sub](https://cloud.google.com/pubsub) is a messaging service +provided by Google Cloud Platform (GCP). + +The application is used to connect EMQX and GCP Pub/Sub. +User can create a rule and easily ingest IoT data into GCP Pub/Sub by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into GCP Pub/Sub](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-gcp-pubsub.html) + for how to use EMQX dashboard to ingest IoT data into GCP Pub/Sub. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_gcp_pubsub/rebar.config b/apps/emqx_bridge_gcp_pubsub/rebar.config new file mode 100644 index 000000000..2fd264fc0 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/rebar.config @@ -0,0 +1,10 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_gcp_pubsub]} +]}. 
diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src new file mode 100644 index 000000000..2b3d359d3 --- /dev/null +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src @@ -0,0 +1,13 @@ +{application, emqx_bridge_gcp_pubsub, [ + {description, "EMQX Enterprise GCP Pub/Sub Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + ehttpc + ]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl similarity index 99% rename from lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl rename to apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl index 180640d65..70109a0ea 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_gcp_pubsub.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_gcp_pubsub). +-module(emqx_bridge_gcp_pubsub). -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl similarity index 79% rename from lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl rename to apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl index 2295f63ab..be5e56e85 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_gcp_pubsub.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_connector_gcp_pubsub). 
+-module(emqx_bridge_gcp_pubsub_connector). -behaviour(emqx_resource). @@ -26,9 +26,8 @@ ]). -export([reply_delegator/3]). --type bridge_id() :: binary(). -type jwt_worker() :: binary(). --type service_account_json() :: emqx_ee_bridge_gcp_pubsub:service_account_json(). +-type service_account_json() :: emqx_bridge_gcp_pubsub:service_account_json(). -type config() :: #{ connect_timeout := emqx_schema:duration_ms(), max_retries := non_neg_integer(), @@ -39,11 +38,10 @@ }. -type state() :: #{ connect_timeout := timer:time(), - instance_id := manager_id(), jwt_worker_id := jwt_worker(), max_retries := non_neg_integer(), payload_template := emqx_plugin_libs_rule:tmpl_token(), - pool_name := atom(), + pool_name := binary(), project_id := binary(), pubsub_topic := binary(), request_timeout := timer:time() @@ -62,9 +60,9 @@ is_buffer_supported() -> false. callback_mode() -> async_if_possible. --spec on_start(manager_id(), config()) -> {ok, state()} | {error, term()}. +-spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}. 
on_start( - InstanceId, + ResourceId, #{ connect_timeout := ConnectTimeout, max_retries := MaxRetries, @@ -76,13 +74,13 @@ on_start( ) -> ?SLOG(info, #{ msg => "starting_gcp_pubsub_bridge", - connector => InstanceId, + connector => ResourceId, config => Config }), %% emulating the emulator behavior %% https://cloud.google.com/pubsub/docs/emulator HostPort = os:getenv("PUBSUB_EMULATOR_HOST", "pubsub.googleapis.com:443"), - {Host, Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}), + #{hostname := Host, port := Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}), PoolType = random, Transport = tls, TransportOpts = emqx_tls_lib:to_client_opts(#{enable => true, verify => verify_none}), @@ -101,15 +99,13 @@ on_start( #{ jwt_worker_id := JWTWorkerId, project_id := ProjectId - } = ensure_jwt_worker(InstanceId, Config), - PoolName = emqx_plugin_libs_pool:pool_name(InstanceId), + } = ensure_jwt_worker(ResourceId, Config), State = #{ connect_timeout => ConnectTimeout, - instance_id => InstanceId, jwt_worker_id => JWTWorkerId, max_retries => MaxRetries, payload_template => emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate), - pool_name => PoolName, + pool_name => ResourceId, project_id => ProjectId, pubsub_topic => PubSubTopic, request_timeout => RequestTimeout @@ -117,45 +113,42 @@ on_start( ?tp( gcp_pubsub_on_start_before_starting_pool, #{ - instance_id => InstanceId, - pool_name => PoolName, + resource_id => ResourceId, + pool_name => ResourceId, pool_opts => PoolOpts } ), - ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => PoolName}), - case ehttpc_sup:start_pool(PoolName, PoolOpts) of + ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => ResourceId}), + case ehttpc_sup:start_pool(ResourceId, PoolOpts) of {ok, _} -> {ok, State}; {error, {already_started, _}} -> - ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => PoolName}), + ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => ResourceId}), {ok, State}; {error, Reason} -> 
?tp(gcp_pubsub_ehttpc_pool_start_failure, #{ - pool_name => PoolName, + pool_name => ResourceId, reason => Reason }), {error, Reason} end. --spec on_stop(manager_id(), state()) -> ok | {error, term()}. +-spec on_stop(resource_id(), state()) -> ok | {error, term()}. on_stop( - InstanceId, - _State = #{ - jwt_worker_id := JWTWorkerId, - pool_name := PoolName - } + ResourceId, + _State = #{jwt_worker_id := JWTWorkerId} ) -> - ?tp(gcp_pubsub_stop, #{instance_id => InstanceId, jwt_worker_id => JWTWorkerId}), + ?tp(gcp_pubsub_stop, #{resource_id => ResourceId, jwt_worker_id => JWTWorkerId}), ?SLOG(info, #{ msg => "stopping_gcp_pubsub_bridge", - connector => InstanceId + connector => ResourceId }), emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), - emqx_connector_jwt:delete_jwt(?JWT_TABLE, InstanceId), - ehttpc_sup:stop_pool(PoolName). + emqx_connector_jwt:delete_jwt(?JWT_TABLE, ResourceId), + ehttpc_sup:stop_pool(ResourceId). -spec on_query( - bridge_id(), + resource_id(), {send_message, map()}, state() ) -> @@ -163,32 +156,32 @@ on_stop( | {ok, status_code(), headers(), body()} | {error, {recoverable_error, term()}} | {error, term()}. -on_query(BridgeId, {send_message, Selected}, State) -> +on_query(ResourceId, {send_message, Selected}, State) -> Requests = [{send_message, Selected}], ?TRACE( "QUERY_SYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_sync(State, Requests, BridgeId). + do_send_requests_sync(State, Requests, ResourceId). -spec on_query_async( - bridge_id(), + resource_id(), {send_message, map()}, {ReplyFun :: function(), Args :: list()}, state() ) -> {ok, pid()}. 
-on_query_async(BridgeId, {send_message, Selected}, ReplyFunAndArgs, State) -> +on_query_async(ResourceId, {send_message, Selected}, ReplyFunAndArgs, State) -> Requests = [{send_message, Selected}], ?TRACE( "QUERY_ASYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_async(State, Requests, ReplyFunAndArgs, BridgeId). + do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId). -spec on_batch_query( - bridge_id(), + resource_id(), [{send_message, map()}], state() ) -> @@ -196,35 +189,31 @@ on_query_async(BridgeId, {send_message, Selected}, ReplyFunAndArgs, State) -> | {ok, status_code(), headers(), body()} | {error, {recoverable_error, term()}} | {error, term()}. -on_batch_query(BridgeId, Requests, State) -> +on_batch_query(ResourceId, Requests, State) -> ?TRACE( "QUERY_SYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_sync(State, Requests, BridgeId). + do_send_requests_sync(State, Requests, ResourceId). -spec on_batch_query_async( - bridge_id(), + resource_id(), [{send_message, map()}], {ReplyFun :: function(), Args :: list()}, state() ) -> {ok, pid()}. -on_batch_query_async(BridgeId, Requests, ReplyFunAndArgs, State) -> +on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) -> ?TRACE( "QUERY_ASYNC", "gcp_pubsub_received", - #{requests => Requests, connector => BridgeId, state => State} + #{requests => Requests, connector => ResourceId, state => State} ), - do_send_requests_async(State, Requests, ReplyFunAndArgs, BridgeId). + do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId). --spec on_get_status(manager_id(), state()) -> connected | disconnected. 
-on_get_status(InstanceId, State) -> - #{ - connect_timeout := Timeout, - pool_name := PoolName - } = State, - case do_get_status(InstanceId, PoolName, Timeout) of +-spec on_get_status(resource_id(), state()) -> connected | disconnected. +on_get_status(ResourceId, #{connect_timeout := Timeout} = State) -> + case do_get_status(ResourceId, Timeout) of true -> connected; false -> @@ -239,14 +228,13 @@ on_get_status(InstanceId, State) -> %% Helper fns %%------------------------------------------------------------------------------------------------- --spec ensure_jwt_worker(manager_id(), config()) -> +-spec ensure_jwt_worker(resource_id(), config()) -> #{ jwt_worker_id := jwt_worker(), project_id := binary() }. -ensure_jwt_worker(InstanceId, #{ - service_account_json := ServiceAccountJSON, - pubsub_topic := PubSubTopic +ensure_jwt_worker(ResourceId, #{ + service_account_json := ServiceAccountJSON }) -> #{ project_id := ProjectId, @@ -260,7 +248,7 @@ ensure_jwt_worker(InstanceId, #{ Alg = <<"RS256">>, Config = #{ private_key => PrivateKeyPEM, - resource_id => InstanceId, + resource_id => ResourceId, expiration => ExpirationMS, table => ?JWT_TABLE, iss => ServiceAccountEmail, @@ -270,20 +258,14 @@ ensure_jwt_worker(InstanceId, #{ alg => Alg }, - JWTWorkerId = <<"gcp_pubsub_jwt_worker:", InstanceId/binary>>, + JWTWorkerId = <<"gcp_pubsub_jwt_worker:", ResourceId/binary>>, Worker = case emqx_connector_jwt_sup:ensure_worker_present(JWTWorkerId, Config) of {ok, Worker0} -> Worker0; Error -> - ?tp( - gcp_pubsub_bridge_jwt_worker_failed_to_start, - #{instance_id => InstanceId, reason => Error} - ), - ?SLOG(error, #{ - msg => "failed_to_start_gcp_pubsub_jwt_worker", - instance_id => InstanceId, - pubsub_topic => PubSubTopic, + ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{ + connector => ResourceId, reason => Error }), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), @@ -297,30 +279,18 @@ ensure_jwt_worker(InstanceId, #{ %% produced by the worker. 
receive {Ref, token_created} -> - ?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => InstanceId}), + ?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => ResourceId}), demonitor(MRef, [flush]), ok; {'DOWN', MRef, process, Worker, Reason} -> - ?tp( - gcp_pubsub_bridge_jwt_worker_failed_to_start, - #{ - resource_id => InstanceId, - reason => Reason - } - ), - ?SLOG(error, #{ - msg => "gcp_pubsub_bridge_jwt_worker_failed_to_start", - connector => InstanceId, + ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{ + connector => ResourceId, reason => Reason }), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), throw(failed_to_start_jwt_worker) after 10_000 -> - ?tp(gcp_pubsub_bridge_jwt_timeout, #{resource_id => InstanceId}), - ?SLOG(warning, #{ - msg => "gcp_pubsub_bridge_jwt_timeout", - connector => InstanceId - }), + ?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => ResourceId}), demonitor(MRef, [flush]), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), throw(timeout_creating_jwt) @@ -353,8 +323,8 @@ publish_path( <<"/v1/projects/", ProjectId/binary, "/topics/", PubSubTopic/binary, ":publish">>. -spec get_jwt_authorization_header(resource_id()) -> [{binary(), binary()}]. -get_jwt_authorization_header(InstanceId) -> - case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, InstanceId) of +get_jwt_authorization_header(ResourceId) -> + case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, ResourceId) of %% Since we synchronize the JWT creation during resource start %% (see `on_start/2'), this will be always be populated. 
{ok, JWT} -> @@ -373,7 +343,6 @@ get_jwt_authorization_header(InstanceId) -> do_send_requests_sync(State, Requests, ResourceId) -> #{ pool_name := PoolName, - instance_id := InstanceId, max_retries := MaxRetries, request_timeout := RequestTimeout } = State, @@ -381,12 +350,11 @@ do_send_requests_sync(State, Requests, ResourceId) -> gcp_pubsub_bridge_do_send_requests, #{ query_mode => sync, - instance_id => InstanceId, resource_id => ResourceId, requests => Requests } ), - Headers = get_jwt_authorization_header(InstanceId), + Headers = get_jwt_authorization_header(ResourceId), Payloads = lists:map( fun({send_message, Selected}) -> @@ -499,19 +467,17 @@ do_send_requests_sync(State, Requests, ResourceId) -> do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId) -> #{ pool_name := PoolName, - instance_id := InstanceId, request_timeout := RequestTimeout } = State, ?tp( gcp_pubsub_bridge_do_send_requests, #{ query_mode => async, - instance_id => InstanceId, resource_id => ResourceId, requests => Requests } ), - Headers = get_jwt_authorization_header(InstanceId), + Headers = get_jwt_authorization_header(ResourceId), Payloads = lists:map( fun({send_message, Selected}) -> @@ -569,9 +535,9 @@ reply_delegator(_ResourceId, ReplyFunAndArgs, Result) -> emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result) end. --spec do_get_status(manager_id(), atom(), timer:time()) -> boolean(). -do_get_status(InstanceId, PoolName, Timeout) -> - Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(PoolName)], +-spec do_get_status(resource_id(), timer:time()) -> boolean(). 
+do_get_status(ResourceId, Timeout) -> + Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(ResourceId)], DoPerWorker = fun(Worker) -> case ehttpc:health_check(Worker, Timeout) of @@ -580,7 +546,7 @@ do_get_status(InstanceId, PoolName, Timeout) -> {error, Reason} -> ?SLOG(error, #{ msg => "ehttpc_health_check_failed", - instance_id => InstanceId, + connector => ResourceId, reason => Reason, worker => Worker }), diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl similarity index 98% rename from lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl rename to apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl index a785924d4..55527bf1f 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_gcp_pubsub_SUITE.erl +++ b/apps/emqx_bridge_gcp_pubsub/test/emqx_bridge_gcp_pubsub_SUITE.erl @@ -2,7 +2,7 @@ %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_gcp_pubsub_SUITE). +-module(emqx_bridge_gcp_pubsub_SUITE). -compile(nowarn_export_all). -compile(export_all). @@ -70,22 +70,13 @@ init_per_suite(Config) -> ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_bridge, emqx_rule_engine]), {ok, _} = application:ensure_all_started(emqx_connector), emqx_mgmt_api_test_util:init_suite(), - HTTPHost = "localhost", - HTTPPort = 56000, - HostPort = HTTPHost ++ ":" ++ integer_to_list(HTTPPort), - true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort), - [ - {http_host, HTTPHost}, - {http_port, HTTPPort} - | Config - ]. + Config. end_per_suite(_Config) -> emqx_mgmt_api_test_util:end_suite(), ok = emqx_common_test_helpers:stop_apps([emqx_conf]), ok = emqx_connector_test_helpers:stop_apps([emqx_bridge, emqx_resource, emqx_rule_engine]), _ = application:stop(emqx_connector), - os:unsetenv("PUBSUB_EMULATOR_HOST"), ok. 
init_per_group(sync_query, Config) -> @@ -113,26 +104,26 @@ init_per_testcase(TestCase, Config0) when 1 -> [{skip_due_to_no_batching, true}]; _ -> - {ok, _} = start_echo_http_server(), delete_all_bridges(), Tid = install_telemetry_handler(TestCase), Config = generate_config(Config0), put(telemetry_table, Tid), - [{telemetry_table, Tid} | Config] + {ok, HttpServer} = start_echo_http_server(), + [{telemetry_table, Tid}, {http_server, HttpServer} | Config] end; init_per_testcase(TestCase, Config0) -> ct:timetrap({seconds, 30}), - {ok, _} = start_echo_http_server(), + {ok, HttpServer} = start_echo_http_server(), delete_all_bridges(), Tid = install_telemetry_handler(TestCase), Config = generate_config(Config0), put(telemetry_table, Tid), - [{telemetry_table, Tid} | Config]. + [{telemetry_table, Tid}, {http_server, HttpServer} | Config]. end_per_testcase(_TestCase, _Config) -> ok = snabbkaffe:stop(), delete_all_bridges(), - ok = emqx_connector_web_hook_server:stop(), + ok = stop_echo_http_server(), emqx_common_test_helpers:call_janitor(), ok. @@ -242,7 +233,6 @@ success_http_handler() -> start_echo_http_server() -> HTTPHost = "localhost", - HTTPPort = 56000, HTTPPath = <<"/v1/projects/myproject/topics/mytopic:publish">>, ServerSSLOpts = [ @@ -250,14 +240,23 @@ start_echo_http_server() -> {versions, ['tlsv1.2', 'tlsv1.3']}, {ciphers, ["ECDHE-RSA-AES256-GCM-SHA384", "TLS_CHACHA20_POLY1305_SHA256"]} ] ++ certs(), - {ok, _} = emqx_connector_web_hook_server:start_link(HTTPPort, HTTPPath, ServerSSLOpts), + {ok, {HTTPPort, _Pid}} = emqx_connector_web_hook_server:start_link( + random, HTTPPath, ServerSSLOpts + ), ok = emqx_connector_web_hook_server:set_handler(success_http_handler()), + HTTPHost = "localhost", + HostPort = HTTPHost ++ ":" ++ integer_to_list(HTTPPort), + true = os:putenv("PUBSUB_EMULATOR_HOST", HostPort), {ok, #{ - host_port => HTTPHost ++ ":" ++ integer_to_list(HTTPPort), + host_port => HostPort, host => HTTPHost, port => HTTPPort }}. 
+stop_echo_http_server() -> + os:unsetenv("PUBSUB_EMULATOR_HOST"), + ok = emqx_connector_web_hook_server:stop(). + certs() -> CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"), [ @@ -917,7 +916,7 @@ t_invalid_private_key(Config) -> #{<<"private_key">> => InvalidPrivateKeyPEM} } ), - #{?snk_kind := gcp_pubsub_bridge_jwt_worker_failed_to_start}, + #{?snk_kind := "gcp_pubsub_bridge_jwt_worker_failed_to_start"}, 20_000 ), Res @@ -928,7 +927,7 @@ t_invalid_private_key(Config) -> [#{reason := Reason}] when Reason =:= noproc orelse Reason =:= {shutdown, {error, empty_key}}, - ?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace) + ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace) ), ?assertMatch( [#{error := empty_key}], @@ -956,14 +955,14 @@ t_jwt_worker_start_timeout(Config) -> #{<<"private_key">> => InvalidPrivateKeyPEM} } ), - #{?snk_kind := gcp_pubsub_bridge_jwt_timeout}, + #{?snk_kind := "gcp_pubsub_bridge_jwt_timeout"}, 20_000 ), Res end, fun(Res, Trace) -> ?assertMatch({ok, _}, Res), - ?assertMatch([_], ?of_kind(gcp_pubsub_bridge_jwt_timeout, Trace)), + ?assertMatch([_], ?of_kind("gcp_pubsub_bridge_jwt_timeout", Trace)), ok end ), @@ -1329,7 +1328,7 @@ t_failed_to_start_jwt_worker(Config) -> fun(Trace) -> ?assertMatch( [#{reason := {error, restarting}}], - ?of_kind(gcp_pubsub_bridge_jwt_worker_failed_to_start, Trace) + ?of_kind("gcp_pubsub_bridge_jwt_worker_failed_to_start", Trace) ), ok end diff --git a/apps/emqx_bridge_hstreamdb/BSL.txt b/apps/emqx_bridge_hstreamdb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_hstreamdb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. 
+Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. 
If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. 
Not to modify this License in any other way. diff --git a/apps/emqx_bridge_hstreamdb/README.md b/apps/emqx_bridge_hstreamdb/README.md new file mode 100644 index 000000000..520817e82 --- /dev/null +++ b/apps/emqx_bridge_hstreamdb/README.md @@ -0,0 +1,40 @@ +# EMQX HStreamDB Bridge + +[HStreamDB](https://hstream.io/) is a streaming database purpose-built to ingest, +store, process, and analyze massive data streams. It is a modern data infrastructure +that unifies messaging, stream processing, and storage to help get value out of +your data in real-time. + +The application is used to connect EMQX and HStreamDB. +User can create a rule and easily ingest IoT data into HStreamDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src new file mode 100644 index 000000000..1cb3742b3 --- /dev/null +++ b/apps/emqx_bridge_hstreamdb/src/emqx_bridge_hstreamdb.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_hstreamdb, [ + {description, "EMQX Enterprise HStreamDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. 
diff --git a/apps/emqx_bridge_influxdb/BSL.txt b/apps/emqx_bridge_influxdb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_influxdb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. 
+ +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. 
+ +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_influxdb/README.md b/apps/emqx_bridge_influxdb/README.md new file mode 100644 index 000000000..fe0f14600 --- /dev/null +++ b/apps/emqx_bridge_influxdb/README.md @@ -0,0 +1,49 @@ +# EMQX InfluxDB Bridge + +[InfluxDB](https://github.com/influxdata/influxdb) is an open-source time-series +database that is optimized for storing, retrieving, and querying large volumes of +time-stamped data. +It is commonly used for monitoring and analysis of metrics, events, and real-time +analytics. +InfluxDB is designed to be fast, efficient, and scalable, and it has a SQL-like +query language that makes it easy to extract insights from time-series data. + +The application is used to connect EMQX and InfluxDB. User can create a rule and +easily ingest IoT data into InfluxDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ + +# Documentation + +- Refer to [Ingest data into InfluxDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-influxdb.html) + for how to use EMQX dashboard to ingest IoT data into InfluxDB. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + +- [Create bridge API doc](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges/paths/~1bridges/post) + list required parameters for creating a InfluxDB bridge. + There are two types of InfluxDB API (`v1` and `v2`), please select the right + version of InfluxDB. Below are several important parameters for `v1`, + - `server`: The IPv4 or IPv6 address or the hostname to connect to. + - `database`: InfluxDB database name + - `write_syntax`: Conf of InfluxDB line protocol to write data points. It is a text-based format that provides the measurement, tag set, field set, and timestamp of a data point, and placeholder supported. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). + diff --git a/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src new file mode 100644 index 000000000..5443417c3 --- /dev/null +++ b/apps/emqx_bridge_influxdb/src/emqx_bridge_influxdb.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_influxdb, [ + {description, "EMQX Enterprise InfluxDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. 
diff --git a/apps/emqx_bridge_iotdb/.gitignore b/apps/emqx_bridge_iotdb/.gitignore new file mode 100644 index 000000000..e9bc1c544 --- /dev/null +++ b/apps/emqx_bridge_iotdb/.gitignore @@ -0,0 +1,19 @@ +.rebar3 + _* + .eunit + *.o + *.beam + *.plt + *.swp + *.swo + .erlang.cookie + ebin + log + erl_crash.dump + .rebar + logs + _build + .idea + *.iml + rebar3.crashdump + *~ diff --git a/apps/emqx_bridge_iotdb/BSL.txt b/apps/emqx_bridge_iotdb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_iotdb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
+ +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_iotdb/README.md b/apps/emqx_bridge_iotdb/README.md new file mode 100644 index 000000000..48f5d74c2 --- /dev/null +++ b/apps/emqx_bridge_iotdb/README.md @@ -0,0 +1,26 @@ +# Apache IoTDB Data Integration Bridge + +This application houses the IoTDB data integration bridge for EMQX Enterprise + Edition. It provides the means to connect to IoTDB and publish messages to it. + +It implements the connection management and interaction without need for a + separate connector app, since it's not used by authentication and authorization + applications. + +# Documentation links + +For more information on Apache IoTDB, please see its [official + site](https://iotdb.apache.org/). 
+ +# Configurations + +Please see [our official + documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-iotdb.html) + for more detailed info. + +# Contributing - [Mandatory] +Please see our [contributing.md](../../CONTRIBUTING.md). + +# License + +See [BSL](./BSL.txt). diff --git a/apps/emqx_bridge_iotdb/docker-ct b/apps/emqx_bridge_iotdb/docker-ct new file mode 100644 index 000000000..8a8973a88 --- /dev/null +++ b/apps/emqx_bridge_iotdb/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +iotdb diff --git a/apps/emqx_bridge_iotdb/etc/emqx_bridge_iotdb.conf b/apps/emqx_bridge_iotdb/etc/emqx_bridge_iotdb.conf new file mode 100644 index 000000000..e69de29bb diff --git a/apps/emqx_bridge_iotdb/include/emqx_bridge_iotdb.hrl b/apps/emqx_bridge_iotdb/include/emqx_bridge_iotdb.hrl new file mode 100644 index 000000000..5e6bf9ac5 --- /dev/null +++ b/apps/emqx_bridge_iotdb/include/emqx_bridge_iotdb.hrl @@ -0,0 +1,11 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-ifndef(EMQX_BRIDGE_IOTDB_HRL). +-define(EMQX_BRIDGE_IOTDB_HRL, true). + +-define(VSN_1_0_X, 'v1.0.x'). +-define(VSN_0_13_X, 'v0.13.x'). + +-endif. diff --git a/apps/emqx_bridge_iotdb/rebar.config b/apps/emqx_bridge_iotdb/rebar.config new file mode 100644 index 000000000..a4afd2877 --- /dev/null +++ b/apps/emqx_bridge_iotdb/rebar.config @@ -0,0 +1,14 @@ +%% -*- mode: erlang -*- + +{erl_opts, [ + debug_info +]}. + +{deps, [ + {emqx, {path, "../../apps/emqx"}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. +{plugins, [rebar3_path_deps]}. +{project_plugins, [erlfmt]}. 
diff --git a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src new file mode 100644 index 000000000..9c5108307 --- /dev/null +++ b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.app.src @@ -0,0 +1,22 @@ +%% -*- mode: erlang -*- +{application, emqx_bridge_iotdb, [ + {description, "EMQX Enterprise Apache IoTDB Bridge"}, + {vsn, "0.1.0"}, + {modules, [ + emqx_bridge_iotdb, + emqx_bridge_iotdb_impl + ]}, + {registered, []}, + {applications, [ + kernel, + stdlib, + emqx_connector + ]}, + {env, []}, + {licenses, ["Business Source License 1.1"]}, + {maintainers, ["EMQX Team <contact@emqx.io>"]}, + {links, [ + {"Homepage", "https://emqx.io/"}, + {"Github", "https://github.com/emqx/emqx"} + ]} +]}. diff --git a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl new file mode 100644 index 000000000..e0312bb02 --- /dev/null +++ b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb.erl @@ -0,0 +1,232 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_iotdb). + +-include("emqx_bridge_iotdb.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +%% hocon_schema API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% emqx_ee_bridge "unofficial" API +-export([conn_bridge_examples/1]). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> "bridge_iotdb". + +roots() -> []. 
+ +fields("config") -> + basic_config() ++ request_config(); +fields("post") -> + [ + type_field(), + name_field() + ] ++ fields("config"); +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"); +fields("creation_opts") -> + lists:filter( + fun({K, _V}) -> + not lists:member(K, unsupported_opts()) + end, + emqx_resource_schema:fields("creation_opts") + ); +fields(auth_basic) -> + [ + {username, mk(binary(), #{required => true, desc => ?DESC("config_auth_basic_username")})}, + {password, + mk(binary(), #{ + required => true, + desc => ?DESC("config_auth_basic_password"), + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} + ]. + +desc("config") -> + ?DESC("desc_config"); +desc("creation_opts") -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc("post") -> + ["Configuration for IoTDB using `POST` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +struct_names() -> + [ + auth_basic + ]. + +basic_config() -> + [ + {enable, + mk( + boolean(), + #{ + desc => ?DESC("config_enable"), + default => true + } + )}, + {authentication, + mk( + hoconsc:union([ref(?MODULE, auth_basic)]), + #{ + default => auth_basic, desc => ?DESC("config_authentication") + } + )}, + {is_aligned, + mk( + boolean(), + #{ + desc => ?DESC("config_is_aligned"), + default => false + } + )}, + {device_id, + mk( + binary(), + #{ + desc => ?DESC("config_device_id") + } + )}, + {iotdb_version, + mk( + hoconsc:enum([?VSN_1_0_X, ?VSN_0_13_X]), + #{ + desc => ?DESC("config_iotdb_version"), + default => ?VSN_1_0_X + } + )} + ] ++ resource_creation_opts() ++ + proplists_without( + [max_retries, base_url, request], + emqx_connector_http:fields(config) + ). + +proplists_without(Keys, List) -> + [El || El = {K, _} <- List, not lists:member(K, Keys)]. 
+ +request_config() -> + [ + {base_url, + mk( + emqx_schema:url(), + #{ + desc => ?DESC("config_base_url") + } + )}, + {max_retries, + mk( + non_neg_integer(), + #{ + default => 2, + desc => ?DESC("config_max_retries") + } + )}, + {request_timeout, + mk( + emqx_schema:duration_ms(), + #{ + default => <<"15s">>, + desc => ?DESC("config_request_timeout") + } + )} + ]. + +resource_creation_opts() -> + [ + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ]. + +unsupported_opts() -> + [ + batch_size, + batch_time + ]. + +%%====================================================================================== + +type_field() -> + {type, + mk( + hoconsc:enum([iotdb]), + #{ + required => true, + desc => ?DESC("desc_type") + } + )}. + +name_field() -> + {name, + mk( + binary(), + #{ + required => true, + desc => ?DESC("desc_name") + } + )}. + +%%====================================================================================== + +conn_bridge_examples(Method) -> + [ + #{ + <<"iotdb">> => + #{ + summary => <<"Apache IoTDB Bridge">>, + value => conn_bridge_example(Method, iotdb) + } + } + ]. + +conn_bridge_example(_Method, Type) -> + #{ + name => <<"My IoTDB Bridge">>, + type => Type, + enable => true, + authentication => #{ + <<"username">> => <<"root">>, + <<"password">> => <<"*****">> + }, + is_aligned => false, + device_id => <<"my_device">>, + base_url => <<"http://iotdb.local:18080/">>, + iotdb_version => ?VSN_1_0_X, + connect_timeout => <<"15s">>, + pool_type => <<"random">>, + pool_size => 8, + enable_pipelining => 100, + ssl => #{enable => false}, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. 
diff --git a/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb_impl.erl b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb_impl.erl new file mode 100644 index 000000000..2f8794560 --- /dev/null +++ b/apps/emqx_bridge_iotdb/src/emqx_bridge_iotdb_impl.erl @@ -0,0 +1,382 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_iotdb_impl). + +-include("emqx_bridge_iotdb.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% `emqx_resource' API +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_get_status/2, + on_query/3, + on_query_async/4 +]). + +-type config() :: + #{ + base_url := #{ + scheme := http | https, + host := iolist(), + port := inet:port_number(), + path := '_' + }, + connect_timeout := pos_integer(), + pool_type := random | hash, + pool_size := pos_integer(), + request := undefined | map(), + is_aligned := boolean(), + iotdb_version := binary(), + device_id := binary() | undefined, + atom() => '_' + }. + +-type state() :: + #{ + base_path := '_', + base_url := #{ + scheme := http | https, + host := iolist(), + port := inet:port_number(), + path := '_' + }, + connect_timeout := pos_integer(), + pool_type := random | hash, + pool_size := pos_integer(), + request := undefined | map(), + is_aligned := boolean(), + iotdb_version := binary(), + device_id := binary() | undefined, + atom() => '_' + }. + +-type manager_id() :: binary(). + +%%------------------------------------------------------------------------------------- +%% `emqx_resource' API +%%------------------------------------------------------------------------------------- +callback_mode() -> async_if_possible. + +-spec on_start(manager_id(), config()) -> {ok, state()} | no_return(). 
+on_start(InstanceId, Config) -> + %% [FIXME] The configuration passed in here is pre-processed and transformed + %% in emqx_bridge_resource:parse_confs/2. + case emqx_connector_http:on_start(InstanceId, Config) of + {ok, State} -> + ?SLOG(info, #{ + msg => "iotdb_bridge_started", + instance_id => InstanceId, + request => maps:get(request, State, <<>>) + }), + ?tp(iotdb_bridge_started, #{}), + {ok, maps:merge(Config, State)}; + {error, Reason} -> + ?SLOG(error, #{ + msg => "failed_to_start_iotdb_bridge", + instance_id => InstanceId, + base_url => maps:get(request, Config, <<>>), + reason => Reason + }), + throw(failed_to_start_iotdb_bridge) + end. + +-spec on_stop(manager_id(), state()) -> ok | {error, term()}. +on_stop(InstanceId, State) -> + ?SLOG(info, #{ + msg => "stopping_iotdb_bridge", + connector => InstanceId + }), + Res = emqx_connector_http:on_stop(InstanceId, State), + ?tp(iotdb_bridge_stopped, #{instance_id => InstanceId}), + Res. + +-spec on_get_status(manager_id(), state()) -> + {connected, state()} | {disconnected, state(), term()}. +on_get_status(InstanceId, State) -> + emqx_connector_http:on_get_status(InstanceId, State). + +-spec on_query(manager_id(), {send_message, map()}, state()) -> + {ok, pos_integer(), [term()], term()} + | {ok, pos_integer(), [term()]} + | {error, term()}. +on_query(InstanceId, {send_message, Message}, State) -> + ?SLOG(debug, #{ + msg => "iotdb_bridge_on_query_called", + instance_id => InstanceId, + send_message => Message, + state => emqx_utils:redact(State) + }), + IoTDBPayload = make_iotdb_insert_request(Message, State), + handle_response( + emqx_connector_http:on_query( + InstanceId, {send_message, IoTDBPayload}, State + ) + ). + +-spec on_query_async(manager_id(), {send_message, map()}, {function(), [term()]}, state()) -> + {ok, pid()}. 
+on_query_async(InstanceId, {send_message, Message}, ReplyFunAndArgs0, State) -> + ?SLOG(debug, #{ + msg => "iotdb_bridge_on_query_async_called", + instance_id => InstanceId, + send_message => Message, + state => emqx_utils:redact(State) + }), + IoTDBPayload = make_iotdb_insert_request(Message, State), + ReplyFunAndArgs = + { + fun(Result) -> + Response = handle_response(Result), + emqx_resource:apply_reply_fun(ReplyFunAndArgs0, Response) + end, + [] + }, + emqx_connector_http:on_query_async( + InstanceId, {send_message, IoTDBPayload}, ReplyFunAndArgs, State + ). + +%%-------------------------------------------------------------------- +%% Internal Functions +%%-------------------------------------------------------------------- + +preproc_data(DataList) -> + lists:map( + fun( + #{ + measurement := Measurement, + data_type := DataType, + value := Value + } = Data + ) -> + #{ + timestamp => emqx_plugin_libs_rule:preproc_tmpl( + maps:get(<<"timestamp">>, Data, <<"now">>) + ), + measurement => emqx_plugin_libs_rule:preproc_tmpl(Measurement), + data_type => DataType, + value => emqx_plugin_libs_rule:preproc_tmpl(Value) + } + end, + DataList + ). + +proc_data(PreProcessedData, Msg) -> + NowNS = erlang:system_time(nanosecond), + Nows = #{ + now_ms => erlang:convert_time_unit(NowNS, nanosecond, millisecond), + now_us => erlang:convert_time_unit(NowNS, nanosecond, microsecond), + now_ns => NowNS + }, + lists:map( + fun( + #{ + timestamp := TimestampTkn, + measurement := Measurement, + data_type := DataType, + value := ValueTkn + } + ) -> + #{ + timestamp => iot_timestamp( + emqx_plugin_libs_rule:proc_tmpl(TimestampTkn, Msg), Nows + ), + measurement => emqx_plugin_libs_rule:proc_tmpl(Measurement, Msg), + data_type => DataType, + value => proc_value(DataType, ValueTkn, Msg) + } + end, + PreProcessedData + ). 
+ +iot_timestamp(Timestamp, #{now_ms := NowMs}) when + Timestamp =:= <<"now">>; Timestamp =:= <<"now_ms">>; Timestamp =:= <<>> +-> + NowMs; +iot_timestamp(Timestamp, #{now_us := NowUs}) when Timestamp =:= <<"now_us">> -> + NowUs; +iot_timestamp(Timestamp, #{now_ns := NowNs}) when Timestamp =:= <<"now_ns">> -> + NowNs; +iot_timestamp(Timestamp, _) when is_binary(Timestamp) -> + binary_to_integer(Timestamp). + +proc_value(<<"TEXT">>, ValueTkn, Msg) -> + case emqx_plugin_libs_rule:proc_tmpl(ValueTkn, Msg) of + <<"undefined">> -> null; + Val -> Val + end; +proc_value(<<"BOOLEAN">>, ValueTkn, Msg) -> + convert_bool(replace_var(ValueTkn, Msg)); +proc_value(Int, ValueTkn, Msg) when Int =:= <<"INT32">>; Int =:= <<"INT64">> -> + convert_int(replace_var(ValueTkn, Msg)); +proc_value(Int, ValueTkn, Msg) when Int =:= <<"FLOAT">>; Int =:= <<"DOUBLE">> -> + convert_float(replace_var(ValueTkn, Msg)). + +replace_var(Tokens, Data) when is_list(Tokens) -> + [Val] = emqx_plugin_libs_rule:proc_tmpl(Tokens, Data, #{return => rawlist}), + Val; +replace_var(Val, _Data) -> + Val. + +convert_bool(B) when is_boolean(B) -> B; +convert_bool(1) -> true; +convert_bool(0) -> false; +convert_bool(<<"1">>) -> true; +convert_bool(<<"0">>) -> false; +convert_bool(<<"true">>) -> true; +convert_bool(<<"True">>) -> true; +convert_bool(<<"TRUE">>) -> true; +convert_bool(<<"false">>) -> false; +convert_bool(<<"False">>) -> false; +convert_bool(<<"FALSE">>) -> false; +convert_bool(undefined) -> null. + +convert_int(Int) when is_integer(Int) -> Int; +convert_int(Float) when is_float(Float) -> floor(Float); +convert_int(Str) when is_binary(Str) -> + try + binary_to_integer(Str) + catch + _:_ -> + convert_int(binary_to_float(Str)) + end; +convert_int(undefined) -> + null. 
+ +convert_float(Float) when is_float(Float) -> Float; +convert_float(Int) when is_integer(Int) -> Int * 10 / 10; +convert_float(Str) when is_binary(Str) -> + try + binary_to_float(Str) + catch + _:_ -> + convert_float(binary_to_integer(Str)) + end; +convert_float(undefined) -> + null. + +make_iotdb_insert_request(Message, State) -> + IsAligned = maps:get(is_aligned, State, false), + DeviceId = device_id(Message, State), + IotDBVsn = maps:get(iotdb_version, State, ?VSN_1_0_X), + Payload = make_list(maps:get(payload, Message)), + PreProcessedData = preproc_data(Payload), + DataList = proc_data(PreProcessedData, Message), + InitAcc = #{timestamps => [], measurements => [], dtypes => [], values => []}, + Rows = replace_dtypes(aggregate_rows(DataList, InitAcc), IotDBVsn), + maps:merge(Rows, #{ + iotdb_field_key(is_aligned, IotDBVsn) => IsAligned, + iotdb_field_key(device_id, IotDBVsn) => DeviceId + }). + +replace_dtypes(Rows, IotDBVsn) -> + {Types, Map} = maps:take(dtypes, Rows), + Map#{iotdb_field_key(data_types, IotDBVsn) => Types}. + +aggregate_rows(DataList, InitAcc) -> + lists:foldr( + fun( + #{ + timestamp := Timestamp, + measurement := Measurement, + data_type := DataType, + value := Data + }, + #{ + timestamps := AccTs, + measurements := AccM, + dtypes := AccDt, + values := AccV + } = Acc + ) -> + Timestamps = [Timestamp | AccTs], + case index_of(Measurement, AccM) of + 0 -> + Acc#{ + timestamps => Timestamps, + values => [pad_value(Data, length(AccTs)) | pad_existing_values(AccV)], + measurements => [Measurement | AccM], + dtypes => [DataType | AccDt] + }; + Index -> + Acc#{ + timestamps => Timestamps, + values => insert_value(Index, Data, AccV), + measurements => AccM, + dtypes => AccDt + } + end + end, + InitAcc, + DataList + ). + +pad_value(Data, N) -> + [Data | lists:duplicate(N, null)]. + +pad_existing_values(Values) -> + [[null | Value] || Value <- Values]. + +index_of(E, List) -> + string:str(List, [E]). 
+ +insert_value(_Index, _Data, []) -> + []; +insert_value(1, Data, [Value | Values]) -> + [[Data | Value] | insert_value(0, Data, Values)]; +insert_value(Index, Data, [Value | Values]) -> + [[null | Value] | insert_value(Index - 1, Data, Values)]. + +iotdb_field_key(is_aligned, ?VSN_1_0_X) -> + <<"is_aligned">>; +iotdb_field_key(is_aligned, ?VSN_0_13_X) -> + <<"isAligned">>; +iotdb_field_key(device_id, ?VSN_1_0_X) -> + <<"device">>; +iotdb_field_key(device_id, ?VSN_0_13_X) -> + <<"deviceId">>; +iotdb_field_key(data_types, ?VSN_1_0_X) -> + <<"data_types">>; +iotdb_field_key(data_types, ?VSN_0_13_X) -> + <<"dataTypes">>. + +make_list(List) when is_list(List) -> List; +make_list(Data) -> [Data]. + +device_id(Message, State) -> + case maps:get(device_id, State, undefined) of + undefined -> + case maps:get(payload, Message) of + #{device_id := DeviceId} -> + DeviceId; + _NotFound -> + Topic = maps:get(topic, Message), + case re:replace(Topic, "/", ".", [global, {return, binary}]) of + <<"root.", _/binary>> = Device -> Device; + Device -> <<"root.", Device/binary>> + end + end; + DeviceId -> + DeviceIdTkn = emqx_plugin_libs_rule:preproc_tmpl(DeviceId), + emqx_plugin_libs_rule:proc_tmpl(DeviceIdTkn, Message) + end. + +handle_response({ok, 200, _Headers, Body} = Resp) -> + eval_response_body(Body, Resp); +handle_response({ok, 200, Body} = Resp) -> + eval_response_body(Body, Resp); +handle_response({ok, Code, _Headers, Body}) -> + {error, #{code => Code, body => Body}}; +handle_response({ok, Code, Body}) -> + {error, #{code => Code, body => Body}}; +handle_response({error, _} = Error) -> + Error. + +eval_response_body(Body, Resp) -> + case emqx_utils_json:decode(Body) of + #{<<"code">> := 200} -> Resp; + Reason -> {error, Reason} + end. 
diff --git a/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl b/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl new file mode 100644 index 000000000..434587cf0 --- /dev/null +++ b/apps/emqx_bridge_iotdb/test/emqx_bridge_iotdb_impl_SUITE.erl @@ -0,0 +1,229 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_iotdb_impl_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). + +-define(BRIDGE_TYPE_BIN, <<"iotdb">>). +-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_iotdb]). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, plain} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + [ + {plain, AllTCs} + ]. + +init_per_suite(Config) -> + emqx_bridge_testlib:init_per_suite(Config, ?APPS). + +end_per_suite(Config) -> + emqx_bridge_testlib:end_per_suite(Config). + +init_per_group(plain = Type, Config0) -> + Host = os:getenv("IOTDB_PLAIN_HOST", "toxiproxy.emqx.net"), + Port = list_to_integer(os:getenv("IOTDB_PLAIN_PORT", "18080")), + ProxyName = "iotdb", + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + Config = emqx_bridge_testlib:init_per_group(Type, ?BRIDGE_TYPE_BIN, Config0), + [ + {bridge_host, Host}, + {bridge_port, Port}, + {proxy_name, ProxyName} + | Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_iotdb); + _ -> + {skip, no_iotdb} + end + end; +init_per_group(_Group, Config) -> + Config. 
+ +end_per_group(Group, Config) when + Group =:= plain +-> + emqx_bridge_testlib:end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(TestCase, Config0) -> + Config = emqx_bridge_testlib:init_per_testcase(TestCase, Config0, fun bridge_config/3), + reset_service(Config), + Config. + +end_per_testcase(TestCase, Config) -> + emqx_bridge_testlib:end_per_testcase(TestCase, Config). + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +bridge_config(TestCase, _TestGroup, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + Host = ?config(bridge_host, Config), + Port = ?config(bridge_port, Config), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + ServerURL = iolist_to_binary([ + "http://", + Host, + ":", + integer_to_binary(Port) + ]), + ConfigString = + io_lib:format( + "bridges.iotdb.~s {\n" + " enable = true\n" + " base_url = \"~s\"\n" + " authentication = {\n" + " username = \"root\"\n" + " password = \"root\"\n" + " }\n" + " pool_size = 1\n" + " resource_opts = {\n" + " auto_restart_interval = 5000\n" + " request_timeout = 30000\n" + " query_mode = \"async\"\n" + " worker_pool_size = 1\n" + " }\n" + "}\n", + [ + Name, + ServerURL + ] + ), + {Name, ConfigString, emqx_bridge_testlib:parse_and_check(Config, ConfigString, Name)}. 
+ +reset_service(Config) -> + _BridgeConfig = + #{ + <<"base_url">> := BaseURL, + <<"authentication">> := #{ + <<"username">> := Username, + <<"password">> := Password + } + } = + ?config(bridge_config, Config), + ct:pal("bridge config: ~p", [_BridgeConfig]), + Path = <<BaseURL/binary, "/rest/v2/nonQuery">>, + BasicToken = base64:encode(<<Username/binary, ":", Password/binary>>), + Headers = [ + {"Content-type", "application/json"}, + {"Authorization", binary_to_list(BasicToken)} + ], + Device = iotdb_device(Config), + Body = #{sql => <<"delete from ", Device/binary, ".*">>}, + {ok, _} = emqx_mgmt_api_test_util:request_api(post, Path, "", Headers, Body, #{}). + +make_iotdb_payload(DeviceId) -> + make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"). + +make_iotdb_payload(DeviceId, Measurement, Type, Value) -> + #{ + measurement => Measurement, + data_type => Type, + value => Value, + device_id => DeviceId, + is_aligned => false + }. + +make_message_fun(Topic, Payload) -> + fun() -> + MsgId = erlang:unique_integer([positive]), + #{ + topic => Topic, + id => MsgId, + payload => Payload, + retain => true + } + end. + +iotdb_device(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + Device = re:replace(MQTTTopic, "/", ".dev", [global, {return, binary}]), + <<"root.", Device/binary>>. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_sync_query_simple(Config) -> + DeviceId = iotdb_device(Config), + Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"), + MakeMessageFun = make_message_fun(DeviceId, Payload), + IsSuccessCheck = + fun(Result) -> + ?assertEqual(ok, element(1, Result)) + end, + emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck). 
+ +t_async_query(Config) -> + DeviceId = iotdb_device(Config), + Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"), + MakeMessageFun = make_message_fun(DeviceId, Payload), + IsSuccessCheck = + fun(Result) -> + ?assertEqual(ok, element(1, Result)) + end, + emqx_bridge_testlib:t_async_query(Config, MakeMessageFun, IsSuccessCheck). + +t_sync_query_aggregated(Config) -> + DeviceId = iotdb_device(Config), + Payload = [ + make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "36"), + (make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "37"))#{timestamp => <<"mow_us">>}, + (make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "38"))#{timestamp => <<"mow_ns">>}, + make_iotdb_payload(DeviceId, "charged", <<"BOOLEAN">>, "1"), + make_iotdb_payload(DeviceId, "stoked", <<"BOOLEAN">>, "true"), + make_iotdb_payload(DeviceId, "enriched", <<"BOOLEAN">>, <<"TRUE">>), + make_iotdb_payload(DeviceId, "drained", <<"BOOLEAN">>, "0"), + make_iotdb_payload(DeviceId, "dazzled", <<"BOOLEAN">>, "false"), + make_iotdb_payload(DeviceId, "unplugged", <<"BOOLEAN">>, <<"FALSE">>), + make_iotdb_payload(DeviceId, "weight", <<"FLOAT">>, "87.3"), + make_iotdb_payload(DeviceId, "foo", <<"TEXT">>, <<"bar">>) + ], + MakeMessageFun = make_message_fun(DeviceId, Payload), + IsSuccessCheck = + fun(Result) -> + ?assertEqual(ok, element(1, Result)) + end, + emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck). + +t_sync_query_fail(Config) -> + DeviceId = iotdb_device(Config), + Payload = make_iotdb_payload(DeviceId, "temp", <<"INT32">>, "Anton"), + MakeMessageFun = make_message_fun(DeviceId, Payload), + IsSuccessCheck = + fun(Result) -> + ?assertEqual(error, element(1, Result)) + end, + emqx_bridge_testlib:t_sync_query(Config, MakeMessageFun, IsSuccessCheck). + +t_create_via_http(Config) -> + emqx_bridge_testlib:t_create_via_http(Config). + +t_start_stop(Config) -> + emqx_bridge_testlib:t_start_stop(Config, iotdb_bridge_stopped). 
+ +t_on_get_status(Config) -> + emqx_bridge_testlib:t_on_get_status(Config). diff --git a/apps/emqx_bridge_kafka/README.md b/apps/emqx_bridge_kafka/README.md index 72cbeecc6..07cae256b 100644 --- a/apps/emqx_bridge_kafka/README.md +++ b/apps/emqx_bridge_kafka/README.md @@ -10,10 +10,18 @@ workers from `emqx_resource`. It implements the connection management and interaction without need for a separate connector app, since it's not used by authentication and authorization applications. -## Contributing +# Documentation links + +For more information about Apache Kafka, please see its [official site](https://kafka.apache.org/). + +# Configurations + +Please see [Ingest data into Kafka](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-kafka.html) for more detailed info. + +# Contributing Please see our [contributing.md](../../CONTRIBUTING.md). -## License +# License -See [BSL](./BSL.txt). +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_kafka/rebar.config b/apps/emqx_bridge_kafka/rebar.config index fd21fd15b..68a8d3e69 100644 --- a/apps/emqx_bridge_kafka/rebar.config +++ b/apps/emqx_bridge_kafka/rebar.config @@ -2,7 +2,7 @@ {erl_opts, [debug_info]}. 
{deps, [ {wolff, {git, "https://github.com/kafka4beam/wolff.git", {tag, "1.7.5"}}} , {kafka_protocol, {git, "https://github.com/kafka4beam/kafka_protocol.git", {tag, "4.1.2"}}} - , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0-rc1"}}} + , {brod_gssapi, {git, "https://github.com/kafka4beam/brod_gssapi.git", {tag, "v0.1.0"}}} , {brod, {git, "https://github.com/kafka4beam/brod.git", {tag, "3.16.8"}}} , {emqx_connector, {path, "../../apps/emqx_connector"}} , {emqx_resource, {path, "../../apps/emqx_resource"}} diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src index 81b522164..6c103f73b 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_kafka, [ {description, "EMQX Enterprise Kafka Bridge"}, - {vsn, "0.1.0"}, + {vsn, "0.1.2"}, {registered, [emqx_bridge_kafka_consumer_sup]}, {applications, [ kernel, diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl index fdfa3300c..f7958af81 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl @@ -114,8 +114,8 @@ callback_mode() -> is_buffer_supported() -> true. --spec on_start(manager_id(), config()) -> {ok, state()}. -on_start(InstanceId, Config) -> +-spec on_start(resource_id(), config()) -> {ok, state()}. +on_start(ResourceId, Config) -> #{ authentication := Auth, bootstrap_hosts := BootstrapHosts0, @@ -133,7 +133,7 @@ on_start(InstanceId, Config) -> BootstrapHosts = emqx_bridge_kafka_impl:hosts(BootstrapHosts0), KafkaType = kafka_consumer, %% Note: this is distinct per node. 
- ClientID = make_client_id(InstanceId, KafkaType, BridgeName), + ClientID = make_client_id(ResourceId, KafkaType, BridgeName), ClientOpts0 = case Auth of none -> []; @@ -144,26 +144,26 @@ on_start(InstanceId, Config) -> ok -> ?tp( kafka_consumer_client_started, - #{client_id => ClientID, instance_id => InstanceId} + #{client_id => ClientID, resource_id => ResourceId} ), ?SLOG(info, #{ msg => "kafka_consumer_client_started", - instance_id => InstanceId, + resource_id => ResourceId, kafka_hosts => BootstrapHosts }); {error, Reason} -> ?SLOG(error, #{ msg => "failed_to_start_kafka_consumer_client", - instance_id => InstanceId, + resource_id => ResourceId, kafka_hosts => BootstrapHosts, reason => emqx_utils:redact(Reason) }), throw(?CLIENT_DOWN_MESSAGE) end, - start_consumer(Config, InstanceId, ClientID). + start_consumer(Config, ResourceId, ClientID). --spec on_stop(manager_id(), state()) -> ok. -on_stop(_InstanceID, State) -> +-spec on_stop(resource_id(), state()) -> ok. +on_stop(_ResourceID, State) -> #{ subscriber_id := SubscriberId, kafka_client_id := ClientID @@ -172,14 +172,19 @@ on_stop(_InstanceID, State) -> stop_client(ClientID), ok. --spec on_get_status(manager_id(), state()) -> connected | disconnected. -on_get_status(_InstanceID, State) -> +-spec on_get_status(resource_id(), state()) -> connected | disconnected. +on_get_status(_ResourceID, State) -> #{ subscriber_id := SubscriberId, kafka_client_id := ClientID, kafka_topics := KafkaTopics } = State, - do_get_status(State, ClientID, KafkaTopics, SubscriberId). + case do_get_status(ClientID, KafkaTopics, SubscriberId) of + {disconnected, Message} -> + {disconnected, State, Message}; + Res -> + Res + end. %%------------------------------------------------------------------------------------- %% `brod_group_subscriber' API @@ -266,8 +271,8 @@ ensure_consumer_supervisor_started() -> ok end. --spec start_consumer(config(), manager_id(), brod:client_id()) -> {ok, state()}. 
-start_consumer(Config, InstanceId, ClientID) -> +-spec start_consumer(config(), resource_id(), brod:client_id()) -> {ok, state()}. +start_consumer(Config, ResourceId, ClientID) -> #{ bootstrap_hosts := BootstrapHosts0, bridge_name := BridgeName, @@ -287,7 +292,7 @@ start_consumer(Config, InstanceId, ClientID) -> InitialState = #{ key_encoding_mode => KeyEncodingMode, hookpoint => Hookpoint, - resource_id => emqx_bridge_resource:resource_id(kafka_consumer, BridgeName), + resource_id => ResourceId, topic_mapping => TopicMapping, value_encoding_mode => ValueEncodingMode }, @@ -332,7 +337,7 @@ start_consumer(Config, InstanceId, ClientID) -> {ok, _ConsumerPid} -> ?tp( kafka_consumer_subscriber_started, - #{instance_id => InstanceId, subscriber_id => SubscriberId} + #{resource_id => ResourceId, subscriber_id => SubscriberId} ), {ok, #{ subscriber_id => SubscriberId, @@ -342,7 +347,7 @@ start_consumer(Config, InstanceId, ClientID) -> {error, Reason2} -> ?SLOG(error, #{ msg => "failed_to_start_kafka_consumer", - instance_id => InstanceId, + resource_id => ResourceId, kafka_hosts => emqx_bridge_kafka_impl:hosts(BootstrapHosts0), reason => emqx_utils:redact(Reason2) }), @@ -376,41 +381,41 @@ stop_client(ClientID) -> ), ok. -do_get_status(State, ClientID, [KafkaTopic | RestTopics], SubscriberId) -> +do_get_status(ClientID, [KafkaTopic | RestTopics], SubscriberId) -> case brod:get_partitions_count(ClientID, KafkaTopic) of {ok, NPartitions} -> - case do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) of - connected -> do_get_status(State, ClientID, RestTopics, SubscriberId); + case do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) of + connected -> do_get_status(ClientID, RestTopics, SubscriberId); disconnected -> disconnected end; {error, {client_down, Context}} -> case infer_client_error(Context) of auth_error -> Message = "Authentication error. 
" ++ ?CLIENT_DOWN_MESSAGE, - {disconnected, State, Message}; + {disconnected, Message}; {auth_error, Message0} -> Message = binary_to_list(Message0) ++ "; " ++ ?CLIENT_DOWN_MESSAGE, - {disconnected, State, Message}; + {disconnected, Message}; connection_refused -> Message = "Connection refused. " ++ ?CLIENT_DOWN_MESSAGE, - {disconnected, State, Message}; + {disconnected, Message}; _ -> - {disconnected, State, ?CLIENT_DOWN_MESSAGE} + {disconnected, ?CLIENT_DOWN_MESSAGE} end; {error, leader_not_available} -> Message = "Leader connection not available. Please check the Kafka topic used," " the connection parameters and Kafka cluster health", - {disconnected, State, Message}; + {disconnected, Message}; _ -> disconnected end; -do_get_status(_State, _ClientID, _KafkaTopics = [], _SubscriberId) -> +do_get_status(_ClientID, _KafkaTopics = [], _SubscriberId) -> connected. --spec do_get_status1(brod:client_id(), binary(), subscriber_id(), pos_integer()) -> +-spec do_get_topic_status(brod:client_id(), binary(), subscriber_id(), pos_integer()) -> connected | disconnected. -do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) -> +do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) -> Results = lists:map( fun(N) -> @@ -466,19 +471,19 @@ consumer_group_id(BridgeName0) -> BridgeName = to_bin(BridgeName0), <<"emqx-kafka-consumer-", BridgeName/binary>>. --spec is_dry_run(manager_id()) -> boolean(). -is_dry_run(InstanceId) -> - TestIdStart = string:find(InstanceId, ?TEST_ID_PREFIX), +-spec is_dry_run(resource_id()) -> boolean(). +is_dry_run(ResourceId) -> + TestIdStart = string:find(ResourceId, ?TEST_ID_PREFIX), case TestIdStart of nomatch -> false; _ -> - string:equal(TestIdStart, InstanceId) + string:equal(TestIdStart, ResourceId) end. --spec make_client_id(manager_id(), kafka_consumer, atom() | binary()) -> atom(). 
-make_client_id(InstanceId, KafkaType, KafkaName) -> - case is_dry_run(InstanceId) of +-spec make_client_id(resource_id(), kafka_consumer, atom() | binary()) -> atom(). +make_client_id(ResourceId, KafkaType, KafkaName) -> + case is_dry_run(ResourceId) of false -> ClientID0 = emqx_bridge_kafka_impl:make_client_id(KafkaType, KafkaName), binary_to_atom(ClientID0); diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl index 08fbf5e15..4f98f33cf 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl @@ -1156,11 +1156,12 @@ t_start_and_consume_ok(Config) -> ), %% Check that the bridge probe API doesn't leak atoms. - ProbeRes = probe_bridge_api(Config), - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes0 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), AtomsBefore = erlang:system_info(atom_count), %% Probe again; shouldn't have created more atoms. - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes1 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), AtomsAfter = erlang:system_info(atom_count), ?assertEqual(AtomsBefore, AtomsAfter), @@ -1259,11 +1260,12 @@ t_multiple_topic_mappings(Config) -> {ok, _} = snabbkaffe:receive_events(SRef0), %% Check that the bridge probe API doesn't leak atoms. - ProbeRes = probe_bridge_api(Config), - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes0 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), AtomsBefore = erlang:system_info(atom_count), %% Probe again; shouldn't have created more atoms. 
- ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes1 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), AtomsAfter = erlang:system_info(atom_count), ?assertEqual(AtomsBefore, AtomsAfter), @@ -1473,7 +1475,10 @@ do_t_receive_after_recovery(Config) -> ResourceId = resource_id(Config), ?check_trace( begin - {ok, _} = create_bridge(Config), + {ok, _} = create_bridge( + Config, + #{<<"kafka">> => #{<<"offset_reset_policy">> => <<"earliest">>}} + ), ping_until_healthy(Config, _Period = 1_500, _Timeout0 = 24_000), {ok, connected} = emqx_resource_manager:health_check(ResourceId), %% 0) ensure each partition commits its offset so it can diff --git a/apps/emqx_bridge_matrix/BSL.txt b/apps/emqx_bridge_matrix/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_matrix/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_matrix/README.md b/apps/emqx_bridge_matrix/README.md new file mode 100644 index 000000000..0d9c4fc4a --- /dev/null +++ b/apps/emqx_bridge_matrix/README.md @@ -0,0 +1,37 @@ +# EMQX MatrixDB Bridge + +[YMatrix](https://www.ymatrix.cn/) is a hyper-converged database product developed by YMatrix based on the PostgreSQL / Greenplum classic open source database. 
In addition to being able to handle time series scenarios with ease, it also supports classic scenarios such as online transaction processing (OLTP) and online analytical processing (OLAP). + +The application is used to connect EMQX and MatrixDB. +User can create a rule and easily ingest IoT data into MatrixDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src new file mode 100644 index 000000000..e2a17e070 --- /dev/null +++ b/apps/emqx_bridge_matrix/src/emqx_bridge_matrix.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_matrix, [ + {description, "EMQX Enterprise MatrixDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_mongodb/BSL.txt b/apps/emqx_bridge_mongodb/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_mongodb/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. 
+Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. 
If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. 
Not to modify this License in any other way. diff --git a/apps/emqx_bridge_mongodb/README.md b/apps/emqx_bridge_mongodb/README.md new file mode 100644 index 000000000..088c8467f --- /dev/null +++ b/apps/emqx_bridge_mongodb/README.md @@ -0,0 +1,39 @@ +# EMQX MongoDB Bridge + +[MongoDB](https://github.com/mongodb/mongo) is a source-available cross-platform +document-oriented database. It is a NoSQL database that stores flexible JSON-like +documents for faster iteration and better data organization. +It provides high availability and scaling with its built-in replication and sharding +features, and is used in a variety of industries + +The application is used to connect EMQX and MongoDB. +User can create a rule and easily ingest IoT data into MongoDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into MongoDB](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-mongodb.html) + for how to use EMQX dashboard to ingest IoT data into MongoDB. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). 
diff --git a/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src new file mode 100644 index 000000000..008a9e164 --- /dev/null +++ b/apps/emqx_bridge_mongodb/src/emqx_bridge_mongodb.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_mongodb, [ + {description, "EMQX Enterprise MongoDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_mysql/BSL.txt b/apps/emqx_bridge_mysql/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_mysql/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
+ +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_mysql/README.md b/apps/emqx_bridge_mysql/README.md new file mode 100644 index 000000000..d7c9b5647 --- /dev/null +++ b/apps/emqx_bridge_mysql/README.md @@ -0,0 +1,36 @@ +# EMQX MySQL Bridge + +[MySQL](https://github.com/mysql/mysql-server) is a popular open-source relational database +management system. + +The application is used to connect EMQX and MySQL. +User can create a rule and easily ingest IoT data into MySQL by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into MySQL](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-mysql.html) + for how to use EMQX dashboard to ingest IoT data into MySQL. 
+ +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src b/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src new file mode 100644 index 000000000..2e36587a7 --- /dev/null +++ b/apps/emqx_bridge_mysql/src/emqx_bridge_mysql.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_mysql, [ + {description, "EMQX Enterprise MySQL Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_opents/BSL.txt b/apps/emqx_bridge_opents/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_opents/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. 
However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. 
+ +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. 
diff --git a/apps/emqx_bridge_opents/README.md b/apps/emqx_bridge_opents/README.md new file mode 100644 index 000000000..a1d6511ee --- /dev/null +++ b/apps/emqx_bridge_opents/README.md @@ -0,0 +1,36 @@ +# EMQX OpenTSDB Bridge + +[OpenTSDB](http://opentsdb.net) is a distributed, scalable Time Series Database (TSDB) written on top of HBase. + +OpenTSDB was written to address a common need: store, index and serve metrics collected from computer systems (network gear, operating systems, applications) at a large scale, and make this data easily accessible and graphable. + +OpenTSDB allows you to collect thousands of metrics from tens of thousands of hosts and applications, at a high rate (every few seconds). + +OpenTSDB will never delete or downsample data and can easily store hundreds of billions of data points. + +The application is used to connect EMQX and OpenTSDB. User can create a rule and easily ingest IoT data into OpenTSDB by leveraging the +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). 
diff --git a/apps/emqx_bridge_opents/docker-ct b/apps/emqx_bridge_opents/docker-ct new file mode 100644 index 000000000..fc68b978e --- /dev/null +++ b/apps/emqx_bridge_opents/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +opents diff --git a/apps/emqx_bridge_opents/rebar.config b/apps/emqx_bridge_opents/rebar.config new file mode 100644 index 000000000..d7bd4560f --- /dev/null +++ b/apps/emqx_bridge_opents/rebar.config @@ -0,0 +1,8 @@ +{erl_opts, [debug_info]}. + +{deps, [ + {opentsdb, {git, "https://github.com/emqx/opentsdb-client-erl", {tag, "v0.5.1"}}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src new file mode 100644 index 000000000..d001446b3 --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src @@ -0,0 +1,15 @@ +{application, emqx_bridge_opents, [ + {description, "EMQX Enterprise OpenTSDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + opentsdb + ]}, + {env, []}, + {modules, []}, + + {licenses, ["BSL"]}, + {links, []} +]}. diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl new file mode 100644 index 000000000..2eb6a554f --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl @@ -0,0 +1,85 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_opents). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). 
+ +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api +conn_bridge_examples(Method) -> + [ + #{ + <<"opents">> => #{ + summary => <<"OpenTSDB Bridge">>, + value => values(Method) + } + } + ]. + +values(_Method) -> + #{ + enable => true, + type => opents, + name => <<"foo">>, + server => <<"http://127.0.0.1:4242">>, + pool_size => 8, + resource_opts => #{ + worker_pool_size => 1, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_opents". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + emqx_bridge_opents_connector:fields(config); +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for OpenTSDB using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal + +type_field() -> + {type, mk(enum([opents]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. 
diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl new file mode 100644 index 000000000..0366c9dc2 --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl @@ -0,0 +1,184 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_opents_connector). + +-behaviour(emqx_resource). + +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-export([roots/0, fields/1]). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). + +-export([connect/1]). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +%%===================================================================== +%% Hocon schema +roots() -> + [{config, #{type => hoconsc:ref(?MODULE, config)}}]. + +fields(config) -> + [ + {server, mk(binary(), #{required => true, desc => ?DESC("server")})}, + {pool_size, fun emqx_connector_schema_lib:pool_size/1}, + {summary, mk(boolean(), #{default => true, desc => ?DESC("summary")})}, + {details, mk(boolean(), #{default => false, desc => ?DESC("details")})}, + {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} + ]. + +%%======================================================================================== +%% `emqx_resource' API +%%======================================================================================== + +callback_mode() -> always_sync. + +is_buffer_supported() -> false. 
+ +on_start( + InstanceId, + #{ + server := Server, + pool_size := PoolSize, + summary := Summary, + details := Details, + resource_opts := #{batch_size := BatchSize} + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_opents_connector", + connector => InstanceId, + config => emqx_utils:redact(Config) + }), + + Options = [ + {server, to_str(Server)}, + {summary, Summary}, + {details, Details}, + {max_batch_size, BatchSize}, + {pool_size, PoolSize} + ], + + State = #{pool_name => InstanceId, server => Server}, + case opentsdb_connectivity(Server) of + ok -> + case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of + ok -> + {ok, State}; + Error -> + Error + end; + {error, Reason} = Error -> + ?SLOG(error, #{msg => "Initiate resource failed", reason => Reason}), + Error + end. + +on_stop(InstanceId, #{pool_name := PoolName} = _State) -> + ?SLOG(info, #{ + msg => "stopping_opents_connector", + connector => InstanceId + }), + emqx_resource_pool:stop(PoolName). + +on_query(InstanceId, Request, State) -> + on_batch_query(InstanceId, [Request], State). + +on_batch_query( + InstanceId, + BatchReq, + State +) -> + Datas = [format_opentsdb_msg(Msg) || {_Key, Msg} <- BatchReq], + do_query(InstanceId, Datas, State). + +on_get_status(_InstanceId, #{server := Server}) -> + Result = + case opentsdb_connectivity(Server) of + ok -> + connected; + {error, Reason} -> + ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}), + connecting + end, + Result. 
+ +%%======================================================================================== +%% Helper fns +%%======================================================================================== + +do_query(InstanceId, Query, #{pool_name := PoolName} = State) -> + ?TRACE( + "QUERY", + "opents_connector_received", + #{connector => InstanceId, query => Query, state => State} + ), + Result = ecpool:pick_and_do(PoolName, {opentsdb, put, [Query]}, no_handover), + + case Result of + {error, Reason} -> + ?tp( + opents_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "opents_connector_do_query_failed", + connector => InstanceId, + query => Query, + reason => Reason + }), + Result; + _ -> + ?tp( + opents_connector_query_return, + #{result => Result} + ), + Result + end. + +connect(Opts) -> + opentsdb:start_link(Opts). + +to_str(List) when is_list(List) -> + List; +to_str(Bin) when is_binary(Bin) -> + erlang:binary_to_list(Bin). + +opentsdb_connectivity(Server) -> + SvrUrl = + case Server of + <<"http://", _/binary>> -> Server; + <<"https://", _/binary>> -> Server; + _ -> "http://" ++ Server + end, + emqx_plugin_libs_rule:http_connectivity(SvrUrl). + +format_opentsdb_msg(Msg) -> + maps:with( + [ + timestamp, + metric, + tags, + value, + <<"timestamp">>, + <<"metric">>, + <<"tags">>, + <<"value">> + ], + Msg + ). diff --git a/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl new file mode 100644 index 000000000..6f444b93e --- /dev/null +++ b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl @@ -0,0 +1,363 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_opents_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). 
+-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +% DB defaults +-define(BATCH_SIZE, 10). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, with_batch}, + {group, without_batch} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + [ + {with_batch, TCs}, + {without_batch, TCs} + ]. + +init_per_group(with_batch, Config0) -> + Config = [{batch_size, ?BATCH_SIZE} | Config0], + common_init(Config); +init_per_group(without_batch, Config0) -> + Config = [{batch_size, 1} | Config0], + common_init(Config); +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok. + +init_per_testcase(_Testcase, Config) -> + delete_bridge(Config), + snabbkaffe:start_trace(), + Config. + +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = snabbkaffe:stop(), + delete_bridge(Config), + ok. 
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(ConfigT) -> + Host = os:getenv("OPENTS_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("OPENTS_PORT", "4242")), + + Config0 = [ + {opents_host, Host}, + {opents_port, Port}, + {proxy_name, "opents"} + | ConfigT + ], + + BridgeType = proplists:get_value(bridge_type, Config0, <<"opents">>), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + {Name, OpenTSConf} = opents_config(BridgeType, Config0), + Config = + [ + {opents_config, OpenTSConf}, + {opents_bridge_type, BridgeType}, + {opents_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_opents); + _ -> + {skip, no_opents} + end + end. + +opents_config(BridgeType, Config) -> + Port = integer_to_list(?config(opents_port, Config)), + Server = "http://" ++ ?config(opents_host, Config) ++ ":" ++ Port, + Name = atom_to_binary(?MODULE), + BatchSize = ?config(batch_size, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " server = ~p\n" + " resource_opts = {\n" + " request_timeout = 500ms\n" + " batch_size = ~b\n" + " query_mode = sync\n" + " }\n" + "}", + [ + BridgeType, + Name, + Server, + BatchSize + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. 
+ +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + Config0 = ?config(opents_config, Config), + Config1 = emqx_utils_maps:deep_merge(Config0, Overrides), + emqx_bridge:create(BridgeType, Name, Config1). + +delete_bridge(Config) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). + +query_resource(Config, Request) -> + query_resource(Config, Request, 1_000). + +query_resource(Config, Request, Timeout) -> + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => Timeout}). 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + SentData = make_data(), + ?check_trace( + begin + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + {ok, 200, #{failed := 0, success := 1}}, Result + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(opents_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + OpentsConfig0 = ?config(opents_config, Config), + OpentsConfig = OpentsConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(OpentsConfig) + ), + SentData = make_data(), + ?check_trace( + begin + Request = {send_message, SentData}, + Res0 = query_resource(Config, Request, 2_500), + ?assertMatch( + {ok, 200, #{failed := 0, success := 1}}, Res0 + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(opents_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), + ok + end + ), + ok. + +t_get_status(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + ok. 
+ +t_create_disconnected(Config) -> + BridgeType = proplists:get_value(bridge_type, Config, <<"opents">>), + Config1 = lists:keyreplace(opents_port, 1, Config, {opents_port, 61234}), + {_Name, OpenTSConf} = opents_config(BridgeType, Config1), + + Config2 = lists:keyreplace(opents_config, 1, Config1, {opents_config, OpenTSConf}), + ?assertMatch({ok, _}, create_bridge(Config2)), + + Name = ?config(opents_name, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceID)), + ok. + +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {ok, _} = create_bridge(Config), + SentData = make_data(), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch({error, _}, Result), + ok + end), + ok. + +t_write_timeout(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {ok, _} = create_bridge( + Config, + #{ + <<"resource_opts">> => #{ + <<"request_timeout">> => 500, + <<"resume_interval">> => 100, + <<"health_check_interval">> => 100 + } + } + ), + SentData = make_data(), + emqx_common_test_helpers:with_failure( + timeout, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + query_resource(Config, {send_message, SentData}) + ) + end + ), + ok. 
+ +t_missing_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, #{}), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + {error, {400, #{failed := 1, success := 0}}}, + Result + ), + ok. + +t_bad_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Data = maps:without([metric], make_data()), + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, Data), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + + ?assertMatch( + {error, {400, #{failed := 1, success := 0}}}, Result + ), + ok. + +make_data() -> + make_data(<<"cpu">>, 12). + +make_data(Metric, Value) -> + #{ + metric => Metric, + tags => #{ + <<"host">> => <<"serverA">> + }, + value => Value + }. diff --git a/apps/emqx_bridge_oracle/BSL.txt b/apps/emqx_bridge_oracle/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_oracle/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_oracle/README.md b/apps/emqx_bridge_oracle/README.md new file mode 100644 index 000000000..d2974b722 --- /dev/null +++ b/apps/emqx_bridge_oracle/README.md @@ -0,0 +1,28 @@ +# EMQX Oracle Database Bridge + +This application houses the Oracle Database bridge for EMQX Enterprise Edition. +It implements the data bridge APIs for interacting with an Oracle Database Bridge. 
+ + +# Documentation + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +## Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +## License + +See [BSL](./BSL.txt). diff --git a/apps/emqx_bridge_oracle/docker-ct b/apps/emqx_bridge_oracle/docker-ct new file mode 100644 index 000000000..c24dc4bc9 --- /dev/null +++ b/apps/emqx_bridge_oracle/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +oracle diff --git a/apps/emqx_bridge_oracle/etc/emqx_bridge_oracle.conf b/apps/emqx_bridge_oracle/etc/emqx_bridge_oracle.conf new file mode 100644 index 000000000..e69de29bb diff --git a/apps/emqx_bridge_oracle/rebar.config b/apps/emqx_bridge_oracle/rebar.config new file mode 100644 index 000000000..c238546c4 --- /dev/null +++ b/apps/emqx_bridge_oracle/rebar.config @@ -0,0 +1,13 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ {emqx_oracle, {path, "../../apps/emqx_oracle"}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + % {config, "config/sys.config"}, + {apps, [emqx_bridge_oracle]} +]}. 
diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src new file mode 100644 index 000000000..4f81c2110 --- /dev/null +++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src @@ -0,0 +1,14 @@ +{application, emqx_bridge_oracle, [ + {description, "EMQX Enterprise Oracle Database Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + emqx_oracle + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl new file mode 100644 index 000000000..8a87f02ba --- /dev/null +++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl @@ -0,0 +1,109 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_oracle). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_SQL, << + "insert into t_mqtt_msg(msgid, topic, qos, payload)" + "values (${id}, ${topic}, ${qos}, ${payload})" +>>). + +conn_bridge_examples(Method) -> + [ + #{ + <<"oracle">> => #{ + summary => <<"Oracle Database Bridge">>, + value => values(Method) + } + } + ]. 
+ +values(_Method) -> + #{ + enable => true, + type => oracle, + name => <<"foo">>, + server => <<"127.0.0.1:1521">>, + pool_size => 8, + database => <<"ORCL">>, + sid => <<"ORCL">>, + username => <<"root">>, + password => <<"******">>, + sql => ?DEFAULT_SQL, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions + +namespace() -> "bridge_oracle". + +roots() -> []. + +fields("config") -> + [ + {enable, + hoconsc:mk( + boolean(), + #{desc => ?DESC("config_enable"), default => true} + )}, + {sql, + hoconsc:mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}, + {local_topic, + hoconsc:mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + (emqx_oracle_schema:fields(config) -- + emqx_connector_schema_lib:prepare_statement_fields()); +fields("post") -> + fields("post", oracle); +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +fields("post", Type) -> + [type_field(Type), name_field() | fields("config")]. + +desc("config") -> + ?DESC("desc_config"); +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- + +type_field(Type) -> + {type, hoconsc:mk(hoconsc:enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, hoconsc:mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. 
diff --git a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl new file mode 100644 index 000000000..b50788277 --- /dev/null +++ b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl @@ -0,0 +1,514 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_oracle_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-import(emqx_common_test_helpers, [on_exit/1]). + +-define(BRIDGE_TYPE_BIN, <<"oracle">>). +-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_oracle, emqx_bridge_oracle]). +-define(DATABASE, "XE"). +-define(RULE_TOPIC, "mqtt/rule"). +% -define(RULE_TOPIC_BIN, <>). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, plain} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + [ + {plain, AllTCs} + ]. + +only_once_tests() -> + [t_create_via_http]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)), + _ = application:stop(emqx_connector), + ok. 
+ +init_per_group(plain = Type, Config) -> + OracleHost = os:getenv("ORACLE_PLAIN_HOST", "toxiproxy.emqx.net"), + OraclePort = list_to_integer(os:getenv("ORACLE_PLAIN_PORT", "1521")), + ProxyName = "oracle", + case emqx_common_test_helpers:is_tcp_server_available(OracleHost, OraclePort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {oracle_host, OracleHost}, + {oracle_port, OraclePort}, + {connection_type, Type} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_oracle); + _ -> + {skip, no_oracle} + end + end; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when + Group =:= plain +-> + common_end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +common_init_per_group() -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?APPS), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic} + ]. + +common_end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). + +end_per_testcase(_Testcase, Config) -> + common_end_per_testcase(_Testcase, Config). 
+ +common_init_per_testcase(TestCase, Config0) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + OracleTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + ConnectionType = ?config(connection_type, Config0), + Config = [{oracle_topic, OracleTopic} | Config0], + {Name, ConfigString, OracleConfig} = oracle_config( + TestCase, ConnectionType, Config + ), + ok = snabbkaffe:start_trace(), + [ + {oracle_name, Name}, + {oracle_config_string, ConfigString}, + {oracle_config, OracleConfig} + | Config + ]. + +common_end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ +sql_insert_template_for_bridge() -> + "INSERT INTO mqtt_test(topic, msgid, payload, retain) VALUES (${topic}, ${id}, ${payload}, ${retain})". + +sql_create_table() -> + "CREATE TABLE mqtt_test (topic VARCHAR2(255), msgid VARCHAR2(64), payload NCLOB, retain NUMBER(1))". + +sql_drop_table() -> + "DROP TABLE mqtt_test". 
+ +reset_table(Config) -> + ResourceId = resource_id(Config), + _ = emqx_resource:simple_sync_query(ResourceId, {sql, sql_drop_table()}), + {ok, [{proc_result, 0, _}]} = emqx_resource:simple_sync_query( + ResourceId, {sql, sql_create_table()} + ), + ok. + +drop_table(Config) -> + ResourceId = resource_id(Config), + emqx_resource:simple_sync_query(ResourceId, {query, sql_drop_table()}), + ok. + +oracle_config(TestCase, _ConnectionType, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + OracleHost = ?config(oracle_host, Config), + OraclePort = ?config(oracle_port, Config), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + ServerURL = iolist_to_binary([ + OracleHost, + ":", + integer_to_binary(OraclePort) + ]), + ConfigString = + io_lib:format( + "bridges.oracle.~s {\n" + " enable = true\n" + " database = \"~s\"\n" + " sid = \"~s\"\n" + " server = \"~s\"\n" + " username = \"system\"\n" + " password = \"oracle\"\n" + " pool_size = 1\n" + " sql = \"~s\"\n" + " resource_opts = {\n" + " auto_restart_interval = 5000\n" + " request_timeout = 30000\n" + " query_mode = \"async\"\n" + " enable_batch = true\n" + " batch_size = 3\n" + " batch_time = \"3s\"\n" + " worker_pool_size = 1\n" + " }\n" + "}\n", + [ + Name, + ?DATABASE, + ?DATABASE, + ServerURL, + sql_insert_template_for_bridge() + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Name)}. + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = ?BRIDGE_TYPE_BIN, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +resource_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + emqx_bridge_resource:resource_id(Type, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). 
+ +create_bridge(Config, Overrides) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + emqx_bridge:create(Type, Name, OracleConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). + +create_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). 
+ +update_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). + +probe_bridge_api(Config, _Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig = ?config(oracle_config, Config), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. 
+ +create_rule_and_action_http(Config) -> + OracleName = ?config(oracle_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, OracleName), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_sync_query(Config) -> + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + reset_table(Config), + MsgId = erlang:unique_integer(), + Params = #{ + topic => ?config(mqtt_topic, Config), + id => MsgId, + payload => ?config(oracle_name, Config), + retain => true + }, + Message = {send_message, Params}, + ?assertEqual( + {ok, [{affected_rows, 1}]}, emqx_resource:simple_sync_query(ResourceId, Message) + ), + ok + end, + [] + ), + ok. 
+ +t_batch_sync_query(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 30, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + reset_table(Config), + MsgId = erlang:unique_integer(), + Params = #{ + topic => ?config(mqtt_topic, Config), + id => MsgId, + payload => ?config(oracle_name, Config), + retain => false + }, + % Send 3 async messages while resource is down. When it comes back, these messages + % will be delivered in sync way. If we try to send sync messages directly, it will + % be sent async as callback_mode is set to async_if_possible. + Message = {send_message, Params}, + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(1000), + emqx_resource:query(ResourceId, Message), + emqx_resource:query(ResourceId, Message), + emqx_resource:query(ResourceId, Message) + end), + ?retry( + _Sleep = 1_000, + _Attempts = 30, + ?assertMatch( + {ok, [{result_set, _, _, [[{3}]]}]}, + emqx_resource:simple_sync_query( + ResourceId, {query, "SELECT COUNT(*) FROM mqtt_test"} + ) + ) + ), + ok + end, + [] + ), + ok. + +t_create_via_http(Config) -> + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + + %% lightweight matrix testing some configs + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"resource_opts">> => + #{<<"batch_size">> => 4} + } + ) + ), + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"resource_opts">> => + #{<<"batch_time">> => <<"4s">>} + } + ) + ), + ok + end, + [] + ), + ok. 
+ +t_start_stop(Config) -> + OracleName = ?config(oracle_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + %% Check that the bridge probe API doesn't leak atoms. + ProbeRes0 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. + ProbeRes1 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + %% Now stop the bridge. + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, OracleName), + #{?snk_kind := oracle_bridge_stopped}, + 5_000 + ) + ), + + ok + end, + fun(Trace) -> + %% one for each probe, one for real + ?assertMatch([_, _, _], ?of_kind(oracle_bridge_stopped, Trace)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. 
+ ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. diff --git a/apps/emqx_bridge_pgsql/BSL.txt b/apps/emqx_bridge_pgsql/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_pgsql/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
+ +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_pgsql/README.md b/apps/emqx_bridge_pgsql/README.md new file mode 100644 index 000000000..fc0bd3c6f --- /dev/null +++ b/apps/emqx_bridge_pgsql/README.md @@ -0,0 +1,38 @@ +# EMQX PostgreSQL Bridge + +[PostgreSQL](https://github.com/PostgreSQL/PostgreSQL) is an open-source relational +database management system (RDBMS) that uses and extends the SQL language. +It is known for its reliability, data integrity, and advanced features such as +support for JSON, XML, and other data formats. + +The application is used to connect EMQX and PostgreSQL. +User can create a rule and easily ingest IoT data into PostgreSQL by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ + +# Documentation + +- Refer to [Ingest data into PostgreSQL](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-pgsql.html) + for how to use EMQX dashboard to ingest IoT data into PostgreSQL. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src new file mode 100644 index 000000000..c695283f3 --- /dev/null +++ b/apps/emqx_bridge_pgsql/src/emqx_bridge_pgsql.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_pgsql, [ + {description, "EMQX Enterprise PostgreSQL Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_pulsar/.gitignore b/apps/emqx_bridge_pulsar/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/apps/emqx_bridge_pulsar/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/apps/emqx_bridge_pulsar/BSL.txt b/apps/emqx_bridge_pulsar/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_pulsar/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. 
+Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. 
This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. 
To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_pulsar/README.md b/apps/emqx_bridge_pulsar/README.md new file mode 100644 index 000000000..fbd8bf81d --- /dev/null +++ b/apps/emqx_bridge_pulsar/README.md @@ -0,0 +1,30 @@ +# Pulsar Data Integration Bridge + +This application houses the Pulsar Producer data integration bridge +for EMQX Enterprise Edition. It provides the means to connect to +Pulsar and publish messages to it. + +Currently, our Pulsar Producer library has its own `replayq` buffering +implementation, so this bridge does not require buffer workers from +`emqx_resource`. It implements the connection management and +interaction without need for a separate connector app, since it's not +used by authentication and authorization applications. + +# Documentation links + +For more information on Apache Pulsar, please see its [official +site](https://pulsar.apache.org/). + +# Configurations + +Please see [our official +documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-pulsar.html) +for more detailed info. + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). 
diff --git a/apps/emqx_bridge_pulsar/docker-ct b/apps/emqx_bridge_pulsar/docker-ct new file mode 100644 index 000000000..6324bb4f7 --- /dev/null +++ b/apps/emqx_bridge_pulsar/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +pulsar diff --git a/apps/emqx_bridge_pulsar/etc/emqx_bridge_pulsar.conf b/apps/emqx_bridge_pulsar/etc/emqx_bridge_pulsar.conf new file mode 100644 index 000000000..e69de29bb diff --git a/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl b/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl new file mode 100644 index 000000000..5ee87e48f --- /dev/null +++ b/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl @@ -0,0 +1,14 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-ifndef(EMQX_BRIDGE_PULSAR_HRL). +-define(EMQX_BRIDGE_PULSAR_HRL, true). + +-define(PULSAR_HOST_OPTIONS, #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] +}). + +-endif. diff --git a/apps/emqx_bridge_pulsar/rebar.config b/apps/emqx_bridge_pulsar/rebar.config new file mode 100644 index 000000000..d5a63f320 --- /dev/null +++ b/apps/emqx_bridge_pulsar/rebar.config @@ -0,0 +1,14 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ + {pulsar, {git, "https://github.com/emqx/pulsar-client-erl.git", {tag, "0.8.1"}}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. + +{shell, [ + % {config, "config/sys.config"}, + {apps, [emqx_bridge_pulsar]} +]}. 
diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src new file mode 100644 index 000000000..b169aa2c4 --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src @@ -0,0 +1,14 @@ +{application, emqx_bridge_pulsar, [ + {description, "EMQX Pulsar Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + pulsar + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl new file mode 100644 index 000000000..18faf0e3b --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl @@ -0,0 +1,228 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar). + +-include("emqx_bridge_pulsar.hrl"). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +%% hocon_schema API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). +%% emqx_ee_bridge "unofficial" API +-export([conn_bridge_examples/1]). + +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> + "bridge_pulsar". + +roots() -> + []. 
+ +fields(pulsar_producer) -> + fields(config) ++ fields(producer_opts); +fields(config) -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {servers, + mk( + binary(), + #{ + required => true, + desc => ?DESC("servers"), + validator => emqx_schema:servers_validator( + ?PULSAR_HOST_OPTIONS, _Required = true + ) + } + )}, + {authentication, + mk(hoconsc:union([none, ref(auth_basic), ref(auth_token)]), #{ + default => none, desc => ?DESC("authentication") + })} + ] ++ emqx_connector_schema_lib:ssl_fields(); +fields(producer_opts) -> + [ + {batch_size, + mk( + pos_integer(), + #{default => 100, desc => ?DESC("producer_batch_size")} + )}, + {compression, + mk( + hoconsc:enum([no_compression, snappy, zlib]), + #{default => no_compression, desc => ?DESC("producer_compression")} + )}, + {send_buffer, + mk(emqx_schema:bytesize(), #{ + default => <<"1MB">>, desc => ?DESC("producer_send_buffer") + })}, + {sync_timeout, + mk(emqx_schema:duration_ms(), #{ + default => <<"3s">>, desc => ?DESC("producer_sync_timeout") + })}, + {retention_period, + mk( + hoconsc:union([infinity, emqx_schema:duration_ms()]), + #{default => infinity, desc => ?DESC("producer_retention_period")} + )}, + {max_batch_bytes, + mk( + emqx_schema:bytesize(), + #{default => <<"900KB">>, desc => ?DESC("producer_max_batch_bytes")} + )}, + {local_topic, mk(binary(), #{required => false, desc => ?DESC("producer_local_topic")})}, + {pulsar_topic, mk(binary(), #{required => true, desc => ?DESC("producer_pulsar_topic")})}, + {strategy, + mk( + hoconsc:enum([random, roundrobin, key_dispatch]), + #{default => random, desc => ?DESC("producer_strategy")} + )}, + {buffer, mk(ref(producer_buffer), #{required => false, desc => ?DESC("producer_buffer")})}, + {message, + mk(ref(producer_pulsar_message), #{ + required => false, desc => ?DESC("producer_message_opts") + })}, + {resource_opts, + mk( + ref(producer_resource_opts), + #{ + required => false, + desc => 
?DESC(emqx_resource_schema, "creation_opts") + } + )} + ]; +fields(producer_buffer) -> + [ + {mode, + mk( + hoconsc:enum([memory, disk, hybrid]), + #{default => memory, desc => ?DESC("buffer_mode")} + )}, + {per_partition_limit, + mk( + emqx_schema:bytesize(), + #{default => <<"2GB">>, desc => ?DESC("buffer_per_partition_limit")} + )}, + {segment_bytes, + mk( + emqx_schema:bytesize(), + #{default => <<"100MB">>, desc => ?DESC("buffer_segment_bytes")} + )}, + {memory_overload_protection, + mk(boolean(), #{ + default => false, + desc => ?DESC("buffer_memory_overload_protection") + })} + ]; +fields(producer_pulsar_message) -> + [ + {key, + mk(string(), #{default => <<"${.clientid}">>, desc => ?DESC("producer_key_template")})}, + {value, mk(string(), #{default => <<"${.}">>, desc => ?DESC("producer_value_template")})} + ]; +fields(producer_resource_opts) -> + SupportedOpts = [ + health_check_interval, + resume_interval, + start_after_created, + start_timeout, + auto_restart_interval + ], + lists:filtermap( + fun + ({health_check_interval = Field, MetaFn}) -> + {true, {Field, override_default(MetaFn, 1_000)}}; + ({Field, _Meta}) -> + lists:member(Field, SupportedOpts) + end, + emqx_resource_schema:fields("creation_opts") + ); +fields(auth_basic) -> + [ + {username, mk(binary(), #{required => true, desc => ?DESC("auth_basic_username")})}, + {password, + mk(binary(), #{ + required => true, + desc => ?DESC("auth_basic_password"), + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} + ]; +fields(auth_token) -> + [ + {jwt, + mk(binary(), #{ + required => true, + desc => ?DESC("auth_token_jwt"), + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} + ]; +fields("get_" ++ Type) -> + emqx_bridge_schema:status_fields() ++ fields("post_" ++ Type); +fields("put_" ++ Type) -> + fields("config_" ++ Type); +fields("post_" ++ Type) -> + [type_field(), name_field() | fields("config_" ++ Type)]; +fields("config_producer") -> + 
fields(pulsar_producer). + +desc(pulsar_producer) -> + ?DESC(pulsar_producer_struct); +desc(producer_resource_opts) -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc("get_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `GET` method."]; +desc("put_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `PUT` method."]; +desc("post_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `POST` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +conn_bridge_examples(_Method) -> + [ + #{ + <<"pulsar_producer">> => #{ + summary => <<"Pulsar Producer Bridge">>, + value => #{todo => true} + } + } + ]. + +%%------------------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------------------- + +mk(Type, Meta) -> hoconsc:mk(Type, Meta). +ref(Name) -> hoconsc:ref(?MODULE, Name). + +type_field() -> + {type, mk(hoconsc:enum([pulsar_producer]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. + +struct_names() -> + [ + auth_basic, + auth_token, + producer_buffer, + producer_pulsar_message + ]. + +override_default(OriginalFn, NewDefault) -> + fun + (default) -> NewDefault; + (Field) -> OriginalFn(Field) + end. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl new file mode 100644 index 000000000..300fe9b2d --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -0,0 +1,421 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar_impl_producer). + +-include("emqx_bridge_pulsar.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_get_status/2, + on_query/3, + on_query_async/4 +]). + +-type pulsar_client_id() :: atom(). +-type state() :: #{ + pulsar_client_id := pulsar_client_id(), + producers := pulsar_producers:producers(), + sync_timeout := infinity | time:time(), + message_template := message_template() +}. +-type buffer_mode() :: memory | disk | hybrid. +-type compression_mode() :: no_compression | snappy | zlib. +-type partition_strategy() :: random | roundrobin | key_dispatch. +-type message_template_raw() :: #{ + key := binary(), + value := binary() +}. +-type message_template() :: #{ + key := emqx_plugin_libs_rule:tmpl_token(), + value := emqx_plugin_libs_rule:tmpl_token() +}. +-type config() :: #{ + authentication := _, + batch_size := pos_integer(), + bridge_name := atom(), + buffer := #{ + mode := buffer_mode(), + per_partition_limit := emqx_schema:byte_size(), + segment_bytes := emqx_schema:byte_size(), + memory_overload_protection := boolean() + }, + compression := compression_mode(), + max_batch_bytes := emqx_schema:bytesize(), + message := message_template_raw(), + pulsar_topic := binary(), + retention_period := infinity | emqx_schema:duration_ms(), + send_buffer := emqx_schema:bytesize(), + servers := binary(), + ssl := _, + strategy := partition_strategy(), + sync_timeout := emqx_schema:duration_ms() +}. + +%%------------------------------------------------------------------------------------- +%% `emqx_resource' API +%%------------------------------------------------------------------------------------- + +callback_mode() -> async_if_possible. 
+ +%% there are no queries to be made to this bridge, so we say that +%% buffer is supported so we don't spawn unused resource buffer +%% workers. +is_buffer_supported() -> true. + +-spec on_start(resource_id(), config()) -> {ok, state()}. +on_start(InstanceId, Config) -> + #{ + authentication := _Auth, + bridge_name := BridgeName, + servers := Servers0, + ssl := SSL + } = Config, + Servers = format_servers(Servers0), + ClientId = make_client_id(InstanceId, BridgeName), + SSLOpts = emqx_tls_lib:to_client_opts(SSL), + ClientOpts = #{ + ssl_opts => SSLOpts, + conn_opts => conn_opts(Config) + }, + case pulsar:ensure_supervised_client(ClientId, Servers, ClientOpts) of + {ok, _Pid} -> + ?tp( + info, + "pulsar_client_started", + #{ + instance_id => InstanceId, + pulsar_hosts => Servers + } + ); + {error, Reason} -> + ?SLOG(error, #{ + msg => "failed_to_start_pulsar_client", + instance_id => InstanceId, + pulsar_hosts => Servers, + reason => Reason + }), + throw(failed_to_start_pulsar_client) + end, + start_producer(Config, InstanceId, ClientId, ClientOpts). + +-spec on_stop(resource_id(), state()) -> ok. +on_stop(_InstanceId, State) -> + #{ + pulsar_client_id := ClientId, + producers := Producers + } = State, + stop_producers(ClientId, Producers), + stop_client(ClientId), + ?tp(pulsar_bridge_stopped, #{instance_id => _InstanceId}), + ok. + +-spec on_get_status(resource_id(), state()) -> connected | disconnected. 
+on_get_status(_InstanceId, State = #{}) -> + #{ + pulsar_client_id := ClientId, + producers := Producers + } = State, + case pulsar_client_sup:find_client(ClientId) of + {ok, Pid} -> + try pulsar_client:get_status(Pid) of + true -> + get_producer_status(Producers); + false -> + disconnected + catch + error:timeout -> + disconnected; + exit:{noproc, _} -> + disconnected + end; + {error, _} -> + disconnected + end; +on_get_status(_InstanceId, _State) -> + %% If a health check happens just after a concurrent request to + %% create the bridge is not quite finished, `State = undefined'. + connecting. + +-spec on_query(resource_id(), {send_message, map()}, state()) -> + {ok, term()} + | {error, timeout} + | {error, term()}. +on_query(_InstanceId, {send_message, Message}, State) -> + #{ + producers := Producers, + sync_timeout := SyncTimeout, + message_template := MessageTemplate + } = State, + PulsarMessage = render_message(Message, MessageTemplate), + try + pulsar:send_sync(Producers, [PulsarMessage], SyncTimeout) + catch + error:timeout -> + {error, timeout} + end. + +-spec on_query_async( + resource_id(), {send_message, map()}, {ReplyFun :: function(), Args :: list()}, state() +) -> + {ok, pid()}. +on_query_async(_InstanceId, {send_message, Message}, AsyncReplyFn, State) -> + ?tp_span( + pulsar_producer_on_query_async, + #{instance_id => _InstanceId, message => Message}, + do_on_query_async(Message, AsyncReplyFn, State) + ). + +do_on_query_async(Message, AsyncReplyFn, State) -> + #{ + producers := Producers, + message_template := MessageTemplate + } = State, + PulsarMessage = render_message(Message, MessageTemplate), + pulsar:send(Producers, [PulsarMessage], #{callback_fn => AsyncReplyFn}). + +%%------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------- + +-spec to_bin(atom() | string() | binary()) -> binary(). 
+to_bin(A) when is_atom(A) -> + atom_to_binary(A); +to_bin(L) when is_list(L) -> + list_to_binary(L); +to_bin(B) when is_binary(B) -> + B. + +-spec format_servers(binary()) -> [string()]. +format_servers(Servers0) -> + Servers1 = emqx_schema:parse_servers(Servers0, ?PULSAR_HOST_OPTIONS), + lists:map( + fun(#{scheme := Scheme, hostname := Host, port := Port}) -> + Scheme ++ "://" ++ Host ++ ":" ++ integer_to_list(Port) + end, + Servers1 + ). + +-spec make_client_id(resource_id(), atom() | binary()) -> pulsar_client_id(). +make_client_id(InstanceId, BridgeName) -> + case is_dry_run(InstanceId) of + true -> + pulsar_producer_probe; + false -> + ClientIdBin = iolist_to_binary([ + <<"pulsar_producer:">>, + to_bin(BridgeName), + <<":">>, + to_bin(node()) + ]), + binary_to_atom(ClientIdBin) + end. + +-spec is_dry_run(resource_id()) -> boolean(). +is_dry_run(InstanceId) -> + TestIdStart = string:find(InstanceId, ?TEST_ID_PREFIX), + case TestIdStart of + nomatch -> + false; + _ -> + string:equal(TestIdStart, InstanceId) + end. + +conn_opts(#{authentication := none}) -> + #{}; +conn_opts(#{authentication := #{username := Username, password := Password}}) -> + #{ + auth_data => iolist_to_binary([Username, <<":">>, Password]), + auth_method_name => <<"basic">> + }; +conn_opts(#{authentication := #{jwt := JWT}}) -> + #{ + auth_data => JWT, + auth_method_name => <<"token">> + }. + +-spec replayq_dir(pulsar_client_id()) -> string(). +replayq_dir(ClientId) -> + filename:join([emqx:data_dir(), "pulsar", to_bin(ClientId)]). + +-spec producer_name(pulsar_client_id()) -> atom(). +producer_name(ClientId) -> + ClientIdBin = to_bin(ClientId), + binary_to_atom( + iolist_to_binary([ + <<"producer-">>, + ClientIdBin + ]) + ). + +-spec start_producer(config(), resource_id(), pulsar_client_id(), map()) -> {ok, state()}. 
+start_producer(Config, InstanceId, ClientId, ClientOpts) -> + #{ + conn_opts := ConnOpts, + ssl_opts := SSLOpts + } = ClientOpts, + #{ + batch_size := BatchSize, + buffer := #{ + mode := BufferMode, + per_partition_limit := PerPartitionLimit, + segment_bytes := SegmentBytes, + memory_overload_protection := MemOLP0 + }, + compression := Compression, + max_batch_bytes := MaxBatchBytes, + message := MessageTemplateOpts, + pulsar_topic := PulsarTopic0, + retention_period := RetentionPeriod, + send_buffer := SendBuffer, + strategy := Strategy, + sync_timeout := SyncTimeout + } = Config, + {OffloadMode, ReplayQDir} = + case BufferMode of + memory -> {false, false}; + disk -> {false, replayq_dir(ClientId)}; + hybrid -> {true, replayq_dir(ClientId)} + end, + MemOLP = + case os:type() of + {unix, linux} -> MemOLP0; + _ -> false + end, + ReplayQOpts = #{ + replayq_dir => ReplayQDir, + replayq_offload_mode => OffloadMode, + replayq_max_total_bytes => PerPartitionLimit, + replayq_seg_bytes => SegmentBytes, + drop_if_highmem => MemOLP + }, + ProducerName = producer_name(ClientId), + ?tp(pulsar_producer_capture_name, #{producer_name => ProducerName}), + MessageTemplate = compile_message_template(MessageTemplateOpts), + ProducerOpts0 = + #{ + batch_size => BatchSize, + compression => Compression, + conn_opts => ConnOpts, + max_batch_bytes => MaxBatchBytes, + name => ProducerName, + retention_period => RetentionPeriod, + ssl_opts => SSLOpts, + strategy => partition_strategy(Strategy), + tcp_opts => [{sndbuf, SendBuffer}] + }, + ProducerOpts = maps:merge(ReplayQOpts, ProducerOpts0), + PulsarTopic = binary_to_list(PulsarTopic0), + ?tp(pulsar_producer_about_to_start_producers, #{producer_name => ProducerName}), + try pulsar:ensure_supervised_producers(ClientId, PulsarTopic, ProducerOpts) of + {ok, Producers} -> + State = #{ + pulsar_client_id => ClientId, + producers => Producers, + sync_timeout => SyncTimeout, + message_template => MessageTemplate + }, + 
?tp(pulsar_producer_bridge_started, #{}), + {ok, State} + catch + Kind:Error:Stacktrace -> + ?tp( + error, + "failed_to_start_pulsar_producer", + #{ + instance_id => InstanceId, + kind => Kind, + reason => Error, + stacktrace => Stacktrace + } + ), + stop_client(ClientId), + throw(failed_to_start_pulsar_producer) + end. + +-spec stop_client(pulsar_client_id()) -> ok. +stop_client(ClientId) -> + _ = log_when_error( + fun() -> + ok = pulsar:stop_and_delete_supervised_client(ClientId), + ?tp(pulsar_bridge_client_stopped, #{pulsar_client_id => ClientId}), + ok + end, + #{ + msg => "failed_to_delete_pulsar_client", + pulsar_client_id => ClientId + } + ), + ok. + +-spec stop_producers(pulsar_client_id(), pulsar_producers:producers()) -> ok. +stop_producers(ClientId, Producers) -> + _ = log_when_error( + fun() -> + ok = pulsar:stop_and_delete_supervised_producers(Producers), + ?tp(pulsar_bridge_producer_stopped, #{pulsar_client_id => ClientId}), + ok + end, + #{ + msg => "failed_to_delete_pulsar_producer", + pulsar_client_id => ClientId + } + ), + ok. + +log_when_error(Fun, Log) -> + try + Fun() + catch + C:E -> + ?SLOG(error, Log#{ + exception => C, + reason => E + }) + end. + +-spec compile_message_template(message_template_raw()) -> message_template(). +compile_message_template(TemplateOpts) -> + KeyTemplate = maps:get(key, TemplateOpts, <<"${.clientid}">>), + ValueTemplate = maps:get(value, TemplateOpts, <<"${.}">>), + #{ + key => preproc_tmpl(KeyTemplate), + value => preproc_tmpl(ValueTemplate) + }. + +preproc_tmpl(Template) -> + emqx_plugin_libs_rule:preproc_tmpl(Template). + +render_message( + Message, #{key := KeyTemplate, value := ValueTemplate} +) -> + #{ + key => render(Message, KeyTemplate), + value => render(Message, ValueTemplate) + }. + +render(Message, Template) -> + Opts = #{ + var_trans => fun + (undefined) -> <<"">>; + (X) -> emqx_plugin_libs_rule:bin(X) + end, + return => full_binary + }, + emqx_plugin_libs_rule:proc_tmpl(Template, Message, Opts). 
+ +get_producer_status(Producers) -> + case pulsar_producers:all_connected(Producers) of + true -> connected; + false -> connecting + end. + +partition_strategy(key_dispatch) -> first_key_dispatch; +partition_strategy(Strategy) -> Strategy. diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl new file mode 100644 index 000000000..be38f6625 --- /dev/null +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl @@ -0,0 +1,1019 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar_impl_producer_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-import(emqx_common_test_helpers, [on_exit/1]). + +-define(BRIDGE_TYPE_BIN, <<"pulsar_producer">>). +-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_pulsar]). +-define(RULE_TOPIC, "mqtt/rule"). +-define(RULE_TOPIC_BIN, <<?RULE_TOPIC>>). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, plain}, + {group, tls} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + OnlyOnceTCs = only_once_tests(), + TCs = AllTCs -- OnlyOnceTCs, + [ + {plain, AllTCs}, + {tls, TCs} + ]. + +only_once_tests() -> + [ + t_create_via_http, + t_start_when_down, + t_send_when_down, + t_send_when_timeout, + t_failure_to_start_producer, + t_producer_process_crash + ]. + +init_per_suite(Config) -> + Config. 
+ +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)), + _ = application:stop(emqx_connector), + ok. + +init_per_group(plain = Type, Config) -> + PulsarHost = os:getenv("PULSAR_PLAIN_HOST", "toxiproxy"), + PulsarPort = list_to_integer(os:getenv("PULSAR_PLAIN_PORT", "6652")), + ProxyName = "pulsar_plain", + case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {pulsar_host, PulsarHost}, + {pulsar_port, PulsarPort}, + {pulsar_type, Type}, + {use_tls, false} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_pulsar); + _ -> + {skip, no_pulsar} + end + end; +init_per_group(tls = Type, Config) -> + PulsarHost = os:getenv("PULSAR_TLS_HOST", "toxiproxy"), + PulsarPort = list_to_integer(os:getenv("PULSAR_TLS_PORT", "6653")), + ProxyName = "pulsar_tls", + case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {pulsar_host, PulsarHost}, + {pulsar_port, PulsarPort}, + {pulsar_type, Type}, + {use_tls, true} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_pulsar); + _ -> + {skip, no_pulsar} + end + end; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when + Group =:= plain; + Group =:= tls +-> + common_end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. 
+ +common_init_per_group() -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?APPS), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic} + ]. + +common_end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). + +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + stop_consumer(Config), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. 
+ +common_init_per_testcase(TestCase, Config0) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + PulsarTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + PulsarType = ?config(pulsar_type, Config0), + Config1 = [{pulsar_topic, PulsarTopic} | Config0], + {Name, ConfigString, PulsarConfig} = pulsar_config( + TestCase, PulsarType, Config1 + ), + ConsumerConfig = start_consumer(TestCase, Config1), + Config = ConsumerConfig ++ Config1, + ok = snabbkaffe:start_trace(), + [ + {pulsar_name, Name}, + {pulsar_config_string, ConfigString}, + {pulsar_config, PulsarConfig} + | Config + ]. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +pulsar_config(TestCase, _PulsarType, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + PulsarHost = ?config(pulsar_host, Config), + PulsarPort = ?config(pulsar_port, Config), + PulsarTopic = ?config(pulsar_topic, Config), + AuthType = proplists:get_value(sasl_auth_mechanism, Config, none), + UseTLS = proplists:get_value(use_tls, Config, false), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + MQTTTopic = proplists:get_value(mqtt_topic, Config, <<"mqtt/topic/", UniqueNum/binary>>), + Prefix = + case UseTLS of + true -> <<"pulsar+ssl://">>; + false -> <<"pulsar://">> + end, + ServerURL = iolist_to_binary([ + Prefix, + PulsarHost, + ":", + integer_to_binary(PulsarPort) + ]), + ConfigString = + io_lib:format( + "bridges.pulsar_producer.~s {\n" + " enable = true\n" + " servers = \"~s\"\n" + " sync_timeout = 5s\n" + " compression = no_compression\n" + " send_buffer = 1MB\n" + " retention_period = infinity\n" + " max_batch_bytes = 
900KB\n" + " batch_size = 1\n" + " strategy = random\n" + " buffer {\n" + " mode = memory\n" + " per_partition_limit = 10MB\n" + " segment_bytes = 5MB\n" + " memory_overload_protection = true\n" + " }\n" + " message {\n" + " key = \"${.clientid}\"\n" + " value = \"${.}\"\n" + " }\n" + "~s" + " ssl {\n" + " enable = ~p\n" + " verify = verify_none\n" + " server_name_indication = \"auto\"\n" + " }\n" + " pulsar_topic = \"~s\"\n" + " local_topic = \"~s\"\n" + "}\n", + [ + Name, + ServerURL, + authentication(AuthType), + UseTLS, + PulsarTopic, + MQTTTopic + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Name)}. + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = ?BRIDGE_TYPE_BIN, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +authentication(_) -> + " authentication = none\n". + +resource_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + emqx_bridge_resource:resource_id(Type, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + emqx_bridge:create(Type, Name, PulsarConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). 
+ +create_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + Params = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). + +update_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name), + Params = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). 
+ +probe_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig = ?config(pulsar_config, Config), + Params0 = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Params = maps:merge(Params0, Overrides), + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. + +start_consumer(TestCase, Config) -> + PulsarHost = ?config(pulsar_host, Config), + PulsarPort = ?config(pulsar_port, Config), + PulsarTopic = ?config(pulsar_topic, Config), + UseTLS = ?config(use_tls, Config), + %% FIXME: patch pulsar to accept binary urls... + Scheme = + case UseTLS of + true -> <<"pulsar+ssl://">>; + false -> <<"pulsar://">> + end, + URL = + binary_to_list( + <<Scheme/binary, (list_to_binary(PulsarHost))/binary, ":", (integer_to_binary(PulsarPort))/binary>> + ), + ConnOpts = #{}, + ConsumerClientId = TestCase, + CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"), + SSLOpts = #{ + enable => UseTLS, + keyfile => filename:join([CertsPath, "key.pem"]), + certfile => filename:join([CertsPath, "cert.pem"]), + cacertfile => filename:join([CertsPath, "cacert.pem"]) + }, + {ok, _ClientPid} = pulsar:ensure_supervised_client( + ConsumerClientId, + [URL], + #{ + conn_opts => ConnOpts, + ssl_opts => emqx_tls_lib:to_client_opts(SSLOpts) + } + ), + ConsumerOpts = #{ + cb_init_args => #{send_to => self()}, + cb_module => pulsar_echo_consumer, + sub_type => 'Shared', + subscription => atom_to_list(TestCase), + max_consumer_num => 1, + %% Note! This must not coincide with the client + %% id, or else weird bugs will happen, like the + %% consumer never starts... 
+ name => test_consumer, + consumer_id => 1, + conn_opts => ConnOpts + }, + {ok, Consumer} = pulsar:ensure_supervised_consumers( + ConsumerClientId, + PulsarTopic, + ConsumerOpts + ), + %% since connection is async, and there's currently no way to + %% specify the subscription initial position as `Earliest', we + %% need to wait until the consumer is connected to avoid + %% flakiness. + ok = wait_until_consumer_connected(Consumer), + [ + {consumer_client_id, ConsumerClientId}, + {pulsar_consumer, Consumer} + ]. + +stop_consumer(Config) -> + ConsumerClientId = ?config(consumer_client_id, Config), + Consumer = ?config(pulsar_consumer, Config), + ok = pulsar:stop_and_delete_supervised_consumers(Consumer), + ok = pulsar:stop_and_delete_supervised_client(ConsumerClientId), + ok. + +wait_until_consumer_connected(Consumer) -> + ?retry( + _Sleep = 300, + _Attempts0 = 20, + true = pulsar_consumers:all_connected(Consumer) + ), + ok. + +wait_until_producer_connected() -> + wait_until_connected(pulsar_producers_sup, pulsar_producer). + +wait_until_connected(SupMod, Mod) -> + Pids = [ + P + || {_Name, SupPid, _Type, _Mods} <- supervisor:which_children(SupMod), + P <- element(2, process_info(SupPid, links)), + case proc_lib:initial_call(P) of + {Mod, init, _} -> true; + _ -> false + end + ], + ?retry( + _Sleep = 300, + _Attempts0 = 20, + lists:foreach(fun(P) -> {connected, _} = sys:get_state(P) end, Pids) + ), + ok. 
+ +create_rule_and_action_http(Config) -> + PulsarName = ?config(pulsar_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, PulsarName), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +receive_consumed(Timeout) -> + receive + {pulsar_message, #{payloads := Payloads}} -> + lists:map(fun try_decode_json/1, Payloads) + after Timeout -> + ct:pal("mailbox: ~p", [process_info(self(), messages)]), + ct:fail("no message consumed") + end. + +try_decode_json(Payload) -> + case emqx_utils_json:safe_decode(Payload, [return_maps]) of + {error, _} -> + Payload; + {ok, JSON} -> + JSON + end. + +cluster(Config) -> + PrivDataDir = ?config(priv_dir, Config), + PeerModule = + case os:getenv("IS_CI") of + false -> + slave; + _ -> + ct_slave + end, + Cluster = emqx_common_test_helpers:emqx_cluster( + [core, core], + [ + {apps, [emqx_conf, emqx_bridge, emqx_rule_engine, emqx_bridge_pulsar]}, + {listener_ports, []}, + {peer_mod, PeerModule}, + {priv_data_dir, PrivDataDir}, + {load_schema, true}, + {start_autocluster, true}, + {schema_mod, emqx_ee_conf_schema}, + {env_handler, fun + (emqx) -> + application:set_env(emqx, boot_modules, [broker, router]), + ok; + (emqx_conf) -> + ok; + (_) -> + ok + end} + ] + ), + ct:pal("cluster: ~p", [Cluster]), + Cluster. + +start_cluster(Cluster) -> + Nodes = + [ + emqx_common_test_helpers:start_slave(Name, Opts) + || {Name, Opts} <- Cluster + ], + on_exit(fun() -> + emqx_utils:pmap( + fun(N) -> + ct:pal("stopping ~p", [N]), + ok = emqx_common_test_helpers:stop_slave(N) + end, + Nodes + ) + end), + Nodes. 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_start_and_produce_ok(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + QoS = 0, + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + %% Publish using local topic. + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + emqx:publish(Message0), + %% Publish using rule engine. + Message1 = emqx_message:make(ClientId, QoS, ?RULE_TOPIC_BIN, Payload), + emqx:publish(Message1), + + #{rule_id => RuleId} + end, + fun(#{rule_id := RuleId}, _Trace) -> + Data0 = receive_consumed(5_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + Data1 = receive_consumed(5_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := ?RULE_TOPIC_BIN + } + ], + Data1 + ), + ?retry( + _Sleep = 100, + _Attempts0 = 20, + begin + ?assertMatch( + #{ + counters := #{ + dropped := 0, + failed := 0, + late_reply := 0, + matched := 2, + received := 0, + retried := 0, + success := 2 + } + }, + emqx_resource_manager:get_metrics(ResourceId) + ), + ?assertEqual( + 1, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.success') + ), + ?assertEqual( + 0, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.failed') + ), + ok + end + ), + ok + end + ), + ok. + +%% Under normal operations, the bridge will be called async via +%% `simple_async_query'. 
+t_sync_query(Config) -> + ResourceId = resource_id(Config), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + Message = {send_message, #{payload => Payload}}, + ?assertMatch( + {ok, #{sequence_id := _}}, emqx_resource:simple_sync_query(ResourceId, Message) + ), + ok + end, + [] + ), + ok. + +t_create_via_http(Config) -> + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + + %% lightweight matrix testing some configs + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"buffer">> => + #{<<"mode">> => <<"disk">>} + } + ) + ), + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"buffer">> => + #{ + <<"mode">> => <<"hybrid">>, + <<"memory_overload_protection">> => true + } + } + ) + ), + ok + end, + [] + ), + ok. + +t_start_stop(Config) -> + PulsarName = ?config(pulsar_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + %% Check that the bridge probe API doesn't leak atoms. + redbug:start( + [ + "emqx_resource_manager:health_check_interval -> return", + "emqx_resource_manager:with_health_check -> return" + ], + [{msgs, 100}, {time, 30_000}] + ), + ProbeRes0 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. 
+ ProbeRes1 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + %% Now stop the bridge. + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, PulsarName), + #{?snk_kind := pulsar_bridge_stopped}, + 5_000 + ) + ), + + ok + end, + fun(Trace) -> + %% one for each probe, one for real + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_producer_stopped, Trace)), + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_client_stopped, Trace)), + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_stopped, Trace)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. 
+ +t_start_when_down(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ok + end), + %% Should recover given enough time. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok + end, + [] + ), + ok. + +t_send_when_down(Config) -> + do_t_send_with_failure(Config, down). + +t_send_when_timeout(Config) -> + do_t_send_with_failure(Config, timeout). + +do_t_send_with_failure(Config, FailureType) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + MQTTTopic = ?config(mqtt_topic, Config), + QoS = 0, + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := pulsar_producer_bridge_started}, + 10_000 + ), + ?check_trace( + begin + emqx_common_test_helpers:with_failure( + FailureType, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Message0), + #{ + ?snk_kind := pulsar_producer_on_query_async, + ?snk_span := {complete, _} + }, + 5_000 + ), + ok + end + ), + ok + end, + fun(_Trace) -> + %% Should recover given enough time. + Data0 = receive_consumed(20_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end + ), + ok. + +%% Check that we correctly terminate the pulsar client when the pulsar +%% producer processes fail to start for whatever reason. 
+t_failure_to_start_producer(Config) -> + ?check_trace( + begin + ?force_ordering( + #{?snk_kind := name_registered}, + #{?snk_kind := pulsar_producer_about_to_start_producers} + ), + spawn_link(fun() -> + ?tp(will_register_name, #{}), + {ok, #{producer_name := ProducerName}} = ?block_until( + #{?snk_kind := pulsar_producer_capture_name}, 10_000 + ), + true = register(ProducerName, self()), + ?tp(name_registered, #{name => ProducerName}), + %% Just simulating another process so that starting the + %% producers fail. Currently it does a gen_server:call + %% with `infinity' timeout, so this is just to avoid + %% hanging. + receive + {'$gen_call', From, _Request} -> + gen_server:reply(From, {error, im_not, your_producer}) + end, + receive + die -> ok + end + end), + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := pulsar_bridge_client_stopped}, + 20_000 + ), + ok + end, + [] + ), + ok. + +%% Check the driver recovers itself if one of the producer processes +%% die for whatever reason. 
+t_producer_process_crash(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + QoS = 0, + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + ?check_trace( + begin + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge( + Config, + #{<<"buffer">> => #{<<"mode">> => <<"disk">>}} + ), + #{?snk_kind := pulsar_producer_bridge_started}, + 10_000 + ), + [ProducerPid | _] = [ + Pid + || {_Name, PS, _Type, _Mods} <- supervisor:which_children(pulsar_producers_sup), + Pid <- element(2, process_info(PS, links)), + case proc_lib:initial_call(Pid) of + {pulsar_producer, init, _} -> true; + _ -> false + end + ], + Ref = monitor(process, ProducerPid), + exit(ProducerPid, kill), + receive + {'DOWN', Ref, process, ProducerPid, _Killed} -> + ok + after 1_000 -> ct:fail("pid didn't die") + end, + ?assertEqual({ok, connecting}, emqx_resource_manager:health_check(ResourceId)), + %% Should recover given enough time. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Message0), + #{?snk_kind := pulsar_producer_on_query_async, ?snk_span := {complete, _}}, + 5_000 + ), + Data0 = receive_consumed(20_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end, + [] + ), + ok. 
+ +t_cluster(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + Cluster = cluster(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + QoS = 0, + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + Nodes = [N1, N2 | _] = start_cluster(Cluster), + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := pulsar_producer_bridge_started}), + length(Nodes), + 15_000 + ), + {ok, _} = erpc:call(N1, fun() -> create_bridge(Config) end), + {ok, _} = snabbkaffe:receive_events(SRef0), + lists:foreach( + fun(N) -> + ?retry( + _Sleep = 1_000, + _Attempts0 = 20, + ?assertEqual( + {ok, connected}, + erpc:call(N, emqx_resource_manager, health_check, [ResourceId]), + #{node => N} + ) + ) + end, + Nodes + ), + erpc:multicall(Nodes, fun wait_until_producer_connected/0), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + erpc:call(N2, emqx, publish, [Message0]), + + lists:foreach( + fun(N) -> + ?assertEqual( + {ok, connected}, + erpc:call(N, emqx_resource_manager, health_check, [ResourceId]), + #{node => N} + ) + end, + Nodes + ), + + ok + end, + fun(_Trace) -> + Data0 = receive_consumed(10_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end + ), + ok. diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl new file mode 100644 index 000000000..834978851 --- /dev/null +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl @@ -0,0 +1,25 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- +-module(pulsar_echo_consumer). + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% pulsar consumer API +-export([init/2, handle_message/3]). + +init(Topic, Args) -> + ct:pal("consumer init: ~p", [#{topic => Topic, args => Args}]), + SendTo = maps:get(send_to, Args), + ?tp(pulsar_echo_consumer_init, #{topic => Topic}), + {ok, #{topic => Topic, send_to => SendTo}}. + +handle_message(Message, Payloads, State) -> + #{send_to := SendTo, topic := Topic} = State, + ct:pal( + "pulsar consumer received:\n ~p", + [#{message => Message, payloads => Payloads}] + ), + SendTo ! {pulsar_message, #{topic => Topic, message => Message, payloads => Payloads}}, + ?tp(pulsar_echo_consumer_message, #{topic => Topic, message => Message, payloads => Payloads}), + {ok, 'Individual', State}. diff --git a/apps/emqx_bridge_redis/BSL.txt b/apps/emqx_bridge_redis/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_redis/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_redis/README.md b/apps/emqx_bridge_redis/README.md new file mode 100644 index 000000000..73ec41f07 --- /dev/null +++ b/apps/emqx_bridge_redis/README.md @@ -0,0 +1,37 @@ +# EMQX Redis Bridge + +[Redis](https://github.com/redis/redis) is an in-memory data structure store, +used as a distributed, in-memory key–value database, cache and message broker, +with optional durability. + +The application is used to connect EMQX and Redis. 
+User can create a rule and easily ingest IoT data into Redis by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into Redis](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-redis.html) + for how to use EMQX dashboard to ingest IoT data into Redis. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src b/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src new file mode 100644 index 000000000..6b57c6cd7 --- /dev/null +++ b/apps/emqx_bridge_redis/src/emqx_bridge_redis.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_redis, [ + {description, "EMQX Enterprise Redis Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_rocketmq/BSL.txt b/apps/emqx_bridge_rocketmq/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_rocketmq/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. 
+Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. 
If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. 
Not to modify this License in any other way. diff --git a/apps/emqx_bridge_rocketmq/README.md b/apps/emqx_bridge_rocketmq/README.md new file mode 100644 index 000000000..252e6beac --- /dev/null +++ b/apps/emqx_bridge_rocketmq/README.md @@ -0,0 +1,37 @@ +# EMQX RocketMQ Bridge + +[RocketMQ](https://github.com/apache/rocketmq) is a distributed messaging and +streaming platform developed by the Apache Software Foundation. +It provides reliable, scalable, and high-throughput messaging services for modern cloud-native applications. + +The application is used to connect EMQX and RocketMQ. +User can create a rule and easily ingest IoT data into RocketMQ by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [Ingest data into RocketMQ](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-rocketmq.html) + for how to use EMQX dashboard to ingest IoT data into RocketMQ. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). 
diff --git a/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src new file mode 100644 index 000000000..e1916034c --- /dev/null +++ b/apps/emqx_bridge_rocketmq/src/emqx_bridge_rocketmq.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_rocketmq, [ + {description, "EMQX Enterprise RocketMQ Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_sqlserver/BSL.txt b/apps/emqx_bridge_sqlserver/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_sqlserver/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
+ +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_sqlserver/README.md b/apps/emqx_bridge_sqlserver/README.md new file mode 100644 index 000000000..ccb1267d8 --- /dev/null +++ b/apps/emqx_bridge_sqlserver/README.md @@ -0,0 +1,36 @@ +# EMQX SQL Server Bridge + +[Microsoft SQL Server](https://www.microsoft.com/en-us/sql-server) is a relational database management system (RDBMS) that is developed and owned by Microsoft. +Microsoft SQL Server offers a wide range of features, including support for high availability and disaster recovery, +integration with other Microsoft products and services, and advanced security and encryption options. +It also provides tools for data warehousing, business intelligence, and analytics, making it a versatile and powerful database platform. + +The application is used to connect EMQX and Microsoft SQL Server. 
+User can create a rule and easily ingest IoT data into Microsoft SQL Server by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +## Documentation links + +For more information about Microsoft SQL Server, please see the [official site](https://learn.microsoft.com/en-us/sql/sql-server/?view=sql-server-ver16). + +# Configurations + +Please see [Ingest data into SQL Server](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-sqlserver.html) for more detailed information. + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_sqlserver/docker-ct b/apps/emqx_bridge_sqlserver/docker-ct new file mode 100644 index 000000000..6f046e2df --- /dev/null +++ b/apps/emqx_bridge_sqlserver/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +sqlserver diff --git a/apps/emqx_bridge_sqlserver/include/emqx_bridge_sqlserver.hrl b/apps/emqx_bridge_sqlserver/include/emqx_bridge_sqlserver.hrl new file mode 100644 index 000000000..3aa78fdd8 --- /dev/null +++ b/apps/emqx_bridge_sqlserver/include/emqx_bridge_sqlserver.hrl @@ -0,0 +1,5 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-define(SQLSERVER_DEFAULT_PORT, 1433). 
diff --git a/apps/emqx_bridge_sqlserver/rebar.config b/apps/emqx_bridge_sqlserver/rebar.config new file mode 100644 index 000000000..5f586f529 --- /dev/null +++ b/apps/emqx_bridge_sqlserver/rebar.config @@ -0,0 +1,10 @@ +%% -*- mode: erlang; -*- +{erl_opts, [debug_info]}. +{deps, [ {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + {apps, [emqx_bridge_sqlserver]} +]}. diff --git a/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src new file mode 100644 index 000000000..a0b4e287b --- /dev/null +++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_sqlserver, [ + {description, "EMQX Enterprise SQL Server Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib, odbc]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_sqlserver.erl b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.erl similarity index 97% rename from lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_sqlserver.erl rename to apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.erl index 49db815a6..8a97cb2ad 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_sqlserver.erl +++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver.erl @@ -1,7 +1,7 @@ %%-------------------------------------------------------------------- %% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_sqlserver). +-module(emqx_bridge_sqlserver). -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). 
@@ -96,7 +96,7 @@ fields("config") -> } )} ] ++ - (emqx_ee_connector_sqlserver:fields(config) -- + (emqx_bridge_sqlserver_connector:fields(config) -- emqx_connector_schema_lib:prepare_statement_fields()); fields("creation_opts") -> emqx_resource_schema:fields("creation_opts"); diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl similarity index 90% rename from lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl rename to apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl index 97a46152d..ed8134051 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_sqlserver.erl +++ b/apps/emqx_bridge_sqlserver/src/emqx_bridge_sqlserver_connector.erl @@ -2,14 +2,15 @@ %% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_connector_sqlserver). +-module(emqx_bridge_sqlserver_connector). -behaviour(emqx_resource). +-include("emqx_bridge_sqlserver.hrl"). + -include_lib("kernel/include/file.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("emqx_resource/include/emqx_resource.hrl"). --include_lib("emqx_ee_connector/include/emqx_ee_connector.hrl"). -include_lib("typerefl/include/types.hrl"). -include_lib("hocon/include/hoconsc.hrl"). @@ -51,7 +52,7 @@ -define(SYNC_QUERY_MODE, handover). -define(SQLSERVER_HOST_OPTIONS, #{ - default_port => 1433 + default_port => ?SQLSERVER_DEFAULT_PORT }). -define(REQUEST_TIMEOUT(RESOURCE_OPTS), @@ -123,7 +124,7 @@ %% -type size() :: integer(). -type state() :: #{ - poolname := binary(), + pool_name := binary(), resource_opts := map(), sql_templates := map() }. @@ -171,7 +172,7 @@ callback_mode() -> always_sync. is_buffer_supported() -> false. 
on_start( - InstanceId = PoolName, + ResourceId = PoolName, #{ server := Server, username := Username, @@ -184,7 +185,7 @@ on_start( ) -> ?SLOG(info, #{ msg => "starting_sqlserver_connector", - connector => InstanceId, + connector => ResourceId, config => emqx_utils:redact(Config) }), @@ -205,17 +206,16 @@ on_start( {password, Password}, {driver, Driver}, {database, Database}, - {pool_size, PoolSize}, - {poolname, PoolName} + {pool_size, PoolSize} ], State = #{ - %% also InstanceId - poolname => PoolName, + %% also ResourceId + pool_name => PoolName, sql_templates => parse_sql_template(Config), resource_opts => ResourceOpts }, - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options) of + case emqx_resource_pool:start(PoolName, ?MODULE, Options) of ok -> {ok, State}; {error, Reason} -> @@ -226,15 +226,15 @@ on_start( {error, Reason} end. -on_stop(InstanceId, #{poolname := PoolName} = _State) -> +on_stop(ResourceId, _State) -> ?SLOG(info, #{ msg => "stopping_sqlserver_connector", - connector => InstanceId + connector => ResourceId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(ResourceId). -spec on_query( - manager_id(), + resource_id(), {?ACTION_SEND_MESSAGE, map()}, state() ) -> @@ -242,16 +242,16 @@ on_stop(InstanceId, #{poolname := PoolName} = _State) -> | {ok, list()} | {error, {recoverable_error, term()}} | {error, term()}. -on_query(InstanceId, {?ACTION_SEND_MESSAGE, _Msg} = Query, State) -> +on_query(ResourceId, {?ACTION_SEND_MESSAGE, _Msg} = Query, State) -> ?TRACE( "SINGLE_QUERY_SYNC", "bridge_sqlserver_received", - #{requests => Query, connector => InstanceId, state => State} + #{requests => Query, connector => ResourceId, state => State} ), - do_query(InstanceId, Query, ?SYNC_QUERY_MODE, State). + do_query(ResourceId, Query, ?SYNC_QUERY_MODE, State). 
-spec on_batch_query( - manager_id(), + resource_id(), [{?ACTION_SEND_MESSAGE, map()}], state() ) -> @@ -259,17 +259,18 @@ on_query(InstanceId, {?ACTION_SEND_MESSAGE, _Msg} = Query, State) -> | {ok, list()} | {error, {recoverable_error, term()}} | {error, term()}. -on_batch_query(InstanceId, BatchRequests, State) -> +on_batch_query(ResourceId, BatchRequests, State) -> ?TRACE( "BATCH_QUERY_SYNC", "bridge_sqlserver_received", - #{requests => BatchRequests, connector => InstanceId, state => State} + #{requests => BatchRequests, connector => ResourceId, state => State} ), - do_query(InstanceId, BatchRequests, ?SYNC_QUERY_MODE, State). + do_query(ResourceId, BatchRequests, ?SYNC_QUERY_MODE, State). -on_get_status(_InstanceId, #{poolname := Pool} = _State) -> - Health = emqx_plugin_libs_pool:health_check_ecpool_workers( - Pool, {?MODULE, do_get_status, []} +on_get_status(_InstanceId, #{pool_name := PoolName} = _State) -> + Health = emqx_resource_pool:health_check_workers( + PoolName, + {?MODULE, do_get_status, []} ), status_result(Health). @@ -315,7 +316,7 @@ conn_str([], Acc) -> conn_str([{driver, Driver} | Opts], Acc) -> conn_str(Opts, ["Driver=" ++ str(Driver) | Acc]); conn_str([{server, Server} | Opts], Acc) -> - {Host, Port} = emqx_schema:parse_server(Server, ?SQLSERVER_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?SQLSERVER_HOST_OPTIONS), conn_str(Opts, ["Server=" ++ str(Host) ++ "," ++ str(Port) | Acc]); conn_str([{database, Database} | Opts], Acc) -> conn_str(Opts, ["Database=" ++ str(Database) | Acc]); @@ -328,7 +329,7 @@ conn_str([{_, _} | Opts], Acc) -> %% Query with singe & batch sql statement -spec do_query( - manager_id(), + resource_id(), Query :: {?ACTION_SEND_MESSAGE, map()} | [{?ACTION_SEND_MESSAGE, map()}], ApplyMode :: handover, state() @@ -337,15 +338,15 @@ conn_str([{_, _} | Opts], Acc) -> | {error, {recoverable_error, term()}} | {error, term()}. 
do_query( - InstanceId, + ResourceId, Query, ApplyMode, - #{poolname := PoolName, sql_templates := Templates} = State + #{pool_name := PoolName, sql_templates := Templates} = State ) -> ?TRACE( "SINGLE_QUERY_SYNC", "sqlserver_connector_received", - #{query => Query, connector => InstanceId, state => State} + #{query => Query, connector => ResourceId, state => State} ), %% only insert sql statement for single query and batch query @@ -369,7 +370,7 @@ do_query( ), ?SLOG(error, #{ msg => "sqlserver_connector_do_query_failed", - connector => InstanceId, + connector => ResourceId, query => Query, reason => Reason }), @@ -383,9 +384,9 @@ do_query( end. worker_do_insert( - Conn, SQL, #{resource_opts := ResourceOpts, poolname := InstanceId} = State + Conn, SQL, #{resource_opts := ResourceOpts, pool_name := ResourceId} = State ) -> - LogMeta = #{connector => InstanceId, state => State}, + LogMeta = #{connector => ResourceId, state => State}, try case execute(Conn, SQL, ?REQUEST_TIMEOUT(ResourceOpts)) of {selected, Rows, _} -> diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_sqlserver_SUITE.erl b/apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl similarity index 96% rename from lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_sqlserver_SUITE.erl rename to apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl index 68bf7a057..fcf20da8f 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_sqlserver_SUITE.erl +++ b/apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl @@ -2,11 +2,12 @@ % Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. %%-------------------------------------------------------------------- --module(emqx_ee_bridge_sqlserver_SUITE). +-module(emqx_bridge_sqlserver_SUITE). -compile(nowarn_export_all). -compile(export_all). +-include("emqx_bridge_sqlserver/include/emqx_bridge_sqlserver.hrl"). -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). 
-include_lib("snabbkaffe/include/snabbkaffe.hrl"). @@ -59,24 +60,30 @@ %% How to run it locally (all commands are run in $PROJ_ROOT dir): %% A: run ct on host %% 1. Start all deps services +%% ```bash %% sudo docker compose -f .ci/docker-compose-file/docker-compose.yaml \ %% -f .ci/docker-compose-file/docker-compose-sqlserver.yaml \ %% -f .ci/docker-compose-file/docker-compose-toxiproxy.yaml \ %% up --build +%% ``` %% %% 2. Run use cases with special environment variables %% 11433 is toxiproxy exported port. %% Local: -%% ``` +%% ```bash %% SQLSERVER_HOST=toxiproxy SQLSERVER_PORT=11433 \ %% PROXY_HOST=toxiproxy PROXY_PORT=1433 \ -%% ./rebar3 as test ct -c -v --readable true --name ct@127.0.0.1 --suite lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_sqlserver_SUITE.erl +%% ./rebar3 as test ct -c -v --readable true --name ct@127.0.0.1 \ +%% --suite apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl %% ``` %% %% B: run ct in docker container %% run script: -%% ./scripts/ct/run.sh --ci --app lib-ee/emqx_ee_bridge/ \ -%% -- --name 'test@127.0.0.1' -c -v --readable true --suite lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_sqlserver_SUITE.erl +%% ```bash +%% ./scripts/ct/run.sh --ci --app apps/emqx_bridge_sqlserver/ -- \ +%% --name 'test@127.0.0.1' -c -v --readable true \ +%% --suite apps/emqx_bridge_sqlserver/test/emqx_bridge_sqlserver_SUITE.erl +%% ```` %%------------------------------------------------------------------------------ %% CT boilerplate @@ -391,7 +398,7 @@ t_bad_parameter(Config) -> common_init(ConfigT) -> Host = os:getenv("SQLSERVER_HOST", "toxiproxy"), - Port = list_to_integer(os:getenv("SQLSERVER_PORT", "1433")), + Port = list_to_integer(os:getenv("SQLSERVER_PORT", str(?SQLSERVER_DEFAULT_PORT))), Config0 = [ {sqlserver_host, Host}, @@ -631,7 +638,7 @@ conn_str([], Acc) -> conn_str([{driver, Driver} | Opts], Acc) -> conn_str(Opts, ["Driver=" ++ str(Driver) | Acc]); conn_str([{host, Host} | Opts], Acc) -> - Port = proplists:get_value(port, Opts, 
"1433"), + Port = proplists:get_value(port, Opts, str(?SQLSERVER_DEFAULT_PORT)), NOpts = proplists:delete(port, Opts), conn_str(NOpts, ["Server=" ++ str(Host) ++ "," ++ str(Port) | Acc]); conn_str([{port, Port} | Opts], Acc) -> diff --git a/apps/emqx_bridge_tdengine/BSL.txt b/apps/emqx_bridge_tdengine/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_tdengine/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
+ +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_tdengine/README.md b/apps/emqx_bridge_tdengine/README.md new file mode 100644 index 000000000..25faf4c14 --- /dev/null +++ b/apps/emqx_bridge_tdengine/README.md @@ -0,0 +1,39 @@ +# EMQX TDEngine Bridge + +[TDEngine](https://github.com/taosdata/TDengine) is an open-source, cloud-native +time series database (TSDB) optimized for Internet of Things (IoT), Connected Cars, +and Industrial IoT. +It enables efficient, real-time ingestion, processing, and monitoring of petabytes +of data per day, generated by billions of sensors and data collectors. + +The application is used to connect EMQX and TDEngine. +User can create a rule and easily ingest IoT data into TDEngine by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ + +# Documentation + +- Refer to [Ingest data into TDEngine](https://docs.emqx.com/en/enterprise/v5.0/data-integration/data-bridge-tdengine.html) + for how to use EMQX dashboard to ingest IoT data into TDEngine. + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src new file mode 100644 index 000000000..05e8a6f9f --- /dev/null +++ b/apps/emqx_bridge_tdengine/src/emqx_bridge_tdengine.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_tdengine, [ + {description, "EMQX Enterprise TDEngine Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. diff --git a/apps/emqx_bridge_timescale/BSL.txt b/apps/emqx_bridge_timescale/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_timescale/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. 
+Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. 
If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. 
Not to modify this License in any other way. diff --git a/apps/emqx_bridge_timescale/README.md b/apps/emqx_bridge_timescale/README.md new file mode 100644 index 000000000..071cb0fa6 --- /dev/null +++ b/apps/emqx_bridge_timescale/README.md @@ -0,0 +1,40 @@ +# EMQX TimescaleDB Bridge + +[TimescaleDB](https://github.com/timescaleDB/timescaleDB) is an open-source database +designed to make SQL scalable for time-series data. +It is engineered up from PostgreSQL and packaged as a PostgreSQL extension, +providing automatic partitioning across time and space (partitioning key), as well as full SQL support. + +The application is used to connect EMQX and TimescaleDB. +User can create a rule and easily ingest IoT data into TimescaleDB by leveraging +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) + for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src new file mode 100644 index 000000000..5b4431f73 --- /dev/null +++ b/apps/emqx_bridge_timescale/src/emqx_bridge_timescale.app.src @@ -0,0 +1,9 @@ +{application, emqx_bridge_timescale, [ + {description, "EMQX Enterprise TimescaleDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [kernel, stdlib]}, + {env, []}, + {modules, []}, + {links, []} +]}. 
diff --git a/apps/emqx_conf/README.md b/apps/emqx_conf/README.md new file mode 100644 index 000000000..f1efe7987 --- /dev/null +++ b/apps/emqx_conf/README.md @@ -0,0 +1,15 @@ +# Configuration Management + +This application provides configuration management capabilities for EMQX. + +At compile time it reads all configuration schemas and generates the following files: + * `config-en.md`: documentation for all configuration options. + * `schema-en.json`: JSON description of all configuration schema options. + * `emqx.conf.example`: an example of a complete configuration file. + +At runtime, it provides: +- Cluster configuration synchronization capability. + Responsible for synchronizing hot-update configurations from the HTTP API to the entire cluster + and ensuring consistency. + +In addition, this application manages system-level configurations such as `cluster`, `node`, `log`. diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 234690374..e6c3d9cd9 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.17"}, + {vsn, "0.1.19"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 28fcaf624..cc56d5e46 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -27,9 +27,16 @@ -export([remove/2, remove/3]). -export([tombstone/2]). -export([reset/2, reset/3]). --export([dump_schema/1, dump_schema/3]). +-export([dump_schema/2]). -export([schema_module/0]). --export([gen_example_conf/4]). +-export([gen_example_conf/2]). + +%% TODO: move to emqx_dashboard when we stop building api schema at build time +-export([ + hotconf_schema_json/0, + bridge_schema_json/0, + hocon_schema_to_spec/2 +]). %% for rpc -export([get_node_and_config/1]). 
@@ -142,24 +149,21 @@ reset(Node, KeyPath, Opts) -> emqx_conf_proto_v2:reset(Node, KeyPath, Opts). %% @doc Called from build script. --spec dump_schema(file:name_all()) -> ok. -dump_schema(Dir) -> - I18nFile = emqx_dashboard:i18n_file(), - dump_schema(Dir, emqx_conf_schema, I18nFile). - -dump_schema(Dir, SchemaModule, I18nFile) -> +%% TODO: move to a external escript after all refactoring is done +dump_schema(Dir, SchemaModule) -> + _ = application:load(emqx_dashboard), + ok = emqx_dashboard_desc_cache:init(), lists:foreach( fun(Lang) -> - gen_config_md(Dir, I18nFile, SchemaModule, Lang), - gen_api_schema_json(Dir, I18nFile, Lang), - gen_example_conf(Dir, I18nFile, SchemaModule, Lang), - gen_schema_json(Dir, I18nFile, SchemaModule, Lang) + ok = gen_config_md(Dir, SchemaModule, Lang), + ok = gen_schema_json(Dir, SchemaModule, Lang) end, ["en", "zh"] - ). + ), + ok = gen_example_conf(Dir, SchemaModule). %% for scripts/spellcheck. -gen_schema_json(Dir, I18nFile, SchemaModule, Lang) -> +gen_schema_json(Dir, SchemaModule, Lang) -> SchemaJsonFile = filename:join([Dir, "schema-" ++ Lang ++ ".json"]), io:format(user, "===< Generating: ~s~n", [SchemaJsonFile]), %% EMQX_SCHEMA_FULL_DUMP is quite a hidden API @@ -170,40 +174,36 @@ gen_schema_json(Dir, I18nFile, SchemaModule, Lang) -> false -> ?IMPORTANCE_LOW end, io:format(user, "===< Including fields from importance level: ~p~n", [IncludeImportance]), - Opts = #{desc_file => I18nFile, lang => Lang, include_importance_up_from => IncludeImportance}, + Opts = #{ + include_importance_up_from => IncludeImportance, + desc_resolver => make_desc_resolver(Lang) + }, JsonMap = hocon_schema_json:gen(SchemaModule, Opts), IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]), ok = file:write_file(SchemaJsonFile, IoData). 
-gen_api_schema_json(Dir, I18nFile, Lang) -> - emqx_dashboard:init_i18n(I18nFile, list_to_binary(Lang)), - gen_api_schema_json_hotconf(Dir, Lang), - gen_api_schema_json_bridge(Dir, Lang), - emqx_dashboard:clear_i18n(). - -gen_api_schema_json_hotconf(Dir, Lang) -> +%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. +hotconf_schema_json() -> SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>}, - File = schema_filename(Dir, "hot-config-schema-", Lang), - ok = do_gen_api_schema_json(File, emqx_mgmt_api_configs, SchemaInfo). + gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo). -gen_api_schema_json_bridge(Dir, Lang) -> +%% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. +bridge_schema_json() -> SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>}, - File = schema_filename(Dir, "bridge-api-", Lang), - ok = do_gen_api_schema_json(File, emqx_bridge_api, SchemaInfo). + gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo). -schema_filename(Dir, Prefix, Lang) -> - Filename = Prefix ++ Lang ++ ".json", - filename:join([Dir, Filename]). - -gen_config_md(Dir, I18nFile, SchemaModule, Lang) -> +%% TODO: remove it and also remove hocon_md.erl and friends. +%% markdown generation from schema is a failure and we are moving to an interactive +%% viewer like swagger UI. +gen_config_md(Dir, SchemaModule, Lang) -> SchemaMdFile = filename:join([Dir, "config-" ++ Lang ++ ".md"]), io:format(user, "===< Generating: ~s~n", [SchemaMdFile]), - ok = gen_doc(SchemaMdFile, SchemaModule, I18nFile, Lang). + ok = gen_doc(SchemaMdFile, SchemaModule, Lang). -gen_example_conf(Dir, I18nFile, SchemaModule, Lang) -> - SchemaMdFile = filename:join([Dir, "emqx.conf." 
++ Lang ++ ".example"]), +gen_example_conf(Dir, SchemaModule) -> + SchemaMdFile = filename:join([Dir, "emqx.conf.example"]), io:format(user, "===< Generating: ~s~n", [SchemaMdFile]), - ok = gen_example(SchemaMdFile, SchemaModule, I18nFile, Lang). + ok = gen_example(SchemaMdFile, SchemaModule). %% @doc return the root schema module. -spec schema_module() -> module(). @@ -217,79 +217,45 @@ schema_module() -> %% Internal functions %%-------------------------------------------------------------------- --spec gen_doc(file:name_all(), module(), file:name_all(), string()) -> ok. -gen_doc(File, SchemaModule, I18nFile, Lang) -> +%% @doc Make a resolver function that can be used to lookup the description by hocon_schema_json dump. +make_desc_resolver(Lang) -> + fun + ({desc, Namespace, Id}) -> + emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, desc); + (Desc) -> + unicode:characters_to_binary(Desc) + end. + +-spec gen_doc(file:name_all(), module(), string()) -> ok. +gen_doc(File, SchemaModule, Lang) -> Version = emqx_release:version(), Title = "# " ++ emqx_release:description() ++ " Configuration\n\n" ++ "", BodyFile = filename:join([rel, "emqx_conf.template." ++ Lang ++ ".md"]), {ok, Body} = file:read_file(BodyFile), - Opts = #{title => Title, body => Body, desc_file => I18nFile, lang => Lang}, + Resolver = make_desc_resolver(Lang), + Opts = #{title => Title, body => Body, desc_resolver => Resolver}, Doc = hocon_schema_md:gen(SchemaModule, Opts), file:write_file(File, Doc). -gen_example(File, SchemaModule, I18nFile, Lang) -> +gen_example(File, SchemaModule) -> + %% we do not generate description in example files + %% so there is no need for a desc_resolver Opts = #{ title => <<"EMQX Configuration Example">>, body => <<"">>, - desc_file => I18nFile, - lang => Lang, include_importance_up_from => ?IMPORTANCE_MEDIUM }, Example = hocon_schema_example:gen(SchemaModule, Opts), file:write_file(File, Example). -%% Only gen hot_conf schema, not all configuration fields. 
-do_gen_api_schema_json(File, SchemaMod, SchemaInfo) -> - io:format(user, "===< Generating: ~s~n", [File]), - {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( +gen_api_schema_json_iodata(SchemaMod, SchemaInfo) -> + emqx_dashboard_swagger:gen_api_schema_json_iodata( SchemaMod, - #{schema_converter => fun hocon_schema_to_spec/2} - ), - ApiSpec = lists:foldl( - fun({Path, Spec, _, _}, Acc) -> - NewSpec = maps:fold( - fun(Method, #{responses := Responses}, SubAcc) -> - case Responses of - #{ - <<"200">> := - #{ - <<"content">> := #{ - <<"application/json">> := #{<<"schema">> := Schema} - } - } - } -> - SubAcc#{Method => Schema}; - _ -> - SubAcc - end - end, - #{}, - Spec - ), - Acc#{list_to_atom(Path) => NewSpec} - end, - #{}, - ApiSpec0 - ), - Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0), - IoData = emqx_utils_json:encode( - #{ - info => SchemaInfo, - paths => ApiSpec, - components => #{schemas => Components} - }, - [pretty, force_utf8] - ), - file:write_file(File, IoData). - --define(INIT_SCHEMA, #{ - fields => #{}, - translations => #{}, - validations => [], - namespace => undefined -}). + SchemaInfo, + fun ?MODULE:hocon_schema_to_spec/2 + ). -define(TO_REF(_N_, _F_), iolist_to_binary([to_bin(_N_), ".", to_bin(_F_)])). 
-define(TO_COMPONENTS_SCHEMA(_M_, _F_), @@ -345,7 +311,7 @@ typename_to_spec("float()", _Mod) -> typename_to_spec("integer()", _Mod) -> #{type => number}; typename_to_spec("non_neg_integer()", _Mod) -> - #{type => number, minimum => 1}; + #{type => number, minimum => 0}; typename_to_spec("number()", _Mod) -> #{type => number}; typename_to_spec("string()", _Mod) -> diff --git a/apps/emqx_conf/src/emqx_conf_app.erl b/apps/emqx_conf/src/emqx_conf_app.erl index 2231b8336..70234b525 100644 --- a/apps/emqx_conf/src/emqx_conf_app.erl +++ b/apps/emqx_conf/src/emqx_conf_app.erl @@ -89,7 +89,7 @@ sync_data_from_node() -> %% ------------------------------------------------------------------------------ init_load() -> - emqx_config:init_load(emqx_conf:schema_module(), #{raw_with_default => true}). + emqx_config:init_load(emqx_conf:schema_module()). init_conf() -> %% Workaround for https://github.com/emqx/mria/issues/94: diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index ee718b3a1..94cbfb221 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -138,7 +138,7 @@ fields("cluster") -> )}, {"core_nodes", sc( - emqx_schema:comma_separated_atoms(), + node_array(), #{ mapping => "mria.core_nodes", default => [], @@ -206,7 +206,7 @@ fields(cluster_static) -> [ {"seeds", sc( - hoconsc:array(atom()), + node_array(), #{ default => [], desc => ?DESC(cluster_static_seeds), @@ -338,11 +338,12 @@ fields(cluster_etcd) -> desc => ?DESC(cluster_etcd_node_ttl) } )}, - {"ssl", + {"ssl_options", sc( ?R_REF(emqx_schema, "ssl_client_opts"), #{ desc => ?DESC(cluster_etcd_ssl), + aliases => [ssl], 'readOnly' => true } )} @@ -1285,7 +1286,7 @@ cluster_options(dns, Conf) -> {type, conf_get("cluster.dns.record_type", Conf)} ]; cluster_options(etcd, Conf) -> - Namespace = "cluster.etcd.ssl", + Namespace = "cluster.etcd.ssl_options", SslOpts = fun(C) -> Options = keys(Namespace, C), lists:map(fun(Key) -> 
{to_atom(Key), conf_get([Namespace, Key], Conf)} end, Options) @@ -1351,3 +1352,6 @@ validator_string_re(Val, RE, Error) -> catch _:_ -> {error, Error} end. + +node_array() -> + hoconsc:union([emqx_schema:comma_separated_atoms(), hoconsc:array(atom())]). diff --git a/apps/emqx_conf/test/emqx_conf_schema_tests.erl b/apps/emqx_conf/test/emqx_conf_schema_tests.erl index 192aa0e93..667d1766f 100644 --- a/apps/emqx_conf/test/emqx_conf_schema_tests.erl +++ b/apps/emqx_conf/test/emqx_conf_schema_tests.erl @@ -25,8 +25,8 @@ array_nodes_test() -> ExpectNodes = ['emqx1@127.0.0.1', 'emqx2@127.0.0.1'], lists:foreach( - fun({Seeds, Nodes}) -> - ConfFile = to_bin(?BASE_CONF, [Seeds, Nodes]), + fun(Nodes) -> + ConfFile = to_bin(?BASE_CONF, [Nodes, Nodes]), {ok, Conf} = hocon:binary(ConfFile, #{format => richmap}), ConfList = hocon_tconf:generate(emqx_conf_schema, Conf), ClusterDiscovery = proplists:get_value( @@ -43,7 +43,7 @@ array_nodes_test() -> Nodes ) end, - [{["emqx1@127.0.0.1", "emqx2@127.0.0.1"], "emqx1@127.0.0.1, emqx2@127.0.0.1"}] + [["emqx1@127.0.0.1", "emqx2@127.0.0.1"], "emqx1@127.0.0.1, emqx2@127.0.0.1"] ), ok. @@ -79,7 +79,7 @@ array_nodes_test() -> ). 
authn_validations_test() -> - BaseConf = to_bin(?BASE_CONF, [["emqx1@127.0.0.1"], "emqx1@127.0.0.1,emqx1@127.0.0.1"]), + BaseConf = to_bin(?BASE_CONF, ["emqx1@127.0.0.1", "emqx1@127.0.0.1"]), OKHttps = to_bin(?BASE_AUTHN_ARRAY, [post, true, <<"https://127.0.0.1:8080">>]), Conf0 = <>, diff --git a/apps/emqx_connector/src/emqx_connector.app.src b/apps/emqx_connector/src/emqx_connector.app.src index 38ca230b2..db55c7032 100644 --- a/apps/emqx_connector/src/emqx_connector.app.src +++ b/apps/emqx_connector/src/emqx_connector.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_connector, [ {description, "EMQX Data Integration Connectors"}, - {vsn, "0.1.20"}, + {vsn, "0.1.22"}, {registered, []}, {mod, {emqx_connector_app, []}}, {applications, [ diff --git a/apps/emqx_connector/src/emqx_connector_http.erl b/apps/emqx_connector/src/emqx_connector_http.erl index c24ada6f3..bb822a60a 100644 --- a/apps/emqx_connector/src/emqx_connector_http.erl +++ b/apps/emqx_connector/src/emqx_connector_http.erl @@ -47,7 +47,7 @@ namespace/0 ]). --export([check_ssl_opts/2, validate_method/1]). +-export([check_ssl_opts/2, validate_method/1, join_paths/2]). -type connect_timeout() :: emqx_schema:duration() | infinity. -type pool_type() :: random | hash. 
@@ -231,9 +231,8 @@ on_start( {transport_opts, NTransportOpts}, {enable_pipelining, maps:get(enable_pipelining, Config, ?DEFAULT_PIPELINE_SIZE)} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), State = #{ - pool_name => PoolName, + pool_name => InstId, pool_type => PoolType, host => Host, port => Port, @@ -241,7 +240,7 @@ on_start( base_path => BasePath, request => preprocess_request(maps:get(request, Config, undefined)) }, - case ehttpc_sup:start_pool(PoolName, PoolOpts) of + case ehttpc_sup:start_pool(InstId, PoolOpts) of {ok, _} -> {ok, State}; {error, {already_started, _}} -> {ok, State}; {error, Reason} -> {error, Reason} @@ -471,10 +470,10 @@ preprocess_request( } = Req ) -> #{ - method => emqx_plugin_libs_rule:preproc_tmpl(bin(Method)), + method => emqx_plugin_libs_rule:preproc_tmpl(to_bin(Method)), path => emqx_plugin_libs_rule:preproc_tmpl(Path), body => maybe_preproc_tmpl(body, Req), - headers => preproc_headers(Headers), + headers => wrap_auth_header(preproc_headers(Headers)), request_timeout => maps:get(request_timeout, Req, 30000), max_retries => maps:get(max_retries, Req, 2) }. @@ -484,8 +483,8 @@ preproc_headers(Headers) when is_map(Headers) -> fun(K, V, Acc) -> [ { - emqx_plugin_libs_rule:preproc_tmpl(bin(K)), - emqx_plugin_libs_rule:preproc_tmpl(bin(V)) + emqx_plugin_libs_rule:preproc_tmpl(to_bin(K)), + emqx_plugin_libs_rule:preproc_tmpl(to_bin(V)) } | Acc ] @@ -497,13 +496,43 @@ preproc_headers(Headers) when is_list(Headers) -> lists:map( fun({K, V}) -> { - emqx_plugin_libs_rule:preproc_tmpl(bin(K)), - emqx_plugin_libs_rule:preproc_tmpl(bin(V)) + emqx_plugin_libs_rule:preproc_tmpl(to_bin(K)), + emqx_plugin_libs_rule:preproc_tmpl(to_bin(V)) } end, Headers ). +wrap_auth_header(Headers) -> + lists:map(fun maybe_wrap_auth_header/1, Headers). 
+ +maybe_wrap_auth_header({[{str, Key}] = StrKey, Val}) -> + {_, MaybeWrapped} = maybe_wrap_auth_header({Key, Val}), + {StrKey, MaybeWrapped}; +maybe_wrap_auth_header({Key, Val} = Header) when + is_binary(Key), (size(Key) =:= 19 orelse size(Key) =:= 13) +-> + %% We check the size of potential keys in the guard above and consider only + %% those that match the number of characters of either "Authorization" or + %% "Proxy-Authorization". + case try_bin_to_lower(Key) of + <<"authorization">> -> + {Key, emqx_secret:wrap(Val)}; + <<"proxy-authorization">> -> + {Key, emqx_secret:wrap(Val)}; + _Other -> + Header + end; +maybe_wrap_auth_header(Header) -> + Header. + +try_bin_to_lower(Bin) -> + try iolist_to_binary(string:lowercase(Bin)) of + LowercaseBin -> LowercaseBin + catch + _:_ -> Bin + end. + maybe_preproc_tmpl(Key, Conf) -> case maps:get(Key, Conf, undefined) of undefined -> undefined; @@ -538,7 +567,7 @@ proc_headers(HeaderTks, Msg) -> fun({K, V}) -> { emqx_plugin_libs_rule:proc_tmpl(K, Msg), - emqx_plugin_libs_rule:proc_tmpl(V, Msg) + emqx_plugin_libs_rule:proc_tmpl(emqx_secret:unwrap(V), Msg) } end, HeaderTks @@ -566,15 +595,41 @@ formalize_request(Method, BasePath, {Path, Headers, _Body}) when -> formalize_request(Method, BasePath, {Path, Headers}); formalize_request(_Method, BasePath, {Path, Headers, Body}) -> - {filename:join(BasePath, Path), Headers, Body}; + {join_paths(BasePath, Path), Headers, Body}; formalize_request(_Method, BasePath, {Path, Headers}) -> - {filename:join(BasePath, Path), Headers}. + {join_paths(BasePath, Path), Headers}. -bin(Bin) when is_binary(Bin) -> +%% By default, we cannot treat HTTP paths as "file" or "resource" paths, +%% because an HTTP server may handle paths like +%% "/a/b/c/", "/a/b/c" and "/a//b/c" differently. +%% +%% So we try to avoid unneccessary path normalization. +%% +%% See also: `join_paths_test_/0` +join_paths(Path1, Path2) -> + do_join_paths(lists:reverse(to_list(Path1)), to_list(Path2)). 
+ +%% "abc/" + "/cde" +do_join_paths([$/ | Path1], [$/ | Path2]) -> + lists:reverse(Path1) ++ [$/ | Path2]; +%% "abc/" + "cde" +do_join_paths([$/ | Path1], Path2) -> + lists:reverse(Path1) ++ [$/ | Path2]; +%% "abc" + "/cde" +do_join_paths(Path1, [$/ | Path2]) -> + lists:reverse(Path1) ++ [$/ | Path2]; +%% "abc" + "cde" +do_join_paths(Path1, Path2) -> + lists:reverse(Path1) ++ [$/ | Path2]. + +to_list(List) when is_list(List) -> List; +to_list(Bin) when is_binary(Bin) -> binary_to_list(Bin). + +to_bin(Bin) when is_binary(Bin) -> Bin; -bin(Str) when is_list(Str) -> +to_bin(Str) when is_list(Str) -> list_to_binary(Str); -bin(Atom) when is_atom(Atom) -> +to_bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8). reply_delegator(ReplyFunAndArgs, Result) -> @@ -604,21 +659,13 @@ is_sensitive_key([{str, StringKey}]) -> is_sensitive_key(Atom) when is_atom(Atom) -> is_sensitive_key(erlang:atom_to_binary(Atom)); is_sensitive_key(Bin) when is_binary(Bin), (size(Bin) =:= 19 orelse size(Bin) =:= 13) -> - try - %% This is wrapped in a try-catch since we don't know that Bin is a - %% valid string so string:lowercase/1 might throw an exception. - %% - %% We want to convert this to lowercase since the http header fields - %% are case insensitive, which means that a user of the Webhook bridge - %% can write this field name in many different ways. - LowercaseBin = iolist_to_binary(string:lowercase(Bin)), - case LowercaseBin of - <<"authorization">> -> true; - <<"proxy-authorization">> -> true; - _ -> false - end - catch - _:_ -> false + %% We want to convert this to lowercase since the http header fields + %% are case insensitive, which means that a user of the Webhook bridge + %% can write this field name in many different ways. + case try_bin_to_lower(Bin) of + <<"authorization">> -> true; + <<"proxy-authorization">> -> true; + _ -> false end; is_sensitive_key(_) -> false. @@ -665,4 +712,21 @@ redact_test_() -> ?_assertNotEqual(TestData2, redact(TestData2)) ]. 
+join_paths_test_() -> + [ + ?_assertEqual("abc/cde", join_paths("abc", "cde")), + ?_assertEqual("abc/cde", join_paths("abc", "/cde")), + ?_assertEqual("abc/cde", join_paths("abc/", "cde")), + ?_assertEqual("abc/cde", join_paths("abc/", "/cde")), + + ?_assertEqual("/", join_paths("", "")), + ?_assertEqual("/cde", join_paths("", "cde")), + ?_assertEqual("/cde", join_paths("", "/cde")), + ?_assertEqual("/cde", join_paths("/", "cde")), + ?_assertEqual("/cde", join_paths("/", "/cde")), + + ?_assertEqual("//cde/", join_paths("/", "//cde/")), + ?_assertEqual("abc///cde/", join_paths("abc//", "//cde/")) + ]. + -endif. diff --git a/apps/emqx_connector/src/emqx_connector_ldap.erl b/apps/emqx_connector/src/emqx_connector_ldap.erl index ac2af301e..c3e1db7d3 100644 --- a/apps/emqx_connector/src/emqx_connector_ldap.erl +++ b/apps/emqx_connector/src/emqx_connector_ldap.erl @@ -67,7 +67,17 @@ on_start( connector => InstId, config => emqx_utils:redact(Config) }), - Servers = emqx_schema:parse_servers(Servers0, ?LDAP_HOST_OPTIONS), + Servers1 = emqx_schema:parse_servers(Servers0, ?LDAP_HOST_OPTIONS), + Servers = + lists:map( + fun + (#{hostname := Host, port := Port0}) -> + {Host, Port0}; + (#{hostname := Host}) -> + Host + end, + Servers1 + ), SslOpts = case maps:get(enable, SSL) of true -> @@ -87,20 +97,19 @@ on_start( {pool_size, PoolSize}, {auto_reconnect, ?AUTO_RECONNECT_INTERVAL} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ SslOpts) of - ok -> {ok, #{poolname => PoolName}}; + case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ SslOpts) of + ok -> {ok, #{pool_name => InstId}}; {error, Reason} -> {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_ldap_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). 
-on_query(InstId, {search, Base, Filter, Attributes}, #{poolname := PoolName} = State) -> +on_query(InstId, {search, Base, Filter, Attributes}, #{pool_name := PoolName} = State) -> Request = {Base, Filter, Attributes}, ?TRACE( "QUERY", diff --git a/apps/emqx_connector/src/emqx_connector_mongo.erl b/apps/emqx_connector/src/emqx_connector_mongo.erl index a5873bcf6..dde8652f0 100644 --- a/apps/emqx_connector/src/emqx_connector_mongo.erl +++ b/apps/emqx_connector/src/emqx_connector_mongo.erl @@ -182,12 +182,11 @@ on_start( {options, init_topology_options(maps:to_list(Topology), [])}, {worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), Collection = maps:get(collection, Config, <<"mqtt">>), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of + case emqx_resource_pool:start(InstId, ?MODULE, Opts) of ok -> {ok, #{ - poolname => PoolName, + pool_name => InstId, type => Type, collection => Collection }}; @@ -195,17 +194,17 @@ on_start( {error, Reason} end. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_mongodb_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). on_query( InstId, {send_message, Document}, - #{poolname := PoolName, collection := Collection} = State + #{pool_name := PoolName, collection := Collection} = State ) -> Request = {insert, Collection, Document}, ?TRACE( @@ -234,7 +233,7 @@ on_query( on_query( InstId, {Action, Collection, Filter, Projector}, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> Request = {Action, Collection, Filter, Projector}, ?TRACE( @@ -263,8 +262,7 @@ on_query( {ok, Result} end. --dialyzer({nowarn_function, [on_get_status/2]}). 
-on_get_status(InstId, #{poolname := PoolName} = _State) -> +on_get_status(InstId, #{pool_name := PoolName}) -> case health_check(PoolName) of true -> ?tp(debug, emqx_connector_mongo_health_check, #{ @@ -281,8 +279,10 @@ on_get_status(InstId, #{poolname := PoolName} = _State) -> end. health_check(PoolName) -> - emqx_plugin_libs_pool:health_check_ecpool_workers( - PoolName, fun ?MODULE:check_worker_health/1, ?HEALTH_CHECK_TIMEOUT + timer:seconds(1) + emqx_resource_pool:health_check_workers( + PoolName, + fun ?MODULE:check_worker_health/1, + ?HEALTH_CHECK_TIMEOUT + timer:seconds(1) ). %% =================================================================== @@ -537,4 +537,9 @@ format_hosts(Hosts) -> lists:map(fun format_host/1, Hosts). parse_servers(HoconValue) -> - emqx_schema:parse_servers(HoconValue, ?MONGO_HOST_OPTIONS). + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(HoconValue, ?MONGO_HOST_OPTIONS) + ). diff --git a/apps/emqx_connector/src/emqx_connector_mqtt.erl b/apps/emqx_connector/src/emqx_connector_mqtt.erl index 5b488825b..5cafd2d50 100644 --- a/apps/emqx_connector/src/emqx_connector_mqtt.erl +++ b/apps/emqx_connector/src/emqx_connector_mqtt.erl @@ -248,13 +248,12 @@ make_sub_confs(EmptyMap, _Conf, _) when map_size(EmptyMap) == 0 -> undefined; make_sub_confs(undefined, _Conf, _) -> undefined; -make_sub_confs(SubRemoteConf, Conf, InstanceId) -> - ResId = emqx_resource_manager:manager_id_to_resource_id(InstanceId), +make_sub_confs(SubRemoteConf, Conf, ResourceId) -> case maps:find(hookpoint, Conf) of error -> error({no_hookpoint_provided, Conf}); {ok, HookPoint} -> - MFA = {?MODULE, on_message_received, [HookPoint, ResId]}, + MFA = {?MODULE, on_message_received, [HookPoint, ResourceId]}, SubRemoteConf#{on_message_received => MFA} end. 
diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl index 6600a5f77..b8c1250fe 100644 --- a/apps/emqx_connector/src/emqx_connector_mysql.erl +++ b/apps/emqx_connector/src/emqx_connector_mysql.erl @@ -51,7 +51,7 @@ -type sqls() :: #{atom() => binary()}. -type state() :: #{ - poolname := atom(), + pool_name := binary(), prepare_statement := prepares(), params_tokens := params_tokens(), batch_inserts := sqls(), @@ -98,7 +98,7 @@ on_start( ssl := SSL } = Config ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?MYSQL_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?MYSQL_HOST_OPTIONS), ?SLOG(info, #{ msg => "starting_mysql_connector", connector => InstId, @@ -123,13 +123,10 @@ on_start( {pool_size, PoolSize} ] ), - - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - Prepares = parse_prepare_sql(Config), - State = maps:merge(#{poolname => PoolName}, Prepares), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of + State = parse_prepare_sql(Config), + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> - {ok, init_prepare(State)}; + {ok, init_prepare(State#{pool_name => InstId})}; {error, Reason} -> ?tp( mysql_connector_start_failed, @@ -143,12 +140,12 @@ maybe_add_password_opt(undefined, Options) -> maybe_add_password_opt(Password, Options) -> [{password, Password} | Options]. -on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping_mysql_connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). 
on_query(InstId, {TypeOrKey, SQLOrKey}, State) -> on_query(InstId, {TypeOrKey, SQLOrKey, [], default_timeout}, State); @@ -157,7 +154,7 @@ on_query(InstId, {TypeOrKey, SQLOrKey, Params}, State) -> on_query( InstId, {TypeOrKey, SQLOrKey, Params, Timeout}, - #{poolname := PoolName, prepare_statement := Prepares} = State + #{pool_name := PoolName, prepare_statement := Prepares} = State ) -> MySqlFunction = mysql_function(TypeOrKey), {SQLOrKey2, Data} = proc_sql_params(TypeOrKey, SQLOrKey, Params, State), @@ -216,8 +213,8 @@ mysql_function(prepared_query) -> mysql_function(_) -> mysql_function(prepared_query). -on_get_status(_InstId, #{poolname := Pool} = State) -> - case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of +on_get_status(_InstId, #{pool_name := PoolName} = State) -> + case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of true -> case do_check_prepares(State) of ok -> @@ -238,7 +235,7 @@ do_get_status(Conn) -> do_check_prepares(#{prepare_statement := Prepares}) when is_map(Prepares) -> ok; -do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, Prepares}}) -> +do_check_prepares(State = #{pool_name := PoolName, prepare_statement := {error, Prepares}}) -> %% retry to prepare case prepare_sql(Prepares, PoolName) of ok -> @@ -253,7 +250,7 @@ do_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, P connect(Options) -> mysql:start_link(Options). 
-init_prepare(State = #{prepare_statement := Prepares, poolname := PoolName}) -> +init_prepare(State = #{prepare_statement := Prepares, pool_name := PoolName}) -> case maps:size(Prepares) of 0 -> State; @@ -409,7 +406,7 @@ on_sql_query( SQLOrKey, Params, Timeout, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> LogMeta = #{connector => InstId, sql => SQLOrKey, state => State}, ?TRACE("QUERY", "mysql_connector_received", LogMeta), diff --git a/apps/emqx_connector/src/emqx_connector_pgsql.erl b/apps/emqx_connector/src/emqx_connector_pgsql.erl index 8796e00a5..3b2375d04 100644 --- a/apps/emqx_connector/src/emqx_connector_pgsql.erl +++ b/apps/emqx_connector/src/emqx_connector_pgsql.erl @@ -56,7 +56,7 @@ -type state() :: #{ - poolname := atom(), + pool_name := binary(), prepare_sql := prepares(), params_tokens := params_tokens(), prepare_statement := epgsql:statement() @@ -91,7 +91,7 @@ on_start( ssl := SSL } = Config ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?PGSQL_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?PGSQL_HOST_OPTIONS), ?SLOG(info, #{ msg => "starting_postgresql_connector", connector => InstId, @@ -120,13 +120,10 @@ on_start( {auto_reconnect, ?AUTO_RECONNECT_INTERVAL}, {pool_size, PoolSize} ], - PoolName = emqx_plugin_libs_pool:pool_name(InstId), - Prepares = parse_prepare_sql(Config), - InitState = #{poolname => PoolName, prepare_statement => #{}}, - State = maps:merge(InitState, Prepares), - case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of + State = parse_prepare_sql(Config), + case emqx_resource_pool:start(InstId, ?MODULE, Options ++ SslOpts) of ok -> - {ok, init_prepare(State)}; + {ok, init_prepare(State#{pool_name => InstId, prepare_statement => #{}})}; {error, Reason} -> ?tp( pgsql_connector_start_failed, @@ -135,19 +132,19 @@ on_start( {error, Reason} end. 
-on_stop(InstId, #{poolname := PoolName}) -> +on_stop(InstId, #{pool_name := PoolName}) -> ?SLOG(info, #{ msg => "stopping postgresql connector", connector => InstId }), - emqx_plugin_libs_pool:stop_pool(PoolName). + emqx_resource_pool:stop(PoolName). -on_query(InstId, {TypeOrKey, NameOrSQL}, #{poolname := _PoolName} = State) -> +on_query(InstId, {TypeOrKey, NameOrSQL}, State) -> on_query(InstId, {TypeOrKey, NameOrSQL, []}, State); on_query( InstId, {TypeOrKey, NameOrSQL, Params}, - #{poolname := PoolName} = State + #{pool_name := PoolName} = State ) -> ?SLOG(debug, #{ msg => "postgresql connector received sql query", @@ -174,7 +171,7 @@ pgsql_query_type(_) -> on_batch_query( InstId, BatchReq, - #{poolname := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State + #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State ) -> case BatchReq of [{Key, _} = Request | _] -> @@ -258,8 +255,8 @@ on_sql_query(InstId, PoolName, Type, NameOrSQL, Data) -> {error, {unrecoverable_error, invalid_request}} end. -on_get_status(_InstId, #{poolname := Pool} = State) -> - case emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1) of +on_get_status(_InstId, #{pool_name := PoolName} = State) -> + case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of true -> case do_check_prepares(State) of ok -> @@ -280,7 +277,7 @@ do_get_status(Conn) -> do_check_prepares(#{prepare_sql := Prepares}) when is_map(Prepares) -> ok; -do_check_prepares(State = #{poolname := PoolName, prepare_sql := {error, Prepares}}) -> +do_check_prepares(State = #{pool_name := PoolName, prepare_sql := {error, Prepares}}) -> %% retry to prepare case prepare_sql(Prepares, PoolName) of {ok, Sts} -> @@ -358,7 +355,7 @@ parse_prepare_sql([], Prepares, Tokens) -> params_tokens => Tokens }. 
-init_prepare(State = #{prepare_sql := Prepares, poolname := PoolName}) -> +init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) -> case maps:size(Prepares) of 0 -> State; @@ -389,17 +386,17 @@ prepare_sql(Prepares, PoolName) -> end. do_prepare_sql(Prepares, PoolName) -> - do_prepare_sql(ecpool:workers(PoolName), Prepares, PoolName, #{}). + do_prepare_sql(ecpool:workers(PoolName), Prepares, #{}). -do_prepare_sql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) -> +do_prepare_sql([{_Name, Worker} | T], Prepares, _LastSts) -> {ok, Conn} = ecpool_worker:client(Worker), case prepare_sql_to_conn(Conn, Prepares) of {ok, Sts} -> - do_prepare_sql(T, Prepares, PoolName, Sts); + do_prepare_sql(T, Prepares, Sts); Error -> Error end; -do_prepare_sql([], _Prepares, _PoolName, LastSts) -> +do_prepare_sql([], _Prepares, LastSts) -> {ok, LastSts}. prepare_sql_to_conn(Conn, Prepares) -> diff --git a/apps/emqx_connector/src/emqx_connector_redis.erl b/apps/emqx_connector/src/emqx_connector_redis.erl index 4ef778e6b..32ac77226 100644 --- a/apps/emqx_connector/src/emqx_connector_redis.erl +++ b/apps/emqx_connector/src/emqx_connector_redis.erl @@ -131,7 +131,13 @@ on_start( _ -> servers end, Servers0 = maps:get(ConfKey, Config), - Servers = [{servers, emqx_schema:parse_servers(Servers0, ?REDIS_HOST_OPTIONS)}], + Servers1 = lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(Servers0, ?REDIS_HOST_OPTIONS) + ), + Servers = [{servers, Servers1}], Database = case Type of cluster -> []; @@ -153,11 +159,10 @@ on_start( false -> [{ssl, false}] end ++ [{sentinel, maps:get(sentinel, Config, undefined)}], - PoolName = InstId, - State = #{poolname => PoolName, type => Type}, + State = #{pool_name => InstId, type => Type}, case Type of cluster -> - case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of + case eredis_cluster:start_pool(InstId, Opts ++ [{options, Options}]) of {ok, _} -> {ok, State}; {ok, 
_, _} -> @@ -166,22 +171,20 @@ on_start( {error, Reason} end; _ -> - case - emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ [{options, Options}]) - of + case emqx_resource_pool:start(InstId, ?MODULE, Opts ++ [{options, Options}]) of ok -> {ok, State}; {error, Reason} -> {error, Reason} end end. -on_stop(InstId, #{poolname := PoolName, type := Type}) -> +on_stop(InstId, #{pool_name := PoolName, type := Type}) -> ?SLOG(info, #{ msg => "stopping_redis_connector", connector => InstId }), case Type of cluster -> eredis_cluster:stop_pool(PoolName); - _ -> emqx_plugin_libs_pool:stop_pool(PoolName) + _ -> emqx_resource_pool:stop(PoolName) end. on_query(InstId, {cmd, _} = Query, State) -> @@ -189,7 +192,7 @@ on_query(InstId, {cmd, _} = Query, State) -> on_query(InstId, {cmds, _} = Query, State) -> do_query(InstId, Query, State). -do_query(InstId, Query, #{poolname := PoolName, type := Type} = State) -> +do_query(InstId, Query, #{pool_name := PoolName, type := Type} = State) -> ?TRACE( "QUERY", "redis_connector_received", @@ -227,7 +230,7 @@ is_unrecoverable_error({error, invalid_cluster_command}) -> is_unrecoverable_error(_) -> false. -on_get_status(_InstId, #{type := cluster, poolname := PoolName}) -> +on_get_status(_InstId, #{type := cluster, pool_name := PoolName}) -> case eredis_cluster:pool_exists(PoolName) of true -> Health = eredis_cluster:ping_all(PoolName), @@ -235,8 +238,8 @@ on_get_status(_InstId, #{type := cluster, poolname := PoolName}) -> false -> disconnected end; -on_get_status(_InstId, #{poolname := Pool}) -> - Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1), +on_get_status(_InstId, #{pool_name := PoolName}) -> + Health = emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1), status_result(Health). 
do_get_status(Conn) -> diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl index e08804685..2a40980af 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl @@ -293,4 +293,5 @@ qos() -> hoconsc:union([emqx_schema:qos(), binary()]). parse_server(Str) -> - emqx_schema:parse_server(Str, ?MQTT_HOST_OPTS). + #{hostname := Host, port := Port} = emqx_schema:parse_server(Str, ?MQTT_HOST_OPTS), + {Host, Port}. diff --git a/apps/emqx_connector/test/emqx_connector_http_tests.erl b/apps/emqx_connector/test/emqx_connector_http_tests.erl new file mode 100644 index 000000000..8d0fa6d2c --- /dev/null +++ b/apps/emqx_connector/test/emqx_connector_http_tests.erl @@ -0,0 +1,90 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_connector_http_tests). + +-include_lib("eunit/include/eunit.hrl"). + +-define(MY_SECRET, <<"my_precious">>). 
+ +wrap_auth_headers_test_() -> + {setup, + fun() -> + meck:expect(ehttpc_sup, start_pool, 2, {ok, foo}), + meck:expect(ehttpc, request, fun(_, _, Req, _, _) -> {ok, 200, Req} end), + meck:expect(ehttpc_pool, pick_worker, 1, self()), + [ehttpc_sup, ehttpc, ehttpc_pool] + end, + fun meck:unload/1, fun(_) -> + Config = #{ + base_url => #{ + scheme => http, + host => "localhost", + port => 18083, + path => "/status" + }, + connect_timeout => 1000, + pool_type => random, + pool_size => 1, + request => #{ + method => get, + path => "/status", + headers => auth_headers() + } + }, + {ok, #{request := #{headers := Headers}} = State} = emqx_connector_http:on_start( + <<"test">>, Config + ), + {ok, 200, Req} = emqx_connector_http:on_query(foo, {send_message, #{}}, State), + Tests = + [ + ?_assert(is_wrapped(V)) + || H <- Headers, is_tuple({K, V} = H), is_auth_header(untmpl(K)) + ], + [ + ?_assertEqual(4, length(Tests)), + ?_assert(is_unwrapped_headers(element(2, Req))) + | Tests + ] + end}. + +auth_headers() -> + [ + {<<"Authorization">>, ?MY_SECRET}, + {<<"authorization">>, ?MY_SECRET}, + {<<"Proxy-Authorization">>, ?MY_SECRET}, + {<<"proxy-authorization">>, ?MY_SECRET}, + {<<"X-Custom-Header">>, <<"foobar">>} + ]. + +is_auth_header(<<"Authorization">>) -> true; +is_auth_header(<<"Proxy-Authorization">>) -> true; +is_auth_header(<<"authorization">>) -> true; +is_auth_header(<<"proxy-authorization">>) -> true; +is_auth_header(_Other) -> false. + +is_wrapped(Secret) when is_function(Secret) -> + untmpl(emqx_secret:unwrap(Secret)) =:= ?MY_SECRET; +is_wrapped(_Other) -> + false. + +untmpl([{_, V} | _]) -> V. + +is_unwrapped_headers(Headers) -> + lists:all(fun is_unwrapped_header/1, Headers). + +is_unwrapped_header({_, V}) when is_function(V) -> false; +is_unwrapped_header({_, [{str, _V}]}) -> throw(unexpected_tmpl_token); +is_unwrapped_header(_) -> true. 
diff --git a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl index 2be30466c..9067c85de 100644 --- a/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mongo_SUITE.erl @@ -64,15 +64,15 @@ t_lifecycle(_Config) -> mongo_config() ). -perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?MONGO_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?MONGO_RESOURCE_MOD, CheckedConfig, @@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), - ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. 
{ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, []}, emqx_resource:query(PoolName, test_query_find())), - ?assertMatch({ok, undefined}, emqx_resource:query(PoolName, test_query_find_one())), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertMatch({ok, []}, emqx_resource:query(ResourceId, test_query_find())), + ?assertMatch({ok, undefined}, emqx_resource:query(ResourceId, test_query_find_one())), % Stop and remove the resource in one go. 
- ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl index dc5826766..a0455c92c 100644 --- a/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_mysql_SUITE.erl @@ -64,14 +64,14 @@ t_lifecycle(_Config) -> mysql_config() ). -perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?MYSQL_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?MYSQL_RESOURCE_MOD, CheckedConfig, @@ -83,53 +83,53 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())), + 
?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())), ?assertMatch( {ok, _, [[1]]}, emqx_resource:query( - PoolName, + ResourceId, test_query_with_params_and_timeout() ) ), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + 
?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [[1]]}, emqx_resource:query(ResourceId, test_query_with_params())), ?assertMatch( {ok, _, [[1]]}, emqx_resource:query( - PoolName, + ResourceId, test_query_with_params_and_timeout() ) ), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl index 2f77ca38d..a4ac4f932 100644 --- a/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_pgsql_SUITE.erl @@ -64,15 +64,15 @@ t_lifecycle(_Config) -> pgsql_config() ). 
-perform_lifecycle_check(PoolName, InitialConfig) -> +perform_lifecycle_check(ResourceId, InitialConfig) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?PGSQL_RESOURCE_MOD, CheckedConfig, @@ -84,39 +84,39 @@ perform_lifecycle_check(PoolName, InitialConfig) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % % Perform query as further check that the resource is working as expected - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. 
- ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())), - ?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_no_params())), + ?assertMatch({ok, _, [{1}]}, emqx_resource:query(ResourceId, test_query_with_params())), % Stop and remove the resource in one go. - ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). 
% %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl index 3a134ad35..e6df4f711 100644 --- a/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl +++ b/apps/emqx_connector/test/emqx_connector_redis_SUITE.erl @@ -102,14 +102,14 @@ t_sentinel_lifecycle(_Config) -> [<<"PING">>] ). -perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) -> +perform_lifecycle_check(ResourceId, InitialConfig, RedisCommand) -> {ok, #{config := CheckedConfig}} = emqx_resource:check_config(?REDIS_RESOURCE_MOD, InitialConfig), {ok, #{ - state := #{poolname := ReturnedPoolName} = State, + state := #{pool_name := PoolName} = State, status := InitialStatus }} = emqx_resource:create_local( - PoolName, + ResourceId, ?CONNECTOR_RESOURCE_GROUP, ?REDIS_RESOURCE_MOD, CheckedConfig, @@ -121,49 +121,49 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) -> state := State, status := InitialStatus }} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), % Perform query as further check that the resource is working as expected - ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})), + ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})), ?assertEqual( {ok, [{ok, <<"PONG">>}, {ok, <<"PONG">>}]}, - emqx_resource:query(PoolName, {cmds, [RedisCommand, RedisCommand]}) + emqx_resource:query(ResourceId, {cmds, [RedisCommand, RedisCommand]}) ), ?assertMatch( {error, {unrecoverable_error, [{ok, <<"PONG">>}, {error, _}]}}, emqx_resource:query( - PoolName, + ResourceId, {cmds, [RedisCommand, [<<"INVALID_COMMAND">>]]}, #{timeout => 500} ) ), - ?assertEqual(ok, emqx_resource:stop(PoolName)), + 
?assertEqual(ok, emqx_resource:stop(ResourceId)), % Resource will be listed still, but state will be changed and healthcheck will fail % as the worker no longer exists. {ok, ?CONNECTOR_RESOURCE_GROUP, #{ state := State, status := StoppedStatus }} = - emqx_resource:get_instance(PoolName), + emqx_resource:get_instance(ResourceId), ?assertEqual(stopped, StoppedStatus), - ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)), + ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(ResourceId)), % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself. - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Can call stop/1 again on an already stopped instance - ?assertEqual(ok, emqx_resource:stop(PoolName)), + ?assertEqual(ok, emqx_resource:stop(ResourceId)), % Make sure it can be restarted and the healthchecks and queries work properly - ?assertEqual(ok, emqx_resource:restart(PoolName)), + ?assertEqual(ok, emqx_resource:restart(ResourceId)), % async restart, need to wait resource timer:sleep(500), {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} = - emqx_resource:get_instance(PoolName), - ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)), - ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})), + emqx_resource:get_instance(ResourceId), + ?assertEqual({ok, connected}, emqx_resource:health_check(ResourceId)), + ?assertEqual({ok, <<"PONG">>}, emqx_resource:query(ResourceId, {cmd, RedisCommand})), % Stop and remove the resource in one go. 
- ?assertEqual(ok, emqx_resource:remove_local(PoolName)), - ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)), + ?assertEqual(ok, emqx_resource:remove_local(ResourceId)), + ?assertEqual({error, not_found}, ecpool:stop_sup_pool(PoolName)), % Should not even be able to get the resource data out of ets now unlike just stopping. - ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)). + ?assertEqual({error, not_found}, emqx_resource:get_instance(ResourceId)). % %%------------------------------------------------------------------------------ % %% Helpers diff --git a/apps/emqx_connector/test/emqx_connector_web_hook_server.erl b/apps/emqx_connector/test/emqx_connector_web_hook_server.erl index b68ebcbba..bdc6e100c 100644 --- a/apps/emqx_connector/test/emqx_connector_web_hook_server.erl +++ b/apps/emqx_connector/test/emqx_connector_web_hook_server.erl @@ -29,7 +29,14 @@ start_link(Port, Path) -> start_link(Port, Path, false). start_link(Port, Path, SSLOpts) -> - supervisor:start_link({local, ?MODULE}, ?MODULE, [Port, Path, SSLOpts]). + case Port of + random -> + PickedPort = pick_port_number(56000), + {ok, Pid} = supervisor:start_link({local, ?MODULE}, ?MODULE, [PickedPort, Path, SSLOpts]), + {ok, {PickedPort, Pid}}; + _ -> + supervisor:start_link({local, ?MODULE}, ?MODULE, [Port, Path, SSLOpts]) + end. stop() -> try @@ -103,3 +110,20 @@ default_handler(Req0, State) -> Req0 ), {ok, Req, State}. + +pick_port_number(Port) -> + case is_port_in_use(Port) of + true -> + pick_port_number(Port + 1); + false -> + Port + end. + +is_port_in_use(Port) -> + case gen_tcp:listen(Port, [{reuseaddr, true}, {active, false}]) of + {ok, ListenSocket} -> + gen_tcp:close(ListenSocket), + false; + {error, eaddrinuse} -> + true + end. 
diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index b810f9c5f..bd022f226 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,7 +2,7 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! - {vsn, "5.0.18"}, + {vsn, "5.0.20"}, {modules, []}, {registered, [emqx_dashboard_sup]}, {applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]}, diff --git a/apps/emqx_dashboard/src/emqx_dashboard.erl b/apps/emqx_dashboard/src/emqx_dashboard.erl index 6f0c8334a..08b7f0142 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard.erl @@ -16,22 +16,13 @@ -module(emqx_dashboard). --define(APP, ?MODULE). - -export([ start_listeners/0, start_listeners/1, stop_listeners/1, stop_listeners/0, - list_listeners/0 -]). - --export([ - init_i18n/2, - init_i18n/0, - get_i18n/0, - i18n_file/0, - clear_i18n/0 + list_listeners/0, + wait_for_listeners/0 ]). %% Authorization @@ -90,30 +81,34 @@ start_listeners(Listeners) -> dispatch => Dispatch, middlewares => [?EMQX_MIDDLE, cowboy_router, cowboy_handler] }, - Res = + {OkListeners, ErrListeners} = lists:foldl( - fun({Name, Protocol, Bind, RanchOptions, ProtoOpts}, Acc) -> + fun({Name, Protocol, Bind, RanchOptions, ProtoOpts}, {OkAcc, ErrAcc}) -> Minirest = BaseMinirest#{protocol => Protocol, protocol_options => ProtoOpts}, case minirest:start(Name, RanchOptions, Minirest) of {ok, _} -> ?ULOG("Listener ~ts on ~ts started.~n", [ Name, emqx_listeners:format_bind(Bind) ]), - Acc; + {[Name | OkAcc], ErrAcc}; {error, _Reason} -> %% Don't record the reason because minirest already does(too much logs noise). 
- [Name | Acc] + {OkAcc, [Name | ErrAcc]} end end, - [], + {[], []}, listeners(Listeners) ), - case Res of - [] -> ok; - _ -> {error, Res} + case ErrListeners of + [] -> + optvar:set(emqx_dashboard_listeners_ready, OkListeners), + ok; + _ -> + {error, ErrListeners} end. stop_listeners(Listeners) -> + optvar:unset(emqx_dashboard_listeners_ready), [ begin case minirest:stop(Name) of @@ -129,23 +124,8 @@ stop_listeners(Listeners) -> ], ok. -get_i18n() -> - application:get_env(emqx_dashboard, i18n). - -init_i18n(File, Lang) when is_atom(Lang) -> - init_i18n(File, atom_to_binary(Lang)); -init_i18n(File, Lang) when is_binary(Lang) -> - Cache = hocon_schema:new_desc_cache(File), - application:set_env(emqx_dashboard, i18n, #{lang => Lang, cache => Cache}). - -clear_i18n() -> - case application:get_env(emqx_dashboard, i18n) of - {ok, #{cache := Cache}} -> - hocon_schema:delete_desc_cache(Cache), - application:unset_env(emqx_dashboard, i18n); - undefined -> - ok - end. +wait_for_listeners() -> + optvar:read(emqx_dashboard_listeners_ready). %%-------------------------------------------------------------------- %% internal @@ -187,11 +167,6 @@ ip_port(error, Opts) -> {Opts#{port => 18083}, 18083}; ip_port({Port, Opts}, _) when is_integer(Port) -> {Opts#{port => Port}, Port}; ip_port({{IP, Port}, Opts}, _) -> {Opts#{port => Port, ip => IP}, {IP, Port}}. -init_i18n() -> - File = i18n_file(), - Lang = emqx_conf:get([dashboard, i18n_lang], en), - init_i18n(File, Lang). - ranch_opts(Options) -> Keys = [ handshake_timeout, @@ -255,12 +230,6 @@ return_unauthorized(Code, Message) -> }, #{code => Code, message => Message}}. -i18n_file() -> - case application:get_env(emqx_dashboard, i18n_file) of - undefined -> filename:join([code:priv_dir(emqx_dashboard), "i18n.conf"]); - {ok, File} -> File - end. - listeners() -> emqx_conf:get([dashboard, listeners], #{}). 
diff --git a/apps/emqx_dashboard/src/emqx_dashboard_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_api.erl index d5655d99d..108cde379 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_api.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_api.erl @@ -18,7 +18,6 @@ -behaviour(minirest_api). --include("emqx_dashboard.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -include_lib("emqx/include/logger.hrl"). -include_lib("typerefl/include/types.hrl"). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl new file mode 100644 index 000000000..b503fed88 --- /dev/null +++ b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl @@ -0,0 +1,110 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% @doc This module is used to cache the description of the configuration items. +-module(emqx_dashboard_desc_cache). + +-export([init/0]). + +%% internal exports +-export([load_desc/2, lookup/4, lookup/5]). + +-include_lib("emqx/include/logger.hrl"). + +%% @doc Global ETS table to cache the description of the configuration items. +%% The table is owned by the emqx_dashboard_sup the root supervisor of emqx_dashboard. 
+%% The cache is initialized with the default language (English) and +%% all the desc..hocon files in the www/static directory (extracted from dashboard package). +init() -> + ok = ensure_app_loaded(emqx_dashboard), + PrivDir = code:priv_dir(emqx_dashboard), + EngDesc = filename:join([PrivDir, "desc.en.hocon"]), + WwwStaticDir = filename:join([PrivDir, "www", "static"]), + OtherLangDesc0 = filelib:wildcard("desc.*.hocon", WwwStaticDir), + OtherLangDesc = lists:map(fun(F) -> filename:join([WwwStaticDir, F]) end, OtherLangDesc0), + Files = [EngDesc | OtherLangDesc], + ok = emqx_utils_ets:new(?MODULE, [public, ordered_set, {read_concurrency, true}]), + ok = lists:foreach(fun(F) -> load_desc(?MODULE, F) end, Files). + +%% @doc Load the description of the configuration items from the file. +%% Load is incremental, so it can be called multiple times. +%% NOTE: no garbage collection is done, because stale entries are harmless. +load_desc(EtsTab, File) -> + ?SLOG(info, #{msg => "loading desc", file => File}), + {ok, Descs} = hocon:load(File), + ["desc", Lang, "hocon"] = string:tokens(filename:basename(File), "."), + Insert = fun(Namespace, Id, Tag, Text) -> + Key = {bin(Lang), bin(Namespace), bin(Id), bin(Tag)}, + true = ets:insert(EtsTab, {Key, bin(Text)}), + ok + end, + walk_ns(Insert, maps:to_list(Descs)). + +%% @doc Lookup the description of the configuration item from the global cache. +lookup(Lang, Namespace, Id, Tag) -> + lookup(?MODULE, Lang, Namespace, Id, Tag). + +%% @doc Lookup the description of the configuration item from the given cache. +lookup(EtsTab, Lang0, Namespace, Id, Tag) -> + Lang = bin(Lang0), + try ets:lookup(EtsTab, {Lang, bin(Namespace), bin(Id), bin(Tag)}) of + [{_, Desc}] -> + Desc; + [] when Lang =/= <<"en">> -> + %% fallback to English + lookup(EtsTab, <<"en">>, Namespace, Id, Tag); + _ -> + %% undefined but not <<>> + undefined + catch + error:badarg -> + %% schema is not initialized + %% most likely in test cases + undefined + end. 
+ +%% The desc files are of names like: +%% desc.en.hocon or desc.zh.hocon +%% And with content like: +%% namespace.id.desc = "description" +%% namespace.id.label = "label" +walk_ns(_Insert, []) -> + ok; +walk_ns(Insert, [{Namespace, Ids} | Rest]) -> + walk_id(Insert, Namespace, maps:to_list(Ids)), + walk_ns(Insert, Rest). + +walk_id(_Insert, _Namespace, []) -> + ok; +walk_id(Insert, Namespace, [{Id, Tags} | Rest]) -> + walk_tag(Insert, Namespace, Id, maps:to_list(Tags)), + walk_id(Insert, Namespace, Rest). + +walk_tag(_Insert, _Namespace, _Id, []) -> + ok; +walk_tag(Insert, Namespace, Id, [{Tag, Text} | Rest]) -> + ok = Insert(Namespace, Id, Tag, Text), + walk_tag(Insert, Namespace, Id, Rest). + +bin(A) when is_atom(A) -> atom_to_binary(A, utf8); +bin(B) when is_binary(B) -> B; +bin(L) when is_list(L) -> list_to_binary(L). + +ensure_app_loaded(App) -> + case application:load(App) of + ok -> ok; + {error, {already_loaded, _}} -> ok + end. diff --git a/apps/emqx_dashboard/src/emqx_dashboard_listener.erl b/apps/emqx_dashboard/src/emqx_dashboard_listener.erl index 01d96bdf0..6a306c288 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_listener.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_listener.erl @@ -15,9 +15,11 @@ %%-------------------------------------------------------------------- -module(emqx_dashboard_listener). --include_lib("emqx/include/logger.hrl"). -behaviour(emqx_config_handler). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + %% API -export([add_handler/0, remove_handler/0]). -export([pre_config_update/3, post_config_update/5]). @@ -54,12 +56,10 @@ init([]) -> {ok, undefined, {continue, regenerate_dispatch}}. handle_continue(regenerate_dispatch, _State) -> - NewState = regenerate_minirest_dispatch(), - {noreply, NewState, hibernate}. + %% initialize the swagger dispatches + ready = regenerate_minirest_dispatch(), + {noreply, ready, hibernate}. 
-handle_call(is_ready, _From, retry) -> - NewState = regenerate_minirest_dispatch(), - {reply, NewState, NewState, hibernate}; handle_call(is_ready, _From, State) -> {reply, State, State, hibernate}; handle_call(_Request, _From, State) -> @@ -68,6 +68,9 @@ handle_call(_Request, _From, State) -> handle_cast(_Request, State) -> {noreply, State, hibernate}. +handle_info(i18n_lang_changed, _State) -> + NewState = regenerate_minirest_dispatch(), + {noreply, NewState, hibernate}; handle_info({update_listeners, OldListeners, NewListeners}, _State) -> ok = emqx_dashboard:stop_listeners(OldListeners), ok = emqx_dashboard:start_listeners(NewListeners), @@ -83,29 +86,26 @@ terminate(_Reason, _State) -> code_change(_OldVsn, State, _Extra) -> {ok, State}. -%% generate dispatch is very slow. +%% generate dispatch is very slow, takes about 1s. regenerate_minirest_dispatch() -> - try - emqx_dashboard:init_i18n(), - lists:foreach( - fun(Listener) -> - minirest:update_dispatch(element(1, Listener)) - end, - emqx_dashboard:list_listeners() - ), - ready - catch - T:E:S -> - ?SLOG(error, #{ - msg => "regenerate_minirest_dispatch_failed", - reason => E, - type => T, - stacktrace => S - }), - retry - after - emqx_dashboard:clear_i18n() - end. + %% optvar:read waits for the var to be set + Names = emqx_dashboard:wait_for_listeners(), + {Time, ok} = timer:tc(fun() -> do_regenerate_minirest_dispatch(Names) end), + Lang = emqx:get_config([dashboard, i18n_lang]), + ?tp(info, regenerate_minirest_dispatch, #{ + elapsed => erlang:convert_time_unit(Time, microsecond, millisecond), + listeners => Names, + i18n_lang => Lang + }), + ready. + +do_regenerate_minirest_dispatch(Names) -> + lists:foreach( + fun(Name) -> + ok = minirest:update_dispatch(Name) + end, + Names + ). add_handler() -> Roots = emqx_dashboard_schema:roots(), @@ -117,6 +117,12 @@ remove_handler() -> ok = emqx_config_handler:remove_handler(Roots), ok. +pre_config_update(_Path, {change_i18n_lang, NewLang}, RawConf) -> + %% e.g. 
emqx_conf:update([dashboard], {change_i18n_lang, zh}, #{}). + %% TODO: check if there is such a language (all languages are cached in emqx_dashboard_desc_cache) + Update = #{<<"i18n_lang">> => NewLang}, + NewConf = emqx_utils_maps:deep_merge(RawConf, Update), + {ok, NewConf}; pre_config_update(_Path, UpdateConf0, RawConf) -> UpdateConf = remove_sensitive_data(UpdateConf0), NewConf = emqx_utils_maps:deep_merge(RawConf, UpdateConf), @@ -139,6 +145,8 @@ remove_sensitive_data(Conf0) -> Conf1 end. +post_config_update(_, {change_i18n_lang, _}, _NewConf, _OldConf, _AppEnvs) -> + delay_job(i18n_lang_changed); post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) -> OldHttp = get_listener(http, OldConf), OldHttps = get_listener(https, OldConf), @@ -148,7 +156,12 @@ post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) -> {StopHttps, StartHttps} = diff_listeners(https, OldHttps, NewHttps), Stop = maps:merge(StopHttp, StopHttps), Start = maps:merge(StartHttp, StartHttps), - _ = erlang:send_after(500, ?MODULE, {update_listeners, Stop, Start}), + delay_job({update_listeners, Stop, Start}). + +%% in post_config_update, the config is not yet persisted to persistent_term +%% so we need to delegate the listener update to the gen_server a bit later +delay_job(Msg) -> + _ = erlang:send_after(500, ?MODULE, Msg), ok. get_listener(Type, Conf) -> diff --git a/apps/emqx_dashboard/src/emqx_dashboard_schema.erl b/apps/emqx_dashboard/src/emqx_dashboard_schema.erl index d3e4233d3..28bfb709a 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_schema.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_schema.erl @@ -102,9 +102,7 @@ fields("https") -> server_ssl_opts() -> Opts0 = emqx_schema:server_ssl_opts_schema(#{}, true), - Opts1 = exclude_fields(["fail_if_no_peer_cert"], Opts0), - {value, {_, Meta}, Opts2} = lists:keytake("password", 1, Opts1), - [{"password", Meta#{importance => ?IMPORTANCE_HIDDEN}} | Opts2]. + exclude_fields(["fail_if_no_peer_cert"], Opts0). 
exclude_fields([], Fields) -> Fields; @@ -233,6 +231,8 @@ cors(required) -> false; cors(desc) -> ?DESC(cors); cors(_) -> undefined. +%% TODO: change it to string type +%% It will be up to the dashboard package which languagues to support i18n_lang(type) -> ?ENUM([en, zh]); i18n_lang(default) -> en; i18n_lang('readOnly') -> true; diff --git a/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl new file mode 100644 index 000000000..e4f2f0c1a --- /dev/null +++ b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl @@ -0,0 +1,76 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +%% This module is for dashboard to retrieve the schema hot config and bridges. +-module(emqx_dashboard_schema_api). + +-behaviour(minirest_api). + +-include_lib("hocon/include/hoconsc.hrl"). + +%% minirest API +-export([api_spec/0, paths/0, schema/1]). + +-export([get_schema/2]). + +-define(TAGS, [<<"dashboard">>]). +-define(BAD_REQUEST, 'BAD_REQUEST'). + +%%-------------------------------------------------------------------- +%% minirest API and schema +%%-------------------------------------------------------------------- + +api_spec() -> + emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). 
+ +paths() -> + ["/schemas/:name"]. + +%% This is a rather hidden API, so we don't need to add translations for the description. +schema("/schemas/:name") -> + #{ + 'operationId' => get_schema, + get => #{ + parameters => [ + {name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})} + ], + desc => << + "Get the schema JSON of the specified name. " + "NOTE: only intended for EMQX Dashboard." + >>, + tags => ?TAGS, + security => [], + responses => #{ + 200 => hoconsc:mk(binary(), #{desc => <<"The JSON schema of the specified name.">>}) + } + } + }. + +%%-------------------------------------------------------------------- +%% API Handler funcs +%%-------------------------------------------------------------------- + +get_schema(get, #{ + bindings := #{name := Name} +}) -> + {200, gen_schema(Name)}; +get_schema(get, _) -> + {400, ?BAD_REQUEST, <<"unknown">>}. + +gen_schema(hotconf) -> + emqx_conf:hotconf_schema_json(); +gen_schema(bridges) -> + emqx_conf:bridge_schema_json(). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_sup.erl b/apps/emqx_dashboard/src/emqx_dashboard_sup.erl index 896b44859..04d8ed1d5 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_sup.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_sup.erl @@ -28,6 +28,8 @@ start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> + %% supervisor owns the cache table + ok = emqx_dashboard_desc_cache:init(), {ok, {{one_for_one, 5, 100}, [ ?CHILD(emqx_dashboard_listener, brutal_kill), diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index bffedaf3c..528fcd972 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -26,7 +26,11 @@ -export([error_codes/1, error_codes/2]). -export([file_schema/1]). --export([filter_check_request/2, filter_check_request_and_translate_body/2]). 
+-export([ + filter_check_request/2, + filter_check_request_and_translate_body/2, + gen_api_schema_json_iodata/3 +]). -ifdef(TEST). -export([ @@ -72,6 +76,8 @@ ]) ). +-define(SPECIAL_LANG_MSGID, <<"$msgid">>). + -define(MAX_ROW_LIMIT, 1000). -define(DEFAULT_ROW, 100). @@ -84,7 +90,8 @@ -type spec_opts() :: #{ check_schema => boolean() | filter(), translate_body => boolean(), - schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map()) + schema_converter => fun((hocon_schema:schema(), Module :: atom()) -> map()), + i18n_lang => atom() | string() | binary() }. -type route_path() :: string() | binary(). @@ -191,6 +198,50 @@ file_schema(FileName) -> } }. +gen_api_schema_json_iodata(SchemaMod, SchemaInfo, Converter) -> + {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( + SchemaMod, + #{ + schema_converter => Converter, + i18n_lang => ?SPECIAL_LANG_MSGID + } + ), + ApiSpec = lists:foldl( + fun({Path, Spec, _, _}, Acc) -> + NewSpec = maps:fold( + fun(Method, #{responses := Responses}, SubAcc) -> + case Responses of + #{ + <<"200">> := + #{ + <<"content">> := #{ + <<"application/json">> := #{<<"schema">> := Schema} + } + } + } -> + SubAcc#{Method => Schema}; + _ -> + SubAcc + end + end, + #{}, + Spec + ), + Acc#{list_to_atom(Path) => NewSpec} + end, + #{}, + ApiSpec0 + ), + Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0), + emqx_utils_json:encode( + #{ + info => SchemaInfo, + paths => ApiSpec, + components => #{schemas => Components} + }, + [pretty, force_utf8] + ). 
+ %%------------------------------------------------------------------------------ %% Private functions %%------------------------------------------------------------------------------ @@ -340,11 +391,11 @@ check_request_body(#{body := Body}, Spec, _Module, _CheckFun, false) when is_map %% tags, description, summary, security, deprecated meta_to_spec(Meta, Module, Options) -> - {Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module), + {Params, Refs1} = parameters(maps:get(parameters, Meta, []), Module, Options), {RequestBody, Refs2} = request_body(maps:get('requestBody', Meta, []), Module, Options), {Responses, Refs3} = responses(maps:get(responses, Meta, #{}), Module, Options), { - generate_method_desc(to_spec(Meta, Params, RequestBody, Responses)), + generate_method_desc(to_spec(Meta, Params, RequestBody, Responses), Options), lists:usort(Refs1 ++ Refs2 ++ Refs3) }. @@ -355,13 +406,13 @@ to_spec(Meta, Params, RequestBody, Responses) -> Spec = to_spec(Meta, Params, [], Responses), maps:put('requestBody', RequestBody, Spec). -generate_method_desc(Spec = #{desc := _Desc}) -> - Spec1 = trans_description(maps:remove(desc, Spec), Spec), +generate_method_desc(Spec = #{desc := _Desc}, Options) -> + Spec1 = trans_description(maps:remove(desc, Spec), Spec, Options), trans_tags(Spec1); -generate_method_desc(Spec = #{description := _Desc}) -> - Spec1 = trans_description(Spec, Spec), +generate_method_desc(Spec = #{description := _Desc}, Options) -> + Spec1 = trans_description(Spec, Spec, Options), trans_tags(Spec1); -generate_method_desc(Spec) -> +generate_method_desc(Spec, _Options) -> trans_tags(Spec). trans_tags(Spec = #{tags := Tags}) -> @@ -369,7 +420,7 @@ trans_tags(Spec = #{tags := Tags}) -> trans_tags(Spec) -> Spec. 
-parameters(Params, Module) -> +parameters(Params, Module, Options) -> {SpecList, AllRefs} = lists:foldl( fun(Param, {Acc, RefsAcc}) -> @@ -395,7 +446,7 @@ parameters(Params, Module) -> Type ), Spec1 = trans_required(Spec0, Required, In), - Spec2 = trans_description(Spec1, Type), + Spec2 = trans_description(Spec1, Type, Options), {[Spec2 | Acc], Refs ++ RefsAcc} end end, @@ -439,38 +490,38 @@ trans_required(Spec, true, _) -> Spec#{required => true}; trans_required(Spec, _, path) -> Spec#{required => true}; trans_required(Spec, _, _) -> Spec. -trans_desc(Init, Hocon, Func, Name) -> - Spec0 = trans_description(Init, Hocon), +trans_desc(Init, Hocon, Func, Name, Options) -> + Spec0 = trans_description(Init, Hocon, Options), case Func =:= fun hocon_schema_to_spec/2 of true -> Spec0; false -> - Spec1 = trans_label(Spec0, Hocon, Name), + Spec1 = trans_label(Spec0, Hocon, Name, Options), case Spec1 of #{description := _} -> Spec1; _ -> Spec1#{description => <>} end end. -trans_description(Spec, Hocon) -> +trans_description(Spec, Hocon, Options) -> Desc = case desc_struct(Hocon) of undefined -> undefined; - ?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined); - Struct -> to_bin(Struct) + ?DESC(_, _) = Struct -> get_i18n(<<"desc">>, Struct, undefined, Options); + Text -> to_bin(Text) end, case Desc of undefined -> Spec; Desc -> Desc1 = binary:replace(Desc, [<<"\n">>], <<"
">>, [global]), - maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon) + maybe_add_summary_from_label(Spec#{description => Desc1}, Hocon, Options) end. -maybe_add_summary_from_label(Spec, Hocon) -> +maybe_add_summary_from_label(Spec, Hocon, Options) -> Label = case desc_struct(Hocon) of - ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined); + ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, undefined, Options); _ -> undefined end, case Label of @@ -478,29 +529,60 @@ maybe_add_summary_from_label(Spec, Hocon) -> _ -> Spec#{summary => Label} end. -get_i18n(Key, Struct, Default) -> - {ok, #{cache := Cache, lang := Lang}} = emqx_dashboard:get_i18n(), - Desc = hocon_schema:resolve_schema(Struct, Cache), - emqx_utils_maps:deep_get([Key, Lang], Desc, Default). +get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) -> + Lang = get_lang(Options), + case Lang of + ?SPECIAL_LANG_MSGID -> + make_msgid(Namespace, Id, Tag); + _ -> + get_i18n_text(Lang, Namespace, Id, Tag, Default) + end. -trans_label(Spec, Hocon, Default) -> +get_i18n_text(Lang, Namespace, Id, Tag, Default) -> + case emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, Tag) of + undefined -> + Default; + Text -> + Text + end. + +%% Format:$msgid:Namespace.Id.Tag +%% e.g. $msgid:emqx_schema.key.desc +%% $msgid:emqx_schema.key.label +%% if needed, the consumer of this schema JSON can use this msgid to +%% resolve the text in the i18n database. +make_msgid(Namespace, Id, Tag) -> + iolist_to_binary(["$msgid:", to_bin(Namespace), ".", to_bin(Id), ".", Tag]). + +%% So far i18n_lang in options is only used at build time. +%% At runtime, it's still the global config which controls the language. +get_lang(#{i18n_lang := Lang}) -> Lang; +get_lang(_) -> emqx:get_config([dashboard, i18n_lang]). 
+ +trans_label(Spec, Hocon, Default, Options) -> Label = case desc_struct(Hocon) of - ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default); + ?DESC(_, _) = Struct -> get_i18n(<<"label">>, Struct, Default, Options); _ -> Default end, Spec#{label => Label}. desc_struct(Hocon) -> - case hocon_schema:field_schema(Hocon, desc) of - undefined -> - case hocon_schema:field_schema(Hocon, description) of - undefined -> get_ref_desc(Hocon); - Struct1 -> Struct1 - end; - Struct -> - Struct - end. + R = + case hocon_schema:field_schema(Hocon, desc) of + undefined -> + case hocon_schema:field_schema(Hocon, description) of + undefined -> get_ref_desc(Hocon); + Struct1 -> Struct1 + end; + Struct -> + Struct + end, + ensure_bin(R). + +ensure_bin(undefined) -> undefined; +ensure_bin(?DESC(_Namespace, _Id) = Desc) -> Desc; +ensure_bin(Text) -> to_bin(Text). get_ref_desc(?R_REF(Mod, Name)) -> case erlang:function_exported(Mod, desc, 1) of @@ -531,7 +613,7 @@ responses(Responses, Module, Options) -> {Spec, Refs}. 
response(Status, ?DESC(_Mod, _Id) = Schema, {Acc, RefsAcc, Module, Options}) -> - Desc = trans_description(#{}, #{desc => Schema}), + Desc = trans_description(#{}, #{desc => Schema}, Options), {Acc#{integer_to_binary(Status) => Desc}, RefsAcc, Module, Options}; response(Status, Bin, {Acc, RefsAcc, Module, Options}) when is_binary(Bin) -> {Acc#{integer_to_binary(Status) => #{description => Bin}}, RefsAcc, Module, Options}; @@ -560,7 +642,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) -> Hocon = hocon_schema:field_schema(Schema, type), Examples = hocon_schema:field_schema(Schema, examples), {Spec, Refs} = hocon_schema_to_spec(Hocon, Module), - Init = trans_description(#{}, Schema), + Init = trans_description(#{}, Schema, Options), Content = content(Spec, Examples), { Acc#{integer_to_binary(Status) => Init#{<<"content">> => Content}}, @@ -570,7 +652,7 @@ response(Status, Schema, {Acc, RefsAcc, Module, Options}) -> }; false -> {Props, Refs} = parse_object(Schema, Module, Options), - Init = trans_description(#{}, Schema), + Init = trans_description(#{}, Schema, Options), Content = Init#{<<"content">> => content(Props)}, {Acc#{integer_to_binary(Status) => Content}, Refs ++ RefsAcc, Module, Options} end. @@ -597,7 +679,7 @@ components(Options, [{Module, Field} | Refs], SpecAcc, SubRefsAcc) -> %% parameters in ref only have one value, not array components(Options, [{Module, Field, parameter} | Refs], SpecAcc, SubRefsAcc) -> Props = hocon_schema_fields(Module, Field), - {[Param], SubRefs} = parameters(Props, Module), + {[Param], SubRefs} = parameters(Props, Module, Options), Namespace = namespace(Module), NewSpecAcc = SpecAcc#{?TO_REF(Namespace, Field) => Param}, components(Options, Refs, NewSpecAcc, SubRefs ++ SubRefsAcc). 
@@ -751,7 +833,7 @@ typename_to_spec("log_level()", _Mod) -> }; typename_to_spec("rate()", _Mod) -> #{type => string, example => <<"10MB">>}; -typename_to_spec("capacity()", _Mod) -> +typename_to_spec("burst()", _Mod) -> #{type => string, example => <<"100MB">>}; typename_to_spec("burst_rate()", _Mod) -> %% 0/0s = no burst @@ -876,7 +958,7 @@ parse_object_loop([{Name, Hocon} | Rest], Module, Options, Props, Required, Refs HoconType = hocon_schema:field_schema(Hocon, type), Init0 = init_prop([default | ?DEFAULT_FIELDS], #{}, Hocon), SchemaToSpec = schema_converter(Options), - Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin), + Init = trans_desc(Init0, Hocon, SchemaToSpec, NameBin, Options), {Prop, Refs1} = SchemaToSpec(HoconType, Module), NewRequiredAcc = case is_required(Hocon) of diff --git a/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl index c0a772d2d..588a69065 100644 --- a/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_dashboard_error_code_SUITE.erl @@ -57,7 +57,7 @@ t_look_up_code(_) -> t_description_code(_) -> {error, not_found} = emqx_dashboard_error_code:description('_____NOT_EXIST_NAME'), - {ok, <<"Request parameters are not legal">>} = + {ok, <<"Request parameters are invalid">>} = emqx_dashboard_error_code:description('BAD_REQUEST'), ok. @@ -79,7 +79,7 @@ t_api_code(_) -> Url = ?SERVER ++ "/error_codes/BAD_REQUEST", {ok, #{ <<"code">> := <<"BAD_REQUEST">>, - <<"description">> := <<"Request parameters are not legal">> + <<"description">> := <<"Request parameters are invalid">> }} = request(Url), ok. 
diff --git a/apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl new file mode 100644 index 000000000..7f28841fc --- /dev/null +++ b/apps/emqx_dashboard/test/emqx_dashboard_listener_SUITE.erl @@ -0,0 +1,51 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-module(emqx_dashboard_listener_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + ok = change_i18n_lang(en), + Config. + +end_per_suite(_Config) -> + ok = change_i18n_lang(en), + emqx_mgmt_api_test_util:end_suite([emqx_conf]). + +t_change_i18n_lang(_Config) -> + ?check_trace( + begin + ok = change_i18n_lang(zh), + {ok, _} = ?block_until(#{?snk_kind := regenerate_minirest_dispatch}, 10_000), + ok + end, + fun(ok, Trace) -> + ?assertMatch([#{i18n_lang := zh}], ?of_kind(regenerate_minirest_dispatch, Trace)) + end + ), + ok. + +change_i18n_lang(Lang) -> + {ok, _} = emqx_conf:update([dashboard], {change_i18n_lang, Lang}, #{}), + ok. 
diff --git a/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl new file mode 100644 index 000000000..e4425aed8 --- /dev/null +++ b/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl @@ -0,0 +1,52 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_dashboard_schema_api_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx/include/http_api.hrl"). + +-include_lib("eunit/include/eunit.hrl"). + +-define(SERVER, "http://127.0.0.1:18083/api/v5"). + +-import(emqx_mgmt_api_test_util, [request/2]). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]). + +t_hotconf(_) -> + Url = ?SERVER ++ "/schemas/hotconf", + {ok, 200, Body} = request(get, Url), + %% assert it's a valid json + _ = emqx_utils_json:decode(Body), + ok. + +t_bridges(_) -> + Url = ?SERVER ++ "/schemas/bridges", + {ok, 200, Body} = request(get, Url), + %% assert it's a valid json + _ = emqx_utils_json:decode(Body), + ok. 
diff --git a/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl index 472e90405..81b3f4402 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_parameter_SUITE.erl @@ -64,7 +64,6 @@ groups() -> init_per_suite(Config) -> emqx_mgmt_api_test_util:init_suite([emqx_conf]), - emqx_dashboard:init_i18n(), Config. end_per_suite(_Config) -> diff --git a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl index 4dbc8abdc..af4b901b2 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl @@ -33,7 +33,6 @@ init_per_suite(Config) -> mria:start(), application:load(emqx_dashboard), emqx_common_test_helpers:start_apps([emqx_conf, emqx_dashboard], fun set_special_configs/1), - emqx_dashboard:init_i18n(), Config. set_special_configs(emqx_dashboard) -> diff --git a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl index f357bdc8f..a3d2b4e75 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl @@ -33,7 +33,6 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_mgmt_api_test_util:init_suite([emqx_conf]), - emqx_dashboard:init_i18n(), Config. 
end_per_suite(Config) -> diff --git a/apps/emqx_gateway/src/emqx_gateway.app.src b/apps/emqx_gateway/src/emqx_gateway.app.src index 0419666b4..2ffca464d 100644 --- a/apps/emqx_gateway/src/emqx_gateway.app.src +++ b/apps/emqx_gateway/src/emqx_gateway.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_gateway, [ {description, "The Gateway management application"}, - {vsn, "0.1.15"}, + {vsn, "0.1.16"}, {registered, []}, {mod, {emqx_gateway_app, []}}, {applications, [kernel, stdlib, emqx, emqx_authn, emqx_ctl]}, diff --git a/apps/emqx_gateway/src/emqx_gateway_utils.erl b/apps/emqx_gateway/src/emqx_gateway_utils.erl index 7a0188387..ced9eff48 100644 --- a/apps/emqx_gateway/src/emqx_gateway_utils.erl +++ b/apps/emqx_gateway/src/emqx_gateway_utils.erl @@ -78,7 +78,7 @@ -define(DEFAULT_GC_OPTS, #{count => 1000, bytes => 1024 * 1024}). -define(DEFAULT_OOM_POLICY, #{ max_heap_size => 4194304, - max_message_queue_len => 32000 + max_mailbox_size => 32000 }). -elvis([{elvis_style, god_modules, disable}]). diff --git a/apps/emqx_gateway/test/emqx_gateway_conf_SUITE.erl b/apps/emqx_gateway/test/emqx_gateway_conf_SUITE.erl index 1e947e793..ce709efc3 100644 --- a/apps/emqx_gateway/test/emqx_gateway_conf_SUITE.erl +++ b/apps/emqx_gateway/test/emqx_gateway_conf_SUITE.erl @@ -274,7 +274,7 @@ t_load_unload_gateway(_) -> ?assertException( error, - {config_not_found, [gateway, stomp]}, + {config_not_found, [<<"gateway">>, stomp]}, emqx:get_raw_config([gateway, stomp]) ), ok. @@ -307,7 +307,7 @@ t_load_remove_authn(_) -> ?assertException( error, - {config_not_found, [gateway, stomp, authentication]}, + {config_not_found, [<<"gateway">>, stomp, authentication]}, emqx:get_raw_config([gateway, stomp, authentication]) ), ok. 
@@ -352,7 +352,7 @@ t_load_remove_listeners(_) -> ?assertException( error, - {config_not_found, [gateway, stomp, listeners, tcp, default]}, + {config_not_found, [<<"gateway">>, stomp, listeners, tcp, default]}, emqx:get_raw_config([gateway, stomp, listeners, tcp, default]) ), ok. @@ -401,7 +401,7 @@ t_load_remove_listener_authn(_) -> Path = [gateway, stomp, listeners, tcp, default, authentication], ?assertException( error, - {config_not_found, Path}, + {config_not_found, [<<"gateway">>, stomp, listeners, tcp, default, authentication]}, emqx:get_raw_config(Path) ), ok. @@ -421,7 +421,7 @@ t_load_gateway_with_certs_content(_) -> assert_ssl_confs_files_deleted(SslConf), ?assertException( error, - {config_not_found, [gateway, stomp]}, + {config_not_found, [<<"gateway">>, stomp]}, emqx:get_raw_config([gateway, stomp]) ), ok. @@ -489,7 +489,7 @@ t_add_listener_with_certs_content(_) -> ?assertException( error, - {config_not_found, [gateway, stomp, listeners, ssl, default]}, + {config_not_found, [<<"gateway">>, stomp, listeners, ssl, default]}, emqx:get_raw_config([gateway, stomp, listeners, ssl, default]) ), ok. diff --git a/apps/emqx_machine/README.md b/apps/emqx_machine/README.md index 8c2bb6391..a8221ba73 100644 --- a/apps/emqx_machine/README.md +++ b/apps/emqx_machine/README.md @@ -31,7 +31,7 @@ It helps to shut down EMQX broker gracefully when it receives `SIGTERM` signal. Currently `emqx_machine` boots the business apps before starting autocluster, so a fresh node joining the cluster actually starts business application twice: first in the singleton mode, and then in clustered mode. 
-# Documention links +# Documentation links Configuration: [node.global_gc_interval](https://www.emqx.io/docs/en/v5.0/configuration/configuration-manual.html#node-and-cookie) diff --git a/apps/emqx_machine/src/emqx_machine.app.src b/apps/emqx_machine/src/emqx_machine.app.src index 6bd36aab5..a44d2b36e 100644 --- a/apps/emqx_machine/src/emqx_machine.app.src +++ b/apps/emqx_machine/src/emqx_machine.app.src @@ -3,7 +3,7 @@ {id, "emqx_machine"}, {description, "The EMQX Machine"}, % strict semver, bump manually! - {vsn, "0.2.2"}, + {vsn, "0.2.3"}, {modules, []}, {registered, []}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_machine/src/emqx_machine.erl b/apps/emqx_machine/src/emqx_machine.erl index 6872b150c..aa8f03ae5 100644 --- a/apps/emqx_machine/src/emqx_machine.erl +++ b/apps/emqx_machine/src/emqx_machine.erl @@ -43,7 +43,7 @@ start() -> start_sysmon(), configure_shard_transports(), ekka:start(), - ok = print_otp_version_warning(). + ok. graceful_shutdown() -> emqx_machine_terminator:graceful_wait(). @@ -61,17 +61,6 @@ set_backtrace_depth() -> is_ready() -> emqx_machine_terminator:is_running(). --if(?OTP_RELEASE > 22). -print_otp_version_warning() -> ok. --else. -print_otp_version_warning() -> - ?ULOG( - "WARNING: Running on Erlang/OTP version ~p. Recommended: 23~n", - [?OTP_RELEASE] - ). -% OTP_RELEASE > 22 --endif. - start_sysmon() -> _ = application:load(system_monitor), application:set_env(system_monitor, node_status_fun, {?MODULE, node_status}), diff --git a/apps/emqx_machine/src/emqx_machine_terminator.erl b/apps/emqx_machine/src/emqx_machine_terminator.erl index 7120cc19b..77c53a64d 100644 --- a/apps/emqx_machine/src/emqx_machine_terminator.erl +++ b/apps/emqx_machine/src/emqx_machine_terminator.erl @@ -41,7 +41,7 @@ -define(DO_IT, graceful_shutdown). %% @doc This API is called to shutdown the Erlang VM by RPC call from remote shell node. 
-%% The shutown of apps is delegated to a to a process instead of doing it in the RPC spawned +%% The shutdown of apps is delegated to a to a process instead of doing it in the RPC spawned %% process which has a remote group leader. start_link() -> {ok, _} = gen_server:start_link({local, ?TERMINATOR}, ?MODULE, [], []). @@ -87,8 +87,9 @@ handle_cast(_Cast, State) -> handle_call(?DO_IT, _From, State) -> try - emqx_machine_boot:stop_apps(), - emqx_machine_boot:stop_port_apps() + %% stop port apps before stopping other apps. + emqx_machine_boot:stop_port_apps(), + emqx_machine_boot:stop_apps() catch C:E:St -> Apps = [element(1, A) || A <- application:which_applications()], diff --git a/apps/emqx_machine/test/emqx_machine_SUITE.erl b/apps/emqx_machine/test/emqx_machine_SUITE.erl index 691cda677..02d03d983 100644 --- a/apps/emqx_machine/test/emqx_machine_SUITE.erl +++ b/apps/emqx_machine/test/emqx_machine_SUITE.erl @@ -103,3 +103,13 @@ t_custom_shard_transports(_Config) -> emqx_machine:start(), ?assertEqual(distr, mria_config:shard_transport(Shard)), ok. + +t_node_status(_Config) -> + JSON = emqx_machine:node_status(), + ?assertMatch( + #{ + <<"backend">> := _, + <<"role">> := <<"core">> + }, + jsx:decode(JSON) + ). diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src index f423213af..34f3dd1fe 100644 --- a/apps/emqx_management/src/emqx_management.app.src +++ b/apps/emqx_management/src/emqx_management.app.src @@ -2,7 +2,7 @@ {application, emqx_management, [ {description, "EMQX Management API and CLI"}, % strict semver, bump manually! 
- {vsn, "5.0.19"}, + {vsn, "5.0.21"}, {modules, []}, {registered, [emqx_management_sup]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx, emqx_ctl]}, diff --git a/apps/emqx_management/src/emqx_mgmt.erl b/apps/emqx_management/src/emqx_mgmt.erl index 5ba12646f..0b91817f0 100644 --- a/apps/emqx_management/src/emqx_mgmt.erl +++ b/apps/emqx_management/src/emqx_mgmt.erl @@ -112,8 +112,8 @@ %%-------------------------------------------------------------------- list_nodes() -> - Running = mria:cluster_nodes(running), - Stopped = mria:cluster_nodes(stopped), + Running = emqx:cluster_nodes(running), + Stopped = emqx:cluster_nodes(stopped), DownNodes = lists:map(fun stopped_node_info/1, Stopped), [{Node, Info} || #{node := Node} = Info <- node_info(Running)] ++ DownNodes. @@ -199,7 +199,7 @@ vm_stats() -> %%-------------------------------------------------------------------- list_brokers() -> - Running = mria:running_nodes(), + Running = emqx:running_nodes(), [{Node, Broker} || #{node := Node} = Broker <- broker_info(Running)]. lookup_broker(Node) -> @@ -223,7 +223,7 @@ broker_info(Nodes) -> %%-------------------------------------------------------------------- get_metrics() -> - nodes_info_count([get_metrics(Node) || Node <- mria:running_nodes()]). + nodes_info_count([get_metrics(Node) || Node <- emqx:running_nodes()]). get_metrics(Node) -> unwrap_rpc(emqx_proto_v1:get_metrics(Node)). 
@@ -238,13 +238,20 @@ get_stats() -> 'subscriptions.shared.count', 'subscriptions.shared.max' ], - CountStats = nodes_info_count([ - begin - Stats = get_stats(Node), - delete_keys(Stats, GlobalStatsKeys) - end - || Node <- mria:running_nodes() - ]), + CountStats = nodes_info_count( + lists:foldl( + fun(Node, Acc) -> + case get_stats(Node) of + {error, _} -> + Acc; + Stats -> + [delete_keys(Stats, GlobalStatsKeys) | Acc] + end + end, + [], + emqx:running_nodes() + ) + ), GlobalStats = maps:with(GlobalStatsKeys, maps:from_list(get_stats(node()))), maps:merge(CountStats, GlobalStats). @@ -275,12 +282,12 @@ nodes_info_count(PropList) -> lookup_client({clientid, ClientId}, FormatFun) -> lists:append([ lookup_client(Node, {clientid, ClientId}, FormatFun) - || Node <- mria:running_nodes() + || Node <- emqx:running_nodes() ]); lookup_client({username, Username}, FormatFun) -> lists:append([ lookup_client(Node, {username, Username}, FormatFun) - || Node <- mria:running_nodes() + || Node <- emqx:running_nodes() ]). lookup_client(Node, Key, FormatFun) -> @@ -307,7 +314,7 @@ kickout_client(ClientId) -> [] -> {error, not_found}; _ -> - Results = [kickout_client(Node, ClientId) || Node <- mria:running_nodes()], + Results = [kickout_client(Node, ClientId) || Node <- emqx:running_nodes()], check_results(Results) end. @@ -322,7 +329,7 @@ list_client_subscriptions(ClientId) -> [] -> {error, not_found}; _ -> - Results = [client_subscriptions(Node, ClientId) || Node <- mria:running_nodes()], + Results = [client_subscriptions(Node, ClientId) || Node <- emqx:running_nodes()], Filter = fun ({error, _}) -> @@ -340,18 +347,18 @@ client_subscriptions(Node, ClientId) -> {Node, unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId))}. clean_authz_cache(ClientId) -> - Results = [clean_authz_cache(Node, ClientId) || Node <- mria:running_nodes()], + Results = [clean_authz_cache(Node, ClientId) || Node <- emqx:running_nodes()], check_results(Results). 
clean_authz_cache(Node, ClientId) -> unwrap_rpc(emqx_proto_v1:clean_authz_cache(Node, ClientId)). clean_authz_cache_all() -> - Results = [{Node, clean_authz_cache_all(Node)} || Node <- mria:running_nodes()], + Results = [{Node, clean_authz_cache_all(Node)} || Node <- emqx:running_nodes()], wrap_results(Results). clean_pem_cache_all() -> - Results = [{Node, clean_pem_cache_all(Node)} || Node <- mria:running_nodes()], + Results = [{Node, clean_pem_cache_all(Node)} || Node <- emqx:running_nodes()], wrap_results(Results). wrap_results(Results) -> @@ -379,7 +386,7 @@ set_keepalive(_ClientId, _Interval) -> %% @private call_client(ClientId, Req) -> - Results = [call_client(Node, ClientId, Req) || Node <- mria:running_nodes()], + Results = [call_client(Node, ClientId, Req) || Node <- emqx:running_nodes()], Expected = lists:filter( fun ({error, _}) -> false; @@ -428,7 +435,7 @@ list_subscriptions(Node) -> list_subscriptions_via_topic(Topic, FormatFun) -> lists:append([ list_subscriptions_via_topic(Node, Topic, FormatFun) - || Node <- mria:running_nodes() + || Node <- emqx:running_nodes() ]). list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) -> @@ -442,7 +449,7 @@ list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) -> %%-------------------------------------------------------------------- subscribe(ClientId, TopicTables) -> - subscribe(mria:running_nodes(), ClientId, TopicTables). + subscribe(emqx:running_nodes(), ClientId, TopicTables). subscribe([Node | Nodes], ClientId, TopicTables) -> case unwrap_rpc(emqx_management_proto_v3:subscribe(Node, ClientId, TopicTables)) of @@ -467,7 +474,7 @@ publish(Msg) -> -spec unsubscribe(emqx_types:clientid(), emqx_types:topic()) -> {unsubscribe, _} | {error, channel_not_found}. unsubscribe(ClientId, Topic) -> - unsubscribe(mria:running_nodes(), ClientId, Topic). + unsubscribe(emqx:running_nodes(), ClientId, Topic). 
-spec unsubscribe([node()], emqx_types:clientid(), emqx_types:topic()) -> {unsubscribe, _} | {error, channel_not_found}. @@ -490,7 +497,7 @@ do_unsubscribe(ClientId, Topic) -> -spec unsubscribe_batch(emqx_types:clientid(), [emqx_types:topic()]) -> {unsubscribe, _} | {error, channel_not_found}. unsubscribe_batch(ClientId, Topics) -> - unsubscribe_batch(mria:running_nodes(), ClientId, Topics). + unsubscribe_batch(emqx:running_nodes(), ClientId, Topics). -spec unsubscribe_batch([node()], emqx_types:clientid(), [emqx_types:topic()]) -> {unsubscribe_batch, _} | {error, channel_not_found}. @@ -515,7 +522,7 @@ do_unsubscribe_batch(ClientId, Topics) -> %%-------------------------------------------------------------------- get_alarms(Type) -> - [{Node, get_alarms(Node, Type)} || Node <- mria:running_nodes()]. + [{Node, get_alarms(Node, Type)} || Node <- emqx:running_nodes()]. get_alarms(Node, Type) -> add_duration_field(unwrap_rpc(emqx_proto_v1:get_alarms(Node, Type))). @@ -524,7 +531,7 @@ deactivate(Node, Name) -> unwrap_rpc(emqx_proto_v1:deactivate_alarm(Node, Name)). delete_all_deactivated_alarms() -> - [delete_all_deactivated_alarms(Node) || Node <- mria:running_nodes()]. + [delete_all_deactivated_alarms(Node) || Node <- emqx:running_nodes()]. delete_all_deactivated_alarms(Node) -> unwrap_rpc(emqx_proto_v1:delete_all_deactivated_alarms(Node)). 
diff --git a/apps/emqx_management/src/emqx_mgmt_api.erl b/apps/emqx_management/src/emqx_mgmt_api.erl index c77752f7d..8365b983c 100644 --- a/apps/emqx_management/src/emqx_mgmt_api.erl +++ b/apps/emqx_management/src/emqx_mgmt_api.erl @@ -163,7 +163,7 @@ cluster_query(Tab, QString, QSchema, MsFun, FmtFun) -> {error, page_limit_invalid}; Meta -> {_CodCnt, NQString} = parse_qstring(QString, QSchema), - Nodes = mria:running_nodes(), + Nodes = emqx:running_nodes(), ResultAcc = init_query_result(), QueryState = init_query_state(Tab, NQString, MsFun, Meta), NResultAcc = do_cluster_query( diff --git a/apps/emqx_management/src/emqx_mgmt_api_cluster.erl b/apps/emqx_management/src/emqx_mgmt_api_cluster.erl index 68bb6c81d..e74b6c362 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_cluster.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_cluster.erl @@ -101,7 +101,7 @@ cluster_info(get, _) -> ClusterName = application:get_env(ekka, cluster_name, emqxcl), Info = #{ name => ClusterName, - nodes => mria:running_nodes(), + nodes => emqx:running_nodes(), self => node() }, {200, Info}. 
diff --git a/apps/emqx_management/src/emqx_mgmt_api_configs.erl b/apps/emqx_management/src/emqx_mgmt_api_configs.erl index c94dc17b6..bc9aaf768 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_configs.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_configs.erl @@ -43,7 +43,6 @@ <<"sys_topics">>, <<"sysmon">>, <<"limiter">>, - <<"trace">>, <<"log">>, <<"persistent_session_store">>, <<"zones">> @@ -260,7 +259,7 @@ configs(get, Params, _Req) -> QS = maps:get(query_string, Params, #{}), Node = maps:get(<<"node">>, QS, node()), case - lists:member(Node, mria:running_nodes()) andalso + lists:member(Node, emqx:running_nodes()) andalso emqx_management_proto_v2:get_full_config(Node) of false -> diff --git a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl index fdd555bab..152ccc599 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl @@ -485,7 +485,7 @@ err_msg_str(Reason) -> io_lib:format("~p", [Reason]). list_listeners() -> - [list_listeners(Node) || Node <- mria:running_nodes()]. + [list_listeners(Node) || Node <- emqx:running_nodes()]. list_listeners(Node) -> wrap_rpc(emqx_management_proto_v2:list_listeners(Node)). diff --git a/apps/emqx_management/src/emqx_mgmt_api_metrics.erl b/apps/emqx_management/src/emqx_mgmt_api_metrics.erl index 1c5c8f62a..0fcc45d8e 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_metrics.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_metrics.erl @@ -59,7 +59,7 @@ metrics(get, #{query_string := Qs}) -> maps:from_list( emqx_mgmt:get_metrics(Node) ++ [{node, Node}] ) - || Node <- mria:running_nodes() + || Node <- emqx:running_nodes() ], {200, Data} end. 
diff --git a/apps/emqx_management/src/emqx_mgmt_api_stats.erl b/apps/emqx_management/src/emqx_mgmt_api_stats.erl index 1e752aaac..5f4bbce65 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_stats.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_stats.erl @@ -127,21 +127,21 @@ list(get, #{query_string := Qs}) -> true -> {200, emqx_mgmt:get_stats()}; _ -> - Data = [ - maps:from_list(emqx_mgmt:get_stats(Node) ++ [{node, Node}]) - || Node <- running_nodes() - ], + Data = lists:foldl( + fun(Node, Acc) -> + case emqx_mgmt:get_stats(Node) of + {error, _Err} -> + Acc; + Stats when is_list(Stats) -> + Data = maps:from_list([{node, Node} | Stats]), + [Data | Acc] + end + end, + [], + emqx:running_nodes() + ), {200, Data} end. %%%============================================================================================== %% Internal - -running_nodes() -> - Nodes = erlang:nodes([visible, this]), - RpcResults = emqx_proto_v2:are_running(Nodes), - [ - Node - || {Node, IsRunning} <- lists:zip(Nodes, RpcResults), - IsRunning =:= {ok, true} - ]. diff --git a/apps/emqx_management/src/emqx_mgmt_api_status.erl b/apps/emqx_management/src/emqx_mgmt_api_status.erl index 7d5c18e59..c0ee42e2b 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_status.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_status.erl @@ -45,6 +45,17 @@ schema("/status") -> #{ 'operationId' => get_status, get => #{ + parameters => [ + {format, + hoconsc:mk( + string(), + #{ + in => query, + default => <<"text">>, + desc => ?DESC(get_status_api_format) + } + )} + ], description => ?DESC(get_status_api), tags => ?TAGS, security => [], @@ -70,7 +81,16 @@ path() -> "/status". init(Req0, State) -> - {Code, Headers, Body} = running_status(), + Format = + try + QS = cowboy_req:parse_qs(Req0), + {_, F} = lists:keyfind(<<"format">>, 1, QS), + F + catch + _:_ -> + <<"text">> + end, + {Code, Headers, Body} = running_status(Format), Req = cowboy_req:reply(Code, Headers, Body, Req0), {ok, Req, State}. 
@@ -78,29 +98,52 @@ init(Req0, State) -> %% API Handler funcs %%-------------------------------------------------------------------- -get_status(get, _Params) -> - running_status(). +get_status(get, Params) -> + Format = maps:get(<<"format">>, maps:get(query_string, Params, #{}), <<"text">>), + running_status(iolist_to_binary(Format)). -running_status() -> +running_status(Format) -> case emqx_dashboard_listener:is_ready(timer:seconds(20)) of true -> - BrokerStatus = broker_status(), AppStatus = application_status(), - Body = io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), BrokerStatus, AppStatus]), + Body = do_get_status(AppStatus, Format), StatusCode = case AppStatus of running -> 200; not_running -> 503 end, + ContentType = + case Format of + <<"json">> -> <<"application/json">>; + _ -> <<"text/plain">> + end, Headers = #{ - <<"content-type">> => <<"text/plain">>, + <<"content-type">> => ContentType, <<"retry-after">> => <<"15">> }, - {StatusCode, Headers, list_to_binary(Body)}; + {StatusCode, Headers, iolist_to_binary(Body)}; false -> {503, #{<<"retry-after">> => <<"15">>}, <<>>} end. +do_get_status(AppStatus, <<"json">>) -> + BrokerStatus = broker_status(), + emqx_utils_json:encode(#{ + node_name => atom_to_binary(node(), utf8), + rel_vsn => vsn(), + broker_status => atom_to_binary(BrokerStatus), + app_status => atom_to_binary(AppStatus) + }); +do_get_status(AppStatus, _) -> + BrokerStatus = broker_status(), + io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), BrokerStatus, AppStatus]). + +vsn() -> + iolist_to_binary([ + emqx_release:edition_vsn_prefix(), + emqx_release:version() + ]). 
+ broker_status() -> case emqx:is_running() of true -> diff --git a/apps/emqx_management/src/emqx_mgmt_api_trace.erl b/apps/emqx_management/src/emqx_mgmt_api_trace.erl index 5df641add..25cc2734f 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_trace.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_trace.erl @@ -390,7 +390,7 @@ trace(get, _Params) -> fun(#{start_at := A}, #{start_at := B}) -> A > B end, emqx_trace:format(List0) ), - Nodes = mria:running_nodes(), + Nodes = emqx:running_nodes(), TraceSize = wrap_rpc(emqx_mgmt_trace_proto_v2:get_trace_size(Nodes)), AllFileSize = lists:foldl(fun(F, Acc) -> maps:merge(Acc, F) end, #{}, TraceSize), Now = erlang:system_time(second), @@ -464,7 +464,7 @@ format_trace(Trace0) -> LogSize = lists:foldl( fun(Node, Acc) -> Acc#{Node => 0} end, #{}, - mria:running_nodes() + emqx:running_nodes() ), Trace2 = maps:without([enable, filter], Trace1), Trace2#{ @@ -560,13 +560,13 @@ group_trace_file(ZipDir, TraceLog, TraceFiles) -> ). collect_trace_file(undefined, TraceLog) -> - Nodes = mria:running_nodes(), + Nodes = emqx:running_nodes(), wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog)); collect_trace_file(Node, TraceLog) -> wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file([Node], TraceLog)). collect_trace_file_detail(TraceLog) -> - Nodes = mria:running_nodes(), + Nodes = emqx:running_nodes(), wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file_detail(Nodes, TraceLog)). 
wrap_rpc({GoodRes, BadNodes}) -> @@ -696,7 +696,7 @@ parse_node(Query, Default) -> {ok, Default}; {ok, NodeBin} -> Node = binary_to_existing_atom(NodeBin), - true = lists:member(Node, mria:running_nodes()), + true = lists:member(Node, emqx:running_nodes()), {ok, Node} end catch diff --git a/apps/emqx_management/src/emqx_mgmt_cli.erl b/apps/emqx_management/src/emqx_mgmt_cli.erl index 253d527ac..448940904 100644 --- a/apps/emqx_management/src/emqx_mgmt_cli.erl +++ b/apps/emqx_management/src/emqx_mgmt_cli.erl @@ -615,13 +615,18 @@ listeners([]) -> {error, _} -> []; MC -> [{max_conns, MC}] end, + ShutdownCount = + case emqx_listeners:shutdown_count(ID, Bind) of + {error, _} -> []; + SC -> [{shutdown_count, SC}] + end, Info = [ {listen_on, {string, emqx_listeners:format_bind(Bind)}}, {acceptors, Acceptors}, {proxy_protocol, ProxyProtocol}, {running, Running} - ] ++ CurrentConns ++ MaxConn, + ] ++ CurrentConns ++ MaxConn ++ ShutdownCount, emqx_ctl:print("~ts~n", [ID]), lists:foreach(fun indent_print/1, Info) end, diff --git a/apps/emqx_management/test/emqx_mgmt_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_SUITE.erl index 71b51b67c..3eb37060e 100644 --- a/apps/emqx_management/test/emqx_mgmt_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_SUITE.erl @@ -36,16 +36,16 @@ end_per_suite(_) -> emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]). init_per_testcase(TestCase, Config) -> - meck:expect(mria, running_nodes, 0, [node()]), + meck:expect(emqx, running_nodes, 0, [node()]), emqx_common_test_helpers:init_per_testcase(?MODULE, TestCase, Config). end_per_testcase(TestCase, Config) -> - meck:unload(mria), + meck:unload(emqx), emqx_common_test_helpers:end_per_testcase(?MODULE, TestCase, Config). 
t_list_nodes(init, Config) -> meck:expect( - mria, + emqx, cluster_nodes, fun (running) -> [node()]; @@ -125,7 +125,7 @@ t_lookup_client(_Config) -> emqx_mgmt:lookup_client({username, <<"user1">>}, ?FORMATFUN) ), ?assertEqual([], emqx_mgmt:lookup_client({clientid, <<"notfound">>}, ?FORMATFUN)), - meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']), + meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']), ?assertMatch( [_ | {error, nodedown}], emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN) ). @@ -188,7 +188,7 @@ t_clean_cache(_Config) -> {error, _}, emqx_mgmt:clean_pem_cache_all() ), - meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']), + meck:expect(emqx, running_nodes, 0, [node(), 'fake@nonode']), ?assertMatch( {error, [{'fake@nonode', {error, _}}]}, emqx_mgmt:clean_authz_cache_all() diff --git a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl index bacec718d..a53ffc9c4 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl @@ -179,14 +179,14 @@ t_bad_rpc(_) -> ClientLs1 = [start_emqtt_client(node(), I, 1883) || I <- lists:seq(1, 10)], Path = emqx_mgmt_api_test_util:api_path(["clients?limit=2&page=2"]), try - meck:expect(mria, running_nodes, 0, ['fake@nohost']), + meck:expect(emqx, running_nodes, 0, ['fake@nohost']), {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path), %% good cop, bad cop - meck:expect(mria, running_nodes, 0, [node(), 'fake@nohost']), + meck:expect(emqx, running_nodes, 0, [node(), 'fake@nohost']), {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path) after _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1), - meck:unload(mria), + meck:unload(emqx), emqx_mgmt_api_test_util:end_suite() end. 
diff --git a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl index 1638b815a..5a0116a4d 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl @@ -246,7 +246,7 @@ t_dashboard(_Config) -> t_configs_node({'init', Config}) -> Node = node(), - meck:expect(mria, running_nodes, fun() -> [Node, bad_node, other_node] end), + meck:expect(emqx, running_nodes, fun() -> [Node, bad_node, other_node] end), meck:expect( emqx_management_proto_v2, get_full_config, @@ -258,7 +258,7 @@ t_configs_node({'init', Config}) -> ), Config; t_configs_node({'end', _}) -> - meck:unload([mria, emqx_management_proto_v2]); + meck:unload([emqx, emqx_management_proto_v2]); t_configs_node(_) -> Node = atom_to_list(node()), diff --git a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl index 3a26c948e..977c81c2b 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl @@ -196,8 +196,8 @@ t_api_listeners_list_not_ready(Config) when is_list(Config) -> L3 = get_tcp_listeners(Node2), Comment = #{ - node1 => rpc:call(Node1, mria, running_nodes, []), - node2 => rpc:call(Node2, mria, running_nodes, []) + node1 => rpc:call(Node1, emqx, running_nodes, []), + node2 => rpc:call(Node2, emqx, running_nodes, []) }, ?assert(length(L1) > length(L2), Comment), diff --git a/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl index 4099426b8..2550bdbe2 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_stats_SUITE.erl @@ -24,10 +24,12 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Config) -> + meck:expect(emqx, running_nodes, 0, [node(), 'fake@node']), emqx_mgmt_api_test_util:init_suite(), Config. end_per_suite(_) -> + meck:unload(emqx), emqx_mgmt_api_test_util:end_suite(). t_stats_api(_) -> diff --git a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl index f0200c410..e8e0b4ac9 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl @@ -38,7 +38,10 @@ all() -> get_status_tests() -> [ t_status_ok, - t_status_not_ok + t_status_not_ok, + t_status_text_format, + t_status_json_format, + t_status_bad_format_qs ]. groups() -> @@ -87,8 +90,10 @@ do_request(Opts) -> headers := Headers, body := Body0 } = Opts, + QS = maps:get(qs, Opts, ""), URL = ?HOST ++ filename:join(Path0), - {ok, #{host := Host, port := Port, path := Path}} = emqx_http_lib:uri_parse(URL), + {ok, #{host := Host, port := Port, path := Path1}} = emqx_http_lib:uri_parse(URL), + Path = Path1 ++ QS, %% we must not use `httpc' here, because it keeps retrying when it %% receives a 503 with `retry-after' header, and there's no option %% to stop that behavior... @@ -165,3 +170,73 @@ t_status_not_ok(Config) -> Headers ), ok. + +t_status_text_format(Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + qs => "?format=text", + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + {match, _}, + re:run(Resp, <<"emqx is running$">>) + ), + ok. 
+ +t_status_json_format(Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + qs => "?format=json", + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + #{<<"app_status">> := <<"running">>}, + emqx_utils_json:decode(Resp) + ), + ok. + +t_status_bad_format_qs(Config) -> + lists:foreach( + fun(QS) -> + test_status_bad_format_qs(QS, Config) + end, + [ + "?a=b", + "?format=", + "?format=x" + ] + ). + +%% when the query-string is invalid, fall back to text format +test_status_bad_format_qs(QS, Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + qs => QS, + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + {match, _}, + re:run(Resp, <<"emqx is running$">>) + ), + ok. diff --git a/apps/emqx_modules/README.md b/apps/emqx_modules/README.md new file mode 100644 index 000000000..dfa349514 --- /dev/null +++ b/apps/emqx_modules/README.md @@ -0,0 +1,53 @@ +# EMQX Modules + +The application provides some minor functional modules that are not included in the MQTT +protocol standard, including "Delayed Publish", "Topic Rewrite", "Topic Metrics" and "Telemetry". + + +## Delayed Publish + +After enabling this module, messages sent by the clients with the topic prefixed with +`$delayed/{Interval}/{Topic}` will be delayed by `{Interval}` seconds before +being published to the `{Topic}`. + +More introduction about [Delayed Publish](https://www.emqx.io/docs/en/v5.0/mqtt/mqtt-delayed-publish.html). + +See [Enabling/Disabling Delayed Publish via HTTP API](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1delayed/put). + + +## Topic Rewrite + +Topic Rewrite allows users to configure rules to change the topic strings that +the client requests to subscribe or publish. 
+ +This feature is very useful when there's a need to transform between different topic structures. +For example, an old device that has already been issued and cannot +be upgraded may use old topic designs, but for some reason, we adjusted the format of topics. We can use this feature to rewrite the old topics as the new format to eliminate these differences. + +More introduction about [Topic Rewrite](https://www.emqx.io/docs/en/v5.0/mqtt/mqtt-topic-rewrite.html). + +See [List all rewrite rules](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_rewrite/get) +and [Create or Update rewrite rules](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_rewrite/put). + + +## Topic Metrics + +Topic Metrics is used for users to specify monitoring of certain topics and to +count the number of messages, QoS distribution, and rate for all messages on that topic. + +More introduction about [Topic Metrics](https://www.emqx.io/docs/en/v5.0/dashboard/diagnose.html#topic-metrics). + +See HTTP API docs to [List all monitored topics](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_metrics/get), +[Create topic metrics](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_metrics/post) +and [Get the monitored result](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/MQTT/paths/~1mqtt~1topic_metrics~1%7Btopic%7D/get). + + +## Telemetry + +Telemetry is used for collecting non-sensitive information about the EMQX cluster. + +More introduction about [Telemetry](https://www.emqx.io/docs/en/v5.0/telemetry/telemetry.html#telemetry). 
+ +See HTTP API docs to [Enable/Disable telemetry](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Telemetry/paths/~1telemetry~1status/put), +[Get the enabled status](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Telemetry/paths/~1telemetry~1status/get) +and [Get the data of the module collected](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Telemetry/paths/~1telemetry~1data/get). diff --git a/apps/emqx_modules/src/emqx_modules.app.src b/apps/emqx_modules/src/emqx_modules.app.src index 1e38e25d1..b984cf658 100644 --- a/apps/emqx_modules/src/emqx_modules.app.src +++ b/apps/emqx_modules/src/emqx_modules.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_modules, [ {description, "EMQX Modules"}, - {vsn, "5.0.13"}, + {vsn, "5.0.14"}, {modules, []}, {applications, [kernel, stdlib, emqx, emqx_ctl]}, {mod, {emqx_modules_app, []}}, diff --git a/apps/emqx_modules/src/emqx_modules_schema.erl b/apps/emqx_modules/src/emqx_modules_schema.erl index 36a08de60..9057333d5 100644 --- a/apps/emqx_modules/src/emqx_modules_schema.erl +++ b/apps/emqx_modules/src/emqx_modules_schema.erl @@ -36,11 +36,13 @@ roots() -> "telemetry", array("rewrite", #{ desc => "List of topic rewrite rules.", - importance => ?IMPORTANCE_HIDDEN + importance => ?IMPORTANCE_HIDDEN, + default => [] }), array("topic_metrics", #{ desc => "List of topics whose metrics are reported.", - importance => ?IMPORTANCE_HIDDEN + importance => ?IMPORTANCE_HIDDEN, + default => [] }) ]. diff --git a/apps/emqx_modules/src/emqx_topic_metrics.erl b/apps/emqx_modules/src/emqx_topic_metrics.erl index de09e568f..efe309b9e 100644 --- a/apps/emqx_modules/src/emqx_topic_metrics.erl +++ b/apps/emqx_modules/src/emqx_topic_metrics.erl @@ -179,7 +179,12 @@ deregister_all() -> gen_server:call(?MODULE, {deregister, all}). is_registered(Topic) -> - ets:member(?TAB, Topic). + try + ets:member(?TAB, Topic) + catch + error:badarg -> + false + end. 
all_registered_topics() -> [Topic || {Topic, _} <- ets:tab2list(?TAB)]. diff --git a/apps/emqx_modules/test/emqx_delayed_SUITE.erl b/apps/emqx_modules/test/emqx_delayed_SUITE.erl index 59ab0269c..8c271f0c1 100644 --- a/apps/emqx_modules/test/emqx_delayed_SUITE.erl +++ b/apps/emqx_modules/test/emqx_delayed_SUITE.erl @@ -40,9 +40,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Config. diff --git a/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl b/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl index 4ae3dec88..b5995a47a 100644 --- a/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl @@ -32,10 +32,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok = emqx_mgmt_api_test_util:init_suite( [emqx_conf, emqx_modules] ), diff --git a/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl b/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl index 666af9ef0..14e477bf9 100644 --- a/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl +++ b/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl @@ -29,9 +29,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Conf) -> - emqx_common_test_helpers:load_config(emqx_modules_schema, <<"gateway {}">>, #{ - raw_with_default => true - }), + emqx_common_test_helpers:load_config(emqx_modules_schema, <<"gateway {}">>), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Conf. 
diff --git a/apps/emqx_modules/test/emqx_rewrite_SUITE.erl b/apps/emqx_modules/test/emqx_rewrite_SUITE.erl index 1847f876e..aa2c7cad7 100644 --- a/apps/emqx_modules/test/emqx_rewrite_SUITE.erl +++ b/apps/emqx_modules/test/emqx_rewrite_SUITE.erl @@ -73,9 +73,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_common_test_helpers:boot_modules(all), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, #{}, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, #{}), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Config. @@ -160,17 +158,13 @@ t_rewrite_re_error(_Config) -> ok. t_list(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), Expect = maps:get(<<"rewrite">>, ?REWRITE), ?assertEqual(Expect, emqx_rewrite:list()), ok. t_update(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), Init = emqx_rewrite:list(), Rules = [ #{ @@ -186,9 +180,7 @@ t_update(_Config) -> ok. t_update_disable(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), ?assertEqual(ok, emqx_rewrite:update([])), timer:sleep(150), @@ -203,9 +195,7 @@ t_update_disable(_Config) -> ok. t_update_re_failed(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), Re = <<"*^test/*">>, Rules = [ #{ @@ -261,9 +251,7 @@ receive_publish(Timeout) -> end. 
init() -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), ok = emqx_rewrite:enable(), {ok, C} = emqtt:start_link([{clientid, <<"c1">>}, {username, <<"u1">>}]), {ok, _} = emqtt:connect(C), diff --git a/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl b/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl index 68d12a2c1..528102d9e 100644 --- a/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl @@ -33,10 +33,7 @@ init_per_testcase(_, Config) -> Config. init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok = emqx_mgmt_api_test_util:init_suite( [emqx_conf, emqx_modules] ), diff --git a/apps/emqx_modules/test/emqx_telemetry_SUITE.erl b/apps/emqx_modules/test/emqx_telemetry_SUITE.erl index cc9a8b20f..86ea65620 100644 --- a/apps/emqx_modules/test/emqx_telemetry_SUITE.erl +++ b/apps/emqx_modules/test/emqx_telemetry_SUITE.erl @@ -42,9 +42,7 @@ init_per_suite(Config) -> emqx_common_test_helpers:deps_path(emqx_authz, "etc/acl.conf") end ), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_gateway_test_utils:load_all_gateway_apps(), emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], @@ -154,9 +152,7 @@ init_per_testcase(t_exhook_info, Config) -> {ok, _} = emqx_exhook_demo_svr:start(), {ok, Sock} = gen_tcp:connect("localhost", 9000, [], 3000), _ = gen_tcp:close(Sock), - ok = emqx_common_test_helpers:load_config(emqx_exhook_schema, ExhookConf, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_exhook_schema, 
ExhookConf), {ok, _} = application:ensure_all_started(emqx_exhook), Config; init_per_testcase(t_cluster_uuid, Config) -> @@ -177,9 +173,7 @@ init_per_testcase(t_uuid_restored_from_file, Config) -> %% clear the UUIDs in the DB {atomic, ok} = mria:clear_table(emqx_telemetry), emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 @@ -332,9 +326,7 @@ t_uuid_saved_to_file(_Config) -> %% clear the UUIDs in the DB {atomic, ok} = mria:clear_table(emqx_telemetry), emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 @@ -463,16 +455,6 @@ t_num_clients(_Config) -> ok. t_advanced_mqtt_features(_) -> - try - ok = test_advanced_mqtt_features() - catch - _:_ -> - %% delayed messages' metrics might not be reported yet - timer:sleep(1000), - test_advanced_mqtt_features() - end. 
- -test_advanced_mqtt_features() -> {ok, TelemetryData} = emqx_telemetry:get_telemetry(), AdvFeats = get_value(advanced_mqtt_features, TelemetryData), ?assertEqual( @@ -667,8 +649,10 @@ mock_advanced_mqtt_features() -> lists:foreach( fun(N) -> - Num = integer_to_binary(N), - Message = emqx_message:make(<<"$delayed/", Num/binary, "/delayed">>, <<"payload">>), + DelaySec = integer_to_binary(N + 10), + Message = emqx_message:make( + <<"$delayed/", DelaySec/binary, "/delayed">>, <<"payload">> + ), {stop, _} = emqx_delayed:on_message_publish(Message) end, lists:seq(1, 4) @@ -834,15 +818,11 @@ start_slave(Name) -> (emqx) -> application:set_env(emqx, boot_modules, []), ekka:join(TestNode), - emqx_common_test_helpers:load_config( - emqx_modules_schema, ?BASE_CONF, #{raw_with_default => true} - ), + emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok; (_App) -> - emqx_common_test_helpers:load_config( - emqx_modules_schema, ?BASE_CONF, #{raw_with_default => true} - ), + emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok end, Opts = #{ diff --git a/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl b/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl index ac6d12039..c375810b5 100644 --- a/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl @@ -29,10 +29,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok = emqx_mgmt_api_test_util:init_suite( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 diff --git a/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl b/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl index 10ce5d0df..a147f41cd 100644 --- a/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl +++ b/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl @@ -28,9 +28,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_common_test_helpers:boot_modules(all), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?TOPIC, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?TOPIC), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Config. @@ -42,6 +40,9 @@ init_per_testcase(_Case, Config) -> emqx_topic_metrics:deregister_all(), Config. +end_per_testcase(t_metrics_not_started, _Config) -> + _ = supervisor:restart_child(emqx_modules_sup, emqx_topic_metrics), + ok; end_per_testcase(_Case, _Config) -> emqx_topic_metrics:deregister_all(), emqx_config:put([topic_metrics], []), @@ -181,3 +182,10 @@ t_unknown_messages(_) -> OldPid, whereis(emqx_topic_metrics) ). + +t_metrics_not_started(_Config) -> + _ = emqx_topic_metrics:register(<<"a/b/c">>), + ?assert(emqx_topic_metrics:is_registered(<<"a/b/c">>)), + ok = supervisor:terminate_child(emqx_modules_sup, emqx_topic_metrics), + ?assertNot(emqx_topic_metrics:is_registered(<<"a/b/c">>)), + {ok, _} = supervisor:restart_child(emqx_modules_sup, emqx_topic_metrics). 
diff --git a/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl b/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl index 5d64f123d..2e668b6cf 100644 --- a/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl @@ -40,10 +40,7 @@ init_per_testcase(_, Config) -> Config. init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok = emqx_mgmt_api_test_util:init_suite( [emqx_conf, emqx_modules] ), diff --git a/apps/emqx_oracle/BSL.txt b/apps/emqx_oracle/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_oracle/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. 
The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
+ +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_oracle/README.md b/apps/emqx_oracle/README.md new file mode 100644 index 000000000..873d52259 --- /dev/null +++ b/apps/emqx_oracle/README.md @@ -0,0 +1,14 @@ +# Oracle Database Connector + +This application houses the Oracle Database connector for EMQX Enterprise Edition. +It provides the APIs to connect to Oracle Database. + +So far it is only used to insert messages as data bridge. + +## Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +## License + +See [BSL](./BSL.txt). diff --git a/apps/emqx_oracle/rebar.config b/apps/emqx_oracle/rebar.config new file mode 100644 index 000000000..14461ba34 --- /dev/null +++ b/apps/emqx_oracle/rebar.config @@ -0,0 +1,7 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. 
+{deps, [ {jamdb_oracle, {git, "https://github.com/emqx/jamdb_oracle", {tag, "0.4.9.4"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + ]}. diff --git a/apps/emqx_oracle/src/emqx_oracle.app.src b/apps/emqx_oracle/src/emqx_oracle.app.src new file mode 100644 index 000000000..fa48e8479 --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle.app.src @@ -0,0 +1,14 @@ +{application, emqx_oracle, [ + {description, "EMQX Enterprise Oracle Database Connector"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + jamdb_oracle + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_oracle/src/emqx_oracle.erl b/apps/emqx_oracle/src/emqx_oracle.erl new file mode 100644 index 000000000..a0d7169f3 --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle.erl @@ -0,0 +1,367 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_oracle). + +-behaviour(emqx_resource). + +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(ORACLE_DEFAULT_PORT, 1521). + +%%==================================================================== +%% Exports +%%==================================================================== + +%% callbacks for behaviour emqx_resource +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). + +%% callbacks for ecpool +-export([connect/1, prepare_sql_to_conn/2]). + +%% Internal exports used to execute code with ecpool worker +-export([ + query/3, + execute_batch/3, + do_get_status/1 +]). + +-export([ + oracle_host_options/0 +]). + +-define(ACTION_SEND_MESSAGE, send_message). + +-define(SYNC_QUERY_MODE, no_handover). 
+ +-define(ORACLE_HOST_OPTIONS, #{ + default_port => ?ORACLE_DEFAULT_PORT +}). + +-define(MAX_CURSORS, 10). +-define(DEFAULT_POOL_SIZE, 8). +-define(OPT_TIMEOUT, 30000). + +-type prepares() :: #{atom() => binary()}. +-type params_tokens() :: #{atom() => list()}. + +-type state() :: + #{ + pool_name := binary(), + prepare_sql := prepares(), + params_tokens := params_tokens(), + batch_params_tokens := params_tokens() + }. + +% As ecpool is not monitoring the worker's PID when doing a handover_async, the +% request can be lost if worker crashes. Thus, it's better to force requests to +% be sync for now. +callback_mode() -> always_sync. + +is_buffer_supported() -> false. + +-spec on_start(binary(), hoconsc:config()) -> {ok, state()} | {error, _}. +on_start( + InstId, + #{ + server := Server, + database := DB, + sid := Sid, + username := User + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_oracle_connector", + connector => InstId, + config => emqx_utils:redact(Config) + }), + ?tp(oracle_bridge_started, #{instance_id => InstId, config => Config}), + {ok, _} = application:ensure_all_started(ecpool), + {ok, _} = application:ensure_all_started(jamdb_oracle), + jamdb_oracle_conn:set_max_cursors_number(?MAX_CURSORS), + + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, oracle_host_options()), + ServiceName = maps:get(<<"service_name">>, Config, Sid), + Options = [ + {host, Host}, + {port, Port}, + {user, emqx_plugin_libs_rule:str(User)}, + {password, emqx_secret:wrap(maps:get(password, Config, ""))}, + {sid, emqx_plugin_libs_rule:str(Sid)}, + {service_name, emqx_plugin_libs_rule:str(ServiceName)}, + {database, DB}, + {pool_size, maps:get(<<"pool_size">>, Config, ?DEFAULT_POOL_SIZE)}, + {timeout, ?OPT_TIMEOUT}, + {app_name, "EMQX Data To Oracle Database Action"} + ], + PoolName = InstId, + Prepares = parse_prepare_sql(Config), + InitState = #{pool_name => PoolName, prepare_statement => #{}}, + State = maps:merge(InitState, Prepares), + case 
emqx_resource_pool:start(InstId, ?MODULE, Options) of + ok -> + {ok, init_prepare(State)}; + {error, Reason} -> + ?tp( + oracle_connector_start_failed, + #{error => Reason} + ), + {error, Reason} + end. + +on_stop(InstId, #{pool_name := PoolName}) -> + ?SLOG(info, #{ + msg => "stopping_oracle_connector", + connector => InstId + }), + ?tp(oracle_bridge_stopped, #{instance_id => InstId}), + emqx_resource_pool:stop(PoolName). + +on_query(InstId, {TypeOrKey, NameOrSQL}, #{pool_name := _PoolName} = State) -> + on_query(InstId, {TypeOrKey, NameOrSQL, []}, State); +on_query( + InstId, + {TypeOrKey, NameOrSQL, Params}, + #{pool_name := PoolName} = State +) -> + ?SLOG(debug, #{ + msg => "oracle database connector received sql query", + connector => InstId, + type => TypeOrKey, + sql => NameOrSQL, + state => State + }), + Type = query, + {NameOrSQL2, Data} = proc_sql_params(TypeOrKey, NameOrSQL, Params, State), + Res = on_sql_query(InstId, PoolName, Type, ?SYNC_QUERY_MODE, NameOrSQL2, Data), + handle_result(Res). 
+ +on_batch_query( + InstId, + BatchReq, + #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State +) -> + case BatchReq of + [{Key, _} = Request | _] -> + BinKey = to_bin(Key), + case maps:get(BinKey, Tokens, undefined) of + undefined -> + Log = #{ + connector => InstId, + first_request => Request, + state => State, + msg => "batch prepare not implemented" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, batch_prepare_not_implemented}}; + TokenList -> + {_, Datas} = lists:unzip(BatchReq), + Datas2 = [emqx_plugin_libs_rule:proc_sql(TokenList, Data) || Data <- Datas], + St = maps:get(BinKey, Sts), + case + on_sql_query(InstId, PoolName, execute_batch, ?SYNC_QUERY_MODE, St, Datas2) + of + {ok, Results} -> + handle_batch_result(Results, 0); + Result -> + Result + end + end; + _ -> + Log = #{ + connector => InstId, + request => BatchReq, + state => State, + msg => "invalid request" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, invalid_request}} + end. + +proc_sql_params(query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(TypeOrKey, SQLOrData, Params, #{ + params_tokens := ParamsTokens, prepare_sql := PrepareSql +}) -> + Key = to_bin(TypeOrKey), + case maps:get(Key, ParamsTokens, undefined) of + undefined -> + {SQLOrData, Params}; + Tokens -> + case maps:get(Key, PrepareSql, undefined) of + undefined -> + {SQLOrData, Params}; + Sql -> + {Sql, emqx_plugin_libs_rule:proc_sql(Tokens, SQLOrData)} + end + end. 
+ +on_sql_query(InstId, PoolName, Type, ApplyMode, NameOrSQL, Data) -> + case ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Data]}, ApplyMode) of + {error, Reason} = Result -> + ?tp( + oracle_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "oracle database connector do sql query failed", + connector => InstId, + type => Type, + sql => NameOrSQL, + reason => Reason + }), + Result; + Result -> + ?tp( + oracle_connector_query_return, + #{result => Result} + ), + Result + end. + +on_get_status(_InstId, #{pool_name := Pool} = State) -> + case emqx_resource_pool:health_check_workers(Pool, fun ?MODULE:do_get_status/1) of + true -> + case do_check_prepares(State) of + ok -> + connected; + {ok, NState} -> + %% return new state with prepared statements + {connected, NState} + end; + false -> + disconnected + end. + +do_get_status(Conn) -> + ok == element(1, jamdb_oracle:sql_query(Conn, "select 1 from dual")). + +do_check_prepares(#{prepare_sql := Prepares}) when is_map(Prepares) -> + ok; +do_check_prepares(State = #{pool_name := PoolName, prepare_sql := {error, Prepares}}) -> + {ok, Sts} = prepare_sql(Prepares, PoolName), + {ok, State#{prepare_sql => Prepares, prepare_statement := Sts}}. + +%% =================================================================== + +oracle_host_options() -> + ?ORACLE_HOST_OPTIONS. + +connect(Opts) -> + Password = emqx_secret:unwrap(proplists:get_value(password, Opts)), + NewOpts = lists:keyreplace(password, 1, Opts, {password, Password}), + jamdb_oracle:start_link(NewOpts). + +sql_query_to_str(SqlQuery) -> + emqx_plugin_libs_rule:str(SqlQuery). + +sql_params_to_str(Params) when is_list(Params) -> + lists:map( + fun + (false) -> "0"; + (true) -> "1"; + (Value) -> emqx_plugin_libs_rule:str(Value) + end, + Params + ). 
+ +query(Conn, SQL, Params) -> + Ret = jamdb_oracle:sql_query(Conn, {sql_query_to_str(SQL), sql_params_to_str(Params)}), + ?tp(oracle_query, #{conn => Conn, sql => SQL, params => Params, result => Ret}), + handle_result(Ret). + +execute_batch(Conn, SQL, ParamsList) -> + ParamsListStr = lists:map(fun sql_params_to_str/1, ParamsList), + Ret = jamdb_oracle:sql_query(Conn, {batch, sql_query_to_str(SQL), ParamsListStr}), + ?tp(oracle_batch_query, #{conn => Conn, sql => SQL, params => ParamsList, result => Ret}), + handle_result(Ret). + +parse_prepare_sql(Config) -> + SQL = + case maps:get(prepare_statement, Config, undefined) of + undefined -> + case maps:get(sql, Config, undefined) of + undefined -> #{}; + Template -> #{<<"send_message">> => Template} + end; + Any -> + Any + end, + parse_prepare_sql(maps:to_list(SQL), #{}, #{}). + +parse_prepare_sql([{Key, H} | T], Prepares, Tokens) -> + {PrepareSQL, ParamsTokens} = emqx_plugin_libs_rule:preproc_sql(H, ':n'), + parse_prepare_sql( + T, Prepares#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens} + ); +parse_prepare_sql([], Prepares, Tokens) -> + #{ + prepare_sql => Prepares, + params_tokens => Tokens + }. + +init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) -> + {ok, Sts} = prepare_sql(Prepares, PoolName), + State#{prepare_statement := Sts}. + +prepare_sql(Prepares, PoolName) when is_map(Prepares) -> + prepare_sql(maps:to_list(Prepares), PoolName); +prepare_sql(Prepares, PoolName) -> + Data = do_prepare_sql(Prepares, PoolName), + {ok, _Sts} = Data, + ecpool:add_reconnect_callback(PoolName, {?MODULE, prepare_sql_to_conn, [Prepares]}), + Data. + +do_prepare_sql(Prepares, PoolName) -> + do_prepare_sql(ecpool:workers(PoolName), Prepares, PoolName, #{}). 
+ +do_prepare_sql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) -> + {ok, Conn} = ecpool_worker:client(Worker), + {ok, Sts} = prepare_sql_to_conn(Conn, Prepares), + do_prepare_sql(T, Prepares, PoolName, Sts); +do_prepare_sql([], _Prepares, _PoolName, LastSts) -> + {ok, LastSts}. + +prepare_sql_to_conn(Conn, Prepares) -> + prepare_sql_to_conn(Conn, Prepares, #{}). + +prepare_sql_to_conn(Conn, [], Statements) when is_pid(Conn) -> {ok, Statements}; +prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Conn) -> + LogMeta = #{msg => "Oracle Database Prepare Statement", name => Key, prepare_sql => SQL}, + ?SLOG(info, LogMeta), + prepare_sql_to_conn(Conn, PrepareList, Statements#{Key => SQL}). + +to_bin(Bin) when is_binary(Bin) -> + Bin; +to_bin(Atom) when is_atom(Atom) -> + erlang:atom_to_binary(Atom). + +handle_result({error, disconnected}) -> + {error, {recoverable_error, disconnected}}; +handle_result({error, Error}) -> + {error, {unrecoverable_error, Error}}; +handle_result({error, socket, closed} = Error) -> + {error, {recoverable_error, Error}}; +handle_result({error, Type, Reason}) -> + {error, {unrecoverable_error, {Type, Reason}}}; +handle_result(Res) -> + Res. + +handle_batch_result([{affected_rows, RowCount} | Rest], Acc) -> + handle_batch_result(Rest, Acc + RowCount); +handle_batch_result([{proc_result, RetCode, _Rows} | Rest], Acc) when RetCode =:= 0 -> + handle_batch_result(Rest, Acc); +handle_batch_result([{proc_result, RetCode, Reason} | _Rest], _Acc) -> + {error, {unrecoverable_error, {RetCode, Reason}}}; +handle_batch_result([], Acc) -> + {ok, Acc}. diff --git a/apps/emqx_oracle/src/emqx_oracle_schema.erl b/apps/emqx_oracle/src/emqx_oracle_schema.erl new file mode 100644 index 000000000..cfa74054a --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle_schema.erl @@ -0,0 +1,33 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- + +-module(emqx_oracle_schema). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-define(REF_MODULE, emqx_oracle). + +%% Hocon config schema exports +-export([ + roots/0, + fields/1 +]). + +roots() -> + [{config, #{type => hoconsc:ref(?REF_MODULE, config)}}]. + +fields(config) -> + [{server, server()}, {sid, fun sid/1}] ++ + emqx_connector_schema_lib:relational_db_fields() ++ + emqx_connector_schema_lib:prepare_statement_fields(). + +server() -> + Meta = #{desc => ?DESC(?REF_MODULE, "server")}, + emqx_schema:servers_sc(Meta, (?REF_MODULE):oracle_host_options()). + +sid(type) -> binary(); +sid(desc) -> ?DESC(?REF_MODULE, "sid"); +sid(required) -> true; +sid(_) -> undefined. diff --git a/apps/emqx_plugin_libs/src/emqx_placeholder.erl b/apps/emqx_plugin_libs/src/emqx_placeholder.erl index 18ef9e8fb..dcd666f5b 100644 --- a/apps/emqx_plugin_libs/src/emqx_placeholder.erl +++ b/apps/emqx_plugin_libs/src/emqx_placeholder.erl @@ -69,7 +69,7 @@ -type preproc_sql_opts() :: #{ placeholders => list(binary()), - replace_with => '?' | '$n', + replace_with => '?' | '$n' | ':n', strip_double_quote => boolean() }. @@ -149,7 +149,7 @@ proc_cmd(Tokens, Data, Opts) -> preproc_sql(Sql) -> preproc_sql(Sql, '?'). --spec preproc_sql(binary(), '?' | '$n' | preproc_sql_opts()) -> +-spec preproc_sql(binary(), '?' | '$n' | ':n' | preproc_sql_opts()) -> {prepare_statement_key(), tmpl_token()}. 
preproc_sql(Sql, ReplaceWith) when is_atom(ReplaceWith) -> preproc_sql(Sql, #{replace_with => ReplaceWith}); @@ -316,13 +316,17 @@ preproc_tmpl_deep_map_key(Key, _) -> replace_with(Tmpl, RE, '?') -> re:replace(Tmpl, RE, "?", [{return, binary}, global]); replace_with(Tmpl, RE, '$n') -> + replace_with(Tmpl, RE, <<"$">>); +replace_with(Tmpl, RE, ':n') -> + replace_with(Tmpl, RE, <<":">>); +replace_with(Tmpl, RE, String) when is_binary(String) -> Parts = re:split(Tmpl, RE, [{return, binary}, trim, group]), {Res, _} = lists:foldl( fun ([Tkn, _Phld], {Acc, Seq}) -> Seq1 = erlang:integer_to_binary(Seq), - {<<Acc/binary, Tkn/binary, "$", Seq1/binary>>, Seq + 1}; + {<<Acc/binary, Tkn/binary, String/binary, Seq1/binary>>, Seq + 1}; ([Tkn], {Acc, Seq}) -> {<<Acc/binary, Tkn/binary>>, Seq} end, @@ -330,6 +334,7 @@ replace_with(Tmpl, RE, '$n') -> Parts ), Res. + parse_nested(<<".", R/binary>>) -> %% ignore the root . parse_nested(R); diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src index 24b5a3240..bfd7e68fa 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_plugin_libs, [ {description, "EMQX Plugin utility libs"}, - {vsn, "4.3.9"}, + {vsn, "4.3.10"}, {modules, []}, {applications, [kernel, stdlib]}, {env, []} diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl index 8844fe586..9a4c01a2b 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl @@ -105,9 +105,8 @@ proc_cmd(Tokens, Data, Opts) -> preproc_sql(Sql) -> emqx_placeholder:preproc_sql(Sql). --spec preproc_sql(Sql :: binary(), ReplaceWith :: '?' | '$n') -> +-spec preproc_sql(Sql :: binary(), ReplaceWith :: '?' | '$n' | ':n') -> {prepare_statement_key(), tmpl_token()}. - preproc_sql(Sql, ReplaceWith) -> emqx_placeholder:preproc_sql(Sql, ReplaceWith). 
diff --git a/apps/emqx_plugins/README.md b/apps/emqx_plugins/README.md new file mode 100644 index 000000000..9c8faccd1 --- /dev/null +++ b/apps/emqx_plugins/README.md @@ -0,0 +1,12 @@ +# Plugins Management + +This application provides the feature for users to upload and install custom, Erlang-based plugins. + +More introduction about [Plugins](https://www.emqx.io/docs/en/v5.0/extensions/plugins.html#develop-emqx-plugins) + +See HTTP API to learn how to [Install/Uninstall a Plugin](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Plugins) + +## Plugin Template + +We provide a [plugin template](https://github.com/emqx/emqx-plugin-template) that +you can use to learn how to write and package custom plugins. diff --git a/apps/emqx_plugins/src/emqx_plugins.app.src b/apps/emqx_plugins/src/emqx_plugins.app.src index c0372c003..d5c16ea59 100644 --- a/apps/emqx_plugins/src/emqx_plugins.app.src +++ b/apps/emqx_plugins/src/emqx_plugins.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_plugins, [ {description, "EMQX Plugin Management"}, - {vsn, "0.1.3"}, + {vsn, "0.1.4"}, {modules, []}, {mod, {emqx_plugins_app, []}}, {applications, [kernel, stdlib, emqx]}, diff --git a/apps/emqx_plugins/src/emqx_plugins.erl b/apps/emqx_plugins/src/emqx_plugins.erl index 264247086..04faa44e9 100644 --- a/apps/emqx_plugins/src/emqx_plugins.erl +++ b/apps/emqx_plugins/src/emqx_plugins.erl @@ -479,22 +479,39 @@ ensure_exists_and_installed(NameVsn) -> case filelib:is_dir(dir(NameVsn)) of true -> ok; - _ -> - Nodes = [N || N <- mria:running_nodes(), N /= node()], - case get_from_any_node(Nodes, NameVsn, []) of + false -> + %% Do we have the package, but it's not extracted yet? 
+ case get_tar(NameVsn) of {ok, TarContent} -> ok = file:write_file(pkg_file(NameVsn), TarContent), ok = do_ensure_installed(NameVsn); - {error, NodeErrors} -> - ?SLOG(error, #{ - msg => "failed_to_copy_plugin_from_other_nodes", - name_vsn => NameVsn, - node_errors => NodeErrors - }), - {error, plugin_not_found} + _ -> + %% If not, try to get it from the cluster. + do_get_from_cluster(NameVsn) end end. +do_get_from_cluster(NameVsn) -> + Nodes = [N || N <- mria:running_nodes(), N /= node()], + case get_from_any_node(Nodes, NameVsn, []) of + {ok, TarContent} -> + ok = file:write_file(pkg_file(NameVsn), TarContent), + ok = do_ensure_installed(NameVsn); + {error, NodeErrors} when Nodes =/= [] -> + ?SLOG(error, #{ + msg => "failed_to_copy_plugin_from_other_nodes", + name_vsn => NameVsn, + node_errors => NodeErrors + }), + {error, plugin_not_found}; + {error, _} -> + ?SLOG(error, #{ + msg => "no_nodes_to_copy_plugin_from", + name_vsn => NameVsn + }), + {error, plugin_not_found} + end. + get_from_any_node([], _NameVsn, Errors) -> {error, Errors}; get_from_any_node([Node | T], NameVsn, Errors) -> diff --git a/apps/emqx_plugins/src/emqx_plugins_schema.erl b/apps/emqx_plugins/src/emqx_plugins_schema.erl index 9d9d045de..b86f6b6c1 100644 --- a/apps/emqx_plugins/src/emqx_plugins_schema.erl +++ b/apps/emqx_plugins/src/emqx_plugins_schema.erl @@ -29,7 +29,7 @@ namespace() -> "plugin". -roots() -> [?CONF_ROOT]. +roots() -> [{?CONF_ROOT, ?HOCON(?R_REF(?CONF_ROOT), #{importance => ?IMPORTANCE_LOW})}]. fields(?CONF_ROOT) -> #{ @@ -73,16 +73,19 @@ states(type) -> ?ARRAY(?R_REF(state)); states(required) -> false; states(default) -> []; states(desc) -> ?DESC(states); +states(importance) -> ?IMPORTANCE_HIGH; states(_) -> undefined. 
install_dir(type) -> string(); install_dir(required) -> false; -%% runner's root dir +%% runner's root dir todo move to data dir in 5.1 install_dir(default) -> <<"plugins">>; -install_dir(T) when T =/= desc -> undefined; -install_dir(desc) -> ?DESC(install_dir). +install_dir(desc) -> ?DESC(install_dir); +install_dir(importance) -> ?IMPORTANCE_LOW; +install_dir(_) -> undefined. check_interval(type) -> emqx_schema:duration(); check_interval(default) -> <<"5s">>; -check_interval(T) when T =/= desc -> undefined; -check_interval(desc) -> ?DESC(check_interval). +check_interval(desc) -> ?DESC(check_interval); +check_interval(deprecated) -> {since, "5.0.24"}; +check_interval(_) -> undefined. diff --git a/apps/emqx_plugins/test/emqx_plugins_SUITE.erl b/apps/emqx_plugins/test/emqx_plugins_SUITE.erl index 260ad1681..14d6d06fc 100644 --- a/apps/emqx_plugins/test/emqx_plugins_SUITE.erl +++ b/apps/emqx_plugins/test/emqx_plugins_SUITE.erl @@ -45,7 +45,10 @@ all() -> groups() -> [ - {copy_plugin, [sequence], [group_t_copy_plugin_to_a_new_node]}, + {copy_plugin, [sequence], [ + group_t_copy_plugin_to_a_new_node, + group_t_copy_plugin_to_a_new_node_single_node + ]}, {create_tar_copy_plugin, [sequence], [group_t_copy_plugin_to_a_new_node]} ]. @@ -601,6 +604,78 @@ group_t_copy_plugin_to_a_new_node(Config) -> rpc:call(CopyToNode, emqx_plugins, describe, [NameVsn]) ). +%% checks that we can start a cluster with a lone node. 
+group_t_copy_plugin_to_a_new_node_single_node({init, Config}) -> + PrivDataDir = ?config(priv_dir, Config), + ToInstallDir = filename:join(PrivDataDir, "plugins_copy_to"), + file:del_dir_r(ToInstallDir), + ok = filelib:ensure_path(ToInstallDir), + #{package := Package, release_name := PluginName} = get_demo_plugin_package(ToInstallDir), + NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX), + [{CopyTo, CopyToOpts}] = + emqx_common_test_helpers:emqx_cluster( + [ + {core, plugins_copy_to} + ], + #{ + apps => [emqx_conf, emqx_plugins], + env => [ + {emqx, init_config_load_done, false}, + {emqx, boot_modules, []} + ], + env_handler => fun + (emqx_plugins) -> + ok = emqx_plugins:put_config(install_dir, ToInstallDir), + %% this is to simulate an user setting the state + %% via environment variables before starting the node + ok = emqx_plugins:put_config( + states, + [#{name_vsn => NameVsn, enable => true}] + ), + ok; + (_) -> + ok + end, + priv_data_dir => PrivDataDir, + schema_mod => emqx_conf_schema, + peer_mod => slave, + load_schema => true + } + ), + [ + {to_install_dir, ToInstallDir}, + {copy_to_node_name, CopyTo}, + {copy_to_opts, CopyToOpts}, + {name_vsn, NameVsn}, + {plugin_name, PluginName} + | Config + ]; +group_t_copy_plugin_to_a_new_node_single_node({'end', Config}) -> + CopyToNode = proplists:get_value(copy_to_node, Config), + ok = emqx_common_test_helpers:stop_slave(CopyToNode), + ok = file:del_dir_r(proplists:get_value(to_install_dir, Config)), + ok; +group_t_copy_plugin_to_a_new_node_single_node(Config) -> + CopyTo = ?config(copy_to_node_name, Config), + CopyToOpts = ?config(copy_to_opts, Config), + ToInstallDir = ?config(to_install_dir, Config), + NameVsn = proplists:get_value(name_vsn, Config), + %% Start the node for the first time. The plugin should start + %% successfully even if it's not extracted yet. Simply starting + %% the node would crash if not working properly. 
+ CopyToNode = emqx_common_test_helpers:start_slave(CopyTo, CopyToOpts), + ct:pal("~p config:\n ~p", [ + CopyToNode, erpc:call(CopyToNode, emqx_plugins, get_config, [[], #{}]) + ]), + ct:pal("~p install_dir:\n ~p", [ + CopyToNode, erpc:call(CopyToNode, file, list_dir, [ToInstallDir]) + ]), + ?assertMatch( + {ok, #{running_status := running, config_status := enabled}}, + rpc:call(CopyToNode, emqx_plugins, describe, [NameVsn]) + ), + ok. + make_tar(Cwd, NameWithVsn) -> make_tar(Cwd, NameWithVsn, NameWithVsn). diff --git a/apps/emqx_prometheus/README.md b/apps/emqx_prometheus/README.md index ddff9774c..c008adc81 100644 --- a/apps/emqx_prometheus/README.md +++ b/apps/emqx_prometheus/README.md @@ -1,279 +1,16 @@ -# emqx-prometheus +# EMQX Prometheus Agent -EMQX Prometheus Agent - -## push emqx stats/metrics to prometheus PushGateway - -``` -prometheus.push.gateway.server = http://127.0.0.1:9091 - -prometheus.interval = 15000 -``` - -## pull emqx stats/metrics - -``` -Method: GET -Path: api/v4/emqx_prometheus?type=prometheus -params: type: [prometheus| json] - -prometheus data - -# TYPE erlang_vm_ets_limit gauge -erlang_vm_ets_limit 256000 -# TYPE erlang_vm_logical_processors gauge -erlang_vm_logical_processors 4 -# TYPE erlang_vm_logical_processors_available gauge -erlang_vm_logical_processors_available NaN -# TYPE erlang_vm_logical_processors_online gauge -erlang_vm_logical_processors_online 4 -# TYPE erlang_vm_port_count gauge -erlang_vm_port_count 17 -# TYPE erlang_vm_port_limit gauge -erlang_vm_port_limit 1048576 +This application provides the ability to integrate with Prometheus. It provides +an HTTP API for collecting metrics of the current node +and also supports configuring a Push Gateway URL address for pushing these metrics. 
-json data +More introduction about [Integrate with Prometheus](https://www.emqx.io/docs/en/v5.0/observability/prometheus.html#integrate-with-prometheus) -{ - "stats": {key:value}, - "metrics": {key:value}, - "packets": {key:value}, - "messages": {key:value}, - "delivery": {key:value}, - "client": {key:value}, - "session": {key:value} -} - -``` +See HTTP API docs to learn how to +[Update Prometheus config](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Monitor/paths/~1prometheus/put) +and [Get all metrics data](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Monitor/paths/~1prometheus~1stats/get). -## Before EMQX v4.0.0 -The prometheus data simple is: - - -```bash -# TYPE erlang_vm_ets_limit gauge -erlang_vm_ets_limit 2097152 -# TYPE erlang_vm_logical_processors gauge -erlang_vm_logical_processors 2 -# TYPE erlang_vm_logical_processors_available gauge -erlang_vm_logical_processors_available 2 -# TYPE erlang_vm_logical_processors_online gauge -erlang_vm_logical_processors_online 2 -# TYPE erlang_vm_port_count gauge -erlang_vm_port_count 19 -# TYPE erlang_vm_port_limit gauge -erlang_vm_port_limit 1048576 -# TYPE erlang_vm_process_count gauge -erlang_vm_process_count 460 -# TYPE erlang_vm_process_limit gauge -erlang_vm_process_limit 2097152 -# TYPE erlang_vm_schedulers gauge -erlang_vm_schedulers 2 -# TYPE erlang_vm_schedulers_online gauge -erlang_vm_schedulers_online 2 -# TYPE erlang_vm_smp_support untyped -erlang_vm_smp_support 1 -# TYPE erlang_vm_threads untyped -erlang_vm_threads 1 -# TYPE erlang_vm_thread_pool_size gauge -erlang_vm_thread_pool_size 32 -# TYPE erlang_vm_time_correction untyped -erlang_vm_time_correction 1 -# TYPE erlang_vm_statistics_context_switches counter -erlang_vm_statistics_context_switches 39850 -# TYPE erlang_vm_statistics_garbage_collection_number_of_gcs counter -erlang_vm_statistics_garbage_collection_number_of_gcs 17116 -# TYPE erlang_vm_statistics_garbage_collection_words_reclaimed counter 
-erlang_vm_statistics_garbage_collection_words_reclaimed 55711819 -# TYPE erlang_vm_statistics_garbage_collection_bytes_reclaimed counter -erlang_vm_statistics_garbage_collection_bytes_reclaimed 445694552 -# TYPE erlang_vm_statistics_bytes_received_total counter -erlang_vm_statistics_bytes_received_total 400746 -# TYPE erlang_vm_statistics_bytes_output_total counter -erlang_vm_statistics_bytes_output_total 337197 -# TYPE erlang_vm_statistics_reductions_total counter -erlang_vm_statistics_reductions_total 21157980 -# TYPE erlang_vm_statistics_run_queues_length_total gauge -erlang_vm_statistics_run_queues_length_total 0 -# TYPE erlang_vm_statistics_runtime_milliseconds counter -erlang_vm_statistics_runtime_milliseconds 6559 -# TYPE erlang_vm_statistics_wallclock_time_milliseconds counter -erlang_vm_statistics_wallclock_time_milliseconds 261243 -# TYPE erlang_vm_memory_atom_bytes_total gauge -erlang_vm_memory_atom_bytes_total{usage="used"} 1814822 -erlang_vm_memory_atom_bytes_total{usage="free"} 22459 -# TYPE erlang_vm_memory_bytes_total gauge -erlang_vm_memory_bytes_total{kind="system"} 109820104 -erlang_vm_memory_bytes_total{kind="processes"} 44983656 -# TYPE erlang_vm_dets_tables gauge -erlang_vm_dets_tables 1 -# TYPE erlang_vm_ets_tables gauge -erlang_vm_ets_tables 139 -# TYPE erlang_vm_memory_processes_bytes_total gauge -erlang_vm_memory_processes_bytes_total{usage="used"} 44983656 -erlang_vm_memory_processes_bytes_total{usage="free"} 0 -# TYPE erlang_vm_memory_system_bytes_total gauge -erlang_vm_memory_system_bytes_total{usage="atom"} 1837281 -erlang_vm_memory_system_bytes_total{usage="binary"} 595872 -erlang_vm_memory_system_bytes_total{usage="code"} 40790577 -erlang_vm_memory_system_bytes_total{usage="ets"} 37426896 -erlang_vm_memory_system_bytes_total{usage="other"} 29169478 -# TYPE erlang_mnesia_held_locks gauge -erlang_mnesia_held_locks 0 -# TYPE erlang_mnesia_lock_queue gauge -erlang_mnesia_lock_queue 0 -# TYPE erlang_mnesia_transaction_participants gauge 
-erlang_mnesia_transaction_participants 0 -# TYPE erlang_mnesia_transaction_coordinators gauge -erlang_mnesia_transaction_coordinators 0 -# TYPE erlang_mnesia_failed_transactions counter -erlang_mnesia_failed_transactions 2 -# TYPE erlang_mnesia_committed_transactions counter -erlang_mnesia_committed_transactions 239 -# TYPE erlang_mnesia_logged_transactions counter -erlang_mnesia_logged_transactions 60 -# TYPE erlang_mnesia_restarted_transactions counter -erlang_mnesia_restarted_transactions 0 -# TYPE emqx_packets_auth_received counter -emqx_packets_auth_received 0 -# TYPE emqx_packets_auth_sent counter -emqx_packets_auth_sent 0 -# TYPE emqx_packets_received counter -emqx_packets_received 0 -# TYPE emqx_packets_sent counter -emqx_packets_sent 0 -# TYPE emqx_packets_connect counter -emqx_packets_connect 0 -# TYPE emqx_packets_connack_sent counter -emqx_packets_connack_sent 0 -# TYPE emqx_packets_connack_error counter -emqx_packets_connack_error 0 -# TYPE emqx_packets_connack_auth_error counter -emqx_packets_connack_auth_error 0 -# TYPE emqx_packets_disconnect_received counter -emqx_packets_disconnect_received 0 -# TYPE emqx_packets_disconnect_sent counter -emqx_packets_disconnect_sent 0 -# TYPE emqx_packets_subscribe counter -emqx_packets_subscribe 0 -# TYPE emqx_packets_subscribe_error counter -emqx_packets_subscribe_error 0 -# TYPE emqx_packets_subscribe_auth_error counter -emqx_packets_subscribe_auth_error 0 -# TYPE emqx_packets_suback counter -emqx_packets_suback 0 -# TYPE emqx_packets_unsubscribe counter -emqx_packets_unsubscribe 0 -# TYPE emqx_packets_unsubscribe_error counter -emqx_packets_unsubscribe_error 0 -# TYPE emqx_packets_unsuback counter -emqx_packets_unsuback 0 -# TYPE emqx_packets_publish_received counter -emqx_packets_publish_received 0 -# TYPE emqx_packets_publish_sent counter -emqx_packets_publish_sent 0 -# TYPE emqx_packets_publish_auth_error counter -emqx_packets_publish_auth_error 0 -# TYPE emqx_packets_publish_error counter 
-emqx_packets_publish_error 0 -# TYPE emqx_packets_puback_received counter -emqx_packets_puback_received 0 -# TYPE emqx_packets_puback_sent counter -emqx_packets_puback_sent 0 -# TYPE emqx_packets_puback_missed counter -emqx_packets_puback_missed 0 -# TYPE emqx_packets_pubrec_received counter -emqx_packets_pubrec_received 0 -# TYPE emqx_packets_pubrec_sent counter -emqx_packets_pubrec_sent 0 -# TYPE emqx_packets_pubrec_missed counter -emqx_packets_pubrec_missed 0 -# TYPE emqx_packets_pubrel_received counter -emqx_packets_pubrel_received 0 -# TYPE emqx_packets_pubrel_sent counter -emqx_packets_pubrel_sent 0 -# TYPE emqx_packets_pubrel_missed counter -emqx_packets_pubrel_missed 0 -# TYPE emqx_packets_pubcomp_received counter -emqx_packets_pubcomp_received 0 -# TYPE emqx_packets_pubcomp_sent counter -emqx_packets_pubcomp_sent 0 -# TYPE emqx_packets_pubcomp_missed counter -emqx_packets_pubcomp_missed 0 -# TYPE emqx_packets_pingreq counter -emqx_packets_pingreq 0 -# TYPE emqx_packets_pingresp counter -emqx_packets_pingresp 0 -# TYPE emqx_bytes_received counter -emqx_bytes_received 0 -# TYPE emqx_bytes_sent counter -emqx_bytes_sent 0 -# TYPE emqx_connections_count gauge -emqx_connections_count 0 -# TYPE emqx_connections_max gauge -emqx_connections_max 0 -# TYPE emqx_retained_count gauge -emqx_retained_count 3 -# TYPE emqx_retained_max gauge -emqx_retained_max 3 -# TYPE emqx_sessions_count gauge -emqx_sessions_count 0 -# TYPE emqx_sessions_max gauge -emqx_sessions_max 0 -# TYPE emqx_subscriptions_count gauge -emqx_subscriptions_count 0 -# TYPE emqx_subscriptions_max gauge -emqx_subscriptions_max 0 -# TYPE emqx_topics_count gauge -emqx_topics_count 0 -# TYPE emqx_topics_max gauge -emqx_topics_max 0 -# TYPE emqx_vm_cpu_use gauge -emqx_vm_cpu_use 100.0 -# TYPE emqx_vm_cpu_idle gauge -emqx_vm_cpu_idle 0.0 -# TYPE emqx_vm_run_queue gauge -emqx_vm_run_queue 1 -# TYPE emqx_vm_process_messages_in_queues gauge -emqx_vm_process_messages_in_queues 0 -# TYPE emqx_messages_received 
counter -emqx_messages_received 0 -# TYPE emqx_messages_sent counter -emqx_messages_sent 0 -# TYPE emqx_messages_dropped counter -emqx_messages_dropped 0 -# TYPE emqx_messages_retained counter -emqx_messages_retained 3 -# TYPE emqx_messages_qos0_received counter -emqx_messages_qos0_received 0 -# TYPE emqx_messages_qos0_sent counter -emqx_messages_qos0_sent 0 -# TYPE emqx_messages_qos1_received counter -emqx_messages_qos1_received 0 -# TYPE emqx_messages_qos1_sent counter -emqx_messages_qos1_sent 0 -# TYPE emqx_messages_qos2_received counter -emqx_messages_qos2_received 0 -# TYPE emqx_messages_qos2_expired counter -emqx_messages_qos2_expired 0 -# TYPE emqx_messages_qos2_sent counter -emqx_messages_qos2_sent 0 -# TYPE emqx_messages_qos2_dropped counter -emqx_messages_qos2_dropped 0 -# TYPE emqx_messages_forward counter -emqx_messages_forward 0 -``` - - -License -------- - -Apache License Version 2.0 - -Author ------- - -EMQX Team. - +Correspondingly, we have also provided a [Grafana template](https://grafana.com/grafana/dashboards/17446-emqx/) +for visualizing these metrics. diff --git a/apps/emqx_prometheus/src/emqx_prometheus.app.src b/apps/emqx_prometheus/src/emqx_prometheus.app.src index ae879da8f..f94b22d81 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.app.src +++ b/apps/emqx_prometheus/src/emqx_prometheus.app.src @@ -2,7 +2,7 @@ {application, emqx_prometheus, [ {description, "Prometheus for EMQX"}, % strict semver, bump manually! - {vsn, "5.0.9"}, + {vsn, "5.0.10"}, {modules, []}, {registered, [emqx_prometheus_sup]}, {applications, [kernel, stdlib, prometheus, emqx, emqx_management]}, diff --git a/apps/emqx_prometheus/src/emqx_prometheus.erl b/apps/emqx_prometheus/src/emqx_prometheus.erl index 05d9508b6..d999f294e 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus.erl @@ -599,8 +599,8 @@ emqx_cluster() -> ]. 
emqx_cluster_data() -> - Running = mria:cluster_nodes(running), - Stopped = mria:cluster_nodes(stopped), + Running = emqx:cluster_nodes(running), + Stopped = emqx:cluster_nodes(stopped), [ {nodes_running, length(Running)}, {nodes_stopped, length(Stopped)} diff --git a/apps/emqx_prometheus/src/emqx_prometheus_api.erl b/apps/emqx_prometheus/src/emqx_prometheus_api.erl index 945c6eba9..d3bfc0224 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus_api.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus_api.erl @@ -122,13 +122,7 @@ prometheus_config_example() -> interval => "15s", push_gateway_server => <<"http://127.0.0.1:9091">>, headers => #{'header-name' => 'header-value'}, - job_name => <<"${name}/instance/${name}~${host}">>, - vm_dist_collector => enabled, - mnesia_collector => enabled, - vm_statistics_collector => enabled, - vm_system_info_collector => enabled, - vm_memory_collector => enabled, - vm_msacc_collector => enabled + job_name => <<"${name}/instance/${name}~${host}">> }. prometheus_data_schema() -> diff --git a/apps/emqx_resource/include/emqx_resource.hrl b/apps/emqx_resource/include/emqx_resource.hrl index 91572eac3..e6f86fb59 100644 --- a/apps/emqx_resource/include/emqx_resource.hrl +++ b/apps/emqx_resource/include/emqx_resource.hrl @@ -15,7 +15,6 @@ %%-------------------------------------------------------------------- -type resource_type() :: module(). -type resource_id() :: binary(). --type manager_id() :: binary(). -type raw_resource_config() :: binary() | raw_term_resource_config(). -type raw_term_resource_config() :: #{binary() => term()} | [raw_term_resource_config()]. -type resource_config() :: term(). 
diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 00c315714..3e264cb3e 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.13"}, + {vsn, "0.1.15"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ diff --git a/apps/emqx_resource/src/emqx_resource.erl b/apps/emqx_resource/src/emqx_resource.erl index d8b91942b..7c48e8ee4 100644 --- a/apps/emqx_resource/src/emqx_resource.erl +++ b/apps/emqx_resource/src/emqx_resource.erl @@ -113,7 +113,10 @@ -export([apply_reply_fun/2]). --export_type([resource_data/0]). +-export_type([ + resource_id/0, + resource_data/0 +]). -optional_callbacks([ on_query/3, @@ -362,11 +365,11 @@ is_buffer_supported(Module) -> false end. --spec call_start(manager_id(), module(), resource_config()) -> +-spec call_start(resource_id(), module(), resource_config()) -> {ok, resource_state()} | {error, Reason :: term()}. -call_start(MgrId, Mod, Config) -> +call_start(ResId, Mod, Config) -> try - Mod:on_start(MgrId, Config) + Mod:on_start(ResId, Config) catch throw:Error -> {error, Error}; @@ -374,17 +377,17 @@ call_start(MgrId, Mod, Config) -> {error, #{exception => Kind, reason => Error, stacktrace => Stacktrace}} end. --spec call_health_check(manager_id(), module(), resource_state()) -> +-spec call_health_check(resource_id(), module(), resource_state()) -> resource_status() | {resource_status(), resource_state()} | {resource_status(), resource_state(), term()} | {error, term()}. -call_health_check(MgrId, Mod, ResourceState) -> - ?SAFE_CALL(Mod:on_get_status(MgrId, ResourceState)). +call_health_check(ResId, Mod, ResourceState) -> + ?SAFE_CALL(Mod:on_get_status(ResId, ResourceState)). --spec call_stop(manager_id(), module(), resource_state()) -> term(). 
-call_stop(MgrId, Mod, ResourceState) -> - ?SAFE_CALL(Mod:on_stop(MgrId, ResourceState)). +-spec call_stop(resource_id(), module(), resource_state()) -> term(). +call_stop(ResId, Mod, ResourceState) -> + ?SAFE_CALL(Mod:on_stop(ResId, ResourceState)). -spec check_config(resource_type(), raw_resource_config()) -> {ok, resource_config()} | {error, term()}. diff --git a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl index d9cca8f03..2dd14c46b 100644 --- a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl +++ b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl @@ -1463,7 +1463,7 @@ mark_inflight_items_as_retriable(Data, WorkerMRef) -> end ), _NumAffected = ets:select_replace(InflightTID, MatchSpec), - ?tp(buffer_worker_worker_down_update, #{num_affected => _NumAffected}), + ?tp(buffer_worker_async_agent_down, #{num_affected => _NumAffected}), ok. %% used to update a batch after dropping expired individual queries. diff --git a/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl b/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl index ae30c3927..104ad7ade 100644 --- a/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl +++ b/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl @@ -52,6 +52,7 @@ init([]) -> ChildSpecs = [], {ok, {SupFlags, ChildSpecs}}. +-spec start_workers(emqx_resource:resource_id(), _Opts :: #{atom() => _}) -> ok. start_workers(ResId, Opts) -> WorkerPoolSize = worker_pool_size(Opts), _ = ensure_worker_pool(ResId, hash, [{size, WorkerPoolSize}]), @@ -63,6 +64,7 @@ start_workers(ResId, Opts) -> lists:seq(1, WorkerPoolSize) ). +-spec stop_workers(emqx_resource:resource_id(), _Opts :: #{atom() => _}) -> ok. stop_workers(ResId, Opts) -> WorkerPoolSize = worker_pool_size(Opts), lists:foreach( @@ -75,6 +77,7 @@ stop_workers(ResId, Opts) -> ensure_worker_pool_removed(ResId), ok. +-spec worker_pids(emqx_resource:resource_id()) -> [pid()]. 
worker_pids(ResId) -> lists:map( fun({_Name, Pid}) -> diff --git a/apps/emqx_resource/src/emqx_resource_manager.erl b/apps/emqx_resource/src/emqx_resource_manager.erl index 7ecf56c18..f42d3c1b5 100644 --- a/apps/emqx_resource/src/emqx_resource_manager.erl +++ b/apps/emqx_resource/src/emqx_resource_manager.erl @@ -42,23 +42,24 @@ ]). -export([ - set_resource_status_connecting/1, - manager_id_to_resource_id/1 + set_resource_status_connecting/1 ]). % Server --export([start_link/6]). +-export([start_link/5]). % Behaviour -export([init/1, callback_mode/0, handle_event/4, terminate/3]). % State record -record(data, { - id, manager_id, group, mod, callback_mode, query_mode, config, opts, status, state, error, pid + id, group, mod, callback_mode, query_mode, config, opts, status, state, error, pid }). -type data() :: #data{}. --define(ETS_TABLE, ?MODULE). +-define(NAME(ResId), {n, l, {?MODULE, ResId}}). +-define(REF(ResId), {via, gproc, ?NAME(ResId)}). + -define(WAIT_FOR_RESOURCE_DELAY, 100). -define(T_OPERATION, 5000). -define(T_LOOKUP, 1000). @@ -69,13 +70,6 @@ %% API %%------------------------------------------------------------------------------ -make_manager_id(ResId) -> - emqx_resource:generate_id(ResId). - -manager_id_to_resource_id(MgrId) -> - [ResId, _Index] = string:split(MgrId, ":", trailing), - ResId. - %% @doc Called from emqx_resource when starting a resource instance. %% %% Triggers the emqx_resource_manager_sup supervisor to actually create @@ -92,8 +86,7 @@ ensure_resource(ResId, Group, ResourceType, Config, Opts) -> {ok, _Group, Data} -> {ok, Data}; {error, not_found} -> - MgrId = set_new_owner(ResId), - create_and_return_data(MgrId, ResId, Group, ResourceType, Config, Opts) + create_and_return_data(ResId, Group, ResourceType, Config, Opts) end. 
%% @doc Called from emqx_resource when recreating a resource which may or may not exist @@ -103,23 +96,22 @@ recreate(ResId, ResourceType, NewConfig, Opts) -> case lookup(ResId) of {ok, Group, #{mod := ResourceType, status := _} = _Data} -> _ = remove(ResId, false), - MgrId = set_new_owner(ResId), - create_and_return_data(MgrId, ResId, Group, ResourceType, NewConfig, Opts); + create_and_return_data(ResId, Group, ResourceType, NewConfig, Opts); {ok, _, #{mod := Mod}} when Mod =/= ResourceType -> {error, updating_to_incorrect_resource_type}; {error, not_found} -> {error, not_found} end. -create_and_return_data(MgrId, ResId, Group, ResourceType, Config, Opts) -> - _ = create(MgrId, ResId, Group, ResourceType, Config, Opts), +create_and_return_data(ResId, Group, ResourceType, Config, Opts) -> + _ = create(ResId, Group, ResourceType, Config, Opts), {ok, _Group, Data} = lookup(ResId), {ok, Data}. %% @doc Create a resource_manager and wait until it is running -create(MgrId, ResId, Group, ResourceType, Config, Opts) -> +create(ResId, Group, ResourceType, Config, Opts) -> % The state machine will make the actual call to the callback/resource module after init - ok = emqx_resource_manager_sup:ensure_child(MgrId, ResId, Group, ResourceType, Config, Opts), + ok = emqx_resource_manager_sup:ensure_child(ResId, Group, ResourceType, Config, Opts), ok = emqx_metrics_worker:create_metrics( ?RES_METRICS, ResId, @@ -164,10 +156,12 @@ create(MgrId, ResId, Group, ResourceType, Config, Opts) -> ok | {error, Reason :: term()}. 
create_dry_run(ResourceType, Config) -> ResId = make_test_id(), - MgrId = set_new_owner(ResId), - ok = emqx_resource_manager_sup:ensure_child( - MgrId, ResId, <<"dry_run">>, ResourceType, Config, #{} - ), + Opts = + case is_map(Config) of + true -> maps:get(resource_opts, Config, #{}); + false -> #{} + end, + ok = emqx_resource_manager_sup:ensure_child(ResId, <<"dry_run">>, ResourceType, Config, Opts), case wait_for_ready(ResId, 5000) of ok -> remove(ResId); @@ -237,10 +231,11 @@ lookup(ResId) -> %% @doc Lookup the group and data of a resource from the cache -spec lookup_cached(resource_id()) -> {ok, resource_group(), resource_data()} | {error, not_found}. lookup_cached(ResId) -> - case read_cache(ResId) of - {Group, Data} -> - {ok, Group, data_record_to_external_map(Data)}; - not_found -> + try read_cache(ResId) of + Data = #data{group = Group} -> + {ok, Group, data_record_to_external_map(Data)} + catch + error:badarg -> {error, not_found} end. @@ -256,20 +251,16 @@ reset_metrics(ResId) -> %% @doc Returns the data for all resources -spec list_all() -> [resource_data()]. list_all() -> - try - [ - data_record_to_external_map(Data) - || {_Id, _Group, Data} <- ets:tab2list(?ETS_TABLE) - ] - catch - error:badarg -> [] - end. + lists:map( + fun data_record_to_external_map/1, + gproc:select({local, names}, [{{?NAME('_'), '_', '$1'}, [], ['$1']}]) + ). %% @doc Returns a list of ids for all the resources in a group -spec list_group(resource_group()) -> [resource_id()]. list_group(Group) -> - List = ets:match(?ETS_TABLE, {'$1', Group, '_'}), - lists:flatten(List). + Guard = {'==', {element, #data.group, '$1'}, Group}, + gproc:select({local, names}, [{{?NAME('$2'), '_', '$1'}, [Guard], ['$2']}]). -spec health_check(resource_id()) -> {ok, resource_status()} | {error, term()}. 
health_check(ResId) -> @@ -278,10 +269,9 @@ health_check(ResId) -> %% Server start/stop callbacks %% @doc Function called from the supervisor to actually start the server -start_link(MgrId, ResId, Group, ResourceType, Config, Opts) -> +start_link(ResId, Group, ResourceType, Config, Opts) -> Data = #data{ id = ResId, - manager_id = MgrId, group = Group, mod = ResourceType, callback_mode = emqx_resource:get_callback_mode(ResourceType), @@ -295,7 +285,7 @@ start_link(MgrId, ResId, Group, ResourceType, Config, Opts) -> state = undefined, error = undefined }, - gen_statem:start_link(?MODULE, {Data, Opts}, []). + gen_statem:start_link(?REF(ResId), ?MODULE, {Data, Opts}, []). init({DataIn, Opts}) -> process_flag(trap_exit, true), @@ -315,7 +305,7 @@ terminate({shutdown, removed}, _State, _Data) -> ok; terminate(_Reason, _State, Data) -> _ = maybe_stop_resource(Data), - ok = delete_cache(Data#data.id, Data#data.manager_id), + _ = erase_cache(Data), ok. %% Behavior callback @@ -340,9 +330,6 @@ handle_event({call, From}, start, State, Data) when start_resource(Data, From); handle_event({call, From}, start, _State, _Data) -> {keep_state_and_data, [{reply, From, ok}]}; -% Called when the resource received a `quit` message -handle_event(info, quit, _State, _Data) -> - {stop, {shutdown, quit}}; % Called when the resource is to be stopped handle_event({call, From}, stop, stopped, _Data) -> {keep_state_and_data, [{reply, From, ok}]}; @@ -375,7 +362,7 @@ handle_event(state_timeout, health_check, connecting, Data) -> %% and successful health_checks handle_event(enter, _OldState, connected = State, Data) -> ok = log_state_consistency(State, Data), - _ = emqx_alarm:deactivate(Data#data.id), + _ = emqx_alarm:safe_deactivate(Data#data.id), ?tp(resource_connected_enter, #{}), {keep_state_and_data, health_check_actions(Data)}; handle_event(state_timeout, health_check, connected, Data) -> @@ -413,9 +400,9 @@ log_state_consistency(State, Data) -> data => Data }). 
-log_cache_consistency({_, Data}, Data) -> +log_cache_consistency(Data, Data) -> ok; -log_cache_consistency({_, DataCached}, Data) -> +log_cache_consistency(DataCached, Data) -> ?tp(warning, "inconsistent_cache", #{ cache => DataCached, data => Data @@ -424,56 +411,20 @@ log_cache_consistency({_, DataCached}, Data) -> %%------------------------------------------------------------------------------ %% internal functions %%------------------------------------------------------------------------------ -insert_cache(ResId, Group, Data = #data{manager_id = MgrId}) -> - case get_owner(ResId) of - not_found -> - ets:insert(?ETS_TABLE, {ResId, Group, Data}); - MgrId -> - ets:insert(?ETS_TABLE, {ResId, Group, Data}); - _ -> - ?SLOG(error, #{ - msg => get_resource_owner_failed, - resource_id => ResId, - action => quit_resource - }), - self() ! quit - end. +insert_cache(ResId, Data = #data{}) -> + gproc:set_value(?NAME(ResId), Data). read_cache(ResId) -> - case ets:lookup(?ETS_TABLE, ResId) of - [{_Id, Group, Data}] -> {Group, Data}; - [] -> not_found - end. + gproc:lookup_value(?NAME(ResId)). -delete_cache(ResId, MgrId) -> - case get_owner(ResId) of - MgrIdNow when MgrIdNow == not_found; MgrIdNow == MgrId -> - do_delete_cache(ResId); - _ -> - ok - end. +erase_cache(_Data = #data{id = ResId}) -> + gproc:unreg(?NAME(ResId)). -do_delete_cache(<> = ResId) -> - true = ets:delete(?ETS_TABLE, {owner, ResId}), - true = ets:delete(?ETS_TABLE, ResId), - ok; -do_delete_cache(ResId) -> - true = ets:delete(?ETS_TABLE, ResId), - ok. - -set_new_owner(ResId) -> - MgrId = make_manager_id(ResId), - ok = set_owner(ResId, MgrId), - MgrId. - -set_owner(ResId, MgrId) -> - ets:insert(?ETS_TABLE, {{owner, ResId}, MgrId}), - ok. - -get_owner(ResId) -> - case ets:lookup(?ETS_TABLE, {owner, ResId}) of - [{_, MgrId}] -> MgrId; - [] -> not_found +try_read_cache(ResId) -> + try + read_cache(ResId) + catch + error:badarg -> not_found end. 
retry_actions(Data) -> @@ -489,17 +440,17 @@ health_check_actions(Data) -> handle_remove_event(From, ClearMetrics, Data) -> _ = stop_resource(Data), - ok = delete_cache(Data#data.id, Data#data.manager_id), ok = emqx_resource_buffer_worker_sup:stop_workers(Data#data.id, Data#data.opts), case ClearMetrics of true -> ok = emqx_metrics_worker:clear_metrics(?RES_METRICS, Data#data.id); false -> ok end, + _ = erase_cache(Data), {stop_and_reply, {shutdown, removed}, [{reply, From, ok}]}. start_resource(Data, From) -> %% in case the emqx_resource:call_start/2 hangs, the lookup/1 can read status from the cache - case emqx_resource:call_start(Data#data.manager_id, Data#data.mod, Data#data.config) of + case emqx_resource:call_start(Data#data.id, Data#data.mod, Data#data.config) of {ok, ResourceState} -> UpdatedData = Data#data{status = connecting, state = ResourceState}, %% Perform an initial health_check immediately before transitioning into a connected state @@ -511,10 +462,10 @@ start_resource(Data, From) -> id => Data#data.id, reason => Reason }), - _ = maybe_alarm(disconnected, Data#data.id, Data#data.error), + _ = maybe_alarm(disconnected, Data#data.id, Err, Data#data.error), %% Keep track of the error reason why the connection did not work %% so that the Reason can be returned when the verification call is made. - UpdatedData = Data#data{status = disconnected, error = Reason}, + UpdatedData = Data#data{status = disconnected, error = Err}, Actions = maybe_reply(retry_actions(UpdatedData), From, Err), {next_state, disconnected, update_state(UpdatedData, Data), Actions} end. @@ -530,7 +481,7 @@ stop_resource(#data{state = ResState, id = ResId} = Data) -> %% is returned. 
case ResState /= undefined of true -> - emqx_resource:call_stop(Data#data.manager_id, Data#data.mod, ResState); + emqx_resource:call_stop(Data#data.id, Data#data.mod, ResState); false -> ok end, @@ -582,11 +533,11 @@ handle_connected_health_check(Data) -> with_health_check(#data{state = undefined} = Data, Func) -> Func(disconnected, Data); -with_health_check(Data, Func) -> +with_health_check(#data{error = PrevError} = Data, Func) -> ResId = Data#data.id, - HCRes = emqx_resource:call_health_check(Data#data.manager_id, Data#data.mod, Data#data.state), + HCRes = emqx_resource:call_health_check(Data#data.id, Data#data.mod, Data#data.state), {Status, NewState, Err} = parse_health_check_result(HCRes, Data), - _ = maybe_alarm(Status, ResId, Err), + _ = maybe_alarm(Status, ResId, Err, PrevError), ok = maybe_resume_resource_workers(ResId, Status), UpdatedData = Data#data{ state = NewState, status = Status, error = Err @@ -599,27 +550,31 @@ update_state(Data) -> update_state(DataWas, DataWas) -> DataWas; update_state(Data, _DataWas) -> - _ = insert_cache(Data#data.id, Data#data.group, Data), + _ = insert_cache(Data#data.id, Data), Data. health_check_interval(Opts) -> maps:get(health_check_interval, Opts, ?HEALTHCHECK_INTERVAL). -maybe_alarm(connected, _ResId, _Error) -> +maybe_alarm(connected, _ResId, _Error, _PrevError) -> ok; -maybe_alarm(_Status, <>, _Error) -> +maybe_alarm(_Status, <>, _Error, _PrevError) -> ok; -maybe_alarm(_Status, ResId, Error) -> +%% Assume that alarm is already active +maybe_alarm(_Status, _ResId, Error, Error) -> + ok; +maybe_alarm(_Status, ResId, Error, _PrevError) -> HrError = case Error of - undefined -> <<"Unknown reason">>; - _Else -> emqx_utils:readable_error_msg(Error) + {error, undefined} -> <<"Unknown reason">>; + {error, Reason} -> emqx_utils:readable_error_msg(Reason) end, - emqx_alarm:activate( + emqx_alarm:safe_activate( ResId, #{resource_id => ResId, reason => resource_down}, <<"resource down: ", HrError/binary>> - ). 
+ ), + ?tp(resource_activate_alarm, #{resource_id => ResId}). maybe_resume_resource_workers(ResId, connected) -> lists:foreach( @@ -632,14 +587,14 @@ maybe_resume_resource_workers(_, _) -> maybe_clear_alarm(<>) -> ok; maybe_clear_alarm(ResId) -> - emqx_alarm:deactivate(ResId). + emqx_alarm:safe_deactivate(ResId). parse_health_check_result(Status, Data) when ?IS_STATUS(Status) -> - {Status, Data#data.state, undefined}; + {Status, Data#data.state, status_to_error(Status)}; parse_health_check_result({Status, NewState}, _Data) when ?IS_STATUS(Status) -> - {Status, NewState, undefined}; + {Status, NewState, status_to_error(Status)}; parse_health_check_result({Status, NewState, Error}, _Data) when ?IS_STATUS(Status) -> - {Status, NewState, Error}; + {Status, NewState, {error, Error}}; parse_health_check_result({error, Error}, Data) -> ?SLOG( error, @@ -649,7 +604,16 @@ parse_health_check_result({error, Error}, Data) -> reason => Error } ), - {disconnected, Data#data.state, Error}. + {disconnected, Data#data.state, {error, Error}}. + +status_to_error(connected) -> + undefined; +status_to_error(_) -> + {error, undefined}. + +%% Compatibility +external_error({error, Reason}) -> Reason; +external_error(Other) -> Other. 
maybe_reply(Actions, undefined, _Reply) -> Actions; @@ -660,7 +624,7 @@ maybe_reply(Actions, From, Reply) -> data_record_to_external_map(Data) -> #{ id => Data#data.id, - error => Data#data.error, + error => external_error(Data#data.error), mod => Data#data.mod, callback_mode => Data#data.callback_mode, query_mode => Data#data.query_mode, @@ -676,11 +640,11 @@ wait_for_ready(ResId, WaitTime) -> do_wait_for_ready(_ResId, 0) -> timeout; do_wait_for_ready(ResId, Retry) -> - case read_cache(ResId) of - {_Group, #data{status = connected}} -> + case try_read_cache(ResId) of + #data{status = connected} -> ok; - {_Group, #data{status = disconnected, error = Reason}} -> - {error, Reason}; + #data{status = disconnected, error = Err} -> + {error, external_error(Err)}; _ -> timer:sleep(?WAIT_FOR_RESOURCE_DELAY), do_wait_for_ready(ResId, Retry - 1) @@ -688,12 +652,7 @@ do_wait_for_ready(ResId, Retry) -> safe_call(ResId, Message, Timeout) -> try - case read_cache(ResId) of - not_found -> - {error, not_found}; - {_, #data{pid = ManagerPid}} -> - gen_statem:call(ManagerPid, Message, {clean_timeout, Timeout}) - end + gen_statem:call(?REF(ResId), Message, {clean_timeout, Timeout}) catch error:badarg -> {error, not_found}; diff --git a/apps/emqx_resource/src/emqx_resource_manager_sup.erl b/apps/emqx_resource/src/emqx_resource_manager_sup.erl index 5b731d6cf..2f442cd56 100644 --- a/apps/emqx_resource/src/emqx_resource_manager_sup.erl +++ b/apps/emqx_resource/src/emqx_resource_manager_sup.erl @@ -17,23 +17,20 @@ -behaviour(supervisor). --export([ensure_child/6]). +-export([ensure_child/5]). -export([start_link/0]). -export([init/1]). -ensure_child(MgrId, ResId, Group, ResourceType, Config, Opts) -> - _ = supervisor:start_child(?MODULE, [MgrId, ResId, Group, ResourceType, Config, Opts]), +ensure_child(ResId, Group, ResourceType, Config, Opts) -> + _ = supervisor:start_child(?MODULE, [ResId, Group, ResourceType, Config, Opts]), ok. 
start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> - TabOpts = [named_table, set, public, {read_concurrency, true}], - _ = ets:new(emqx_resource_manager, TabOpts), - ChildSpecs = [ #{ id => emqx_resource_manager, @@ -44,6 +41,5 @@ init([]) -> modules => [emqx_resource_manager] } ], - SupFlags = #{strategy => simple_one_for_one, intensity => 10, period => 10}, {ok, {SupFlags, ChildSpecs}}. diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl b/apps/emqx_resource/src/emqx_resource_pool.erl similarity index 82% rename from apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl rename to apps/emqx_resource/src/emqx_resource_pool.erl index a3048e5dd..913b29c86 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_pool.erl +++ b/apps/emqx_resource/src/emqx_resource_pool.erl @@ -14,31 +14,27 @@ %% limitations under the License. %%-------------------------------------------------------------------- --module(emqx_plugin_libs_pool). +-module(emqx_resource_pool). -export([ - start_pool/3, - stop_pool/1, - pool_name/1, - health_check_ecpool_workers/2, - health_check_ecpool_workers/3 + start/3, + stop/1, + health_check_workers/2, + health_check_workers/3 ]). -include_lib("emqx/include/logger.hrl"). -define(HEALTH_CHECK_TIMEOUT, 15000). -pool_name(ID) when is_binary(ID) -> - list_to_atom(binary_to_list(ID)). - -start_pool(Name, Mod, Options) -> +start(Name, Mod, Options) -> case ecpool:start_sup_pool(Name, Mod, Options) of {ok, _} -> ?SLOG(info, #{msg => "start_ecpool_ok", pool_name => Name}), ok; {error, {already_started, _Pid}} -> - stop_pool(Name), - start_pool(Name, Mod, Options); + stop(Name), + start(Name, Mod, Options); {error, Reason} -> NReason = parse_reason(Reason), ?SLOG(error, #{ @@ -49,7 +45,7 @@ start_pool(Name, Mod, Options) -> {error, {start_pool_failed, Name, NReason}} end. 
-stop_pool(Name) -> +stop(Name) -> case ecpool:stop_sup_pool(Name) of ok -> ?SLOG(info, #{msg => "stop_ecpool_ok", pool_name => Name}); @@ -64,10 +60,10 @@ stop_pool(Name) -> error({stop_pool_failed, Name, Reason}) end. -health_check_ecpool_workers(PoolName, CheckFunc) -> - health_check_ecpool_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT). +health_check_workers(PoolName, CheckFunc) -> + health_check_workers(PoolName, CheckFunc, ?HEALTH_CHECK_TIMEOUT). -health_check_ecpool_workers(PoolName, CheckFunc, Timeout) -> +health_check_workers(PoolName, CheckFunc, Timeout) -> Workers = [Worker || {_WorkerName, Worker} <- ecpool:workers(PoolName)], DoPerWorker = fun(Worker) -> diff --git a/apps/emqx_resource/test/emqx_connector_demo.erl b/apps/emqx_resource/test/emqx_connector_demo.erl index 5be854e93..96e22c6b6 100644 --- a/apps/emqx_resource/test/emqx_connector_demo.erl +++ b/apps/emqx_resource/test/emqx_connector_demo.erl @@ -62,6 +62,7 @@ set_callback_mode(Mode) -> persistent_term:put(?CM_KEY, Mode). on_start(_InstId, #{create_error := true}) -> + ?tp(connector_demo_start_error, #{}), error("some error"); on_start(InstId, #{name := Name} = Opts) -> Register = maps:get(register, Opts, false), @@ -243,6 +244,7 @@ batch_big_payload({async, ReplyFunAndArgs}, InstId, Batch, State = #{pid := Pid} {ok, Pid}. on_get_status(_InstId, #{health_check_error := true}) -> + ?tp(connector_demo_health_check_error, #{}), disconnected; on_get_status(_InstId, #{pid := Pid}) -> timer:sleep(300), diff --git a/apps/emqx_resource/test/emqx_resource_SUITE.erl b/apps/emqx_resource/test/emqx_resource_SUITE.erl index b861e1be2..809f101a8 100644 --- a/apps/emqx_resource/test/emqx_resource_SUITE.erl +++ b/apps/emqx_resource/test/emqx_resource_SUITE.erl @@ -1055,28 +1055,22 @@ t_list_filter(_) -> ). 
t_create_dry_run_local(_) -> - ets:match_delete(emqx_resource_manager, {{owner, '$1'}, '_'}), lists:foreach( fun(_) -> create_dry_run_local_succ() end, lists:seq(1, 10) ), - case [] =:= ets:match(emqx_resource_manager, {{owner, '$1'}, '_'}) of - false -> - %% Sleep to remove flakyness in test case. It take some time for - %% the ETS table to be cleared. - timer:sleep(2000), - [] = ets:match(emqx_resource_manager, {{owner, '$1'}, '_'}); - true -> - ok - end. + ?retry( + 100, + 5, + ?assertEqual( + [], + emqx_resource:list_instances_verbose() + ) + ). create_dry_run_local_succ() -> - case whereis(test_resource) of - undefined -> ok; - Pid -> exit(Pid, kill) - end, ?assertEqual( ok, emqx_resource:create_dry_run_local( @@ -1107,7 +1101,15 @@ t_create_dry_run_local_failed(_) -> ?TEST_RESOURCE, #{name => test_resource, stop_error => true} ), - ?assertEqual(ok, Res3). + ?assertEqual(ok, Res3), + ?retry( + 100, + 5, + ?assertEqual( + [], + emqx_resource:list_instances_verbose() + ) + ). t_test_func(_) -> ?assertEqual(ok, erlang:apply(emqx_resource_validator:not_empty("not_empty"), [<<"someval">>])), @@ -1766,12 +1768,6 @@ t_async_pool_worker_death(_Config) -> ?assertEqual(NumReqs, Inflight0), %% grab one of the worker pids and kill it - {ok, SRef1} = - snabbkaffe:subscribe( - ?match_event(#{?snk_kind := buffer_worker_worker_down_update}), - NumBufferWorkers, - 10_000 - ), {ok, #{pid := Pid0}} = emqx_resource:simple_sync_query(?ID, get_state), MRef = monitor(process, Pid0), ct:pal("will kill ~p", [Pid0]), @@ -1785,13 +1781,27 @@ t_async_pool_worker_death(_Config) -> end, %% inflight requests should have been marked as retriable - {ok, _} = snabbkaffe:receive_events(SRef1), + wait_until_all_marked_as_retriable(NumReqs), Inflight1 = emqx_resource_metrics:inflight_get(?ID), ?assertEqual(NumReqs, Inflight1), - ok + NumReqs end, - [] + fun(NumReqs, Trace) -> + Events = ?of_kind(buffer_worker_async_agent_down, Trace), + %% At least one buffer worker should have marked its + %% 
requests as retriable. If a single one has + %% received all requests, that's all we got. + ?assertMatch([_ | _], Events), + %% All requests distributed over all buffer workers + %% should have been marked as retriable, by the time + %% the inflight has been drained. + ?assertEqual( + NumReqs, + lists:sum([N || #{num_affected := N} <- Events]) + ), + ok + end ), ok. @@ -2630,7 +2640,6 @@ t_call_mode_uncoupled_from_query_mode(_Config) -> Trace2 ) ), - ok end ). @@ -2791,6 +2800,42 @@ t_late_call_reply(_Config) -> ), ok. +t_resource_create_error_activate_alarm_once(_) -> + do_t_resource_activate_alarm_once( + #{name => test_resource, create_error => true}, + connector_demo_start_error + ). + +t_resource_health_check_error_activate_alarm_once(_) -> + do_t_resource_activate_alarm_once( + #{name => test_resource, health_check_error => true}, + connector_demo_health_check_error + ). + +do_t_resource_activate_alarm_once(ResourceConfig, SubscribeEvent) -> + ?check_trace( + begin + ?wait_async_action( + emqx_resource:create_local( + ?ID, + ?DEFAULT_RESOURCE_GROUP, + ?TEST_RESOURCE, + ResourceConfig, + #{auto_restart_interval => 100, health_check_interval => 100} + ), + #{?snk_kind := resource_activate_alarm, resource_id := ?ID} + ), + ?assertMatch([#{activated := true, name := ?ID}], emqx_alarm:get_alarms(activated)), + {ok, SubRef} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := SubscribeEvent}), 4, 7000 + ), + ?assertMatch({ok, [_, _, _, _]}, snabbkaffe:receive_events(SubRef)) + end, + fun(Trace) -> + ?assertMatch([_], ?of_kind(resource_activate_alarm, Trace)) + end + ). 
+ %%------------------------------------------------------------------------------ %% Helpers %%------------------------------------------------------------------------------ @@ -2977,3 +3022,33 @@ trace_between_span(Trace0, Marker) -> {Trace1, [_ | _]} = ?split_trace_at(#{?snk_kind := Marker, ?snk_span := {complete, _}}, Trace0), {[_ | _], [_ | Trace2]} = ?split_trace_at(#{?snk_kind := Marker, ?snk_span := start}, Trace1), Trace2. + +wait_until_all_marked_as_retriable(NumExpected) when NumExpected =< 0 -> + ok; +wait_until_all_marked_as_retriable(NumExpected) -> + Seen = #{}, + do_wait_until_all_marked_as_retriable(NumExpected, Seen). + +do_wait_until_all_marked_as_retriable(NumExpected, _Seen) when NumExpected =< 0 -> + ok; +do_wait_until_all_marked_as_retriable(NumExpected, Seen) -> + Res = ?block_until( + #{?snk_kind := buffer_worker_async_agent_down, ?snk_meta := #{pid := P}} when + not is_map_key(P, Seen), + 10_000 + ), + case Res of + {timeout, Evts} -> + ct:pal("events so far:\n ~p", [Evts]), + ct:fail("timeout waiting for events"); + {ok, #{num_affected := NumAffected, ?snk_meta := #{pid := Pid}}} -> + ct:pal("affected: ~p; pid: ~p", [NumAffected, Pid]), + case NumAffected >= NumExpected of + true -> + ok; + false -> + do_wait_until_all_marked_as_retriable(NumExpected - NumAffected, Seen#{ + Pid => true + }) + end + end. diff --git a/apps/emqx_retainer/src/emqx_retainer_schema.erl b/apps/emqx_retainer/src/emqx_retainer_schema.erl index dbe1ad9d5..74f1e2371 100644 --- a/apps/emqx_retainer/src/emqx_retainer_schema.erl +++ b/apps/emqx_retainer/src/emqx_retainer_schema.erl @@ -53,7 +53,8 @@ fields("retainer") -> sc( ?R_REF(flow_control), flow_control, - #{} + #{}, + ?IMPORTANCE_HIDDEN )}, {max_payload_size, sc( @@ -125,7 +126,9 @@ desc(_) -> %% hoconsc:mk(Type, #{desc => ?DESC(DescId)}). sc(Type, DescId, Default) -> - hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId)}). + sc(Type, DescId, Default, ?DEFAULT_IMPORTANCE). 
+sc(Type, DescId, Default, Importance) -> + hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId), importance => Importance}). backend_config() -> hoconsc:mk(hoconsc:ref(?MODULE, mnesia_config), #{desc => ?DESC(backend)}). diff --git a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl index 09d1f77da..c90ec6b2b 100644 --- a/apps/emqx_retainer/test/emqx_retainer_SUITE.erl +++ b/apps/emqx_retainer/test/emqx_retainer_SUITE.erl @@ -758,23 +758,22 @@ with_conf(ConfMod, Case) -> end. make_limiter_cfg(Rate) -> - Infinity = emqx_limiter_schema:infinity_value(), Client = #{ rate => Rate, initial => 0, - capacity => Infinity, + burst => 0, low_watermark => 1, divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force }, - #{client => Client, rate => Infinity, initial => 0, capacity => Infinity}. + #{client => Client, rate => Rate, initial => 0, burst => 0}. make_limiter_json(Rate) -> Client = #{ <<"rate">> => Rate, <<"initial">> => 0, - <<"capacity">> => <<"infinity">>, + <<"burst">> => <<"0">>, <<"low_watermark">> => 0, <<"divisible">> => <<"false">>, <<"max_retry_time">> => <<"5s">>, @@ -784,5 +783,5 @@ make_limiter_json(Rate) -> <<"client">> => Client, <<"rate">> => <<"infinity">>, <<"initial">> => 0, - <<"capacity">> => <<"infinity">> + <<"burst">> => <<"0">> }. diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src index 133138252..932ebc5ed 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src +++ b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src @@ -2,7 +2,7 @@ {application, emqx_rule_engine, [ {description, "EMQX Rule Engine"}, % strict semver, bump manually! 
- {vsn, "5.0.14"}, + {vsn, "5.0.15"}, {modules, []}, {registered, [emqx_rule_engine_sup, emqx_rule_engine]}, {applications, [kernel, stdlib, rulesql, getopt, emqx_ctl]}, diff --git a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl index 81435d9ac..02163f95b 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_funcs.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_funcs.erl @@ -227,9 +227,20 @@ now_timestamp/1, format_date/3, format_date/4, + date_to_unix_ts/3, date_to_unix_ts/4 ]). +%% MongoDB specific date functions. These functions return a date tuple. The +%% MongoDB bridge converts such date tuples to a MongoDB date type. The +%% following functions are therefore only useful for rules with at least one +%% MongoDB action. +-export([ + mongo_date/0, + mongo_date/1, + mongo_date/2 +]). + %% Proc Dict Func -export([ proc_dict_get/1, @@ -1085,6 +1096,9 @@ format_date(TimeUnit, Offset, FormatString, TimeEpoch) -> ) ). +date_to_unix_ts(TimeUnit, FormatString, InputString) -> + date_to_unix_ts(TimeUnit, "Z", FormatString, InputString). + date_to_unix_ts(TimeUnit, Offset, FormatString, InputString) -> emqx_rule_date:parse_date( time_unit(TimeUnit), @@ -1135,3 +1149,21 @@ function_literal(Fun, [FArg | Args]) when is_atom(Fun), is_list(Args) -> ) ++ ")"; function_literal(Fun, Args) -> {invalid_func, {Fun, Args}}. + +mongo_date() -> + erlang:timestamp(). + +mongo_date(MillisecondsTimestamp) -> + convert_timestamp(MillisecondsTimestamp). + +mongo_date(Timestamp, Unit) -> + InsertedTimeUnit = time_unit(Unit), + ScaledEpoch = erlang:convert_time_unit(Timestamp, InsertedTimeUnit, millisecond), + convert_timestamp(ScaledEpoch). + +convert_timestamp(MillisecondsTimestamp) -> + MicroTimestamp = MillisecondsTimestamp * 1000, + MegaSecs = MicroTimestamp div 1000_000_000_000, + Secs = MicroTimestamp div 1000_000 - MegaSecs * 1000_000, + MicroSecs = MicroTimestamp rem 1000_000, + {MegaSecs, Secs, MicroSecs}. 
diff --git a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl index ee798868e..d88637312 100644 --- a/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl +++ b/apps/emqx_rule_engine/test/emqx_rule_funcs_SUITE.erl @@ -686,7 +686,6 @@ t_jq(_) -> %% Got timeout as expected got_timeout end, - _ConfigRootKey = emqx_rule_engine_schema:namespace(), ?assertThrow( {jq_exception, {timeout, _}}, apply_func(jq, [TOProgram, <<"-2">>]) @@ -959,7 +958,7 @@ prop_format_date_fun() -> Args1 = [<<"second">>, <<"+07:00">>, <<"%m--%d--%y---%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(second), + range(0, 4000000000), S == apply_func( date_to_unix_ts, @@ -975,7 +974,7 @@ prop_format_date_fun() -> Args2 = [<<"millisecond">>, <<"+04:00">>, <<"--%m--%d--%y---%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(millisecond), + range(0, 4000000000), S == apply_func( date_to_unix_ts, @@ -991,7 +990,7 @@ prop_format_date_fun() -> Args = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>], ?FORALL( S, - erlang:system_time(second), + range(0, 4000000000), S == apply_func( date_to_unix_ts, @@ -1003,6 +1002,24 @@ prop_format_date_fun() -> ) ] ) + ), + %% When no offset is specified, the offset should be taken from the formatted time string + ArgsNoOffset = [<<"second">>, <<"%y-%m-%d-%H:%M:%S%Z">>], + ArgsOffset = [<<"second">>, <<"+08:00">>, <<"%y-%m-%d-%H:%M:%S%Z">>], + ?FORALL( + S, + range(0, 4000000000), + S == + apply_func( + date_to_unix_ts, + ArgsNoOffset ++ + [ + apply_func( + format_date, + ArgsOffset ++ [S] + ) + ] + ) ). 
%%------------------------------------------------------------------------------ diff --git a/apps/emqx_slow_subs/README.md b/apps/emqx_slow_subs/README.md new file mode 100644 index 000000000..8b83508c2 --- /dev/null +++ b/apps/emqx_slow_subs/README.md @@ -0,0 +1,47 @@ +# EMQX Slow Subscriptions + +This application can calculate the latency (time spent) of the message to be processed and transmitted since it arrives at EMQX. + +If the latency exceeds a specified threshold, this application will add the subscriber and topic information to a slow subscriptions list or update the existing record. + +More introduction: [Slow Subscriptions](https://www.emqx.io/docs/en/v5.0/observability/slow-subscribers-statistics.html) + +# Usage + +You can add the below section into `emqx.conf` to enable this application + +```yaml +slow_subs { + enable = true + threshold = "500ms" + expire_interval = "300s" + top_k_num = 10 + stats_type = whole +} +``` + +# Configurations + +**threshold**: Latency threshold for statistics, only messages with latency exceeding this value will be collected. + +Minimum value: 100ms +Default value: 500ms + +**expire_interval**: Eviction time of the record, will start the counting since the creation of the record, and the records that are not triggered again within this specified period will be removed from the rank list. + +Default: 300s + +**top_k_num**: Maximum number of records in the slow subscription statistics record table. + +Maximum value: 1,000 +Default value: 10 + +**stats_type**: Calculation methods of the latency, which are +- **whole**: From the time the message arrives at EMQX until the message transmission completes +- **internal**: From when the message arrives at EMQX until when EMQX starts delivering the message +- **response**: From the time EMQX starts delivering the message until the message transmission completes + +Default value: whole + +# Contributing +Please see our [contributing.md](../../CONTRIBUTING.md). 
diff --git a/apps/emqx_statsd/src/emqx_statsd.app.src b/apps/emqx_statsd/src/emqx_statsd.app.src index 412e0b685..87fc8c596 100644 --- a/apps/emqx_statsd/src/emqx_statsd.app.src +++ b/apps/emqx_statsd/src/emqx_statsd.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_statsd, [ {description, "EMQX Statsd"}, - {vsn, "5.0.8"}, + {vsn, "5.0.9"}, {registered, []}, {mod, {emqx_statsd_app, []}}, {applications, [ diff --git a/apps/emqx_statsd/src/emqx_statsd.erl b/apps/emqx_statsd/src/emqx_statsd.erl index c5a7fc1c8..b2d726b07 100644 --- a/apps/emqx_statsd/src/emqx_statsd.erl +++ b/apps/emqx_statsd/src/emqx_statsd.erl @@ -80,7 +80,7 @@ init(Conf) -> flush_time_interval := FlushTimeInterval } = Conf, FlushTimeInterval1 = flush_interval(FlushTimeInterval, SampleTimeInterval), - {Host, Port} = emqx_schema:parse_server(Server, ?SERVER_PARSE_OPTS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?SERVER_PARSE_OPTS), Tags = maps:fold(fun(K, V, Acc) -> [{to_bin(K), to_bin(V)} | Acc] end, [], TagsRaw), Opts = [{tags, Tags}, {host, Host}, {port, Port}, {prefix, <<"emqx">>}], {ok, Pid} = estatsd:start_link(Opts), diff --git a/apps/emqx_statsd/src/emqx_statsd_api.erl b/apps/emqx_statsd/src/emqx_statsd_api.erl index e65c93432..4ee144d57 100644 --- a/apps/emqx_statsd/src/emqx_statsd_api.erl +++ b/apps/emqx_statsd/src/emqx_statsd_api.erl @@ -49,6 +49,7 @@ schema("/statsd") -> 'operationId' => statsd, get => #{ + deprecated => true, description => ?DESC(get_statsd_config_api), tags => ?API_TAG_STATSD, responses => @@ -56,6 +57,7 @@ schema("/statsd") -> }, put => #{ + deprecated => true, description => ?DESC(update_statsd_config_api), tags => ?API_TAG_STATSD, 'requestBody' => statsd_config_schema(), diff --git a/apps/emqx_statsd/src/emqx_statsd_schema.erl b/apps/emqx_statsd/src/emqx_statsd_schema.erl index e44f94954..01decc6f7 100644 --- a/apps/emqx_statsd/src/emqx_statsd_schema.erl +++ b/apps/emqx_statsd/src/emqx_statsd_schema.erl @@ -32,7 +32,8 @@ 
namespace() -> "statsd". -roots() -> ["statsd"]. +roots() -> + [{"statsd", hoconsc:mk(hoconsc:ref(?MODULE, "statsd"), #{importance => ?IMPORTANCE_HIDDEN})}]. fields("statsd") -> [ diff --git a/apps/emqx_statsd/test/emqx_statsd_SUITE.erl b/apps/emqx_statsd/test/emqx_statsd_SUITE.erl index 1f7c4688e..8b8709c27 100644 --- a/apps/emqx_statsd/test/emqx_statsd_SUITE.erl +++ b/apps/emqx_statsd/test/emqx_statsd_SUITE.erl @@ -59,9 +59,7 @@ init_per_suite(Config) -> [emqx_conf, emqx_dashboard, emqx_statsd], fun set_special_configs/1 ), - ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF), Config. end_per_suite(_Config) -> @@ -84,22 +82,16 @@ t_server_validator(_) -> reason := "cannot_be_empty", value := "" }, - emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BAD_CONF, #{ - raw_with_default => true - }) + emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BAD_CONF) ), %% default - ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?DEFAULT_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?DEFAULT_CONF), DefaultServer = default_server(), ?assertEqual(DefaultServer, emqx_conf:get_raw([statsd, server])), DefaultServerStr = binary_to_list(DefaultServer), ?assertEqual(DefaultServerStr, emqx_conf:get([statsd, server])), %% recover - ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF), Server2 = emqx_conf:get_raw([statsd, server]), ?assertMatch(Server0, Server2), ok. 
diff --git a/apps/emqx_utils/src/emqx_utils.app.src b/apps/emqx_utils/src/emqx_utils.app.src index eb6371411..dff55bc86 100644 --- a/apps/emqx_utils/src/emqx_utils.app.src +++ b/apps/emqx_utils/src/emqx_utils.app.src @@ -2,7 +2,7 @@ {application, emqx_utils, [ {description, "Miscellaneous utilities for EMQX apps"}, % strict semver, bump manually! - {vsn, "5.0.0"}, + {vsn, "5.0.1"}, {modules, [ emqx_utils, emqx_utils_api, diff --git a/apps/emqx_utils/src/emqx_utils.erl b/apps/emqx_utils/src/emqx_utils.erl index 5f0aa3e64..e9b2a1f9e 100644 --- a/apps/emqx_utils/src/emqx_utils.erl +++ b/apps/emqx_utils/src/emqx_utils.erl @@ -230,7 +230,7 @@ check_oom(Policy) -> check_oom(_Pid, #{enable := false}) -> ok; check_oom(Pid, #{ - max_message_queue_len := MaxQLen, + max_mailbox_size := MaxQLen, max_heap_size := MaxHeapSize }) -> case process_info(Pid, [message_queue_len, total_heap_size]) of diff --git a/apps/emqx_utils/src/emqx_utils_maps.erl b/apps/emqx_utils/src/emqx_utils_maps.erl index 6bec32ae3..d1c3ed649 100644 --- a/apps/emqx_utils/src/emqx_utils_maps.erl +++ b/apps/emqx_utils/src/emqx_utils_maps.erl @@ -41,14 +41,13 @@ -type config_key_path() :: [config_key()]. -type convert_fun() :: fun((...) -> {K1 :: any(), V1 :: any()} | drop). +-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound'). %%----------------------------------------------------------------- -spec deep_get(config_key_path(), map()) -> term(). deep_get(ConfKeyPath, Map) -> - Ref = make_ref(), - Res = deep_get(ConfKeyPath, Map, Ref), - case Res =:= Ref of - true -> error({config_not_found, ConfKeyPath}); - false -> Res + case deep_get(ConfKeyPath, Map, ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, ConfKeyPath}); + Res -> Res end. -spec deep_get(config_key_path(), map(), term()) -> term(). 
diff --git a/apps/emqx_utils/test/emqx_utils_SUITE.erl b/apps/emqx_utils/test/emqx_utils_SUITE.erl index 99516b0eb..6c6bcf8d3 100644 --- a/apps/emqx_utils/test/emqx_utils_SUITE.erl +++ b/apps/emqx_utils/test/emqx_utils_SUITE.erl @@ -140,7 +140,7 @@ t_index_of(_) -> t_check(_) -> Policy = #{ - max_message_queue_len => 10, + max_mailbox_size => 10, max_heap_size => 1024 * 1024 * 8, enable => true }, diff --git a/bin/emqx b/bin/emqx index dd0b0791e..3b7212c99 100755 --- a/bin/emqx +++ b/bin/emqx @@ -396,7 +396,7 @@ relx_get_pid() { remsh() { # Generate a unique id used to allow multiple remsh to the same node # transparently - id="remsh$(relx_gen_id)-${NAME}" + id="remsh$(gen_node_id)-${NAME}" # shellcheck disable=SC2086 # Setup remote shell command to control node @@ -424,7 +424,7 @@ remsh() { } # Generate a random id -relx_gen_id() { +gen_node_id() { od -t u -N 4 /dev/urandom | head -n1 | awk '{print $2 % 1000}' } @@ -451,7 +451,7 @@ find_emqx_process() { if [ -n "${EMQX_NODE__NAME:-}" ]; then # if node name is provided, filter by node name # shellcheck disable=SC2009 - ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -E "\s\-s?name\s${EMQX_NODE__NAME}" | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true + ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -E "\s-s?name\s${EMQX_NODE__NAME}" | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true else # shellcheck disable=SC2009 ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true @@ -482,7 +482,7 @@ RUNNING_NODES_COUNT="$(echo -e "$PS_LINE" | sed '/^\s*$/d' | wc -l)" if [ "$IS_BOOT_COMMAND" = 'yes' ]; then if [ "$RUNNING_NODES_COUNT" -gt 0 ] && [ "$COMMAND" != 'check_config' ]; then - running_node_name=$(echo -e "$PS_LINE" | $GREP -oE "\s\-s?name.*" | awk '{print $2}' || true) + running_node_name=$(echo -e "$PS_LINE" | $GREP -oE "\s-s?name.*" | awk '{print $2}' || true) if [ -n "$running_node_name" ] && [ "$running_node_name" = 
"${EMQX_NODE__NAME:-}" ]; then echo "Node ${running_node_name} is already running!" exit 1 @@ -520,10 +520,10 @@ else # would try to stop the new node instead. if [ "$RUNNING_NODES_COUNT" -eq 1 ]; then ## only one emqx node is running, get running args from 'ps -ef' output - tmp_nodename=$(echo -e "$PS_LINE" | $GREP -oE "\s\-s?name.*" | awk '{print $2}' || true) - tmp_cookie=$(echo -e "$PS_LINE" | $GREP -oE "\s\-setcookie.*" | awk '{print $2}' || true) + tmp_nodename=$(echo -e "$PS_LINE" | $GREP -oE "\s-s?name.*" | awk '{print $2}' || true) + tmp_cookie=$(echo -e "$PS_LINE" | $GREP -oE "\s-setcookie.*" | awk '{print $2}' || true) SSL_DIST_OPTFILE="$(echo -e "$PS_LINE" | $GREP -oE '\-ssl_dist_optfile\s.+\s' | awk '{print $2}' || true)" - tmp_ticktime="$(echo -e "$PS_LINE" | $GREP -oE '\s\-kernel\snet_ticktime\s.+\s' | awk '{print $3}' || true)" + tmp_ticktime="$(echo -e "$PS_LINE" | $GREP -oE '\s-kernel\snet_ticktime\s.+\s' | awk '{print $3}' || true)" # data_dir is actually not needed, but kept anyway tmp_datadir="$(echo -e "$PS_LINE" | $GREP -oE "\-emqx_data_dir.*" | sed -E 's#.+emqx_data_dir[[:blank:]]##g' | sed -E 's#[[:blank:]]--$##g' || true)" if [ -z "$SSL_DIST_OPTFILE" ]; then @@ -536,7 +536,7 @@ else else if [ "$RUNNING_NODES_COUNT" -gt 1 ]; then if [ -z "${EMQX_NODE__NAME:-}" ]; then - tmp_nodenames=$(echo -e "$PS_LINE" | $GREP -oE "\s\-s?name.*" | awk '{print $2}' | tr '\n' ' ') + tmp_nodenames=$(echo -e "$PS_LINE" | $GREP -oE "\s-s?name.*" | awk '{print $2}' | tr '\n' ' ') logerr "More than one EMQX node found running (root dir: ${RUNNER_ROOT_DIR})" logerr "Running nodes: $tmp_nodenames" logerr "Make sure environment variable EMQX_NODE__NAME is set to indicate for which node this command is intended." 
@@ -806,6 +806,7 @@ generate_config() { } # check if a PID is down +# shellcheck disable=SC2317 # call in func `nodetool_shutdown()` is_down() { PID="$1" if ps -p "$PID" >/dev/null; then @@ -937,7 +938,7 @@ case "$NAME" in esac SHORT_NAME="$(echo "$NAME" | awk -F'@' '{print $1}')" HOST_NAME="$(echo "$NAME" | awk -F'@' '{print $2}')" -if ! (echo "$SHORT_NAME" | grep -q '^[0-9A-Za-z_\-]\+$'); then +if ! (echo "$SHORT_NAME" | $GREP -q '^[0-9A-Za-z_\-]\+$'); then logerr "Invalid node name, should be of format '^[0-9A-Za-z_-]+$'." exit 1 fi @@ -972,7 +973,7 @@ maybe_warn_default_cookie() { ## check if OTP version has mnesia_hook feature; if not, fallback to ## using Mnesia DB backend. if [[ "$IS_BOOT_COMMAND" == 'yes' && "$(get_boot_config 'node.db_backend')" == "rlog" ]]; then - if ! (echo -e "$COMPATIBILITY_INFO" | grep -q 'MNESIA_OK'); then + if ! (echo -e "$COMPATIBILITY_INFO" | $GREP -q 'MNESIA_OK'); then logerr "DB Backend is RLOG, but an incompatible OTP version has been detected. Falling back to using Mnesia DB backend." 
export EMQX_NODE__DB_BACKEND=mnesia export EMQX_NODE__DB_ROLE=core @@ -1277,7 +1278,7 @@ case "${COMMAND}" in then "$REL_DIR/elixir" \ --hidden \ - --name "rand-$(relx_gen_id)-$NAME" \ + --name "rand-$(gen_node_id)-$NAME" \ --cookie "$COOKIE" \ --boot "$REL_DIR/start_clean" \ --boot-var RELEASE_LIB "$ERTS_LIB_DIR" \ diff --git a/build b/build index 3c558c19a..0846d6057 100755 --- a/build +++ b/build @@ -92,7 +92,7 @@ log() { } make_docs() { - local libs_dir1 libs_dir2 libs_dir3 docdir dashboard_www_static + local libs_dir1 libs_dir2 libs_dir3 docdir libs_dir1="$("$FIND" "_build/$PROFILE/lib/" -maxdepth 2 -name ebin -type d)" if [ -d "_build/default/lib/" ]; then libs_dir2="$("$FIND" "_build/default/lib/" -maxdepth 2 -name ebin -type d)" @@ -113,35 +113,32 @@ make_docs() { ;; esac docdir="_build/docgen/$PROFILE" - dashboard_www_static='apps/emqx_dashboard/priv/www/static/' - mkdir -p "$docdir" "$dashboard_www_static" + mkdir -p "$docdir" # shellcheck disable=SC2086 erl -noshell -pa $libs_dir1 $libs_dir2 $libs_dir3 -eval \ - "I18nFile = filename:join([apps, emqx_dashboard, priv, 'i18n.conf']), \ - ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE, I18nFile), \ + "ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE), \ halt(0)." 
- cp "$docdir"/bridge-api-*.json "$dashboard_www_static" - cp "$docdir"/hot-config-schema-*.json "$dashboard_www_static" } assert_no_compile_time_only_deps() { - if [ "$("$FIND" "_build/$PROFILE/rel/emqx/lib/" -maxdepth 1 -name 'gpb-*' -type d)" != "" ]; then - echo "gpb should not be included in the release" - exit 1 - fi + : } -make_rel() { +just_compile() { ./scripts/pre-compile.sh "$PROFILE" # make_elixir_rel always create rebar.lock # delete it to make git clone + checkout work because we use shallow close for rebar deps rm -f rebar.lock # compile all beams ./rebar3 as "$PROFILE" compile - # generate docs (require beam compiled), generated to etc and priv dirs make_docs +} + +make_rel() { + local release_or_tar="${1}" + just_compile # now assemble the release tar - ./rebar3 as "$PROFILE" tar + ./rebar3 as "$PROFILE" "$release_or_tar" assert_no_compile_time_only_deps } @@ -227,7 +224,7 @@ make_tgz() { else # build the src_tarball again to ensure relup is included # elixir does not have relup yet. - make_rel + make_rel tar local relpath="_build/${PROFILE}/rel/emqx" full_vsn="$(./pkg-vsn.sh "$PROFILE" --long)" @@ -338,17 +335,22 @@ make_docker() { EMQX_RUNNER="${EMQX_RUNNER:-${EMQX_DEFAULT_RUNNER}}" EMQX_DOCKERFILE="${EMQX_DOCKERFILE:-deploy/docker/Dockerfile}" if [[ "$PROFILE" = *-elixir ]]; then - PKG_VSN="$PKG_VSN-elixir" + PKG_VSN="$PKG_VSN-elixir" fi local default_tag="emqx/${PROFILE%%-elixir}:${PKG_VSN}" EMQX_IMAGE_TAG="${EMQX_IMAGE_TAG:-$default_tag}" - + ## extra_deps is a comma separated list of debian 11 package names + local extra_deps='' + if [[ "$PROFILE" = *enterprise* ]]; then + extra_deps='libsasl2-2' + fi echo '_build' >> ./.dockerignore set -x docker build --no-cache --pull \ --build-arg BUILD_FROM="${EMQX_BUILDER}" \ --build-arg RUN_FROM="${EMQX_RUNNER}" \ - --build-arg EMQX_NAME="$PROFILE" \ + --build-arg EMQX_NAME="${PROFILE}" \ + --build-arg EXTRA_DEPS="${extra_deps}" \ --tag "${EMQX_IMAGE_TAG}" \ -f "${EMQX_DOCKERFILE}" . 
[[ "${DEBUG:-}" -eq 1 ]] || set +x @@ -381,11 +383,14 @@ export_elixir_release_vars() { log "building artifact=$ARTIFACT for profile=$PROFILE" case "$ARTIFACT" in + apps) + just_compile + ;; doc|docs) make_docs ;; rel) - make_rel + make_rel release ;; relup) make_relup @@ -404,7 +409,7 @@ case "$ARTIFACT" in if [ "${IS_ELIXIR:-}" = 'yes' ]; then make_elixir_rel else - make_rel + make_rel tar fi env EMQX_REL="$(pwd)" \ EMQX_BUILD="${PROFILE}" \ diff --git a/changes/ce/feat-10358.en.md b/changes/ce/feat-10358.en.md deleted file mode 100644 index e6d05c84b..000000000 --- a/changes/ce/feat-10358.en.md +++ /dev/null @@ -1,2 +0,0 @@ -Hide `flapping_detect/conn_congestion/stats` configuration. -Deprecate `flapping_detect.enable`. diff --git a/changes/ce/feat-10381.en.md b/changes/ce/feat-10381.en.md deleted file mode 100644 index 3ea11188f..000000000 --- a/changes/ce/feat-10381.en.md +++ /dev/null @@ -1 +0,0 @@ -Hide the `auto_subscribe` configuration items so that they can be modified later only through the HTTP API. diff --git a/changes/ce/feat-10385.en.md b/changes/ce/feat-10385.en.md deleted file mode 100644 index 667e01890..000000000 --- a/changes/ce/feat-10385.en.md +++ /dev/null @@ -1 +0,0 @@ -Hide data items(rule_engine/bridge/authz/authn) from configuration files and documentation. diff --git a/changes/ce/feat-10389.en.md b/changes/ce/feat-10389.en.md new file mode 100644 index 000000000..3789e80ae --- /dev/null +++ b/changes/ce/feat-10389.en.md @@ -0,0 +1,2 @@ +Unify the config formats for `cluster.core_nodes` and `cluster.statics.seeds`. +Now they both support formats in array `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` or semicolon-separated string `"emqx1@127.0.0.1,emqx2@127.0.0.1"`. 
diff --git a/changes/ce/feat-10392.en.md b/changes/ce/feat-10392.en.md new file mode 100644 index 000000000..04c6c85cc --- /dev/null +++ b/changes/ce/feat-10392.en.md @@ -0,0 +1 @@ +A new function to convert a formatted date to an integer timestamp has been added: date_to_unix_ts/3 diff --git a/changes/ce/feat-10426.en.md b/changes/ce/feat-10426.en.md new file mode 100644 index 000000000..8575347d6 --- /dev/null +++ b/changes/ce/feat-10426.en.md @@ -0,0 +1,4 @@ +Optimize the configuration priority mechanism to fix the issue where the configuration +changes made to `etc/emqx.conf` do not take effect after restarting EMQX. + +More introduction about the new mechanism: [Configure Override Rules](https://www.emqx.io/docs/en/v5.0/configuration/configuration.html#configure-override-rules) diff --git a/changes/ce/feat-10457.en.md b/changes/ce/feat-10457.en.md new file mode 100644 index 000000000..966569a1c --- /dev/null +++ b/changes/ce/feat-10457.en.md @@ -0,0 +1,4 @@ +Deprecates the integration with StatsD. + +There seemed to be no user using StatsD integration, so we have decided to hide this feature +for now. We will either remove or revive it based on requirements in the future. diff --git a/changes/ce/feat-10458.en.md b/changes/ce/feat-10458.en.md new file mode 100644 index 000000000..655885145 --- /dev/null +++ b/changes/ce/feat-10458.en.md @@ -0,0 +1,3 @@ +Set the level of plugin configuration options to low level, +in most cases, users only need to manage plugins on the dashboard +without the need for manual modification, so we lowered the level. diff --git a/changes/ce/feat-10491.en.md b/changes/ce/feat-10491.en.md new file mode 100644 index 000000000..e1c38b6bb --- /dev/null +++ b/changes/ce/feat-10491.en.md @@ -0,0 +1 @@ +Rename `etcd.ssl` to `etcd.ssl_options` to keep all of SSL options consistent in the configuration file. 
diff --git a/changes/ce/feat-10512.en.md b/changes/ce/feat-10512.en.md new file mode 100644 index 000000000..e6c162742 --- /dev/null +++ b/changes/ce/feat-10512.en.md @@ -0,0 +1,3 @@ +Improved the storage format of Unicode characters in data files, +Now we can store Unicode characters normally. +For example: "SELECT * FROM \"t/1\" WHERE clientid = \"-测试专用-\"" diff --git a/changes/ce/feat-10568.en.md b/changes/ce/feat-10568.en.md new file mode 100644 index 000000000..dba1abdea --- /dev/null +++ b/changes/ce/feat-10568.en.md @@ -0,0 +1 @@ +Add shutdown counter information to `emqx ctl listeners` command diff --git a/changes/ce/feat-10571.en.md b/changes/ce/feat-10571.en.md new file mode 100644 index 000000000..49b564ef9 --- /dev/null +++ b/changes/ce/feat-10571.en.md @@ -0,0 +1,2 @@ +Do not emit useless crash report when EMQX stops. +Previously, when EMQX (and `emqx_topic_metrics` in particular) stopped and removed underlying tables, some messages were still being handled and crashed. diff --git a/changes/ce/feat-10588.en.md b/changes/ce/feat-10588.en.md new file mode 100644 index 000000000..2f907a549 --- /dev/null +++ b/changes/ce/feat-10588.en.md @@ -0,0 +1,2 @@ +Increase the time precision of trace logs from second to microsecond. +For example, change from `2023-05-02T08:43:50+00:00` to `2023-05-02T08:43:50.237945+00:00`. diff --git a/changes/ce/feat-10623.en.md b/changes/ce/feat-10623.en.md new file mode 100644 index 000000000..9c8ce858f --- /dev/null +++ b/changes/ce/feat-10623.en.md @@ -0,0 +1 @@ +Renamed `max_message_queue_len` to `max_mailbox_size` in the `force_shutdown` configuration. Old name is kept as an alias, so this change is backward compatible. diff --git a/changes/ce/fix-10369.en.md b/changes/ce/fix-10369.en.md new file mode 100644 index 000000000..3c32d33f3 --- /dev/null +++ b/changes/ce/fix-10369.en.md @@ -0,0 +1,6 @@ +Fix error in `/api/v5/monitor_current` API endpoint that happens when some EMQX nodes are down. 
+ +Prior to this fix, sometimes the request returned HTTP code 500 and the following message: +``` +{"code":"INTERNAL_ERROR","message":"error, badarg, [{erlang,'++',[{error,nodedown},[{node,'emqx@10.42.0.150'}]], ... +``` diff --git a/changes/ce/fix-10407.en.md b/changes/ce/fix-10407.en.md new file mode 100644 index 000000000..d9df9ce69 --- /dev/null +++ b/changes/ce/fix-10407.en.md @@ -0,0 +1,7 @@ +Improve 'emqx_alarm' performance by using Mnesia dirty operations and avoiding +unnecessary calls from 'emqx_resource_manager' to reactivate alarms that have been already activated. +Use new safe 'emqx_alarm' API to activate/deactivate alarms to ensure that emqx_resource_manager +doesn't crash because of alarm timeouts. +The crashes were possible when the following conditions co-occurred: + - a relatively high number of failing resources, e.g. bridges tried to activate alarms on re-occurring errors; + - the system experienced a very high load. diff --git a/changes/ce/fix-10420.en.md b/changes/ce/fix-10420.en.md new file mode 100644 index 000000000..70afd8b45 --- /dev/null +++ b/changes/ce/fix-10420.en.md @@ -0,0 +1,3 @@ +Fix HTTP path handling when composing the URL for the HTTP requests in authentication and authorization modules. +* Avoid unnecessary URL normalization since we cannot assume that external servers treat original and normalized URLs equally. This led to bugs like [#10411](https://github.com/emqx/emqx/issues/10411). +* Fix the issue that path segments could be HTTP encoded twice. diff --git a/changes/ce/fix-10422.en.md b/changes/ce/fix-10422.en.md new file mode 100644 index 000000000..7c18ccf32 --- /dev/null +++ b/changes/ce/fix-10422.en.md @@ -0,0 +1 @@ +Fixed a bug where external plugins could not be configured via environment variables in a lone-node cluster. 
diff --git a/changes/ce/fix-10448.en.md b/changes/ce/fix-10448.en.md new file mode 100644 index 000000000..c35ecdd8b --- /dev/null +++ b/changes/ce/fix-10448.en.md @@ -0,0 +1,3 @@ +Fix a compatibility issue of limiter configuration introduced by v5.0.23 which broke the upgrade from previous versions if the `capacity` is `infinity`. + +In v5.0.23 we have replaced `capacity` with `burst`. After this fix, a `capacity = infinity` config will be automatically converted to equivalent `burst = 0`. diff --git a/changes/ce/fix-10462.en.md b/changes/ce/fix-10462.en.md new file mode 100644 index 000000000..9e7922be2 --- /dev/null +++ b/changes/ce/fix-10462.en.md @@ -0,0 +1,4 @@ +Deprecate config `broker.shared_dispatch_ack_enabled`. +This was designed to avoid dispatching messages to a shared-subscription session which has the client disconnected. +However since v5.0.9, this feature is no longer useful because the shared-subscription messages in an expired session will be redispatched to other sessions in the group. +See also: https://github.com/emqx/emqx/pull/9104 diff --git a/changes/ce/fix-10463.en.md b/changes/ce/fix-10463.en.md new file mode 100644 index 000000000..9d57bc1b0 --- /dev/null +++ b/changes/ce/fix-10463.en.md @@ -0,0 +1,2 @@ +Improve bridges API error handling. +If Webhook bridge URL is not valid, bridges API will return '400' error instead of '500'. diff --git a/changes/ce/fix-10484.en.md b/changes/ce/fix-10484.en.md new file mode 100644 index 000000000..d1a501384 --- /dev/null +++ b/changes/ce/fix-10484.en.md @@ -0,0 +1,3 @@ +Fix the issue that the priority of the configuration cannot be set during rolling upgrade. +For example, when authorization is modified in v5.0.21 and then upgraded v5.0.23 through rolling upgrade, +the authorization will be restored to the default. 
diff --git a/changes/ce/fix-10495.en.md b/changes/ce/fix-10495.en.md new file mode 100644 index 000000000..222f3dd5a --- /dev/null +++ b/changes/ce/fix-10495.en.md @@ -0,0 +1 @@ +Add back the limiter API `/configs/limiter`, which was deleted by mistake. diff --git a/changes/ce/fix-10500.en.md b/changes/ce/fix-10500.en.md new file mode 100644 index 000000000..730dfb6e5 --- /dev/null +++ b/changes/ce/fix-10500.en.md @@ -0,0 +1,12 @@ +Add several fixes, enhancements and features in Mria: + - protect `mria:join/1,2` with a global lock to prevent conflicts between + two nodes trying to join each other simultaneously + [Mria PR](https://github.com/emqx/mria/pull/137) + - implement new function `mria:sync_transaction/4,3,2`, which blocks the caller until + a transaction is imported to the local node (if the local node is a replicant, otherwise, + it behaves exactly the same as `mria:transaction/3,2`) + [Mria PR](https://github.com/emqx/mria/pull/136) + - optimize `mria:running_nodes/0` + [Mria PR](https://github.com/emqx/mria/pull/135) + - optimize `mria:ro_transaction/2` when called on a replicant node + [Mria PR](https://github.com/emqx/mria/pull/134). diff --git a/changes/ce/fix-10518.en.md b/changes/ce/fix-10518.en.md new file mode 100644 index 000000000..87d001e91 --- /dev/null +++ b/changes/ce/fix-10518.en.md @@ -0,0 +1,6 @@ +Add the following fixes and features in Mria: + - call `mria_rlog:role/1` safely in mria_membership to ensure that mria_membership + gen_server won't crash if RPC to another node fails + [Mria PR](https://github.com/emqx/mria/pull/139) + - Add extra field to ?rlog_sync table to facilitate extending this functionality in future + [Mria PR](https://github.com/emqx/mria/pull/138). 
diff --git a/changes/ce/fix-10556.en.md b/changes/ce/fix-10556.en.md new file mode 100644 index 000000000..019ab3ad9 --- /dev/null +++ b/changes/ce/fix-10556.en.md @@ -0,0 +1 @@ +Wrap potentially sensitive data in `emqx_connector_http` if `Authorization` headers are being passed at initialization. diff --git a/changes/ce/fix-10636.md b/changes/ce/fix-10636.md new file mode 100644 index 000000000..56bf4bd7e --- /dev/null +++ b/changes/ce/fix-10636.md @@ -0,0 +1 @@ +An issue with the MongoDB connector related to the "Max Overflow" parameter has been fixed. Previously, the minimum limit for the parameter was incorrectly set to 1 instead of allowing a minimum value of 0. This issue has been fixed, and the "Max Overflow" parameter now correctly supports a minimum value of 0. diff --git a/changes/ce/perf-10376.en.md b/changes/ce/perf-10376.en.md new file mode 100644 index 000000000..d585ad5b2 --- /dev/null +++ b/changes/ce/perf-10376.en.md @@ -0,0 +1,6 @@ +Simplify the configuration of the limiter feature and optimize some codes +- Rename `message_in` to `messages` +- Rename `bytes_in` to `bytes` +- Use `burst` instead of `capacity` +- Hide non-importance fields +- Optimize limiter instances in different rate settings diff --git a/changes/ce/perf-10417.en.md b/changes/ce/perf-10417.en.md new file mode 100644 index 000000000..ad83d2cf4 --- /dev/null +++ b/changes/ce/perf-10417.en.md @@ -0,0 +1 @@ +Improve get config performance by eliminating temporary references. diff --git a/changes/ce/perf-10430.en.md b/changes/ce/perf-10430.en.md new file mode 100644 index 000000000..e01d8ba64 --- /dev/null +++ b/changes/ce/perf-10430.en.md @@ -0,0 +1,2 @@ +Simplify the configuration of the `retainer` feature. +- Mark `flow_control` as non-importance field. 
diff --git a/changes/ce/perf-10487.en.md b/changes/ce/perf-10487.en.md new file mode 100644 index 000000000..6f2b2d156 --- /dev/null +++ b/changes/ce/perf-10487.en.md @@ -0,0 +1 @@ +Optimize the instance of limiter for whose rate is `infinity` to reduce memory and CPU usage. diff --git a/changes/ce/perf-10490.en.md b/changes/ce/perf-10490.en.md new file mode 100644 index 000000000..5c1c183a5 --- /dev/null +++ b/changes/ce/perf-10490.en.md @@ -0,0 +1 @@ +Remove the default limit of connect rate which used to be `1000/s` diff --git a/changes/ce/perf-10525.en.md b/changes/ce/perf-10525.en.md new file mode 100644 index 000000000..b67e88289 --- /dev/null +++ b/changes/ce/perf-10525.en.md @@ -0,0 +1,2 @@ +Reduce resource usage per MQTT packet handling. + diff --git a/changes/ce/perf-10528.en.md b/changes/ce/perf-10528.en.md new file mode 100644 index 000000000..46adc5220 --- /dev/null +++ b/changes/ce/perf-10528.en.md @@ -0,0 +1 @@ +Reduce memory footprint in hot code path. diff --git a/changes/ce/perf-10591.en.md b/changes/ce/perf-10591.en.md new file mode 100644 index 000000000..2e14312d1 --- /dev/null +++ b/changes/ce/perf-10591.en.md @@ -0,0 +1,3 @@ +Improve the configuration of the limiter. +- Simplify the memory representation of the limiter configuration. +- Make sure the node-level limiter can really work when the listener's limiter configuration is omitted. diff --git a/changes/ee/feat-10378.en.md b/changes/ee/feat-10378.en.md new file mode 100644 index 000000000..ebdd299c8 --- /dev/null +++ b/changes/ee/feat-10378.en.md @@ -0,0 +1 @@ +Implement Pulsar Producer Bridge, which supports publishing messages to Pulsar from MQTT topics. diff --git a/changes/ee/feat-10408.en.md b/changes/ee/feat-10408.en.md new file mode 100644 index 000000000..75cc4b945 --- /dev/null +++ b/changes/ee/feat-10408.en.md @@ -0,0 +1 @@ +The rule engine SQL-like language has got three more built-in functions for creating values of the MongoDB date type. 
These functions are useful for rules with MongoDB bridge actions only and not supported in other actions. diff --git a/changes/ee/feat-10409.en.md b/changes/ee/feat-10409.en.md new file mode 100644 index 000000000..dfa9bfa76 --- /dev/null +++ b/changes/ee/feat-10409.en.md @@ -0,0 +1 @@ +Add support for [Protocol Buffers](https://protobuf.dev/) schemas in Schema Registry. diff --git a/changes/ee/feat-10425.en.md b/changes/ee/feat-10425.en.md new file mode 100644 index 000000000..7144241df --- /dev/null +++ b/changes/ee/feat-10425.en.md @@ -0,0 +1 @@ +Implement OpenTSDB data bridge. diff --git a/changes/ee/feat-10498.en.md b/changes/ee/feat-10498.en.md new file mode 100644 index 000000000..7222f8957 --- /dev/null +++ b/changes/ee/feat-10498.en.md @@ -0,0 +1 @@ +Implement Oracle Database Bridge, which supports publishing messages to Oracle Database from MQTT topics. diff --git a/changes/ee/feat-10560.en.md b/changes/ee/feat-10560.en.md new file mode 100644 index 000000000..c5bc59d69 --- /dev/null +++ b/changes/ee/feat-10560.en.md @@ -0,0 +1 @@ +Add enterprise data bridge for Apache IoTDB. diff --git a/changes/v5.0.23.en.md b/changes/v5.0.23.en.md new file mode 100644 index 000000000..6d016d2da --- /dev/null +++ b/changes/v5.0.23.en.md @@ -0,0 +1,62 @@ +# v5.0.23 + +## Enhancements + +- [#10156](https://github.com/emqx/emqx/pull/10156) Change the priority of the configuration: + 1. If it is a new installation of EMQX, the priority of configuration is `ENV > emqx.conf > HTTP API`. + 2. If EMQX is upgraded from an old version (i.e., the cluster-override.conf file still exists in EMQX's data directory), then the configuration priority remains the same as before. That is, `HTTP API > ENV > emqx.conf`. + + Deprecated data/configs/local-override.conf. + + Stabilizing the HTTP API for hot updates. + +- [#10354](https://github.com/emqx/emqx/pull/10354) More specific error messages when configured with bad max_heap_size value. 
+ Log current value and the max value when the `message_queue_too_long` error is thrown. + +- [#10359](https://github.com/emqx/emqx/pull/10359) Metrics now are not implicitly collected in places where API handlers don't make any use of them. Instead, a separate backplane RPC gathers cluster-wide metrics. + +- [#10373](https://github.com/emqx/emqx/pull/10373) Deprecate the trace.payload_encode configuration. + Add payload_encode=[text,hidden,hex] option when creating a trace via HTTP API. + +- [#10389](https://github.com/emqx/emqx/pull/10389) Unify the config formats for `cluster.core_nodes` and `cluster.static.seeds`. + Now they both support formats in array `["emqx1@127.0.0.1", "emqx2@127.0.0.1"]` or semicolon-separated string `"emqx1@127.0.0.1,emqx2@127.0.0.1"`. + +- [#10391](https://github.com/emqx/emqx/pull/10391) Hide a large number of advanced options to simplify the configuration file. + + That includes `rewrite`, `topic_metric`, `persistent_session_store`, `overload_protection`, + `flapping_detect`, `conn_congestion`, `stats`, `auto_subscribe`, `broker_perf`, + `shared_subscription_group`, `slow_subs`, `ssl_options.user_lookup_fun` and some advanced items + in `node` and `dashboard` section, [#10358](https://github.com/emqx/emqx/pull/10358), + [#10381](https://github.com/emqx/emqx/pull/10381), [#10385](https://github.com/emqx/emqx/pull/10385). + +- [#10392](https://github.com/emqx/emqx/pull/10392) A new function to convert a formatted date to an integer timestamp has been added: date_to_unix_ts/3 + +- [#10404](https://github.com/emqx/emqx/pull/10404) Change the default queue mode for buffer workers to `memory_only`. + Before this change, the default queue mode was `volatile_offload`. When under high message rate pressure and when the resource is not keeping up with such rate, the buffer performance degraded a lot due to the constant disk operations. 
+ +- [#10426](https://github.com/emqx/emqx/pull/10426) Optimize the configuration priority mechanism to fix the issue where the configuration + changes made to `etc/emqx.conf` do not take effect after restarting EMQX. + + More introduction about the new mechanism: [Configure Override Rules](https://www.emqx.io/docs/en/v5.0/configuration/configuration.html#configure-override-rules) + +- [#10376](https://github.com/emqx/emqx/pull/10376) Simplify the configuration of the limiter feature and optimize some codes + - Rename `message_in` to `messages` + - Rename `bytes_in` to `bytes` + - Use `burst` instead of `capacity` + - Hide non-important fields + - Optimize limiter instances in different rate settings + +- [#10430](https://github.com/emqx/emqx/pull/10430) Simplify the configuration of the `retainer` feature. + - Mark `flow_control` as non-important field. + +## Bug Fixes + +- [#10369](https://github.com/emqx/emqx/pull/10369) Fix error in `/api/v5/monitor_current` API endpoint that happens when some EMQX nodes are down. + + Prior to this fix, sometimes the request returned HTTP code 500 and the following message: + ``` + {"code":"INTERNAL_ERROR","message":"error, badarg, [{erlang,'++',[{error,nodedown},[{node,'emqx@10.42.0.150'}]], ... + ``` + +- [#10410](https://github.com/emqx/emqx/pull/10410) Fix config check failed when gateways are configured in emqx.conf. + This issue was first introduced in v5.0.22 via [#10278](https://github.com/emqx/emqx/pull/10278), the boot-time config check was missing. diff --git a/changes/v5.0.24.en.md b/changes/v5.0.24.en.md new file mode 100644 index 000000000..4fa5cdd4f --- /dev/null +++ b/changes/v5.0.24.en.md @@ -0,0 +1,89 @@ +# v5.0.24 + +## Enhancements + +- [#10457](https://github.com/emqx/emqx/pull/10457) Deprecates the integration with StatsD. + + There seemed to be no user using StatsD integration, so we have decided to hide this feature + for now. We will either remove or revive it based on requirements in the future. 
+ +- [#10458](https://github.com/emqx/emqx/pull/10458) Set the level of plugin configuration options to low level, + in most cases, users only need to manage plugins on the dashboard + without the need for manual modification, so we lowered the level. + +- [#10491](https://github.com/emqx/emqx/pull/10491) Rename `etcd.ssl` to `etcd.ssl_options` to keep all of SSL options consistent in the configuration file. + +- [#10512](https://github.com/emqx/emqx/pull/10512) Improved the storage format of Unicode characters in data files, + Now we can store Unicode characters normally. + For example: "SELECT * FROM \"t/1\" WHERE clientid = \"-测试专用-\"" + +- [#10487](https://github.com/emqx/emqx/pull/10487) Optimize the instance of limiter for whose rate is `infinity` to reduce memory and CPU usage. + +- [#10490](https://github.com/emqx/emqx/pull/10490) Remove the default limit of connect rate which used to be `1000/s` + +## Bug Fixes + +- [#10407](https://github.com/emqx/emqx/pull/10407) Improve 'emqx_alarm' performance by using Mnesia dirty operations and avoiding + unnecessary calls from 'emqx_resource_manager' to reactivate alarms that have been already activated. + Use new safe 'emqx_alarm' API to activate/deactivate alarms to ensure that emqx_resource_manager + doesn't crash because of alarm timeouts. + The crashes were possible when the following conditions co-occurred: + - a relatively high number of failing resources, e.g. bridges tried to activate alarms on re-occurring errors; + - the system experienced a very high load. + +- [#10420](https://github.com/emqx/emqx/pull/10420) Fix HTTP path handling when composing the URL for the HTTP requests in authentication and authorization modules. + * Avoid unnecessary URL normalization since we cannot assume that external servers treat original and normalized URLs equally. This led to bugs like [#10411](https://github.com/emqx/emqx/issues/10411). + * Fix the issue that path segments could be HTTP encoded twice. 
+ +- [#10422](https://github.com/emqx/emqx/pull/10422) Fixed a bug where external plugins could not be configured via environment variables in a lone-node cluster. + +- [#10448](https://github.com/emqx/emqx/pull/10448) Fix a compatibility issue of limiter configuration introduced by v5.0.23 which broke the upgrade from previous versions if the `capacity` is `infinity`. + + In v5.0.23 we have replaced `capacity` with `burst`. After this fix, a `capacity = infinity` config will be automatically converted to equivalent `burst = 0`. + +- [#10449](https://github.com/emqx/emqx/pull/10449) Validate the ssl_options and header configurations when creating authentication http (`authn_http`). + Prior to this, incorrect `ssl` configuration could result in successful creation but the entire authn being unusable. + +- [#10455](https://github.com/emqx/emqx/pull/10455) Fixed an issue that could cause (otherwise harmless) noise in the logs. + + During some particularly slow synchronous calls to bridges, some late replies could be sent to connection processes that were no longer expecting a reply, and then emit an error log like: + + ``` + 2023-04-19T18:24:35.350233+00:00 [error] msg: unexpected_info, mfa: emqx_channel:handle_info/2, line: 1278, peername: 172.22.0.1:36384, clientid: caribdis_bench_sub_1137967633_4788, info: {#Ref<0.408802983.1941504010.189402>,{ok,200,[{<<"cache-control">>,<<"max-age=0, ...">>}} + ``` + + Those logs are harmless, but they could flood and worry the users without need. + +- [#10462](https://github.com/emqx/emqx/pull/10462) Deprecate config `broker.shared_dispatch_ack_enabled`. + This was designed to avoid dispatching messages to a shared-subscription session which has the client disconnected. + However since v5.0.9, this feature is no longer useful because the shared-subscription messages in an expired session will be redispatched to other sessions in the group. 
+ See also: https://github.com/emqx/emqx/pull/9104 + +- [#10463](https://github.com/emqx/emqx/pull/10463) Improve bridges API error handling. + If Webhook bridge URL is not valid, bridges API will return '400' error instead of '500'. + +- [#10484](https://github.com/emqx/emqx/pull/10484) Fix the issue that the priority of the configuration cannot be set during rolling upgrade. + For example, when authorization is modified in v5.0.21 and then upgraded v5.0.23 through rolling upgrade, + the authorization will be restored to the default. + +- [#10495](https://github.com/emqx/emqx/pull/10495) Add the limiter API `/configs/limiter` which was deleted by mistake back. + +- [#10500](https://github.com/emqx/emqx/pull/10500) Add several fixes, enhancements and features in Mria: + - protect `mria:join/1,2` with a global lock to prevent conflicts between + two nodes trying to join each other simultaneously + [Mria PR](https://github.com/emqx/mria/pull/137) + - implement new function `mria:sync_transaction/4,3,2`, which blocks the caller until + a transaction is imported to the local node (if the local node is a replicant, otherwise, + it behaves exactly the same as `mria:transaction/3,2`) + [Mria PR](https://github.com/emqx/mria/pull/136) + - optimize `mria:running_nodes/0` + [Mria PR](https://github.com/emqx/mria/pull/135) + - optimize `mria:ro_transaction/2` when called on a replicant node + [Mria PR](https://github.com/emqx/mria/pull/134). + +- [#10518](https://github.com/emqx/emqx/pull/10518) Add the following fixes and features in Mria: + - call `mria_rlog:role/1` safely in mria_membership to ensure that mria_membership + gen_server won't crash if RPC to another node fails + [Mria PR](https://github.com/emqx/mria/pull/139) + - Add extra field to ?rlog_sync table to facilitate extending this functionality in future + [Mria PR](https://github.com/emqx/mria/pull/138). 
diff --git a/deploy/charts/emqx/Chart.yaml b/deploy/charts/emqx/Chart.yaml index ecc211e35..9c23f7c15 100644 --- a/deploy/charts/emqx/Chart.yaml +++ b/deploy/charts/emqx/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 5.0.22 +version: 5.0.24 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 5.0.22 +appVersion: 5.0.24 diff --git a/deploy/docker/Dockerfile b/deploy/docker/Dockerfile index 8f04c433c..75f5675f6 100644 --- a/deploy/docker/Dockerfile +++ b/deploy/docker/Dockerfile @@ -19,6 +19,7 @@ RUN export PROFILE=${EMQX_NAME%%-elixir} \ && mv $EMQX_REL_PATH /emqx-rel FROM $RUN_FROM +ARG EXTRA_DEPS='' # Elixir complains if runs without UTF-8 ENV LC_ALL=C.UTF-8 @@ -30,7 +31,7 @@ COPY --from=builder /emqx-rel/emqx /opt/emqx RUN ln -s /opt/emqx/bin/* /usr/local/bin/ RUN apt-get update; \ - apt-get install -y --no-install-recommends ca-certificates procps; \ + apt-get install -y --no-install-recommends ca-certificates procps $(echo "${EXTRA_DEPS}" | tr ',' ' '); \ rm -rf /var/lib/apt/lists/* WORKDIR /opt/emqx diff --git a/deploy/packages/emqx.service b/deploy/packages/emqx.service index 2dbe550bc..ce342b632 100644 --- a/deploy/packages/emqx.service +++ b/deploy/packages/emqx.service @@ -22,8 +22,10 @@ LimitNOFILE=1048576 # ExecStop is commented out so systemd will send a SIGTERM when 'systemctl stop'. 
# SIGTERM is handled by EMQX and it then performs a graceful shutdown -# It's better than command 'emqx stop' because it needs to ping the node -# ExecStop=/bin/bash /usr/bin/emqx stop +# emqx stop will ping node, always return 0 to make sure next command will be executed +ExecStop=/bin/bash -c '/usr/bin/emqx stop; exit 0' +# If the process is still running, force kill it +ExecStop=/bin/bash -c 'if ps -p $MAINPID >/dev/null 2>&1; then kill -15 $MAINPID; fi' # Wait long enough before force kill for graceful shutdown TimeoutStopSec=120s diff --git a/dev b/dev new file mode 100755 index 000000000..18e4cbc93 --- /dev/null +++ b/dev @@ -0,0 +1,405 @@ +#!/usr/bin/env bash + +set -euo pipefail + +UNAME="$(uname -s)" + +PROJ_ROOT="$(git rev-parse --show-toplevel)" +cd "$PROJ_ROOT" + +logerr() { + if [ "${TERM:-dumb}" = dumb ]; then + echo -e "ERROR: $*" 1>&2 + else + echo -e "$(tput setaf 1)ERROR: $*$(tput sgr0)" 1>&2 + fi +} + +usage() { +cat <.config and vm.