diff --git a/.ci/docker-compose-file/.env b/.ci/docker-compose-file/.env index 1d602e5bd..12bc988bf 100644 --- a/.ci/docker-compose-file/.env +++ b/.ci/docker-compose-file/.env @@ -8,6 +8,7 @@ TDENGINE_TAG=3.0.2.4 DYNAMO_TAG=1.21.0 CASSANDRA_TAG=3.11.6 MINIO_TAG=RELEASE.2023-03-20T20-16-18Z +OPENTS_TAG=9aa7f88 MS_IMAGE_ADDR=mcr.microsoft.com/mssql/server SQLSERVER_TAG=2019-CU19-ubuntu-20.04 diff --git a/.ci/docker-compose-file/docker-compose-opents.yaml b/.ci/docker-compose-file/docker-compose-opents.yaml new file mode 100644 index 000000000..545aeb015 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-opents.yaml @@ -0,0 +1,9 @@ +version: '3.9' + +services: + opents_server: + container_name: opents + image: petergrace/opentsdb-docker:${OPENTS_TAG} + restart: always + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-oracle.yaml b/.ci/docker-compose-file/docker-compose-oracle.yaml new file mode 100644 index 000000000..ea8965846 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-oracle.yaml @@ -0,0 +1,11 @@ +version: '3.9' + +services: + oracle_server: + container_name: oracle + image: oracleinanutshell/oracle-xe-11g:1.0.0 + restart: always + environment: + ORACLE_DISABLE_ASYNCH_IO: "true" + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-pulsar.yaml b/.ci/docker-compose-file/docker-compose-pulsar.yaml new file mode 100644 index 000000000..926000ae4 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-pulsar.yaml @@ -0,0 +1,32 @@ +version: '3' + +services: + pulsar: + container_name: pulsar + image: apachepulsar/pulsar:2.11.0 + # ports: + # - 6650:6650 + # - 8080:8080 + networks: + emqx_bridge: + volumes: + - ../../apps/emqx/etc/certs/cert.pem:/etc/certs/server.pem + - ../../apps/emqx/etc/certs/key.pem:/etc/certs/key.pem + - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.pem + restart: always + command: + - bash + - "-c" + - | + sed -i 's/^advertisedAddress=/#advertisedAddress=/' 
conf/standalone.conf + sed -ie 's/^brokerServicePort=.*/brokerServicePort=6649/' conf/standalone.conf + sed -i 's/^bindAddress=/#bindAddress=/' conf/standalone.conf + sed -i 's#^bindAddresses=#bindAddresses=plain:pulsar://0.0.0.0:6650,ssl:pulsar+ssl://0.0.0.0:6651,toxiproxy:pulsar://0.0.0.0:6652,toxiproxy_ssl:pulsar+ssl://0.0.0.0:6653#' conf/standalone.conf + sed -i 's#^advertisedAddress=#advertisedAddress=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653#' conf/standalone.conf + sed -i 's#^tlsCertificateFilePath=#tlsCertificateFilePath=/etc/certs/server.pem#' conf/standalone.conf + sed -i 's#^tlsTrustCertsFilePath=#tlsTrustCertsFilePath=/etc/certs/ca.pem#' conf/standalone.conf + sed -i 's#^tlsKeyFilePath=#tlsKeyFilePath=/etc/certs/key.pem#' conf/standalone.conf + sed -i 's#^tlsProtocols=#tlsProtocols=TLSv1.3,TLSv1.2#' conf/standalone.conf + sed -i 's#^tlsCiphers=#tlsCiphers=TLS_AES_256_GCM_SHA384#' conf/standalone.conf + echo 'advertisedListeners=plain:pulsar://pulsar:6650,ssl:pulsar+ssl://pulsar:6651,toxiproxy:pulsar://toxiproxy:6652,toxiproxy_ssl:pulsar+ssl://toxiproxy:6653' >> conf/standalone.conf + bin/pulsar standalone -nfw -nss diff --git a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml index ea5c099dc..f1d750dc5 100644 --- a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml +++ b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml @@ -43,6 +43,8 @@ services: - 19000:19000 # S3 TLS - 19100:19100 + # IOTDB + - 14242:4242 command: - "-host=0.0.0.0" - "-config=/config/toxiproxy.json" diff --git a/.ci/docker-compose-file/scripts/run-emqx.sh b/.ci/docker-compose-file/scripts/run-emqx.sh index 8f124aa63..fc61c9da1 100755 --- a/.ci/docker-compose-file/scripts/run-emqx.sh +++ b/.ci/docker-compose-file/scripts/run-emqx.sh @@ -20,8 +20,8 @@ esac { echo "HOCON_ENV_OVERRIDE_PREFIX=EMQX_" - echo 
"EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s" - echo "EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10" + echo "EMQX_MQTT__RETRY_INTERVAL=2s" + echo "EMQX_MQTT__MAX_TOPIC_ALIAS=10" echo "EMQX_AUTHORIZATION__SOURCES=[]" echo "EMQX_AUTHORIZATION__NO_MATCH=allow" } >> .ci/docker-compose-file/conf.cluster.env diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index 3934df0df..a5955e5e8 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -102,6 +102,30 @@ "upstream": "sqlserver:1433", "enabled": true }, + { + "name": "opents", + "listen": "0.0.0.0:4242", + "upstream": "opents:4242", + "enabled": true + }, + { + "name": "pulsar_plain", + "listen": "0.0.0.0:6652", + "upstream": "pulsar:6652", + "enabled": true + }, + { + "name": "pulsar_tls", + "listen": "0.0.0.0:6653", + "upstream": "pulsar:6653", + "enabled": true + }, + { + "name": "oracle", + "listen": "0.0.0.0:1521", + "upstream": "oracle:1521", + "enabled": true + }, { "name": "minio_tcp", "listen": "0.0.0.0:19000", diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index 60b4c1b84..dc34146bb 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -5,8 +5,6 @@ concurrency: cancel-in-progress: true on: - schedule: - - cron: '0 */6 * * *' push: branches: - 'ci/**' @@ -23,7 +21,6 @@ on: jobs: prepare: runs-on: ubuntu-22.04 - if: (github.repository_owner == 'emqx' && github.event_name == 'schedule') || github.event_name != 'schedule' container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04 outputs: BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }} @@ -134,14 +131,6 @@ jobs: with: name: ${{ matrix.profile }} path: source/_packages/${{ matrix.profile }}/ - - name: Send notification to Slack - uses: slackapi/slack-github-action@v1.23.0 - if: failure() && github.event_name == 'schedule' - env: - SLACK_WEBHOOK_URL: ${{ 
secrets.SLACK_WEBHOOK_URL }} - with: - payload: | - {"text": "Scheduled run of ${{ github.workflow }}@Windows failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} mac: needs: prepare @@ -182,14 +171,6 @@ jobs: with: name: ${{ matrix.profile }} path: _packages/${{ matrix.profile }}/ - - name: Send notification to Slack - uses: slackapi/slack-github-action@v1.23.0 - if: failure() && github.event_name == 'schedule' - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - with: - payload: | - {"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} linux: needs: prepare @@ -304,19 +285,11 @@ jobs: with: name: ${{ matrix.profile }} path: source/_packages/${{ matrix.profile }}/ - - name: Send notification to Slack - uses: slackapi/slack-github-action@v1.23.0 - if: failure() && github.event_name == 'schedule' - env: - SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} - with: - payload: | - {"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} publish_artifacts: runs-on: ubuntu-22.04 needs: [prepare, mac, linux] - if: needs.prepare.outputs.IS_EXACT_TAG && github.event_name != 'schedule' + if: needs.prepare.outputs.IS_EXACT_TAG strategy: fail-fast: false matrix: diff --git a/.github/workflows/build_packages_cron.yaml b/.github/workflows/build_packages_cron.yaml new file mode 100644 index 000000000..a90187221 --- /dev/null +++ b/.github/workflows/build_packages_cron.yaml @@ -0,0 +1,153 @@ +name: Scheduled build packages + +concurrency: + group: build-${{ github.event_name }}-${{ github.ref }} + cancel-in-progress: true + +on: + schedule: + - cron: '0 */6 * * *' + +jobs: + prepare: + runs-on: aws-amd64 + if: github.repository_owner == 'emqx' + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-24.3.4.2-3-ubuntu22.04 + strategy: + 
fail-fast: false + matrix: + profile: + - ['emqx', 'master'] + - ['emqx-enterprise', 'release-50'] + + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ matrix.profile[1] }} + path: source + fetch-depth: 0 + - name: get_all_deps + run: | + make -C source deps-all + zip -ryq source.zip source/* source/.[^.]* + - uses: actions/upload-artifact@v3 + with: + name: source-${{ matrix.profile[0] }} + path: source.zip + + linux: + needs: prepare + runs-on: aws-${{ matrix.arch }} + + strategy: + fail-fast: false + matrix: + profile: + - emqx + - emqx-enterprise + otp: + - 24.3.4.2-3 + arch: + - amd64 + os: + - debian10 + - amzn2 + builder: + - 5.0-34 + elixir: + - 1.13.4 + + defaults: + run: + shell: bash + + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }} + path: . + - name: unzip source code + run: unzip -q source.zip + - name: build emqx packages + working-directory: source + env: + BUILDER: ${{ matrix.builder }} + ELIXIR: ${{ matrix.elixir }} + OTP: ${{ matrix.otp }} + PROFILE: ${{ matrix.profile }} + ARCH: ${{ matrix.arch }} + OS: ${{ matrix.os }} + run: | + set -eu + PKGTYPES="tgz pkg" + IS_ELIXIR="no" + for PKGTYPE in ${PKGTYPES}; + do + ./scripts/buildx.sh \ + --profile "${PROFILE}" \ + --pkgtype "${PKGTYPE}" \ + --arch "${ARCH}" \ + --elixir "${IS_ELIXIR}" \ + --builder "ghcr.io/emqx/emqx-builder/${BUILDER}:${ELIXIR}-${OTP}-${OS}" + done + - uses: actions/upload-artifact@v3 + if: success() + with: + name: ${{ matrix.profile }} + path: source/_packages/${{ matrix.profile }}/ + - name: Send notification to Slack + uses: slackapi/slack-github-action@v1.23.0 + if: failure() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} + + mac: + needs: prepare + strategy: + fail-fast: false + 
matrix: + profile: + - emqx + otp: + - 24.3.4.2-3 + os: + - macos-12 + - macos-12-arm64 + runs-on: ${{ matrix.os }} + steps: + - uses: emqx/self-hosted-cleanup-action@v1.0.3 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }} + path: . + - name: unzip source code + run: | + ln -s . source + unzip -o -q source.zip + rm source source.zip + - uses: ./.github/actions/package-macos + with: + profile: ${{ matrix.profile }} + otp: ${{ matrix.otp }} + os: ${{ matrix.os }} + apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }} + apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }} + apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }} + apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }} + - uses: actions/upload-artifact@v3 + if: success() + with: + name: ${{ matrix.profile }} + path: _packages/${{ matrix.profile }}/ + - name: Send notification to Slack + uses: slackapi/slack-github-action@v1.23.0 + if: failure() + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "Scheduled build of ${{ matrix.profile }} package for ${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index 9ae5ba944..06bcb98a2 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -194,15 +194,12 @@ jobs: run: | CID=$(docker run -d --rm -P $EMQX_IMAGE_TAG) HTTP_PORT=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' $CID) - export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='yes' ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT docker stop $CID - name: test two nodes cluster with proto_dist=inet_tls in docker run: | ./scripts/test/start-two-nodes-in-docker.sh -P $EMQX_IMAGE_TAG $EMQX_IMAGE_OLD_VERSION_TAG HTTP_PORT=$(docker inspect 
--format='{{(index (index .NetworkSettings.Ports "18083/tcp") 0).HostPort}}' haproxy) - # versions before 5.0.22 have hidden fields included in the API spec - export EMQX_SMOKE_TEST_CHECK_HIDDEN_FIELDS='no' ./scripts/test/emqx-smoke-test.sh localhost $HTTP_PORT # cleanup ./scripts/test/start-two-nodes-in-docker.sh -c diff --git a/.github/workflows/performance_test.yaml b/.github/workflows/performance_test.yaml new file mode 100644 index 000000000..00ba15ed0 --- /dev/null +++ b/.github/workflows/performance_test.yaml @@ -0,0 +1,127 @@ +name: Performance Test Suite + +on: + push: + branches: + - 'perf/**' + schedule: + - cron: '0 1 * * *' + workflow_dispatch: + inputs: + ref: + required: false + +jobs: + prepare: + runs-on: ubuntu-latest + container: ghcr.io/emqx/emqx-builder/5.0-34:1.13.4-25.1.2-3-ubuntu20.04 + outputs: + BENCH_ID: ${{ steps.prepare.outputs.BENCH_ID }} + PACKAGE_FILE: ${{ steps.package_file.outputs.PACKAGE_FILE }} + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + ref: ${{ github.event.inputs.ref }} + - name: Work around https://github.com/actions/checkout/issues/766 + run: | + git config --global --add safe.directory "$GITHUB_WORKSPACE" + - id: prepare + run: | + echo "EMQX_NAME=emqx" >> $GITHUB_ENV + echo "CODE_PATH=$GITHUB_WORKSPACE" >> $GITHUB_ENV + echo "BENCH_ID=$(date --utc +%F)/emqx-$(./pkg-vsn.sh emqx)" >> $GITHUB_OUTPUT + - name: Build deb package + run: | + make ${EMQX_NAME}-pkg + ./scripts/pkg-tests.sh ${EMQX_NAME}-pkg + - name: Get package file name + id: package_file + run: | + echo "PACKAGE_FILE=$(find _packages/emqx -name 'emqx-*.deb' | head -n 1 | xargs basename)" >> $GITHUB_OUTPUT + - uses: actions/upload-artifact@v3 + with: + name: emqx-ubuntu20.04 + path: _packages/emqx/${{ steps.package_file.outputs.PACKAGE_FILE }} + + tf_emqx_perf_test: + runs-on: ubuntu-latest + needs: + - prepare + env: + TF_VAR_bench_id: ${{ needs.prepare.outputs.BENCH_ID }} + TF_VAR_package_file: ${{ needs.prepare.outputs.PACKAGE_FILE }} + 
TF_VAR_test_duration: 300 + TF_VAR_grafana_api_key: ${{ secrets.TF_EMQX_PERF_TEST_GRAFANA_API_KEY }} + TF_AWS_REGION: eu-north-1 + + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@v2 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PERF_TEST }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_PERF_TEST }} + aws-region: eu-north-1 + - name: Checkout tf-emqx-performance-test + uses: actions/checkout@v3 + with: + repository: emqx/tf-emqx-performance-test + path: tf-emqx-performance-test + - uses: actions/download-artifact@v3 + with: + name: emqx-ubuntu20.04 + path: tf-emqx-performance-test/ + - name: Setup Terraform + uses: hashicorp/setup-terraform@v2 + with: + terraform_wrapper: false + - name: terraform init + working-directory: ./tf-emqx-performance-test + run: | + terraform init + - name: terraform apply + working-directory: ./tf-emqx-performance-test + run: | + terraform apply -auto-approve + - name: Wait for test results + timeout-minutes: 30 + working-directory: ./tf-emqx-performance-test + id: test-results + run: | + sleep $TF_VAR_test_duration + until aws s3api head-object --bucket tf-emqx-performance-test --key "$TF_VAR_bench_id/DONE" > /dev/null 2>&1 + do + printf '.' 
+ sleep 10 + done + echo + aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/metrics.json" ./ + aws s3 cp "s3://tf-emqx-performance-test/$TF_VAR_bench_id/stats.json" ./ + echo MESSAGES_DELIVERED=$(cat metrics.json | jq '[.[]."messages.delivered"] | add') >> $GITHUB_OUTPUT + echo MESSAGES_DROPPED=$(cat metrics.json | jq '[.[]."messages.dropped"] | add') >> $GITHUB_OUTPUT + - name: Send notification to Slack + if: success() + uses: slackapi/slack-github-action@v1.23.0 + env: + SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }} + with: + payload: | + {"text": "EMQX performance test completed.\nMessages delivered: ${{ steps.test-results.outputs.MESSAGES_DELIVERED }}.\nMessages dropped: ${{ steps.test-results.outputs.MESSAGES_DROPPED }}.\nhttps://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} + - name: terraform destroy + if: always() + working-directory: ./tf-emqx-performance-test + run: | + terraform destroy -auto-approve + - uses: actions/upload-artifact@v3 + if: success() + with: + name: test-results + path: "./tf-emqx-performance-test/*.json" + - uses: actions/upload-artifact@v3 + if: always() + with: + name: terraform + path: | + ./tf-emqx-performance-test/.terraform + ./tf-emqx-performance-test/*.tfstate diff --git a/.github/workflows/run_fvt_tests.yaml b/.github/workflows/run_fvt_tests.yaml index 1bb0486f1..185c76be1 100644 --- a/.github/workflows/run_fvt_tests.yaml +++ b/.github/workflows/run_fvt_tests.yaml @@ -167,8 +167,8 @@ jobs: --set image.pullPolicy=Never \ --set image.tag=$EMQX_TAG \ --set emqxAclConfig="" \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \ + --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \ + --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \ --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \ --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \ deploy/charts/${{ matrix.profile }} \ @@ -185,8 +185,8 @@ jobs: --set 
image.pullPolicy=Never \ --set image.tag=$EMQX_TAG \ --set emqxAclConfig="" \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \ - --set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \ + --set emqxConfig.EMQX_MQTT__RETRY_INTERVAL=2s \ + --set emqxConfig.EMQX_MQTT__MAX_TOPIC_ALIAS=10 \ --set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \ --set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \ deploy/charts/${{ matrix.profile }} \ diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index 39c755053..c28ebc0bc 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -14,6 +14,9 @@ on: - e* pull_request: +env: + IS_CI: "yes" + jobs: build-matrix: runs-on: ubuntu-22.04 @@ -69,21 +72,14 @@ jobs: - uses: actions/checkout@v3 with: path: source - - uses: actions/cache@v3 - id: cache - with: - path: "$HOME/.cache/rebar3/rebar3_${{ matrix.otp }}_plt" - key: rebar3-dialyzer-plt-${{ matrix.otp }} - name: get_all_deps working-directory: source env: PROFILE: ${{ matrix.profile }} - #DIAGNOSTIC: 1 run: | make ensure-rebar3 # fetch all deps and compile - make ${{ matrix.profile }} - make static_checks + make ${{ matrix.profile }}-compile make test-compile cd .. zip -ryq source.zip source/* source/.[^.]* @@ -92,6 +88,34 @@ jobs: name: source-${{ matrix.profile }}-${{ matrix.otp }} path: source.zip + static_checks: + needs: + - build-matrix + - prepare + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} + container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu22.04" + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: . 
+ - name: unzip source code + run: unzip -o -q source.zip + - uses: actions/cache@v3 + with: + path: "source/emqx_dialyzer_${{ matrix.otp }}_plt" + key: rebar3-dialyzer-plt-${{ matrix.profile }}-${{ matrix.otp }} + - name: run static checks + env: + PROFILE: ${{ matrix.profile }} + working-directory: source + run: make static_checks + eunit_and_proper: needs: - build-matrix @@ -168,6 +192,7 @@ jobs: REDIS_TAG: "7.0" INFLUXDB_TAG: "2.5.0" TDENGINE_TAG: "3.0.2.4" + OPENTS_TAG: "9aa7f88" MINIO_TAG: "RELEASE.2023-03-20T20-16-18Z" PROFILE: ${{ matrix.profile }} CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} diff --git a/.gitignore b/.gitignore index 62e8ddc81..ceb12182f 100644 --- a/.gitignore +++ b/.gitignore @@ -43,8 +43,7 @@ tmp/ _packages elvis emqx_dialyzer_*_plt -*/emqx_dashboard/priv/www -*/emqx_dashboard/priv/i18n.conf +*/emqx_dashboard/priv/ dist.zip scripts/git-token apps/*/etc/*.all @@ -71,3 +70,5 @@ apps/emqx/test/emqx_static_checks_data/master.bpapi lux_logs/ /.prepare bom.json +ct_run*/ +apps/emqx_conf/etc/emqx.conf.all.rendered* diff --git a/LICENSE b/LICENSE index 2a081b135..8ff0a9060 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,7 @@ Source code in this repository is variously licensed under below licenses. -For EMQX: Apache License 2.0, see APL.txt, -which applies to all source files except for lib-ee sub-directory. +For Default: Apache License 2.0, see APL.txt, +which applies to all source files except for folders applied with Business Source License. For EMQX Enterprise (since version 5.0): Business Source License 1.1, -see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory. +see apps/emqx_bridge_kafka/BSL.txt as an example, please check license files under sub directory of apps. 
diff --git a/Makefile b/Makefile index 10e6d1424..d5046fe64 100644 --- a/Makefile +++ b/Makefile @@ -7,7 +7,8 @@ export EMQX_DEFAULT_RUNNER = debian:11-slim export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh) export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh) export EMQX_DASHBOARD_VERSION ?= v1.2.3 -export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.1 +export EMQX_EE_DASHBOARD_VERSION ?= e1.0.6-beta.2 + export EMQX_REL_FORM ?= tgz export QUICER_DOWNLOAD_FROM_RELEASE = 1 ifeq ($(OS),Windows_NT) @@ -73,6 +74,10 @@ proper: $(REBAR) test-compile: $(REBAR) merge-config $(REBAR) as test compile +.PHONY: $(REL_PROFILES:%=%-compile) +$(REL_PROFILES:%=%-compile): $(REBAR) merge-config + $(REBAR) as $(@:%-compile=%) compile + .PHONY: ct ct: $(REBAR) merge-config @ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(CT_COVER_EXPORT_PREFIX)-ct @@ -88,10 +93,9 @@ APPS=$(shell $(SCRIPTS)/find-apps.sh) .PHONY: $(APPS:%=%-ct) define gen-app-ct-target -$1-ct: $(REBAR) +$1-ct: $(REBAR) merge-config $(eval SUITES := $(shell $(SCRIPTS)/find-suites.sh $1)) ifneq ($(SUITES),) - @$(SCRIPTS)/pre-compile.sh $(PROFILE) @ENABLE_COVER_COMPILE=1 $(REBAR) ct -c -v \ --readable=$(CT_READABLE) \ --name $(CT_NODE_NAME) \ @@ -139,6 +143,11 @@ COMMON_DEPS := $(REBAR) $(REL_PROFILES:%=%): $(COMMON_DEPS) @$(BUILD) $(@) rel +.PHONY: compile $(PROFILES:%=compile-%) +compile: $(PROFILES:%=compile-%) +$(PROFILES:%=compile-%): + @$(BUILD) $(@:compile-%=%) apps + ## Not calling rebar3 clean because ## 1. rebar3 clean relies on rebar3, meaning it reads config, fetches dependencies etc. ## 2. 
it's slow @@ -222,11 +231,11 @@ endef $(foreach pt,$(PKG_PROFILES),$(eval $(call gen-pkg-target,$(pt)))) .PHONY: run -run: $(PROFILE) quickrun +run: compile-$(PROFILE) quickrun .PHONY: quickrun quickrun: - ./_build/$(PROFILE)/rel/emqx/bin/emqx console + ./dev -p $(PROFILE) ## Take the currently set PROFILE docker: diff --git a/apps/emqx/etc/emqx.conf b/apps/emqx/etc/emqx.conf index ee345e9d6..e69de29bb 100644 --- a/apps/emqx/etc/emqx.conf +++ b/apps/emqx/etc/emqx.conf @@ -1,43 +0,0 @@ -listeners.tcp.default { - bind = "0.0.0.0:1883" - max_connections = 1024000 -} - -listeners.ssl.default { - bind = "0.0.0.0:8883" - max_connections = 512000 - ssl_options { - keyfile = "{{ platform_etc_dir }}/certs/key.pem" - certfile = "{{ platform_etc_dir }}/certs/cert.pem" - cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem" - } -} - -listeners.ws.default { - bind = "0.0.0.0:8083" - max_connections = 1024000 - websocket.mqtt_path = "/mqtt" -} - -listeners.wss.default { - bind = "0.0.0.0:8084" - max_connections = 512000 - websocket.mqtt_path = "/mqtt" - ssl_options { - keyfile = "{{ platform_etc_dir }}/certs/key.pem" - certfile = "{{ platform_etc_dir }}/certs/cert.pem" - cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem" - } -} - -# listeners.quic.default { -# enabled = true -# bind = "0.0.0.0:14567" -# max_connections = 1024000 -# ssl_options { -# verify = verify_none -# keyfile = "{{ platform_etc_dir }}/certs/key.pem" -# certfile = "{{ platform_etc_dir }}/certs/cert.pem" -# cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem" -# } -# } diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index ec3883c77..2f44d2e1a 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -32,10 +32,10 @@ %% `apps/emqx/src/bpapi/README.md' %% Community edition --define(EMQX_RELEASE_CE, "5.0.23"). +-define(EMQX_RELEASE_CE, "5.0.24"). %% Enterprise edition --define(EMQX_RELEASE_EE, "5.0.3-alpha.1"). 
+-define(EMQX_RELEASE_EE, "5.0.3-alpha.5"). %% the HTTP API version -define(EMQX_API_VERSION, "5.0"). diff --git a/apps/emqx/include/emqx_schema.hrl b/apps/emqx/include/emqx_schema.hrl new file mode 100644 index 000000000..307bb20c5 --- /dev/null +++ b/apps/emqx/include/emqx_schema.hrl @@ -0,0 +1,23 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- +-ifndef(EMQX_SCHEMA_HRL). +-define(EMQX_SCHEMA_HRL, true). + +-define(TOMBSTONE_TYPE, marked_for_deletion). +-define(TOMBSTONE_VALUE, <<"marked_for_deletion">>). +-define(TOMBSTONE_CONFIG_CHANGE_REQ, mark_it_for_deletion). + +-endif. 
diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 6788b4f40..18119607e 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -27,13 +27,13 @@ {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.6"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.6"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.15.1"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, - {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.2"}}}, + {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.39.4"}}}, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.2"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}}, - {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.7"}}} + {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.8"}}} ]}. {plugins, [{rebar3_proper, "0.12.1"}, rebar3_path_deps]}. diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index d42478fea..5ca8fc797 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -3,7 +3,7 @@ {id, "emqx"}, {description, "EMQX Core"}, % strict semver, bump manually! 
- {vsn, "5.0.24"}, + {vsn, "5.0.25"}, {modules, []}, {registered, []}, {applications, [ diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index 17006f1dc..70499c60a 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -89,7 +89,7 @@ %% Authentication Data Cache auth_cache :: maybe(map()), %% Quota checkers - quota :: maybe(emqx_limiter_container:limiter()), + quota :: emqx_limiter_container:limiter(), %% Timers timers :: #{atom() => disabled | maybe(reference())}, %% Conn State @@ -768,7 +768,7 @@ do_finish_publish(PacketId, PubRes, RC, Channel) -> NChannel = ensure_quota(PubRes, Channel), handle_out(puback, {PacketId, RC}, NChannel). -ensure_quota(_, Channel = #channel{quota = undefined}) -> +ensure_quota(_, Channel = #channel{quota = infinity}) -> Channel; ensure_quota(PubRes, Channel = #channel{quota = Limiter}) -> Cnt = lists:foldl( diff --git a/apps/emqx/src/emqx_config.erl b/apps/emqx/src/emqx_config.erl index c94f25ead..1cf1d873c 100644 --- a/apps/emqx/src/emqx_config.erl +++ b/apps/emqx/src/emqx_config.erl @@ -22,7 +22,6 @@ -export([ init_load/1, init_load/2, - init_load/3, read_override_conf/1, has_deprecated_file/0, delete_override_conf_files/0, @@ -35,7 +34,6 @@ save_to_config_map/2, save_to_override_conf/3 ]). --export([raw_conf_with_default/4]). -export([merge_envs/2]). -export([ @@ -90,7 +88,7 @@ ]). -ifdef(TEST). --export([erase_schema_mod_and_names/0]). +-export([erase_all/0]). -endif. -include("logger.hrl"). @@ -103,6 +101,8 @@ -define(ZONE_CONF_PATH(ZONE, PATH), [zones, ZONE | PATH]). -define(LISTENER_CONF_PATH(TYPE, LISTENER, PATH), [listeners, TYPE, LISTENER | PATH]). +-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound'). + -export_type([ update_request/0, raw_config/0, @@ -164,9 +164,8 @@ get(KeyPath, Default) -> do_get(?CONF, KeyPath, Default). -spec find(emqx_utils_maps:config_key_path()) -> {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}. 
find([]) -> - Ref = make_ref(), - case do_get(?CONF, [], Ref) of - Ref -> {not_found, []}; + case do_get(?CONF, [], ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []}; Res -> {ok, Res} end; find(KeyPath) -> @@ -179,9 +178,8 @@ find(KeyPath) -> -spec find_raw(emqx_utils_maps:config_key_path()) -> {ok, term()} | {not_found, emqx_utils_maps:config_key_path(), term()}. find_raw([]) -> - Ref = make_ref(), - case do_get_raw([], Ref) of - Ref -> {not_found, []}; + case do_get_raw([], ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> {not_found, []}; Res -> {ok, Res} end; find_raw(KeyPath) -> @@ -315,45 +313,38 @@ put_raw(KeyPath, Config) -> %%============================================================================ init_load(SchemaMod) -> ConfFiles = application:get_env(emqx, config_files, []), - init_load(SchemaMod, ConfFiles, #{raw_with_default => true}). - -init_load(SchemaMod, Opts) when is_map(Opts) -> - ConfFiles = application:get_env(emqx, config_files, []), - init_load(SchemaMod, ConfFiles, Opts); -init_load(SchemaMod, ConfFiles) -> - init_load(SchemaMod, ConfFiles, #{raw_with_default => false}). + init_load(SchemaMod, ConfFiles). %% @doc Initial load of the given config files. %% NOTE: The order of the files is significant, configs from files ordered %% in the rear of the list overrides prior values. -spec init_load(module(), [string()] | binary() | hocon:config()) -> ok. -init_load(SchemaMod, Conf, Opts) when is_list(Conf) orelse is_binary(Conf) -> +init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) -> + ok = save_schema_mod_and_names(SchemaMod), HasDeprecatedFile = has_deprecated_file(), - RawConf = parse_hocon(HasDeprecatedFile, Conf), - init_load(HasDeprecatedFile, SchemaMod, RawConf, Opts). 
+ RawConf0 = load_config_files(HasDeprecatedFile, Conf), + RawConf1 = + case HasDeprecatedFile of + true -> + overlay_v0(SchemaMod, RawConf0); + false -> + overlay_v1(SchemaMod, RawConf0) + end, + RawConf = fill_defaults_for_all_roots(SchemaMod, RawConf1), + %% check configs against the schema + {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConf, #{}), + save_to_app_env(AppEnvs), + ok = save_to_config_map(CheckedConf, RawConf). -init_load(true, SchemaMod, RawConf, Opts) when is_map(RawConf) -> - %% deprecated conf will be removed in 5.1 - %% Merge environment variable overrides on top +%% Merge environment variable overrides on top, then merge with overrides. +overlay_v0(SchemaMod, RawConf) when is_map(RawConf) -> RawConfWithEnvs = merge_envs(SchemaMod, RawConf), Overrides = read_override_confs(), - RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides), - RootNames = get_root_names(), - RawConfAll = raw_conf_with_default(SchemaMod, RootNames, RawConfWithOverrides, Opts), - %% check configs against the schema - {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}), - save_to_app_env(AppEnvs), - ok = save_to_config_map(CheckedConf, RawConfAll); -init_load(false, SchemaMod, RawConf, Opts) when is_map(RawConf) -> - ok = save_schema_mod_and_names(SchemaMod), - RootNames = get_root_names(), - %% Merge environment variable overrides on top - RawConfWithEnvs = merge_envs(SchemaMod, RawConf), - RawConfAll = raw_conf_with_default(SchemaMod, RootNames, RawConfWithEnvs, Opts), - %% check configs against the schema - {AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}), - save_to_app_env(AppEnvs), - ok = save_to_config_map(CheckedConf, RawConfAll). + hocon:deep_merge(RawConfWithEnvs, Overrides). + +%% Merge environment variable overrides on top. +overlay_v1(SchemaMod, RawConf) when is_map(RawConf) -> + merge_envs(SchemaMod, RawConf). %% @doc Read merged cluster + local overrides. 
read_override_confs() -> @@ -362,47 +353,58 @@ read_override_confs() -> hocon:deep_merge(ClusterOverrides, LocalOverrides). %% keep the raw and non-raw conf has the same keys to make update raw conf easier. -raw_conf_with_default(SchemaMod, RootNames, RawConf, #{raw_with_default := true}) -> - Fun = fun(Name, Acc) -> - case maps:is_key(Name, RawConf) of - true -> - Acc; - false -> - case lists:keyfind(Name, 1, hocon_schema:roots(SchemaMod)) of - false -> - Acc; - {_, {_, Schema}} -> - Acc#{Name => schema_default(Schema)} - end - end - end, - RawDefault = lists:foldl(Fun, #{}, RootNames), - maps:merge(RawConf, fill_defaults(SchemaMod, RawDefault, #{})); -raw_conf_with_default(_SchemaMod, _RootNames, RawConf, _Opts) -> - RawConf. +fill_defaults_for_all_roots(SchemaMod, RawConf0) -> + RootSchemas = hocon_schema:roots(SchemaMod), + %% the roots which are missing from the loaded configs + MissingRoots = lists:filtermap( + fun({BinName, Sc}) -> + case maps:is_key(BinName, RawConf0) orelse is_already_loaded(BinName) of + true -> false; + false -> {true, Sc} + end + end, + RootSchemas + ), + RawConf = lists:foldl( + fun({RootName, Schema}, Acc) -> + Acc#{bin(RootName) => seed_default(Schema)} + end, + RawConf0, + MissingRoots + ), + fill_defaults(RawConf). -schema_default(Schema) -> - case hocon_schema:field_schema(Schema, type) of - ?ARRAY(_) -> - []; - _ -> - #{} +%% So far, this can only return true when testing. +%% e.g. when testing an app, we need to load its config first +%% then start emqx_conf application which will load the +%% possibly empty config again (then filled with defaults). +is_already_loaded(Name) -> + ?MODULE:get_raw([Name], #{}) =/= #{}. + +%% if a root is not found in the raw conf, fill it with default values. +seed_default(Schema) -> + case hocon_schema:field_schema(Schema, default) of + undefined -> + %% so far all roots without a default value are objects + #{}; + Value -> + Value end. 
-parse_hocon(HasDeprecatedFile, Conf) -> +load_config_files(HasDeprecatedFile, Conf) -> IncDirs = include_dirs(), case do_parse_hocon(HasDeprecatedFile, Conf, IncDirs) of {ok, HoconMap} -> HoconMap; {error, Reason} -> ?SLOG(error, #{ - msg => "failed_to_load_hocon_file", + msg => "failed_to_load_config_file", reason => Reason, pwd => file:get_cwd(), include_dirs => IncDirs, config_file => Conf }), - error(failed_to_load_hocon_file) + error(failed_to_load_config_file) end. do_parse_hocon(true, Conf, IncDirs) -> @@ -547,7 +549,9 @@ save_schema_mod_and_names(SchemaMod) -> }). -ifdef(TEST). -erase_schema_mod_and_names() -> +erase_all() -> + Names = get_root_names(), + lists:foreach(fun erase/1, Names), persistent_term:erase(?PERSIS_SCHEMA_MODS). -endif. @@ -665,11 +669,9 @@ do_get_raw(Path, Default) -> do_get(?RAW_CONF, Path, Default). do_get(Type, KeyPath) -> - Ref = make_ref(), - Res = do_get(Type, KeyPath, Ref), - case Res =:= Ref of - true -> error({config_not_found, KeyPath}); - false -> Res + case do_get(Type, KeyPath, ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, KeyPath}); + Res -> Res end. do_get(Type, [], Default) -> diff --git a/apps/emqx/src/emqx_config_handler.erl b/apps/emqx/src/emqx_config_handler.erl index e664a7dd7..0bad19f9e 100644 --- a/apps/emqx/src/emqx_config_handler.erl +++ b/apps/emqx/src/emqx_config_handler.erl @@ -18,6 +18,7 @@ -module(emqx_config_handler). -include("logger.hrl"). +-include("emqx_schema.hrl"). -include_lib("hocon/include/hoconsc.hrl"). -behaviour(gen_server). @@ -447,11 +448,17 @@ merge_to_override_config(RawConf, Opts) -> up_req({remove, _Opts}) -> '$remove'; up_req({{update, Req}, _Opts}) -> Req. 
-return_change_result(ConfKeyPath, {{update, _Req}, Opts}) -> - #{ - config => emqx_config:get(ConfKeyPath), - raw_config => return_rawconf(ConfKeyPath, Opts) - }; +return_change_result(ConfKeyPath, {{update, Req}, Opts}) -> + case Req =/= ?TOMBSTONE_CONFIG_CHANGE_REQ of + true -> + #{ + config => emqx_config:get(ConfKeyPath), + raw_config => return_rawconf(ConfKeyPath, Opts) + }; + false -> + %% like remove, nothing to return + #{} + end; return_change_result(_ConfKeyPath, {remove, _Opts}) -> #{}. diff --git a/apps/emqx/src/emqx_connection.erl b/apps/emqx/src/emqx_connection.erl index 27b6f3e84..79654e510 100644 --- a/apps/emqx/src/emqx_connection.erl +++ b/apps/emqx/src/emqx_connection.erl @@ -111,7 +111,7 @@ listener :: {Type :: atom(), Name :: atom()}, %% Limiter - limiter :: maybe(limiter()), + limiter :: limiter(), %% limiter buffer for overload use limiter_buffer :: queue:queue(pending_req()), @@ -974,55 +974,61 @@ handle_cast(Req, State) -> list(any()), state() ) -> _. + +check_limiter( + _Needs, + Data, + WhenOk, + Msgs, + #state{limiter = infinity} = State +) -> + WhenOk(Data, Msgs, State); check_limiter( Needs, Data, WhenOk, Msgs, - #state{ - limiter = Limiter, - limiter_timer = LimiterTimer, - limiter_buffer = Cache - } = State -) when Limiter =/= undefined -> - case LimiterTimer of - undefined -> - case emqx_limiter_container:check_list(Needs, Limiter) of - {ok, Limiter2} -> - WhenOk(Data, Msgs, State#state{limiter = Limiter2}); - {pause, Time, Limiter2} -> - ?SLOG(debug, #{ - msg => "pause_time_dueto_rate_limit", - needs => Needs, - time_in_ms => Time - }), + #state{limiter_timer = undefined, limiter = Limiter} = State +) -> + case emqx_limiter_container:check_list(Needs, Limiter) of + {ok, Limiter2} -> + WhenOk(Data, Msgs, State#state{limiter = Limiter2}); + {pause, Time, Limiter2} -> + ?SLOG(debug, #{ + msg => "pause_time_dueto_rate_limit", + needs => Needs, + time_in_ms => Time + }), - Retry = #retry{ - types = [Type || {_, Type} <- Needs], - data 
= Data, - next = WhenOk - }, + Retry = #retry{ + types = [Type || {_, Type} <- Needs], + data = Data, + next = WhenOk + }, - Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), + Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), - TRef = start_timer(Time, limit_timeout), + TRef = start_timer(Time, limit_timeout), - {ok, State#state{ - limiter = Limiter3, - limiter_timer = TRef - }}; - {drop, Limiter2} -> - {ok, State#state{limiter = Limiter2}} - end; - _ -> - %% if there has a retry timer, - %% cache the operation and execute it after the retry is over - %% the maximum length of the cache queue is equal to the active_n - New = #pending_req{need = Needs, data = Data, next = WhenOk}, - {ok, State#state{limiter_buffer = queue:in(New, Cache)}} + {ok, State#state{ + limiter = Limiter3, + limiter_timer = TRef + }}; + {drop, Limiter2} -> + {ok, State#state{limiter = Limiter2}} end; -check_limiter(_, Data, WhenOk, Msgs, State) -> - WhenOk(Data, Msgs, State). +check_limiter( + Needs, + Data, + WhenOk, + _Msgs, + #state{limiter_buffer = Cache} = State +) -> + %% if there has a retry timer, + %% cache the operation and execute it after the retry is over + %% the maximum length of the cache queue is equal to the active_n + New = #pending_req{need = Needs, data = Data, next = WhenOk}, + {ok, State#state{limiter_buffer = queue:in(New, Cache)}}. %% try to perform a retry -spec retry_limiter(state()) -> _. diff --git a/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl b/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl index 53f26deb5..bcd4166af 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_htb_limiter.erl @@ -22,7 +22,7 @@ %% API -export([ - make_token_bucket_limiter/2, + make_local_limiter/2, make_ref_limiter/2, check/2, consume/2, @@ -32,12 +32,11 @@ make_future/1, available/1 ]). --export_type([token_bucket_limiter/0]). +-export_type([local_limiter/0]). 
-%% a token bucket limiter with a limiter server's bucket reference - -%% the number of tokens currently available --type token_bucket_limiter() :: #{ +%% a token bucket limiter which may or not contains a reference to another limiter, +%% and can be used in a client alone +-type local_limiter() :: #{ tokens := non_neg_integer(), rate := decimal(), capacity := decimal(), @@ -58,12 +57,12 @@ retry_ctx => undefined %% the retry context - | retry_context(token_bucket_limiter()), + | retry_context(local_limiter()), %% allow to add other keys atom => any() }. -%% a limiter server's bucket reference +%% a limiter instance which only contains a reference to another limiter(bucket) -type ref_limiter() :: #{ max_retry_time := non_neg_integer(), failure_strategy := failure_strategy(), @@ -88,7 +87,7 @@ }. -type bucket() :: emqx_limiter_bucket_ref:bucket_ref(). --type limiter() :: token_bucket_limiter() | ref_limiter() | infinity. +-type limiter() :: local_limiter() | ref_limiter() | infinity. -type millisecond() :: non_neg_integer(). -type pause_type() :: pause | partial. @@ -116,7 +115,7 @@ rate := decimal(), initial := non_neg_integer(), low_watermark := non_neg_integer(), - capacity := decimal(), + burst := decimal(), divisible := boolean(), max_retry_time := non_neg_integer(), failure_strategy := failure_strategy() @@ -134,8 +133,8 @@ %% API %%-------------------------------------------------------------------- %%@doc create a limiter --spec make_token_bucket_limiter(limiter_bucket_cfg(), bucket()) -> _. -make_token_bucket_limiter(Cfg, Bucket) -> +-spec make_local_limiter(limiter_bucket_cfg(), bucket()) -> _. +make_local_limiter(Cfg, Bucket) -> Cfg#{ tokens => emqx_limiter_server:get_initial_val(Cfg), lasttime => ?NOW, @@ -312,8 +311,8 @@ on_failure(throw, Limiter) -> Message = io_lib:format("limiter consume failed, limiter:~p~n", [Limiter]), erlang:throw({rate_check_fail, Message}). 
--spec do_check_with_parent_limiter(pos_integer(), token_bucket_limiter()) -> - inner_check_result(token_bucket_limiter()). +-spec do_check_with_parent_limiter(pos_integer(), local_limiter()) -> + inner_check_result(local_limiter()). do_check_with_parent_limiter( Need, #{ @@ -336,7 +335,7 @@ do_check_with_parent_limiter( ) end. --spec do_reset(pos_integer(), token_bucket_limiter()) -> inner_check_result(token_bucket_limiter()). +-spec do_reset(pos_integer(), local_limiter()) -> inner_check_result(local_limiter()). do_reset( Need, #{ diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl index ea02152a9..6a9101a0f 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_container.erl @@ -34,16 +34,18 @@ -export_type([container/0, check_result/0]). --type container() :: #{ - limiter_type() => undefined | limiter(), - %% the retry context of the limiter - retry_key() => - undefined - | retry_context() - | future(), - %% the retry context of the container - retry_ctx := undefined | any() -}. +-type container() :: + infinity + | #{ + limiter_type() => undefined | limiter(), + %% the retry context of the limiter + retry_key() => + undefined + | retry_context() + | future(), + %% the retry context of the container + retry_ctx := undefined | any() + }. -type future() :: pos_integer(). -type limiter_id() :: emqx_limiter_schema:limiter_id(). @@ -78,7 +80,20 @@ get_limiter_by_types(Id, Types, BucketCfgs) -> {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs), add_new(Type, Limiter, Acc) end, - lists:foldl(Init, #{retry_ctx => undefined}, Types). + Container = lists:foldl(Init, #{retry_ctx => undefined}, Types), + case + lists:all( + fun(Type) -> + maps:get(Type, Container) =:= infinity + end, + Types + ) + of + true -> + infinity; + _ -> + Container + end. -spec add_new(limiter_type(), limiter(), container()) -> container(). 
add_new(Type, Limiter, Container) -> @@ -89,11 +104,15 @@ add_new(Type, Limiter, Container) -> %% @doc check the specified limiter -spec check(pos_integer(), limiter_type(), container()) -> check_result(). +check(_Need, _Type, infinity) -> + {ok, infinity}; check(Need, Type, Container) -> check_list([{Need, Type}], Container). %% @doc check multiple limiters -spec check_list(list({pos_integer(), limiter_type()}), container()) -> check_result(). +check_list(_Need, infinity) -> + {ok, infinity}; check_list([{Need, Type} | T], Container) -> Limiter = maps:get(Type, Container), case emqx_htb_limiter:check(Need, Limiter) of @@ -121,11 +140,15 @@ check_list([], Container) -> %% @doc retry the specified limiter -spec retry(limiter_type(), container()) -> check_result(). +retry(_Type, infinity) -> + {ok, infinity}; retry(Type, Container) -> retry_list([Type], Container). %% @doc retry multiple limiters -spec retry_list(list(limiter_type()), container()) -> check_result(). +retry_list(_Types, infinity) -> + {ok, infinity}; retry_list([Type | T], Container) -> Key = ?RETRY_KEY(Type), case Container of diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl index 297bdffb0..40061e0b9 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_manager.erl @@ -30,6 +30,12 @@ post_config_update/5 ]). +-export([ + find_root/1, + insert_root/2, + delete_root/1 +]). + -export([ start_server/1, start_server/2, @@ -62,6 +68,7 @@ -define(UID(Id, Type), {Id, Type}). -define(TAB, emqx_limiter_counters). +-define(ROOT_ID, root). %%-------------------------------------------------------------------- %% API @@ -104,9 +111,25 @@ insert_bucket(Id, Type, Bucket) -> ). -spec delete_bucket(limiter_id(), limiter_type()) -> true. -delete_bucket(Type, Id) -> +delete_bucket(Id, Type) -> ets:delete(?TAB, ?UID(Id, Type)). 
+-spec find_root(limiter_type()) -> + {ok, bucket_ref()} | undefined. +find_root(Type) -> + find_bucket(?ROOT_ID, Type). + +-spec insert_root( + limiter_type(), + bucket_ref() +) -> boolean(). +insert_root(Type, Bucket) -> + insert_bucket(?ROOT_ID, Type, Bucket). + +-spec delete_root(limiter_type()) -> true. +delete_root(Type) -> + delete_bucket(?ROOT_ID, Type). + post_config_update([limiter], _Config, NewConf, _OldConf, _AppEnvs) -> Types = lists:delete(client, maps:keys(NewConf)), _ = [on_post_config_update(Type, NewConf) || Type <- Types], diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl index c762a0f1d..40b23415c 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_schema.erl @@ -32,15 +32,17 @@ get_bucket_cfg_path/2, desc/1, types/0, - calc_capacity/1 + calc_capacity/1, + extract_with_type/2, + default_client_config/0 ]). -define(KILOBYTE, 1024). --define(BUCKET_KEYS, [ - {bytes, bucket_infinity}, - {messages, bucket_infinity}, - {connection, bucket_limit}, - {message_routing, bucket_infinity} +-define(LISTENER_BUCKET_KEYS, [ + bytes, + messages, + connection, + message_routing ]). -type limiter_type() :: @@ -94,30 +96,33 @@ namespace() -> limiter. roots() -> - [{limiter, hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{importance => ?IMPORTANCE_HIDDEN})}]. + [ + {limiter, + hoconsc:mk(hoconsc:ref(?MODULE, limiter), #{ + importance => ?IMPORTANCE_HIDDEN + })} + ]. 
fields(limiter) -> [ {Type, ?HOCON(?R_REF(node_opts), #{ desc => ?DESC(Type), - default => #{}, importance => ?IMPORTANCE_HIDDEN, aliases => alias_of_type(Type) })} || Type <- types() ] ++ [ + %% This is an undocumented feature, and it won't be support anymore {client, ?HOCON( ?R_REF(client_fields), #{ desc => ?DESC(client), importance => ?IMPORTANCE_HIDDEN, - default => maps:from_list([ - {erlang:atom_to_binary(Type), #{}} - || Type <- types() - ]) + required => {false, recursively}, + deprecated => {since, "5.0.25"} } )} ]; @@ -131,11 +136,9 @@ fields(node_opts) -> })} ]; fields(client_fields) -> - client_fields(types(), #{default => #{}}); -fields(bucket_infinity) -> + client_fields(types()); +fields(bucket_opts) -> fields_of_bucket(<<"infinity">>); -fields(bucket_limit) -> - fields_of_bucket(<<"1000/s">>); fields(client_opts) -> [ {rate, ?HOCON(rate(), #{default => <<"infinity">>, desc => ?DESC(rate)})}, @@ -194,10 +197,9 @@ fields(client_opts) -> )} ]; fields(listener_fields) -> - composite_bucket_fields(?BUCKET_KEYS, listener_client_fields); + composite_bucket_fields(?LISTENER_BUCKET_KEYS, listener_client_fields); fields(listener_client_fields) -> - {Types, _} = lists:unzip(?BUCKET_KEYS), - client_fields(Types, #{required => false}); + client_fields(?LISTENER_BUCKET_KEYS); fields(Type) -> simple_bucket_field(Type). @@ -205,10 +207,8 @@ desc(limiter) -> "Settings for the rate limiter."; desc(node_opts) -> "Settings for the limiter of the node level."; -desc(bucket_infinity) -> +desc(bucket_opts) -> "Settings for the bucket."; -desc(bucket_limit) -> - desc(bucket_infinity); desc(client_opts) -> "Settings for the client in bucket level."; desc(client_fields) -> @@ -241,6 +241,31 @@ calc_capacity(#{rate := infinity}) -> calc_capacity(#{rate := Rate, burst := Burst}) -> erlang:floor(1000 * Rate / default_period()) + Burst. 
+extract_with_type(_Type, undefined) -> + undefined; +extract_with_type(Type, #{client := ClientCfg} = BucketCfg) -> + BucketVal = maps:find(Type, BucketCfg), + ClientVal = maps:find(Type, ClientCfg), + merge_client_bucket(Type, ClientVal, BucketVal); +extract_with_type(Type, BucketCfg) -> + BucketVal = maps:find(Type, BucketCfg), + merge_client_bucket(Type, undefined, BucketVal). + +%% Since the client configuration can be absent and be a undefined value, +%% but we must need some basic settings to control the behaviour of the limiter, +%% so here add this helper function to generate a default setting. +%% This is a temporary workaround until we found a better way to simplify. +default_client_config() -> + #{ + rate => infinity, + initial => 0, + low_watermark => 0, + burst => 0, + divisible => false, + max_retry_time => timer:seconds(10), + failure_strategy => force + }. + %%-------------------------------------------------------------------- %% Internal functions %%-------------------------------------------------------------------- @@ -360,14 +385,14 @@ apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit). 
%% A bucket with only one type simple_bucket_field(Type) when is_atom(Type) -> - fields(bucket_infinity) ++ + fields(bucket_opts) ++ [ {client, ?HOCON( ?R_REF(?MODULE, client_opts), #{ desc => ?DESC(client), - required => false, + required => {false, recursively}, importance => importance_of_type(Type), aliases => alias_of_type(Type) } @@ -378,13 +403,13 @@ simple_bucket_field(Type) when is_atom(Type) -> composite_bucket_fields(Types, ClientRef) -> [ {Type, - ?HOCON(?R_REF(?MODULE, Opts), #{ + ?HOCON(?R_REF(?MODULE, bucket_opts), #{ desc => ?DESC(?MODULE, Type), - required => false, + required => {false, recursively}, importance => importance_of_type(Type), aliases => alias_of_type(Type) })} - || {Type, Opts} <- Types + || Type <- Types ] ++ [ {client, @@ -392,7 +417,7 @@ composite_bucket_fields(Types, ClientRef) -> ?R_REF(?MODULE, ClientRef), #{ desc => ?DESC(client), - required => false + required => {false, recursively} } )} ]. @@ -415,11 +440,12 @@ fields_of_bucket(Default) -> })} ]. -client_fields(Types, Meta) -> +client_fields(Types) -> [ {Type, - ?HOCON(?R_REF(client_opts), Meta#{ + ?HOCON(?R_REF(client_opts), #{ desc => ?DESC(Type), + required => false, importance => importance_of_type(Type), aliases => alias_of_type(Type) })} @@ -441,3 +467,12 @@ alias_of_type(bytes) -> [bytes_in]; alias_of_type(_) -> []. + +merge_client_bucket(Type, {ok, ClientVal}, {ok, BucketVal}) -> + #{Type => BucketVal, client => #{Type => ClientVal}}; +merge_client_bucket(Type, {ok, ClientVal}, _) -> + #{client => #{Type => ClientVal}}; +merge_client_bucket(Type, _, {ok, BucketVal}) -> + #{Type => BucketVal}; +merge_client_bucket(_, _, _) -> + undefined. 
diff --git a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl index 58db66f82..2867283d6 100644 --- a/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl +++ b/apps/emqx/src/emqx_limiter/src/emqx_limiter_server.erl @@ -59,7 +59,8 @@ burst := rate(), %% token generation interval(second) period := pos_integer(), - produced := float() + produced := float(), + correction := emqx_limiter_decimal:zero_or_float() }. -type bucket() :: #{ @@ -98,6 +99,7 @@ %% minimum coefficient for overloaded limiter -define(OVERLOAD_MIN_ALLOC, 0.3). -define(COUNTER_SIZE, 8). +-define(ROOT_COUNTER_IDX, 1). -export_type([index/0]). -import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]). @@ -110,47 +112,24 @@ -spec connect( limiter_id(), limiter_type(), - bucket_name() | #{limiter_type() => bucket_name() | undefined} + hocons:config() | undefined ) -> {ok, emqx_htb_limiter:limiter()} | {error, _}. -%% If no bucket path is set in config, there will be no limit -connect(_Id, _Type, undefined) -> - {ok, emqx_htb_limiter:make_infinity_limiter()}; +%% undefined is the default situation, no limiter setting by default +connect(Id, Type, undefined) -> + create_limiter(Id, Type, undefined, undefined); +connect(Id, Type, #{rate := _} = Cfg) -> + create_limiter(Id, Type, maps:get(client, Cfg, undefined), Cfg); connect(Id, Type, Cfg) -> - case find_limiter_cfg(Type, Cfg) of - {_ClientCfg, undefined, _NodeCfg} -> - {ok, emqx_htb_limiter:make_infinity_limiter()}; - {#{rate := infinity}, #{rate := infinity}, #{rate := infinity}} -> - {ok, emqx_htb_limiter:make_infinity_limiter()}; - {ClientCfg, #{rate := infinity}, #{rate := infinity}} -> - {ok, - emqx_htb_limiter:make_token_bucket_limiter( - ClientCfg, emqx_limiter_bucket_ref:infinity_bucket() - )}; - { - #{rate := CliRate} = ClientCfg, - #{rate := BucketRate} = BucketCfg, - _ - } -> - case emqx_limiter_manager:find_bucket(Id, Type) of - {ok, Bucket} -> - BucketSize = 
emqx_limiter_schema:calc_capacity(BucketCfg), - CliSize = emqx_limiter_schema:calc_capacity(ClientCfg), - {ok, - if - CliRate < BucketRate orelse CliSize < BucketSize -> - emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket); - true -> - emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket) - end}; - undefined -> - ?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}), - {error, invalid_bucket} - end - end. + create_limiter( + Id, + Type, + emqx_utils_maps:deep_get([client, Type], Cfg, undefined), + maps:get(Type, Cfg, undefined) + ). -spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok. -add_bucket(_Id, _Type, undefine) -> +add_bucket(_Id, _Type, undefined) -> ok; add_bucket(Id, Type, Cfg) -> ?CALL(Type, {add_bucket, Id, Cfg}). @@ -288,7 +267,8 @@ handle_info(Info, State) -> Reason :: normal | shutdown | {shutdown, term()} | term(), State :: term() ) -> any(). -terminate(_Reason, _State) -> +terminate(_Reason, #{type := Type}) -> + emqx_limiter_manager:delete_root(Type), ok. %%-------------------------------------------------------------------- @@ -343,10 +323,14 @@ oscillation( oscillate(Interval), Ordereds = get_ordered_buckets(Buckets), {Alloced, Buckets2} = transverse(Ordereds, Flow, 0.0, Buckets), - maybe_burst(State#{ - buckets := Buckets2, - root := Root#{produced := Produced + Alloced} - }). + State2 = maybe_adjust_root_tokens( + State#{ + buckets := Buckets2, + root := Root#{produced := Produced + Alloced} + }, + Alloced + ), + maybe_burst(State2). %% @doc horizontal spread -spec transverse( @@ -419,6 +403,24 @@ get_ordered_buckets(Buckets) -> Buckets ). +-spec maybe_adjust_root_tokens(state(), float()) -> state(). 
+maybe_adjust_root_tokens(#{root := #{rate := infinity}} = State, _Alloced) -> + State; +maybe_adjust_root_tokens(#{root := #{rate := Rate}} = State, Alloced) when Alloced >= Rate -> + State; +maybe_adjust_root_tokens(#{root := #{rate := Rate} = Root, counter := Counter} = State, Alloced) -> + InFlow = Rate - Alloced, + Token = counters:get(Counter, ?ROOT_COUNTER_IDX), + case Token >= Rate of + true -> + State; + _ -> + Available = erlang:min(Rate - Token, InFlow), + {Inc, Root2} = emqx_limiter_correction:add(Available, Root), + counters:add(Counter, ?ROOT_COUNTER_IDX, Inc), + State#{root := Root2} + end. + -spec maybe_burst(state()) -> state(). maybe_burst( #{ @@ -482,12 +484,16 @@ init_tree(Type) when is_atom(Type) -> Cfg = emqx:get_config([limiter, Type]), init_tree(Type, Cfg). -init_tree(Type, Cfg) -> +init_tree(Type, #{rate := Rate} = Cfg) -> + Counter = counters:new(?COUNTER_SIZE, [write_concurrency]), + RootBucket = emqx_limiter_bucket_ref:new(Counter, ?ROOT_COUNTER_IDX, Rate), + emqx_limiter_manager:insert_root(Type, RootBucket), #{ type => Type, root => make_root(Cfg), - counter => counters:new(?COUNTER_SIZE, [write_concurrency]), - index => 0, + counter => Counter, + %% The first slot is reserved for the root + index => ?ROOT_COUNTER_IDX, buckets => #{} }. @@ -497,7 +503,8 @@ make_root(#{rate := Rate, burst := Burst}) -> rate => Rate, burst => Burst, period => emqx_limiter_schema:default_period(), - produced => 0.0 + produced => 0.0, + correction => 0 }. do_add_bucket(_Id, #{rate := infinity}, #{root := #{rate := infinity}} = State) -> @@ -571,25 +578,61 @@ call(Type, Msg) -> gen_server:call(Pid, Msg) end. -find_limiter_cfg(Type, #{rate := _} = Cfg) -> - {find_client_cfg(Type, maps:get(client, Cfg, undefined)), Cfg, find_node_cfg(Type)}; -find_limiter_cfg(Type, Cfg) -> - { - find_client_cfg(Type, emqx_utils_maps:deep_get([client, Type], Cfg, undefined)), - maps:get(Type, Cfg, undefined), - find_node_cfg(Type) - }. 
+create_limiter(Id, Type, #{rate := Rate} = ClientCfg, BucketCfg) when Rate =/= infinity -> + create_limiter_with_client(Id, Type, ClientCfg, BucketCfg); +create_limiter(Id, Type, _, BucketCfg) -> + create_limiter_without_client(Id, Type, BucketCfg). -find_client_cfg(Type, BucketCfg) -> - NodeCfg = emqx:get_config([limiter, client, Type], undefined), - merge_client_cfg(NodeCfg, BucketCfg). +%% create a limiter with the client-level configuration +create_limiter_with_client(Id, Type, ClientCfg, BucketCfg) -> + case find_referenced_bucket(Id, Type, BucketCfg) of + false -> + {ok, emqx_htb_limiter:make_local_limiter(ClientCfg, infinity)}; + {ok, Bucket, RefCfg} -> + create_limiter_with_ref(Bucket, ClientCfg, RefCfg); + Error -> + Error + end. -merge_client_cfg(undefined, BucketCfg) -> - BucketCfg; -merge_client_cfg(NodeCfg, undefined) -> - NodeCfg; -merge_client_cfg(NodeCfg, BucketCfg) -> - maps:merge(NodeCfg, BucketCfg). +%% create a limiter only with the referenced configuration +create_limiter_without_client(Id, Type, BucketCfg) -> + case find_referenced_bucket(Id, Type, BucketCfg) of + false -> + {ok, emqx_htb_limiter:make_infinity_limiter()}; + {ok, Bucket, RefCfg} -> + ClientCfg = emqx_limiter_schema:default_client_config(), + create_limiter_with_ref(Bucket, ClientCfg, RefCfg); + Error -> + Error + end. -find_node_cfg(Type) -> - emqx:get_config([limiter, Type], #{rate => infinity, burst => 0}). +create_limiter_with_ref( + Bucket, + #{rate := CliRate} = ClientCfg, + #{rate := RefRate} +) when CliRate < RefRate -> + {ok, emqx_htb_limiter:make_local_limiter(ClientCfg, Bucket)}; +create_limiter_with_ref(Bucket, ClientCfg, _) -> + {ok, emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)}. 
+ +%% this is a listener(server)-level reference +find_referenced_bucket(Id, Type, #{rate := Rate} = Cfg) when Rate =/= infinity -> + case emqx_limiter_manager:find_bucket(Id, Type) of + {ok, Bucket} -> + {ok, Bucket, Cfg}; + _ -> + ?SLOG(error, #{msg => "bucket not found", type => Type, id => Id}), + {error, invalid_bucket} + end; +%% this is a node-level reference +find_referenced_bucket(Id, Type, _) -> + case emqx:get_config([limiter, Type], undefined) of + #{rate := infinity} -> + false; + undefined -> + ?SLOG(error, #{msg => "invalid limiter type", type => Type, id => Id}), + {error, invalid_bucket}; + NodeCfg -> + {ok, Bucket} = emqx_limiter_manager:find_root(Type), + {ok, Bucket, NodeCfg} + end. diff --git a/apps/emqx/src/emqx_listeners.erl b/apps/emqx/src/emqx_listeners.erl index f82aebe7c..99ab52f61 100644 --- a/apps/emqx/src/emqx_listeners.erl +++ b/apps/emqx/src/emqx_listeners.erl @@ -20,6 +20,7 @@ -elvis([{elvis_style, dont_repeat_yourself, #{min_complexity => 10000}}]). -include("emqx_mqtt.hrl"). +-include("emqx_schema.hrl"). -include("logger.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). @@ -33,7 +34,8 @@ is_running/1, current_conns/2, max_conns/2, - id_example/0 + id_example/0, + default_max_conn/0 ]). -export([ @@ -61,8 +63,11 @@ -export([certs_dir/2]). -endif. +-type listener_id() :: atom() | binary(). + -define(CONF_KEY_PATH, [listeners, '?', '?']). -define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]). +-define(MARK_DEL, ?TOMBSTONE_CONFIG_CHANGE_REQ). -spec id_example() -> atom(). id_example() -> 'tcp:default'. 
@@ -105,19 +110,22 @@ do_list_raw() -> format_raw_listeners({Type0, Conf}) -> Type = binary_to_atom(Type0), - lists:map( - fun({LName, LConf0}) when is_map(LConf0) -> - Bind = parse_bind(LConf0), - Running = is_running(Type, listener_id(Type, LName), LConf0#{bind => Bind}), - LConf1 = maps:remove(<<"authentication">>, LConf0), - LConf3 = maps:put(<<"running">>, Running, LConf1), - CurrConn = - case Running of - true -> current_conns(Type, LName, Bind); - false -> 0 - end, - LConf4 = maps:put(<<"current_connections">>, CurrConn, LConf3), - {Type0, LName, LConf4} + lists:filtermap( + fun + ({LName, LConf0}) when is_map(LConf0) -> + Bind = parse_bind(LConf0), + Running = is_running(Type, listener_id(Type, LName), LConf0#{bind => Bind}), + LConf1 = maps:remove(<<"authentication">>, LConf0), + LConf2 = maps:put(<<"running">>, Running, LConf1), + CurrConn = + case Running of + true -> current_conns(Type, LName, Bind); + false -> 0 + end, + LConf = maps:put(<<"current_connections">>, CurrConn, LConf2), + {true, {Type0, LName, LConf}}; + ({_LName, _MarkDel}) -> + false end, maps:to_list(Conf) ). @@ -195,7 +203,7 @@ start() -> ok = emqx_config_handler:add_handler(?CONF_KEY_PATH, ?MODULE), foreach_listeners(fun start_listener/3). --spec start_listener(atom()) -> ok | {error, term()}. +-spec start_listener(listener_id()) -> ok | {error, term()}. start_listener(ListenerId) -> apply_on_listener(ListenerId, fun start_listener/3). @@ -246,7 +254,7 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) -> restart() -> foreach_listeners(fun restart_listener/3). --spec restart_listener(atom()) -> ok | {error, term()}. +-spec restart_listener(listener_id()) -> ok | {error, term()}. restart_listener(ListenerId) -> apply_on_listener(ListenerId, fun restart_listener/3). @@ -271,7 +279,7 @@ stop() -> _ = emqx_config_handler:remove_handler(?CONF_KEY_PATH), foreach_listeners(fun stop_listener/3). --spec stop_listener(atom()) -> ok | {error, term()}. 
+-spec stop_listener(listener_id()) -> ok | {error, term()}. stop_listener(ListenerId) -> apply_on_listener(ListenerId, fun stop_listener/3). @@ -419,7 +427,9 @@ do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) -> end. %% Update the listeners at runtime -pre_config_update([listeners, Type, Name], {create, NewConf}, undefined) -> +pre_config_update([listeners, Type, Name], {create, NewConf}, V) when + V =:= undefined orelse V =:= ?TOMBSTONE_VALUE +-> CertsDir = certs_dir(Type, Name), {ok, convert_certs(CertsDir, NewConf)}; pre_config_update([listeners, _Type, _Name], {create, _NewConf}, _RawConf) -> @@ -434,6 +444,8 @@ pre_config_update([listeners, Type, Name], {update, Request}, RawConf) -> pre_config_update([listeners, _Type, _Name], {action, _Action, Updated}, RawConf) -> NewConf = emqx_utils_maps:deep_merge(RawConf, Updated), {ok, NewConf}; +pre_config_update([listeners, _Type, _Name], ?MARK_DEL, _RawConf) -> + {ok, ?TOMBSTONE_VALUE}; pre_config_update(_Path, _Request, RawConf) -> {ok, RawConf}. 
@@ -441,13 +453,15 @@ post_config_update([listeners, Type, Name], {create, _Request}, NewConf, undefin start_listener(Type, Name, NewConf); post_config_update([listeners, Type, Name], {update, _Request}, NewConf, OldConf, _AppEnvs) -> try_clear_ssl_files(certs_dir(Type, Name), NewConf, OldConf), + ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf), case NewConf of #{enabled := true} -> restart_listener(Type, Name, {OldConf, NewConf}); _ -> ok end; -post_config_update([listeners, _Type, _Name], '$remove', undefined, undefined, _AppEnvs) -> - ok; -post_config_update([listeners, Type, Name], '$remove', undefined, OldConf, _AppEnvs) -> +post_config_update([listeners, Type, Name], Op, _, OldConf, _AppEnvs) when + Op =:= ?MARK_DEL andalso is_map(OldConf) +-> + ok = unregister_ocsp_stapling_refresh(Type, Name), case stop_listener(Type, Name, OldConf) of ok -> _ = emqx_authentication:delete_chain(listener_id(Type, Name)), @@ -460,10 +474,18 @@ post_config_update([listeners, Type, Name], {action, _Action, _}, NewConf, OldCo #{enabled := NewEnabled} = NewConf, #{enabled := OldEnabled} = OldConf, case {NewEnabled, OldEnabled} of - {true, true} -> restart_listener(Type, Name, {OldConf, NewConf}); - {true, false} -> start_listener(Type, Name, NewConf); - {false, true} -> stop_listener(Type, Name, OldConf); - {false, false} -> stop_listener(Type, Name, OldConf) + {true, true} -> + ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf), + restart_listener(Type, Name, {OldConf, NewConf}); + {true, false} -> + ok = maybe_unregister_ocsp_stapling_refresh(Type, Name, NewConf), + start_listener(Type, Name, NewConf); + {false, true} -> + ok = unregister_ocsp_stapling_refresh(Type, Name), + stop_listener(Type, Name, OldConf); + {false, false} -> + ok = unregister_ocsp_stapling_refresh(Type, Name), + stop_listener(Type, Name, OldConf) end; post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) -> ok. 
@@ -472,7 +494,7 @@ esockd_opts(ListenerId, Type, Opts0) -> Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0), Limiter = limiter(Opts0), Opts2 = - case maps:get(connection, Limiter, undefined) of + case emqx_limiter_schema:extract_with_type(connection, Limiter) of undefined -> Opts1; BucketCfg -> @@ -601,6 +623,7 @@ format_bind(Bin) when is_binary(Bin) -> listener_id(Type, ListenerName) -> list_to_atom(lists:append([str(Type), ":", str(ListenerName)])). +-spec parse_listener_id(listener_id()) -> {ok, #{type => atom(), name => atom()}} | {error, term()}. parse_listener_id(Id) -> case string:split(str(Id), ":", leading) of [Type, Name] -> @@ -616,7 +639,7 @@ zone(Opts) -> maps:get(zone, Opts, undefined). limiter(Opts) -> - maps:get(limiter, Opts, #{}). + maps:get(limiter, Opts, undefined). add_limiter_bucket(Id, #{limiter := Limiter}) -> maps:fold( @@ -813,3 +836,22 @@ inject_crl_config( }; inject_crl_config(Conf) -> Conf. + +maybe_unregister_ocsp_stapling_refresh( + ssl = Type, Name, #{ssl_options := #{ocsp := #{enable_ocsp_stapling := false}}} = _Conf +) -> + unregister_ocsp_stapling_refresh(Type, Name), + ok; +maybe_unregister_ocsp_stapling_refresh(_Type, _Name, _Conf) -> + ok. + +unregister_ocsp_stapling_refresh(Type, Name) -> + ListenerId = listener_id(Type, Name), + emqx_ocsp_cache:unregister_listener(ListenerId), + ok. + +%% There is currently an issue with frontend +%% infinity is not a good value for it, so we use 5m for now +default_max_conn() -> + %% TODO: <<"infinity">> + 5_000_000. diff --git a/apps/emqx/src/emqx_mqtt_caps.erl b/apps/emqx/src/emqx_mqtt_caps.erl index 1806ede1d..897bb93c4 100644 --- a/apps/emqx/src/emqx_mqtt_caps.erl +++ b/apps/emqx/src/emqx_mqtt_caps.erl @@ -37,7 +37,6 @@ max_qos_allowed => emqx_types:qos(), retain_available => boolean(), wildcard_subscription => boolean(), - subscription_identifiers => boolean(), shared_subscription => boolean(), exclusive_subscription => boolean() }. 
@@ -58,18 +57,17 @@ exclusive_subscription ]). --define(DEFAULT_CAPS, #{ - max_packet_size => ?MAX_PACKET_SIZE, - max_clientid_len => ?MAX_CLIENTID_LEN, - max_topic_alias => ?MAX_TOPIC_AlIAS, - max_topic_levels => ?MAX_TOPIC_LEVELS, - max_qos_allowed => ?QOS_2, - retain_available => true, - wildcard_subscription => true, - subscription_identifiers => true, - shared_subscription => true, - exclusive_subscription => false -}). +-define(DEFAULT_CAPS_KEYS, [ + max_packet_size, + max_clientid_len, + max_topic_alias, + max_topic_levels, + max_qos_allowed, + retain_available, + wildcard_subscription, + shared_subscription, + exclusive_subscription +]). -spec check_pub( emqx_types:zone(), @@ -88,7 +86,7 @@ check_pub(Zone, Flags) when is_map(Flags) -> error -> Flags end, - maps:with(?PUBCAP_KEYS, get_caps(Zone)) + get_caps(?PUBCAP_KEYS, Zone) ). do_check_pub(#{topic_levels := Levels}, #{max_topic_levels := Limit}) when @@ -111,7 +109,7 @@ do_check_pub(_Flags, _Caps) -> ) -> ok_or_error(emqx_types:reason_code()). check_sub(ClientInfo = #{zone := Zone}, Topic, SubOpts) -> - Caps = maps:with(?SUBCAP_KEYS, get_caps(Zone)), + Caps = get_caps(?SUBCAP_KEYS, Zone), Flags = lists:foldl( fun (max_topic_levels, Map) -> @@ -152,10 +150,12 @@ do_check_sub(_Flags, _Caps, _, _) -> ok. get_caps(Zone) -> - lists:foldl( - fun({K, V}, Acc) -> - Acc#{K => emqx_config:get_zone_conf(Zone, [mqtt, K], V)} - end, - #{}, - maps:to_list(?DEFAULT_CAPS) + get_caps(?DEFAULT_CAPS_KEYS, Zone). +get_caps(Keys, Zone) -> + maps:with( + Keys, + maps:merge( + emqx_config:get([mqtt]), + emqx_config:get_zone_conf(Zone, [mqtt]) + ) ). diff --git a/apps/emqx/src/emqx_ocsp_cache.erl b/apps/emqx/src/emqx_ocsp_cache.erl index 3bb10ee5c..ef0411b37 100644 --- a/apps/emqx/src/emqx_ocsp_cache.erl +++ b/apps/emqx/src/emqx_ocsp_cache.erl @@ -30,6 +30,7 @@ sni_fun/2, fetch_response/1, register_listener/2, + unregister_listener/1, inject_sni_fun/2 ]). 
@@ -107,6 +108,9 @@ fetch_response(ListenerID) -> register_listener(ListenerID, Opts) -> gen_server:call(?MODULE, {register_listener, ListenerID, Opts}, ?CALL_TIMEOUT). +unregister_listener(ListenerID) -> + gen_server:cast(?MODULE, {unregister_listener, ListenerID}). + -spec inject_sni_fun(emqx_listeners:listener_id(), map()) -> map(). inject_sni_fun(ListenerID, Conf0) -> SNIFun = emqx_const_v1:make_sni_fun(ListenerID), @@ -160,6 +164,18 @@ handle_call({register_listener, ListenerID, Conf}, _From, State0) -> handle_call(Call, _From, State) -> {reply, {error, {unknown_call, Call}}, State}. +handle_cast({unregister_listener, ListenerID}, State0) -> + State2 = + case maps:take(?REFRESH_TIMER(ListenerID), State0) of + error -> + State0; + {TRef, State1} -> + emqx_utils:cancel_timer(TRef), + State1 + end, + State = maps:remove({refresh_interval, ListenerID}, State2), + ?tp(ocsp_cache_listener_unregistered, #{listener_id => ListenerID}), + {noreply, State}; handle_cast(_Cast, State) -> {noreply, State}. diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index 540c681b3..188c22d78 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -23,6 +23,7 @@ -dialyzer(no_fail_call). -elvis([{elvis_style, invalid_dynamic_call, disable}]). +-include("emqx_schema.hrl"). -include("emqx_authentication.hrl"). -include("emqx_access_control.hrl"). -include_lib("typerefl/include/types.hrl"). @@ -42,7 +43,12 @@ -type ip_port() :: tuple() | integer(). -type cipher() :: map(). -type port_number() :: 1..65536. --type server_parse_option() :: #{default_port => port_number(), no_port => boolean()}. +-type server_parse_option() :: #{ + default_port => port_number(), + no_port => boolean(), + supported_schemes => [string()], + default_scheme => string() +}. -type url() :: binary(). -type json_binary() :: binary(). @@ -61,12 +67,19 @@ -typerefl_from_string({url/0, emqx_schema, to_url}). 
-typerefl_from_string({json_binary/0, emqx_schema, to_json_binary}). +-type parsed_server() :: #{ + hostname := string(), + port => port_number(), + scheme => string() +}. + -export([ validate_heap_size/1, user_lookup_fun_tr/2, validate_alarm_actions/1, non_empty_string/1, - validations/0 + validations/0, + naive_env_interpolation/1 ]). -export([qos/0]). @@ -99,6 +112,12 @@ convert_servers/2 ]). +%% tombstone types +-export([ + tombstone_map/2, + get_tombstone_map_value_type/1 +]). + -behaviour(hocon_schema). -reflect_type([ @@ -776,41 +795,48 @@ fields("listeners") -> [ {"tcp", sc( - map(name, ref("mqtt_tcp_listener")), + tombstone_map(name, ref("mqtt_tcp_listener")), #{ desc => ?DESC(fields_listeners_tcp), + converter => fun(X, _) -> + ensure_default_listener(X, tcp) + end, required => {false, recursively} } )}, {"ssl", sc( - map(name, ref("mqtt_ssl_listener")), + tombstone_map(name, ref("mqtt_ssl_listener")), #{ desc => ?DESC(fields_listeners_ssl), + converter => fun(X, _) -> ensure_default_listener(X, ssl) end, required => {false, recursively} } )}, {"ws", sc( - map(name, ref("mqtt_ws_listener")), + tombstone_map(name, ref("mqtt_ws_listener")), #{ desc => ?DESC(fields_listeners_ws), + converter => fun(X, _) -> ensure_default_listener(X, ws) end, required => {false, recursively} } )}, {"wss", sc( - map(name, ref("mqtt_wss_listener")), + tombstone_map(name, ref("mqtt_wss_listener")), #{ desc => ?DESC(fields_listeners_wss), + converter => fun(X, _) -> ensure_default_listener(X, wss) end, required => {false, recursively} } )}, {"quic", sc( - map(name, ref("mqtt_quic_listener")), + tombstone_map(name, ref("mqtt_quic_listener")), #{ desc => ?DESC(fields_listeners_quic), + converter => fun keep_default_tombstone/2, required => {false, recursively} } )} @@ -821,7 +847,7 @@ fields("crl_cache") -> %% same URL. If they had diverging timeout options, it would be %% confusing. 
[ - {"refresh_interval", + {refresh_interval, sc( duration(), #{ @@ -829,7 +855,7 @@ fields("crl_cache") -> desc => ?DESC("crl_cache_refresh_interval") } )}, - {"http_timeout", + {http_timeout, sc( duration(), #{ @@ -837,7 +863,7 @@ fields("crl_cache") -> desc => ?DESC("crl_cache_refresh_http_timeout") } )}, - {"capacity", + {capacity, sc( pos_integer(), #{ @@ -909,15 +935,17 @@ fields("mqtt_quic_listener") -> string(), #{ %% TODO: deprecated => {since, "5.1.0"} - desc => ?DESC(fields_mqtt_quic_listener_certfile) + desc => ?DESC(fields_mqtt_quic_listener_certfile), + importance => ?IMPORTANCE_HIDDEN } )}, {"keyfile", sc( string(), - %% TODO: deprecated => {since, "5.1.0"} #{ - desc => ?DESC(fields_mqtt_quic_listener_keyfile) + %% TODO: deprecated => {since, "5.1.0"} + desc => ?DESC(fields_mqtt_quic_listener_keyfile), + importance => ?IMPORTANCE_HIDDEN } )}, {"ciphers", ciphers_schema(quic)}, @@ -993,7 +1021,10 @@ fields("mqtt_quic_listener") -> duration_ms(), #{ default => 0, - desc => ?DESC(fields_mqtt_quic_listener_idle_timeout) + desc => ?DESC(fields_mqtt_quic_listener_idle_timeout), + %% TODO: deprecated => {since, "5.1.0"} + %% deprecated, use idle_timeout_ms instead + importance => ?IMPORTANCE_HIDDEN } )}, {"idle_timeout_ms", @@ -1007,7 +1038,10 @@ fields("mqtt_quic_listener") -> duration_ms(), #{ default => <<"10s">>, - desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout) + desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout), + %% TODO: deprecated => {since, "5.1.0"} + %% use handshake_idle_timeout_ms + importance => ?IMPORTANCE_HIDDEN } )}, {"handshake_idle_timeout_ms", @@ -1021,7 +1055,10 @@ fields("mqtt_quic_listener") -> duration_ms(), #{ default => 0, - desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval) + desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval), + %% TODO: deprecated => {since, "5.1.0"} + %% use keep_alive_interval_ms instead + importance => ?IMPORTANCE_HIDDEN } )}, {"keep_alive_interval_ms", @@ 
-1354,7 +1391,7 @@ fields("ssl_client_opts") -> client_ssl_opts_schema(#{}); fields("ocsp") -> [ - {"enable_ocsp_stapling", + {enable_ocsp_stapling, sc( boolean(), #{ @@ -1362,7 +1399,7 @@ fields("ocsp") -> desc => ?DESC("server_ssl_opts_schema_enable_ocsp_stapling") } )}, - {"responder_url", + {responder_url, sc( url(), #{ @@ -1370,7 +1407,7 @@ fields("ocsp") -> desc => ?DESC("server_ssl_opts_schema_ocsp_responder_url") } )}, - {"issuer_pem", + {issuer_pem, sc( binary(), #{ @@ -1378,7 +1415,7 @@ fields("ocsp") -> desc => ?DESC("server_ssl_opts_schema_ocsp_issuer_pem") } )}, - {"refresh_interval", + {refresh_interval, sc( duration(), #{ @@ -1386,7 +1423,7 @@ fields("ocsp") -> desc => ?DESC("server_ssl_opts_schema_ocsp_refresh_interval") } )}, - {"refresh_http_timeout", + {refresh_http_timeout, sc( duration(), #{ @@ -1489,10 +1526,8 @@ fields("broker") -> sc( boolean(), #{ - %% TODO: deprecated => {since, "5.1.0"} - %% in favor of session message re-dispatch at termination - %% we will stop supporting dispatch acks for shared - %% subscriptions. 
+ deprecated => {since, "5.1.0"}, + importance => ?IMPORTANCE_HIDDEN, default => false, desc => ?DESC(broker_shared_dispatch_ack_enabled) } @@ -1938,7 +1973,7 @@ base_listener(Bind) -> sc( hoconsc:union([infinity, pos_integer()]), #{ - default => <<"infinity">>, + default => emqx_listeners:default_max_conn(), desc => ?DESC(base_listener_max_connections) } )}, @@ -2314,12 +2349,12 @@ server_ssl_opts_schema(Defaults, IsRanchListener) -> Field || not IsRanchListener, Field <- [ - {"gc_after_handshake", + {gc_after_handshake, sc(boolean(), #{ default => false, desc => ?DESC(server_ssl_opts_schema_gc_after_handshake) })}, - {"ocsp", + {ocsp, sc( ref("ocsp"), #{ @@ -2327,7 +2362,7 @@ server_ssl_opts_schema(Defaults, IsRanchListener) -> validator => fun ocsp_inner_validator/1 } )}, - {"enable_crl_check", + {enable_crl_check, sc( boolean(), #{ @@ -2790,6 +2825,7 @@ authentication(Which) -> hoconsc:mk(Type, #{ desc => Desc, converter => fun ensure_array/2, + default => [], importance => Importance }). @@ -2898,7 +2934,7 @@ servers_validator(Opts, Required) -> %% `no_port': by default it's `false', when set to `true', %% a `throw' exception is raised if the port is found. -spec parse_server(undefined | string() | binary(), server_parse_option()) -> - {string(), port_number()}. + undefined | parsed_server(). parse_server(Str, Opts) -> case parse_servers(Str, Opts) of undefined -> @@ -2912,7 +2948,7 @@ parse_server(Str, Opts) -> %% @doc Parse comma separated `host[:port][,host[:port]]' endpoints %% into a list of `{Host, Port}' tuples or just `Host' string. -spec parse_servers(undefined | string() | binary(), server_parse_option()) -> - [{string(), port_number()}]. + undefined | [parsed_server()]. 
parse_servers(undefined, _Opts) -> %% should not parse 'undefined' as string, %% not to throw exception either, @@ -2958,6 +2994,9 @@ split_host_port(Str) -> do_parse_server(Str, Opts) -> DefaultPort = maps:get(default_port, Opts, undefined), NotExpectingPort = maps:get(no_port, Opts, false), + DefaultScheme = maps:get(default_scheme, Opts, undefined), + SupportedSchemes = maps:get(supported_schemes, Opts, []), + NotExpectingScheme = (not is_list(DefaultScheme)) andalso length(SupportedSchemes) =:= 0, case is_integer(DefaultPort) andalso NotExpectingPort of true -> %% either provide a default port from schema, @@ -2966,22 +3005,129 @@ do_parse_server(Str, Opts) -> false -> ok end, + case is_list(DefaultScheme) andalso (not lists:member(DefaultScheme, SupportedSchemes)) of + true -> + %% inconsistent schema + error("bad_schema"); + false -> + ok + end, %% do not split with space, there should be no space allowed between host and port - case string:tokens(Str, ":") of - [Hostname, Port] -> - NotExpectingPort andalso throw("not_expecting_port_number"), - {check_hostname(Hostname), parse_port(Port)}; - [Hostname] -> - case is_integer(DefaultPort) of - true -> - {check_hostname(Hostname), DefaultPort}; - false when NotExpectingPort -> - check_hostname(Hostname); - false -> - throw("missing_port_number") - end; - _ -> - throw("bad_host_port") + Tokens = string:tokens(Str, ":"), + Context = #{ + not_expecting_port => NotExpectingPort, + not_expecting_scheme => NotExpectingScheme, + default_port => DefaultPort, + default_scheme => DefaultScheme, + opts => Opts + }, + check_server_parts(Tokens, Context). 
+ +check_server_parts([Scheme, "//" ++ Hostname, Port], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + opts := Opts + } = Context, + NotExpectingPort andalso throw("not_expecting_port_number"), + NotExpectingScheme andalso throw("not_expecting_scheme"), + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname), + port => parse_port(Port) + }; +check_server_parts([Scheme, "//" ++ Hostname], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + default_port := DefaultPort, + opts := Opts + } = Context, + NotExpectingScheme andalso throw("not_expecting_scheme"), + case is_integer(DefaultPort) of + true -> + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname), + port => DefaultPort + }; + false when NotExpectingPort -> + #{ + scheme => check_scheme(Scheme, Opts), + hostname => check_hostname(Hostname) + }; + false -> + throw("missing_port_number") + end; +check_server_parts([Hostname, Port], Context) -> + #{ + not_expecting_port := NotExpectingPort, + default_scheme := DefaultScheme + } = Context, + NotExpectingPort andalso throw("not_expecting_port_number"), + case is_list(DefaultScheme) of + false -> + #{ + hostname => check_hostname(Hostname), + port => parse_port(Port) + }; + true -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname), + port => parse_port(Port) + } + end; +check_server_parts([Hostname], Context) -> + #{ + not_expecting_scheme := NotExpectingScheme, + not_expecting_port := NotExpectingPort, + default_port := DefaultPort, + default_scheme := DefaultScheme + } = Context, + case is_integer(DefaultPort) orelse NotExpectingPort of + true -> + ok; + false -> + throw("missing_port_number") + end, + case is_list(DefaultScheme) orelse NotExpectingScheme of + true -> + ok; + false -> + throw("missing_scheme") + end, + case {is_integer(DefaultPort), is_list(DefaultScheme)} 
of + {true, true} -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname), + port => DefaultPort + }; + {true, false} -> + #{ + hostname => check_hostname(Hostname), + port => DefaultPort + }; + {false, true} -> + #{ + scheme => DefaultScheme, + hostname => check_hostname(Hostname) + }; + {false, false} -> + #{hostname => check_hostname(Hostname)} + end; +check_server_parts(_Tokens, _Context) -> + throw("bad_host_port"). + +check_scheme(Str, Opts) -> + SupportedSchemes = maps:get(supported_schemes, Opts, []), + IsSupported = lists:member(Str, SupportedSchemes), + case IsSupported of + true -> + Str; + false -> + throw("unsupported_scheme") end. check_hostname(Str) -> @@ -3084,3 +3230,138 @@ assert_required_field(Conf, Key, ErrorMessage) -> _ -> ok end. + +default_listener(tcp) -> + #{ + <<"bind">> => <<"0.0.0.0:1883">> + }; +default_listener(ws) -> + #{ + <<"bind">> => <<"0.0.0.0:8083">>, + <<"websocket">> => #{<<"mqtt_path">> => <<"/mqtt">>} + }; +default_listener(SSLListener) -> + %% The env variable is resolved in emqx_tls_lib by calling naive_env_interpolate + CertFile = fun(Name) -> + iolist_to_binary("${EMQX_ETC_DIR}/" ++ filename:join(["certs", Name])) + end, + SslOptions = #{ + <<"cacertfile">> => CertFile(<<"cacert.pem">>), + <<"certfile">> => CertFile(<<"cert.pem">>), + <<"keyfile">> => CertFile(<<"key.pem">>) + }, + case SSLListener of + ssl -> + #{ + <<"bind">> => <<"0.0.0.0:8883">>, + <<"ssl_options">> => SslOptions + }; + wss -> + #{ + <<"bind">> => <<"0.0.0.0:8084">>, + <<"ssl_options">> => SslOptions, + <<"websocket">> => #{<<"mqtt_path">> => <<"/mqtt">>} + } + end. + +%% @doc This function helps to perform a naive string interpolation which +%% only looks at the first segment of the string and tries to replace it. 
+%% For example +%% "$MY_FILE_PATH" +%% "${MY_FILE_PATH}" +%% "$ENV_VARIABLE/sub/path" +%% "${ENV_VARIABLE}/sub/path" +%% "${ENV_VARIABLE}\sub\path" # windows +%% This function returns undefined if the input is undefined +%% otherwise always return string. +naive_env_interpolation(undefined) -> + undefined; +naive_env_interpolation(Bin) when is_binary(Bin) -> + naive_env_interpolation(unicode:characters_to_list(Bin, utf8)); +naive_env_interpolation("$" ++ Maybe = Original) -> + {Env, Tail} = split_path(Maybe), + case resolve_env(Env) of + {ok, Path} -> + filename:join([Path, Tail]); + error -> + Original + end; +naive_env_interpolation(Other) -> + Other. + +split_path(Path) -> + split_path(Path, []). + +split_path([], Acc) -> + {lists:reverse(Acc), []}; +split_path([Char | Rest], Acc) when Char =:= $/ orelse Char =:= $\\ -> + {lists:reverse(Acc), string:trim(Rest, leading, "/\\")}; +split_path([Char | Rest], Acc) -> + split_path(Rest, [Char | Acc]). + +resolve_env(Name0) -> + Name = string:trim(Name0, both, "{}"), + Value = os:getenv(Name), + case Value =/= false andalso Value =/= "" of + true -> + {ok, Value}; + false -> + special_env(Name) + end. + +-ifdef(TEST). +%% when running tests, we need to mock the env variables +special_env("EMQX_ETC_DIR") -> + {ok, filename:join([code:lib_dir(emqx), etc])}; +special_env("EMQX_LOG_DIR") -> + {ok, "log"}; +special_env(_Name) -> + %% only in tests + error. +-else. +special_env(_Name) -> error. +-endif. + +%% The tombstone atom. +tombstone() -> + ?TOMBSTONE_TYPE. + +%% Make a map type, the value of which is allowed to be 'marked_for_deletion' +%% 'marked_for_delition' is a special value which means the key is deleted. +%% This is used to support the 'delete' operation in configs, +%% since deleting the key would result in default value being used. 
+tombstone_map(Name, Type) -> + %% marked_for_deletion must be the last member of the union + %% because we need to first union member to populate the default values + map(Name, ?UNION([Type, ?TOMBSTONE_TYPE])). + +%% inverse of mark_del_map +get_tombstone_map_value_type(Schema) -> + %% TODO: violation of abstraction, expose an API in hoconsc + %% hoconsc:map_value_type(Schema) + ?MAP(_Name, Union) = hocon_schema:field_schema(Schema, type), + %% TODO: violation of abstraction, fix hoconsc:union_members/1 + ?UNION(Members) = Union, + Tombstone = tombstone(), + [Type, Tombstone] = hoconsc:union_members(Members), + Type. + +%% Keep the 'default' tombstone, but delete others. +keep_default_tombstone(Map, _Opts) when is_map(Map) -> + maps:filter( + fun(Key, Value) -> + Key =:= <<"default">> orelse Value =/= ?TOMBSTONE_VALUE + end, + Map + ); +keep_default_tombstone(Value, _Opts) -> + Value. + +ensure_default_listener(undefined, ListenerType) -> + %% let the schema's default value do its job + #{<<"default">> => default_listener(ListenerType)}; +ensure_default_listener(#{<<"default">> := _} = Map, _ListenerType) -> + keep_default_tombstone(Map, #{}); +ensure_default_listener(Map, ListenerType) -> + NewMap = Map#{<<"default">> => default_listener(ListenerType)}, + keep_default_tombstone(NewMap, #{}). diff --git a/apps/emqx/src/emqx_shared_sub.erl b/apps/emqx/src/emqx_shared_sub.erl index d7dc8c5a6..997364898 100644 --- a/apps/emqx/src/emqx_shared_sub.erl +++ b/apps/emqx/src/emqx_shared_sub.erl @@ -165,7 +165,7 @@ strategy(Group) -> -spec ack_enabled() -> boolean(). ack_enabled() -> - emqx:get_config([broker, shared_dispatch_ack_enabled]). + emqx:get_config([broker, shared_dispatch_ack_enabled], false). 
do_dispatch(SubPid, _Group, Topic, Msg, _Type) when SubPid =:= self() -> %% Deadlock otherwise @@ -181,7 +181,7 @@ do_dispatch(SubPid, _Group, Topic, Msg, retry) -> do_dispatch(SubPid, Group, Topic, Msg, fresh) -> case ack_enabled() of true -> - %% FIXME: replace with `emqx_shared_sub_proto:dispatch_with_ack' in 5.2 + %% TODO: delete this clase after 5.1.0 do_dispatch_with_ack(SubPid, Group, Topic, Msg); false -> send(SubPid, Topic, {deliver, Topic, Msg}) diff --git a/apps/emqx/src/emqx_tls_lib.erl b/apps/emqx/src/emqx_tls_lib.erl index d1c57bf0d..2683d2a9d 100644 --- a/apps/emqx/src/emqx_tls_lib.erl +++ b/apps/emqx/src/emqx_tls_lib.erl @@ -309,19 +309,19 @@ ensure_ssl_files(Dir, SSL, Opts) -> case ensure_ssl_file_key(SSL, RequiredKeys) of ok -> KeyPaths = ?SSL_FILE_OPT_PATHS ++ ?SSL_FILE_OPT_PATHS_A, - ensure_ssl_files(Dir, SSL, KeyPaths, Opts); + ensure_ssl_files_per_key(Dir, SSL, KeyPaths, Opts); {error, _} = Error -> Error end. -ensure_ssl_files(_Dir, SSL, [], _Opts) -> +ensure_ssl_files_per_key(_Dir, SSL, [], _Opts) -> {ok, SSL}; -ensure_ssl_files(Dir, SSL, [KeyPath | KeyPaths], Opts) -> +ensure_ssl_files_per_key(Dir, SSL, [KeyPath | KeyPaths], Opts) -> case ensure_ssl_file(Dir, KeyPath, SSL, emqx_utils_maps:deep_get(KeyPath, SSL, undefined), Opts) of {ok, NewSSL} -> - ensure_ssl_files(Dir, NewSSL, KeyPaths, Opts); + ensure_ssl_files_per_key(Dir, NewSSL, KeyPaths, Opts); {error, Reason} -> {error, Reason#{which_options => [KeyPath]}} end. @@ -472,7 +472,8 @@ hex_str(Bin) -> iolist_to_binary([io_lib:format("~2.16.0b", [X]) || <> <= Bin]). %% @doc Returns 'true' when the file is a valid pem, otherwise {error, Reason}. 
-is_valid_pem_file(Path) -> +is_valid_pem_file(Path0) -> + Path = resolve_cert_path_for_read(Path0), case file:read_file(Path) of {ok, Pem} -> is_pem(Pem) orelse {error, not_pem}; {error, Reason} -> {error, Reason} @@ -513,10 +514,16 @@ do_drop_invalid_certs([KeyPath | KeyPaths], SSL) -> to_server_opts(Type, Opts) -> Versions = integral_versions(Type, maps:get(versions, Opts, undefined)), Ciphers = integral_ciphers(Versions, maps:get(ciphers, Opts, undefined)), - maps:to_list(Opts#{ - ciphers => Ciphers, - versions => Versions - }). + Path = fun(Key) -> resolve_cert_path_for_read_strict(maps:get(Key, Opts, undefined)) end, + filter( + maps:to_list(Opts#{ + keyfile => Path(keyfile), + certfile => Path(certfile), + cacertfile => Path(cacertfile), + ciphers => Ciphers, + versions => Versions + }) + ). %% @doc Convert hocon-checked tls client options (map()) to %% proplist accepted by ssl library. @@ -530,11 +537,12 @@ to_client_opts(Opts) -> to_client_opts(Type, Opts) -> GetD = fun(Key, Default) -> fuzzy_map_get(Key, Opts, Default) end, Get = fun(Key) -> GetD(Key, undefined) end, + Path = fun(Key) -> resolve_cert_path_for_read_strict(Get(Key)) end, case GetD(enable, false) of true -> - KeyFile = ensure_str(Get(keyfile)), - CertFile = ensure_str(Get(certfile)), - CAFile = ensure_str(Get(cacertfile)), + KeyFile = Path(keyfile), + CertFile = Path(certfile), + CAFile = Path(cacertfile), Verify = GetD(verify, verify_none), SNI = ensure_sni(Get(server_name_indication)), Versions = integral_versions(Type, Get(versions)), @@ -556,6 +564,31 @@ to_client_opts(Type, Opts) -> [] end. 
+resolve_cert_path_for_read_strict(Path) -> + case resolve_cert_path_for_read(Path) of + undefined -> + undefined; + ResolvedPath -> + case filelib:is_regular(ResolvedPath) of + true -> + ResolvedPath; + false -> + PathToLog = ensure_str(Path), + LogData = + case PathToLog =:= ResolvedPath of + true -> + #{path => PathToLog}; + false -> + #{path => PathToLog, resolved_path => ResolvedPath} + end, + ?SLOG(error, LogData#{msg => "cert_file_not_found"}), + undefined + end + end. + +resolve_cert_path_for_read(Path) -> + emqx_schema:naive_env_interpolation(Path). + filter([]) -> []; filter([{_, undefined} | T]) -> filter(T); filter([{_, ""} | T]) -> filter(T); diff --git a/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl b/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl index c31bc0355..a44237bd0 100644 --- a/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl +++ b/apps/emqx/src/emqx_trace/emqx_trace_formatter.erl @@ -27,7 +27,7 @@ format( #{level := debug, meta := Meta = #{trace_tag := Tag}, msg := Msg}, #{payload_encode := PEncode} ) -> - Time = calendar:system_time_to_rfc3339(erlang:system_time(second)), + Time = calendar:system_time_to_rfc3339(erlang:system_time(microsecond), [{unit, microsecond}]), ClientId = to_iolist(maps:get(clientid, Meta, "")), Peername = maps:get(peername, Meta, ""), MetaBin = format_meta(Meta, PEncode), diff --git a/apps/emqx/src/emqx_ws_connection.erl b/apps/emqx/src/emqx_ws_connection.erl index faf62f98d..00fe545eb 100644 --- a/apps/emqx/src/emqx_ws_connection.erl +++ b/apps/emqx/src/emqx_ws_connection.erl @@ -90,7 +90,7 @@ listener :: {Type :: atom(), Name :: atom()}, %% Limiter - limiter :: maybe(container()), + limiter :: container(), %% cache operation when overload limiter_cache :: queue:queue(cache()), @@ -579,54 +579,61 @@ handle_timeout(TRef, TMsg, State) -> list(any()), state() ) -> state(). 
+check_limiter( + _Needs, + Data, + WhenOk, + Msgs, + #state{limiter = infinity} = State +) -> + WhenOk(Data, Msgs, State); check_limiter( Needs, Data, WhenOk, Msgs, - #state{ - limiter = Limiter, - limiter_timer = LimiterTimer, - limiter_cache = Cache - } = State + #state{limiter_timer = undefined, limiter = Limiter} = State ) -> - case LimiterTimer of - undefined -> - case emqx_limiter_container:check_list(Needs, Limiter) of - {ok, Limiter2} -> - WhenOk(Data, Msgs, State#state{limiter = Limiter2}); - {pause, Time, Limiter2} -> - ?SLOG(debug, #{ - msg => "pause_time_due_to_rate_limit", - needs => Needs, - time_in_ms => Time - }), + case emqx_limiter_container:check_list(Needs, Limiter) of + {ok, Limiter2} -> + WhenOk(Data, Msgs, State#state{limiter = Limiter2}); + {pause, Time, Limiter2} -> + ?SLOG(debug, #{ + msg => "pause_time_due_to_rate_limit", + needs => Needs, + time_in_ms => Time + }), - Retry = #retry{ - types = [Type || {_, Type} <- Needs], - data = Data, - next = WhenOk - }, + Retry = #retry{ + types = [Type || {_, Type} <- Needs], + data = Data, + next = WhenOk + }, - Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), + Limiter3 = emqx_limiter_container:set_retry_context(Retry, Limiter2), - TRef = start_timer(Time, limit_timeout), + TRef = start_timer(Time, limit_timeout), - enqueue( - {active, false}, - State#state{ - sockstate = blocked, - limiter = Limiter3, - limiter_timer = TRef - } - ); - {drop, Limiter2} -> - {ok, State#state{limiter = Limiter2}} - end; - _ -> - New = #cache{need = Needs, data = Data, next = WhenOk}, - State#state{limiter_cache = queue:in(New, Cache)} - end. 
+ enqueue( + {active, false}, + State#state{ + sockstate = blocked, + limiter = Limiter3, + limiter_timer = TRef + } + ); + {drop, Limiter2} -> + {ok, State#state{limiter = Limiter2}} + end; +check_limiter( + Needs, + Data, + WhenOk, + _Msgs, + #state{limiter_cache = Cache} = State +) -> + New = #cache{need = Needs, data = Data, next = WhenOk}, + State#state{limiter_cache = queue:in(New, Cache)}. -spec retry_limiter(state()) -> state(). retry_limiter(#state{limiter = Limiter} = State) -> diff --git a/apps/emqx/test/emqx_banned_SUITE.erl b/apps/emqx/test/emqx_banned_SUITE.erl index 0c14f64c9..9419ba4c3 100644 --- a/apps/emqx/test/emqx_banned_SUITE.erl +++ b/apps/emqx/test/emqx_banned_SUITE.erl @@ -186,7 +186,7 @@ t_session_taken(_) -> false end end, - 6000 + 15_000 ), Publish(), diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index 94a77d2cd..6c4c9e640 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -267,13 +267,14 @@ t_chan_info(_) -> t_chan_caps(_) -> ?assertMatch( #{ + exclusive_subscription := false, + max_packet_size := 1048576, max_clientid_len := 65535, max_qos_allowed := 2, max_topic_alias := 65535, max_topic_levels := Level, retain_available := true, shared_subscription := true, - subscription_identifiers := true, wildcard_subscription := true } when is_integer(Level), emqx_channel:caps(channel()) diff --git a/apps/emqx/test/emqx_common_test_helpers.erl b/apps/emqx/test/emqx_common_test_helpers.erl index 3aad9c6cc..18af99343 100644 --- a/apps/emqx/test/emqx_common_test_helpers.erl +++ b/apps/emqx/test/emqx_common_test_helpers.erl @@ -32,6 +32,7 @@ start_apps/3, start_app/2, stop_apps/1, + stop_apps/2, reload/2, app_path/2, proj_root/0, @@ -55,12 +56,12 @@ is_tcp_server_available/2, is_tcp_server_available/3, load_config/2, - load_config/3, not_wait_mqtt_payload/1, read_schema_configs/2, render_config_file/2, wait_for/4, - wait_mqtt_payload/1 + wait_mqtt_payload/1, + 
select_free_port/1 ]). -export([ @@ -253,11 +254,20 @@ start_app(App, SpecAppConfig, Opts) -> case application:ensure_all_started(App) of {ok, _} -> ok = ensure_dashboard_listeners_started(App), + ok = wait_for_app_processes(App), ok; {error, Reason} -> error({failed_to_start_app, App, Reason}) end. +wait_for_app_processes(emqx_conf) -> + %% emqx_conf app has a gen_server which + %% initializes its state asynchronously + gen_server:call(emqx_cluster_rpc, dummy), + ok; +wait_for_app_processes(_) -> + ok. + app_conf_file(emqx_conf) -> "emqx.conf.all"; app_conf_file(App) -> atom_to_list(App) ++ ".conf". @@ -274,9 +284,9 @@ app_schema(App) -> mustache_vars(App, Opts) -> ExtraMustacheVars = maps:get(extra_mustache_vars, Opts, #{}), Defaults = #{ + node_cookie => atom_to_list(erlang:get_cookie()), platform_data_dir => app_path(App, "data"), - platform_etc_dir => app_path(App, "etc"), - platform_log_dir => app_path(App, "log") + platform_etc_dir => app_path(App, "etc") }, maps:merge(Defaults, ExtraMustacheVars). @@ -304,12 +314,21 @@ generate_config(SchemaModule, ConfigFile) when is_atom(SchemaModule) -> -spec stop_apps(list()) -> ok. stop_apps(Apps) -> + stop_apps(Apps, #{}). + +stop_apps(Apps, Opts) -> [application:stop(App) || App <- Apps ++ [emqx, ekka, mria, mnesia]], ok = mria_mnesia:delete_schema(), %% to avoid inter-suite flakiness application:unset_env(emqx, init_config_load_done), persistent_term:erase(?EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY), - emqx_config:erase_schema_mod_and_names(), + case Opts of + #{erase_all_configs := false} -> + %% FIXME: this means inter-suite or inter-test dependencies + ok; + _ -> + emqx_config:erase_all() + end, ok = emqx_config:delete_override_conf_files(), application:unset_env(emqx, local_override_conf_file), application:unset_env(emqx, cluster_override_conf_file), @@ -478,18 +497,14 @@ copy_certs(emqx_conf, Dest0) -> copy_certs(_, _) -> ok. 
-load_config(SchemaModule, Config, Opts) -> +load_config(SchemaModule, Config) -> ConfigBin = case is_map(Config) of true -> emqx_utils_json:encode(Config); false -> Config end, ok = emqx_config:delete_override_conf_files(), - ok = emqx_config:init_load(SchemaModule, ConfigBin, Opts), - ok. - -load_config(SchemaModule, Config) -> - load_config(SchemaModule, Config, #{raw_with_default => false}). + ok = emqx_config:init_load(SchemaModule, ConfigBin). -spec is_all_tcp_servers_available(Servers) -> Result when Servers :: [{Host, Port}], @@ -665,6 +680,7 @@ start_slave(Name, Opts) when is_map(Opts) -> SlaveMod = maps:get(peer_mod, Opts, ct_slave), Node = node_name(Name), put_peer_mod(Node, SlaveMod), + Cookie = atom_to_list(erlang:get_cookie()), DoStart = fun() -> case SlaveMod of @@ -676,7 +692,11 @@ start_slave(Name, Opts) when is_map(Opts) -> {monitor_master, true}, {init_timeout, 20_000}, {startup_timeout, 20_000}, - {erl_flags, erl_flags()} + {erl_flags, erl_flags()}, + {env, [ + {"HOCON_ENV_OVERRIDE_PREFIX", "EMQX_"}, + {"EMQX_NODE__COOKIE", Cookie} + ]} ] ); slave -> @@ -1241,3 +1261,34 @@ get_or_spawn_janitor() -> on_exit(Fun) -> Janitor = get_or_spawn_janitor(), ok = emqx_test_janitor:push_on_exit_callback(Janitor, Fun). + +%%------------------------------------------------------------------------------- +%% Select a free transport port from the OS +%%------------------------------------------------------------------------------- +%% @doc get unused port from OS +-spec select_free_port(tcp | udp | ssl | quic) -> inets:port_number(). +select_free_port(tcp) -> + select_free_port(gen_tcp, listen); +select_free_port(udp) -> + select_free_port(gen_udp, open); +select_free_port(ssl) -> + select_free_port(tcp); +select_free_port(quic) -> + select_free_port(udp). 
+ +select_free_port(GenModule, Fun) when + GenModule == gen_tcp orelse + GenModule == gen_udp +-> + {ok, S} = GenModule:Fun(0, [{reuseaddr, true}]), + {ok, Port} = inet:port(S), + ok = GenModule:close(S), + case os:type() of + {unix, darwin} -> + %% in MacOS, still get address_in_use after close port + timer:sleep(500); + _ -> + skip + end, + ct:pal("Select free OS port: ~p", [Port]), + Port. diff --git a/apps/emqx/test/emqx_config_SUITE.erl b/apps/emqx/test/emqx_config_SUITE.erl index 7befd7a16..6cabfbfe9 100644 --- a/apps/emqx/test/emqx_config_SUITE.erl +++ b/apps/emqx/test/emqx_config_SUITE.erl @@ -50,7 +50,6 @@ t_fill_default_values(_) -> }, <<"route_batch_clean">> := false, <<"session_locking_strategy">> := quorum, - <<"shared_dispatch_ack_enabled">> := false, <<"shared_subscription_strategy">> := round_robin } }, @@ -59,3 +58,22 @@ t_fill_default_values(_) -> %% ensure JSON compatible _ = emqx_utils_json:encode(WithDefaults), ok. + +t_init_load(_Config) -> + ConfFile = "./test_emqx.conf", + ok = file:write_file(ConfFile, <<"">>), + ExpectRootNames = lists:sort(hocon_schema:root_names(emqx_schema)), + emqx_config:erase_all(), + {ok, DeprecatedFile} = application:get_env(emqx, cluster_override_conf_file), + ?assertEqual(false, filelib:is_regular(DeprecatedFile), DeprecatedFile), + %% Don't has deprecated file + ok = emqx_config:init_load(emqx_schema, [ConfFile]), + ?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())), + ?assertMatch({ok, #{raw_config := 256}}, emqx:update_config([mqtt, max_topic_levels], 256)), + emqx_config:erase_all(), + %% Has deprecated file + ok = file:write_file(DeprecatedFile, <<"{}">>), + ok = emqx_config:init_load(emqx_schema, [ConfFile]), + ?assertEqual(ExpectRootNames, lists:sort(emqx_config:get_root_names())), + ?assertMatch({ok, #{raw_config := 128}}, emqx:update_config([mqtt, max_topic_levels], 128)), + ok = file:delete(DeprecatedFile). 
diff --git a/apps/emqx/test/emqx_connection_SUITE.erl b/apps/emqx/test/emqx_connection_SUITE.erl index f24c1c895..0692ec8f5 100644 --- a/apps/emqx/test/emqx_connection_SUITE.erl +++ b/apps/emqx/test/emqx_connection_SUITE.erl @@ -38,8 +38,6 @@ init_per_suite(Config) -> ok = meck:new(emqx_cm, [passthrough, no_history, no_link]), ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_disconnected, fun(_) -> ok end), - %% Meck Limiter - ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), %% Meck Pd ok = meck:new(emqx_pd, [passthrough, no_history, no_link]), %% Meck Metrics @@ -67,7 +65,6 @@ end_per_suite(_Config) -> ok = meck:unload(emqx_transport), catch meck:unload(emqx_channel), ok = meck:unload(emqx_cm), - ok = meck:unload(emqx_htb_limiter), ok = meck:unload(emqx_pd), ok = meck:unload(emqx_metrics), ok = meck:unload(emqx_hooks), @@ -421,6 +418,14 @@ t_ensure_rate_limit(_) -> {ok, [], State1} = emqx_connection:check_limiter([], [], WhenOk, [], st(#{limiter => Limiter})), ?assertEqual(Limiter, emqx_connection:info(limiter, State1)), + ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), + + ok = meck:expect( + emqx_htb_limiter, + make_infinity_limiter, + fun() -> non_infinity end + ), + ok = meck:expect( emqx_htb_limiter, check, @@ -431,10 +436,10 @@ t_ensure_rate_limit(_) -> [], WhenOk, [], - st(#{limiter => Limiter}) + st(#{limiter => init_limiter()}) ), meck:unload(emqx_htb_limiter), - ok = meck:new(emqx_htb_limiter, [passthrough, no_history, no_link]), + ?assertNotEqual(undefined, emqx_connection:info(limiter_timer, State2)). t_activate_socket(_) -> @@ -495,6 +500,7 @@ t_get_conn_info(_) -> end). 
t_oom_shutdown(init, Config) -> + ok = snabbkaffe:stop(), ok = snabbkaffe:start_trace(), ok = meck:new(emqx_utils, [non_strict, passthrough, no_history, no_link]), meck:expect( @@ -707,7 +713,14 @@ init_limiter() -> limiter_cfg() -> Cfg = bucket_cfg(), - Client = #{ + Client = client_cfg(), + #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. + +bucket_cfg() -> + #{rate => infinity, initial => 0, burst => 0}. + +client_cfg() -> + #{ rate => infinity, initial => 0, burst => 0, @@ -715,11 +728,7 @@ limiter_cfg() -> divisible => false, max_retry_time => timer:seconds(5), failure_strategy => force - }, - #{bytes => Cfg, messages => Cfg, client => #{bytes => Client, messages => Client}}. - -bucket_cfg() -> - #{rate => infinity, initial => 0, burst => 0}. + }. add_bucket() -> Cfg = bucket_cfg(), diff --git a/apps/emqx/test/emqx_crl_cache_SUITE.erl b/apps/emqx/test/emqx_crl_cache_SUITE.erl index dd3eb29e7..1b8abb9c3 100644 --- a/apps/emqx/test/emqx_crl_cache_SUITE.erl +++ b/apps/emqx/test/emqx_crl_cache_SUITE.erl @@ -35,6 +35,7 @@ all() -> init_per_suite(Config) -> application:load(emqx), + {ok, _} = application:ensure_all_started(ssl), emqx_config:save_schema_mod_and_names(emqx_schema), emqx_common_test_helpers:boot_modules(all), Config. @@ -328,7 +329,15 @@ drain_msgs() -> clear_crl_cache() -> %% reset the CRL cache + Ref = monitor(process, whereis(ssl_manager)), exit(whereis(ssl_manager), kill), + receive + {'DOWN', Ref, process, _, _} -> + ok + after 1_000 -> + ct:fail("ssl_manager didn't die") + end, + ensure_ssl_manager_alive(), ok. force_cacertfile(Cacertfile) -> @@ -382,7 +391,6 @@ setup_crl_options(Config, #{is_cached := IsCached} = Opts) -> false -> %% ensure cache is empty clear_crl_cache(), - ct:sleep(200), ok end, drain_msgs(), @@ -459,6 +467,13 @@ of_kinds(Trace0, Kinds0) -> Trace0 ). +ensure_ssl_manager_alive() -> + ?retry( + _Sleep0 = 200, + _Attempts0 = 50, + true = is_pid(whereis(ssl_manager)) + ). 
+ %%-------------------------------------------------------------------- %% Test cases %%-------------------------------------------------------------------- diff --git a/apps/emqx/test/emqx_listeners_SUITE.erl b/apps/emqx/test/emqx_listeners_SUITE.erl index 107f3d4e7..fa0713cf0 100644 --- a/apps/emqx/test/emqx_listeners_SUITE.erl +++ b/apps/emqx/test/emqx_listeners_SUITE.erl @@ -47,13 +47,14 @@ init_per_testcase(Case, Config) when Case =:= t_max_conns_tcp; Case =:= t_current_conns_tcp -> catch emqx_config_handler:stop(), + Port = emqx_common_test_helpers:select_free_port(tcp), {ok, _} = emqx_config_handler:start_link(), PrevListeners = emqx_config:get([listeners], #{}), PureListeners = remove_default_limiter(PrevListeners), PureListeners2 = PureListeners#{ tcp => #{ listener_test => #{ - bind => {"127.0.0.1", 9999}, + bind => {"127.0.0.1", Port}, max_connections => 4321, limiter => #{} } @@ -63,19 +64,20 @@ init_per_testcase(Case, Config) when ok = emqx_listeners:start(), [ - {prev_listener_conf, PrevListeners} + {prev_listener_conf, PrevListeners}, + {tcp_port, Port} | Config ]; init_per_testcase(t_wss_conn, Config) -> catch emqx_config_handler:stop(), + Port = emqx_common_test_helpers:select_free_port(ssl), {ok, _} = emqx_config_handler:start_link(), - PrevListeners = emqx_config:get([listeners], #{}), PureListeners = remove_default_limiter(PrevListeners), PureListeners2 = PureListeners#{ wss => #{ listener_test => #{ - bind => {{127, 0, 0, 1}, 9998}, + bind => {{127, 0, 0, 1}, Port}, limiter => #{}, ssl_options => #{ cacertfile => ?CERTS_PATH("cacert.pem"), @@ -89,7 +91,8 @@ init_per_testcase(t_wss_conn, Config) -> ok = emqx_listeners:start(), [ - {prev_listener_conf, PrevListeners} + {prev_listener_conf, PrevListeners}, + {wss_port, Port} | Config ]; init_per_testcase(_, Config) -> @@ -171,20 +174,30 @@ t_restart_listeners_with_hibernate_after_disabled(_Config) -> ok = emqx_listeners:stop(), emqx_config:put([listeners], OldLConf). 
-t_max_conns_tcp(_) -> +t_max_conns_tcp(Config) -> %% Note: Using a string representation for the bind address like %% "127.0.0.1" does not work - ?assertEqual(4321, emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). + ?assertEqual( + 4321, + emqx_listeners:max_conns('tcp:listener_test', {{127, 0, 0, 1}, ?config(tcp_port, Config)}) + ). -t_current_conns_tcp(_) -> - ?assertEqual(0, emqx_listeners:current_conns('tcp:listener_test', {{127, 0, 0, 1}, 9999})). +t_current_conns_tcp(Config) -> + ?assertEqual( + 0, + emqx_listeners:current_conns('tcp:listener_test', { + {127, 0, 0, 1}, ?config(tcp_port, Config) + }) + ). -t_wss_conn(_) -> - {ok, Socket} = ssl:connect({127, 0, 0, 1}, 9998, [{verify, verify_none}], 1000), +t_wss_conn(Config) -> + {ok, Socket} = ssl:connect( + {127, 0, 0, 1}, ?config(wss_port, Config), [{verify, verify_none}], 1000 + ), ok = ssl:close(Socket). t_quic_conn(Config) -> - Port = 24568, + Port = emqx_common_test_helpers:select_free_port(quic), DataDir = ?config(data_dir, Config), SSLOpts = #{ password => ?SERVER_KEY_PASSWORD, @@ -207,7 +220,7 @@ t_quic_conn(Config) -> emqx_listeners:stop_listener(quic, ?FUNCTION_NAME, #{bind => Port}). t_ssl_password_cert(Config) -> - Port = 24568, + Port = emqx_common_test_helpers:select_free_port(ssl), DataDir = ?config(data_dir, Config), SSLOptsPWD = #{ password => ?SERVER_KEY_PASSWORD, @@ -266,8 +279,7 @@ render_config_file() -> mustache_vars() -> [ {platform_data_dir, local_path(["data"])}, - {platform_etc_dir, local_path(["etc"])}, - {platform_log_dir, local_path(["log"])} + {platform_etc_dir, local_path(["etc"])} ]. generate_config() -> diff --git a/apps/emqx/test/emqx_logger_SUITE.erl b/apps/emqx/test/emqx_logger_SUITE.erl index c8ff63c75..e8d7d7a34 100644 --- a/apps/emqx/test/emqx_logger_SUITE.erl +++ b/apps/emqx/test/emqx_logger_SUITE.erl @@ -22,7 +22,6 @@ -include_lib("eunit/include/eunit.hrl"). -define(LOGGER, emqx_logger). --define(a, "a"). 
-define(SUPPORTED_LEVELS, [emergency, alert, critical, error, warning, notice, info, debug]). all() -> emqx_common_test_helpers:all(?MODULE). diff --git a/apps/emqx/test/emqx_ocsp_cache_SUITE.erl b/apps/emqx/test/emqx_ocsp_cache_SUITE.erl index 15ca29853..75c41b9fb 100644 --- a/apps/emqx/test/emqx_ocsp_cache_SUITE.erl +++ b/apps/emqx/test/emqx_ocsp_cache_SUITE.erl @@ -254,10 +254,15 @@ does_module_exist(Mod) -> end. assert_no_http_get() -> + Timeout = 0, + Error = should_be_cached, + assert_no_http_get(Timeout, Error). + +assert_no_http_get(Timeout, Error) -> receive {http_get, _URL} -> - error(should_be_cached) - after 0 -> + error(Error) + after Timeout -> ok end. @@ -702,7 +707,9 @@ do_t_update_listener(Config) -> %% the API converts that to an internally %% managed file <<"issuer_pem">> => IssuerPem, - <<"responder_url">> => <<"http://localhost:9877">> + <<"responder_url">> => <<"http://localhost:9877">>, + %% for quicker testing; min refresh in tests is 5 s. + <<"refresh_interval">> => <<"5s">> } } }, @@ -739,6 +746,70 @@ do_t_update_listener(Config) -> ) ), assert_http_get(1, 5_000), + + %% Disable OCSP Stapling; the periodic refreshes should stop + RefreshInterval = emqx_config:get([listeners, ssl, default, ssl_options, ocsp, refresh_interval]), + OCSPConfig1 = + #{ + <<"ssl_options">> => + #{ + <<"ocsp">> => + #{ + <<"enable_ocsp_stapling">> => false + } + } + }, + ListenerData3 = emqx_utils_maps:deep_merge(ListenerData2, OCSPConfig1), + {ok, {_, _, ListenerData4}} = update_listener_via_api(ListenerId, ListenerData3), + ?assertMatch( + #{ + <<"ssl_options">> := + #{ + <<"ocsp">> := + #{ + <<"enable_ocsp_stapling">> := false + } + } + }, + ListenerData4 + ), + + assert_no_http_get(2 * RefreshInterval, should_stop_refreshing), + + ok. 
+ +t_double_unregister(_Config) -> + ListenerID = <<"ssl:test_ocsp">>, + Conf = emqx_config:get_listener_conf(ssl, test_ocsp, []), + ?check_trace( + begin + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:register_listener(ListenerID, Conf), + #{?snk_kind := ocsp_http_fetch_and_cache, listener_id := ListenerID}, + 5_000 + ), + assert_http_get(1), + + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:unregister_listener(ListenerID), + #{?snk_kind := ocsp_cache_listener_unregistered, listener_id := ListenerID}, + 5_000 + ), + + %% Should be idempotent and not crash + {ok, {ok, _}} = + ?wait_async_action( + emqx_ocsp_cache:unregister_listener(ListenerID), + #{?snk_kind := ocsp_cache_listener_unregistered, listener_id := ListenerID}, + 5_000 + ), + ok + end, + [] + ), + ok. t_ocsp_responder_error_responses(_Config) -> diff --git a/apps/emqx/test/emqx_quic_multistreams_SUITE.erl b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl index 4afd965bd..b55a28206 100644 --- a/apps/emqx/test/emqx_quic_multistreams_SUITE.erl +++ b/apps/emqx/test/emqx_quic_multistreams_SUITE.erl @@ -2026,18 +2026,7 @@ stop_emqx() -> %% select a random port picked by OS -spec select_port() -> inet:port_number(). select_port() -> - {ok, S} = gen_udp:open(0, [{reuseaddr, true}]), - {ok, {_, Port}} = inet:sockname(S), - gen_udp:close(S), - case os:type() of - {unix, darwin} -> - %% in MacOS, still get address_in_use after close port - timer:sleep(500); - _ -> - skip - end, - ct:pal("select port: ~p", [Port]), - Port. + emqx_common_test_helpers:select_free_port(quic). -spec via_stream({quic, quicer:connection_handle(), quicer:stream_handle()}) -> quicer:stream_handle(). diff --git a/apps/emqx/test/emqx_ratelimiter_SUITE.erl b/apps/emqx/test/emqx_ratelimiter_SUITE.erl index 26048873e..67ed8e6bc 100644 --- a/apps/emqx/test/emqx_ratelimiter_SUITE.erl +++ b/apps/emqx/test/emqx_ratelimiter_SUITE.erl @@ -38,6 +38,7 @@ -define(LOGT(Format, Args), ct:pal("TEST_SUITE: " ++ Format, Args)). 
-define(RATE(Rate), to_rate(Rate)). -define(NOW, erlang:system_time(millisecond)). +-define(ROOT_COUNTER_IDX, 1). %%-------------------------------------------------------------------- %% Setups @@ -211,11 +212,11 @@ t_infinity_client(_) -> end, with_per_client(Fun, Case). -t_try_restore_agg(_) -> +t_try_restore_with_bucket(_) -> Fun = fun(#{client := Cli} = Bucket) -> Bucket2 = Bucket#{ - rate := 1, - burst := 199, + rate := 100, + burst := 100, initial := 50 }, Cli2 = Cli#{ @@ -394,38 +395,6 @@ t_burst(_) -> Case ). -t_limit_global_with_unlimit_other(_) -> - GlobalMod = fun(#{message_routing := MR} = Cfg) -> - Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}} - end, - - Bucket = fun(#{client := Cli} = Bucket) -> - Bucket2 = Bucket#{ - rate := infinity, - initial := 0, - burst := 0 - }, - Cli2 = Cli#{ - rate := infinity, - burst := 0, - initial := 0 - }, - Bucket2#{client := Cli2} - end, - - Case = fun() -> - C1 = counters:new(1, []), - start_client({b1, Bucket}, ?NOW + 2000, C1, 20), - timer:sleep(2200), - check_average_rate(C1, 2, 600) - end, - - with_global( - GlobalMod, - [{b1, Bucket}], - Case - ). - %%-------------------------------------------------------------------- %% Test Cases container %%-------------------------------------------------------------------- @@ -454,38 +423,6 @@ t_check_container(_) -> end, with_per_client(Cfg, Case). -%%-------------------------------------------------------------------- -%% Test Override -%%-------------------------------------------------------------------- -t_bucket_no_client(_) -> - Rate = ?RATE("1/s"), - GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) -> - Cfg#{client := Client#{message_routing := MR#{rate := Rate}}} - end, - BucketMod = fun(Bucket) -> - maps:remove(client, Bucket) - end, - Case = fun() -> - Limiter = connect(BucketMod(make_limiter_cfg())), - ?assertMatch(#{rate := Rate}, Limiter) - end, - with_global(GlobalMod, [BucketMod], Case). 
- -t_bucket_client(_) -> - GlobalRate = ?RATE("1/s"), - BucketRate = ?RATE("10/s"), - GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) -> - Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}} - end, - BucketMod = fun(#{client := Client} = Bucket) -> - Bucket#{client := Client#{rate := BucketRate}} - end, - Case = fun() -> - Limiter = connect(BucketMod(make_limiter_cfg())), - ?assertMatch(#{rate := BucketRate}, Limiter) - end, - with_global(GlobalMod, [BucketMod], Case). - %%-------------------------------------------------------------------- %% Test Cases misc %%-------------------------------------------------------------------- @@ -574,7 +511,7 @@ t_schema_unit(_) -> ?assertEqual({ok, 100 * 1024 * 1024 * 1024}, M:to_capacity("100GB")), ok. -compatibility_for_capacity(_) -> +t_compatibility_for_capacity(_) -> CfgStr = << "" "\n" @@ -594,7 +531,7 @@ compatibility_for_capacity(_) -> parse_and_check(CfgStr) ). -compatibility_for_message_in(_) -> +t_compatibility_for_message_in(_) -> CfgStr = << "" "\n" @@ -614,7 +551,7 @@ compatibility_for_message_in(_) -> parse_and_check(CfgStr) ). -compatibility_for_bytes_in(_) -> +t_compatibility_for_bytes_in(_) -> CfgStr = << "" "\n" @@ -634,6 +571,174 @@ compatibility_for_bytes_in(_) -> parse_and_check(CfgStr) ). 
+t_extract_with_type(_) -> + IsOnly = fun + (_Key, Cfg) when map_size(Cfg) =/= 1 -> + false; + (Key, Cfg) -> + maps:is_key(Key, Cfg) + end, + Checker = fun + (Type, #{client := Client} = Cfg) -> + Cfg2 = maps:remove(client, Cfg), + IsOnly(Type, Client) andalso + (IsOnly(Type, Cfg2) orelse + map_size(Cfg2) =:= 0); + (Type, Cfg) -> + IsOnly(Type, Cfg) + end, + ?assertEqual(undefined, emqx_limiter_schema:extract_with_type(messages, undefined)), + ?assert( + Checker( + messages, + emqx_limiter_schema:extract_with_type(messages, #{ + messages => #{rate => 1}, bytes => #{rate => 1} + }) + ) + ), + ?assert( + Checker( + messages, + emqx_limiter_schema:extract_with_type(messages, #{ + messages => #{rate => 1}, + bytes => #{rate => 1}, + client => #{messages => #{rate => 2}} + }) + ) + ), + ?assert( + Checker( + messages, + emqx_limiter_schema:extract_with_type(messages, #{ + client => #{messages => #{rate => 2}, bytes => #{rate => 1}} + }) + ) + ). + +%%-------------------------------------------------------------------- +%% Test Cases Create Instance +%%-------------------------------------------------------------------- +t_create_instance_with_infinity_node(_) -> + emqx_limiter_manager:insert_bucket(?FUNCTION_NAME, bytes, ?FUNCTION_NAME), + Cases = make_create_test_data_with_infinity_node(?FUNCTION_NAME), + lists:foreach( + fun({Cfg, Expected}) -> + {ok, Result} = emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg), + IsMatched = + case is_atom(Expected) of + true -> + Result =:= Expected; + _ -> + Expected(Result) + end, + ?assert( + IsMatched, + lists:flatten( + io_lib:format("Got unexpected:~p~n, Cfg:~p~n", [ + Result, Cfg + ]) + ) + ) + end, + Cases + ), + emqx_limiter_manager:delete_bucket(?FUNCTION_NAME, bytes), + ok. 
+ +t_not_exists_instance(_) -> + Cfg = #{bytes => #{rate => 100, burst => 0, initial => 0}}, + ?assertEqual( + {error, invalid_bucket}, + emqx_limiter_server:connect(?FUNCTION_NAME, bytes, Cfg) + ), + + ?assertEqual( + {error, invalid_bucket}, + emqx_limiter_server:connect(?FUNCTION_NAME, not_exists, Cfg) + ), + ok. + +t_create_instance_with_node(_) -> + GlobalMod = fun(#{message_routing := MR} = Cfg) -> + Cfg#{ + message_routing := MR#{rate := ?RATE("200/1s")}, + messages := MR#{rate := ?RATE("200/1s")} + } + end, + + B1 = fun(Bucket) -> + Bucket#{rate := ?RATE("400/1s")} + end, + + B2 = fun(Bucket) -> + Bucket#{rate := infinity} + end, + + IsRefLimiter = fun + ({ok, #{tokens := _}}, _IsRoot) -> + false; + ({ok, #{bucket := #{index := ?ROOT_COUNTER_IDX}}}, true) -> + true; + ({ok, #{bucket := #{index := Index}}}, false) when Index =/= ?ROOT_COUNTER_IDX -> + true; + (Result, _IsRoot) -> + ct:pal("The result is:~p~n", [Result]), + false + end, + + Case = fun() -> + BucketCfg = make_limiter_cfg(), + + ?assert( + IsRefLimiter(emqx_limiter_server:connect(b1, message_routing, B1(BucketCfg)), false) + ), + ?assert( + IsRefLimiter(emqx_limiter_server:connect(b2, message_routing, B2(BucketCfg)), true) + ), + ?assert(IsRefLimiter(emqx_limiter_server:connect(x, messages, undefined), true)), + ?assertNot(IsRefLimiter(emqx_limiter_server:connect(x, bytes, undefined), false)) + end, + + with_global( + GlobalMod, + [{b1, B1}, {b2, B2}], + Case + ), + ok. 
+ +%%-------------------------------------------------------------------- +%% Test Cases emqx_esockd_htb_limiter +%%-------------------------------------------------------------------- +t_create_esockd_htb_limiter(_) -> + Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, undefined), + ?assertMatch( + #{module := _, id := ?FUNCTION_NAME, type := bytes, bucket := undefined}, + Opts + ), + + Limiter = emqx_esockd_htb_limiter:create(Opts), + ?assertMatch( + #{module := _, name := bytes, limiter := infinity}, + Limiter + ), + + ?assertEqual(ok, emqx_esockd_htb_limiter:delete(Limiter)), + ok. + +t_esockd_htb_consume(_) -> + ClientCfg = emqx_limiter_schema:default_client_config(), + Cfg = #{client => #{bytes => ClientCfg#{rate := 50, max_retry_time := 0}}}, + Opts = emqx_esockd_htb_limiter:new_create_options(?FUNCTION_NAME, bytes, Cfg), + Limiter = emqx_esockd_htb_limiter:create(Opts), + + C1R = emqx_esockd_htb_limiter:consume(51, Limiter), + ?assertMatch({pause, _Ms, _Limiter2}, C1R), + + timer:sleep(300), + C2R = emqx_esockd_htb_limiter:consume(50, Limiter), + ?assertMatch({ok, _}, C2R), + ok. + %%-------------------------------------------------------------------- %%% Internal functions %%-------------------------------------------------------------------- @@ -877,3 +982,64 @@ apply_modifier(Pairs, #{default := Template}) -> parse_and_check(ConfigString) -> ok = emqx_common_test_helpers:load_config(emqx_schema, ConfigString), emqx:get_config([listeners, tcp, default, limiter]). 
+ +make_create_test_data_with_infinity_node(FakeInstnace) -> + Infinity = emqx_htb_limiter:make_infinity_limiter(), + ClientCfg = emqx_limiter_schema:default_client_config(), + InfinityRef = emqx_limiter_bucket_ref:infinity_bucket(), + MkC = fun(Rate) -> + #{client => #{bytes => ClientCfg#{rate := Rate}}} + end, + MkB = fun(Rate) -> + #{bytes => #{rate => Rate, burst => 0, initial => 0}} + end, + + MkA = fun(Client, Bucket) -> + maps:merge(MkC(Client), MkB(Bucket)) + end, + IsRefLimiter = fun(Expected) -> + fun + (#{tokens := _}) -> false; + (#{bucket := Bucket}) -> Bucket =:= Expected; + (_) -> false + end + end, + + IsTokenLimiter = fun(Expected) -> + fun + (#{tokens := _, bucket := Bucket}) -> Bucket =:= Expected; + (_) -> false + end + end, + + [ + %% default situation, no limiter setting + {undefined, Infinity}, + + %% client = undefined bucket = undefined + {#{}, Infinity}, + %% client = undefined bucket = infinity + {MkB(infinity), Infinity}, + %% client = undefined bucket = other + {MkB(100), IsRefLimiter(FakeInstnace)}, + + %% client = infinity bucket = undefined + {MkC(infinity), Infinity}, + %% client = infinity bucket = infinity + {MkA(infinity, infinity), Infinity}, + + %% client = infinity bucket = other + {MkA(infinity, 100), IsRefLimiter(FakeInstnace)}, + + %% client = other bucket = undefined + {MkC(100), IsTokenLimiter(InfinityRef)}, + + %% client = other bucket = infinity + {MkC(100), IsTokenLimiter(InfinityRef)}, + + %% client = C bucket = B C < B + {MkA(100, 1000), IsTokenLimiter(FakeInstnace)}, + + %% client = C bucket = B C > B + {MkA(1000, 100), IsRefLimiter(FakeInstnace)} + ]. 
diff --git a/apps/emqx/test/emqx_schema_tests.erl b/apps/emqx/test/emqx_schema_tests.erl index 5176f4fad..81991f26e 100644 --- a/apps/emqx/test/emqx_schema_tests.erl +++ b/apps/emqx/test/emqx_schema_tests.erl @@ -219,112 +219,124 @@ parse_server_test_() -> ?T( "single server, binary, no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse(<<"localhost">>) ) ), ?T( "single server, string, no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse("localhost") ) ), ?T( "single server, list(string), no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse(["localhost"]) ) ), ?T( "single server, list(binary), no port", ?assertEqual( - [{"localhost", DefaultPort}], + [#{hostname => "localhost", port => DefaultPort}], Parse([<<"localhost">>]) ) ), ?T( "single server, binary, with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse(<<"localhost:9999">>) ) ), ?T( "single server, list(string), with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse(["localhost:9999"]) ) ), ?T( "single server, string, with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse("localhost:9999") ) ), ?T( "single server, list(binary), with port", ?assertEqual( - [{"localhost", 9999}], + [#{hostname => "localhost", port => 9999}], Parse([<<"localhost:9999">>]) ) ), ?T( "multiple servers, string, no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse("host1, host2") ) ), ?T( "multiple servers, binary, no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => 
DefaultPort} + ], Parse(<<"host1, host2,,,">>) ) ), ?T( "multiple servers, list(string), no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse(["host1", "host2"]) ) ), ?T( "multiple servers, list(binary), no port", ?assertEqual( - [{"host1", DefaultPort}, {"host2", DefaultPort}], + [ + #{hostname => "host1", port => DefaultPort}, + #{hostname => "host2", port => DefaultPort} + ], Parse([<<"host1">>, <<"host2">>]) ) ), ?T( "multiple servers, string, with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse("host1:1234, host2:2345") ) ), ?T( "multiple servers, binary, with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse(<<"host1:1234, host2:2345, ">>) ) ), ?T( "multiple servers, list(string), with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse([" host1:1234 ", "host2:2345"]) ) ), ?T( "multiple servers, list(binary), with port", ?assertEqual( - [{"host1", 1234}, {"host2", 2345}], + [#{hostname => "host1", port => 1234}, #{hostname => "host2", port => 2345}], Parse([<<"host1:1234">>, <<"host2:2345">>]) ) ), @@ -350,9 +362,9 @@ parse_server_test_() -> ) ), ?T( - "multiple servers wihtout port, mixed list(binary|string)", + "multiple servers without port, mixed list(binary|string)", ?assertEqual( - ["host1", "host2"], + [#{hostname => "host1"}, #{hostname => "host2"}], Parse2([<<"host1">>, "host2"], #{no_port => true}) ) ), @@ -394,14 +406,18 @@ parse_server_test_() -> ?T( "single server map", ?assertEqual( - [{"host1.domain", 1234}], + [#{hostname => "host1.domain", port => 1234}], HoconParse("host1.domain:1234") ) ), ?T( "multiple servers map", ?assertEqual( - 
[{"host1.domain", 1234}, {"host2.domain", 2345}, {"host3.domain", 3456}], + [ + #{hostname => "host1.domain", port => 1234}, + #{hostname => "host2.domain", port => 2345}, + #{hostname => "host3.domain", port => 3456} + ], HoconParse("host1.domain:1234,host2.domain:2345,host3.domain:3456") ) ), @@ -447,6 +463,171 @@ parse_server_test_() -> "bad_schema", emqx_schema:parse_server("whatever", #{default_port => 10, no_port => true}) ) + ), + ?T( + "scheme, hostname and port", + ?assertEqual( + #{scheme => "pulsar+ssl", hostname => "host", port => 6651}, + emqx_schema:parse_server( + "pulsar+ssl://host:6651", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, default port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6650}, + emqx_schema:parse_server( + "pulsar://host", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, no port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host"}, + emqx_schema:parse_server( + "pulsar://host", + #{ + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "scheme and hostname, missing port", + ?assertThrow( + "missing_port_number", + emqx_schema:parse_server( + "pulsar://host", + #{ + no_port => false, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, no default port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host"}, + emqx_schema:parse_server( + "host", + #{ + default_scheme => "pulsar", + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, default port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6650}, + emqx_schema:parse_server( + "host", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "just 
hostname, expecting missing scheme", + ?assertThrow( + "missing_scheme", + emqx_schema:parse_server( + "host", + #{ + no_port => true, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "hostname, default scheme, defined port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6651}, + emqx_schema:parse_server( + "host:6651", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "inconsistent scheme opts", + ?assertError( + "bad_schema", + emqx_schema:parse_server( + "pulsar+ssl://host:6651", + #{ + default_port => 6650, + default_scheme => "something", + supported_schemes => ["not", "supported"] + } + ) + ) + ), + ?T( + "hostname, default scheme, defined port", + ?assertEqual( + #{scheme => "pulsar", hostname => "host", port => 6651}, + emqx_schema:parse_server( + "host:6651", + #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) + ), + ?T( + "unsupported scheme", + ?assertThrow( + "unsupported_scheme", + emqx_schema:parse_server( + "pulsar+quic://host:6651", + #{ + default_port => 6650, + supported_schemes => ["pulsar"] + } + ) + ) + ), + ?T( + "multiple hostnames with schemes (1)", + ?assertEqual( + [ + #{scheme => "pulsar", hostname => "host", port => 6649}, + #{scheme => "pulsar+ssl", hostname => "other.host", port => 6651}, + #{scheme => "pulsar", hostname => "yet.another", port => 6650} + ], + emqx_schema:parse_servers( + "pulsar://host:6649, pulsar+ssl://other.host:6651,pulsar://yet.another", + #{ + default_port => 6650, + supported_schemes => ["pulsar", "pulsar+ssl"] + } + ) + ) ) ]. @@ -513,3 +694,81 @@ url_type_test_() -> typerefl:from_string(emqx_schema:url(), <<"">>) ) ]. 
+ +env_test_() -> + Do = fun emqx_schema:naive_env_interpolation/1, + [ + {"undefined", fun() -> ?assertEqual(undefined, Do(undefined)) end}, + {"full env abs path", + with_env_fn( + "MY_FILE", + "/path/to/my/file", + fun() -> ?assertEqual("/path/to/my/file", Do("$MY_FILE")) end + )}, + {"full env relative path", + with_env_fn( + "MY_FILE", + "path/to/my/file", + fun() -> ?assertEqual("path/to/my/file", Do("${MY_FILE}")) end + )}, + %% we can not test windows style file join though + {"windows style", + with_env_fn( + "MY_FILE", + "path\\to\\my\\file", + fun() -> ?assertEqual("path\\to\\my\\file", Do("$MY_FILE")) end + )}, + {"dir no {}", + with_env_fn( + "MY_DIR", + "/mydir", + fun() -> ?assertEqual("/mydir/foobar", Do(<<"$MY_DIR/foobar">>)) end + )}, + {"dir with {}", + with_env_fn( + "MY_DIR", + "/mydir", + fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}/foobar">>)) end + )}, + %% a trailing / should not cause the sub path to become absolute + {"env dir with trailing /", + with_env_fn( + "MY_DIR", + "/mydir//", + fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}/foobar">>)) end + )}, + {"string dir with doulbe /", + with_env_fn( + "MY_DIR", + "/mydir/", + fun() -> ?assertEqual("/mydir/foobar", Do(<<"${MY_DIR}//foobar">>)) end + )}, + {"env not found", + with_env_fn( + "MY_DIR", + "/mydir/", + fun() -> ?assertEqual("${MY_DIR2}//foobar", Do(<<"${MY_DIR2}//foobar">>)) end + )} + ]. + +with_env_fn(Name, Value, F) -> + fun() -> + with_envs(F, [{Name, Value}]) + end. + +with_envs(Fun, Envs) -> + with_envs(Fun, [], Envs). + +with_envs(Fun, Args, [{_Name, _Value} | _] = Envs) -> + set_envs(Envs), + try + apply(Fun, Args) + after + unset_envs(Envs) + end. + +set_envs([{_Name, _Value} | _] = Envs) -> + lists:map(fun({Name, Value}) -> os:putenv(Name, Value) end, Envs). + +unset_envs([{_Name, _Value} | _] = Envs) -> + lists:map(fun({Name, _}) -> os:unsetenv(Name) end, Envs). 
diff --git a/apps/emqx/test/emqx_test_janitor.erl b/apps/emqx/test/emqx_test_janitor.erl index c3f82a3e1..041b03fa7 100644 --- a/apps/emqx/test/emqx_test_janitor.erl +++ b/apps/emqx/test/emqx_test_janitor.erl @@ -60,12 +60,12 @@ init(Parent) -> {ok, #{callbacks => [], owner => Parent}}. terminate(_Reason, #{callbacks := Callbacks}) -> - lists:foreach(fun(Fun) -> catch Fun() end, Callbacks). + do_terminate(Callbacks). handle_call({push, Callback}, _From, State = #{callbacks := Callbacks}) -> {reply, ok, State#{callbacks := [Callback | Callbacks]}}; handle_call(terminate, _From, State = #{callbacks := Callbacks}) -> - lists:foreach(fun(Fun) -> catch Fun() end, Callbacks), + do_terminate(Callbacks), {stop, normal, ok, State}; handle_call(_Req, _From, State) -> {reply, error, State}. @@ -77,3 +77,23 @@ handle_info({'EXIT', Parent, _Reason}, State = #{owner := Parent}) -> {stop, normal, State}; handle_info(_Msg, State) -> {noreply, State}. + +%%---------------------------------------------------------------------------------- +%% Internal fns +%%---------------------------------------------------------------------------------- + +do_terminate(Callbacks) -> + lists:foreach( + fun(Fun) -> + try + Fun() + catch + K:E:S -> + ct:pal("error executing callback ~p: ~p", [Fun, {K, E}]), + ct:pal("stacktrace: ~p", [S]), + ok + end + end, + Callbacks + ), + ok. 
diff --git a/apps/emqx/test/emqx_ws_connection_SUITE.erl b/apps/emqx/test/emqx_ws_connection_SUITE.erl index 1ae23361e..60abe3d3c 100644 --- a/apps/emqx/test/emqx_ws_connection_SUITE.erl +++ b/apps/emqx/test/emqx_ws_connection_SUITE.erl @@ -138,13 +138,13 @@ end_per_testcase(t_ws_non_check_origin, Config) -> del_bucket(), PrevConfig = ?config(prev_config, Config), emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig), - emqx_common_test_helpers:stop_apps([]), + stop_apps(), ok; end_per_testcase(_, Config) -> del_bucket(), PrevConfig = ?config(prev_config, Config), emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig), - emqx_common_test_helpers:stop_apps([]), + stop_apps(), Config. init_per_suite(Config) -> @@ -156,6 +156,10 @@ end_per_suite(_) -> emqx_common_test_helpers:stop_apps([]), ok. +%% FIXME: this is a temp fix to tests share configs. +stop_apps() -> + emqx_common_test_helpers:stop_apps([], #{erase_all_configs => false}). + %%-------------------------------------------------------------------- %% Test Cases %%-------------------------------------------------------------------- @@ -443,7 +447,12 @@ t_websocket_info_deliver(_) -> t_websocket_info_timeout_limiter(_) -> Ref = make_ref(), - LimiterT = init_limiter(), + {ok, Rate} = emqx_limiter_schema:to_rate("50MB"), + LimiterT = init_limiter(#{ + bytes => bucket_cfg(), + messages => bucket_cfg(), + client => #{bytes => client_cfg(Rate)} + }), Next = fun emqx_ws_connection:when_msg_in/3, Limiter = emqx_limiter_container:set_retry_context({retry, [], [], Next}, LimiterT), Event = {timeout, Ref, limit_timeout}, diff --git a/apps/emqx_authn/test/emqx_authn_api_SUITE.erl b/apps/emqx_authn/test/emqx_authn_api_SUITE.erl index c7f718dfc..6d9203c95 100644 --- a/apps/emqx_authn/test/emqx_authn_api_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_api_SUITE.erl @@ -67,7 +67,7 @@ init_per_suite(Config) -> emqx_config:erase(?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY), _ = 
application:load(emqx_conf), ok = emqx_mgmt_api_test_util:init_suite( - [emqx_authn] + [emqx_conf, emqx_authn] ), ?AUTHN:delete_chain(?GLOBAL), diff --git a/apps/emqx_authn/test/emqx_authn_enable_flag_SUITE.erl b/apps/emqx_authn/test/emqx_authn_enable_flag_SUITE.erl index 59865ab41..98215e853 100644 --- a/apps/emqx_authn/test/emqx_authn_enable_flag_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_enable_flag_SUITE.erl @@ -42,15 +42,16 @@ init_per_testcase(_Case, Config) -> <<"backend">> => <<"built_in_database">>, <<"user_id_type">> => <<"clientid">> }, - emqx:update_config( + {ok, _} = emqx:update_config( ?PATH, {create_authenticator, ?GLOBAL, AuthnConfig} ), - - emqx_conf:update( - [listeners, tcp, listener_authn_enabled], {create, listener_mqtt_tcp_conf(18830, true)}, #{} + {ok, _} = emqx_conf:update( + [listeners, tcp, listener_authn_enabled], + {create, listener_mqtt_tcp_conf(18830, true)}, + #{} ), - emqx_conf:update( + {ok, _} = emqx_conf:update( [listeners, tcp, listener_authn_disabled], {create, listener_mqtt_tcp_conf(18831, false)}, #{} diff --git a/apps/emqx_authn/test/emqx_authn_jwt_SUITE.erl b/apps/emqx_authn/test/emqx_authn_jwt_SUITE.erl index 94c07ca96..bd18367b6 100644 --- a/apps/emqx_authn/test/emqx_authn_jwt_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_jwt_SUITE.erl @@ -37,7 +37,7 @@ init_per_testcase(_, Config) -> init_per_suite(Config) -> _ = application:load(emqx_conf), - emqx_common_test_helpers:start_apps([emqx_authn]), + emqx_common_test_helpers:start_apps([emqx_conf, emqx_authn]), application:ensure_all_started(emqx_resource), application:ensure_all_started(emqx_connector), Config. diff --git a/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl b/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl index 7a766281b..3afb8e973 100644 --- a/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl +++ b/apps/emqx_authn/test/emqx_authn_schema_SUITE.erl @@ -78,7 +78,8 @@ t_check_schema(_Config) -> ). 
t_union_member_selector(_) -> - ?assertMatch(#{authentication := undefined}, check(undefined)), + %% default value for authentication + ?assertMatch(#{authentication := []}, check(undefined)), C1 = #{<<"backend">> => <<"built_in_database">>}, ?assertThrow( #{ diff --git a/apps/emqx_authz/etc/emqx_authz.conf b/apps/emqx_authz/etc/emqx_authz.conf index 3bdc180c5..167b12b3f 100644 --- a/apps/emqx_authz/etc/emqx_authz.conf +++ b/apps/emqx_authz/etc/emqx_authz.conf @@ -2,14 +2,4 @@ authorization { deny_action = ignore no_match = allow cache = { enable = true } - sources = [ - { - type = file - enable = true - # This file is immutable to EMQX. - # Once new rules are created from dashboard UI or HTTP API, - # the file 'data/authz/acl.conf' is used instead of this one - path = "{{ platform_etc_dir }}/acl.conf" - } - ] } diff --git a/apps/emqx_authz/src/emqx_authz.app.src b/apps/emqx_authz/src/emqx_authz.app.src index dd658a6aa..dd0325694 100644 --- a/apps/emqx_authz/src/emqx_authz.app.src +++ b/apps/emqx_authz/src/emqx_authz.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authz, [ {description, "An OTP application"}, - {vsn, "0.1.18"}, + {vsn, "0.1.19"}, {registered, []}, {mod, {emqx_authz_app, []}}, {applications, [ diff --git a/apps/emqx_authz/src/emqx_authz_api_sources.erl b/apps/emqx_authz/src/emqx_authz_api_sources.erl index 2220e8f6e..d332f009f 100644 --- a/apps/emqx_authz/src/emqx_authz_api_sources.erl +++ b/apps/emqx_authz/src/emqx_authz_api_sources.erl @@ -205,7 +205,7 @@ sources(get, _) -> }, AccIn ) -> - case file:read_file(Path) of + case emqx_authz_file:read_file(Path) of {ok, Rules} -> lists:append(AccIn, [ #{ @@ -242,7 +242,7 @@ source(get, #{bindings := #{type := Type}}) -> Type, fun (#{<<"type">> := <<"file">>, <<"enable">> := Enable, <<"path">> := Path}) -> - case file:read_file(Path) of + case emqx_authz_file:read_file(Path) of {ok, Rules} -> {200, #{ type => file, diff --git a/apps/emqx_authz/src/emqx_authz_file.erl 
b/apps/emqx_authz/src/emqx_authz_file.erl index ede4a9582..54f1775c6 100644 --- a/apps/emqx_authz/src/emqx_authz_file.erl +++ b/apps/emqx_authz/src/emqx_authz_file.erl @@ -32,13 +32,15 @@ create/1, update/1, destroy/1, - authorize/4 + authorize/4, + read_file/1 ]). description() -> "AuthZ with static rules". -create(#{path := Path} = Source) -> +create(#{path := Path0} = Source) -> + Path = filename(Path0), Rules = case file:consult(Path) of {ok, Terms} -> @@ -63,3 +65,9 @@ destroy(_Source) -> ok. authorize(Client, PubSub, Topic, #{annotations := #{rules := Rules}}) -> emqx_authz_rule:matches(Client, PubSub, Topic, Rules). + +read_file(Path) -> + file:read_file(filename(Path)). + +filename(PathMaybeTemplate) -> + emqx_schema:naive_env_interpolation(PathMaybeTemplate). diff --git a/apps/emqx_authz/src/emqx_authz_schema.erl b/apps/emqx_authz/src/emqx_authz_schema.erl index 7aaa68b62..a2a7c6b52 100644 --- a/apps/emqx_authz/src/emqx_authz_schema.erl +++ b/apps/emqx_authz/src/emqx_authz_schema.erl @@ -491,7 +491,7 @@ authz_fields() -> ?HOCON( ?ARRAY(?UNION(UnionMemberSelector)), #{ - default => [], + default => [default_authz()], desc => ?DESC(sources), %% doc_lift is force a root level reference instead of nesting sub-structs extra => #{doc_lift => true}, @@ -501,3 +501,10 @@ authz_fields() -> } )} ]. + +default_authz() -> + #{ + <<"type">> => <<"file">>, + <<"enable">> => true, + <<"path">> => <<"${EMQX_ETC_DIR}/acl.conf">> + }. 
diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index d6c140fef..e408250be 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ {description, "EMQX bridges"}, - {vsn, "0.1.17"}, + {vsn, "0.1.18"}, {registered, [emqx_bridge_sup]}, {mod, {emqx_bridge_app, []}}, {applications, [ diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index 08b8222f2..a37b6db3c 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -70,7 +70,9 @@ T == dynamo; T == rocketmq; T == cassandra; - T == sqlserver + T == sqlserver; + T == pulsar_producer; + T == oracle ). load() -> diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index 1ad024c40..da98b073e 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -340,6 +340,8 @@ parse_confs(Type, Name, Conf) when ?IS_INGRESS_BRIDGE(Type) -> %% to hocon; keeping this as just `kafka' for backwards compatibility. parse_confs(<<"kafka">> = _Type, Name, Conf) -> Conf#{bridge_name => Name}; +parse_confs(<<"pulsar_producer">> = _Type, Name, Conf) -> + Conf#{bridge_name => Name}; parse_confs(_Type, _Name, Conf) -> Conf. diff --git a/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl index 4b9b7e3fe..f58805b6b 100644 --- a/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl +++ b/apps/emqx_bridge/src/schema/emqx_bridge_schema.erl @@ -230,7 +230,12 @@ webhook_bridge_converter(Conf0, _HoconOpts) -> undefined -> undefined; _ -> - do_convert_webhook_config(Conf1) + maps:map( + fun(_Name, Conf) -> + do_convert_webhook_config(Conf) + end, + Conf1 + ) end. 
do_convert_webhook_config( diff --git a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl index ed4807d12..a8864bf00 100644 --- a/apps/emqx_bridge/test/emqx_bridge_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_SUITE.erl @@ -141,8 +141,7 @@ setup_fake_telemetry_data() -> } } }, - Opts = #{raw_with_default => true}, - ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts), + ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf), ok = snabbkaffe:start_trace(), Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_loaded end, diff --git a/apps/emqx_bridge_cassandra/README.md b/apps/emqx_bridge_cassandra/README.md index d26bd2fbb..c5a2609a5 100644 --- a/apps/emqx_bridge_cassandra/README.md +++ b/apps/emqx_bridge_cassandra/README.md @@ -11,6 +11,7 @@ The application is used to connect EMQX and Cassandra. User can create a rule and easily ingest IoT data into Cassandra by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + # HTTP APIs diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src index 58e4a1984..1bde274f3 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_cassandra, [ {description, "EMQX Enterprise Cassandra Bridge"}, - {vsn, "0.1.0"}, + {vsn, "0.1.1"}, {registered, []}, {applications, [kernel, stdlib, ecql]}, {env, []}, diff --git a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl index cf6ddff9f..a3032a9df 100644 --- a/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl +++ b/apps/emqx_bridge_cassandra/src/emqx_bridge_cassandra_connector.erl @@ -92,7 +92,7 @@ callback_mode() -> async_if_possible. 
on_start( InstId, #{ - servers := Servers, + servers := Servers0, keyspace := Keyspace, username := Username, pool_size := PoolSize, @@ -104,9 +104,16 @@ on_start( connector => InstId, config => emqx_utils:redact(Config) }), + Servers = + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(Servers0, ?DEFAULT_SERVER_OPTION) + ), Options = [ - {nodes, emqx_schema:parse_servers(Servers, ?DEFAULT_SERVER_OPTION)}, + {nodes, Servers}, {username, Username}, {password, emqx_secret:wrap(maps:get(password, Config, ""))}, {keyspace, Keyspace}, @@ -274,7 +281,7 @@ proc_cql_params(query, SQL, Params, _State) -> exec_cql_query(InstId, PoolName, Type, Async, PreparedKey, Data) when Type == query; Type == prepared_query -> - case ecpool:pick_and_do(PoolName, {?MODULE, Type, [Async, PreparedKey, Data]}, no_handover) of + case exec(PoolName, {?MODULE, Type, [Async, PreparedKey, Data]}) of {error, Reason} = Result -> ?tp( error, @@ -288,7 +295,7 @@ exec_cql_query(InstId, PoolName, Type, Async, PreparedKey, Data) when end. exec_cql_batch_query(InstId, PoolName, Async, CQLs) -> - case ecpool:pick_and_do(PoolName, {?MODULE, batch_query, [Async, CQLs]}, no_handover) of + case exec(PoolName, {?MODULE, batch_query, [Async, CQLs]}) of {error, Reason} = Result -> ?tp( error, @@ -301,6 +308,13 @@ exec_cql_batch_query(InstId, PoolName, Async, CQLs) -> Result end. +%% Pick one of the pool members to do the query. +%% Using 'no_handoever' strategy, +%% meaning the buffer worker does the gen_server call or gen_server cast +%% towards the connection process. +exec(PoolName, Query) -> + ecpool:pick_and_do(PoolName, Query, no_handover). 
+ on_get_status(_InstId, #{pool_name := PoolName} = State) -> case emqx_resource_pool:health_check_workers(PoolName, fun ?MODULE:do_get_status/1) of true -> @@ -339,17 +353,23 @@ do_check_prepares(State = #{pool_name := PoolName, prepare_cql := {error, Prepar query(Conn, sync, CQL, Params) -> ecql:query(Conn, CQL, Params); query(Conn, {async, Callback}, CQL, Params) -> - ecql:async_query(Conn, CQL, Params, one, Callback). + ok = ecql:async_query(Conn, CQL, Params, one, Callback), + %% return the connection pid for buffer worker to monitor + {ok, Conn}. prepared_query(Conn, sync, PreparedKey, Params) -> ecql:execute(Conn, PreparedKey, Params); prepared_query(Conn, {async, Callback}, PreparedKey, Params) -> - ecql:async_execute(Conn, PreparedKey, Params, Callback). + ok = ecql:async_execute(Conn, PreparedKey, Params, Callback), + %% return the connection pid for buffer worker to monitor + {ok, Conn}. batch_query(Conn, sync, Rows) -> ecql:batch(Conn, Rows); batch_query(Conn, {async, Callback}, Rows) -> - ecql:async_batch(Conn, Rows, Callback). + ok = ecql:async_batch(Conn, Rows, Callback), + %% return the connection pid for buffer worker to monitor + {ok, Conn}. 
%%-------------------------------------------------------------------- %% callbacks for ecpool diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl index 7865f0415..cda27f6e4 100644 --- a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_SUITE.erl @@ -404,7 +404,7 @@ t_setup_via_config_and_publish(Config) -> end, fun(Trace0) -> Trace = ?of_kind(cassandra_connector_query_return, Trace0), - ?assertMatch([#{result := ok}], Trace), + ?assertMatch([#{result := {ok, _Pid}}], Trace), ok end ), @@ -443,7 +443,7 @@ t_setup_via_http_api_and_publish(Config) -> end, fun(Trace0) -> Trace = ?of_kind(cassandra_connector_query_return, Trace0), - ?assertMatch([#{result := ok}], Trace), + ?assertMatch([#{result := {ok, _Pid}}], Trace), ok end ), @@ -604,7 +604,7 @@ t_missing_data(Config) -> fun(Trace0) -> %% 1. ecql driver will return `ok` first in async query Trace = ?of_kind(cassandra_connector_query_return, Trace0), - ?assertMatch([#{result := ok}], Trace), + ?assertMatch([#{result := {ok, _Pid}}], Trace), %% 2. then it will return an error in callback function Trace1 = ?of_kind(handle_async_reply, Trace0), ?assertMatch([#{result := {error, {8704, _}}}], Trace1), diff --git a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl index f419283a8..452db33a7 100644 --- a/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl +++ b/apps/emqx_bridge_cassandra/test/emqx_bridge_cassandra_connector_SUITE.erl @@ -38,9 +38,14 @@ groups() -> []. 
cassandra_servers() -> - emqx_schema:parse_servers( - iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]), - #{default_port => ?CASSANDRA_DEFAULT_PORT} + lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers( + iolist_to_binary([?CASSANDRA_HOST, ":", erlang:integer_to_list(?CASSANDRA_DEFAULT_PORT)]), + #{default_port => ?CASSANDRA_DEFAULT_PORT} + ) ). init_per_suite(Config) -> diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src index 86627eb2a..2b3d359d3 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_gcp_pubsub, [ {description, "EMQX Enterprise GCP Pub/Sub Bridge"}, - {vsn, "0.1.0"}, + {vsn, "0.1.1"}, {registered, []}, {applications, [ kernel, diff --git a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl index a3f0ef36b..be5e56e85 100644 --- a/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl +++ b/apps/emqx_bridge_gcp_pubsub/src/emqx_bridge_gcp_pubsub_connector.erl @@ -38,7 +38,6 @@ }. -type state() :: #{ connect_timeout := timer:time(), - instance_id := manager_id(), jwt_worker_id := jwt_worker(), max_retries := non_neg_integer(), payload_template := emqx_plugin_libs_rule:tmpl_token(), @@ -61,9 +60,9 @@ is_buffer_supported() -> false. callback_mode() -> async_if_possible. --spec on_start(manager_id(), config()) -> {ok, state()} | {error, term()}. +-spec on_start(resource_id(), config()) -> {ok, state()} | {error, term()}. 
on_start( - InstanceId, + ResourceId, #{ connect_timeout := ConnectTimeout, max_retries := MaxRetries, @@ -75,13 +74,13 @@ on_start( ) -> ?SLOG(info, #{ msg => "starting_gcp_pubsub_bridge", - connector => InstanceId, + connector => ResourceId, config => Config }), %% emulating the emulator behavior %% https://cloud.google.com/pubsub/docs/emulator HostPort = os:getenv("PUBSUB_EMULATOR_HOST", "pubsub.googleapis.com:443"), - {Host, Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}), + #{hostname := Host, port := Port} = emqx_schema:parse_server(HostPort, #{default_port => 443}), PoolType = random, Transport = tls, TransportOpts = emqx_tls_lib:to_client_opts(#{enable => true, verify => verify_none}), @@ -100,14 +99,13 @@ on_start( #{ jwt_worker_id := JWTWorkerId, project_id := ProjectId - } = ensure_jwt_worker(InstanceId, Config), + } = ensure_jwt_worker(ResourceId, Config), State = #{ connect_timeout => ConnectTimeout, - instance_id => InstanceId, jwt_worker_id => JWTWorkerId, max_retries => MaxRetries, payload_template => emqx_plugin_libs_rule:preproc_tmpl(PayloadTemplate), - pool_name => InstanceId, + pool_name => ResourceId, project_id => ProjectId, pubsub_topic => PubSubTopic, request_timeout => RequestTimeout @@ -115,39 +113,39 @@ on_start( ?tp( gcp_pubsub_on_start_before_starting_pool, #{ - instance_id => InstanceId, - pool_name => InstanceId, + resource_id => ResourceId, + pool_name => ResourceId, pool_opts => PoolOpts } ), - ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => InstanceId}), - case ehttpc_sup:start_pool(InstanceId, PoolOpts) of + ?tp(gcp_pubsub_starting_ehttpc_pool, #{pool_name => ResourceId}), + case ehttpc_sup:start_pool(ResourceId, PoolOpts) of {ok, _} -> {ok, State}; {error, {already_started, _}} -> - ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => InstanceId}), + ?tp(gcp_pubsub_ehttpc_pool_already_started, #{pool_name => ResourceId}), {ok, State}; {error, Reason} -> ?tp(gcp_pubsub_ehttpc_pool_start_failure, #{ - 
pool_name => InstanceId, + pool_name => ResourceId, reason => Reason }), {error, Reason} end. --spec on_stop(manager_id(), state()) -> ok | {error, term()}. +-spec on_stop(resource_id(), state()) -> ok | {error, term()}. on_stop( - InstanceId, - _State = #{jwt_worker_id := JWTWorkerId, pool_name := PoolName} + ResourceId, + _State = #{jwt_worker_id := JWTWorkerId} ) -> - ?tp(gcp_pubsub_stop, #{instance_id => InstanceId, jwt_worker_id => JWTWorkerId}), + ?tp(gcp_pubsub_stop, #{resource_id => ResourceId, jwt_worker_id => JWTWorkerId}), ?SLOG(info, #{ msg => "stopping_gcp_pubsub_bridge", - connector => InstanceId + connector => ResourceId }), emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), - emqx_connector_jwt:delete_jwt(?JWT_TABLE, InstanceId), - ehttpc_sup:stop_pool(PoolName). + emqx_connector_jwt:delete_jwt(?JWT_TABLE, ResourceId), + ehttpc_sup:stop_pool(ResourceId). -spec on_query( resource_id(), @@ -213,9 +211,9 @@ on_batch_query_async(ResourceId, Requests, ReplyFunAndArgs, State) -> ), do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId). --spec on_get_status(manager_id(), state()) -> connected | disconnected. -on_get_status(InstanceId, #{connect_timeout := Timeout, pool_name := PoolName} = State) -> - case do_get_status(InstanceId, PoolName, Timeout) of +-spec on_get_status(resource_id(), state()) -> connected | disconnected. +on_get_status(ResourceId, #{connect_timeout := Timeout} = State) -> + case do_get_status(ResourceId, Timeout) of true -> connected; false -> @@ -230,12 +228,12 @@ on_get_status(InstanceId, #{connect_timeout := Timeout, pool_name := PoolName} = %% Helper fns %%------------------------------------------------------------------------------------------------- --spec ensure_jwt_worker(manager_id(), config()) -> +-spec ensure_jwt_worker(resource_id(), config()) -> #{ jwt_worker_id := jwt_worker(), project_id := binary() }. 
-ensure_jwt_worker(InstanceId, #{ +ensure_jwt_worker(ResourceId, #{ service_account_json := ServiceAccountJSON }) -> #{ @@ -250,7 +248,7 @@ ensure_jwt_worker(InstanceId, #{ Alg = <<"RS256">>, Config = #{ private_key => PrivateKeyPEM, - resource_id => InstanceId, + resource_id => ResourceId, expiration => ExpirationMS, table => ?JWT_TABLE, iss => ServiceAccountEmail, @@ -260,14 +258,14 @@ ensure_jwt_worker(InstanceId, #{ alg => Alg }, - JWTWorkerId = <<"gcp_pubsub_jwt_worker:", InstanceId/binary>>, + JWTWorkerId = <<"gcp_pubsub_jwt_worker:", ResourceId/binary>>, Worker = case emqx_connector_jwt_sup:ensure_worker_present(JWTWorkerId, Config) of {ok, Worker0} -> Worker0; Error -> ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{ - connector => InstanceId, + connector => ResourceId, reason => Error }), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), @@ -281,18 +279,18 @@ ensure_jwt_worker(InstanceId, #{ %% produced by the worker. receive {Ref, token_created} -> - ?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => InstanceId}), + ?tp(gcp_pubsub_bridge_jwt_created, #{resource_id => ResourceId}), demonitor(MRef, [flush]), ok; {'DOWN', MRef, process, Worker, Reason} -> ?tp(error, "gcp_pubsub_bridge_jwt_worker_failed_to_start", #{ - connector => InstanceId, + connector => ResourceId, reason => Reason }), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), throw(failed_to_start_jwt_worker) after 10_000 -> - ?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => InstanceId}), + ?tp(warning, "gcp_pubsub_bridge_jwt_timeout", #{connector => ResourceId}), demonitor(MRef, [flush]), _ = emqx_connector_jwt_sup:ensure_worker_deleted(JWTWorkerId), throw(timeout_creating_jwt) @@ -325,8 +323,8 @@ publish_path( <<"/v1/projects/", ProjectId/binary, "/topics/", PubSubTopic/binary, ":publish">>. -spec get_jwt_authorization_header(resource_id()) -> [{binary(), binary()}]. 
-get_jwt_authorization_header(InstanceId) -> - case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, InstanceId) of +get_jwt_authorization_header(ResourceId) -> + case emqx_connector_jwt:lookup_jwt(?JWT_TABLE, ResourceId) of %% Since we synchronize the JWT creation during resource start %% (see `on_start/2'), this will be always be populated. {ok, JWT} -> @@ -345,7 +343,6 @@ get_jwt_authorization_header(InstanceId) -> do_send_requests_sync(State, Requests, ResourceId) -> #{ pool_name := PoolName, - instance_id := InstanceId, max_retries := MaxRetries, request_timeout := RequestTimeout } = State, @@ -353,12 +350,11 @@ do_send_requests_sync(State, Requests, ResourceId) -> gcp_pubsub_bridge_do_send_requests, #{ query_mode => sync, - instance_id => InstanceId, resource_id => ResourceId, requests => Requests } ), - Headers = get_jwt_authorization_header(InstanceId), + Headers = get_jwt_authorization_header(ResourceId), Payloads = lists:map( fun({send_message, Selected}) -> @@ -471,19 +467,17 @@ do_send_requests_sync(State, Requests, ResourceId) -> do_send_requests_async(State, Requests, ReplyFunAndArgs, ResourceId) -> #{ pool_name := PoolName, - instance_id := InstanceId, request_timeout := RequestTimeout } = State, ?tp( gcp_pubsub_bridge_do_send_requests, #{ query_mode => async, - instance_id => InstanceId, resource_id => ResourceId, requests => Requests } ), - Headers = get_jwt_authorization_header(InstanceId), + Headers = get_jwt_authorization_header(ResourceId), Payloads = lists:map( fun({send_message, Selected}) -> @@ -541,9 +535,9 @@ reply_delegator(_ResourceId, ReplyFunAndArgs, Result) -> emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result) end. --spec do_get_status(manager_id(), binary(), timer:time()) -> boolean(). -do_get_status(InstanceId, PoolName, Timeout) -> - Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(PoolName)], +-spec do_get_status(resource_id(), timer:time()) -> boolean(). 
+do_get_status(ResourceId, Timeout) -> + Workers = [Worker || {_WorkerName, Worker} <- ehttpc:workers(ResourceId)], DoPerWorker = fun(Worker) -> case ehttpc:health_check(Worker, Timeout) of @@ -552,7 +546,7 @@ do_get_status(InstanceId, PoolName, Timeout) -> {error, Reason} -> ?SLOG(error, #{ msg => "ehttpc_health_check_failed", - instance_id => InstanceId, + connector => ResourceId, reason => Reason, worker => Worker }), diff --git a/apps/emqx_bridge_hstreamdb/README.md b/apps/emqx_bridge_hstreamdb/README.md index 3a7c6b49d..520817e82 100644 --- a/apps/emqx_bridge_hstreamdb/README.md +++ b/apps/emqx_bridge_hstreamdb/README.md @@ -9,6 +9,7 @@ The application is used to connect EMQX and HStreamDB. User can create a rule and easily ingest IoT data into HStreamDB by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + # HTTP APIs diff --git a/apps/emqx_bridge_kafka/README.md b/apps/emqx_bridge_kafka/README.md index 72cbeecc6..f1b0d1f9a 100644 --- a/apps/emqx_bridge_kafka/README.md +++ b/apps/emqx_bridge_kafka/README.md @@ -10,10 +10,21 @@ workers from `emqx_resource`. It implements the connection management and interaction without need for a separate connector app, since it's not used by authentication and authorization applications. -## Contributing +# Documentation links + +For more information on Apache Kafka, please see its [official +site](https://kafka.apache.org/). + +# Configurations + +Please see [our official +documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-kafka.html) +for more detailed info. + +# Contributing Please see our [contributing.md](../../CONTRIBUTING.md). -## License +# License -See [BSL](./BSL.txt). +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). 
diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src index a4fbe5673..f01a011d1 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka.app.src @@ -1,6 +1,6 @@ {application, emqx_bridge_kafka, [ {description, "EMQX Enterprise Kafka Bridge"}, - {vsn, "0.1.0"}, + {vsn, "0.1.2"}, {registered, [emqx_bridge_kafka_consumer_sup]}, {applications, [ kernel, diff --git a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl index fdfa3300c..f7958af81 100644 --- a/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl +++ b/apps/emqx_bridge_kafka/src/emqx_bridge_kafka_impl_consumer.erl @@ -114,8 +114,8 @@ callback_mode() -> is_buffer_supported() -> true. --spec on_start(manager_id(), config()) -> {ok, state()}. -on_start(InstanceId, Config) -> +-spec on_start(resource_id(), config()) -> {ok, state()}. +on_start(ResourceId, Config) -> #{ authentication := Auth, bootstrap_hosts := BootstrapHosts0, @@ -133,7 +133,7 @@ on_start(InstanceId, Config) -> BootstrapHosts = emqx_bridge_kafka_impl:hosts(BootstrapHosts0), KafkaType = kafka_consumer, %% Note: this is distinct per node. 
- ClientID = make_client_id(InstanceId, KafkaType, BridgeName), + ClientID = make_client_id(ResourceId, KafkaType, BridgeName), ClientOpts0 = case Auth of none -> []; @@ -144,26 +144,26 @@ on_start(InstanceId, Config) -> ok -> ?tp( kafka_consumer_client_started, - #{client_id => ClientID, instance_id => InstanceId} + #{client_id => ClientID, resource_id => ResourceId} ), ?SLOG(info, #{ msg => "kafka_consumer_client_started", - instance_id => InstanceId, + resource_id => ResourceId, kafka_hosts => BootstrapHosts }); {error, Reason} -> ?SLOG(error, #{ msg => "failed_to_start_kafka_consumer_client", - instance_id => InstanceId, + resource_id => ResourceId, kafka_hosts => BootstrapHosts, reason => emqx_utils:redact(Reason) }), throw(?CLIENT_DOWN_MESSAGE) end, - start_consumer(Config, InstanceId, ClientID). + start_consumer(Config, ResourceId, ClientID). --spec on_stop(manager_id(), state()) -> ok. -on_stop(_InstanceID, State) -> +-spec on_stop(resource_id(), state()) -> ok. +on_stop(_ResourceID, State) -> #{ subscriber_id := SubscriberId, kafka_client_id := ClientID @@ -172,14 +172,19 @@ on_stop(_InstanceID, State) -> stop_client(ClientID), ok. --spec on_get_status(manager_id(), state()) -> connected | disconnected. -on_get_status(_InstanceID, State) -> +-spec on_get_status(resource_id(), state()) -> connected | disconnected. +on_get_status(_ResourceID, State) -> #{ subscriber_id := SubscriberId, kafka_client_id := ClientID, kafka_topics := KafkaTopics } = State, - do_get_status(State, ClientID, KafkaTopics, SubscriberId). + case do_get_status(ClientID, KafkaTopics, SubscriberId) of + {disconnected, Message} -> + {disconnected, State, Message}; + Res -> + Res + end. %%------------------------------------------------------------------------------------- %% `brod_group_subscriber' API @@ -266,8 +271,8 @@ ensure_consumer_supervisor_started() -> ok end. --spec start_consumer(config(), manager_id(), brod:client_id()) -> {ok, state()}. 
-start_consumer(Config, InstanceId, ClientID) -> +-spec start_consumer(config(), resource_id(), brod:client_id()) -> {ok, state()}. +start_consumer(Config, ResourceId, ClientID) -> #{ bootstrap_hosts := BootstrapHosts0, bridge_name := BridgeName, @@ -287,7 +292,7 @@ start_consumer(Config, InstanceId, ClientID) -> InitialState = #{ key_encoding_mode => KeyEncodingMode, hookpoint => Hookpoint, - resource_id => emqx_bridge_resource:resource_id(kafka_consumer, BridgeName), + resource_id => ResourceId, topic_mapping => TopicMapping, value_encoding_mode => ValueEncodingMode }, @@ -332,7 +337,7 @@ start_consumer(Config, InstanceId, ClientID) -> {ok, _ConsumerPid} -> ?tp( kafka_consumer_subscriber_started, - #{instance_id => InstanceId, subscriber_id => SubscriberId} + #{resource_id => ResourceId, subscriber_id => SubscriberId} ), {ok, #{ subscriber_id => SubscriberId, @@ -342,7 +347,7 @@ start_consumer(Config, InstanceId, ClientID) -> {error, Reason2} -> ?SLOG(error, #{ msg => "failed_to_start_kafka_consumer", - instance_id => InstanceId, + resource_id => ResourceId, kafka_hosts => emqx_bridge_kafka_impl:hosts(BootstrapHosts0), reason => emqx_utils:redact(Reason2) }), @@ -376,41 +381,41 @@ stop_client(ClientID) -> ), ok. -do_get_status(State, ClientID, [KafkaTopic | RestTopics], SubscriberId) -> +do_get_status(ClientID, [KafkaTopic | RestTopics], SubscriberId) -> case brod:get_partitions_count(ClientID, KafkaTopic) of {ok, NPartitions} -> - case do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) of - connected -> do_get_status(State, ClientID, RestTopics, SubscriberId); + case do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) of + connected -> do_get_status(ClientID, RestTopics, SubscriberId); disconnected -> disconnected end; {error, {client_down, Context}} -> case infer_client_error(Context) of auth_error -> Message = "Authentication error. 
" ++ ?CLIENT_DOWN_MESSAGE, - {disconnected, State, Message}; + {disconnected, Message}; {auth_error, Message0} -> Message = binary_to_list(Message0) ++ "; " ++ ?CLIENT_DOWN_MESSAGE, - {disconnected, State, Message}; + {disconnected, Message}; connection_refused -> Message = "Connection refused. " ++ ?CLIENT_DOWN_MESSAGE, - {disconnected, State, Message}; + {disconnected, Message}; _ -> - {disconnected, State, ?CLIENT_DOWN_MESSAGE} + {disconnected, ?CLIENT_DOWN_MESSAGE} end; {error, leader_not_available} -> Message = "Leader connection not available. Please check the Kafka topic used," " the connection parameters and Kafka cluster health", - {disconnected, State, Message}; + {disconnected, Message}; _ -> disconnected end; -do_get_status(_State, _ClientID, _KafkaTopics = [], _SubscriberId) -> +do_get_status(_ClientID, _KafkaTopics = [], _SubscriberId) -> connected. --spec do_get_status1(brod:client_id(), binary(), subscriber_id(), pos_integer()) -> +-spec do_get_topic_status(brod:client_id(), binary(), subscriber_id(), pos_integer()) -> connected | disconnected. -do_get_status1(ClientID, KafkaTopic, SubscriberId, NPartitions) -> +do_get_topic_status(ClientID, KafkaTopic, SubscriberId, NPartitions) -> Results = lists:map( fun(N) -> @@ -466,19 +471,19 @@ consumer_group_id(BridgeName0) -> BridgeName = to_bin(BridgeName0), <<"emqx-kafka-consumer-", BridgeName/binary>>. --spec is_dry_run(manager_id()) -> boolean(). -is_dry_run(InstanceId) -> - TestIdStart = string:find(InstanceId, ?TEST_ID_PREFIX), +-spec is_dry_run(resource_id()) -> boolean(). +is_dry_run(ResourceId) -> + TestIdStart = string:find(ResourceId, ?TEST_ID_PREFIX), case TestIdStart of nomatch -> false; _ -> - string:equal(TestIdStart, InstanceId) + string:equal(TestIdStart, ResourceId) end. --spec make_client_id(manager_id(), kafka_consumer, atom() | binary()) -> atom(). 
-make_client_id(InstanceId, KafkaType, KafkaName) -> - case is_dry_run(InstanceId) of +-spec make_client_id(resource_id(), kafka_consumer, atom() | binary()) -> atom(). +make_client_id(ResourceId, KafkaType, KafkaName) -> + case is_dry_run(ResourceId) of false -> ClientID0 = emqx_bridge_kafka_impl:make_client_id(KafkaType, KafkaName), binary_to_atom(ClientID0); diff --git a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl index 08fbf5e15..4f98f33cf 100644 --- a/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl +++ b/apps/emqx_bridge_kafka/test/emqx_bridge_kafka_impl_consumer_SUITE.erl @@ -1156,11 +1156,12 @@ t_start_and_consume_ok(Config) -> ), %% Check that the bridge probe API doesn't leak atoms. - ProbeRes = probe_bridge_api(Config), - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes0 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), AtomsBefore = erlang:system_info(atom_count), %% Probe again; shouldn't have created more atoms. - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes1 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), AtomsAfter = erlang:system_info(atom_count), ?assertEqual(AtomsBefore, AtomsAfter), @@ -1259,11 +1260,12 @@ t_multiple_topic_mappings(Config) -> {ok, _} = snabbkaffe:receive_events(SRef0), %% Check that the bridge probe API doesn't leak atoms. - ProbeRes = probe_bridge_api(Config), - ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes0 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), AtomsBefore = erlang:system_info(atom_count), %% Probe again; shouldn't have created more atoms. 
- ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes), + ProbeRes1 = probe_bridge_api(Config), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), AtomsAfter = erlang:system_info(atom_count), ?assertEqual(AtomsBefore, AtomsAfter), @@ -1473,7 +1475,10 @@ do_t_receive_after_recovery(Config) -> ResourceId = resource_id(Config), ?check_trace( begin - {ok, _} = create_bridge(Config), + {ok, _} = create_bridge( + Config, + #{<<"kafka">> => #{<<"offset_reset_policy">> => <<"earliest">>}} + ), ping_until_healthy(Config, _Period = 1_500, _Timeout0 = 24_000), {ok, connected} = emqx_resource_manager:health_check(ResourceId), %% 0) ensure each partition commits its offset so it can diff --git a/apps/emqx_bridge_matrix/README.md b/apps/emqx_bridge_matrix/README.md index 976120ffe..0d9c4fc4a 100644 --- a/apps/emqx_bridge_matrix/README.md +++ b/apps/emqx_bridge_matrix/README.md @@ -1,12 +1,12 @@ # EMQX MatrixDB Bridge -[MatrixDB](http://matrixdb.univ-lyon1.fr/) is a biological database focused on -molecular interactions between extracellular proteins and polysaccharides. +[YMatrix](https://www.ymatrix.cn/) is a hyper-converged database product developed by YMatrix based on the PostgreSQL / Greenplum classic open source database. In addition to being able to handle time series scenarios with ease, it also supports classic scenarios such as online transaction processing (OLTP) and online analytical processing (OLAP). The application is used to connect EMQX and MatrixDB. User can create a rule and easily ingest IoT data into MatrixDB by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). 
+ # HTTP APIs diff --git a/apps/emqx_bridge_mysql/README.md b/apps/emqx_bridge_mysql/README.md index 73f6987b6..d7c9b5647 100644 --- a/apps/emqx_bridge_mysql/README.md +++ b/apps/emqx_bridge_mysql/README.md @@ -1,6 +1,6 @@ # EMQX MySQL Bridge -[MySQL](https://github.com/MySQL/MySQL) is a popular open-source relational database +[MySQL](https://github.com/mysql/mysql-server) is a popular open-source relational database management system. The application is used to connect EMQX and MySQL. diff --git a/apps/emqx_bridge_opents/BSL.txt b/apps/emqx_bridge_opents/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_opents/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. 
+ +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. 
+ +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_opents/README.md b/apps/emqx_bridge_opents/README.md new file mode 100644 index 000000000..a1d6511ee --- /dev/null +++ b/apps/emqx_bridge_opents/README.md @@ -0,0 +1,36 @@ +# EMQX OpenTSDB Bridge + +[OpenTSDB](http://opentsdb.net) is a distributed, scalable Time Series Database (TSDB) written on top of HBase. + +OpenTSDB was written to address a common need: store, index and serve metrics collected from computer systems (network gear, operating systems, applications) at a large scale, and make this data easily accessible and graphable. + +OpenTSDB allows you to collect thousands of metrics from tens of thousands of hosts and applications, at a high rate (every few seconds). + +OpenTSDB will never delete or downsample data and can easily store hundreds of billions of data points. 
+ +The application is used to connect EMQX and OpenTSDB. User can create a rule and easily ingest IoT data into OpenTSDB by leveraging the +[EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + + +# Documentation + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_opents/docker-ct b/apps/emqx_bridge_opents/docker-ct new file mode 100644 index 000000000..fc68b978e --- /dev/null +++ b/apps/emqx_bridge_opents/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +opents diff --git a/apps/emqx_bridge_opents/rebar.config b/apps/emqx_bridge_opents/rebar.config new file mode 100644 index 000000000..d7bd4560f --- /dev/null +++ b/apps/emqx_bridge_opents/rebar.config @@ -0,0 +1,8 @@ +{erl_opts, [debug_info]}. + +{deps, [ + {opentsdb, {git, "https://github.com/emqx/opentsdb-client-erl", {tag, "v0.5.1"}}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. 
diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src new file mode 100644 index 000000000..d001446b3 --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.app.src @@ -0,0 +1,15 @@ +{application, emqx_bridge_opents, [ + {description, "EMQX Enterprise OpenTSDB Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + opentsdb + ]}, + {env, []}, + {modules, []}, + + {licenses, ["BSL"]}, + {links, []} +]}. diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl new file mode 100644 index 000000000..2eb6a554f --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents.erl @@ -0,0 +1,85 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_opents). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +%% ------------------------------------------------------------------------------------------------- +%% api +conn_bridge_examples(Method) -> + [ + #{ + <<"opents">> => #{ + summary => <<"OpenTSDB Bridge">>, + value => values(Method) + } + } + ]. 
+ +values(_Method) -> + #{ + enable => true, + type => opents, + name => <<"foo">>, + server => <<"http://127.0.0.1:4242">>, + pool_size => 8, + resource_opts => #{ + worker_pool_size => 1, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_opents". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + emqx_bridge_opents_connector:fields(config); +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for OpenTSDB using `", string:to_upper(Method), "` method."]; +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal + +type_field() -> + {type, mk(enum([opents]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl new file mode 100644 index 000000000..0366c9dc2 --- /dev/null +++ b/apps/emqx_bridge_opents/src/emqx_bridge_opents_connector.erl @@ -0,0 +1,184 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- + +-module(emqx_bridge_opents_connector). + +-behaviour(emqx_resource). + +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-export([roots/0, fields/1]). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). + +-export([connect/1]). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +%%===================================================================== +%% Hocon schema +roots() -> + [{config, #{type => hoconsc:ref(?MODULE, config)}}]. + +fields(config) -> + [ + {server, mk(binary(), #{required => true, desc => ?DESC("server")})}, + {pool_size, fun emqx_connector_schema_lib:pool_size/1}, + {summary, mk(boolean(), #{default => true, desc => ?DESC("summary")})}, + {details, mk(boolean(), #{default => false, desc => ?DESC("details")})}, + {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1} + ]. + +%%======================================================================================== +%% `emqx_resource' API +%%======================================================================================== + +callback_mode() -> always_sync. + +is_buffer_supported() -> false. 
+ +on_start( + InstanceId, + #{ + server := Server, + pool_size := PoolSize, + summary := Summary, + details := Details, + resource_opts := #{batch_size := BatchSize} + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_opents_connector", + connector => InstanceId, + config => emqx_utils:redact(Config) + }), + + Options = [ + {server, to_str(Server)}, + {summary, Summary}, + {details, Details}, + {max_batch_size, BatchSize}, + {pool_size, PoolSize} + ], + + State = #{pool_name => InstanceId, server => Server}, + case opentsdb_connectivity(Server) of + ok -> + case emqx_resource_pool:start(InstanceId, ?MODULE, Options) of + ok -> + {ok, State}; + Error -> + Error + end; + {error, Reason} = Error -> + ?SLOG(error, #{msg => "Initiate resource failed", reason => Reason}), + Error + end. + +on_stop(InstanceId, #{pool_name := PoolName} = _State) -> + ?SLOG(info, #{ + msg => "stopping_opents_connector", + connector => InstanceId + }), + emqx_resource_pool:stop(PoolName). + +on_query(InstanceId, Request, State) -> + on_batch_query(InstanceId, [Request], State). + +on_batch_query( + InstanceId, + BatchReq, + State +) -> + Datas = [format_opentsdb_msg(Msg) || {_Key, Msg} <- BatchReq], + do_query(InstanceId, Datas, State). + +on_get_status(_InstanceId, #{server := Server}) -> + Result = + case opentsdb_connectivity(Server) of + ok -> + connected; + {error, Reason} -> + ?SLOG(error, #{msg => "OpenTSDB lost connection", reason => Reason}), + connecting + end, + Result. 
+ +%%======================================================================================== +%% Helper fns +%%======================================================================================== + +do_query(InstanceId, Query, #{pool_name := PoolName} = State) -> + ?TRACE( + "QUERY", + "opents_connector_received", + #{connector => InstanceId, query => Query, state => State} + ), + Result = ecpool:pick_and_do(PoolName, {opentsdb, put, [Query]}, no_handover), + + case Result of + {error, Reason} -> + ?tp( + opents_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "opents_connector_do_query_failed", + connector => InstanceId, + query => Query, + reason => Reason + }), + Result; + _ -> + ?tp( + opents_connector_query_return, + #{result => Result} + ), + Result + end. + +connect(Opts) -> + opentsdb:start_link(Opts). + +to_str(List) when is_list(List) -> + List; +to_str(Bin) when is_binary(Bin) -> + erlang:binary_to_list(Bin). + +opentsdb_connectivity(Server) -> + SvrUrl = + case Server of + <<"http://", _/binary>> -> Server; + <<"https://", _/binary>> -> Server; + _ -> "http://" ++ Server + end, + emqx_plugin_libs_rule:http_connectivity(SvrUrl). + +format_opentsdb_msg(Msg) -> + maps:with( + [ + timestamp, + metric, + tags, + value, + <<"timestamp">>, + <<"metric">>, + <<"tags">>, + <<"value">> + ], + Msg + ). diff --git a/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl new file mode 100644 index 000000000..6f444b93e --- /dev/null +++ b/apps/emqx_bridge_opents/test/emqx_bridge_opents_SUITE.erl @@ -0,0 +1,363 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_bridge_opents_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). 
+-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +% DB defaults +-define(BATCH_SIZE, 10). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, with_batch}, + {group, without_batch} + ]. + +groups() -> + TCs = emqx_common_test_helpers:all(?MODULE), + [ + {with_batch, TCs}, + {without_batch, TCs} + ]. + +init_per_group(with_batch, Config0) -> + Config = [{batch_size, ?BATCH_SIZE} | Config0], + common_init(Config); +init_per_group(without_batch, Config0) -> + Config = [{batch_size, 1} | Config0], + common_init(Config); +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok. + +init_per_testcase(_Testcase, Config) -> + delete_bridge(Config), + snabbkaffe:start_trace(), + Config. + +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = snabbkaffe:stop(), + delete_bridge(Config), + ok. 
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(ConfigT) -> + Host = os:getenv("OPENTS_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("OPENTS_PORT", "4242")), + + Config0 = [ + {opents_host, Host}, + {opents_port, Port}, + {proxy_name, "opents"} + | ConfigT + ], + + BridgeType = proplists:get_value(bridge_type, Config0, <<"opents">>), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + {Name, OpenTSConf} = opents_config(BridgeType, Config0), + Config = + [ + {opents_config, OpenTSConf}, + {opents_bridge_type, BridgeType}, + {opents_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_opents); + _ -> + {skip, no_opents} + end + end. + +opents_config(BridgeType, Config) -> + Port = integer_to_list(?config(opents_port, Config)), + Server = "http://" ++ ?config(opents_host, Config) ++ ":" ++ Port, + Name = atom_to_binary(?MODULE), + BatchSize = ?config(batch_size, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " server = ~p\n" + " resource_opts = {\n" + " request_timeout = 500ms\n" + " batch_size = ~b\n" + " query_mode = sync\n" + " }\n" + "}", + [ + BridgeType, + Name, + Server, + BatchSize + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. 
+ +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + Config0 = ?config(opents_config, Config), + Config1 = emqx_utils_maps:deep_merge(Config0, Overrides), + emqx_bridge:create(BridgeType, Name, Config1). + +delete_bridge(Config) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). + +query_resource(Config, Request) -> + query_resource(Config, Request, 1_000). + +query_resource(Config, Request, Timeout) -> + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => Timeout}). 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + SentData = make_data(), + ?check_trace( + begin + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + {ok, 200, #{failed := 0, success := 1}}, Result + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(opents_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?config(opents_bridge_type, Config), + Name = ?config(opents_name, Config), + OpentsConfig0 = ?config(opents_config, Config), + OpentsConfig = OpentsConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(OpentsConfig) + ), + SentData = make_data(), + ?check_trace( + begin + Request = {send_message, SentData}, + Res0 = query_resource(Config, Request, 2_500), + ?assertMatch( + {ok, 200, #{failed := 0, success := 1}}, Res0 + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(opents_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, 200, #{failed := 0, success := 1}}}], Trace), + ok + end + ), + ok. + +t_get_status(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + + Name = ?config(opents_name, Config), + BridgeType = ?config(opents_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)), + ok. 
+ +t_create_disconnected(Config) -> + BridgeType = proplists:get_value(bridge_type, Config, <<"opents">>), + Config1 = lists:keyreplace(opents_port, 1, Config, {opents_port, 61234}), + {_Name, OpenTSConf} = opents_config(BridgeType, Config1), + + Config2 = lists:keyreplace(opents_config, 1, Config1, {opents_config, OpenTSConf}), + ?assertMatch({ok, _}, create_bridge(Config2)), + + Name = ?config(opents_name, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceID)), + ok. + +t_write_failure(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {ok, _} = create_bridge(Config), + SentData = make_data(), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, SentData), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch({error, _}, Result), + ok + end), + ok. + +t_write_timeout(Config) -> + ProxyName = ?config(proxy_name, Config), + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + {ok, _} = create_bridge( + Config, + #{ + <<"resource_opts">> => #{ + <<"request_timeout">> => 500, + <<"resume_interval">> => 100, + <<"health_check_interval">> => 100 + } + } + ), + SentData = make_data(), + emqx_common_test_helpers:with_failure( + timeout, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {error, {resource_error, #{reason := timeout}}}, + query_resource(Config, {send_message, SentData}) + ) + end + ), + ok. 
+ +t_missing_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, #{}), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + ?assertMatch( + {error, {400, #{failed := 1, success := 0}}}, + Result + ), + ok. + +t_bad_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Data = maps:without([metric], make_data()), + {_, {ok, #{result := Result}}} = + ?wait_async_action( + send_message(Config, Data), + #{?snk_kind := buffer_worker_flush_ack}, + 2_000 + ), + + ?assertMatch( + {error, {400, #{failed := 1, success := 0}}}, Result + ), + ok. + +make_data() -> + make_data(<<"cpu">>, 12). + +make_data(Metric, Value) -> + #{ + metric => Metric, + tags => #{ + <<"host">> => <<"serverA">> + }, + value => Value + }. diff --git a/apps/emqx_bridge_oracle/BSL.txt b/apps/emqx_bridge_oracle/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_oracle/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_oracle/README.md b/apps/emqx_bridge_oracle/README.md new file mode 100644 index 000000000..d2974b722 --- /dev/null +++ b/apps/emqx_bridge_oracle/README.md @@ -0,0 +1,28 @@ +# EMQX Oracle Database Bridge + +This application houses the Oracle Database bridge for EMQX Enterprise Edition. +It implements the data bridge APIs for interacting with an Oracle Database Bridge. 
+ + +# Documentation + +- Refer to [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html) + for the EMQX rules engine introduction. + + +# HTTP APIs + +- Several APIs are provided for bridge management, which includes create bridge, + update bridge, get bridge, stop or restart bridge and list bridges etc. + + Refer to [API Docs - Bridges](https://docs.emqx.com/en/enterprise/v5.0/admin/api-docs.html#tag/Bridges) for more detailed information. + + +## Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + + +## License + +See [BSL](./BSL.txt). diff --git a/apps/emqx_bridge_oracle/docker-ct b/apps/emqx_bridge_oracle/docker-ct new file mode 100644 index 000000000..c24dc4bc9 --- /dev/null +++ b/apps/emqx_bridge_oracle/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +oracle diff --git a/changes/ce/fix-10420.zh.md b/apps/emqx_bridge_oracle/etc/emqx_bridge_oracle.conf similarity index 100% rename from changes/ce/fix-10420.zh.md rename to apps/emqx_bridge_oracle/etc/emqx_bridge_oracle.conf diff --git a/apps/emqx_bridge_oracle/rebar.config b/apps/emqx_bridge_oracle/rebar.config new file mode 100644 index 000000000..c238546c4 --- /dev/null +++ b/apps/emqx_bridge_oracle/rebar.config @@ -0,0 +1,13 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ {emqx_oracle, {path, "../../apps/emqx_oracle"}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + , {emqx_bridge, {path, "../../apps/emqx_bridge"}} + ]}. + +{shell, [ + % {config, "config/sys.config"}, + {apps, [emqx_bridge_oracle]} +]}. 
diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src new file mode 100644 index 000000000..4f81c2110 --- /dev/null +++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.app.src @@ -0,0 +1,14 @@ +{application, emqx_bridge_oracle, [ + {description, "EMQX Enterprise Oracle Database Bridge"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + emqx_oracle + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl new file mode 100644 index 000000000..8a87f02ba --- /dev/null +++ b/apps/emqx_bridge_oracle/src/emqx_bridge_oracle.erl @@ -0,0 +1,109 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_oracle). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_SQL, << + "insert into t_mqtt_msg(msgid, topic, qos, payload)" + "values (${id}, ${topic}, ${qos}, ${payload})" +>>). + +conn_bridge_examples(Method) -> + [ + #{ + <<"oracle">> => #{ + summary => <<"Oracle Database Bridge">>, + value => values(Method) + } + } + ]. 
+ +values(_Method) -> + #{ + enable => true, + type => oracle, + name => <<"foo">>, + server => <<"127.0.0.1:1521">>, + pool_size => 8, + database => <<"ORCL">>, + sid => <<"ORCL">>, + username => <<"root">>, + password => <<"******">>, + sql => ?DEFAULT_SQL, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_buffer_bytes => ?DEFAULT_BUFFER_BYTES + } + }. + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions + +namespace() -> "bridge_oracle". + +roots() -> []. + +fields("config") -> + [ + {enable, + hoconsc:mk( + boolean(), + #{desc => ?DESC("config_enable"), default => true} + )}, + {sql, + hoconsc:mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}, + {local_topic, + hoconsc:mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )} + ] ++ emqx_resource_schema:fields("resource_opts") ++ + (emqx_oracle_schema:fields(config) -- + emqx_connector_schema_lib:prepare_statement_fields()); +fields("post") -> + fields("post", oracle); +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +fields("post", Type) -> + [type_field(Type), name_field() | fields("config")]. + +desc("config") -> + ?DESC("desc_config"); +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- + +type_field(Type) -> + {type, hoconsc:mk(hoconsc:enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, hoconsc:mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. 
diff --git a/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl new file mode 100644 index 000000000..b50788277 --- /dev/null +++ b/apps/emqx_bridge_oracle/test/emqx_bridge_oracle_SUITE.erl @@ -0,0 +1,514 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_oracle_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-import(emqx_common_test_helpers, [on_exit/1]). + +-define(BRIDGE_TYPE_BIN, <<"oracle">>). +-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_oracle, emqx_bridge_oracle]). +-define(DATABASE, "XE"). +-define(RULE_TOPIC, "mqtt/rule"). +% -define(RULE_TOPIC_BIN, <>). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, plain} + ]. + +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + [ + {plain, AllTCs} + ]. + +only_once_tests() -> + [t_create_via_http]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)), + _ = application:stop(emqx_connector), + ok. 
+ +init_per_group(plain = Type, Config) -> + OracleHost = os:getenv("ORACLE_PLAIN_HOST", "toxiproxy.emqx.net"), + OraclePort = list_to_integer(os:getenv("ORACLE_PLAIN_PORT", "1521")), + ProxyName = "oracle", + case emqx_common_test_helpers:is_tcp_server_available(OracleHost, OraclePort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {oracle_host, OracleHost}, + {oracle_port, OraclePort}, + {connection_type, Type} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_oracle); + _ -> + {skip, no_oracle} + end + end; +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when + Group =:= plain +-> + common_end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +common_init_per_group() -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?APPS), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic} + ]. + +common_end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). + +end_per_testcase(_Testcase, Config) -> + common_end_per_testcase(_Testcase, Config). 
+ +common_init_per_testcase(TestCase, Config0) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + OracleTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + ConnectionType = ?config(connection_type, Config0), + Config = [{oracle_topic, OracleTopic} | Config0], + {Name, ConfigString, OracleConfig} = oracle_config( + TestCase, ConnectionType, Config + ), + ok = snabbkaffe:start_trace(), + [ + {oracle_name, Name}, + {oracle_config_string, ConfigString}, + {oracle_config, OracleConfig} + | Config + ]. + +common_end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ +sql_insert_template_for_bridge() -> + "INSERT INTO mqtt_test(topic, msgid, payload, retain) VALUES (${topic}, ${id}, ${payload}, ${retain})". + +sql_create_table() -> + "CREATE TABLE mqtt_test (topic VARCHAR2(255), msgid VARCHAR2(64), payload NCLOB, retain NUMBER(1))". + +sql_drop_table() -> + "DROP TABLE mqtt_test". 
+ +reset_table(Config) -> + ResourceId = resource_id(Config), + _ = emqx_resource:simple_sync_query(ResourceId, {sql, sql_drop_table()}), + {ok, [{proc_result, 0, _}]} = emqx_resource:simple_sync_query( + ResourceId, {sql, sql_create_table()} + ), + ok. + +drop_table(Config) -> + ResourceId = resource_id(Config), + emqx_resource:simple_sync_query(ResourceId, {query, sql_drop_table()}), + ok. + +oracle_config(TestCase, _ConnectionType, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + OracleHost = ?config(oracle_host, Config), + OraclePort = ?config(oracle_port, Config), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + ServerURL = iolist_to_binary([ + OracleHost, + ":", + integer_to_binary(OraclePort) + ]), + ConfigString = + io_lib:format( + "bridges.oracle.~s {\n" + " enable = true\n" + " database = \"~s\"\n" + " sid = \"~s\"\n" + " server = \"~s\"\n" + " username = \"system\"\n" + " password = \"oracle\"\n" + " pool_size = 1\n" + " sql = \"~s\"\n" + " resource_opts = {\n" + " auto_restart_interval = 5000\n" + " request_timeout = 30000\n" + " query_mode = \"async\"\n" + " enable_batch = true\n" + " batch_size = 3\n" + " batch_time = \"3s\"\n" + " worker_pool_size = 1\n" + " }\n" + "}\n", + [ + Name, + ?DATABASE, + ?DATABASE, + ServerURL, + sql_insert_template_for_bridge() + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Name)}. + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = ?BRIDGE_TYPE_BIN, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +resource_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + emqx_bridge_resource:resource_id(Type, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). 
+ +create_bridge(Config, Overrides) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + emqx_bridge:create(Type, Name, OracleConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). + +create_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). 
+ +update_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig0 = ?config(oracle_config, Config), + OracleConfig = emqx_utils_maps:deep_merge(OracleConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). + +probe_bridge_api(Config, _Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(oracle_name, Config), + OracleConfig = ?config(oracle_config, Config), + Params = OracleConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. 
+ +create_rule_and_action_http(Config) -> + OracleName = ?config(oracle_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, OracleName), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_sync_query(Config) -> + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + reset_table(Config), + MsgId = erlang:unique_integer(), + Params = #{ + topic => ?config(mqtt_topic, Config), + id => MsgId, + payload => ?config(oracle_name, Config), + retain => true + }, + Message = {send_message, Params}, + ?assertEqual( + {ok, [{affected_rows, 1}]}, emqx_resource:simple_sync_query(ResourceId, Message) + ), + ok + end, + [] + ), + ok. 
+ +t_batch_sync_query(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 30, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + reset_table(Config), + MsgId = erlang:unique_integer(), + Params = #{ + topic => ?config(mqtt_topic, Config), + id => MsgId, + payload => ?config(oracle_name, Config), + retain => false + }, + % Send 3 async messages while resource is down. When it comes back, these messages + % will be delivered in sync way. If we try to send sync messages directly, it will + % be sent async as callback_mode is set to async_if_possible. + Message = {send_message, Params}, + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(1000), + emqx_resource:query(ResourceId, Message), + emqx_resource:query(ResourceId, Message), + emqx_resource:query(ResourceId, Message) + end), + ?retry( + _Sleep = 1_000, + _Attempts = 30, + ?assertMatch( + {ok, [{result_set, _, _, [[{3}]]}]}, + emqx_resource:simple_sync_query( + ResourceId, {query, "SELECT COUNT(*) FROM mqtt_test"} + ) + ) + ), + ok + end, + [] + ), + ok. + +t_create_via_http(Config) -> + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + + %% lightweight matrix testing some configs + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"resource_opts">> => + #{<<"batch_size">> => 4} + } + ) + ), + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"resource_opts">> => + #{<<"batch_time">> => <<"4s">>} + } + ) + ), + ok + end, + [] + ), + ok. 
+ +t_start_stop(Config) -> + OracleName = ?config(oracle_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + %% Check that the bridge probe API doesn't leak atoms. + ProbeRes0 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. + ProbeRes1 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + %% Now stop the bridge. + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, OracleName), + #{?snk_kind := oracle_bridge_stopped}, + 5_000 + ) + ), + + ok + end, + fun(Trace) -> + %% one for each probe, one for real + ?assertMatch([_, _, _], ?of_kind(oracle_bridge_stopped, Trace)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?assertMatch({ok, _}, create_bridge(Config)), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. 
+ ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. diff --git a/apps/emqx_bridge_pulsar/.gitignore b/apps/emqx_bridge_pulsar/.gitignore new file mode 100644 index 000000000..f1c455451 --- /dev/null +++ b/apps/emqx_bridge_pulsar/.gitignore @@ -0,0 +1,19 @@ +.rebar3 +_* +.eunit +*.o +*.beam +*.plt +*.swp +*.swo +.erlang.cookie +ebin +log +erl_crash.dump +.rebar +logs +_build +.idea +*.iml +rebar3.crashdump +*~ diff --git a/apps/emqx_bridge_pulsar/BSL.txt b/apps/emqx_bridge_pulsar/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_bridge_pulsar/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. +Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. 
+ +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). 
+ +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/apps/emqx_bridge_pulsar/README.md b/apps/emqx_bridge_pulsar/README.md new file mode 100644 index 000000000..fbd8bf81d --- /dev/null +++ b/apps/emqx_bridge_pulsar/README.md @@ -0,0 +1,30 @@ +# Pulsar Data Integration Bridge + +This application houses the Pulsar Producer data integration bridge +for EMQX Enterprise Edition. It provides the means to connect to +Pulsar and publish messages to it. 
+ +Currently, our Pulsar Producer library has its own `replayq` buffering +implementation, so this bridge does not require buffer workers from +`emqx_resource`. It implements the connection management and +interaction without need for a separate connector app, since it's not +used by authentication and authorization applications. + +# Documentation links + +For more information on Apache Pulsar, please see its [official +site](https://pulsar.apache.org/). + +# Configurations + +Please see [our official +documentation](https://www.emqx.io/docs/en/v5.0/data-integration/data-bridge-pulsar.html) +for more detailed info. + +# Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +# License + +EMQ Business Source License 1.1, refer to [LICENSE](BSL.txt). diff --git a/apps/emqx_bridge_pulsar/docker-ct b/apps/emqx_bridge_pulsar/docker-ct new file mode 100644 index 000000000..6324bb4f7 --- /dev/null +++ b/apps/emqx_bridge_pulsar/docker-ct @@ -0,0 +1,2 @@ +toxiproxy +pulsar diff --git a/apps/emqx_bridge_pulsar/etc/emqx_bridge_pulsar.conf b/apps/emqx_bridge_pulsar/etc/emqx_bridge_pulsar.conf new file mode 100644 index 000000000..e69de29bb diff --git a/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl b/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl new file mode 100644 index 000000000..5ee87e48f --- /dev/null +++ b/apps/emqx_bridge_pulsar/include/emqx_bridge_pulsar.hrl @@ -0,0 +1,14 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-ifndef(EMQX_BRIDGE_PULSAR_HRL). +-define(EMQX_BRIDGE_PULSAR_HRL, true). + +-define(PULSAR_HOST_OPTIONS, #{ + default_port => 6650, + default_scheme => "pulsar", + supported_schemes => ["pulsar", "pulsar+ssl"] +}). + +-endif. 
diff --git a/apps/emqx_bridge_pulsar/rebar.config b/apps/emqx_bridge_pulsar/rebar.config new file mode 100644 index 000000000..d5a63f320 --- /dev/null +++ b/apps/emqx_bridge_pulsar/rebar.config @@ -0,0 +1,14 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ + {pulsar, {git, "https://github.com/emqx/pulsar-client-erl.git", {tag, "0.8.1"}}}, + {emqx_connector, {path, "../../apps/emqx_connector"}}, + {emqx_resource, {path, "../../apps/emqx_resource"}}, + {emqx_bridge, {path, "../../apps/emqx_bridge"}} +]}. + +{shell, [ + % {config, "config/sys.config"}, + {apps, [emqx_bridge_pulsar]} +]}. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src new file mode 100644 index 000000000..b169aa2c4 --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.app.src @@ -0,0 +1,14 @@ +{application, emqx_bridge_pulsar, [ + {description, "EMQX Pulsar Bridge"}, + {vsn, "0.1.1"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + pulsar + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl new file mode 100644 index 000000000..18faf0e3b --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar.erl @@ -0,0 +1,228 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar). + +-include("emqx_bridge_pulsar.hrl"). +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +%% hocon_schema API +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). +%% emqx_ee_bridge "unofficial" API +-export([conn_bridge_examples/1]). 
+ +%%------------------------------------------------------------------------------------------------- +%% `hocon_schema' API +%%------------------------------------------------------------------------------------------------- + +namespace() -> + "bridge_pulsar". + +roots() -> + []. + +fields(pulsar_producer) -> + fields(config) ++ fields(producer_opts); +fields(config) -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {servers, + mk( + binary(), + #{ + required => true, + desc => ?DESC("servers"), + validator => emqx_schema:servers_validator( + ?PULSAR_HOST_OPTIONS, _Required = true + ) + } + )}, + {authentication, + mk(hoconsc:union([none, ref(auth_basic), ref(auth_token)]), #{ + default => none, desc => ?DESC("authentication") + })} + ] ++ emqx_connector_schema_lib:ssl_fields(); +fields(producer_opts) -> + [ + {batch_size, + mk( + pos_integer(), + #{default => 100, desc => ?DESC("producer_batch_size")} + )}, + {compression, + mk( + hoconsc:enum([no_compression, snappy, zlib]), + #{default => no_compression, desc => ?DESC("producer_compression")} + )}, + {send_buffer, + mk(emqx_schema:bytesize(), #{ + default => <<"1MB">>, desc => ?DESC("producer_send_buffer") + })}, + {sync_timeout, + mk(emqx_schema:duration_ms(), #{ + default => <<"3s">>, desc => ?DESC("producer_sync_timeout") + })}, + {retention_period, + mk( + hoconsc:union([infinity, emqx_schema:duration_ms()]), + #{default => infinity, desc => ?DESC("producer_retention_period")} + )}, + {max_batch_bytes, + mk( + emqx_schema:bytesize(), + #{default => <<"900KB">>, desc => ?DESC("producer_max_batch_bytes")} + )}, + {local_topic, mk(binary(), #{required => false, desc => ?DESC("producer_local_topic")})}, + {pulsar_topic, mk(binary(), #{required => true, desc => ?DESC("producer_pulsar_topic")})}, + {strategy, + mk( + hoconsc:enum([random, roundrobin, key_dispatch]), + #{default => random, desc => ?DESC("producer_strategy")} + )}, + {buffer, mk(ref(producer_buffer), 
#{required => false, desc => ?DESC("producer_buffer")})}, + {message, + mk(ref(producer_pulsar_message), #{ + required => false, desc => ?DESC("producer_message_opts") + })}, + {resource_opts, + mk( + ref(producer_resource_opts), + #{ + required => false, + desc => ?DESC(emqx_resource_schema, "creation_opts") + } + )} + ]; +fields(producer_buffer) -> + [ + {mode, + mk( + hoconsc:enum([memory, disk, hybrid]), + #{default => memory, desc => ?DESC("buffer_mode")} + )}, + {per_partition_limit, + mk( + emqx_schema:bytesize(), + #{default => <<"2GB">>, desc => ?DESC("buffer_per_partition_limit")} + )}, + {segment_bytes, + mk( + emqx_schema:bytesize(), + #{default => <<"100MB">>, desc => ?DESC("buffer_segment_bytes")} + )}, + {memory_overload_protection, + mk(boolean(), #{ + default => false, + desc => ?DESC("buffer_memory_overload_protection") + })} + ]; +fields(producer_pulsar_message) -> + [ + {key, + mk(string(), #{default => <<"${.clientid}">>, desc => ?DESC("producer_key_template")})}, + {value, mk(string(), #{default => <<"${.}">>, desc => ?DESC("producer_value_template")})} + ]; +fields(producer_resource_opts) -> + SupportedOpts = [ + health_check_interval, + resume_interval, + start_after_created, + start_timeout, + auto_restart_interval + ], + lists:filtermap( + fun + ({health_check_interval = Field, MetaFn}) -> + {true, {Field, override_default(MetaFn, 1_000)}}; + ({Field, _Meta}) -> + lists:member(Field, SupportedOpts) + end, + emqx_resource_schema:fields("creation_opts") + ); +fields(auth_basic) -> + [ + {username, mk(binary(), #{required => true, desc => ?DESC("auth_basic_username")})}, + {password, + mk(binary(), #{ + required => true, + desc => ?DESC("auth_basic_password"), + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} + ]; +fields(auth_token) -> + [ + {jwt, + mk(binary(), #{ + required => true, + desc => ?DESC("auth_token_jwt"), + sensitive => true, + converter => fun emqx_schema:password_converter/2 + })} + ]; 
+fields("get_" ++ Type) -> + emqx_bridge_schema:status_fields() ++ fields("post_" ++ Type); +fields("put_" ++ Type) -> + fields("config_" ++ Type); +fields("post_" ++ Type) -> + [type_field(), name_field() | fields("config_" ++ Type)]; +fields("config_producer") -> + fields(pulsar_producer). + +desc(pulsar_producer) -> + ?DESC(pulsar_producer_struct); +desc(producer_resource_opts) -> + ?DESC(emqx_resource_schema, "creation_opts"); +desc("get_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `GET` method."]; +desc("put_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `PUT` method."]; +desc("post_" ++ Type) when Type =:= "producer" -> + ["Configuration for Pulsar using `POST` method."]; +desc(Name) -> + lists:member(Name, struct_names()) orelse throw({missing_desc, Name}), + ?DESC(Name). + +conn_bridge_examples(_Method) -> + [ + #{ + <<"pulsar_producer">> => #{ + summary => <<"Pulsar Producer Bridge">>, + value => #{todo => true} + } + } + ]. + +%%------------------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------------------- + +mk(Type, Meta) -> hoconsc:mk(Type, Meta). +ref(Name) -> hoconsc:ref(?MODULE, Name). + +type_field() -> + {type, mk(hoconsc:enum([pulsar_producer]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. + +struct_names() -> + [ + auth_basic, + auth_token, + producer_buffer, + producer_pulsar_message + ]. + +override_default(OriginalFn, NewDefault) -> + fun + (default) -> NewDefault; + (Field) -> OriginalFn(Field) + end. 
diff --git a/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl new file mode 100644 index 000000000..300fe9b2d --- /dev/null +++ b/apps/emqx_bridge_pulsar/src/emqx_bridge_pulsar_impl_producer.erl @@ -0,0 +1,421 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar_impl_producer). + +-include("emqx_bridge_pulsar.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_get_status/2, + on_query/3, + on_query_async/4 +]). + +-type pulsar_client_id() :: atom(). +-type state() :: #{ + pulsar_client_id := pulsar_client_id(), + producers := pulsar_producers:producers(), + sync_timeout := infinity | time:time(), + message_template := message_template() +}. +-type buffer_mode() :: memory | disk | hybrid. +-type compression_mode() :: no_compression | snappy | zlib. +-type partition_strategy() :: random | roundrobin | key_dispatch. +-type message_template_raw() :: #{ + key := binary(), + value := binary() +}. +-type message_template() :: #{ + key := emqx_plugin_libs_rule:tmpl_token(), + value := emqx_plugin_libs_rule:tmpl_token() +}. 
+-type config() :: #{ + authentication := _, + batch_size := pos_integer(), + bridge_name := atom(), + buffer := #{ + mode := buffer_mode(), + per_partition_limit := emqx_schema:byte_size(), + segment_bytes := emqx_schema:byte_size(), + memory_overload_protection := boolean() + }, + compression := compression_mode(), + max_batch_bytes := emqx_schema:bytesize(), + message := message_template_raw(), + pulsar_topic := binary(), + retention_period := infinity | emqx_schema:duration_ms(), + send_buffer := emqx_schema:bytesize(), + servers := binary(), + ssl := _, + strategy := partition_strategy(), + sync_timeout := emqx_schema:duration_ms() +}. + +%%------------------------------------------------------------------------------------- +%% `emqx_resource' API +%%------------------------------------------------------------------------------------- + +callback_mode() -> async_if_possible. + +%% there are no queries to be made to this bridge, so we say that +%% buffer is supported so we don't spawn unused resource buffer +%% workers. +is_buffer_supported() -> true. + +-spec on_start(resource_id(), config()) -> {ok, state()}. +on_start(InstanceId, Config) -> + #{ + authentication := _Auth, + bridge_name := BridgeName, + servers := Servers0, + ssl := SSL + } = Config, + Servers = format_servers(Servers0), + ClientId = make_client_id(InstanceId, BridgeName), + SSLOpts = emqx_tls_lib:to_client_opts(SSL), + ClientOpts = #{ + ssl_opts => SSLOpts, + conn_opts => conn_opts(Config) + }, + case pulsar:ensure_supervised_client(ClientId, Servers, ClientOpts) of + {ok, _Pid} -> + ?tp( + info, + "pulsar_client_started", + #{ + instance_id => InstanceId, + pulsar_hosts => Servers + } + ); + {error, Reason} -> + ?SLOG(error, #{ + msg => "failed_to_start_pulsar_client", + instance_id => InstanceId, + pulsar_hosts => Servers, + reason => Reason + }), + throw(failed_to_start_pulsar_client) + end, + start_producer(Config, InstanceId, ClientId, ClientOpts). 
+ +-spec on_stop(resource_id(), state()) -> ok. +on_stop(_InstanceId, State) -> + #{ + pulsar_client_id := ClientId, + producers := Producers + } = State, + stop_producers(ClientId, Producers), + stop_client(ClientId), + ?tp(pulsar_bridge_stopped, #{instance_id => _InstanceId}), + ok. + +-spec on_get_status(resource_id(), state()) -> connected | disconnected. +on_get_status(_InstanceId, State = #{}) -> + #{ + pulsar_client_id := ClientId, + producers := Producers + } = State, + case pulsar_client_sup:find_client(ClientId) of + {ok, Pid} -> + try pulsar_client:get_status(Pid) of + true -> + get_producer_status(Producers); + false -> + disconnected + catch + error:timeout -> + disconnected; + exit:{noproc, _} -> + disconnected + end; + {error, _} -> + disconnected + end; +on_get_status(_InstanceId, _State) -> + %% If a health check happens just after a concurrent request to + %% create the bridge is not quite finished, `State = undefined'. + connecting. + +-spec on_query(resource_id(), {send_message, map()}, state()) -> + {ok, term()} + | {error, timeout} + | {error, term()}. +on_query(_InstanceId, {send_message, Message}, State) -> + #{ + producers := Producers, + sync_timeout := SyncTimeout, + message_template := MessageTemplate + } = State, + PulsarMessage = render_message(Message, MessageTemplate), + try + pulsar:send_sync(Producers, [PulsarMessage], SyncTimeout) + catch + error:timeout -> + {error, timeout} + end. + +-spec on_query_async( + resource_id(), {send_message, map()}, {ReplyFun :: function(), Args :: list()}, state() +) -> + {ok, pid()}. +on_query_async(_InstanceId, {send_message, Message}, AsyncReplyFn, State) -> + ?tp_span( + pulsar_producer_on_query_async, + #{instance_id => _InstanceId, message => Message}, + do_on_query_async(Message, AsyncReplyFn, State) + ). 
+ +do_on_query_async(Message, AsyncReplyFn, State) -> + #{ + producers := Producers, + message_template := MessageTemplate + } = State, + PulsarMessage = render_message(Message, MessageTemplate), + pulsar:send(Producers, [PulsarMessage], #{callback_fn => AsyncReplyFn}). + +%%------------------------------------------------------------------------------------- +%% Internal fns +%%------------------------------------------------------------------------------------- + +-spec to_bin(atom() | string() | binary()) -> binary(). +to_bin(A) when is_atom(A) -> + atom_to_binary(A); +to_bin(L) when is_list(L) -> + list_to_binary(L); +to_bin(B) when is_binary(B) -> + B. + +-spec format_servers(binary()) -> [string()]. +format_servers(Servers0) -> + Servers1 = emqx_schema:parse_servers(Servers0, ?PULSAR_HOST_OPTIONS), + lists:map( + fun(#{scheme := Scheme, hostname := Host, port := Port}) -> + Scheme ++ "://" ++ Host ++ ":" ++ integer_to_list(Port) + end, + Servers1 + ). + +-spec make_client_id(resource_id(), atom() | binary()) -> pulsar_client_id(). +make_client_id(InstanceId, BridgeName) -> + case is_dry_run(InstanceId) of + true -> + pulsar_producer_probe; + false -> + ClientIdBin = iolist_to_binary([ + <<"pulsar_producer:">>, + to_bin(BridgeName), + <<":">>, + to_bin(node()) + ]), + binary_to_atom(ClientIdBin) + end. + +-spec is_dry_run(resource_id()) -> boolean(). +is_dry_run(InstanceId) -> + TestIdStart = string:find(InstanceId, ?TEST_ID_PREFIX), + case TestIdStart of + nomatch -> + false; + _ -> + string:equal(TestIdStart, InstanceId) + end. + +conn_opts(#{authentication := none}) -> + #{}; +conn_opts(#{authentication := #{username := Username, password := Password}}) -> + #{ + auth_data => iolist_to_binary([Username, <<":">>, Password]), + auth_method_name => <<"basic">> + }; +conn_opts(#{authentication := #{jwt := JWT}}) -> + #{ + auth_data => JWT, + auth_method_name => <<"token">> + }. + +-spec replayq_dir(pulsar_client_id()) -> string(). 
+replayq_dir(ClientId) -> + filename:join([emqx:data_dir(), "pulsar", to_bin(ClientId)]). + +-spec producer_name(pulsar_client_id()) -> atom(). +producer_name(ClientId) -> + ClientIdBin = to_bin(ClientId), + binary_to_atom( + iolist_to_binary([ + <<"producer-">>, + ClientIdBin + ]) + ). + +-spec start_producer(config(), resource_id(), pulsar_client_id(), map()) -> {ok, state()}. +start_producer(Config, InstanceId, ClientId, ClientOpts) -> + #{ + conn_opts := ConnOpts, + ssl_opts := SSLOpts + } = ClientOpts, + #{ + batch_size := BatchSize, + buffer := #{ + mode := BufferMode, + per_partition_limit := PerPartitionLimit, + segment_bytes := SegmentBytes, + memory_overload_protection := MemOLP0 + }, + compression := Compression, + max_batch_bytes := MaxBatchBytes, + message := MessageTemplateOpts, + pulsar_topic := PulsarTopic0, + retention_period := RetentionPeriod, + send_buffer := SendBuffer, + strategy := Strategy, + sync_timeout := SyncTimeout + } = Config, + {OffloadMode, ReplayQDir} = + case BufferMode of + memory -> {false, false}; + disk -> {false, replayq_dir(ClientId)}; + hybrid -> {true, replayq_dir(ClientId)} + end, + MemOLP = + case os:type() of + {unix, linux} -> MemOLP0; + _ -> false + end, + ReplayQOpts = #{ + replayq_dir => ReplayQDir, + replayq_offload_mode => OffloadMode, + replayq_max_total_bytes => PerPartitionLimit, + replayq_seg_bytes => SegmentBytes, + drop_if_highmem => MemOLP + }, + ProducerName = producer_name(ClientId), + ?tp(pulsar_producer_capture_name, #{producer_name => ProducerName}), + MessageTemplate = compile_message_template(MessageTemplateOpts), + ProducerOpts0 = + #{ + batch_size => BatchSize, + compression => Compression, + conn_opts => ConnOpts, + max_batch_bytes => MaxBatchBytes, + name => ProducerName, + retention_period => RetentionPeriod, + ssl_opts => SSLOpts, + strategy => partition_strategy(Strategy), + tcp_opts => [{sndbuf, SendBuffer}] + }, + ProducerOpts = maps:merge(ReplayQOpts, ProducerOpts0), + PulsarTopic = 
binary_to_list(PulsarTopic0), + ?tp(pulsar_producer_about_to_start_producers, #{producer_name => ProducerName}), + try pulsar:ensure_supervised_producers(ClientId, PulsarTopic, ProducerOpts) of + {ok, Producers} -> + State = #{ + pulsar_client_id => ClientId, + producers => Producers, + sync_timeout => SyncTimeout, + message_template => MessageTemplate + }, + ?tp(pulsar_producer_bridge_started, #{}), + {ok, State} + catch + Kind:Error:Stacktrace -> + ?tp( + error, + "failed_to_start_pulsar_producer", + #{ + instance_id => InstanceId, + kind => Kind, + reason => Error, + stacktrace => Stacktrace + } + ), + stop_client(ClientId), + throw(failed_to_start_pulsar_producer) + end. + +-spec stop_client(pulsar_client_id()) -> ok. +stop_client(ClientId) -> + _ = log_when_error( + fun() -> + ok = pulsar:stop_and_delete_supervised_client(ClientId), + ?tp(pulsar_bridge_client_stopped, #{pulsar_client_id => ClientId}), + ok + end, + #{ + msg => "failed_to_delete_pulsar_client", + pulsar_client_id => ClientId + } + ), + ok. + +-spec stop_producers(pulsar_client_id(), pulsar_producers:producers()) -> ok. +stop_producers(ClientId, Producers) -> + _ = log_when_error( + fun() -> + ok = pulsar:stop_and_delete_supervised_producers(Producers), + ?tp(pulsar_bridge_producer_stopped, #{pulsar_client_id => ClientId}), + ok + end, + #{ + msg => "failed_to_delete_pulsar_producer", + pulsar_client_id => ClientId + } + ), + ok. + +log_when_error(Fun, Log) -> + try + Fun() + catch + C:E -> + ?SLOG(error, Log#{ + exception => C, + reason => E + }) + end. + +-spec compile_message_template(message_template_raw()) -> message_template(). +compile_message_template(TemplateOpts) -> + KeyTemplate = maps:get(key, TemplateOpts, <<"${.clientid}">>), + ValueTemplate = maps:get(value, TemplateOpts, <<"${.}">>), + #{ + key => preproc_tmpl(KeyTemplate), + value => preproc_tmpl(ValueTemplate) + }. + +preproc_tmpl(Template) -> + emqx_plugin_libs_rule:preproc_tmpl(Template). 
+ +render_message( + Message, #{key := KeyTemplate, value := ValueTemplate} +) -> + #{ + key => render(Message, KeyTemplate), + value => render(Message, ValueTemplate) + }. + +render(Message, Template) -> + Opts = #{ + var_trans => fun + (undefined) -> <<"">>; + (X) -> emqx_plugin_libs_rule:bin(X) + end, + return => full_binary + }, + emqx_plugin_libs_rule:proc_tmpl(Template, Message, Opts). + +get_producer_status(Producers) -> + case pulsar_producers:all_connected(Producers) of + true -> connected; + false -> connecting + end. + +partition_strategy(key_dispatch) -> first_key_dispatch; +partition_strategy(Strategy) -> Strategy. diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl new file mode 100644 index 000000000..be38f6625 --- /dev/null +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE.erl @@ -0,0 +1,1019 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_bridge_pulsar_impl_producer_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-import(emqx_common_test_helpers, [on_exit/1]). + +-define(BRIDGE_TYPE_BIN, <<"pulsar_producer">>). +-define(APPS, [emqx_bridge, emqx_resource, emqx_rule_engine, emqx_bridge_pulsar]). +-define(RULE_TOPIC, "mqtt/rule"). +-define(RULE_TOPIC_BIN, <>). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, plain}, + {group, tls} + ]. 
+ +groups() -> + AllTCs = emqx_common_test_helpers:all(?MODULE), + OnlyOnceTCs = only_once_tests(), + TCs = AllTCs -- OnlyOnceTCs, + [ + {plain, AllTCs}, + {tls, TCs} + ]. + +only_once_tests() -> + [ + t_create_via_http, + t_start_when_down, + t_send_when_down, + t_send_when_timeout, + t_failure_to_start_producer, + t_producer_process_crash + ]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps(lists:reverse(?APPS)), + _ = application:stop(emqx_connector), + ok. + +init_per_group(plain = Type, Config) -> + PulsarHost = os:getenv("PULSAR_PLAIN_HOST", "toxiproxy"), + PulsarPort = list_to_integer(os:getenv("PULSAR_PLAIN_PORT", "6652")), + ProxyName = "pulsar_plain", + case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {pulsar_host, PulsarHost}, + {pulsar_port, PulsarPort}, + {pulsar_type, Type}, + {use_tls, false} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_pulsar); + _ -> + {skip, no_pulsar} + end + end; +init_per_group(tls = Type, Config) -> + PulsarHost = os:getenv("PULSAR_TLS_HOST", "toxiproxy"), + PulsarPort = list_to_integer(os:getenv("PULSAR_TLS_PORT", "6653")), + ProxyName = "pulsar_tls", + case emqx_common_test_helpers:is_tcp_server_available(PulsarHost, PulsarPort) of + true -> + Config1 = common_init_per_group(), + [ + {proxy_name, ProxyName}, + {pulsar_host, PulsarHost}, + {pulsar_port, PulsarPort}, + {pulsar_type, Type}, + {use_tls, true} + | Config1 ++ Config + ]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_pulsar); + _ -> + {skip, no_pulsar} + end + end; +init_per_group(_Group, Config) -> + Config. 
+ +end_per_group(Group, Config) when + Group =:= plain; + Group =:= tls +-> + common_end_per_group(Config), + ok; +end_per_group(_Group, _Config) -> + ok. + +common_init_per_group() -> + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + application:load(emqx_bridge), + ok = emqx_common_test_helpers:start_apps([emqx_conf]), + ok = emqx_connector_test_helpers:start_apps(?APPS), + {ok, _} = application:ensure_all_started(emqx_connector), + emqx_mgmt_api_test_util:init_suite(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + MQTTTopic = <<"mqtt/topic/", UniqueNum/binary>>, + [ + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort}, + {mqtt_topic, MQTTTopic} + ]. + +common_end_per_group(Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + ok. + +init_per_testcase(TestCase, Config) -> + common_init_per_testcase(TestCase, Config). + +end_per_testcase(_Testcase, Config) -> + case proplists:get_bool(skip_does_not_apply, Config) of + true -> + ok; + false -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + delete_all_bridges(), + stop_consumer(Config), + %% in CI, apparently this needs more time since the + %% machines struggle with all the containers running... + emqx_common_test_helpers:call_janitor(60_000), + ok = snabbkaffe:stop(), + ok + end. 
+ +common_init_per_testcase(TestCase, Config0) -> + ct:timetrap(timer:seconds(60)), + delete_all_bridges(), + UniqueNum = integer_to_binary(erlang:unique_integer()), + PulsarTopic = + << + (atom_to_binary(TestCase))/binary, + UniqueNum/binary + >>, + PulsarType = ?config(pulsar_type, Config0), + Config1 = [{pulsar_topic, PulsarTopic} | Config0], + {Name, ConfigString, PulsarConfig} = pulsar_config( + TestCase, PulsarType, Config1 + ), + ConsumerConfig = start_consumer(TestCase, Config1), + Config = ConsumerConfig ++ Config1, + ok = snabbkaffe:start_trace(), + [ + {pulsar_name, Name}, + {pulsar_config_string, ConfigString}, + {pulsar_config, PulsarConfig} + | Config + ]. + +delete_all_bridges() -> + lists:foreach( + fun(#{name := Name, type := Type}) -> + emqx_bridge:remove(Type, Name) + end, + emqx_bridge:list() + ). + +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +pulsar_config(TestCase, _PulsarType, Config) -> + UniqueNum = integer_to_binary(erlang:unique_integer()), + PulsarHost = ?config(pulsar_host, Config), + PulsarPort = ?config(pulsar_port, Config), + PulsarTopic = ?config(pulsar_topic, Config), + AuthType = proplists:get_value(sasl_auth_mechanism, Config, none), + UseTLS = proplists:get_value(use_tls, Config, false), + Name = << + (atom_to_binary(TestCase))/binary, UniqueNum/binary + >>, + MQTTTopic = proplists:get_value(mqtt_topic, Config, <<"mqtt/topic/", UniqueNum/binary>>), + Prefix = + case UseTLS of + true -> <<"pulsar+ssl://">>; + false -> <<"pulsar://">> + end, + ServerURL = iolist_to_binary([ + Prefix, + PulsarHost, + ":", + integer_to_binary(PulsarPort) + ]), + ConfigString = + io_lib:format( + "bridges.pulsar_producer.~s {\n" + " enable = true\n" + " servers = \"~s\"\n" + " sync_timeout = 5s\n" + " compression = no_compression\n" + " send_buffer = 1MB\n" + " retention_period = infinity\n" + " max_batch_bytes = 
900KB\n" + " batch_size = 1\n" + " strategy = random\n" + " buffer {\n" + " mode = memory\n" + " per_partition_limit = 10MB\n" + " segment_bytes = 5MB\n" + " memory_overload_protection = true\n" + " }\n" + " message {\n" + " key = \"${.clientid}\"\n" + " value = \"${.}\"\n" + " }\n" + "~s" + " ssl {\n" + " enable = ~p\n" + " verify = verify_none\n" + " server_name_indication = \"auto\"\n" + " }\n" + " pulsar_topic = \"~s\"\n" + " local_topic = \"~s\"\n" + "}\n", + [ + Name, + ServerURL, + authentication(AuthType), + UseTLS, + PulsarTopic, + MQTTTopic + ] + ), + {Name, ConfigString, parse_and_check(ConfigString, Name)}. + +parse_and_check(ConfigString, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + TypeBin = ?BRIDGE_TYPE_BIN, + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{TypeBin := #{Name := Config}}} = RawConf, + Config. + +authentication(_) -> + " authentication = none\n". + +resource_id(Config) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + emqx_bridge_resource:resource_id(Type, Name). + +create_bridge(Config) -> + create_bridge(Config, _Overrides = #{}). + +create_bridge(Config, Overrides) -> + Type = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + emqx_bridge:create(Type, Name, PulsarConfig). + +create_bridge_api(Config) -> + create_bridge_api(Config, _Overrides = #{}). 
+ +create_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + Params = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("creating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {Status, Headers, Body0}} -> + {ok, {Status, Headers, emqx_utils_json:decode(Body0, [return_maps])}}; + Error -> + Error + end, + ct:pal("bridge create result: ~p", [Res]), + Res. + +update_bridge_api(Config) -> + update_bridge_api(Config, _Overrides = #{}). + +update_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig0 = ?config(pulsar_config, Config), + PulsarConfig = emqx_utils_maps:deep_merge(PulsarConfig0, Overrides), + BridgeId = emqx_bridge_resource:bridge_id(TypeBin, Name), + Params = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Path = emqx_mgmt_api_test_util:api_path(["bridges", BridgeId]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("updating bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(put, Path, "", AuthHeader, Params, Opts) of + {ok, {_Status, _Headers, Body0}} -> {ok, emqx_utils_json:decode(Body0, [return_maps])}; + Error -> Error + end, + ct:pal("bridge update result: ~p", [Res]), + Res. + +probe_bridge_api(Config) -> + probe_bridge_api(Config, _Overrides = #{}). 
+ +probe_bridge_api(Config, Overrides) -> + TypeBin = ?BRIDGE_TYPE_BIN, + Name = ?config(pulsar_name, Config), + PulsarConfig = ?config(pulsar_config, Config), + Params0 = PulsarConfig#{<<"type">> => TypeBin, <<"name">> => Name}, + Params = maps:merge(Params0, Overrides), + Path = emqx_mgmt_api_test_util:api_path(["bridges_probe"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + Opts = #{return_all => true}, + ct:pal("probing bridge (via http): ~p", [Params]), + Res = + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params, Opts) of + {ok, {{_, 204, _}, _Headers, _Body0} = Res0} -> {ok, Res0}; + Error -> Error + end, + ct:pal("bridge probe result: ~p", [Res]), + Res. + +start_consumer(TestCase, Config) -> + PulsarHost = ?config(pulsar_host, Config), + PulsarPort = ?config(pulsar_port, Config), + PulsarTopic = ?config(pulsar_topic, Config), + UseTLS = ?config(use_tls, Config), + %% FIXME: patch pulsar to accept binary urls... + Scheme = + case UseTLS of + true -> <<"pulsar+ssl://">>; + false -> <<"pulsar://">> + end, + URL = + binary_to_list( + <> + ), + ConnOpts = #{}, + ConsumerClientId = TestCase, + CertsPath = emqx_common_test_helpers:deps_path(emqx, "etc/certs"), + SSLOpts = #{ + enable => UseTLS, + keyfile => filename:join([CertsPath, "key.pem"]), + certfile => filename:join([CertsPath, "cert.pem"]), + cacertfile => filename:join([CertsPath, "cacert.pem"]) + }, + {ok, _ClientPid} = pulsar:ensure_supervised_client( + ConsumerClientId, + [URL], + #{ + conn_opts => ConnOpts, + ssl_opts => emqx_tls_lib:to_client_opts(SSLOpts) + } + ), + ConsumerOpts = #{ + cb_init_args => #{send_to => self()}, + cb_module => pulsar_echo_consumer, + sub_type => 'Shared', + subscription => atom_to_list(TestCase), + max_consumer_num => 1, + %% Note! This must not coincide with the client + %% id, or else weird bugs will happen, like the + %% consumer never starts... 
+ name => test_consumer, + consumer_id => 1, + conn_opts => ConnOpts + }, + {ok, Consumer} = pulsar:ensure_supervised_consumers( + ConsumerClientId, + PulsarTopic, + ConsumerOpts + ), + %% since connection is async, and there's currently no way to + %% specify the subscription initial position as `Earliest', we + %% need to wait until the consumer is connected to avoid + %% flakiness. + ok = wait_until_consumer_connected(Consumer), + [ + {consumer_client_id, ConsumerClientId}, + {pulsar_consumer, Consumer} + ]. + +stop_consumer(Config) -> + ConsumerClientId = ?config(consumer_client_id, Config), + Consumer = ?config(pulsar_consumer, Config), + ok = pulsar:stop_and_delete_supervised_consumers(Consumer), + ok = pulsar:stop_and_delete_supervised_client(ConsumerClientId), + ok. + +wait_until_consumer_connected(Consumer) -> + ?retry( + _Sleep = 300, + _Attempts0 = 20, + true = pulsar_consumers:all_connected(Consumer) + ), + ok. + +wait_until_producer_connected() -> + wait_until_connected(pulsar_producers_sup, pulsar_producer). + +wait_until_connected(SupMod, Mod) -> + Pids = [ + P + || {_Name, SupPid, _Type, _Mods} <- supervisor:which_children(SupMod), + P <- element(2, process_info(SupPid, links)), + case proc_lib:initial_call(P) of + {Mod, init, _} -> true; + _ -> false + end + ], + ?retry( + _Sleep = 300, + _Attempts0 = 20, + lists:foreach(fun(P) -> {connected, _} = sys:get_state(P) end, Pids) + ), + ok. 
+ +create_rule_and_action_http(Config) -> + PulsarName = ?config(pulsar_name, Config), + BridgeId = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_BIN, PulsarName), + Params = #{ + enable => true, + sql => <<"SELECT * FROM \"", ?RULE_TOPIC, "\"">>, + actions => [BridgeId] + }, + Path = emqx_mgmt_api_test_util:api_path(["rules"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + ct:pal("rule action params: ~p", [Params]), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_utils_json:decode(Res, [return_maps])}; + Error -> Error + end. + +receive_consumed(Timeout) -> + receive + {pulsar_message, #{payloads := Payloads}} -> + lists:map(fun try_decode_json/1, Payloads) + after Timeout -> + ct:pal("mailbox: ~p", [process_info(self(), messages)]), + ct:fail("no message consumed") + end. + +try_decode_json(Payload) -> + case emqx_utils_json:safe_decode(Payload, [return_maps]) of + {error, _} -> + Payload; + {ok, JSON} -> + JSON + end. + +cluster(Config) -> + PrivDataDir = ?config(priv_dir, Config), + PeerModule = + case os:getenv("IS_CI") of + false -> + slave; + _ -> + ct_slave + end, + Cluster = emqx_common_test_helpers:emqx_cluster( + [core, core], + [ + {apps, [emqx_conf, emqx_bridge, emqx_rule_engine, emqx_bridge_pulsar]}, + {listener_ports, []}, + {peer_mod, PeerModule}, + {priv_data_dir, PrivDataDir}, + {load_schema, true}, + {start_autocluster, true}, + {schema_mod, emqx_ee_conf_schema}, + {env_handler, fun + (emqx) -> + application:set_env(emqx, boot_modules, [broker, router]), + ok; + (emqx_conf) -> + ok; + (_) -> + ok + end} + ] + ), + ct:pal("cluster: ~p", [Cluster]), + Cluster. + +start_cluster(Cluster) -> + Nodes = + [ + emqx_common_test_helpers:start_slave(Name, Opts) + || {Name, Opts} <- Cluster + ], + on_exit(fun() -> + emqx_utils:pmap( + fun(N) -> + ct:pal("stopping ~p", [N]), + ok = emqx_common_test_helpers:stop_slave(N) + end, + Nodes + ) + end), + Nodes. 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_start_and_produce_ok(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + QoS = 0, + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + {ok, #{<<"id">> := RuleId}} = create_rule_and_action_http(Config), + on_exit(fun() -> ok = emqx_rule_engine:delete_rule(RuleId) end), + %% Publish using local topic. + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + emqx:publish(Message0), + %% Publish using rule engine. + Message1 = emqx_message:make(ClientId, QoS, ?RULE_TOPIC_BIN, Payload), + emqx:publish(Message1), + + #{rule_id => RuleId} + end, + fun(#{rule_id := RuleId}, _Trace) -> + Data0 = receive_consumed(5_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + Data1 = receive_consumed(5_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := ?RULE_TOPIC_BIN + } + ], + Data1 + ), + ?retry( + _Sleep = 100, + _Attempts0 = 20, + begin + ?assertMatch( + #{ + counters := #{ + dropped := 0, + failed := 0, + late_reply := 0, + matched := 2, + received := 0, + retried := 0, + success := 2 + } + }, + emqx_resource_manager:get_metrics(ResourceId) + ), + ?assertEqual( + 1, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.success') + ), + ?assertEqual( + 0, emqx_metrics_worker:get(rule_metrics, RuleId, 'actions.failed') + ), + ok + end + ), + ok + end + ), + ok. + +%% Under normal operations, the bridge will be called async via +%% `simple_async_query'. 
+t_sync_query(Config) -> + ResourceId = resource_id(Config), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + Message = {send_message, #{payload => Payload}}, + ?assertMatch( + {ok, #{sequence_id := _}}, emqx_resource:simple_sync_query(ResourceId, Message) + ), + ok + end, + [] + ), + ok. + +t_create_via_http(Config) -> + ?check_trace( + begin + ?assertMatch({ok, _}, create_bridge_api(Config)), + + %% lightweight matrix testing some configs + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"buffer">> => + #{<<"mode">> => <<"disk">>} + } + ) + ), + ?assertMatch( + {ok, _}, + update_bridge_api( + Config, + #{ + <<"buffer">> => + #{ + <<"mode">> => <<"hybrid">>, + <<"memory_overload_protection">> => true + } + } + ) + ), + ok + end, + [] + ), + ok. + +t_start_stop(Config) -> + PulsarName = ?config(pulsar_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + + %% Check that the bridge probe API doesn't leak atoms. + redbug:start( + [ + "emqx_resource_manager:health_check_interval -> return", + "emqx_resource_manager:with_health_check -> return" + ], + [{msgs, 100}, {time, 30_000}] + ), + ProbeRes0 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes0), + AtomsBefore = erlang:system_info(atom_count), + %% Probe again; shouldn't have created more atoms. 
+ ProbeRes1 = probe_bridge_api( + Config, + #{<<"resource_opts">> => #{<<"health_check_interval">> => <<"1s">>}} + ), + ?assertMatch({ok, {{_, 204, _}, _Headers, _Body}}, ProbeRes1), + AtomsAfter = erlang:system_info(atom_count), + ?assertEqual(AtomsBefore, AtomsAfter), + + %% Now stop the bridge. + ?assertMatch( + {{ok, _}, {ok, _}}, + ?wait_async_action( + emqx_bridge:disable_enable(disable, ?BRIDGE_TYPE_BIN, PulsarName), + #{?snk_kind := pulsar_bridge_stopped}, + 5_000 + ) + ), + + ok + end, + fun(Trace) -> + %% one for each probe, one for real + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_producer_stopped, Trace)), + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_client_stopped, Trace)), + ?assertMatch([_, _, _], ?of_kind(pulsar_bridge_stopped, Trace)), + ok + end + ), + ok. + +t_on_get_status(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + %% Since the connection process is async, we give it some time to + %% stabilize and avoid flakiness. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ct:sleep(500), + ?assertEqual({ok, disconnected}, emqx_resource_manager:health_check(ResourceId)) + end), + %% Check that it recovers itself. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok. 
+ +t_start_when_down(Config) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + ResourceId = resource_id(Config), + ?check_trace( + begin + emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + ok + end), + %% Should recover given enough time. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + ok + end, + [] + ), + ok. + +t_send_when_down(Config) -> + do_t_send_with_failure(Config, down). + +t_send_when_timeout(Config) -> + do_t_send_with_failure(Config, timeout). + +do_t_send_with_failure(Config, FailureType) -> + ProxyPort = ?config(proxy_port, Config), + ProxyHost = ?config(proxy_host, Config), + ProxyName = ?config(proxy_name, Config), + MQTTTopic = ?config(mqtt_topic, Config), + QoS = 0, + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := pulsar_producer_bridge_started}, + 10_000 + ), + ?check_trace( + begin + emqx_common_test_helpers:with_failure( + FailureType, ProxyName, ProxyHost, ProxyPort, fun() -> + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Message0), + #{ + ?snk_kind := pulsar_producer_on_query_async, + ?snk_span := {complete, _} + }, + 5_000 + ), + ok + end + ), + ok + end, + fun(_Trace) -> + %% Should recover given enough time. + Data0 = receive_consumed(20_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end + ), + ok. + +%% Check that we correctly terminate the pulsar client when the pulsar +%% producer processes fail to start for whatever reason. 
+t_failure_to_start_producer(Config) -> + ?check_trace( + begin + ?force_ordering( + #{?snk_kind := name_registered}, + #{?snk_kind := pulsar_producer_about_to_start_producers} + ), + spawn_link(fun() -> + ?tp(will_register_name, #{}), + {ok, #{producer_name := ProducerName}} = ?block_until( + #{?snk_kind := pulsar_producer_capture_name}, 10_000 + ), + true = register(ProducerName, self()), + ?tp(name_registered, #{name => ProducerName}), + %% Just simulating another process so that starting the + %% producers fail. Currently it does a gen_server:call + %% with `infinity' timeout, so this is just to avoid + %% hanging. + receive + {'$gen_call', From, _Request} -> + gen_server:reply(From, {error, im_not, your_producer}) + end, + receive + die -> ok + end + end), + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge(Config), + #{?snk_kind := pulsar_bridge_client_stopped}, + 20_000 + ), + ok + end, + [] + ), + ok. + +%% Check the driver recovers itself if one of the producer processes +%% die for whatever reason. 
+t_producer_process_crash(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + QoS = 0, + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + ?check_trace( + begin + {{ok, _}, {ok, _}} = + ?wait_async_action( + create_bridge( + Config, + #{<<"buffer">> => #{<<"mode">> => <<"disk">>}} + ), + #{?snk_kind := pulsar_producer_bridge_started}, + 10_000 + ), + [ProducerPid | _] = [ + Pid + || {_Name, PS, _Type, _Mods} <- supervisor:which_children(pulsar_producers_sup), + Pid <- element(2, process_info(PS, links)), + case proc_lib:initial_call(Pid) of + {pulsar_producer, init, _} -> true; + _ -> false + end + ], + Ref = monitor(process, ProducerPid), + exit(ProducerPid, kill), + receive + {'DOWN', Ref, process, ProducerPid, _Killed} -> + ok + after 1_000 -> ct:fail("pid didn't die") + end, + ?assertEqual({ok, connecting}, emqx_resource_manager:health_check(ResourceId)), + %% Should recover given enough time. + ?retry( + _Sleep = 1_000, + _Attempts = 20, + ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceId)) + ), + {_, {ok, _}} = + ?wait_async_action( + emqx:publish(Message0), + #{?snk_kind := pulsar_producer_on_query_async, ?snk_span := {complete, _}}, + 5_000 + ), + Data0 = receive_consumed(20_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end, + [] + ), + ok. 
+ +t_cluster(Config) -> + MQTTTopic = ?config(mqtt_topic, Config), + ResourceId = resource_id(Config), + Cluster = cluster(Config), + ClientId = emqx_guid:to_hexstr(emqx_guid:gen()), + QoS = 0, + Payload = emqx_guid:to_hexstr(emqx_guid:gen()), + ?check_trace( + begin + Nodes = [N1, N2 | _] = start_cluster(Cluster), + {ok, SRef0} = snabbkaffe:subscribe( + ?match_event(#{?snk_kind := pulsar_producer_bridge_started}), + length(Nodes), + 15_000 + ), + {ok, _} = erpc:call(N1, fun() -> create_bridge(Config) end), + {ok, _} = snabbkaffe:receive_events(SRef0), + lists:foreach( + fun(N) -> + ?retry( + _Sleep = 1_000, + _Attempts0 = 20, + ?assertEqual( + {ok, connected}, + erpc:call(N, emqx_resource_manager, health_check, [ResourceId]), + #{node => N} + ) + ) + end, + Nodes + ), + erpc:multicall(Nodes, fun wait_until_producer_connected/0), + Message0 = emqx_message:make(ClientId, QoS, MQTTTopic, Payload), + erpc:call(N2, emqx, publish, [Message0]), + + lists:foreach( + fun(N) -> + ?assertEqual( + {ok, connected}, + erpc:call(N, emqx_resource_manager, health_check, [ResourceId]), + #{node => N} + ) + end, + Nodes + ), + + ok + end, + fun(_Trace) -> + Data0 = receive_consumed(10_000), + ?assertMatch( + [ + #{ + <<"clientid">> := ClientId, + <<"event">> := <<"message.publish">>, + <<"payload">> := Payload, + <<"topic">> := MQTTTopic + } + ], + Data0 + ), + ok + end + ), + ok. diff --git a/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl new file mode 100644 index 000000000..834978851 --- /dev/null +++ b/apps/emqx_bridge_pulsar/test/emqx_bridge_pulsar_impl_producer_SUITE_data/pulsar_echo_consumer.erl @@ -0,0 +1,25 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- +-module(pulsar_echo_consumer). + +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +%% pulsar consumer API +-export([init/2, handle_message/3]). + +init(Topic, Args) -> + ct:pal("consumer init: ~p", [#{topic => Topic, args => Args}]), + SendTo = maps:get(send_to, Args), + ?tp(pulsar_echo_consumer_init, #{topic => Topic}), + {ok, #{topic => Topic, send_to => SendTo}}. + +handle_message(Message, Payloads, State) -> + #{send_to := SendTo, topic := Topic} = State, + ct:pal( + "pulsar consumer received:\n ~p", + [#{message => Message, payloads => Payloads}] + ), + SendTo ! {pulsar_message, #{topic => Topic, message => Message, payloads => Payloads}}, + ?tp(pulsar_echo_consumer_message, #{topic => Topic, message => Message, payloads => Payloads}), + {ok, 'Individual', State}. diff --git a/apps/emqx_bridge_timescale/README.md b/apps/emqx_bridge_timescale/README.md index 96f70f847..071cb0fa6 100644 --- a/apps/emqx_bridge_timescale/README.md +++ b/apps/emqx_bridge_timescale/README.md @@ -9,6 +9,7 @@ The application is used to connect EMQX and TimescaleDB. User can create a rule and easily ingest IoT data into TimescaleDB by leveraging [EMQX Rules](https://docs.emqx.com/en/enterprise/v5.0/data-integration/rules.html). + # HTTP APIs diff --git a/apps/emqx_conf/README.md b/apps/emqx_conf/README.md new file mode 100644 index 000000000..f1efe7987 --- /dev/null +++ b/apps/emqx_conf/README.md @@ -0,0 +1,15 @@ +# Configuration Management + +This application provides configuration management capabilities for EMQX. + +At compile time it reads all configuration schemas and generates the following files: + * `config-en.md`: documentation for all configuration options. + * `schema-en.json`: JSON description of all configuration schema options. + * `emqx.conf.example`: an example of a complete configuration file. + +At runtime, it provides: +- Cluster configuration synchronization capability. 
+ Responsible for synchronizing hot-update configurations from the HTTP API to the entire cluster + and ensuring consistency. + +In addition, this application manages system-level configurations such as `cluster`, `node`, `log`. diff --git a/apps/emqx_conf/etc/emqx_conf.conf b/apps/emqx_conf/etc/emqx_conf.conf index a54894dcd..76e3c0805 100644 --- a/apps/emqx_conf/etc/emqx_conf.conf +++ b/apps/emqx_conf/etc/emqx_conf.conf @@ -1,7 +1,13 @@ ## NOTE: -## The EMQX configuration is prioritized (overlayed) in the following order: -## `data/configs/cluster.hocon < etc/emqx.conf < environment variables`. - +## This config file overrides data/configs/cluster.hocon, +## and is merged with environment variables which start with 'EMQX_' prefix. +## +## Config changes made from EMQX dashboard UI, management HTTP API, or CLI +## are stored in data/configs/cluster.hocon. +## To avoid confusion, please do not store the same configs in both files. +## +## See https://docs.emqx.com/en/enterprise/v5.0/configuration/configuration.html +## Configuration full example can be found in emqx.conf.example node { name = "emqx@127.0.0.1" @@ -9,13 +15,6 @@ node { data_dir = "{{ platform_data_dir }}" } -log { - file_handlers.default { - level = warning - file = "{{ platform_log_dir }}/emqx.log" - } -} - cluster { name = emqxcl discovery_strategy = manual diff --git a/apps/emqx_conf/src/emqx_conf.app.src b/apps/emqx_conf/src/emqx_conf.app.src index 234690374..e6c3d9cd9 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.17"}, + {vsn, "0.1.19"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 8632df139..6668d6424 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -18,12 +18,14 @@ 
-compile({no_auto_import, [get/1, get/2]}). -include_lib("emqx/include/logger.hrl"). -include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx/include/emqx_schema.hrl"). -export([add_handler/2, remove_handler/1]). -export([get/1, get/2, get_raw/1, get_raw/2, get_all/1]). -export([get_by_node/2, get_by_node/3]). -export([update/3, update/4]). -export([remove/2, remove/3]). +-export([tombstone/2]). -export([reset/2, reset/3]). -export([dump_schema/2]). -export([schema_module/0]). @@ -31,8 +33,9 @@ %% TODO: move to emqx_dashboard when we stop building api schema at build time -export([ - hotconf_schema_json/1, - bridge_schema_json/1 + hotconf_schema_json/0, + bridge_schema_json/0, + hocon_schema_to_spec/2 ]). %% for rpc @@ -113,6 +116,10 @@ update(Node, KeyPath, UpdateReq, Opts0) when Node =:= node() -> update(Node, KeyPath, UpdateReq, Opts) -> emqx_conf_proto_v2:update(Node, KeyPath, UpdateReq, Opts). +%% @doc Mark the specified key path as tombstone +tombstone(KeyPath, Opts) -> + update(KeyPath, ?TOMBSTONE_CONFIG_CHANGE_REQ, Opts). + %% @doc remove all value of key path in cluster-override.conf or local-override.conf. -spec remove(emqx_utils_maps:config_key_path(), emqx_config:update_opts()) -> {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. @@ -149,7 +156,6 @@ dump_schema(Dir, SchemaModule) -> lists:foreach( fun(Lang) -> ok = gen_config_md(Dir, SchemaModule, Lang), - ok = gen_api_schema_json(Dir, Lang), ok = gen_schema_json(Dir, SchemaModule, Lang) end, ["en", "zh"] @@ -176,41 +182,15 @@ gen_schema_json(Dir, SchemaModule, Lang) -> IoData = emqx_utils_json:encode(JsonMap, [pretty, force_utf8]), ok = file:write_file(SchemaJsonFile, IoData). -%% TODO: delete this function when we stop generating this JSON at build time. -gen_api_schema_json(Dir, Lang) -> - gen_api_schema_json_hotconf(Dir, Lang), - gen_api_schema_json_bridge(Dir, Lang). - -%% TODO: delete this function when we stop generating this JSON at build time. 
-gen_api_schema_json_hotconf(Dir, Lang) -> - File = schema_filename(Dir, "hot-config-schema-", Lang), - IoData = hotconf_schema_json(Lang), - ok = write_api_schema_json_file(File, IoData). - -%% TODO: delete this function when we stop generating this JSON at build time. -gen_api_schema_json_bridge(Dir, Lang) -> - File = schema_filename(Dir, "bridge-api-", Lang), - IoData = bridge_schema_json(Lang), - ok = write_api_schema_json_file(File, IoData). - -%% TODO: delete this function when we stop generating this JSON at build time. -write_api_schema_json_file(File, IoData) -> - io:format(user, "===< Generating: ~s~n", [File]), - file:write_file(File, IoData). - %% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. -hotconf_schema_json(Lang) -> +hotconf_schema_json() -> SchemaInfo = #{title => <<"EMQX Hot Conf API Schema">>, version => <<"0.1.0">>}, - gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo, Lang). + gen_api_schema_json_iodata(emqx_mgmt_api_configs, SchemaInfo). %% TODO: move this function to emqx_dashboard when we stop generating this JSON at build time. -bridge_schema_json(Lang) -> +bridge_schema_json() -> SchemaInfo = #{title => <<"EMQX Data Bridge API Schema">>, version => <<"0.1.0">>}, - gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo, Lang). - -schema_filename(Dir, Prefix, Lang) -> - Filename = Prefix ++ Lang ++ ".json", - filename:join([Dir, Filename]). + gen_api_schema_json_iodata(emqx_bridge_api, SchemaInfo). %% TODO: remove it and also remove hocon_md.erl and friends. %% markdown generation from schema is a failure and we are moving to an interactive @@ -270,50 +250,11 @@ gen_example(File, SchemaModule) -> Example = hocon_schema_example:gen(SchemaModule, Opts), file:write_file(File, Example). -%% TODO: move this to emqx_dashboard when we stop generating -%% this JSON at build time. 
-gen_api_schema_json_iodata(SchemaMod, SchemaInfo, Lang) -> - {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( +gen_api_schema_json_iodata(SchemaMod, SchemaInfo) -> + emqx_dashboard_swagger:gen_api_schema_json_iodata( SchemaMod, - #{ - schema_converter => fun hocon_schema_to_spec/2, - i18n_lang => Lang - } - ), - ApiSpec = lists:foldl( - fun({Path, Spec, _, _}, Acc) -> - NewSpec = maps:fold( - fun(Method, #{responses := Responses}, SubAcc) -> - case Responses of - #{ - <<"200">> := - #{ - <<"content">> := #{ - <<"application/json">> := #{<<"schema">> := Schema} - } - } - } -> - SubAcc#{Method => Schema}; - _ -> - SubAcc - end - end, - #{}, - Spec - ), - Acc#{list_to_atom(Path) => NewSpec} - end, - #{}, - ApiSpec0 - ), - Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0), - emqx_utils_json:encode( - #{ - info => SchemaInfo, - paths => ApiSpec, - components => #{schemas => Components} - }, - [pretty, force_utf8] + SchemaInfo, + fun ?MODULE:hocon_schema_to_spec/2 ). -define(TO_REF(_N_, _F_), iolist_to_binary([to_bin(_N_), ".", to_bin(_F_)])). diff --git a/apps/emqx_conf/src/emqx_conf_app.erl b/apps/emqx_conf/src/emqx_conf_app.erl index 35a79ea6e..70234b525 100644 --- a/apps/emqx_conf/src/emqx_conf_app.erl +++ b/apps/emqx_conf/src/emqx_conf_app.erl @@ -32,12 +32,8 @@ start(_StartType, _StartArgs) -> ok = init_conf() catch C:E:St -> - ?SLOG(critical, #{ - msg => failed_to_init_config, - exception => C, - reason => E, - stacktrace => St - }), + %% logger is not quite ready. 
+ io:format(standard_error, "Failed to load config~n~p~n~p~n~p~n", [C, E, St]), init:stop(1) end, ok = emqx_config_logger:refresh_config(), @@ -66,7 +62,8 @@ get_override_config_file() -> conf => Conf, tnx_id => TnxId, node => Node, - has_deprecated_file => HasDeprecateFile + has_deprecated_file => HasDeprecateFile, + release => emqx_app:get_release() } end, case mria:ro_transaction(?CLUSTER_RPC_SHARD, Fun) of @@ -91,15 +88,8 @@ sync_data_from_node() -> %% Internal functions %% ------------------------------------------------------------------------------ --ifdef(TEST). init_load() -> - emqx_config:init_load(emqx_conf:schema_module(), #{raw_with_default => false}). - --else. - -init_load() -> - emqx_config:init_load(emqx_conf:schema_module(), #{raw_with_default => true}). --endif. + emqx_config:init_load(emqx_conf:schema_module()). init_conf() -> %% Workaround for https://github.com/emqx/mria/issues/94: @@ -175,11 +165,13 @@ copy_override_conf_from_core_node() -> _ -> [{ok, Info} | _] = lists:sort(fun conf_sort/2, Ready), #{node := Node, conf := RawOverrideConf, tnx_id := TnxId} = Info, - HasDeprecatedFile = maps:get(has_deprecated_file, Info, false), + HasDeprecatedFile = has_deprecated_file(Info), ?SLOG(debug, #{ msg => "copy_cluster_conf_from_core_node_success", node => Node, has_deprecated_file => HasDeprecatedFile, + local_release => emqx_app:get_release(), + remote_release => maps:get(release, Info, "before_v5.0.24|e5.0.3"), data_dir => emqx:data_dir(), tnx_id => TnxId }), @@ -227,3 +219,13 @@ sync_data_from_node(Node) -> ?SLOG(emergency, #{node => Node, msg => "sync_data_from_node_failed", reason => Error}), error(Error) end. + +has_deprecated_file(#{conf := Conf} = Info) -> + case maps:find(has_deprecated_file, Info) of + {ok, HasDeprecatedFile} -> + HasDeprecatedFile; + error -> + %% The old version don't have emqx_config:has_deprecated_file/0 + %% Conf is not empty if deprecated file is found. + Conf =/= #{} + end. 
diff --git a/apps/emqx_conf/src/emqx_conf_schema.erl b/apps/emqx_conf/src/emqx_conf_schema.erl index f3f014321..e6ccc3842 100644 --- a/apps/emqx_conf/src/emqx_conf_schema.erl +++ b/apps/emqx_conf/src/emqx_conf_schema.erl @@ -93,7 +93,10 @@ roots() -> {"log", sc( ?R_REF("log"), - #{translate_to => ["kernel"]} + #{ + translate_to => ["kernel"], + importance => ?IMPORTANCE_HIGH + } )}, {"rpc", sc( @@ -335,11 +338,12 @@ fields(cluster_etcd) -> desc => ?DESC(cluster_etcd_node_ttl) } )}, - {"ssl", + {"ssl_options", sc( ?R_REF(emqx_schema, "ssl_client_opts"), #{ desc => ?DESC(cluster_etcd_ssl), + alias => [ssl], 'readOnly' => true } )} @@ -471,7 +475,7 @@ fields("node") -> %% for now, it's tricky to use a different data_dir %% otherwise data paths in cluster config may differ %% TODO: change configurable data file paths to relative - importance => ?IMPORTANCE_HIDDEN, + importance => ?IMPORTANCE_LOW, desc => ?DESC(node_data_dir) } )}, @@ -862,15 +866,25 @@ fields("rpc") -> ]; fields("log") -> [ - {"console_handler", ?R_REF("console_handler")}, + {"console_handler", + sc( + ?R_REF("console_handler"), + #{importance => ?IMPORTANCE_HIGH} + )}, {"file_handlers", sc( map(name, ?R_REF("log_file_handler")), - #{desc => ?DESC("log_file_handlers")} + #{ + desc => ?DESC("log_file_handlers"), + %% because file_handlers is a map + %% so there has to be a default value in order to populate the raw configs + default => #{<<"default">> => #{<<"level">> => <<"warning">>}}, + importance => ?IMPORTANCE_HIGH + } )} ]; fields("console_handler") -> - log_handler_common_confs(false); + log_handler_common_confs(console); fields("log_file_handler") -> [ {"file", @@ -878,6 +892,8 @@ fields("log_file_handler") -> file(), #{ desc => ?DESC("log_file_handler_file"), + default => <<"${EMQX_LOG_DIR}/emqx.log">>, + converter => fun emqx_schema:naive_env_interpolation/1, validator => fun validate_file_location/1 } )}, @@ -891,10 +907,11 @@ fields("log_file_handler") -> hoconsc:union([infinity, 
emqx_schema:bytesize()]), #{ default => <<"50MB">>, - desc => ?DESC("log_file_handler_max_size") + desc => ?DESC("log_file_handler_max_size"), + importance => ?IMPORTANCE_MEDIUM } )} - ] ++ log_handler_common_confs(true); + ] ++ log_handler_common_confs(file); fields("log_rotation") -> [ {"enable", @@ -1103,14 +1120,33 @@ tr_logger_level(Conf) -> tr_logger_handlers(Conf) -> emqx_config_logger:tr_handlers(Conf). -log_handler_common_confs(Enable) -> +log_handler_common_confs(Handler) -> + lists:map( + fun + ({_Name, #{importance := _}} = F) -> F; + ({Name, Sc}) -> {Name, Sc#{importance => ?IMPORTANCE_LOW}} + end, + do_log_handler_common_confs(Handler) + ). +do_log_handler_common_confs(Handler) -> + %% we rarely support dynamic defaults like this + %% for this one, we have build-time defualut the same as runtime default + %% so it's less tricky + EnableValues = + case Handler of + console -> ["console", "both"]; + file -> ["file", "both", "", false] + end, + EnvValue = os:getenv("EMQX_DEFAULT_LOG_HANDLER"), + Enable = lists:member(EnvValue, EnableValues), [ {"enable", sc( boolean(), #{ default => Enable, - desc => ?DESC("common_handler_enable") + desc => ?DESC("common_handler_enable"), + importance => ?IMPORTANCE_LOW } )}, {"level", @@ -1127,7 +1163,8 @@ log_handler_common_confs(Enable) -> #{ default => <<"system">>, desc => ?DESC("common_handler_time_offset"), - validator => fun validate_time_offset/1 + validator => fun validate_time_offset/1, + importance => ?IMPORTANCE_LOW } )}, {"chars_limit", @@ -1135,7 +1172,8 @@ log_handler_common_confs(Enable) -> hoconsc:union([unlimited, range(100, inf)]), #{ default => unlimited, - desc => ?DESC("common_handler_chars_limit") + desc => ?DESC("common_handler_chars_limit"), + importance => ?IMPORTANCE_LOW } )}, {"formatter", @@ -1143,7 +1181,8 @@ log_handler_common_confs(Enable) -> hoconsc:enum([text, json]), #{ default => text, - desc => ?DESC("common_handler_formatter") + desc => ?DESC("common_handler_formatter"), + 
importance => ?IMPORTANCE_MEDIUM } )}, {"single_line", @@ -1151,7 +1190,8 @@ log_handler_common_confs(Enable) -> boolean(), #{ default => true, - desc => ?DESC("common_handler_single_line") + desc => ?DESC("common_handler_single_line"), + importance => ?IMPORTANCE_LOW } )}, {"sync_mode_qlen", @@ -1199,7 +1239,7 @@ log_handler_common_confs(Enable) -> ]. crash_dump_file_default() -> - case os:getenv("RUNNER_LOG_DIR") of + case os:getenv("EMQX_LOG_DIR") of false -> %% testing, or running emqx app as deps <<"log/erl_crash.dump">>; diff --git a/apps/emqx_connector/src/emqx_connector.app.src b/apps/emqx_connector/src/emqx_connector.app.src index c0a19824c..db55c7032 100644 --- a/apps/emqx_connector/src/emqx_connector.app.src +++ b/apps/emqx_connector/src/emqx_connector.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_connector, [ {description, "EMQX Data Integration Connectors"}, - {vsn, "0.1.21"}, + {vsn, "0.1.22"}, {registered, []}, {mod, {emqx_connector_app, []}}, {applications, [ diff --git a/apps/emqx_connector/src/emqx_connector_http.erl b/apps/emqx_connector/src/emqx_connector_http.erl index 5b71e3f3a..ffb4bd8a9 100644 --- a/apps/emqx_connector/src/emqx_connector_http.erl +++ b/apps/emqx_connector/src/emqx_connector_http.erl @@ -305,7 +305,20 @@ on_query( Retry ) of - {error, Reason} when Reason =:= econnrefused; Reason =:= timeout -> + {error, Reason} when + Reason =:= econnrefused; + Reason =:= timeout; + Reason =:= {shutdown, normal}; + Reason =:= {shutdown, closed} + -> + ?SLOG(warning, #{ + msg => "http_connector_do_request_failed", + reason => Reason, + connector => InstId + }), + {error, {recoverable_error, Reason}}; + {error, {closed, _Message} = Reason} -> + %% _Message = "The connection was lost." 
?SLOG(warning, #{ msg => "http_connector_do_request_failed", reason => Reason, @@ -593,7 +606,16 @@ reply_delegator(ReplyFunAndArgs, Result) -> case Result of %% The normal reason happens when the HTTP connection times out before %% the request has been fully processed - {error, Reason} when Reason =:= econnrefused; Reason =:= timeout; Reason =:= normal -> + {error, Reason} when + Reason =:= econnrefused; + Reason =:= timeout; + Reason =:= normal; + Reason =:= {shutdown, normal} + -> + Result1 = {error, {recoverable_error, Reason}}, + emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result1); + {error, {closed, _Message} = Reason} -> + %% _Message = "The connection was lost." Result1 = {error, {recoverable_error, Reason}}, emqx_resource:apply_reply_fun(ReplyFunAndArgs, Result1); _ -> diff --git a/apps/emqx_connector/src/emqx_connector_ldap.erl b/apps/emqx_connector/src/emqx_connector_ldap.erl index e2121de22..c3e1db7d3 100644 --- a/apps/emqx_connector/src/emqx_connector_ldap.erl +++ b/apps/emqx_connector/src/emqx_connector_ldap.erl @@ -67,7 +67,17 @@ on_start( connector => InstId, config => emqx_utils:redact(Config) }), - Servers = emqx_schema:parse_servers(Servers0, ?LDAP_HOST_OPTIONS), + Servers1 = emqx_schema:parse_servers(Servers0, ?LDAP_HOST_OPTIONS), + Servers = + lists:map( + fun + (#{hostname := Host, port := Port0}) -> + {Host, Port0}; + (#{hostname := Host}) -> + Host + end, + Servers1 + ), SslOpts = case maps:get(enable, SSL) of true -> diff --git a/apps/emqx_connector/src/emqx_connector_mongo.erl b/apps/emqx_connector/src/emqx_connector_mongo.erl index a65a32842..dde8652f0 100644 --- a/apps/emqx_connector/src/emqx_connector_mongo.erl +++ b/apps/emqx_connector/src/emqx_connector_mongo.erl @@ -537,4 +537,9 @@ format_hosts(Hosts) -> lists:map(fun format_host/1, Hosts). parse_servers(HoconValue) -> - emqx_schema:parse_servers(HoconValue, ?MONGO_HOST_OPTIONS). 
+ lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(HoconValue, ?MONGO_HOST_OPTIONS) + ). diff --git a/apps/emqx_connector/src/emqx_connector_mqtt.erl b/apps/emqx_connector/src/emqx_connector_mqtt.erl index 5b488825b..5cafd2d50 100644 --- a/apps/emqx_connector/src/emqx_connector_mqtt.erl +++ b/apps/emqx_connector/src/emqx_connector_mqtt.erl @@ -248,13 +248,12 @@ make_sub_confs(EmptyMap, _Conf, _) when map_size(EmptyMap) == 0 -> undefined; make_sub_confs(undefined, _Conf, _) -> undefined; -make_sub_confs(SubRemoteConf, Conf, InstanceId) -> - ResId = emqx_resource_manager:manager_id_to_resource_id(InstanceId), +make_sub_confs(SubRemoteConf, Conf, ResourceId) -> case maps:find(hookpoint, Conf) of error -> error({no_hookpoint_provided, Conf}); {ok, HookPoint} -> - MFA = {?MODULE, on_message_received, [HookPoint, ResId]}, + MFA = {?MODULE, on_message_received, [HookPoint, ResourceId]}, SubRemoteConf#{on_message_received => MFA} end. diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl index 45d459e70..b8c1250fe 100644 --- a/apps/emqx_connector/src/emqx_connector_mysql.erl +++ b/apps/emqx_connector/src/emqx_connector_mysql.erl @@ -98,7 +98,7 @@ on_start( ssl := SSL } = Config ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?MYSQL_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?MYSQL_HOST_OPTIONS), ?SLOG(info, #{ msg => "starting_mysql_connector", connector => InstId, diff --git a/apps/emqx_connector/src/emqx_connector_pgsql.erl b/apps/emqx_connector/src/emqx_connector_pgsql.erl index ddbf9491d..3b2375d04 100644 --- a/apps/emqx_connector/src/emqx_connector_pgsql.erl +++ b/apps/emqx_connector/src/emqx_connector_pgsql.erl @@ -91,7 +91,7 @@ on_start( ssl := SSL } = Config ) -> - {Host, Port} = emqx_schema:parse_server(Server, ?PGSQL_HOST_OPTIONS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, 
?PGSQL_HOST_OPTIONS), ?SLOG(info, #{ msg => "starting_postgresql_connector", connector => InstId, diff --git a/apps/emqx_connector/src/emqx_connector_redis.erl b/apps/emqx_connector/src/emqx_connector_redis.erl index e2155eb49..32ac77226 100644 --- a/apps/emqx_connector/src/emqx_connector_redis.erl +++ b/apps/emqx_connector/src/emqx_connector_redis.erl @@ -131,7 +131,13 @@ on_start( _ -> servers end, Servers0 = maps:get(ConfKey, Config), - Servers = [{servers, emqx_schema:parse_servers(Servers0, ?REDIS_HOST_OPTIONS)}], + Servers1 = lists:map( + fun(#{hostname := Host, port := Port}) -> + {Host, Port} + end, + emqx_schema:parse_servers(Servers0, ?REDIS_HOST_OPTIONS) + ), + Servers = [{servers, Servers1}], Database = case Type of cluster -> []; diff --git a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl index e08804685..2a40980af 100644 --- a/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl +++ b/apps/emqx_connector/src/mqtt/emqx_connector_mqtt_schema.erl @@ -293,4 +293,5 @@ qos() -> hoconsc:union([emqx_schema:qos(), binary()]). parse_server(Str) -> - emqx_schema:parse_server(Str, ?MQTT_HOST_OPTS). + #{hostname := Host, port := Port} = emqx_schema:parse_server(Str, ?MQTT_HOST_OPTS), + {Host, Port}. 
diff --git a/apps/emqx_dashboard/etc/emqx_dashboard.conf b/apps/emqx_dashboard/etc/emqx_dashboard.conf index 856779500..67e3f61ec 100644 --- a/apps/emqx_dashboard/etc/emqx_dashboard.conf +++ b/apps/emqx_dashboard/etc/emqx_dashboard.conf @@ -2,6 +2,4 @@ dashboard { listeners.http { bind = 18083 } - default_username = "admin" - default_password = "public" } diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index 8c7e424e0..bd022f226 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,7 +2,7 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! - {vsn, "5.0.19"}, + {vsn, "5.0.20"}, {modules, []}, {registered, [emqx_dashboard_sup]}, {applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]}, diff --git a/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl index 9d8d1905d..b503fed88 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_desc_cache.erl @@ -36,7 +36,7 @@ init() -> OtherLangDesc0 = filelib:wildcard("desc.*.hocon", WwwStaticDir), OtherLangDesc = lists:map(fun(F) -> filename:join([WwwStaticDir, F]) end, OtherLangDesc0), Files = [EngDesc | OtherLangDesc], - ?MODULE = ets:new(?MODULE, [named_table, public, set, {read_concurrency, true}]), + ok = emqx_utils_ets:new(?MODULE, [public, ordered_set, {read_concurrency, true}]), ok = lists:foreach(fun(F) -> load_desc(?MODULE, F) end, Files). %% @doc Load the description of the configuration items from the file. 
diff --git a/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl index 898d95b3c..e4f2f0c1a 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_schema_api.erl @@ -45,18 +45,11 @@ schema("/schemas/:name") -> 'operationId' => get_schema, get => #{ parameters => [ - {name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})}, - {lang, - hoconsc:mk(typerefl:string(), #{ - in => query, - default => <<"en">>, - desc => <<"The language of the schema.">> - })} + {name, hoconsc:mk(hoconsc:enum([hotconf, bridges]), #{in => path})} ], desc => << "Get the schema JSON of the specified name. " - "NOTE: you should never need to make use of this API " - "unless you are building a multi-lang dashboaard." + "NOTE: only intended for EMQX Dashboard." >>, tags => ?TAGS, security => [], @@ -71,14 +64,13 @@ schema("/schemas/:name") -> %%-------------------------------------------------------------------- get_schema(get, #{ - bindings := #{name := Name}, - query_string := #{<<"lang">> := Lang} + bindings := #{name := Name} }) -> - {200, gen_schema(Name, iolist_to_binary(Lang))}; + {200, gen_schema(Name)}; get_schema(get, _) -> {400, ?BAD_REQUEST, <<"unknown">>}. -gen_schema(hotconf, Lang) -> - emqx_conf:hotconf_schema_json(Lang); -gen_schema(bridges, Lang) -> - emqx_conf:bridge_schema_json(Lang). +gen_schema(hotconf) -> + emqx_conf:hotconf_schema_json(); +gen_schema(bridges) -> + emqx_conf:bridge_schema_json(). diff --git a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl index 6422d627c..6fedb8d69 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_swagger.erl @@ -31,7 +31,11 @@ -export([relative_uri/1]). -export([compose_filters/2]). --export([filter_check_request/2, filter_check_request_and_translate_body/2]). 
+-export([ + filter_check_request/2, + filter_check_request_and_translate_body/2, + gen_api_schema_json_iodata/3 +]). -ifdef(TEST). -export([ @@ -77,6 +81,8 @@ ]) ). +-define(SPECIAL_LANG_MSGID, <<"$msgid">>). + -define(MAX_ROW_LIMIT, 1000). -define(DEFAULT_ROW, 100). @@ -221,6 +227,50 @@ file_schema(FileName) -> } }. +gen_api_schema_json_iodata(SchemaMod, SchemaInfo, Converter) -> + {ApiSpec0, Components0} = emqx_dashboard_swagger:spec( + SchemaMod, + #{ + schema_converter => Converter, + i18n_lang => ?SPECIAL_LANG_MSGID + } + ), + ApiSpec = lists:foldl( + fun({Path, Spec, _, _}, Acc) -> + NewSpec = maps:fold( + fun(Method, #{responses := Responses}, SubAcc) -> + case Responses of + #{ + <<"200">> := + #{ + <<"content">> := #{ + <<"application/json">> := #{<<"schema">> := Schema} + } + } + } -> + SubAcc#{Method => Schema}; + _ -> + SubAcc + end + end, + #{}, + Spec + ), + Acc#{list_to_atom(Path) => NewSpec} + end, + #{}, + ApiSpec0 + ), + Components = lists:foldl(fun(M, Acc) -> maps:merge(M, Acc) end, #{}, Components0), + emqx_utils_json:encode( + #{ + info => SchemaInfo, + paths => ApiSpec, + components => #{schemas => Components} + }, + [pretty, force_utf8] + ). + -spec compose_filters(filter(), filter()) -> filter(). compose_filters(undefined, Filter2) -> Filter2; @@ -288,18 +338,17 @@ parse_spec_ref(Module, Path, Options) -> Schema = try erlang:apply(Module, schema, [Path]) - %% better error message catch - error:Reason:Stacktrace -> - %% raise a new error with the same stacktrace. - %% it's a bug if this happens. - %% i.e. if a path is listed in the spec but the module doesn't - %% implement it or crashes when trying to build the schema. 
- erlang:raise( - error, - #{mfa => {Module, schema, [Path]}, reason => Reason}, - Stacktrace - ) + Error:Reason:Stacktrace -> + %% This error is intended to fail the build + %% hence print to standard_error + io:format( + standard_error, + "Failed to generate swagger for path ~p in module ~p~n" + "error:~p~nreason:~p~n~p~n", + [Module, Path, Error, Reason, Stacktrace] + ), + error({failed_to_generate_swagger_spec, Module, Path}) end, {Specs, Refs} = maps:fold( fun(Method, Meta, {Acc, RefsAcc}) -> @@ -534,6 +583,14 @@ maybe_add_summary_from_label(Spec, Hocon, Options) -> get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) -> Lang = get_lang(Options), + case Lang of + ?SPECIAL_LANG_MSGID -> + make_msgid(Namespace, Id, Tag); + _ -> + get_i18n_text(Lang, Namespace, Id, Tag, Default) + end. + +get_i18n_text(Lang, Namespace, Id, Tag, Default) -> case emqx_dashboard_desc_cache:lookup(Lang, Namespace, Id, Tag) of undefined -> Default; @@ -541,6 +598,14 @@ get_i18n(Tag, ?DESC(Namespace, Id), Default, Options) -> Text end. +%% Format:$msgid:Namespace.Id.Tag +%% e.g. $msgid:emqx_schema.key.desc +%% $msgid:emqx_schema.key.label +%% if needed, the consumer of this schema JSON can use this msgid to +%% resolve the text in the i18n database. +make_msgid(Namespace, Id, Tag) -> + iolist_to_binary(["$msgid:", to_bin(Namespace), ".", to_bin(Id), ".", Tag]). + %% So far i18n_lang in options is only used at build time. %% At runtime, it's still the global config which controls the language. get_lang(#{i18n_lang := Lang}) -> Lang; diff --git a/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl b/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl new file mode 100644 index 000000000..e4425aed8 --- /dev/null +++ b/apps/emqx_dashboard/test/emqx_dashboard_schema_api_SUITE.erl @@ -0,0 +1,52 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_dashboard_schema_api_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("emqx/include/http_api.hrl"). + +-include_lib("eunit/include/eunit.hrl"). + +-define(SERVER, "http://127.0.0.1:18083/api/v5"). + +-import(emqx_mgmt_api_test_util, [request/2]). + +all() -> + emqx_common_test_helpers:all(?MODULE). + +init_per_suite(Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite([emqx_conf]). + +t_hotconf(_) -> + Url = ?SERVER ++ "/schemas/hotconf", + {ok, 200, Body} = request(get, Url), + %% assert it's a valid json + _ = emqx_utils_json:decode(Body), + ok. + +t_bridges(_) -> + Url = ?SERVER ++ "/schemas/bridges", + {ok, 200, Body} = request(get, Url), + %% assert it's a valid json + _ = emqx_utils_json:decode(Body), + ok. 
diff --git a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl index f2ba56e08..af4b901b2 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_requestBody_SUITE.erl @@ -308,10 +308,7 @@ t_nest_ref(_Config) -> t_none_ref(_Config) -> Path = "/ref/none", ?assertError( - #{ - mfa := {?MODULE, schema, [Path]}, - reason := function_clause - }, + {failed_to_generate_swagger_spec, ?MODULE, Path}, emqx_dashboard_swagger:parse_spec_ref(?MODULE, Path, #{}) ), ok. diff --git a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl index cda533cc2..a3d2b4e75 100644 --- a/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl +++ b/apps/emqx_dashboard/test/emqx_swagger_response_SUITE.erl @@ -278,10 +278,7 @@ t_bad_ref(_Config) -> t_none_ref(_Config) -> Path = "/ref/none", ?assertError( - #{ - mfa := {?MODULE, schema, ["/ref/none"]}, - reason := function_clause - }, + {failed_to_generate_swagger_spec, ?MODULE, Path}, validate(Path, #{}, []) ), ok. diff --git a/apps/emqx_exhook/test/emqx_exhook_SUITE.erl b/apps/emqx_exhook/test/emqx_exhook_SUITE.erl index d12f99917..ff313c8c8 100644 --- a/apps/emqx_exhook/test/emqx_exhook_SUITE.erl +++ b/apps/emqx_exhook/test/emqx_exhook_SUITE.erl @@ -301,10 +301,10 @@ t_cluster_name(_) -> ok end, - emqx_common_test_helpers:stop_apps([emqx, emqx_exhook]), + stop_apps([emqx, emqx_exhook]), emqx_common_test_helpers:start_apps([emqx, emqx_exhook], SetEnvFun), on_exit(fun() -> - emqx_common_test_helpers:stop_apps([emqx, emqx_exhook]), + stop_apps([emqx, emqx_exhook]), load_cfg(?CONF_DEFAULT), emqx_common_test_helpers:start_apps([emqx_exhook]), mria:wait_for_tables([?CLUSTER_MFA, ?CLUSTER_COMMIT]) @@ -489,3 +489,7 @@ data_file(Name) -> cert_file(Name) -> data_file(filename:join(["certs", Name])). 
+ +%% FIXME: this creats inter-test dependency +stop_apps(Apps) -> + emqx_common_test_helpers:stop_apps(Apps, #{erase_all_configs => false}). diff --git a/apps/emqx_machine/src/emqx_machine.erl b/apps/emqx_machine/src/emqx_machine.erl index 6872b150c..aa8f03ae5 100644 --- a/apps/emqx_machine/src/emqx_machine.erl +++ b/apps/emqx_machine/src/emqx_machine.erl @@ -43,7 +43,7 @@ start() -> start_sysmon(), configure_shard_transports(), ekka:start(), - ok = print_otp_version_warning(). + ok. graceful_shutdown() -> emqx_machine_terminator:graceful_wait(). @@ -61,17 +61,6 @@ set_backtrace_depth() -> is_ready() -> emqx_machine_terminator:is_running(). --if(?OTP_RELEASE > 22). -print_otp_version_warning() -> ok. --else. -print_otp_version_warning() -> - ?ULOG( - "WARNING: Running on Erlang/OTP version ~p. Recommended: 23~n", - [?OTP_RELEASE] - ). -% OTP_RELEASE > 22 --endif. - start_sysmon() -> _ = application:load(system_monitor), application:set_env(system_monitor, node_status_fun, {?MODULE, node_status}), diff --git a/apps/emqx_machine/test/emqx_machine_SUITE.erl b/apps/emqx_machine/test/emqx_machine_SUITE.erl index 691cda677..02d03d983 100644 --- a/apps/emqx_machine/test/emqx_machine_SUITE.erl +++ b/apps/emqx_machine/test/emqx_machine_SUITE.erl @@ -103,3 +103,13 @@ t_custom_shard_transports(_Config) -> emqx_machine:start(), ?assertEqual(distr, mria_config:shard_transport(Shard)), ok. + +t_node_status(_Config) -> + JSON = emqx_machine:node_status(), + ?assertMatch( + #{ + <<"backend">> := _, + <<"role">> := <<"core">> + }, + jsx:decode(JSON) + ). diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src index f423213af..34f3dd1fe 100644 --- a/apps/emqx_management/src/emqx_management.app.src +++ b/apps/emqx_management/src/emqx_management.app.src @@ -2,7 +2,7 @@ {application, emqx_management, [ {description, "EMQX Management API and CLI"}, % strict semver, bump manually! 
- {vsn, "5.0.19"}, + {vsn, "5.0.21"}, {modules, []}, {registered, [emqx_management_sup]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx, emqx_ctl]}, diff --git a/apps/emqx_management/src/emqx_mgmt_api_configs.erl b/apps/emqx_management/src/emqx_mgmt_api_configs.erl index af203dfe9..bc9aaf768 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_configs.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_configs.erl @@ -42,6 +42,7 @@ <<"alarm">>, <<"sys_topics">>, <<"sysmon">>, + <<"limiter">>, <<"log">>, <<"persistent_session_store">>, <<"zones">> diff --git a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl index de86700ef..152ccc599 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl @@ -293,12 +293,14 @@ listeners_type() -> listeners_info(Opts) -> Listeners = hocon_schema:fields(emqx_schema, "listeners"), lists:map( - fun({Type, #{type := ?MAP(_Name, ?R_REF(Mod, Field))}}) -> - Fields0 = hocon_schema:fields(Mod, Field), + fun({ListenerType, Schema}) -> + Type = emqx_schema:get_tombstone_map_value_type(Schema), + ?R_REF(Mod, StructName) = Type, + Fields0 = hocon_schema:fields(Mod, StructName), Fields1 = lists:keydelete("authentication", 1, Fields0), Fields3 = required_bind(Fields1, Opts), - Ref = listeners_ref(Type, Opts), - TypeAtom = list_to_existing_atom(Type), + Ref = listeners_ref(ListenerType, Opts), + TypeAtom = list_to_existing_atom(ListenerType), #{ ref => ?R_REF(Ref), schema => [ @@ -642,7 +644,7 @@ create(Path, Conf) -> wrap(emqx_conf:update(Path, {create, Conf}, ?OPTS(cluster))). ensure_remove(Path) -> - wrap(emqx_conf:remove(Path, ?OPTS(cluster))). + wrap(emqx_conf:tombstone(Path, ?OPTS(cluster))). 
wrap({error, {post_config_update, emqx_listeners, Reason}}) -> {error, Reason}; wrap({error, {pre_config_update, emqx_listeners, Reason}}) -> {error, Reason}; diff --git a/apps/emqx_management/src/emqx_mgmt_api_status.erl b/apps/emqx_management/src/emqx_mgmt_api_status.erl index 7d5c18e59..c0ee42e2b 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_status.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_status.erl @@ -45,6 +45,17 @@ schema("/status") -> #{ 'operationId' => get_status, get => #{ + parameters => [ + {format, + hoconsc:mk( + string(), + #{ + in => query, + default => <<"text">>, + desc => ?DESC(get_status_api_format) + } + )} + ], description => ?DESC(get_status_api), tags => ?TAGS, security => [], @@ -70,7 +81,16 @@ path() -> "/status". init(Req0, State) -> - {Code, Headers, Body} = running_status(), + Format = + try + QS = cowboy_req:parse_qs(Req0), + {_, F} = lists:keyfind(<<"format">>, 1, QS), + F + catch + _:_ -> + <<"text">> + end, + {Code, Headers, Body} = running_status(Format), Req = cowboy_req:reply(Code, Headers, Body, Req0), {ok, Req, State}. @@ -78,29 +98,52 @@ init(Req0, State) -> %% API Handler funcs %%-------------------------------------------------------------------- -get_status(get, _Params) -> - running_status(). +get_status(get, Params) -> + Format = maps:get(<<"format">>, maps:get(query_string, Params, #{}), <<"text">>), + running_status(iolist_to_binary(Format)). 
-running_status() -> +running_status(Format) -> case emqx_dashboard_listener:is_ready(timer:seconds(20)) of true -> - BrokerStatus = broker_status(), AppStatus = application_status(), - Body = io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), BrokerStatus, AppStatus]), + Body = do_get_status(AppStatus, Format), StatusCode = case AppStatus of running -> 200; not_running -> 503 end, + ContentType = + case Format of + <<"json">> -> <<"applicatin/json">>; + _ -> <<"text/plain">> + end, Headers = #{ - <<"content-type">> => <<"text/plain">>, + <<"content-type">> => ContentType, <<"retry-after">> => <<"15">> }, - {StatusCode, Headers, list_to_binary(Body)}; + {StatusCode, Headers, iolist_to_binary(Body)}; false -> {503, #{<<"retry-after">> => <<"15">>}, <<>>} end. +do_get_status(AppStatus, <<"json">>) -> + BrokerStatus = broker_status(), + emqx_utils_json:encode(#{ + node_name => atom_to_binary(node(), utf8), + rel_vsn => vsn(), + broker_status => atom_to_binary(BrokerStatus), + app_status => atom_to_binary(AppStatus) + }); +do_get_status(AppStatus, _) -> + BrokerStatus = broker_status(), + io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), BrokerStatus, AppStatus]). + +vsn() -> + iolist_to_binary([ + emqx_release:edition_vsn_prefix(), + emqx_release:version() + ]). + broker_status() -> case emqx:is_running() of true -> diff --git a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl index 33cb66eb2..977c81c2b 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl @@ -20,18 +20,51 @@ -include_lib("eunit/include/eunit.hrl"). --define(PORT, (20000 + ?LINE)). +-define(PORT(Base), (Base + ?LINE)). +-define(PORT, ?PORT(20000)). all() -> - emqx_common_test_helpers:all(?MODULE). + [ + {group, with_defaults_in_file}, + {group, without_defaults_in_file} + ]. 
+ +groups() -> + AllTests = emqx_common_test_helpers:all(?MODULE), + [ + {with_defaults_in_file, AllTests}, + {without_defaults_in_file, AllTests} + ]. init_per_suite(Config) -> - emqx_mgmt_api_test_util:init_suite([emqx_conf]), Config. -end_per_suite(_) -> - emqx_conf:remove([listeners, tcp, new], #{override_to => cluster}), - emqx_conf:remove([listeners, tcp, new1], #{override_to => local}), +end_per_suite(_Config) -> + ok. + +init_per_group(without_defaults_in_file, Config) -> + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + Config; +init_per_group(with_defaults_in_file, Config) -> + %% we have to materialize the config file with default values for this test group + %% because we want to test the deletion of non-existing listener + %% if there is no config file, the such deletion would result in a deletion + %% of the default listener. + Name = atom_to_list(?MODULE) ++ "-default-listeners", + TmpConfFullPath = inject_tmp_config_content(Name, default_listeners_hocon_text()), + emqx_mgmt_api_test_util:init_suite([emqx_conf]), + [{injected_conf_file, TmpConfFullPath} | Config]. + +end_per_group(Group, Config) -> + emqx_conf:tombstone([listeners, tcp, new], #{override_to => cluster}), + emqx_conf:tombstone([listeners, tcp, new1], #{override_to => local}), + case Group =:= with_defaults_in_file of + true -> + {_, File} = lists:keyfind(injected_conf_file, 1, Config), + ok = file:delete(File); + false -> + ok + end, emqx_mgmt_api_test_util:end_suite([emqx_conf]). 
init_per_testcase(Case, Config) -> @@ -52,30 +85,25 @@ end_per_testcase(Case, Config) -> t_max_connection_default({init, Config}) -> emqx_mgmt_api_test_util:end_suite([emqx_conf]), - Etc = filename:join(["etc", "emqx.conf.all"]), - TmpConfName = atom_to_list(?FUNCTION_NAME) ++ ".conf", - Inc = filename:join(["etc", TmpConfName]), - ConfFile = emqx_common_test_helpers:app_path(emqx_conf, Etc), - IncFile = emqx_common_test_helpers:app_path(emqx_conf, Inc), Port = integer_to_binary(?PORT), Bin = <<"listeners.tcp.max_connection_test {bind = \"0.0.0.0:", Port/binary, "\"}">>, - ok = file:write_file(IncFile, Bin), - ok = file:write_file(ConfFile, ["include \"", TmpConfName, "\""], [append]), + TmpConfName = atom_to_list(?FUNCTION_NAME) ++ ".conf", + TmpConfFullPath = inject_tmp_config_content(TmpConfName, Bin), emqx_mgmt_api_test_util:init_suite([emqx_conf]), - [{tmp_config_file, IncFile} | Config]; + [{tmp_config_file, TmpConfFullPath} | Config]; t_max_connection_default({'end', Config}) -> ok = file:delete(proplists:get_value(tmp_config_file, Config)); t_max_connection_default(Config) when is_list(Config) -> - %% Check infinity is binary not atom. #{<<"listeners">> := Listeners} = emqx_mgmt_api_listeners:do_list_listeners(), Target = lists:filter( fun(#{<<"id">> := Id}) -> Id =:= 'tcp:max_connection_test' end, Listeners ), - ?assertMatch([#{<<"max_connections">> := <<"infinity">>}], Target), + DefaultMaxConn = emqx_listeners:default_max_conn(), + ?assertMatch([#{<<"max_connections">> := DefaultMaxConn}], Target), NewPath = emqx_mgmt_api_test_util:api_path(["listeners", "tcp:max_connection_test"]), - ?assertMatch(#{<<"max_connections">> := <<"infinity">>}, request(get, NewPath, [], [])), - emqx_conf:remove([listeners, tcp, max_connection_test], #{override_to => cluster}), + ?assertMatch(#{<<"max_connections">> := DefaultMaxConn}, request(get, NewPath, [], [])), + emqx_conf:tombstone([listeners, tcp, max_connection_test], #{override_to => cluster}), ok. 
t_list_listeners(Config) when is_list(Config) -> @@ -86,7 +114,7 @@ t_list_listeners(Config) when is_list(Config) -> %% POST /listeners ListenerId = <<"tcp:default">>, - NewListenerId = <<"tcp:new">>, + NewListenerId = <<"tcp:new11">>, OriginPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]), NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]), @@ -100,7 +128,7 @@ t_list_listeners(Config) when is_list(Config) -> OriginListener2 = maps:remove(<<"id">>, OriginListener), Port = integer_to_binary(?PORT), NewConf = OriginListener2#{ - <<"name">> => <<"new">>, + <<"name">> => <<"new11">>, <<"bind">> => <<"0.0.0.0:", Port/binary>>, <<"max_connections">> := <<"infinity">> }, @@ -123,7 +151,7 @@ t_tcp_crud_listeners_by_id(Config) when is_list(Config) -> MinListenerId = <<"tcp:min">>, BadId = <<"tcp:bad">>, Type = <<"tcp">>, - crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type). + crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, 31000). t_ssl_crud_listeners_by_id(Config) when is_list(Config) -> ListenerId = <<"ssl:default">>, @@ -131,7 +159,7 @@ t_ssl_crud_listeners_by_id(Config) when is_list(Config) -> MinListenerId = <<"ssl:min">>, BadId = <<"ssl:bad">>, Type = <<"ssl">>, - crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type). + crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, 32000). t_ws_crud_listeners_by_id(Config) when is_list(Config) -> ListenerId = <<"ws:default">>, @@ -139,7 +167,7 @@ t_ws_crud_listeners_by_id(Config) when is_list(Config) -> MinListenerId = <<"ws:min">>, BadId = <<"ws:bad">>, Type = <<"ws">>, - crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type). + crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, 33000). 
t_wss_crud_listeners_by_id(Config) when is_list(Config) -> ListenerId = <<"wss:default">>, @@ -147,7 +175,7 @@ t_wss_crud_listeners_by_id(Config) when is_list(Config) -> MinListenerId = <<"wss:min">>, BadId = <<"wss:bad">>, Type = <<"wss">>, - crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type). + crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, 34000). t_api_listeners_list_not_ready(Config) when is_list(Config) -> net_kernel:start(['listeners@127.0.0.1', longnames]), @@ -266,7 +294,7 @@ cluster(Specs) -> end} ]). -crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> +crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type, PortBase) -> OriginPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]), NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]), OriginListener = request(get, OriginPath, [], []), @@ -274,8 +302,8 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> %% create with full options ?assertEqual({error, not_found}, is_running(NewListenerId)), ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])), - Port1 = integer_to_binary(?PORT), - Port2 = integer_to_binary(?PORT), + Port1 = integer_to_binary(?PORT(PortBase)), + Port2 = integer_to_binary(?PORT(PortBase)), NewConf = OriginListener#{ <<"id">> => NewListenerId, <<"bind">> => <<"0.0.0.0:", Port1/binary>> @@ -284,7 +312,7 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) -> ?assertEqual(lists:sort(maps:keys(OriginListener)), lists:sort(maps:keys(Create))), Get1 = request(get, NewPath, [], []), ?assertMatch(Create, Get1), - ?assert(is_running(NewListenerId)), + ?assertEqual({true, NewListenerId}, {is_running(NewListenerId), NewListenerId}), %% create with required options MinPath = emqx_mgmt_api_test_util:api_path(["listeners", MinListenerId]), @@ -417,3 +445,21 @@ data_file(Name) -> cert_file(Name) -> 
data_file(filename:join(["certs", Name])). + +default_listeners_hocon_text() -> + Sc = #{roots => emqx_schema:fields("listeners")}, + Listeners = hocon_tconf:make_serializable(Sc, #{}, #{}), + Config = #{<<"listeners">> => Listeners}, + hocon_pp:do(Config, #{}). + +%% inject a 'include' at the end of emqx.conf.all +%% the 'include' can be kept after test, +%% as long as the file has been deleted it is a no-op +inject_tmp_config_content(TmpFile, Content) -> + Etc = filename:join(["etc", "emqx.conf.all"]), + Inc = filename:join(["etc", TmpFile]), + ConfFile = emqx_common_test_helpers:app_path(emqx_conf, Etc), + TmpFileFullPath = emqx_common_test_helpers:app_path(emqx_conf, Inc), + ok = file:write_file(TmpFileFullPath, Content), + ok = file:write_file(ConfFile, ["\ninclude \"", TmpFileFullPath, "\"\n"], [append]), + TmpFileFullPath. diff --git a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl index f0200c410..e8e0b4ac9 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_status_SUITE.erl @@ -38,7 +38,10 @@ all() -> get_status_tests() -> [ t_status_ok, - t_status_not_ok + t_status_not_ok, + t_status_text_format, + t_status_json_format, + t_status_bad_format_qs ]. groups() -> @@ -87,8 +90,10 @@ do_request(Opts) -> headers := Headers, body := Body0 } = Opts, + QS = maps:get(qs, Opts, ""), URL = ?HOST ++ filename:join(Path0), - {ok, #{host := Host, port := Port, path := Path}} = emqx_http_lib:uri_parse(URL), + {ok, #{host := Host, port := Port, path := Path1}} = emqx_http_lib:uri_parse(URL), + Path = Path1 ++ QS, %% we must not use `httpc' here, because it keeps retrying when it %% receives a 503 with `retry-after' header, and there's no option %% to stop that behavior... @@ -165,3 +170,73 @@ t_status_not_ok(Config) -> Headers ), ok. 
+ +t_status_text_format(Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + qs => "?format=text", + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + {match, _}, + re:run(Resp, <<"emqx is running$">>) + ), + ok. + +t_status_json_format(Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + qs => "?format=json", + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + #{<<"app_status">> := <<"running">>}, + emqx_utils_json:decode(Resp) + ), + ok. + +t_status_bad_format_qs(Config) -> + lists:foreach( + fun(QS) -> + test_status_bad_format_qs(QS, Config) + end, + [ + "?a=b", + "?format=", + "?format=x" + ] + ). + +%% when query-sting is invalid, fallback to text format +test_status_bad_format_qs(QS, Config) -> + Path = ?config(get_status_path, Config), + #{ + body := Resp, + status_code := StatusCode + } = do_request(#{ + method => get, + path => Path, + qs => QS, + headers => [], + body => no_body + }), + ?assertEqual(200, StatusCode), + ?assertMatch( + {match, _}, + re:run(Resp, <<"emqx is running$">>) + ), + ok. 
diff --git a/apps/emqx_modules/src/emqx_modules.app.src b/apps/emqx_modules/src/emqx_modules.app.src index 1e38e25d1..b984cf658 100644 --- a/apps/emqx_modules/src/emqx_modules.app.src +++ b/apps/emqx_modules/src/emqx_modules.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_modules, [ {description, "EMQX Modules"}, - {vsn, "5.0.13"}, + {vsn, "5.0.14"}, {modules, []}, {applications, [kernel, stdlib, emqx, emqx_ctl]}, {mod, {emqx_modules_app, []}}, diff --git a/apps/emqx_modules/src/emqx_modules_schema.erl b/apps/emqx_modules/src/emqx_modules_schema.erl index 36a08de60..9057333d5 100644 --- a/apps/emqx_modules/src/emqx_modules_schema.erl +++ b/apps/emqx_modules/src/emqx_modules_schema.erl @@ -36,11 +36,13 @@ roots() -> "telemetry", array("rewrite", #{ desc => "List of topic rewrite rules.", - importance => ?IMPORTANCE_HIDDEN + importance => ?IMPORTANCE_HIDDEN, + default => [] }), array("topic_metrics", #{ desc => "List of topics whose metrics are reported.", - importance => ?IMPORTANCE_HIDDEN + importance => ?IMPORTANCE_HIDDEN, + default => [] }) ]. diff --git a/apps/emqx_modules/src/emqx_topic_metrics.erl b/apps/emqx_modules/src/emqx_topic_metrics.erl index de09e568f..efe309b9e 100644 --- a/apps/emqx_modules/src/emqx_topic_metrics.erl +++ b/apps/emqx_modules/src/emqx_topic_metrics.erl @@ -179,7 +179,12 @@ deregister_all() -> gen_server:call(?MODULE, {deregister, all}). is_registered(Topic) -> - ets:member(?TAB, Topic). + try + ets:member(?TAB, Topic) + catch + error:badarg -> + false + end. all_registered_topics() -> [Topic || {Topic, _} <- ets:tab2list(?TAB)]. diff --git a/apps/emqx_modules/test/emqx_delayed_SUITE.erl b/apps/emqx_modules/test/emqx_delayed_SUITE.erl index ffea436bb..8c271f0c1 100644 --- a/apps/emqx_modules/test/emqx_delayed_SUITE.erl +++ b/apps/emqx_modules/test/emqx_delayed_SUITE.erl @@ -40,9 +40,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Config. @@ -169,10 +167,10 @@ t_cluster(_) -> emqx_delayed_proto_v1:get_delayed_message(node(), Id) ), - ?assertEqual( - emqx_delayed:get_delayed_message(Id), - emqx_delayed_proto_v1:get_delayed_message(node(), Id) - ), + %% The 'local' and the 'fake-remote' values should be the same, + %% however there is a race condition, so we are just assert that they are both 'ok' tuples + ?assertMatch({ok, _}, emqx_delayed:get_delayed_message(Id)), + ?assertMatch({ok, _}, emqx_delayed_proto_v1:get_delayed_message(node(), Id)), ok = emqx_delayed_proto_v1:delete_delayed_message(node(), Id), diff --git a/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl b/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl index 4ae3dec88..b5995a47a 100644 --- a/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_delayed_api_SUITE.erl @@ -32,10 +32,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok = emqx_mgmt_api_test_util:init_suite( [emqx_conf, emqx_modules] ), diff --git a/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl b/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl index 666af9ef0..14e477bf9 100644 --- a/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl +++ b/apps/emqx_modules/test/emqx_modules_conf_SUITE.erl @@ -29,9 +29,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). 
init_per_suite(Conf) -> - emqx_common_test_helpers:load_config(emqx_modules_schema, <<"gateway {}">>, #{ - raw_with_default => true - }), + emqx_common_test_helpers:load_config(emqx_modules_schema, <<"gateway {}">>), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Conf. diff --git a/apps/emqx_modules/test/emqx_rewrite_SUITE.erl b/apps/emqx_modules/test/emqx_rewrite_SUITE.erl index 1847f876e..aa2c7cad7 100644 --- a/apps/emqx_modules/test/emqx_rewrite_SUITE.erl +++ b/apps/emqx_modules/test/emqx_rewrite_SUITE.erl @@ -73,9 +73,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_common_test_helpers:boot_modules(all), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, #{}, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, #{}), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Config. @@ -160,17 +158,13 @@ t_rewrite_re_error(_Config) -> ok. t_list(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), Expect = maps:get(<<"rewrite">>, ?REWRITE), ?assertEqual(Expect, emqx_rewrite:list()), ok. t_update(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), Init = emqx_rewrite:list(), Rules = [ #{ @@ -186,9 +180,7 @@ t_update(_Config) -> ok. t_update_disable(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), ?assertEqual(ok, emqx_rewrite:update([])), timer:sleep(150), @@ -203,9 +195,7 @@ t_update_disable(_Config) -> ok. 
t_update_re_failed(_Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), Re = <<"*^test/*">>, Rules = [ #{ @@ -261,9 +251,7 @@ receive_publish(Timeout) -> end. init() -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE), ok = emqx_rewrite:enable(), {ok, C} = emqtt:start_link([{clientid, <<"c1">>}, {username, <<"u1">>}]), {ok, _} = emqtt:connect(C), diff --git a/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl b/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl index 68d12a2c1..528102d9e 100644 --- a/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_rewrite_api_SUITE.erl @@ -33,10 +33,7 @@ init_per_testcase(_, Config) -> Config. init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok = emqx_mgmt_api_test_util:init_suite( [emqx_conf, emqx_modules] ), diff --git a/apps/emqx_modules/test/emqx_telemetry_SUITE.erl b/apps/emqx_modules/test/emqx_telemetry_SUITE.erl index bb5f39c1f..86ea65620 100644 --- a/apps/emqx_modules/test/emqx_telemetry_SUITE.erl +++ b/apps/emqx_modules/test/emqx_telemetry_SUITE.erl @@ -42,9 +42,7 @@ init_per_suite(Config) -> emqx_common_test_helpers:deps_path(emqx_authz, "etc/acl.conf") end ), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_gateway_test_utils:load_all_gateway_apps(), emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], @@ -154,9 +152,7 @@ init_per_testcase(t_exhook_info, Config) -> 
{ok, _} = emqx_exhook_demo_svr:start(), {ok, Sock} = gen_tcp:connect("localhost", 9000, [], 3000), _ = gen_tcp:close(Sock), - ok = emqx_common_test_helpers:load_config(emqx_exhook_schema, ExhookConf, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_exhook_schema, ExhookConf), {ok, _} = application:ensure_all_started(emqx_exhook), Config; init_per_testcase(t_cluster_uuid, Config) -> @@ -177,9 +173,7 @@ init_per_testcase(t_uuid_restored_from_file, Config) -> %% clear the UUIDs in the DB {atomic, ok} = mria:clear_table(emqx_telemetry), emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 @@ -332,9 +326,7 @@ t_uuid_saved_to_file(_Config) -> %% clear the UUIDs in the DB {atomic, ok} = mria:clear_table(emqx_telemetry), emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), emqx_common_test_helpers:start_apps( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 @@ -657,8 +649,10 @@ mock_advanced_mqtt_features() -> lists:foreach( fun(N) -> - Num = integer_to_binary(N), - Message = emqx_message:make(<<"$delayed/", Num/binary, "/delayed">>, <<"payload">>), + DelaySec = integer_to_binary(N + 10), + Message = emqx_message:make( + <<"$delayed/", DelaySec/binary, "/delayed">>, <<"payload">> + ), {stop, _} = emqx_delayed:on_message_publish(Message) end, lists:seq(1, 4) @@ -824,15 +818,11 @@ start_slave(Name) -> (emqx) -> application:set_env(emqx, boot_modules, []), 
ekka:join(TestNode), - emqx_common_test_helpers:load_config( - emqx_modules_schema, ?BASE_CONF, #{raw_with_default => true} - ), + emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok; (_App) -> - emqx_common_test_helpers:load_config( - emqx_modules_schema, ?BASE_CONF, #{raw_with_default => true} - ), + emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok end, Opts = #{ diff --git a/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl b/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl index ac6d12039..c375810b5 100644 --- a/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_telemetry_api_SUITE.erl @@ -29,10 +29,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok = emqx_mgmt_api_test_util:init_suite( [emqx_conf, emqx_authn, emqx_authz, emqx_modules], fun set_special_configs/1 diff --git a/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl b/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl index 10ce5d0df..a147f41cd 100644 --- a/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl +++ b/apps/emqx_modules/test/emqx_topic_metrics_SUITE.erl @@ -28,9 +28,7 @@ all() -> emqx_common_test_helpers:all(?MODULE). init_per_suite(Config) -> emqx_common_test_helpers:boot_modules(all), - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?TOPIC, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?TOPIC), emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]), Config. @@ -42,6 +40,9 @@ init_per_testcase(_Case, Config) -> emqx_topic_metrics:deregister_all(), Config. 
+end_per_testcase(t_metrics_not_started, _Config) -> + _ = supervisor:restart_child(emqx_modules_sup, emqx_topic_metrics), + ok; end_per_testcase(_Case, _Config) -> emqx_topic_metrics:deregister_all(), emqx_config:put([topic_metrics], []), @@ -181,3 +182,10 @@ t_unknown_messages(_) -> OldPid, whereis(emqx_topic_metrics) ). + +t_metrics_not_started(_Config) -> + _ = emqx_topic_metrics:register(<<"a/b/c">>), + ?assert(emqx_topic_metrics:is_registered(<<"a/b/c">>)), + ok = supervisor:terminate_child(emqx_modules_sup, emqx_topic_metrics), + ?assertNot(emqx_topic_metrics:is_registered(<<"a/b/c">>)), + {ok, _} = supervisor:restart_child(emqx_modules_sup, emqx_topic_metrics). diff --git a/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl b/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl index 5d64f123d..2e668b6cf 100644 --- a/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl +++ b/apps/emqx_modules/test/emqx_topic_metrics_api_SUITE.erl @@ -40,10 +40,7 @@ init_per_testcase(_, Config) -> Config. init_per_suite(Config) -> - ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF, #{ - raw_with_default => true - }), - + ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF), ok = emqx_mgmt_api_test_util:init_suite( [emqx_conf, emqx_modules] ), diff --git a/apps/emqx_oracle/BSL.txt b/apps/emqx_oracle/BSL.txt new file mode 100644 index 000000000..0acc0e696 --- /dev/null +++ b/apps/emqx_oracle/BSL.txt @@ -0,0 +1,94 @@ +Business Source License 1.1 + +Licensor: Hangzhou EMQ Technologies Co., Ltd. +Licensed Work: EMQX Enterprise Edition + The Licensed Work is (c) 2023 + Hangzhou EMQ Technologies Co., Ltd. +Additional Use Grant: Students and educators are granted right to copy, + modify, and create derivative work for research + or education. 
+Change Date: 2027-02-01 +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please contact Licensor: https://www.emqx.com/en/contact + +Notice + +The Business Source License (this document, or the “License”) is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +“Business Source License” is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. + +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. 
If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN “AS IS” BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark “Business Source License”, +as long as you comply with the Covenants of Licensor below. + +Covenants of Licensor + +In consideration of the right to use this License’s text and the “Business +Source License” name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where “compatible” means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text “None”. + +3. To specify a Change Date. + +4. 
Not to modify this License in any other way. diff --git a/apps/emqx_oracle/README.md b/apps/emqx_oracle/README.md new file mode 100644 index 000000000..873d52259 --- /dev/null +++ b/apps/emqx_oracle/README.md @@ -0,0 +1,14 @@ +# Oracle Database Connector + +This application houses the Oracle Database connector for EMQX Enterprise Edition. +It provides the APIs to connect to Oracle Database. + +So far it is only used to insert messages as data bridge. + +## Contributing + +Please see our [contributing.md](../../CONTRIBUTING.md). + +## License + +See [BSL](./BSL.txt). diff --git a/apps/emqx_oracle/rebar.config b/apps/emqx_oracle/rebar.config new file mode 100644 index 000000000..14461ba34 --- /dev/null +++ b/apps/emqx_oracle/rebar.config @@ -0,0 +1,7 @@ +%% -*- mode: erlang; -*- + +{erl_opts, [debug_info]}. +{deps, [ {jamdb_oracle, {git, "https://github.com/emqx/jamdb_oracle", {tag, "0.4.9.4"}}} + , {emqx_connector, {path, "../../apps/emqx_connector"}} + , {emqx_resource, {path, "../../apps/emqx_resource"}} + ]}. diff --git a/apps/emqx_oracle/src/emqx_oracle.app.src b/apps/emqx_oracle/src/emqx_oracle.app.src new file mode 100644 index 000000000..fa48e8479 --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle.app.src @@ -0,0 +1,14 @@ +{application, emqx_oracle, [ + {description, "EMQX Enterprise Oracle Database Connector"}, + {vsn, "0.1.0"}, + {registered, []}, + {applications, [ + kernel, + stdlib, + jamdb_oracle + ]}, + {env, []}, + {modules, []}, + + {links, []} +]}. diff --git a/apps/emqx_oracle/src/emqx_oracle.erl b/apps/emqx_oracle/src/emqx_oracle.erl new file mode 100644 index 000000000..a0d7169f3 --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle.erl @@ -0,0 +1,367 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_oracle). + +-behaviour(emqx_resource). 
+ +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-define(ORACLE_DEFAULT_PORT, 1521). + +%%==================================================================== +%% Exports +%%==================================================================== + +%% callbacks for behaviour emqx_resource +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). + +%% callbacks for ecpool +-export([connect/1, prepare_sql_to_conn/2]). + +%% Internal exports used to execute code with ecpool worker +-export([ + query/3, + execute_batch/3, + do_get_status/1 +]). + +-export([ + oracle_host_options/0 +]). + +-define(ACTION_SEND_MESSAGE, send_message). + +-define(SYNC_QUERY_MODE, no_handover). + +-define(ORACLE_HOST_OPTIONS, #{ + default_port => ?ORACLE_DEFAULT_PORT +}). + +-define(MAX_CURSORS, 10). +-define(DEFAULT_POOL_SIZE, 8). +-define(OPT_TIMEOUT, 30000). + +-type prepares() :: #{atom() => binary()}. +-type params_tokens() :: #{atom() => list()}. + +-type state() :: + #{ + pool_name := binary(), + prepare_sql := prepares(), + params_tokens := params_tokens(), + batch_params_tokens := params_tokens() + }. + +% As ecpool is not monitoring the worker's PID when doing a handover_async, the +% request can be lost if worker crashes. Thus, it's better to force requests to +% be sync for now. +callback_mode() -> always_sync. + +is_buffer_supported() -> false. + +-spec on_start(binary(), hoconsc:config()) -> {ok, state()} | {error, _}. 
+on_start( + InstId, + #{ + server := Server, + database := DB, + sid := Sid, + username := User + } = Config +) -> + ?SLOG(info, #{ + msg => "starting_oracle_connector", + connector => InstId, + config => emqx_utils:redact(Config) + }), + ?tp(oracle_bridge_started, #{instance_id => InstId, config => Config}), + {ok, _} = application:ensure_all_started(ecpool), + {ok, _} = application:ensure_all_started(jamdb_oracle), + jamdb_oracle_conn:set_max_cursors_number(?MAX_CURSORS), + + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, oracle_host_options()), + ServiceName = maps:get(<<"service_name">>, Config, Sid), + Options = [ + {host, Host}, + {port, Port}, + {user, emqx_plugin_libs_rule:str(User)}, + {password, emqx_secret:wrap(maps:get(password, Config, ""))}, + {sid, emqx_plugin_libs_rule:str(Sid)}, + {service_name, emqx_plugin_libs_rule:str(ServiceName)}, + {database, DB}, + {pool_size, maps:get(<<"pool_size">>, Config, ?DEFAULT_POOL_SIZE)}, + {timeout, ?OPT_TIMEOUT}, + {app_name, "EMQX Data To Oracle Database Action"} + ], + PoolName = InstId, + Prepares = parse_prepare_sql(Config), + InitState = #{pool_name => PoolName, prepare_statement => #{}}, + State = maps:merge(InitState, Prepares), + case emqx_resource_pool:start(InstId, ?MODULE, Options) of + ok -> + {ok, init_prepare(State)}; + {error, Reason} -> + ?tp( + oracle_connector_start_failed, + #{error => Reason} + ), + {error, Reason} + end. + +on_stop(InstId, #{pool_name := PoolName}) -> + ?SLOG(info, #{ + msg => "stopping_oracle_connector", + connector => InstId + }), + ?tp(oracle_bridge_stopped, #{instance_id => InstId}), + emqx_resource_pool:stop(PoolName). 
+ +on_query(InstId, {TypeOrKey, NameOrSQL}, #{pool_name := _PoolName} = State) -> + on_query(InstId, {TypeOrKey, NameOrSQL, []}, State); +on_query( + InstId, + {TypeOrKey, NameOrSQL, Params}, + #{pool_name := PoolName} = State +) -> + ?SLOG(debug, #{ + msg => "oracle database connector received sql query", + connector => InstId, + type => TypeOrKey, + sql => NameOrSQL, + state => State + }), + Type = query, + {NameOrSQL2, Data} = proc_sql_params(TypeOrKey, NameOrSQL, Params, State), + Res = on_sql_query(InstId, PoolName, Type, ?SYNC_QUERY_MODE, NameOrSQL2, Data), + handle_result(Res). + +on_batch_query( + InstId, + BatchReq, + #{pool_name := PoolName, params_tokens := Tokens, prepare_statement := Sts} = State +) -> + case BatchReq of + [{Key, _} = Request | _] -> + BinKey = to_bin(Key), + case maps:get(BinKey, Tokens, undefined) of + undefined -> + Log = #{ + connector => InstId, + first_request => Request, + state => State, + msg => "batch prepare not implemented" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, batch_prepare_not_implemented}}; + TokenList -> + {_, Datas} = lists:unzip(BatchReq), + Datas2 = [emqx_plugin_libs_rule:proc_sql(TokenList, Data) || Data <- Datas], + St = maps:get(BinKey, Sts), + case + on_sql_query(InstId, PoolName, execute_batch, ?SYNC_QUERY_MODE, St, Datas2) + of + {ok, Results} -> + handle_batch_result(Results, 0); + Result -> + Result + end + end; + _ -> + Log = #{ + connector => InstId, + request => BatchReq, + state => State, + msg => "invalid request" + }, + ?SLOG(error, Log), + {error, {unrecoverable_error, invalid_request}} + end. 
+ +proc_sql_params(query, SQLOrKey, Params, _State) -> + {SQLOrKey, Params}; +proc_sql_params(TypeOrKey, SQLOrData, Params, #{ + params_tokens := ParamsTokens, prepare_sql := PrepareSql +}) -> + Key = to_bin(TypeOrKey), + case maps:get(Key, ParamsTokens, undefined) of + undefined -> + {SQLOrData, Params}; + Tokens -> + case maps:get(Key, PrepareSql, undefined) of + undefined -> + {SQLOrData, Params}; + Sql -> + {Sql, emqx_plugin_libs_rule:proc_sql(Tokens, SQLOrData)} + end + end. + +on_sql_query(InstId, PoolName, Type, ApplyMode, NameOrSQL, Data) -> + case ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Data]}, ApplyMode) of + {error, Reason} = Result -> + ?tp( + oracle_connector_query_return, + #{error => Reason} + ), + ?SLOG(error, #{ + msg => "oracle database connector do sql query failed", + connector => InstId, + type => Type, + sql => NameOrSQL, + reason => Reason + }), + Result; + Result -> + ?tp( + oracle_connector_query_return, + #{result => Result} + ), + Result + end. + +on_get_status(_InstId, #{pool_name := Pool} = State) -> + case emqx_resource_pool:health_check_workers(Pool, fun ?MODULE:do_get_status/1) of + true -> + case do_check_prepares(State) of + ok -> + connected; + {ok, NState} -> + %% return new state with prepared statements + {connected, NState} + end; + false -> + disconnected + end. + +do_get_status(Conn) -> + ok == element(1, jamdb_oracle:sql_query(Conn, "select 1 from dual")). + +do_check_prepares(#{prepare_sql := Prepares}) when is_map(Prepares) -> + ok; +do_check_prepares(State = #{pool_name := PoolName, prepare_sql := {error, Prepares}}) -> + {ok, Sts} = prepare_sql(Prepares, PoolName), + {ok, State#{prepare_sql => Prepares, prepare_statement := Sts}}. + +%% =================================================================== + +oracle_host_options() -> + ?ORACLE_HOST_OPTIONS. 
+ +connect(Opts) -> + Password = emqx_secret:unwrap(proplists:get_value(password, Opts)), + NewOpts = lists:keyreplace(password, 1, Opts, {password, Password}), + jamdb_oracle:start_link(NewOpts). + +sql_query_to_str(SqlQuery) -> + emqx_plugin_libs_rule:str(SqlQuery). + +sql_params_to_str(Params) when is_list(Params) -> + lists:map( + fun + (false) -> "0"; + (true) -> "1"; + (Value) -> emqx_plugin_libs_rule:str(Value) + end, + Params + ). + +query(Conn, SQL, Params) -> + Ret = jamdb_oracle:sql_query(Conn, {sql_query_to_str(SQL), sql_params_to_str(Params)}), + ?tp(oracle_query, #{conn => Conn, sql => SQL, params => Params, result => Ret}), + handle_result(Ret). + +execute_batch(Conn, SQL, ParamsList) -> + ParamsListStr = lists:map(fun sql_params_to_str/1, ParamsList), + Ret = jamdb_oracle:sql_query(Conn, {batch, sql_query_to_str(SQL), ParamsListStr}), + ?tp(oracle_batch_query, #{conn => Conn, sql => SQL, params => ParamsList, result => Ret}), + handle_result(Ret). + +parse_prepare_sql(Config) -> + SQL = + case maps:get(prepare_statement, Config, undefined) of + undefined -> + case maps:get(sql, Config, undefined) of + undefined -> #{}; + Template -> #{<<"send_message">> => Template} + end; + Any -> + Any + end, + parse_prepare_sql(maps:to_list(SQL), #{}, #{}). + +parse_prepare_sql([{Key, H} | T], Prepares, Tokens) -> + {PrepareSQL, ParamsTokens} = emqx_plugin_libs_rule:preproc_sql(H, ':n'), + parse_prepare_sql( + T, Prepares#{Key => PrepareSQL}, Tokens#{Key => ParamsTokens} + ); +parse_prepare_sql([], Prepares, Tokens) -> + #{ + prepare_sql => Prepares, + params_tokens => Tokens + }. + +init_prepare(State = #{prepare_sql := Prepares, pool_name := PoolName}) -> + {ok, Sts} = prepare_sql(Prepares, PoolName), + State#{prepare_statement := Sts}. 
+ +prepare_sql(Prepares, PoolName) when is_map(Prepares) -> + prepare_sql(maps:to_list(Prepares), PoolName); +prepare_sql(Prepares, PoolName) -> + Data = do_prepare_sql(Prepares, PoolName), + {ok, _Sts} = Data, + ecpool:add_reconnect_callback(PoolName, {?MODULE, prepare_sql_to_conn, [Prepares]}), + Data. + +do_prepare_sql(Prepares, PoolName) -> + do_prepare_sql(ecpool:workers(PoolName), Prepares, PoolName, #{}). + +do_prepare_sql([{_Name, Worker} | T], Prepares, PoolName, _LastSts) -> + {ok, Conn} = ecpool_worker:client(Worker), + {ok, Sts} = prepare_sql_to_conn(Conn, Prepares), + do_prepare_sql(T, Prepares, PoolName, Sts); +do_prepare_sql([], _Prepares, _PoolName, LastSts) -> + {ok, LastSts}. + +prepare_sql_to_conn(Conn, Prepares) -> + prepare_sql_to_conn(Conn, Prepares, #{}). + +prepare_sql_to_conn(Conn, [], Statements) when is_pid(Conn) -> {ok, Statements}; +prepare_sql_to_conn(Conn, [{Key, SQL} | PrepareList], Statements) when is_pid(Conn) -> + LogMeta = #{msg => "Oracle Database Prepare Statement", name => Key, prepare_sql => SQL}, + ?SLOG(info, LogMeta), + prepare_sql_to_conn(Conn, PrepareList, Statements#{Key => SQL}). + +to_bin(Bin) when is_binary(Bin) -> + Bin; +to_bin(Atom) when is_atom(Atom) -> + erlang:atom_to_binary(Atom). + +handle_result({error, disconnected}) -> + {error, {recoverable_error, disconnected}}; +handle_result({error, Error}) -> + {error, {unrecoverable_error, Error}}; +handle_result({error, socket, closed} = Error) -> + {error, {recoverable_error, Error}}; +handle_result({error, Type, Reason}) -> + {error, {unrecoverable_error, {Type, Reason}}}; +handle_result(Res) -> + Res. 
+ +handle_batch_result([{affected_rows, RowCount} | Rest], Acc) -> + handle_batch_result(Rest, Acc + RowCount); +handle_batch_result([{proc_result, RetCode, _Rows} | Rest], Acc) when RetCode =:= 0 -> + handle_batch_result(Rest, Acc); +handle_batch_result([{proc_result, RetCode, Reason} | _Rest], _Acc) -> + {error, {unrecoverable_error, {RetCode, Reason}}}; +handle_batch_result([], Acc) -> + {ok, Acc}. diff --git a/apps/emqx_oracle/src/emqx_oracle_schema.erl b/apps/emqx_oracle/src/emqx_oracle_schema.erl new file mode 100644 index 000000000..cfa74054a --- /dev/null +++ b/apps/emqx_oracle/src/emqx_oracle_schema.erl @@ -0,0 +1,33 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_oracle_schema). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-define(REF_MODULE, emqx_oracle). + +%% Hocon config schema exports +-export([ + roots/0, + fields/1 +]). + +roots() -> + [{config, #{type => hoconsc:ref(?REF_MODULE, config)}}]. + +fields(config) -> + [{server, server()}, {sid, fun sid/1}] ++ + emqx_connector_schema_lib:relational_db_fields() ++ + emqx_connector_schema_lib:prepare_statement_fields(). + +server() -> + Meta = #{desc => ?DESC(?REF_MODULE, "server")}, + emqx_schema:servers_sc(Meta, (?REF_MODULE):oracle_host_options()). + +sid(type) -> binary(); +sid(desc) -> ?DESC(?REF_MODULE, "sid"); +sid(required) -> true; +sid(_) -> undefined. diff --git a/apps/emqx_plugin_libs/src/emqx_placeholder.erl b/apps/emqx_plugin_libs/src/emqx_placeholder.erl index 18ef9e8fb..dcd666f5b 100644 --- a/apps/emqx_plugin_libs/src/emqx_placeholder.erl +++ b/apps/emqx_plugin_libs/src/emqx_placeholder.erl @@ -69,7 +69,7 @@ -type preproc_sql_opts() :: #{ placeholders => list(binary()), - replace_with => '?' | '$n', + replace_with => '?' 
| '$n' | ':n', strip_double_quote => boolean() }. @@ -149,7 +149,7 @@ proc_cmd(Tokens, Data, Opts) -> preproc_sql(Sql) -> preproc_sql(Sql, '?'). --spec preproc_sql(binary(), '?' | '$n' | preproc_sql_opts()) -> +-spec preproc_sql(binary(), '?' | '$n' | ':n' | preproc_sql_opts()) -> {prepare_statement_key(), tmpl_token()}. preproc_sql(Sql, ReplaceWith) when is_atom(ReplaceWith) -> preproc_sql(Sql, #{replace_with => ReplaceWith}); @@ -316,13 +316,17 @@ preproc_tmpl_deep_map_key(Key, _) -> replace_with(Tmpl, RE, '?') -> re:replace(Tmpl, RE, "?", [{return, binary}, global]); replace_with(Tmpl, RE, '$n') -> + replace_with(Tmpl, RE, <<"$">>); +replace_with(Tmpl, RE, ':n') -> + replace_with(Tmpl, RE, <<":">>); +replace_with(Tmpl, RE, String) when is_binary(String) -> Parts = re:split(Tmpl, RE, [{return, binary}, trim, group]), {Res, _} = lists:foldl( fun ([Tkn, _Phld], {Acc, Seq}) -> Seq1 = erlang:integer_to_binary(Seq), - {<>, Seq + 1}; + {<>, Seq + 1}; ([Tkn], {Acc, Seq}) -> {<>, Seq} end, @@ -330,6 +334,7 @@ replace_with(Tmpl, RE, '$n') -> Parts ), Res. + parse_nested(<<".", R/binary>>) -> %% ignore the root . parse_nested(R); diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src index 24b5a3240..bfd7e68fa 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_plugin_libs, [ {description, "EMQX Plugin utility libs"}, - {vsn, "4.3.9"}, + {vsn, "4.3.10"}, {modules, []}, {applications, [kernel, stdlib]}, {env, []} diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl index 8844fe586..9a4c01a2b 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl @@ -105,9 +105,8 @@ proc_cmd(Tokens, Data, Opts) -> preproc_sql(Sql) -> emqx_placeholder:preproc_sql(Sql). 
--spec preproc_sql(Sql :: binary(), ReplaceWith :: '?' | '$n') -> +-spec preproc_sql(Sql :: binary(), ReplaceWith :: '?' | '$n' | ':n') -> {prepare_statement_key(), tmpl_token()}. - preproc_sql(Sql, ReplaceWith) -> emqx_placeholder:preproc_sql(Sql, ReplaceWith). diff --git a/apps/emqx_plugins/README.md b/apps/emqx_plugins/README.md new file mode 100644 index 000000000..9c8faccd1 --- /dev/null +++ b/apps/emqx_plugins/README.md @@ -0,0 +1,12 @@ +# Plugins Management + +This application provides the feature for users to upload and install custom, Erlang-based plugins. + +More introduction about [Plugins](https://www.emqx.io/docs/en/v5.0/extensions/plugins.html#develop-emqx-plugins) + +See HTTP API to learn how to [Install/Uninstall a Plugin](https://www.emqx.io/docs/en/v5.0/admin/api-docs.html#tag/Plugins) + +## Plugin Template + +We provide a [plugin template](https://github.com/emqx/emqx-plugin-template) that +you can use to learn how to write and package custom plugins. diff --git a/apps/emqx_resource/include/emqx_resource.hrl b/apps/emqx_resource/include/emqx_resource.hrl index 91572eac3..e6f86fb59 100644 --- a/apps/emqx_resource/include/emqx_resource.hrl +++ b/apps/emqx_resource/include/emqx_resource.hrl @@ -15,7 +15,6 @@ %%-------------------------------------------------------------------- -type resource_type() :: module(). -type resource_id() :: binary(). --type manager_id() :: binary(). -type raw_resource_config() :: binary() | raw_term_resource_config(). -type raw_term_resource_config() :: #{binary() => term()} | [raw_term_resource_config()]. -type resource_config() :: term(). 
diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 2553e6dd8..3e264cb3e 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.14"}, + {vsn, "0.1.15"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ diff --git a/apps/emqx_resource/src/emqx_resource.erl b/apps/emqx_resource/src/emqx_resource.erl index d8b91942b..7c48e8ee4 100644 --- a/apps/emqx_resource/src/emqx_resource.erl +++ b/apps/emqx_resource/src/emqx_resource.erl @@ -113,7 +113,10 @@ -export([apply_reply_fun/2]). --export_type([resource_data/0]). +-export_type([ + resource_id/0, + resource_data/0 +]). -optional_callbacks([ on_query/3, @@ -362,11 +365,11 @@ is_buffer_supported(Module) -> false end. --spec call_start(manager_id(), module(), resource_config()) -> +-spec call_start(resource_id(), module(), resource_config()) -> {ok, resource_state()} | {error, Reason :: term()}. -call_start(MgrId, Mod, Config) -> +call_start(ResId, Mod, Config) -> try - Mod:on_start(MgrId, Config) + Mod:on_start(ResId, Config) catch throw:Error -> {error, Error}; @@ -374,17 +377,17 @@ call_start(MgrId, Mod, Config) -> {error, #{exception => Kind, reason => Error, stacktrace => Stacktrace}} end. --spec call_health_check(manager_id(), module(), resource_state()) -> +-spec call_health_check(resource_id(), module(), resource_state()) -> resource_status() | {resource_status(), resource_state()} | {resource_status(), resource_state(), term()} | {error, term()}. -call_health_check(MgrId, Mod, ResourceState) -> - ?SAFE_CALL(Mod:on_get_status(MgrId, ResourceState)). +call_health_check(ResId, Mod, ResourceState) -> + ?SAFE_CALL(Mod:on_get_status(ResId, ResourceState)). --spec call_stop(manager_id(), module(), resource_state()) -> term(). 
-call_stop(MgrId, Mod, ResourceState) -> - ?SAFE_CALL(Mod:on_stop(MgrId, ResourceState)). +-spec call_stop(resource_id(), module(), resource_state()) -> term(). +call_stop(ResId, Mod, ResourceState) -> + ?SAFE_CALL(Mod:on_stop(ResId, ResourceState)). -spec check_config(resource_type(), raw_resource_config()) -> {ok, resource_config()} | {error, term()}. diff --git a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl index 2e2cd5631..7cb7f8198 100644 --- a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl +++ b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl @@ -1466,7 +1466,7 @@ mark_inflight_items_as_retriable(Data, WorkerMRef) -> end ), _NumAffected = ets:select_replace(InflightTID, MatchSpec), - ?tp(buffer_worker_worker_down_update, #{num_affected => _NumAffected}), + ?tp(buffer_worker_async_agent_down, #{num_affected => _NumAffected}), ok. %% used to update a batch after dropping expired individual queries. diff --git a/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl b/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl index a00dcdcd2..104ad7ade 100644 --- a/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl +++ b/apps/emqx_resource/src/emqx_resource_buffer_worker_sup.erl @@ -52,6 +52,7 @@ init([]) -> ChildSpecs = [], {ok, {SupFlags, ChildSpecs}}. +-spec start_workers(emqx_resource:resource_id(), _Opts :: #{atom() => _}) -> ok. start_workers(ResId, Opts) -> WorkerPoolSize = worker_pool_size(Opts), _ = ensure_worker_pool(ResId, hash, [{size, WorkerPoolSize}]), @@ -63,6 +64,7 @@ start_workers(ResId, Opts) -> lists:seq(1, WorkerPoolSize) ). +-spec stop_workers(emqx_resource:resource_id(), _Opts :: #{atom() => _}) -> ok. stop_workers(ResId, Opts) -> WorkerPoolSize = worker_pool_size(Opts), lists:foreach( @@ -75,6 +77,7 @@ stop_workers(ResId, Opts) -> ensure_worker_pool_removed(ResId), ok. +-spec worker_pids(emqx_resource:resource_id()) -> [pid()]. 
worker_pids(ResId) -> lists:map( fun({_Name, Pid}) -> @@ -141,9 +144,5 @@ ensure_disk_queue_dir_absent(ResourceId, Index) -> ok. ensure_worker_pool_removed(ResId) -> - try - gproc_pool:delete(ResId) - catch - error:badarg -> ok - end, + gproc_pool:force_delete(ResId), ok. diff --git a/apps/emqx_resource/src/emqx_resource_manager.erl b/apps/emqx_resource/src/emqx_resource_manager.erl index 877b35fff..f42d3c1b5 100644 --- a/apps/emqx_resource/src/emqx_resource_manager.erl +++ b/apps/emqx_resource/src/emqx_resource_manager.erl @@ -42,23 +42,24 @@ ]). -export([ - set_resource_status_connecting/1, - manager_id_to_resource_id/1 + set_resource_status_connecting/1 ]). % Server --export([start_link/6]). +-export([start_link/5]). % Behaviour -export([init/1, callback_mode/0, handle_event/4, terminate/3]). % State record -record(data, { - id, manager_id, group, mod, callback_mode, query_mode, config, opts, status, state, error, pid + id, group, mod, callback_mode, query_mode, config, opts, status, state, error, pid }). -type data() :: #data{}. --define(ETS_TABLE, ?MODULE). +-define(NAME(ResId), {n, l, {?MODULE, ResId}}). +-define(REF(ResId), {via, gproc, ?NAME(ResId)}). + -define(WAIT_FOR_RESOURCE_DELAY, 100). -define(T_OPERATION, 5000). -define(T_LOOKUP, 1000). @@ -69,13 +70,6 @@ %% API %%------------------------------------------------------------------------------ -make_manager_id(ResId) -> - emqx_resource:generate_id(ResId). - -manager_id_to_resource_id(MgrId) -> - [ResId, _Index] = string:split(MgrId, ":", trailing), - ResId. - %% @doc Called from emqx_resource when starting a resource instance. 
%% %% Triggers the emqx_resource_manager_sup supervisor to actually create @@ -92,8 +86,7 @@ ensure_resource(ResId, Group, ResourceType, Config, Opts) -> {ok, _Group, Data} -> {ok, Data}; {error, not_found} -> - MgrId = set_new_owner(ResId), - create_and_return_data(MgrId, ResId, Group, ResourceType, Config, Opts) + create_and_return_data(ResId, Group, ResourceType, Config, Opts) end. %% @doc Called from emqx_resource when recreating a resource which may or may not exist @@ -103,23 +96,22 @@ recreate(ResId, ResourceType, NewConfig, Opts) -> case lookup(ResId) of {ok, Group, #{mod := ResourceType, status := _} = _Data} -> _ = remove(ResId, false), - MgrId = set_new_owner(ResId), - create_and_return_data(MgrId, ResId, Group, ResourceType, NewConfig, Opts); + create_and_return_data(ResId, Group, ResourceType, NewConfig, Opts); {ok, _, #{mod := Mod}} when Mod =/= ResourceType -> {error, updating_to_incorrect_resource_type}; {error, not_found} -> {error, not_found} end. -create_and_return_data(MgrId, ResId, Group, ResourceType, Config, Opts) -> - _ = create(MgrId, ResId, Group, ResourceType, Config, Opts), +create_and_return_data(ResId, Group, ResourceType, Config, Opts) -> + _ = create(ResId, Group, ResourceType, Config, Opts), {ok, _Group, Data} = lookup(ResId), {ok, Data}. %% @doc Create a resource_manager and wait until it is running -create(MgrId, ResId, Group, ResourceType, Config, Opts) -> +create(ResId, Group, ResourceType, Config, Opts) -> % The state machine will make the actual call to the callback/resource module after init - ok = emqx_resource_manager_sup:ensure_child(MgrId, ResId, Group, ResourceType, Config, Opts), + ok = emqx_resource_manager_sup:ensure_child(ResId, Group, ResourceType, Config, Opts), ok = emqx_metrics_worker:create_metrics( ?RES_METRICS, ResId, @@ -164,10 +156,12 @@ create(MgrId, ResId, Group, ResourceType, Config, Opts) -> ok | {error, Reason :: term()}. 
create_dry_run(ResourceType, Config) -> ResId = make_test_id(), - MgrId = set_new_owner(ResId), - ok = emqx_resource_manager_sup:ensure_child( - MgrId, ResId, <<"dry_run">>, ResourceType, Config, #{} - ), + Opts = + case is_map(Config) of + true -> maps:get(resource_opts, Config, #{}); + false -> #{} + end, + ok = emqx_resource_manager_sup:ensure_child(ResId, <<"dry_run">>, ResourceType, Config, Opts), case wait_for_ready(ResId, 5000) of ok -> remove(ResId); @@ -237,10 +231,11 @@ lookup(ResId) -> %% @doc Lookup the group and data of a resource from the cache -spec lookup_cached(resource_id()) -> {ok, resource_group(), resource_data()} | {error, not_found}. lookup_cached(ResId) -> - case read_cache(ResId) of - {Group, Data} -> - {ok, Group, data_record_to_external_map(Data)}; - not_found -> + try read_cache(ResId) of + Data = #data{group = Group} -> + {ok, Group, data_record_to_external_map(Data)} + catch + error:badarg -> {error, not_found} end. @@ -256,20 +251,16 @@ reset_metrics(ResId) -> %% @doc Returns the data for all resources -spec list_all() -> [resource_data()]. list_all() -> - try - [ - data_record_to_external_map(Data) - || {_Id, _Group, Data} <- ets:tab2list(?ETS_TABLE) - ] - catch - error:badarg -> [] - end. + lists:map( + fun data_record_to_external_map/1, + gproc:select({local, names}, [{{?NAME('_'), '_', '$1'}, [], ['$1']}]) + ). %% @doc Returns a list of ids for all the resources in a group -spec list_group(resource_group()) -> [resource_id()]. list_group(Group) -> - List = ets:match(?ETS_TABLE, {'$1', Group, '_'}), - lists:flatten(List). + Guard = {'==', {element, #data.group, '$1'}, Group}, + gproc:select({local, names}, [{{?NAME('$2'), '_', '$1'}, [Guard], ['$2']}]). -spec health_check(resource_id()) -> {ok, resource_status()} | {error, term()}. 
health_check(ResId) -> @@ -278,10 +269,9 @@ health_check(ResId) -> %% Server start/stop callbacks %% @doc Function called from the supervisor to actually start the server -start_link(MgrId, ResId, Group, ResourceType, Config, Opts) -> +start_link(ResId, Group, ResourceType, Config, Opts) -> Data = #data{ id = ResId, - manager_id = MgrId, group = Group, mod = ResourceType, callback_mode = emqx_resource:get_callback_mode(ResourceType), @@ -295,7 +285,7 @@ start_link(MgrId, ResId, Group, ResourceType, Config, Opts) -> state = undefined, error = undefined }, - gen_statem:start_link(?MODULE, {Data, Opts}, []). + gen_statem:start_link(?REF(ResId), ?MODULE, {Data, Opts}, []). init({DataIn, Opts}) -> process_flag(trap_exit, true), @@ -315,7 +305,7 @@ terminate({shutdown, removed}, _State, _Data) -> ok; terminate(_Reason, _State, Data) -> _ = maybe_stop_resource(Data), - ok = delete_cache(Data#data.id, Data#data.manager_id), + _ = erase_cache(Data), ok. %% Behavior callback @@ -340,9 +330,6 @@ handle_event({call, From}, start, State, Data) when start_resource(Data, From); handle_event({call, From}, start, _State, _Data) -> {keep_state_and_data, [{reply, From, ok}]}; -% Called when the resource received a `quit` message -handle_event(info, quit, _State, _Data) -> - {stop, {shutdown, quit}}; % Called when the resource is to be stopped handle_event({call, From}, stop, stopped, _Data) -> {keep_state_and_data, [{reply, From, ok}]}; @@ -413,9 +400,9 @@ log_state_consistency(State, Data) -> data => Data }). 
-log_cache_consistency({_, Data}, Data) -> +log_cache_consistency(Data, Data) -> ok; -log_cache_consistency({_, DataCached}, Data) -> +log_cache_consistency(DataCached, Data) -> ?tp(warning, "inconsistent_cache", #{ cache => DataCached, data => Data @@ -424,56 +411,20 @@ log_cache_consistency({_, DataCached}, Data) -> %%------------------------------------------------------------------------------ %% internal functions %%------------------------------------------------------------------------------ -insert_cache(ResId, Group, Data = #data{manager_id = MgrId}) -> - case get_owner(ResId) of - not_found -> - ets:insert(?ETS_TABLE, {ResId, Group, Data}); - MgrId -> - ets:insert(?ETS_TABLE, {ResId, Group, Data}); - _ -> - ?SLOG(error, #{ - msg => get_resource_owner_failed, - resource_id => ResId, - action => quit_resource - }), - self() ! quit - end. +insert_cache(ResId, Data = #data{}) -> + gproc:set_value(?NAME(ResId), Data). read_cache(ResId) -> - case ets:lookup(?ETS_TABLE, ResId) of - [{_Id, Group, Data}] -> {Group, Data}; - [] -> not_found - end. + gproc:lookup_value(?NAME(ResId)). -delete_cache(ResId, MgrId) -> - case get_owner(ResId) of - MgrIdNow when MgrIdNow == not_found; MgrIdNow == MgrId -> - do_delete_cache(ResId); - _ -> - ok - end. +erase_cache(_Data = #data{id = ResId}) -> + gproc:unreg(?NAME(ResId)). -do_delete_cache(<> = ResId) -> - true = ets:delete(?ETS_TABLE, {owner, ResId}), - true = ets:delete(?ETS_TABLE, ResId), - ok; -do_delete_cache(ResId) -> - true = ets:delete(?ETS_TABLE, ResId), - ok. - -set_new_owner(ResId) -> - MgrId = make_manager_id(ResId), - ok = set_owner(ResId, MgrId), - MgrId. - -set_owner(ResId, MgrId) -> - ets:insert(?ETS_TABLE, {{owner, ResId}, MgrId}), - ok. - -get_owner(ResId) -> - case ets:lookup(?ETS_TABLE, {owner, ResId}) of - [{_, MgrId}] -> MgrId; - [] -> not_found +try_read_cache(ResId) -> + try + read_cache(ResId) + catch + error:badarg -> not_found end. 
retry_actions(Data) -> @@ -489,17 +440,17 @@ health_check_actions(Data) -> handle_remove_event(From, ClearMetrics, Data) -> _ = stop_resource(Data), - ok = delete_cache(Data#data.id, Data#data.manager_id), ok = emqx_resource_buffer_worker_sup:stop_workers(Data#data.id, Data#data.opts), case ClearMetrics of true -> ok = emqx_metrics_worker:clear_metrics(?RES_METRICS, Data#data.id); false -> ok end, + _ = erase_cache(Data), {stop_and_reply, {shutdown, removed}, [{reply, From, ok}]}. start_resource(Data, From) -> %% in case the emqx_resource:call_start/2 hangs, the lookup/1 can read status from the cache - case emqx_resource:call_start(Data#data.manager_id, Data#data.mod, Data#data.config) of + case emqx_resource:call_start(Data#data.id, Data#data.mod, Data#data.config) of {ok, ResourceState} -> UpdatedData = Data#data{status = connecting, state = ResourceState}, %% Perform an initial health_check immediately before transitioning into a connected state @@ -530,7 +481,7 @@ stop_resource(#data{state = ResState, id = ResId} = Data) -> %% is returned. 
case ResState /= undefined of true -> - emqx_resource:call_stop(Data#data.manager_id, Data#data.mod, ResState); + emqx_resource:call_stop(Data#data.id, Data#data.mod, ResState); false -> ok end, @@ -584,7 +535,7 @@ with_health_check(#data{state = undefined} = Data, Func) -> Func(disconnected, Data); with_health_check(#data{error = PrevError} = Data, Func) -> ResId = Data#data.id, - HCRes = emqx_resource:call_health_check(Data#data.manager_id, Data#data.mod, Data#data.state), + HCRes = emqx_resource:call_health_check(Data#data.id, Data#data.mod, Data#data.state), {Status, NewState, Err} = parse_health_check_result(HCRes, Data), _ = maybe_alarm(Status, ResId, Err, PrevError), ok = maybe_resume_resource_workers(ResId, Status), @@ -599,7 +550,7 @@ update_state(Data) -> update_state(DataWas, DataWas) -> DataWas; update_state(Data, _DataWas) -> - _ = insert_cache(Data#data.id, Data#data.group, Data), + _ = insert_cache(Data#data.id, Data), Data. health_check_interval(Opts) -> @@ -689,10 +640,10 @@ wait_for_ready(ResId, WaitTime) -> do_wait_for_ready(_ResId, 0) -> timeout; do_wait_for_ready(ResId, Retry) -> - case read_cache(ResId) of - {_Group, #data{status = connected}} -> + case try_read_cache(ResId) of + #data{status = connected} -> ok; - {_Group, #data{status = disconnected, error = Err}} -> + #data{status = disconnected, error = Err} -> {error, external_error(Err)}; _ -> timer:sleep(?WAIT_FOR_RESOURCE_DELAY), @@ -701,12 +652,7 @@ do_wait_for_ready(ResId, Retry) -> safe_call(ResId, Message, Timeout) -> try - case read_cache(ResId) of - not_found -> - {error, not_found}; - {_, #data{pid = ManagerPid}} -> - gen_statem:call(ManagerPid, Message, {clean_timeout, Timeout}) - end + gen_statem:call(?REF(ResId), Message, {clean_timeout, Timeout}) catch error:badarg -> {error, not_found}; diff --git a/apps/emqx_resource/src/emqx_resource_manager_sup.erl b/apps/emqx_resource/src/emqx_resource_manager_sup.erl index 5b731d6cf..2f442cd56 100644 --- 
a/apps/emqx_resource/src/emqx_resource_manager_sup.erl +++ b/apps/emqx_resource/src/emqx_resource_manager_sup.erl @@ -17,23 +17,20 @@ -behaviour(supervisor). --export([ensure_child/6]). +-export([ensure_child/5]). -export([start_link/0]). -export([init/1]). -ensure_child(MgrId, ResId, Group, ResourceType, Config, Opts) -> - _ = supervisor:start_child(?MODULE, [MgrId, ResId, Group, ResourceType, Config, Opts]), +ensure_child(ResId, Group, ResourceType, Config, Opts) -> + _ = supervisor:start_child(?MODULE, [ResId, Group, ResourceType, Config, Opts]), ok. start_link() -> supervisor:start_link({local, ?MODULE}, ?MODULE, []). init([]) -> - TabOpts = [named_table, set, public, {read_concurrency, true}], - _ = ets:new(emqx_resource_manager, TabOpts), - ChildSpecs = [ #{ id => emqx_resource_manager, @@ -44,6 +41,5 @@ init([]) -> modules => [emqx_resource_manager] } ], - SupFlags = #{strategy => simple_one_for_one, intensity => 10, period => 10}, {ok, {SupFlags, ChildSpecs}}. diff --git a/apps/emqx_resource/test/emqx_resource_SUITE.erl b/apps/emqx_resource/test/emqx_resource_SUITE.erl index f8ddd56b5..6fd5a552e 100644 --- a/apps/emqx_resource/test/emqx_resource_SUITE.erl +++ b/apps/emqx_resource/test/emqx_resource_SUITE.erl @@ -1055,28 +1055,22 @@ t_list_filter(_) -> ). t_create_dry_run_local(_) -> - ets:match_delete(emqx_resource_manager, {{owner, '$1'}, '_'}), lists:foreach( fun(_) -> create_dry_run_local_succ() end, lists:seq(1, 10) ), - case [] =:= ets:match(emqx_resource_manager, {{owner, '$1'}, '_'}) of - false -> - %% Sleep to remove flakyness in test case. It take some time for - %% the ETS table to be cleared. - timer:sleep(2000), - [] = ets:match(emqx_resource_manager, {{owner, '$1'}, '_'}); - true -> - ok - end. + ?retry( + 100, + 5, + ?assertEqual( + [], + emqx_resource:list_instances_verbose() + ) + ). 
create_dry_run_local_succ() -> - case whereis(test_resource) of - undefined -> ok; - Pid -> exit(Pid, kill) - end, ?assertEqual( ok, emqx_resource:create_dry_run_local( @@ -1107,7 +1101,15 @@ t_create_dry_run_local_failed(_) -> ?TEST_RESOURCE, #{name => test_resource, stop_error => true} ), - ?assertEqual(ok, Res3). + ?assertEqual(ok, Res3), + ?retry( + 100, + 5, + ?assertEqual( + [], + emqx_resource:list_instances_verbose() + ) + ). t_test_func(_) -> ?assertEqual(ok, erlang:apply(emqx_resource_validator:not_empty("not_empty"), [<<"someval">>])), @@ -1766,12 +1768,6 @@ t_async_pool_worker_death(_Config) -> ?assertEqual(NumReqs, Inflight0), %% grab one of the worker pids and kill it - {ok, SRef1} = - snabbkaffe:subscribe( - ?match_event(#{?snk_kind := buffer_worker_worker_down_update}), - NumBufferWorkers, - 10_000 - ), {ok, #{pid := Pid0}} = emqx_resource:simple_sync_query(?ID, get_state), MRef = monitor(process, Pid0), ct:pal("will kill ~p", [Pid0]), @@ -1785,13 +1781,27 @@ t_async_pool_worker_death(_Config) -> end, %% inflight requests should have been marked as retriable - {ok, _} = snabbkaffe:receive_events(SRef1), + wait_until_all_marked_as_retriable(NumReqs), Inflight1 = emqx_resource_metrics:inflight_get(?ID), ?assertEqual(NumReqs, Inflight1), - ok + NumReqs end, - [] + fun(NumReqs, Trace) -> + Events = ?of_kind(buffer_worker_async_agent_down, Trace), + %% At least one buffer worker should have marked its + %% requests as retriable. If a single one has + %% received all requests, that's all we got. + ?assertMatch([_ | _], Events), + %% All requests distributed over all buffer workers + %% should have been marked as retriable, by the time + %% the inflight has been drained. + ?assertEqual( + NumReqs, + lists:sum([N || #{num_affected := N} <- Events]) + ), + ok + end ), ok. 
@@ -3017,3 +3027,33 @@ trace_between_span(Trace0, Marker) -> {Trace1, [_ | _]} = ?split_trace_at(#{?snk_kind := Marker, ?snk_span := {complete, _}}, Trace0), {[_ | _], [_ | Trace2]} = ?split_trace_at(#{?snk_kind := Marker, ?snk_span := start}, Trace1), Trace2. + +wait_until_all_marked_as_retriable(NumExpected) when NumExpected =< 0 -> + ok; +wait_until_all_marked_as_retriable(NumExpected) -> + Seen = #{}, + do_wait_until_all_marked_as_retriable(NumExpected, Seen). + +do_wait_until_all_marked_as_retriable(NumExpected, _Seen) when NumExpected =< 0 -> + ok; +do_wait_until_all_marked_as_retriable(NumExpected, Seen) -> + Res = ?block_until( + #{?snk_kind := buffer_worker_async_agent_down, ?snk_meta := #{pid := P}} when + not is_map_key(P, Seen), + 10_000 + ), + case Res of + {timeout, Evts} -> + ct:pal("events so far:\n ~p", [Evts]), + ct:fail("timeout waiting for events"); + {ok, #{num_affected := NumAffected, ?snk_meta := #{pid := Pid}}} -> + ct:pal("affected: ~p; pid: ~p", [NumAffected, Pid]), + case NumAffected >= NumExpected of + true -> + ok; + false -> + do_wait_until_all_marked_as_retriable(NumExpected - NumAffected, Seen#{ + Pid => true + }) + end + end. 
diff --git a/apps/emqx_statsd/src/emqx_statsd.erl b/apps/emqx_statsd/src/emqx_statsd.erl index c5a7fc1c8..b2d726b07 100644 --- a/apps/emqx_statsd/src/emqx_statsd.erl +++ b/apps/emqx_statsd/src/emqx_statsd.erl @@ -80,7 +80,7 @@ init(Conf) -> flush_time_interval := FlushTimeInterval } = Conf, FlushTimeInterval1 = flush_interval(FlushTimeInterval, SampleTimeInterval), - {Host, Port} = emqx_schema:parse_server(Server, ?SERVER_PARSE_OPTS), + #{hostname := Host, port := Port} = emqx_schema:parse_server(Server, ?SERVER_PARSE_OPTS), Tags = maps:fold(fun(K, V, Acc) -> [{to_bin(K), to_bin(V)} | Acc] end, [], TagsRaw), Opts = [{tags, Tags}, {host, Host}, {port, Port}, {prefix, <<"emqx">>}], {ok, Pid} = estatsd:start_link(Opts), diff --git a/apps/emqx_statsd/test/emqx_statsd_SUITE.erl b/apps/emqx_statsd/test/emqx_statsd_SUITE.erl index b5669e4b9..8b8709c27 100644 --- a/apps/emqx_statsd/test/emqx_statsd_SUITE.erl +++ b/apps/emqx_statsd/test/emqx_statsd_SUITE.erl @@ -59,9 +59,7 @@ init_per_suite(Config) -> [emqx_conf, emqx_dashboard, emqx_statsd], fun set_special_configs/1 ), - ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF), Config. 
end_per_suite(_Config) -> @@ -84,20 +82,16 @@ t_server_validator(_) -> reason := "cannot_be_empty", value := "" }, - emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BAD_CONF, #{ - raw_with_default => true - }) + emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BAD_CONF) ), %% default - ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?DEFAULT_CONF, #{ - raw_with_default => true - }), - undefined = emqx_conf:get_raw([statsd, server], undefined), - ?assertMatch("127.0.0.1:8125", emqx_conf:get([statsd, server])), + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?DEFAULT_CONF), + DefaultServer = default_server(), + ?assertEqual(DefaultServer, emqx_conf:get_raw([statsd, server])), + DefaultServerStr = binary_to_list(DefaultServer), + ?assertEqual(DefaultServerStr, emqx_conf:get([statsd, server])), %% recover - ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF, #{ - raw_with_default => true - }), + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF), Server2 = emqx_conf:get_raw([statsd, server]), ?assertMatch(Server0, Server2), ok. @@ -204,3 +198,7 @@ request(Method, Body) -> {ok, _Status, _} -> error end. + +default_server() -> + {server, Schema} = lists:keyfind(server, 1, emqx_statsd_schema:fields("statsd")), + hocon_schema:field_schema(Schema, default). diff --git a/apps/emqx_utils/src/emqx_utils.app.src b/apps/emqx_utils/src/emqx_utils.app.src index eb6371411..dff55bc86 100644 --- a/apps/emqx_utils/src/emqx_utils.app.src +++ b/apps/emqx_utils/src/emqx_utils.app.src @@ -2,7 +2,7 @@ {application, emqx_utils, [ {description, "Miscellaneous utilities for EMQX apps"}, % strict semver, bump manually! 
- {vsn, "5.0.0"}, + {vsn, "5.0.1"}, {modules, [ emqx_utils, emqx_utils_api, diff --git a/apps/emqx_utils/src/emqx_utils_maps.erl b/apps/emqx_utils/src/emqx_utils_maps.erl index 6bec32ae3..d1c3ed649 100644 --- a/apps/emqx_utils/src/emqx_utils_maps.erl +++ b/apps/emqx_utils/src/emqx_utils_maps.erl @@ -41,14 +41,13 @@ -type config_key_path() :: [config_key()]. -type convert_fun() :: fun((...) -> {K1 :: any(), V1 :: any()} | drop). +-define(CONFIG_NOT_FOUND_MAGIC, '$0tFound'). %%----------------------------------------------------------------- -spec deep_get(config_key_path(), map()) -> term(). deep_get(ConfKeyPath, Map) -> - Ref = make_ref(), - Res = deep_get(ConfKeyPath, Map, Ref), - case Res =:= Ref of - true -> error({config_not_found, ConfKeyPath}); - false -> Res + case deep_get(ConfKeyPath, Map, ?CONFIG_NOT_FOUND_MAGIC) of + ?CONFIG_NOT_FOUND_MAGIC -> error({config_not_found, ConfKeyPath}); + Res -> Res end. -spec deep_get(config_key_path(), map(), term()) -> term(). diff --git a/bin/emqx b/bin/emqx index fc0124b96..60c292f9c 100755 --- a/bin/emqx +++ b/bin/emqx @@ -304,7 +304,7 @@ if [ "$ES" -ne 0 ]; then fi # Make sure log directory exists -mkdir -p "$RUNNER_LOG_DIR" +mkdir -p "$EMQX_LOG_DIR" # turn off debug as this is static set +x @@ -396,7 +396,7 @@ relx_get_pid() { remsh() { # Generate a unique id used to allow multiple remsh to the same node # transparently - id="remsh$(relx_gen_id)-${NAME}" + id="remsh$(gen_node_id)-${NAME}" # shellcheck disable=SC2086 # Setup remote shell command to control node @@ -424,7 +424,7 @@ remsh() { } # Generate a random id -relx_gen_id() { +gen_node_id() { od -t u -N 4 /dev/urandom | head -n1 | awk '{print $2 % 1000}' } @@ -757,7 +757,7 @@ generate_config() { local node_name="$2" ## Delete the *.siz files first or it can't start after ## changing the config 'log.rotation.size' - rm -f "${RUNNER_LOG_DIR}"/*.siz + rm -f "${EMQX_LOG_DIR}"/*.siz ## timestamp for each generation local NOW_TIME @@ -861,7 +861,13 @@ 
wait_until_return_val() { done } -# backward compatible with 4.x +# First, there is EMQX_DEFAULT_LOG_HANDLER which can control the default values +# to be used when generating configs. +# It's set in docker entrypoint and in systemd service file. +# +# To be backward compatible with 4.x and v5.0.0 ~ v5.0.24/e5.0.2: +# if EMQX_LOG__TO is set, we try to enable handlers from environment variables. +# i.e. it overrides the default value set in EMQX_DEFAULT_LOG_HANDLER tr_log_to_env() { local log_to=${EMQX_LOG__TO:-undefined} # unset because it's unknown to 5.0 @@ -893,13 +899,11 @@ tr_log_to_env() { maybe_log_to_console() { if [ "${EMQX_LOG__TO:-}" = 'default' ]; then - # want to use config file defaults, do nothing + # want to use defaults, do nothing unset EMQX_LOG__TO else tr_log_to_env - # ensure defaults - export EMQX_LOG__CONSOLE_HANDLER__ENABLE="${EMQX_LOG__CONSOLE_HANDLER__ENABLE:-true}" - export EMQX_LOG__FILE_HANDLERS__DEFAULT__ENABLE="${EMQX_LOG__FILE_HANDLERS__DEFAULT__ENABLE:-false}" + export EMQX_DEFAULT_LOG_HANDLER=${EMQX_DEFAULT_LOG_HANDLER:-console} fi } @@ -979,7 +983,7 @@ diagnose_boot_failure_and_die() { local ps_line ps_line="$(find_emqx_process)" if [ -z "$ps_line" ]; then - echo "Find more information in the latest log file: ${RUNNER_LOG_DIR}/erlang.log.*" + echo "Find more information in the latest log file: ${EMQX_LOG_DIR}/erlang.log.*" exit 1 fi if ! relx_nodetool "ping" > /dev/null; then @@ -990,7 +994,7 @@ diagnose_boot_failure_and_die() { fi if ! relx_nodetool 'eval' 'true = emqx:is_running()' > /dev/null; then logerr "$NAME node is started, but failed to complete the boot sequence in time." 
- echo "Please collect the logs in ${RUNNER_LOG_DIR} and report a bug to EMQX team at https://github.com/emqx/emqx/issues/new/choose" + echo "Please collect the logs in ${EMQX_LOG_DIR} and report a bug to EMQX team at https://github.com/emqx/emqx/issues/new/choose" pipe_shutdown exit 3 fi @@ -1065,7 +1069,7 @@ case "${COMMAND}" in mkdir -p "$PIPE_DIR" - "$BINDIR/run_erl" -daemon "$PIPE_DIR" "$RUNNER_LOG_DIR" \ + "$BINDIR/run_erl" -daemon "$PIPE_DIR" "$EMQX_LOG_DIR" \ "$(relx_start_command)" WAIT_TIME=${EMQX_WAIT_FOR_START:-120} @@ -1273,7 +1277,7 @@ case "${COMMAND}" in then "$REL_DIR/elixir" \ --hidden \ - --name "rand-$(relx_gen_id)-$NAME" \ + --name "rand-$(gen_node_id)-$NAME" \ --cookie "$COOKIE" \ --boot "$REL_DIR/start_clean" \ --boot-var RELEASE_LIB "$ERTS_LIB_DIR" \ diff --git a/bin/node_dump b/bin/node_dump index 1c4df08b5..60c995885 100755 --- a/bin/node_dump +++ b/bin/node_dump @@ -10,10 +10,10 @@ echo "Running node dump in ${RUNNER_ROOT_DIR}" cd "${RUNNER_ROOT_DIR}" -DUMP="$RUNNER_LOG_DIR/node_dump_$(date +"%Y%m%d_%H%M%S").tar.gz" -CONF_DUMP="$RUNNER_LOG_DIR/conf.dump" -LICENSE_INFO="$RUNNER_LOG_DIR/license_info.txt" -SYSINFO="$RUNNER_LOG_DIR/sysinfo.txt" +DUMP="$EMQX_LOG_DIR/node_dump_$(date +"%Y%m%d_%H%M%S").tar.gz" +CONF_DUMP="$EMQX_LOG_DIR/conf.dump" +LICENSE_INFO="$EMQX_LOG_DIR/license_info.txt" +SYSINFO="$EMQX_LOG_DIR/sysinfo.txt" LOG_MAX_AGE_DAYS=3 @@ -74,7 +74,7 @@ done # Pack files { - find "$RUNNER_LOG_DIR" -mtime -"${LOG_MAX_AGE_DAYS}" \( -name '*.log.*' -or -name 'run_erl.log*' \) + find "$EMQX_LOG_DIR" -mtime -"${LOG_MAX_AGE_DAYS}" \( -name '*.log.*' -or -name 'run_erl.log*' \) echo "${SYSINFO}" echo "${CONF_DUMP}" echo "${LICENSE_INFO}" diff --git a/build b/build index 77d4dbfc8..2924f8a6f 100755 --- a/build +++ b/build @@ -92,7 +92,7 @@ log() { } make_docs() { - local libs_dir1 libs_dir2 libs_dir3 docdir dashboard_www_static + local libs_dir1 libs_dir2 libs_dir3 docdir libs_dir1="$("$FIND" "_build/$PROFILE/lib/" -maxdepth 2 -name ebin 
-type d)" if [ -d "_build/default/lib/" ]; then libs_dir2="$("$FIND" "_build/default/lib/" -maxdepth 2 -name ebin -type d)" @@ -113,31 +113,32 @@ make_docs() { ;; esac docdir="_build/docgen/$PROFILE" - dashboard_www_static='apps/emqx_dashboard/priv/www/static/' - mkdir -p "$docdir" "$dashboard_www_static" + mkdir -p "$docdir" # shellcheck disable=SC2086 erl -noshell -pa $libs_dir1 $libs_dir2 $libs_dir3 -eval \ "ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE), \ halt(0)." - cp "$docdir"/bridge-api-*.json "$dashboard_www_static" - cp "$docdir"/hot-config-schema-*.json "$dashboard_www_static" } assert_no_compile_time_only_deps() { : } -make_rel() { +just_compile() { ./scripts/pre-compile.sh "$PROFILE" # make_elixir_rel always create rebar.lock # delete it to make git clone + checkout work because we use shallow close for rebar deps rm -f rebar.lock # compile all beams ./rebar3 as "$PROFILE" compile - # generate docs (require beam compiled), generated to etc and priv dirs make_docs +} + +make_rel() { + local release_or_tar="${1}" + just_compile # now assemble the release tar - ./rebar3 as "$PROFILE" tar + ./rebar3 as "$PROFILE" "$release_or_tar" assert_no_compile_time_only_deps } @@ -223,7 +224,7 @@ make_tgz() { else # build the src_tarball again to ensure relup is included # elixir does not have relup yet. 
- make_rel + make_rel tar local relpath="_build/${PROFILE}/rel/emqx" full_vsn="$(./pkg-vsn.sh "$PROFILE" --long)" @@ -377,11 +378,14 @@ export_elixir_release_vars() { log "building artifact=$ARTIFACT for profile=$PROFILE" case "$ARTIFACT" in + apps) + just_compile + ;; doc|docs) make_docs ;; rel) - make_rel + make_rel release ;; relup) make_relup @@ -400,7 +404,7 @@ case "$ARTIFACT" in if [ "${IS_ELIXIR:-}" = 'yes' ]; then make_elixir_rel else - make_rel + make_rel tar fi env EMQX_REL="$(pwd)" \ EMQX_BUILD="${PROFILE}" \ diff --git a/changes/ce/feat-10457.en.md b/changes/ce/feat-10457.en.md index d6a44bd53..966569a1c 100644 --- a/changes/ce/feat-10457.en.md +++ b/changes/ce/feat-10457.en.md @@ -1,4 +1,4 @@ Deprecates the integration with StatsD. -Since StatsD is not used a lot. So we will deprecate it in the next release -and plan to remove it in 5.1 +There seemed to be no user using StatsD integration, so we have decided to hide this feature +for now. We will either remove or revive it based on requirements in the future. diff --git a/changes/ce/feat-10491.en.md b/changes/ce/feat-10491.en.md new file mode 100644 index 000000000..e1c38b6bb --- /dev/null +++ b/changes/ce/feat-10491.en.md @@ -0,0 +1 @@ +Rename `etcd.ssl` to `etcd.ssl_options` to keep all SSL options consistent in the configuration file. diff --git a/changes/ce/feat-10512.en.md b/changes/ce/feat-10512.en.md new file mode 100644 index 000000000..e6c162742 --- /dev/null +++ b/changes/ce/feat-10512.en.md @@ -0,0 +1,3 @@ +Improved the storage format of Unicode characters in data files. +Now we can store Unicode characters normally. +For example: "SELECT * FROM \"t/1\" WHERE clientid = \"-测试专用-\"" diff --git a/changes/ce/feat-10571.en.md b/changes/ce/feat-10571.en.md new file mode 100644 index 000000000..49b564ef9 --- /dev/null +++ b/changes/ce/feat-10571.en.md @@ -0,0 +1,2 @@ +Do not emit useless crash report when EMQX stops.
+Previously, when EMQX (and `emqx_topic_metrics` in particular) stopped and removed underlying tables, some messages were still being handled and crashed. diff --git a/changes/ce/feat-10588.en.md b/changes/ce/feat-10588.en.md new file mode 100644 index 000000000..2f907a549 --- /dev/null +++ b/changes/ce/feat-10588.en.md @@ -0,0 +1,2 @@ +Increase the time precision of trace logs from second to microsecond. +For example, change from `2023-05-02T08:43:50+00:00` to `2023-05-02T08:43:50.237945+00:00`. diff --git a/changes/ce/fix-10462.en.md b/changes/ce/fix-10462.en.md new file mode 100644 index 000000000..9e7922be2 --- /dev/null +++ b/changes/ce/fix-10462.en.md @@ -0,0 +1,4 @@ +Deprecate config `broker.shared_dispatch_ack_enabled`. +This was designed to avoid dispatching messages to a shared-subscription session which has the client disconnected. +However since v5.0.9, this feature is no longer useful because the shared-subscription messages in an expired session will be redispatched to other sessions in the group. +See also: https://github.com/emqx/emqx/pull/9104 diff --git a/changes/ce/fix-10484.en.md b/changes/ce/fix-10484.en.md new file mode 100644 index 000000000..d1a501384 --- /dev/null +++ b/changes/ce/fix-10484.en.md @@ -0,0 +1,3 @@ +Fix the issue that the priority of the configuration cannot be set during rolling upgrade. +For example, when authorization is modified in v5.0.21 and then upgraded to v5.0.23 through rolling upgrade, +the authorization will be restored to the default. diff --git a/changes/ce/fix-10495.en.md b/changes/ce/fix-10495.en.md new file mode 100644 index 000000000..222f3dd5a --- /dev/null +++ b/changes/ce/fix-10495.en.md @@ -0,0 +1 @@ +Add back the limiter API `/configs/limiter`, which was deleted by mistake.
diff --git a/changes/ce/fix-10500.en.md b/changes/ce/fix-10500.en.md new file mode 100644 index 000000000..730dfb6e5 --- /dev/null +++ b/changes/ce/fix-10500.en.md @@ -0,0 +1,12 @@ +Add several fixes, enhancements and features in Mria: + - protect `mria:join/1,2` with a global lock to prevent conflicts between + two nodes trying to join each other simultaneously + [Mria PR](https://github.com/emqx/mria/pull/137) + - implement new function `mria:sync_transaction/4,3,2`, which blocks the caller until + a transaction is imported to the local node (if the local node is a replicant, otherwise, + it behaves exactly the same as `mria:transaction/3,2`) + [Mria PR](https://github.com/emqx/mria/pull/136) + - optimize `mria:running_nodes/0` + [Mria PR](https://github.com/emqx/mria/pull/135) + - optimize `mria:ro_transaction/2` when called on a replicant node + [Mria PR](https://github.com/emqx/mria/pull/134). diff --git a/changes/ce/fix-10518.en.md b/changes/ce/fix-10518.en.md new file mode 100644 index 000000000..87d001e91 --- /dev/null +++ b/changes/ce/fix-10518.en.md @@ -0,0 +1,6 @@ +Add the following fixes and features in Mria: + - call `mria_rlog:role/1` safely in mria_membership to ensure that mria_membership + gen_server won't crash if RPC to another node fails + [Mria PR](https://github.com/emqx/mria/pull/139) + - Add extra field to ?rlog_sync table to facilitate extending this functionality in future + [Mria PR](https://github.com/emqx/mria/pull/138). diff --git a/changes/ce/fix-10548.en.md b/changes/ce/fix-10548.en.md new file mode 100644 index 000000000..d96f0b57f --- /dev/null +++ b/changes/ce/fix-10548.en.md @@ -0,0 +1,2 @@ +Fixed a race condition in the HTTP driver that would result in an error rather than a retry of the request. 
+Related fix in the driver: https://github.com/emqx/ehttpc/pull/45 diff --git a/changes/ce/perf-10417.en.md b/changes/ce/perf-10417.en.md new file mode 100644 index 000000000..ad83d2cf4 --- /dev/null +++ b/changes/ce/perf-10417.en.md @@ -0,0 +1 @@ +Improve get config performance by eliminating temporary references. diff --git a/changes/ce/perf-10487.en.md b/changes/ce/perf-10487.en.md new file mode 100644 index 000000000..6f2b2d156 --- /dev/null +++ b/changes/ce/perf-10487.en.md @@ -0,0 +1 @@ +Optimize the instance of limiter for whose rate is `infinity` to reduce memory and CPU usage. diff --git a/changes/ce/perf-10490.en.md b/changes/ce/perf-10490.en.md new file mode 100644 index 000000000..5c1c183a5 --- /dev/null +++ b/changes/ce/perf-10490.en.md @@ -0,0 +1 @@ +Remove the default limit of connect rate which used to be `1000/s` diff --git a/changes/ce/perf-10525.en.md b/changes/ce/perf-10525.en.md new file mode 100644 index 000000000..b67e88289 --- /dev/null +++ b/changes/ce/perf-10525.en.md @@ -0,0 +1,2 @@ +Reduce resource usage per MQTT packet handling. + diff --git a/changes/ce/perf-10591.en.md b/changes/ce/perf-10591.en.md new file mode 100644 index 000000000..2e14312d1 --- /dev/null +++ b/changes/ce/perf-10591.en.md @@ -0,0 +1,3 @@ +Improve the configuration of the limiter. +- Simplify the memory representation of the limiter configuration. +- Make sure the node-level limiter can really work when the listener's limiter configuration is omitted. diff --git a/changes/ee/feat-10378.en.md b/changes/ee/feat-10378.en.md new file mode 100644 index 000000000..ebdd299c8 --- /dev/null +++ b/changes/ee/feat-10378.en.md @@ -0,0 +1 @@ +Implement Pulsar Producer Bridge, which supports publishing messages to Pulsar from MQTT topics. diff --git a/changes/ee/feat-10425.en.md b/changes/ee/feat-10425.en.md new file mode 100644 index 000000000..7144241df --- /dev/null +++ b/changes/ee/feat-10425.en.md @@ -0,0 +1 @@ +Implement OpenTSDB data bridge. 
diff --git a/changes/ee/feat-10498.en.md b/changes/ee/feat-10498.en.md new file mode 100644 index 000000000..7222f8957 --- /dev/null +++ b/changes/ee/feat-10498.en.md @@ -0,0 +1 @@ +Implement Oracle Database Bridge, which supports publishing messages to Oracle Database from MQTT topics. diff --git a/changes/v5.0.24.en.md b/changes/v5.0.24.en.md new file mode 100644 index 000000000..4fa5cdd4f --- /dev/null +++ b/changes/v5.0.24.en.md @@ -0,0 +1,89 @@ +# v5.0.24 + +## Enhancements + +- [#10457](https://github.com/emqx/emqx/pull/10457) Deprecates the integration with StatsD. + + There seemd to be no user using StatsD integration, so we have decided to hide this feature + for now. We will either remove or revive it based on requirements in the future. + +- [#10458](https://github.com/emqx/emqx/pull/10458) Set the level of plugin configuration options to low level, + in most cases, users only need to manage plugins on the dashboard + without the need for manual modification, so we lowered the level. + +- [#10491](https://github.com/emqx/emqx/pull/10491) Rename `etcd.ssl` to `etcd.ssl_options` to keep all of SSL options consistent in the configuration file. + +- [#10512](https://github.com/emqx/emqx/pull/10512) Improved the storage format of Unicode characters in data files, + Now we can store Unicode characters normally. + For example: "SELECT * FROM \"t/1\" WHERE clientid = \"-测试专用-\"" + +- [#10487](https://github.com/emqx/emqx/pull/10487) Optimize the instance of limiter for whose rate is `infinity` to reduce memory and CPU usage. + +- [#10490](https://github.com/emqx/emqx/pull/10490) Remove the default limit of connect rate which used to be `1000/s` + +## Bug Fixes + +- [#10407](https://github.com/emqx/emqx/pull/10407) Improve 'emqx_alarm' performance by using Mnesia dirty operations and avoiding + unnecessary calls from 'emqx_resource_manager' to reactivate alarms that have been already activated. 
+ Use new safe 'emqx_alarm' API to activate/deactivate alarms to ensure that emqx_resource_manager + doesn't crash because of alarm timeouts. + The crashes were possible when the following conditions co-occurred: + - a relatively high number of failing resources, e.g. bridges tried to activate alarms on re-occurring errors; + - the system experienced a very high load. + +- [#10420](https://github.com/emqx/emqx/pull/10420) Fix HTTP path handling when composing the URL for the HTTP requests in authentication and authorization modules. + * Avoid unnecessary URL normalization since we cannot assume that external servers treat original and normalized URLs equally. This led to bugs like [#10411](https://github.com/emqx/emqx/issues/10411). + * Fix the issue that path segments could be HTTP encoded twice. + +- [#10422](https://github.com/emqx/emqx/pull/10422) Fixed a bug where external plugins could not be configured via environment variables in a lone-node cluster. + +- [#10448](https://github.com/emqx/emqx/pull/10448) Fix a compatibility issue of limiter configuration introduced by v5.0.23 which broke the upgrade from previous versions if the `capacity` is `infinity`. + + In v5.0.23 we have replaced `capacity` with `burst`. After this fix, a `capacity = infinity` config will be automatically converted to equivalent `burst = 0`. + +- [#10449](https://github.com/emqx/emqx/pull/10449) Validate the ssl_options and header configurations when creating authentication http (`authn_http`). + Prior to this, incorrect `ssl` configuration could result in successful creation but the entire authn being unusable. + +- [#10455](https://github.com/emqx/emqx/pull/10455) Fixed an issue that could cause (otherwise harmless) noise in the logs. 
+ + During some particularly slow synchronous calls to bridges, some late replies could be sent to connections processes that were no longer expecting a reply, and then emit an error log like: + + ``` + 2023-04-19T18:24:35.350233+00:00 [error] msg: unexpected_info, mfa: emqx_channel:handle_info/2, line: 1278, peername: 172.22.0.1:36384, clientid: caribdis_bench_sub_1137967633_4788, info: {#Ref<0.408802983.1941504010.189402>,{ok,200,[{<<"cache-control">>,<<"max-age=0, ...">>}} + ``` + + Those logs are harmless, but they could flood and worry the users without need. + +- [#10462](https://github.com/emqx/emqx/pull/10462) Deprecate config `broker.shared_dispatch_ack_enabled`. + This was designed to avoid dispatching messages to a shared-subscription session which has the client disconnected. + However since v5.0.9, this feature is no longer useful because the shared-subscrption messages in a expired session will be redispatched to other sessions in the group. + See also: https://github.com/emqx/emqx/pull/9104 + +- [#10463](https://github.com/emqx/emqx/pull/10463) Improve bridges API error handling. + If Webhook bridge URL is not valid, bridges API will return '400' error instead of '500'. + +- [#10484](https://github.com/emqx/emqx/pull/10484) Fix the issue that the priority of the configuration cannot be set during rolling upgrade. + For example, when authorization is modified in v5.0.21 and then upgraded v5.0.23 through rolling upgrade, + the authorization will be restored to the default. + +- [#10495](https://github.com/emqx/emqx/pull/10495) Add the limiter API `/configs/limiter` which was deleted by mistake back. 
+ +- [#10500](https://github.com/emqx/emqx/pull/10500) Add several fixes, enhancements and features in Mria: + - protect `mria:join/1,2` with a global lock to prevent conflicts between + two nodes trying to join each other simultaneously + [Mria PR](https://github.com/emqx/mria/pull/137) + - implement new function `mria:sync_transaction/4,3,2`, which blocks the caller until + a transaction is imported to the local node (if the local node is a replicant, otherwise, + it behaves exactly the same as `mria:transaction/3,2`) + [Mria PR](https://github.com/emqx/mria/pull/136) + - optimize `mria:running_nodes/0` + [Mria PR](https://github.com/emqx/mria/pull/135) + - optimize `mria:ro_transaction/2` when called on a replicant node + [Mria PR](https://github.com/emqx/mria/pull/134). + +- [#10518](https://github.com/emqx/emqx/pull/10518) Add the following fixes and features in Mria: + - call `mria_rlog:role/1` safely in mria_membership to ensure that mria_membership + gen_server won't crash if RPC to another node fails + [Mria PR](https://github.com/emqx/mria/pull/139) + - Add extra field to ?rlog_sync table to facilitate extending this functionality in future + [Mria PR](https://github.com/emqx/mria/pull/138). diff --git a/deploy/charts/emqx/Chart.yaml b/deploy/charts/emqx/Chart.yaml index 312a9dfbe..9c23f7c15 100644 --- a/deploy/charts/emqx/Chart.yaml +++ b/deploy/charts/emqx/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 5.0.23 +version: 5.0.24 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. 
-appVersion: 5.0.23 +appVersion: 5.0.24 diff --git a/deploy/docker/Dockerfile.msodbc b/deploy/docker/Dockerfile.msodbc new file mode 100644 index 000000000..d7b3457ac --- /dev/null +++ b/deploy/docker/Dockerfile.msodbc @@ -0,0 +1,25 @@ +## This Dockerfile should not run in GitHub Action or any other automated process. +## It should be manually executed by the needs of the user. +## +## Before manaually execute: +## Please confirm the EMQX-Enterprise version you are using and modify the base layer image tag +## ```bash +## $ docker build -f=Dockerfile.msodbc -t emqx-enterprise-with-msodbc:5.0.3-alpha.2 . +## ``` + +# FROM emqx/emqx-enterprise:latest +FROM emqx/emqx-enterprise:5.0.3-alpha.2 + +USER root + +RUN apt-get update \ + && apt-get install -y gnupg2 curl apt-utils \ + && curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add - \ + && curl https://packages.microsoft.com/config/debian/11/prod.list > /etc/apt/sources.list.d/mssql-mkc crelease.list \ + && apt-get update \ + && ACCEPT_EULA=Y apt-get install -y msodbcsql17 unixodbc-dev \ + && sed -i 's/ODBC Driver 17 for SQL Server/ms-sql/g' /etc/odbcinst.ini \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +USER emqx diff --git a/deploy/docker/docker-entrypoint.sh b/deploy/docker/docker-entrypoint.sh index 1824e1ee0..056f0675f 100755 --- a/deploy/docker/docker-entrypoint.sh +++ b/deploy/docker/docker-entrypoint.sh @@ -1,9 +1,7 @@ #!/usr/bin/env bash -## EMQ docker image start script -# Huang Rui -# EMQX Team -## Shell setting +## EMQ docker image start script + if [[ -n "$DEBUG" ]]; then set -ex else diff --git a/deploy/packages/emqx.service b/deploy/packages/emqx.service index d826e358b..2dbe550bc 100644 --- a/deploy/packages/emqx.service +++ b/deploy/packages/emqx.service @@ -10,8 +10,8 @@ Group=emqx Type=simple Environment=HOME=/var/lib/emqx -# Enable logging to file -Environment=EMQX_LOG__TO=default +# log to file by default (if no log handler config) 
+Environment=EMQX_DEFAULT_LOG_HANDLER=file # Start 'foreground' but not 'start' (daemon) mode. # Because systemd monitor/restarts 'simple' services diff --git a/dev b/dev new file mode 100755 index 000000000..18e4cbc93 --- /dev/null +++ b/dev @@ -0,0 +1,405 @@ +#!/usr/bin/env bash + +set -euo pipefail + +UNAME="$(uname -s)" + +PROJ_ROOT="$(git rev-parse --show-toplevel)" +cd "$PROJ_ROOT" + +logerr() { + if [ "${TERM:-dumb}" = dumb ]; then + echo -e "ERROR: $*" 1>&2 + else + echo -e "$(tput setaf 1)ERROR: $*$(tput sgr0)" 1>&2 + fi +} + +usage() { +cat <.config and vm.