chore: merge master into dev/ee5.0
commit 4135910b42
@ -0,0 +1,3 @@
r7000i.log
r7001i.log
r7002i.log
@ -0,0 +1,91 @@
|
|||
name: 'Create MacOS package'
|
||||
inputs:
|
||||
profile: # emqx, emqx-enterprise
|
||||
required: true
|
||||
type: string
|
||||
otp: # 24.2.1-1, 23.3.4.9-3
|
||||
required: true
|
||||
type: string
|
||||
os:
|
||||
required: false
|
||||
type: string
|
||||
default: macos-11
|
||||
apple_id_password:
|
||||
required: true
|
||||
type: string
|
||||
apple_developer_identity:
|
||||
required: true
|
||||
type: string
|
||||
apple_developer_id_bundle:
|
||||
required: true
|
||||
type: string
|
||||
apple_developer_id_bundle_password:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
runs:
|
||||
using: composite
|
||||
steps:
|
||||
- name: prepare
|
||||
shell: bash
|
||||
run: |
|
||||
brew update
|
||||
brew install curl zip unzip kerl coreutils openssl@1.1
|
||||
echo "/usr/local/opt/bison/bin" >> $GITHUB_PATH
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
- uses: actions/cache@v2
|
||||
id: cache
|
||||
with:
|
||||
path: ~/.kerl/${{ inputs.otp }}
|
||||
key: otp-install-${{ inputs.otp }}-${{ inputs.os }}-static-ssl-disable-hipe-disable-jit
|
||||
- name: build erlang
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
shell: bash
|
||||
env:
|
||||
KERL_BUILD_BACKEND: git
|
||||
OTP_GITHUB_URL: https://github.com/emqx/otp
|
||||
KERL_CONFIGURE_OPTIONS: --disable-dynamic-ssl-lib --with-ssl=/usr/local/opt/openssl@1.1 --disable-hipe --disable-jit
|
||||
run: |
|
||||
kerl update releases
|
||||
kerl build ${{ inputs.otp }}
|
||||
kerl install ${{ inputs.otp }} $HOME/.kerl/${{ inputs.otp }}
|
||||
- name: build ${{ inputs.profile }}
|
||||
env:
|
||||
AUTO_INSTALL_BUILD_DEPS: 1
|
||||
APPLE_SIGN_BINARIES: 1
|
||||
APPLE_ID: developers@emqx.io
|
||||
APPLE_TEAM_ID: 26N6HYJLZA
|
||||
APPLE_ID_PASSWORD: ${{ inputs.apple_id_password }}
|
||||
APPLE_DEVELOPER_IDENTITY: ${{ inputs.apple_developer_identity }}
|
||||
APPLE_DEVELOPER_ID_BUNDLE: ${{ inputs.apple_developer_id_bundle }}
|
||||
APPLE_DEVELOPER_ID_BUNDLE_PASSWORD: ${{ inputs.apple_developer_id_bundle_password }}
|
||||
shell: bash
|
||||
run: |
|
||||
. $HOME/.kerl/${{ inputs.otp }}/activate
|
||||
make ensure-rebar3
|
||||
sudo cp rebar3 /usr/local/bin/rebar3
|
||||
make ${{ inputs.profile }}-tgz
|
||||
- name: test ${{ inputs.profile }}
|
||||
shell: bash
|
||||
run: |
|
||||
pkg_name=$(find _packages/${{ inputs.profile }} -mindepth 1 -maxdepth 1 -iname \*.zip)
|
||||
mkdir emqx
|
||||
unzip -d emqx $pkg_name > /dev/null
|
||||
# gsed -i '/emqx_telemetry/d' ./emqx/data/loaded_plugins
|
||||
./emqx/bin/emqx start || cat emqx/log/erlang.log.1
|
||||
ready='no'
|
||||
for i in {1..30}; do
|
||||
if curl -fs 127.0.0.1:18083/status > /dev/null; then
|
||||
ready='yes'
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
if [ "$ready" != "yes" ]; then
|
||||
echo "Timed out waiting for emqx to be ready"
|
||||
cat emqx/log/erlang.log.1
|
||||
exit 1
|
||||
fi
|
||||
./emqx/bin/emqx_ctl status
|
||||
./emqx/bin/emqx stop
|
||||
rm -rf emqx
|
|
@ -150,68 +150,26 @@ jobs:
|
|||
name: source
|
||||
path: .
|
||||
- name: unzip source code
|
||||
run: unzip -q source.zip
|
||||
run: |
|
||||
ln -s . source
|
||||
unzip -q source.zip
|
||||
rm source source.zip
|
||||
- name: prepare
|
||||
run: |
|
||||
brew update
|
||||
brew install curl zip unzip kerl coreutils
|
||||
echo "/usr/local/opt/bison/bin" >> $GITHUB_PATH
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
git config --global credential.helper store
|
||||
- uses: actions/cache@v2
|
||||
id: cache
|
||||
- uses: ./.github/actions/package-macos
|
||||
with:
|
||||
path: ~/.kerl/${{ matrix.otp }}
|
||||
key: otp-install-${{ matrix.otp }}-${{ matrix.os }}
|
||||
- name: build erlang
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
timeout-minutes: 60
|
||||
env:
|
||||
KERL_BUILD_BACKEND: git
|
||||
OTP_GITHUB_URL: https://github.com/emqx/otp
|
||||
run: |
|
||||
kerl update releases
|
||||
kerl build ${{ matrix.otp }}
|
||||
kerl install ${{ matrix.otp }} $HOME/.kerl/${{ matrix.otp }}
|
||||
|
||||
- name: build
|
||||
working-directory: source
|
||||
env:
|
||||
AUTO_INSTALL_BUILD_DEPS: 1
|
||||
run: |
|
||||
. $HOME/.kerl/${{ matrix.otp }}/activate
|
||||
make ensure-rebar3
|
||||
sudo cp rebar3 /usr/local/bin/rebar3
|
||||
rm -rf _build/${{ matrix.profile }}/lib
|
||||
make ${{ matrix.profile }}-tgz
|
||||
- name: test
|
||||
working-directory: source
|
||||
run: |
|
||||
pkg_name=$(find _packages/${{ matrix.profile }} -mindepth 1 -maxdepth 1 -iname \*.tar.gz)
|
||||
mkdir -p emqx
|
||||
tar -C emqx -zxf $pkg_name
|
||||
# gsed -i '/emqx_telemetry/d' ./emqx/data/loaded_plugins
|
||||
./emqx/bin/emqx start || cat emqx/log/erlang.log.1
|
||||
ready='no'
|
||||
for i in {1..18}; do
|
||||
if curl -fs 127.0.0.1:18083/status > /dev/null; then
|
||||
ready='yes'
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
if [ "$ready" != "yes" ]; then
|
||||
echo "Timed out waiting for emqx to be ready"
|
||||
cat emqx/log/erlang.log.1
|
||||
exit 1
|
||||
fi
|
||||
./emqx/bin/emqx_ctl status
|
||||
./emqx/bin/emqx stop
|
||||
rm -rf emqx
|
||||
profile: ${{ matrix.profile }}
|
||||
otp: ${{ matrix.otp }}
|
||||
os: ${{ matrix.os }}
|
||||
apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }}
|
||||
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
|
||||
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
|
||||
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
|
||||
- uses: actions/upload-artifact@v1
|
||||
with:
|
||||
name: ${{ matrix.profile }}-${{ matrix.otp }}
|
||||
path: source/_packages/${{ matrix.profile }}/.
|
||||
path: _packages/${{ matrix.profile }}/.
|
||||
|
||||
linux:
|
||||
needs: prepare
|
||||
|
|
|
@ -133,67 +133,26 @@ jobs:
|
|||
- emqx-enterprise
|
||||
otp:
|
||||
- 24.2.1-1
|
||||
macos:
|
||||
os:
|
||||
- macos-11
|
||||
|
||||
runs-on: ${{ matrix.macos }}
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: prepare
|
||||
run: |
|
||||
brew update
|
||||
brew install curl zip unzip kerl coreutils
|
||||
echo "/usr/local/opt/bison/bin" >> $GITHUB_PATH
|
||||
echo "/usr/local/bin" >> $GITHUB_PATH
|
||||
echo "EMQX_NAME=${{ matrix.profile }}" >> $GITHUB_ENV
|
||||
echo "BUILD_WITH_QUIC=1" >> $GITHUB_ENV
|
||||
- uses: actions/cache@v2
|
||||
id: cache
|
||||
- uses: ./.github/actions/package-macos
|
||||
with:
|
||||
path: ~/.kerl/${{ matrix.otp }}
|
||||
key: otp-install-${{ matrix.otp }}-${{ matrix.macos }}
|
||||
- name: build erlang
|
||||
if: steps.cache.outputs.cache-hit != 'true'
|
||||
timeout-minutes: 60
|
||||
env:
|
||||
KERL_BUILD_BACKEND: git
|
||||
OTP_GITHUB_URL: https://github.com/emqx/otp
|
||||
run: |
|
||||
kerl update releases
|
||||
kerl build ${{ matrix.otp }}
|
||||
kerl install ${{ matrix.otp }} $HOME/.kerl/${{ matrix.otp }}
|
||||
- name: build ${{ matrix.profile }}
|
||||
env:
|
||||
AUTO_INSTALL_BUILD_DEPS: 1
|
||||
run: |
|
||||
. $HOME/.kerl/${{ matrix.otp }}/activate
|
||||
make ensure-rebar3
|
||||
sudo cp rebar3 /usr/local/bin/rebar3
|
||||
make ${{ matrix.profile }}-tgz
|
||||
- name: test
|
||||
run: |
|
||||
pkg_name=$(find _packages/${{ matrix.profile }} -mindepth 1 -maxdepth 1 -iname \*.tar.gz)
|
||||
mkdir -p emqx
|
||||
tar -C emqx -zxf $pkg_name
|
||||
# gsed -i '/emqx_telemetry/d' ./emqx/data/loaded_plugins
|
||||
./emqx/bin/emqx start || cat emqx/log/erlang.log.1
|
||||
ready='no'
|
||||
for i in {1..30}; do
|
||||
if curl -fs 127.0.0.1:18083/status > /dev/null; then
|
||||
ready='yes'
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
if [ "$ready" != "yes" ]; then
|
||||
echo "Timed out waiting for emqx to be ready"
|
||||
cat emqx/log/erlang.log.1
|
||||
exit 1
|
||||
fi
|
||||
./emqx/bin/emqx_ctl status
|
||||
./emqx/bin/emqx stop
|
||||
rm -rf emqx
|
||||
profile: ${{ matrix.profile }}
|
||||
otp: ${{ matrix.otp }}
|
||||
os: ${{ matrix.os }}
|
||||
apple_id_password: ${{ secrets.APPLE_ID_PASSWORD }}
|
||||
apple_developer_identity: ${{ secrets.APPLE_DEVELOPER_IDENTITY }}
|
||||
apple_developer_id_bundle: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE }}
|
||||
apple_developer_id_bundle_password: ${{ secrets.APPLE_DEVELOPER_ID_BUNDLE_PASSWORD }}
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: macos
|
||||
|
|
|
@ -58,7 +58,7 @@ jobs:
|
|||
-d "{\"repo\":\"emqx/emqx\", \"tag\": \"${{ github.ref_name }}\" }" \
|
||||
${{ secrets.EMQX_IO_RELEASE_API }}
|
||||
- uses: emqx/push-helm-action@v1
|
||||
if: github.event_name == 'release' && endsWith(github.repository, 'emqx') && matrix.profile == 'emqx'
|
||||
if: github.event_name == 'release' && startsWith(github.ref_name, 'v')
|
||||
with:
|
||||
charts_dir: "${{ github.workspace }}/deploy/charts/emqx"
|
||||
version: ${{ github.ref_name }}
|
||||
|
@ -67,9 +67,9 @@ jobs:
|
|||
aws_region: "us-west-2"
|
||||
aws_bucket_name: "repos-emqx-io"
|
||||
- uses: emqx/push-helm-action@v1
|
||||
if: github.event_name == 'release' && endsWith(github.repository, 'enterprise') && matrix.profile == 'emqx-ee'
|
||||
if: github.event_name == 'release' && startsWith(github.ref_name, 'e')
|
||||
with:
|
||||
charts_dir: "${{ github.workspace }}/deploy/charts/emqx-ee"
|
||||
charts_dir: "${{ github.workspace }}/deploy/charts/emqx-enterprise"
|
||||
version: ${{ github.ref_name }}
|
||||
aws_access_key_id: ${{ secrets.AWS_ACCESS_KEY_ID }}
|
||||
aws_secret_access_key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
|
||||
|
|
|
@ -116,6 +116,7 @@ jobs:
|
|||
- dns
|
||||
profile:
|
||||
- emqx
|
||||
- emqx-enterprise
|
||||
os:
|
||||
- ["debian11", "debian:11-slim"]
|
||||
otp:
|
||||
|
@ -147,53 +148,49 @@ jobs:
|
|||
echo "TARGET=emqx/${{ matrix.profile }}" >> $GITHUB_ENV
|
||||
echo "EMQX_TAG=$(./pkg-vsn.sh ${{ matrix.profile }})" >> $GITHUB_ENV
|
||||
- run: minikube start
|
||||
- name: setup helm chart
|
||||
working-directory: source
|
||||
run: |
|
||||
minikube image load $TARGET:$EMQX_TAG
|
||||
sed -i -r "s/^appVersion: .*$/appVersion: \"$EMQX_TAG\"/g" deploy/charts/emqx/Chart.yaml
|
||||
- run: minikube image load $TARGET:$EMQX_TAG
|
||||
- name: run emqx on chart
|
||||
working-directory: source
|
||||
if: matrix.discovery == 'k8s'
|
||||
run: |
|
||||
helm install emqx \
|
||||
helm install ${{ matrix.profile }} \
|
||||
--set emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY="k8s" \
|
||||
--set emqxConfig.EMQX_CLUSTER__K8S__APISERVER="https://kubernetes.default.svc:443" \
|
||||
--set emqxConfig.EMQX_CLUSTER__K8S__SERVICE_NAME="emqx-headless" \
|
||||
--set emqxConfig.EMQX_CLUSTER__K8S__SERVICE_NAME="${{ matrix.profile }}-headless" \
|
||||
--set emqxConfig.EMQX_CLUSTER__K8S__NAMESPACE="default" \
|
||||
--set image.repository=$TARGET \
|
||||
--set image.pullPolicy=Never \
|
||||
--set image.tag=$EMQX_TAG \
|
||||
--set emqxAclConfig="" \
|
||||
--set image.pullPolicy=Never \
|
||||
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
|
||||
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
|
||||
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
|
||||
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
|
||||
deploy/charts/emqx \
|
||||
deploy/charts/${{ matrix.profile }} \
|
||||
--debug
|
||||
- name: run emqx on chart
|
||||
working-directory: source
|
||||
if: matrix.discovery == 'dns'
|
||||
run: |
|
||||
helm install emqx \
|
||||
helm install ${{ matrix.profile }} \
|
||||
--set emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY="dns" \
|
||||
--set emqxConfig.EMQX_CLUSTER__DNS__RECORD_TYPE="srv" \
|
||||
--set emqxConfig.EMQX_CLUSTER__DNS__NAME="emqx-headless.default.svc.cluster.local" \
|
||||
--set emqxConfig.EMQX_CLUSTER__DNS__NAME="${{ matrix.profile }}-headless.default.svc.cluster.local" \
|
||||
--set image.repository=$TARGET \
|
||||
--set image.pullPolicy=Never \
|
||||
--set image.tag=$EMQX_TAG \
|
||||
--set emqxAclConfig="" \
|
||||
--set image.pullPolicy=Never \
|
||||
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__RETRY_INTERVAL=2s \
|
||||
--set emqxConfig.EMQX_ZONES__DEFAULT__MQTT__MAX_TOPIC_ALIAS=10 \
|
||||
--set emqxConfig.EMQX_AUTHORIZATION__SOURCES=[] \
|
||||
--set emqxConfig.EMQX_AUTHORIZATION__NO_MATCH=allow \
|
||||
deploy/charts/emqx \
|
||||
deploy/charts/${{ matrix.profile }} \
|
||||
--debug
|
||||
- name: waiting emqx started
|
||||
timeout-minutes: 10
|
||||
run: |
|
||||
while [ "$(kubectl get StatefulSet -l app.kubernetes.io/name=emqx -o jsonpath='{.items[0].status.replicas}')" \
|
||||
!= "$(kubectl get StatefulSet -l app.kubernetes.io/name=emqx -o jsonpath='{.items[0].status.readyReplicas}')" ]; do
|
||||
while [ "$(kubectl get StatefulSet -l app.kubernetes.io/instance=${{ matrix.profile }} -o jsonpath='{.items[0].status.replicas}')" \
|
||||
!= "$(kubectl get StatefulSet -l app.kubernetes.io/instance=${{ matrix.profile }} -o jsonpath='{.items[0].status.readyReplicas}')" ]; do
|
||||
echo "==============================";
|
||||
kubectl get pods;
|
||||
echo "==============================";
|
||||
|
@ -203,28 +200,13 @@ jobs:
|
|||
- name: Check cluster
|
||||
timeout-minutes: 10
|
||||
run: |
|
||||
kubectl port-forward svc/emqx 18083:18083 &
|
||||
kubectl port-forward service/${{ matrix.profile }} 18083:18083 > /dev/null &
|
||||
while
|
||||
[ "$(curl --silent --basic -u admin:public -X GET http://127.0.0.1:18083/api/v5/cluster| jq '.nodes|length')" != "3" ];
|
||||
do
|
||||
echo "waiting emqx cluster scale"
|
||||
echo "waiting ${{ matrix.profile }} cluster scale"
|
||||
sleep 1
|
||||
done
|
||||
- name: get emqx-0 pods log
|
||||
if: failure()
|
||||
run: |
|
||||
kubectl describe pods emqx-0
|
||||
kubectl logs emqx-0
|
||||
- name: get emqx-1 pods log
|
||||
if: failure()
|
||||
run: |
|
||||
kubectl describe pods emqx-1
|
||||
kubectl logs emqx-1
|
||||
- name: get emqx-2 pods log
|
||||
if: failure()
|
||||
run: |
|
||||
kubectl describe pods emqx-2
|
||||
kubectl logs emqx-2
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
repository: emqx/paho.mqtt.testing
|
||||
|
@ -235,6 +217,7 @@ jobs:
|
|||
pip install pytest
|
||||
echo "$HOME/.local/bin" >> $GITHUB_PATH
|
||||
- name: run paho test
|
||||
timeout-minutes: 10
|
||||
run: |
|
||||
port_connected () {
|
||||
local server="$1"
|
||||
|
@ -242,7 +225,7 @@ jobs:
|
|||
echo > /dev/tcp/${server}/${port} 2>/dev/null
|
||||
}
|
||||
|
||||
kubectl port-forward service/emqx 1883:1883 > /dev/null &
|
||||
kubectl port-forward service/${{ matrix.profile }} 1883:1883 > /dev/null &
|
||||
|
||||
while ! port_connected localhost 1883; do
|
||||
echo server not listening yet...
|
||||
|
@ -250,3 +233,5 @@ jobs:
|
|||
done
|
||||
|
||||
pytest -v paho.mqtt.testing/interoperability/test_client/V5/test_connect.py -k test_basic --host "127.0.0.1"
|
||||
- if: failure()
|
||||
run: kubectl logs -l "app.kubernetes.io/instance=${{ matrix.profile }}" -c emqx --tail=1000
|
||||
|
|
|
@ -68,4 +68,3 @@ apps/emqx/test/emqx_static_checks_data/master.bpapi
|
|||
# rendered configurations
|
||||
*.conf.rendered
|
||||
lux_logs/
|
||||
.ci/docker-compose-file/redis/*.log
|
||||
|
|
|
@ -1,3 +1,16 @@
|
|||
# 5.0.9

## Enhancements

* Add `cert_common_name` and `cert_subject` placeholder support for authz_http and authz_mongo. [#8973](https://github.com/emqx/emqx/pull/8973)
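For illustration, a minimal sketch of how these new placeholders might be wired into an `authz_http` source; the endpoint URL and the exact HOCON field layout below are assumptions made for the example, not taken from this changelog:

```
authorization {
  sources = [
    {
      type = http
      method = post
      url = "http://127.0.0.1:8080/authz"   # hypothetical authorization endpoint
      body {
        username = "${username}"
        topic = "${topic}"
        # new placeholders resolved from the client's TLS certificate
        cert_common_name = "${cert_common_name}"
        cert_subject = "${cert_subject}"
      }
    }
  ]
}
```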
## Bug fixes

* Check ACLs for the Last Will Testament topic before publishing the will message. [#8930](https://github.com/emqx/emqx/pull/8930)
* Fix `GET /listeners` API crash when some nodes have not yet loaded their initial configuration. [#9002](https://github.com/emqx/emqx/pull/9002)
* Fix empty variable interpolation in authentication and authorization. Placeholders for undefined variables are now rendered as empty strings and no longer cause errors. [#8963](https://github.com/emqx/emqx/pull/8963)
* Fix the latency statistics error of the slow subscription module when `stats_type` is `internal` or `response`. [#8986](https://github.com/emqx/emqx/pull/8986)

# 5.0.8

## Bug fixes

@ -10,14 +23,21 @@
* Speed up configuration updates when some nodes in the cluster are down. [#8857](https://github.com/emqx/emqx/pull/8857)
* Fix inaccurate delayed publish caused by OS time changes. [#8926](https://github.com/emqx/emqx/pull/8926)
* Fix that EMQX can't start when the retainer is disabled. [#8911](https://github.com/emqx/emqx/pull/8911)
* Fix that Redis authn denies unknown users. [#8934](https://github.com/emqx/emqx/pull/8934)
* Fix ExProto UDP client keepalive checking error, which caused clients to never expire as long as new UDP packets kept arriving. [#8866](https://github.com/emqx/emqx/pull/8866)
* Fix that the MQTT Bridge message payload could be an empty string. [#8949](https://github.com/emqx/emqx/pull/8949)

## Enhancements

* Print a warning message when booting with the default (insecure) Erlang cookie. [#8905](https://github.com/emqx/emqx/pull/8905)
* Change the `/gateway` API path to its plural form. [#8823](https://github.com/emqx/emqx/pull/8823)
* Don't allow updating config items when they already exist in `local-override.conf`. [#8851](https://github.com/emqx/emqx/pull/8851)
* Remove `node.etc_dir` from emqx.conf, because it is never used. Also allow users to customize the logging directory. [#8892](https://github.com/emqx/emqx/pull/8892)
* Add a new API, `POST /listeners`, for creating listeners. [#8876](https://github.com/emqx/emqx/pull/8876)
* Close the ExProto client process immediately if its keepalive times out. [#8866](https://github.com/emqx/emqx/pull/8866)
* Upgrade the grpc-erl driver to 0.6.7 to support batch operations in the sending stream. [#8866](https://github.com/emqx/emqx/pull/8866)

# 5.0.7
|
||||
|
||||
|
|
Makefile
|
@ -6,7 +6,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d
|
|||
export EMQX_DEFAULT_RUNNER = debian:11-slim
|
||||
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
|
||||
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
|
||||
export EMQX_DASHBOARD_VERSION ?= v1.0.8
|
||||
export EMQX_DASHBOARD_VERSION ?= v1.0.9
|
||||
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.1-beta.4
|
||||
export EMQX_REL_FORM ?= tgz
|
||||
export QUICER_DOWNLOAD_FROM_RELEASE = 1
|
||||
|
|
README-CN.md
|
@ -112,6 +112,27 @@ make
|
|||
_build/emqx/rel/emqx/bin/emqx console
|
||||
```
|
||||
|
||||
### Building on Apple Silicon (M1, M2)

Homebrew on Apple Silicon changed its [default prefix](https://github.com/Homebrew/brew/issues/9177) from `/usr/local` to `/opt/homebrew`, which causes some compatibility issues.

For EMQX, the main impact is on `unixodbc`: if the `unixodbc` package is installed via Homebrew, [kerl](https://github.com/kerl/kerl) cannot find it when building Erlang/OTP.

The workaround is as follows:

```bash
brew install unixodbc kerl
sudo ln -s $(realpath $(brew --prefix unixodbc)) /usr/local/odbc
export CC="/usr/bin/gcc -I$(brew --prefix unixodbc)/include"
export LDFLAGS="-L$(brew --prefix unixodbc)/lib"
kerl build 24.3
mkdir ~/.kerl/installations
kerl install 24.3 ~/.kerl/installations/24.3
. ~/.kerl/installations/24.3/activate
```

Then continue the build with `make`.

## License

See [LICENSE](./LICENSE).
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
%% `apps/emqx/src/bpapi/README.md'
|
||||
|
||||
%% Community edition
|
||||
-define(EMQX_RELEASE_CE, "5.0.7").
|
||||
-define(EMQX_RELEASE_CE, "5.0.8").
|
||||
|
||||
%% Enterprise edition
|
||||
-define(EMQX_RELEASE_EE, "5.0.0-beta.3").
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
{jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
|
||||
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
|
||||
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}},
|
||||
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.4"}}},
|
||||
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.5"}}},
|
||||
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
|
||||
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.30.0"}}},
|
||||
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
{id, "emqx"},
|
||||
{description, "EMQX Core"},
|
||||
% strict semver, bump manually!
|
||||
{vsn, "5.0.8"},
|
||||
{vsn, "5.0.9"},
|
||||
{modules, []},
|
||||
{registered, []},
|
||||
{applications, [
|
||||
|
|
|
@ -354,12 +354,14 @@ handle_in(?CONNECT_PACKET(ConnPkt) = Packet, Channel) ->
|
|||
{ok, NConnPkt, NChannel = #channel{clientinfo = ClientInfo}} ->
|
||||
?TRACE("MQTT", "mqtt_packet_received", #{packet => Packet}),
|
||||
NChannel1 = NChannel#channel{
|
||||
will_msg = emqx_packet:will_msg(NConnPkt),
|
||||
alias_maximum = init_alias_maximum(NConnPkt, ClientInfo)
|
||||
},
|
||||
case authenticate(?CONNECT_PACKET(NConnPkt), NChannel1) of
|
||||
{ok, Properties, NChannel2} ->
|
||||
process_connect(Properties, NChannel2);
|
||||
%% only store will_msg after successful authn
|
||||
%% fix for: https://github.com/emqx/emqx/issues/8886
|
||||
NChannel3 = NChannel2#channel{will_msg = emqx_packet:will_msg(NConnPkt)},
|
||||
process_connect(Properties, NChannel3);
|
||||
{continue, Properties, NChannel2} ->
|
||||
handle_out(auth, {?RC_CONTINUE_AUTHENTICATION, Properties}, NChannel2);
|
||||
{error, ReasonCode} ->
|
||||
|
@ -1165,10 +1167,11 @@ handle_call(
|
|||
Channel = #channel{
|
||||
conn_state = ConnState,
|
||||
will_msg = WillMsg,
|
||||
clientinfo = ClientInfo,
|
||||
conninfo = #{proto_ver := ProtoVer}
|
||||
}
|
||||
) ->
|
||||
(WillMsg =/= undefined) andalso publish_will_msg(WillMsg),
|
||||
(WillMsg =/= undefined) andalso publish_will_msg(ClientInfo, WillMsg),
|
||||
Channel1 =
|
||||
case ConnState of
|
||||
connected -> ensure_disconnected(kicked, Channel);
|
||||
|
@ -1359,8 +1362,10 @@ handle_timeout(
|
|||
end;
|
||||
handle_timeout(_TRef, expire_session, Channel) ->
|
||||
shutdown(expired, Channel);
|
||||
handle_timeout(_TRef, will_message, Channel = #channel{will_msg = WillMsg}) ->
|
||||
(WillMsg =/= undefined) andalso publish_will_msg(WillMsg),
|
||||
handle_timeout(
|
||||
_TRef, will_message, Channel = #channel{clientinfo = ClientInfo, will_msg = WillMsg}
|
||||
) ->
|
||||
(WillMsg =/= undefined) andalso publish_will_msg(ClientInfo, WillMsg),
|
||||
{ok, clean_timer(will_timer, Channel#channel{will_msg = undefined})};
|
||||
handle_timeout(
|
||||
_TRef,
|
||||
|
@ -1434,12 +1439,14 @@ terminate({shutdown, kicked}, Channel) ->
|
|||
run_terminate_hook(kicked, Channel);
|
||||
terminate({shutdown, Reason}, Channel) when
|
||||
Reason =:= discarded;
|
||||
Reason =:= takenover;
|
||||
Reason =:= not_authorized
|
||||
Reason =:= takenover
|
||||
->
|
||||
run_terminate_hook(Reason, Channel);
|
||||
terminate(Reason, Channel = #channel{will_msg = WillMsg}) ->
|
||||
(WillMsg =/= undefined) andalso publish_will_msg(WillMsg),
|
||||
terminate(Reason, Channel = #channel{clientinfo = ClientInfo, will_msg = WillMsg}) ->
|
||||
%% since will_msg is set to undefined as soon as it is published,
|
||||
%% if will_msg still exists when the session is terminated, it
|
||||
%% must be published immediately.
|
||||
WillMsg =/= undefined andalso publish_will_msg(ClientInfo, WillMsg),
|
||||
(Reason =:= expired) andalso persist_if_session(Channel),
|
||||
run_terminate_hook(Reason, Channel).
|
||||
|
||||
|
@ -2098,10 +2105,10 @@ ensure_disconnected(
|
|||
|
||||
maybe_publish_will_msg(Channel = #channel{will_msg = undefined}) ->
|
||||
Channel;
|
||||
maybe_publish_will_msg(Channel = #channel{will_msg = WillMsg}) ->
|
||||
maybe_publish_will_msg(Channel = #channel{clientinfo = ClientInfo, will_msg = WillMsg}) ->
|
||||
case will_delay_interval(WillMsg) of
|
||||
0 ->
|
||||
ok = publish_will_msg(WillMsg),
|
||||
ok = publish_will_msg(ClientInfo, WillMsg),
|
||||
Channel#channel{will_msg = undefined};
|
||||
I ->
|
||||
ensure_timer(will_timer, timer:seconds(I), Channel)
|
||||
|
@ -2114,9 +2121,23 @@ will_delay_interval(WillMsg) ->
|
|||
0
|
||||
).
|
||||
|
||||
publish_will_msg(Msg) ->
|
||||
_ = emqx_broker:publish(Msg),
|
||||
ok.
|
||||
publish_will_msg(ClientInfo, Msg = #message{topic = Topic}) ->
|
||||
case emqx_access_control:authorize(ClientInfo, publish, Topic) of
|
||||
allow ->
|
||||
_ = emqx_broker:publish(Msg),
|
||||
ok;
|
||||
deny ->
|
||||
?tp(
|
||||
warning,
|
||||
last_will_testament_publish_denied,
|
||||
#{
|
||||
client_info => ClientInfo,
|
||||
topic => Topic,
|
||||
message => Msg
|
||||
}
|
||||
),
|
||||
ok
|
||||
end.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Disconnect Reason
|
||||
|
|
|
@ -476,7 +476,7 @@ read_override_conf(#{} = Opts) ->
|
|||
|
||||
override_conf_file(Opts) when is_map(Opts) ->
|
||||
Key =
|
||||
case maps:get(override_to, Opts, local) of
|
||||
case maps:get(override_to, Opts, cluster) of
|
||||
local -> local_override_conf_file;
|
||||
cluster -> cluster_override_conf_file
|
||||
end,
|
||||
|
|
|
@ -43,6 +43,7 @@
|
|||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
-export([is_mutable/3]).
|
||||
|
||||
-define(MOD, {mod}).
|
||||
-define(WKEY, '?').
|
||||
|
@ -229,15 +230,26 @@ process_update_request([_], _Handlers, {remove, _Opts}) ->
|
|||
process_update_request(ConfKeyPath, _Handlers, {remove, Opts}) ->
|
||||
OldRawConf = emqx_config:get_root_raw(ConfKeyPath),
|
||||
BinKeyPath = bin_path(ConfKeyPath),
|
||||
NewRawConf = emqx_map_lib:deep_remove(BinKeyPath, OldRawConf),
|
||||
OverrideConf = remove_from_override_config(BinKeyPath, Opts),
|
||||
{ok, NewRawConf, OverrideConf, Opts};
|
||||
case check_permissions(remove, BinKeyPath, OldRawConf, Opts) of
|
||||
allow ->
|
||||
NewRawConf = emqx_map_lib:deep_remove(BinKeyPath, OldRawConf),
|
||||
OverrideConf = remove_from_override_config(BinKeyPath, Opts),
|
||||
{ok, NewRawConf, OverrideConf, Opts};
|
||||
{deny, Reason} ->
|
||||
{error, {permission_denied, Reason}}
|
||||
end;
|
||||
process_update_request(ConfKeyPath, Handlers, {{update, UpdateReq}, Opts}) ->
|
||||
OldRawConf = emqx_config:get_root_raw(ConfKeyPath),
|
||||
case do_update_config(ConfKeyPath, Handlers, OldRawConf, UpdateReq) of
|
||||
{ok, NewRawConf} ->
|
||||
OverrideConf = update_override_config(NewRawConf, Opts),
|
||||
{ok, NewRawConf, OverrideConf, Opts};
|
||||
BinKeyPath = bin_path(ConfKeyPath),
|
||||
case check_permissions(update, BinKeyPath, NewRawConf, Opts) of
|
||||
allow ->
|
||||
OverrideConf = update_override_config(NewRawConf, Opts),
|
||||
{ok, NewRawConf, OverrideConf, Opts};
|
||||
{deny, Reason} ->
|
||||
{error, {permission_denied, Reason}}
|
||||
end;
|
||||
Error ->
|
||||
Error
|
||||
end.
|
||||
|
@ -272,12 +284,11 @@ check_and_save_configs(
|
|||
UpdateArgs,
|
||||
Opts
|
||||
) ->
|
||||
OldConf = emqx_config:get_root(ConfKeyPath),
|
||||
Schema = schema(SchemaModule, ConfKeyPath),
|
||||
{AppEnvs, NewConf} = emqx_config:check_config(Schema, NewRawConf),
|
||||
OldConf = emqx_config:get_root(ConfKeyPath),
|
||||
case do_post_config_update(ConfKeyPath, Handlers, OldConf, NewConf, AppEnvs, UpdateArgs, #{}) of
|
||||
{ok, Result0} ->
|
||||
remove_from_local_if_cluster_change(ConfKeyPath, Opts),
|
||||
ok = emqx_config:save_configs(AppEnvs, NewConf, NewRawConf, OverrideConf, Opts),
|
||||
Result1 = return_change_result(ConfKeyPath, UpdateArgs),
|
||||
{ok, Result1#{post_config_update => Result0}};
|
||||
|
@ -430,16 +441,6 @@ merge_to_old_config(UpdateReq, RawConf) when is_map(UpdateReq), is_map(RawConf)
|
|||
merge_to_old_config(UpdateReq, _RawConf) ->
|
||||
{ok, UpdateReq}.
|
||||
|
||||
%% local-override.conf priority is higher than cluster-override.conf
|
||||
%% If we want cluster to take effect, we must remove the local.
|
||||
remove_from_local_if_cluster_change(BinKeyPath, #{override_to := cluster} = Opts) ->
|
||||
Opts1 = Opts#{override_to => local},
|
||||
Local = remove_from_override_config(BinKeyPath, Opts1),
|
||||
_ = emqx_config:save_to_override_conf(Local, Opts1),
|
||||
ok;
|
||||
remove_from_local_if_cluster_change(_BinKeyPath, _Opts) ->
|
||||
ok.
|
||||
|
||||
remove_from_override_config(_BinKeyPath, #{persistent := false}) ->
|
||||
undefined;
|
||||
remove_from_override_config(BinKeyPath, Opts) ->
|
||||
|
@ -544,3 +545,98 @@ load_prev_handlers() ->
|
|||
|
||||
save_handlers(Handlers) ->
|
||||
application:set_env(emqx, ?MODULE, Handlers).
|
||||
|
||||
check_permissions(_Action, _ConfKeyPath, _NewRawConf, #{override_to := local}) ->
|
||||
allow;
|
||||
check_permissions(Action, ConfKeyPath, NewRawConf, _Opts) ->
|
||||
case emqx_map_lib:deep_find(ConfKeyPath, NewRawConf) of
|
||||
{ok, NewRaw} ->
|
||||
LocalOverride = emqx_config:read_override_conf(#{override_to => local}),
|
||||
case emqx_map_lib:deep_find(ConfKeyPath, LocalOverride) of
|
||||
{ok, LocalRaw} ->
|
||||
case is_mutable(Action, NewRaw, LocalRaw) of
|
||||
ok ->
|
||||
allow;
|
||||
{error, Error} ->
|
||||
?SLOG(error, #{
|
||||
msg => "prevent_remove_local_override_conf",
|
||||
config_key_path => ConfKeyPath,
|
||||
error => Error
|
||||
}),
|
||||
{deny, "Disable changed from local-override.conf"}
|
||||
end;
|
||||
{not_found, _, _} ->
|
||||
allow
|
||||
end;
|
||||
{not_found, _, _} ->
|
||||
allow
|
||||
end.
|
||||
|
||||
is_mutable(Action, NewRaw, LocalRaw) ->
|
||||
try
|
||||
KeyPath = [],
|
||||
is_mutable(KeyPath, Action, NewRaw, LocalRaw)
|
||||
catch
|
||||
throw:Error -> Error
|
||||
end.
|
||||
|
||||
-define(REMOVE_FAILED, "remove_failed").
|
||||
-define(UPDATE_FAILED, "update_failed").
|
||||
|
||||
is_mutable(KeyPath, Action, New = #{}, Local = #{}) ->
|
||||
maps:foreach(
|
||||
fun(Key, SubLocal) ->
|
||||
case maps:find(Key, New) of
|
||||
error -> ok;
|
||||
{ok, SubNew} -> is_mutable(KeyPath ++ [Key], Action, SubNew, SubLocal)
|
||||
end
|
||||
end,
|
||||
Local
|
||||
);
|
||||
is_mutable(KeyPath, remove, Update, Origin) ->
|
||||
throw({error, {?REMOVE_FAILED, KeyPath, Update, Origin}});
|
||||
is_mutable(_KeyPath, update, Val, Val) ->
|
||||
ok;
|
||||
is_mutable(KeyPath, update, Update, Origin) ->
|
||||
throw({error, {?UPDATE_FAILED, KeyPath, Update, Origin}}).
|
||||
|
||||
-ifdef(TEST).
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
is_mutable_update_test() ->
|
||||
Action = update,
|
||||
?assertEqual(ok, is_mutable(Action, #{}, #{})),
|
||||
?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => #{}}}}, #{a => #{b => #{c => #{}}}})),
|
||||
?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c => 1}}})),
|
||||
?assertEqual(
|
||||
{error, {?UPDATE_FAILED, [a, b, c], 1, 2}},
|
||||
is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c => 2}}})
|
||||
),
|
||||
?assertEqual(
|
||||
{error, {?UPDATE_FAILED, [a, b, d], 2, 3}},
|
||||
is_mutable(Action, #{a => #{b => #{c => 1, d => 2}}}, #{a => #{b => #{c => 1, d => 3}}})
|
||||
),
|
||||
ok.
|
||||
|
||||
is_mutable_remove_test() ->
|
||||
Action = remove,
|
||||
?assertEqual(ok, is_mutable(Action, #{}, #{})),
|
||||
?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => #{}}}}, #{a1 => #{b => #{c => #{}}}})),
|
||||
?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b1 => #{c => 1}}})),
|
||||
?assertEqual(ok, is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c1 => 1}}})),
|
||||
|
||||
?assertEqual(
|
||||
{error, {?REMOVE_FAILED, [a, b, c], 1, 1}},
|
||||
is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c => 1}}})
|
||||
),
|
||||
?assertEqual(
|
||||
{error, {?REMOVE_FAILED, [a, b, c], 1, 2}},
|
||||
is_mutable(Action, #{a => #{b => #{c => 1}}}, #{a => #{b => #{c => 2}}})
|
||||
),
|
||||
?assertEqual(
|
||||
{error, {?REMOVE_FAILED, [a, b, c], 1, 1}},
|
||||
is_mutable(Action, #{a => #{b => #{c => 1, d => 2}}}, #{a => #{b => #{c => 1, d => 3}}})
|
||||
),
|
||||
ok.
|
||||
|
||||
-endif.
|
||||
|
|
|
@ -87,12 +87,18 @@ format_list(Listener) ->
|
|||
].
|
||||
|
||||
do_list_raw() ->
|
||||
Key = <<"listeners">>,
|
||||
Raw = emqx_config:get_raw([Key], #{}),
|
||||
SchemaMod = emqx_config:get_schema_mod(Key),
|
||||
#{Key := RawWithDefault} = emqx_config:fill_defaults(SchemaMod, #{Key => Raw}, #{}),
|
||||
Listeners = maps:to_list(RawWithDefault),
|
||||
lists:flatmap(fun format_raw_listeners/1, Listeners).
|
||||
%% GET /listeners from other nodes returns [] when init config is not loaded.
|
||||
case emqx_app:get_init_config_load_done() of
|
||||
true ->
|
||||
Key = <<"listeners">>,
|
||||
Raw = emqx_config:get_raw([Key], #{}),
|
||||
SchemaMod = emqx_config:get_schema_mod(Key),
|
||||
#{Key := RawWithDefault} = emqx_config:fill_defaults(SchemaMod, #{Key => Raw}, #{}),
|
||||
Listeners = maps:to_list(RawWithDefault),
|
||||
lists:flatmap(fun format_raw_listeners/1, Listeners);
|
||||
false ->
|
||||
[]
|
||||
end.
|
||||
|
||||
format_raw_listeners({Type0, Conf}) ->
|
||||
Type = binary_to_atom(Type0),
|
||||
|
|
|
@ -892,7 +892,7 @@ on_delivery_completed(
|
|||
).
|
||||
|
||||
mark_begin_deliver(Msg) ->
|
||||
emqx_message:set_header(deliver_begin_at, erlang:system_time(second), Msg).
|
||||
emqx_message:set_header(deliver_begin_at, erlang:system_time(millisecond), Msg).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Helper functions
|
||||
|
|
|
@ -641,9 +641,9 @@ setup_node(Node, Opts) when is_map(Opts) ->
|
|||
%% Here we start the apps
|
||||
EnvHandlerForRpc =
|
||||
fun(App) ->
|
||||
%% We load configuration, and than set the special enviroment variable
|
||||
%% We load configuration, and than set the special environment variable
|
||||
%% which says that emqx shouldn't load configuration at startup
|
||||
%% Otherwise, configuration get's loaded and all preset env in envhandler is lost
|
||||
%% Otherwise, configuration gets loaded and all preset env in EnvHandler is lost
|
||||
LoadSchema andalso
|
||||
begin
|
||||
emqx_config:init_load(SchemaMod),
|
||||
|
|
|
@ -21,6 +21,8 @@
|
|||
|
||||
-define(MOD, {mod}).
|
||||
-define(WKEY, '?').
|
||||
-define(LOCAL_CONF, "/tmp/local-override.conf").
|
||||
-define(CLUSTER_CONF, "/tmp/cluster-override.conf").
|
||||
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
-include_lib("common_test/include/ct.hrl").
|
||||
|
@ -36,6 +38,8 @@ end_per_suite(_Config) ->
|
|||
emqx_common_test_helpers:stop_apps([]).
|
||||
|
||||
init_per_testcase(_Case, Config) ->
|
||||
_ = file:delete(?LOCAL_CONF),
|
||||
_ = file:delete(?CLUSTER_CONF),
|
||||
Config.
|
||||
|
||||
end_per_testcase(_Case, _Config) ->
|
||||
|
@ -196,6 +200,62 @@ t_sub_key_update_remove(_Config) ->
|
|||
ok = emqx_config_handler:remove_handler(KeyPath2),
|
||||
ok.
|
||||
|
||||
t_local_override_update_remove(_Config) ->
|
||||
application:set_env(emqx, local_override_conf_file, ?LOCAL_CONF),
|
||||
application:set_env(emqx, cluster_override_conf_file, ?CLUSTER_CONF),
|
||||
KeyPath = [sysmon, os, cpu_high_watermark],
|
||||
ok = emqx_config_handler:add_handler(KeyPath, ?MODULE),
|
||||
LocalOpts = #{override_to => local},
|
||||
{ok, Res} = emqx:update_config(KeyPath, <<"70%">>, LocalOpts),
|
||||
?assertMatch(
|
||||
#{
|
||||
config := 0.7,
|
||||
post_config_update := #{},
|
||||
raw_config := <<"70%">>
|
||||
},
|
||||
Res
|
||||
),
|
||||
ClusterOpts = #{override_to => cluster},
|
||||
?assertMatch(
|
||||
{error, {permission_denied, _}}, emqx:update_config(KeyPath, <<"71%">>, ClusterOpts)
|
||||
),
|
||||
?assertMatch(0.7, emqx:get_config(KeyPath)),
|
||||
|
||||
KeyPath2 = [sysmon, os, cpu_low_watermark],
|
||||
ok = emqx_config_handler:add_handler(KeyPath2, ?MODULE),
|
||||
?assertMatch(
|
||||
{error, {permission_denied, _}}, emqx:update_config(KeyPath2, <<"40%">>, ClusterOpts)
|
||||
),
|
||||
|
||||
%% remove
|
||||
?assertMatch({error, {permission_denied, _}}, emqx:remove_config(KeyPath)),
|
||||
?assertEqual(
|
||||
{ok, #{post_config_update => #{}}},
|
||||
emqx:remove_config(KeyPath, #{override_to => local})
|
||||
),
|
||||
?assertEqual(
|
||||
{ok, #{post_config_update => #{}}},
|
||||
emqx:remove_config(KeyPath)
|
||||
),
|
||||
?assertError({config_not_found, KeyPath}, emqx:get_raw_config(KeyPath)),
|
||||
OSKey = maps:keys(emqx:get_raw_config([sysmon, os])),
|
||||
?assertEqual(false, lists:member(<<"cpu_high_watermark">>, OSKey)),
|
||||
?assert(length(OSKey) > 0),
|
||||
|
||||
?assertEqual(
|
||||
{ok, #{config => 0.8, post_config_update => #{}, raw_config => <<"80%">>}},
|
||||
emqx:reset_config(KeyPath, ClusterOpts)
|
||||
),
|
||||
OSKey1 = maps:keys(emqx:get_raw_config([sysmon, os])),
|
||||
?assertEqual(true, lists:member(<<"cpu_high_watermark">>, OSKey1)),
|
||||
?assert(length(OSKey1) > 1),
|
||||
|
||||
ok = emqx_config_handler:remove_handler(KeyPath),
|
||||
ok = emqx_config_handler:remove_handler(KeyPath2),
|
||||
application:unset_env(emqx, local_override_conf_file),
|
||||
application:unset_env(emqx, cluster_override_conf_file),
|
||||
ok.
|
||||
|
||||
t_check_failed(_Config) ->
|
||||
KeyPath = [sysmon, os, cpu_check_interval],
|
||||
Opts = #{rawconf_with_defaults => true},
|
||||
|
@ -219,7 +279,7 @@ t_stop(_Config) ->
|
|||
ok.
|
||||
|
||||
t_callback_crash(_Config) ->
|
||||
CrashPath = [sysmon, os, cpu_high_watermark],
|
||||
CrashPath = [sysmon, os, procmem_high_watermark],
|
||||
Opts = #{rawconf_with_defaults => true},
|
||||
ok = emqx_config_handler:add_handler(CrashPath, ?MODULE),
|
||||
Old = emqx:get_raw_config(CrashPath),
|
||||
|
@ -334,6 +394,8 @@ pre_config_update([sysmon, os, cpu_check_interval], UpdateReq, _RawConf) ->
|
|||
{ok, UpdateReq};
|
||||
pre_config_update([sysmon, os, cpu_low_watermark], UpdateReq, _RawConf) ->
|
||||
{ok, UpdateReq};
|
||||
pre_config_update([sysmon, os, cpu_high_watermark], UpdateReq, _RawConf) ->
|
||||
{ok, UpdateReq};
|
||||
pre_config_update([sysmon, os, sysmem_high_watermark], UpdateReq, _RawConf) ->
|
||||
{ok, UpdateReq};
|
||||
pre_config_update([sysmon, os, mem_check_interval], _UpdateReq, _RawConf) ->
|
||||
|
@ -347,6 +409,8 @@ post_config_update([sysmon, os, cpu_check_interval], _UpdateReq, _NewConf, _OldC
|
|||
{ok, ok};
|
||||
post_config_update([sysmon, os, cpu_low_watermark], _UpdateReq, _NewConf, _OldConf, _AppEnvs) ->
|
||||
ok;
|
||||
post_config_update([sysmon, os, cpu_high_watermark], _UpdateReq, _NewConf, _OldConf, _AppEnvs) ->
|
||||
ok;
|
||||
post_config_update([sysmon, os, sysmem_high_watermark], _UpdateReq, _NewConf, _OldConf, _AppEnvs) ->
|
||||
{error, post_config_update_error}.
|
||||
|
||||
|
|
|
@ -38,8 +38,4 @@
|
|||
|
||||
-define(RESOURCE_GROUP, <<"emqx_authn">>).
|
||||
|
||||
-define(WITH_SUCCESSFUL_RENDER(Code),
|
||||
emqx_authn_utils:with_successful_render(?MODULE, fun() -> Code end)
|
||||
).
|
||||
|
||||
-endif.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
%% -*- mode: erlang -*-
|
||||
{application, emqx_authn, [
|
||||
{description, "EMQX Authentication"},
|
||||
{vsn, "0.1.6"},
|
||||
{vsn, "0.1.7"},
|
||||
{modules, []},
|
||||
{registered, [emqx_authn_sup, emqx_authn_registry]},
|
||||
{applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]},
|
||||
|
|
|
@ -34,8 +34,7 @@
|
|||
ensure_apps_started/1,
|
||||
cleanup_resources/0,
|
||||
make_resource_id/1,
|
||||
without_password/1,
|
||||
with_successful_render/2
|
||||
without_password/1
|
||||
]).
|
||||
|
||||
-define(AUTHN_PLACEHOLDERS, [
|
||||
|
@ -111,7 +110,8 @@ parse_sql(Template, ReplaceWith) ->
|
|||
Template,
|
||||
#{
|
||||
replace_with => ReplaceWith,
|
||||
placeholders => ?AUTHN_PLACEHOLDERS
|
||||
placeholders => ?AUTHN_PLACEHOLDERS,
|
||||
strip_double_quote => true
|
||||
}
|
||||
).
|
||||
|
||||
|
@ -136,18 +136,6 @@ render_sql_params(ParamList, Credential) ->
|
|||
#{return => rawlist, var_trans => fun handle_sql_var/2}
|
||||
).
|
||||
|
||||
with_successful_render(Provider, Fun) when is_function(Fun, 0) ->
|
||||
try
|
||||
Fun()
|
||||
catch
|
||||
error:{cannot_get_variable, Name} ->
|
||||
?TRACE_AUTHN(error, "placeholder_interpolation_failed", #{
|
||||
provider => Provider,
|
||||
placeholder => Name
|
||||
}),
|
||||
ignore
|
||||
end.
|
||||
|
||||
%% true
|
||||
is_superuser(#{<<"is_superuser">> := <<"true">>}) ->
|
||||
#{is_superuser => true};
|
||||
|
@ -229,15 +217,15 @@ without_password(Credential, [Name | Rest]) ->
|
|||
without_password(Credential, Rest)
|
||||
end.
|
||||
|
||||
handle_var({var, Name}, undefined) ->
|
||||
error({cannot_get_variable, Name});
|
||||
handle_var({var, _Name}, undefined) ->
|
||||
<<>>;
|
||||
handle_var({var, <<"peerhost">>}, PeerHost) ->
|
||||
emqx_placeholder:bin(inet:ntoa(PeerHost));
|
||||
handle_var(_, Value) ->
|
||||
emqx_placeholder:bin(Value).
|
||||
|
||||
handle_sql_var({var, Name}, undefined) ->
|
||||
error({cannot_get_variable, Name});
|
||||
handle_sql_var({var, _Name}, undefined) ->
|
||||
<<>>;
|
||||
handle_sql_var({var, <<"peerhost">>}, PeerHost) ->
|
||||
emqx_placeholder:bin(inet:ntoa(PeerHost));
|
||||
handle_sql_var(_, Value) ->
|
||||
|
|
|
@ -187,29 +187,25 @@ authenticate(
|
|||
request_timeout := RequestTimeout
|
||||
} = State
|
||||
) ->
|
||||
?WITH_SUCCESSFUL_RENDER(
|
||||
begin
|
||||
Request = generate_request(Credential, State),
|
||||
Response = emqx_resource:query(ResourceId, {Method, Request, RequestTimeout}),
|
||||
?TRACE_AUTHN_PROVIDER("http_response", #{
|
||||
request => request_for_log(Credential, State),
|
||||
response => response_for_log(Response),
|
||||
resource => ResourceId
|
||||
}),
|
||||
case Response of
|
||||
{ok, 204, _Headers} ->
|
||||
{ok, #{is_superuser => false}};
|
||||
{ok, 200, Headers, Body} ->
|
||||
handle_response(Headers, Body);
|
||||
{ok, _StatusCode, _Headers} = Response ->
|
||||
ignore;
|
||||
{ok, _StatusCode, _Headers, _Body} = Response ->
|
||||
ignore;
|
||||
{error, _Reason} ->
|
||||
ignore
|
||||
end
|
||||
end
|
||||
).
|
||||
Request = generate_request(Credential, State),
|
||||
Response = emqx_resource:query(ResourceId, {Method, Request, RequestTimeout}),
|
||||
?TRACE_AUTHN_PROVIDER("http_response", #{
|
||||
request => request_for_log(Credential, State),
|
||||
response => response_for_log(Response),
|
||||
resource => ResourceId
|
||||
}),
|
||||
case Response of
|
||||
{ok, 204, _Headers} ->
|
||||
{ok, #{is_superuser => false}};
|
||||
{ok, 200, Headers, Body} ->
|
||||
handle_response(Headers, Body);
|
||||
{ok, _StatusCode, _Headers} = Response ->
|
||||
ignore;
|
||||
{ok, _StatusCode, _Headers, _Body} = Response ->
|
||||
ignore;
|
||||
{error, _Reason} ->
|
||||
ignore
|
||||
end.
|
||||
|
||||
destroy(#{resource_id := ResourceId}) ->
|
||||
_ = emqx_resource:remove_local(ResourceId),
|
||||
|
|
|
@ -162,39 +162,35 @@ authenticate(
|
|||
resource_id := ResourceId
|
||||
} = State
|
||||
) ->
|
||||
?WITH_SUCCESSFUL_RENDER(
|
||||
begin
|
||||
Filter = emqx_authn_utils:render_deep(FilterTemplate, Credential),
|
||||
case emqx_resource:query(ResourceId, {find_one, Collection, Filter, #{}}) of
|
||||
{ok, undefined} ->
|
||||
ignore;
|
||||
{error, Reason} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "mongodb_query_failed", #{
|
||||
Filter = emqx_authn_utils:render_deep(FilterTemplate, Credential),
|
||||
case emqx_resource:query(ResourceId, {find_one, Collection, Filter, #{}}) of
|
||||
{ok, undefined} ->
|
||||
ignore;
|
||||
{error, Reason} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "mongodb_query_failed", #{
|
||||
resource => ResourceId,
|
||||
collection => Collection,
|
||||
filter => Filter,
|
||||
reason => Reason
|
||||
}),
|
||||
ignore;
|
||||
{ok, Doc} ->
|
||||
case check_password(Password, Doc, State) of
|
||||
ok ->
|
||||
{ok, is_superuser(Doc, State)};
|
||||
{error, {cannot_find_password_hash_field, PasswordHashField}} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "cannot_find_password_hash_field", #{
|
||||
resource => ResourceId,
|
||||
collection => Collection,
|
||||
filter => Filter,
|
||||
reason => Reason
|
||||
document => Doc,
|
||||
password_hash_field => PasswordHashField
|
||||
}),
|
||||
ignore;
|
||||
{ok, Doc} ->
|
||||
case check_password(Password, Doc, State) of
|
||||
ok ->
|
||||
{ok, is_superuser(Doc, State)};
|
||||
{error, {cannot_find_password_hash_field, PasswordHashField}} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "cannot_find_password_hash_field", #{
|
||||
resource => ResourceId,
|
||||
collection => Collection,
|
||||
filter => Filter,
|
||||
document => Doc,
|
||||
password_hash_field => PasswordHashField
|
||||
}),
|
||||
ignore;
|
||||
{error, Reason} ->
|
||||
{error, Reason}
|
||||
end
|
||||
{error, Reason} ->
|
||||
{error, Reason}
|
||||
end
|
||||
end
|
||||
).
|
||||
end.
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
|
|
|
@ -113,36 +113,32 @@ authenticate(
|
|||
password_hash_algorithm := Algorithm
|
||||
}
|
||||
) ->
|
||||
?WITH_SUCCESSFUL_RENDER(
|
||||
begin
|
||||
Params = emqx_authn_utils:render_sql_params(TmplToken, Credential),
|
||||
case emqx_resource:query(ResourceId, {prepared_query, ?PREPARE_KEY, Params, Timeout}) of
|
||||
{ok, _Columns, []} ->
|
||||
ignore;
|
||||
{ok, Columns, [Row | _]} ->
|
||||
Selected = maps:from_list(lists:zip(Columns, Row)),
|
||||
case
|
||||
emqx_authn_utils:check_password_from_selected_map(
|
||||
Algorithm, Selected, Password
|
||||
)
|
||||
of
|
||||
ok ->
|
||||
{ok, emqx_authn_utils:is_superuser(Selected)};
|
||||
{error, Reason} ->
|
||||
{error, Reason}
|
||||
end;
|
||||
Params = emqx_authn_utils:render_sql_params(TmplToken, Credential),
|
||||
case emqx_resource:query(ResourceId, {prepared_query, ?PREPARE_KEY, Params, Timeout}) of
|
||||
{ok, _Columns, []} ->
|
||||
ignore;
|
||||
{ok, Columns, [Row | _]} ->
|
||||
Selected = maps:from_list(lists:zip(Columns, Row)),
|
||||
case
|
||||
emqx_authn_utils:check_password_from_selected_map(
|
||||
Algorithm, Selected, Password
|
||||
)
|
||||
of
|
||||
ok ->
|
||||
{ok, emqx_authn_utils:is_superuser(Selected)};
|
||||
{error, Reason} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "mysql_query_failed", #{
|
||||
resource => ResourceId,
|
||||
tmpl_token => TmplToken,
|
||||
params => Params,
|
||||
timeout => Timeout,
|
||||
reason => Reason
|
||||
}),
|
||||
ignore
|
||||
end
|
||||
end
|
||||
).
|
||||
{error, Reason}
|
||||
end;
|
||||
{error, Reason} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "mysql_query_failed", #{
|
||||
resource => ResourceId,
|
||||
tmpl_token => TmplToken,
|
||||
params => Params,
|
||||
timeout => Timeout,
|
||||
reason => Reason
|
||||
}),
|
||||
ignore
|
||||
end.
|
||||
|
||||
parse_config(
|
||||
#{
|
||||
|
|
|
@ -115,35 +115,31 @@ authenticate(
|
|||
password_hash_algorithm := Algorithm
|
||||
}
|
||||
) ->
|
||||
?WITH_SUCCESSFUL_RENDER(
|
||||
begin
|
||||
Params = emqx_authn_utils:render_sql_params(PlaceHolders, Credential),
|
||||
case emqx_resource:query(ResourceId, {prepared_query, ResourceId, Params}) of
|
||||
{ok, _Columns, []} ->
|
||||
ignore;
|
||||
{ok, Columns, [Row | _]} ->
|
||||
NColumns = [Name || #column{name = Name} <- Columns],
|
||||
Selected = maps:from_list(lists:zip(NColumns, erlang:tuple_to_list(Row))),
|
||||
case
|
||||
emqx_authn_utils:check_password_from_selected_map(
|
||||
Algorithm, Selected, Password
|
||||
)
|
||||
of
|
||||
ok ->
|
||||
{ok, emqx_authn_utils:is_superuser(Selected)};
|
||||
{error, Reason} ->
|
||||
{error, Reason}
|
||||
end;
|
||||
Params = emqx_authn_utils:render_sql_params(PlaceHolders, Credential),
|
||||
case emqx_resource:query(ResourceId, {prepared_query, ResourceId, Params}) of
|
||||
{ok, _Columns, []} ->
|
||||
ignore;
|
||||
{ok, Columns, [Row | _]} ->
|
||||
NColumns = [Name || #column{name = Name} <- Columns],
|
||||
Selected = maps:from_list(lists:zip(NColumns, erlang:tuple_to_list(Row))),
|
||||
case
|
||||
emqx_authn_utils:check_password_from_selected_map(
|
||||
Algorithm, Selected, Password
|
||||
)
|
||||
of
|
||||
ok ->
|
||||
{ok, emqx_authn_utils:is_superuser(Selected)};
|
||||
{error, Reason} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "postgresql_query_failed", #{
|
||||
resource => ResourceId,
|
||||
params => Params,
|
||||
reason => Reason
|
||||
}),
|
||||
ignore
|
||||
end
|
||||
end
|
||||
).
|
||||
{error, Reason}
|
||||
end;
|
||||
{error, Reason} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "postgresql_query_failed", #{
|
||||
resource => ResourceId,
|
||||
params => Params,
|
||||
reason => Reason
|
||||
}),
|
||||
ignore
|
||||
end.
|
||||
|
||||
parse_config(
|
||||
#{
|
||||
|
|
|
@ -133,15 +133,14 @@ authenticate(
|
|||
password_hash_algorithm := Algorithm
|
||||
}
|
||||
) ->
|
||||
?WITH_SUCCESSFUL_RENDER(
|
||||
begin
|
||||
NKey = emqx_authn_utils:render_str(KeyTemplate, Credential),
|
||||
Command = [CommandName, NKey | Fields],
|
||||
case emqx_resource:query(ResourceId, {cmd, Command}) of
|
||||
{ok, []} ->
|
||||
ignore;
|
||||
{ok, Values} ->
|
||||
Selected = merge(Fields, Values),
|
||||
NKey = emqx_authn_utils:render_str(KeyTemplate, Credential),
|
||||
Command = [CommandName, NKey | Fields],
|
||||
case emqx_resource:query(ResourceId, {cmd, Command}) of
|
||||
{ok, []} ->
|
||||
ignore;
|
||||
{ok, Values} ->
|
||||
case merge(Fields, Values) of
|
||||
Selected when Selected =/= #{} ->
|
||||
case
|
||||
emqx_authn_utils:check_password_from_selected_map(
|
||||
Algorithm, Selected, Password
|
||||
|
@ -149,21 +148,28 @@ authenticate(
|
|||
of
|
||||
ok ->
|
||||
{ok, emqx_authn_utils:is_superuser(Selected)};
|
||||
{error, _Reason} ->
|
||||
ignore
|
||||
{error, _Reason} = Error ->
|
||||
Error
|
||||
end;
|
||||
{error, Reason} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "redis_query_failed", #{
|
||||
_ ->
|
||||
?TRACE_AUTHN_PROVIDER(info, "redis_query_not_matched", #{
|
||||
resource => ResourceId,
|
||||
cmd => Command,
|
||||
keys => NKey,
|
||||
fields => Fields,
|
||||
reason => Reason
|
||||
fields => Fields
|
||||
}),
|
||||
ignore
|
||||
end
|
||||
end
|
||||
).
|
||||
end;
|
||||
{error, Reason} ->
|
||||
?TRACE_AUTHN_PROVIDER(error, "redis_query_failed", #{
|
||||
resource => ResourceId,
|
||||
cmd => Command,
|
||||
keys => NKey,
|
||||
fields => Fields,
|
||||
reason => Reason
|
||||
}),
|
||||
ignore
|
||||
end.
|
||||
|
||||
%%------------------------------------------------------------------------------
|
||||
%% Internal functions
|
||||
|
|
|
@ -166,6 +166,49 @@ test_user_auth(#{
|
|||
?GLOBAL
|
||||
).
|
||||
|
||||
t_no_value_for_placeholder(_Config) ->
|
||||
Handler = fun(Req0, State) ->
|
||||
{ok, RawBody, Req1} = cowboy_req:read_body(Req0),
|
||||
#{
|
||||
<<"cert_subject">> := <<"">>,
|
||||
<<"cert_common_name">> := <<"">>
|
||||
} = jiffy:decode(RawBody, [return_maps]),
|
||||
Req = cowboy_req:reply(
|
||||
200,
|
||||
#{<<"content-type">> => <<"application/json">>},
|
||||
jiffy:encode(#{result => allow, is_superuser => false}),
|
||||
Req1
|
||||
),
|
||||
{ok, Req, State}
|
||||
end,
|
||||
|
||||
SpecificConfgParams = #{
|
||||
<<"method">> => <<"post">>,
|
||||
<<"headers">> => #{<<"content-type">> => <<"application/json">>},
|
||||
<<"body">> => #{
|
||||
<<"cert_subject">> => ?PH_CERT_SUBJECT,
|
||||
<<"cert_common_name">> => ?PH_CERT_CN_NAME
|
||||
}
|
||||
},
|
||||
|
||||
AuthConfig = maps:merge(raw_http_auth_config(), SpecificConfgParams),
|
||||
|
||||
{ok, _} = emqx:update_config(
|
||||
?PATH,
|
||||
{create_authenticator, ?GLOBAL, AuthConfig}
|
||||
),
|
||||
|
||||
ok = emqx_authn_http_test_server:set_handler(Handler),
|
||||
|
||||
Credentials = maps:without([cert_subject, cert_common_name], ?CREDENTIALS),
|
||||
|
||||
?assertMatch({ok, _}, emqx_access_control:authenticate(Credentials)),
|
||||
|
||||
emqx_authn_test_lib:delete_authenticators(
|
||||
[authentication],
|
||||
?GLOBAL
|
||||
).
|
||||
|
||||
t_destroy(_Config) ->
|
||||
AuthConfig = raw_http_auth_config(),
|
||||
|
||||
|
@ -247,27 +290,6 @@ t_update(_Config) ->
|
|||
emqx_access_control:authenticate(?CREDENTIALS)
|
||||
).
|
||||
|
||||
t_interpolation_error(_Config) ->
|
||||
{ok, _} = emqx:update_config(
|
||||
?PATH,
|
||||
{create_authenticator, ?GLOBAL, raw_http_auth_config()}
|
||||
),
|
||||
|
||||
Headers = #{<<"content-type">> => <<"application/json">>},
|
||||
Response = ?SERVER_RESPONSE_JSON(allow),
|
||||
|
||||
ok = emqx_authn_http_test_server:set_handler(
|
||||
fun(Req0, State) ->
|
||||
Req = cowboy_req:reply(200, Headers, Response, Req0),
|
||||
{ok, Req, State}
|
||||
end
|
||||
),
|
||||
|
||||
?assertMatch(
|
||||
?EXCEPTION_DENY,
|
||||
emqx_access_control:authenticate(maps:without([username], ?CREDENTIALS))
|
||||
).
|
||||
|
||||
t_is_superuser(_Config) ->
|
||||
Config = raw_http_auth_config(),
|
||||
{ok, _} = emqx:update_config(
|
||||
|
@ -431,26 +453,6 @@ samples() ->
|
|||
result => {ok, #{is_superuser => false, user_property => #{}}}
|
||||
},
|
||||
|
||||
%% simple get request, no username
|
||||
#{
|
||||
handler => fun(Req0, State) ->
|
||||
#{
|
||||
username := <<"plain">>,
|
||||
password := <<"plain">>
|
||||
} = cowboy_req:match_qs([username, password], Req0),
|
||||
|
||||
Req = cowboy_req:reply(
|
||||
200,
|
||||
#{<<"content-type">> => <<"application/json">>},
|
||||
jiffy:encode(#{result => allow, is_superuser => false}),
|
||||
Req0
|
||||
),
|
||||
{ok, Req, State}
|
||||
end,
|
||||
config_params => #{},
|
||||
result => {ok, #{is_superuser => false, user_property => #{}}}
|
||||
},
|
||||
|
||||
%% get request with json body response
|
||||
#{
|
||||
handler => fun(Req0, State) ->
|
||||
|
|
|
@ -288,20 +288,6 @@ raw_mongo_auth_config() ->
|
|||
|
||||
user_seeds() ->
|
||||
[
|
||||
#{
|
||||
data => #{
|
||||
username => <<"plain">>,
|
||||
password_hash => <<"plainsalt">>,
|
||||
salt => <<"salt">>,
|
||||
is_superuser => <<"1">>
|
||||
},
|
||||
credentials => #{
|
||||
password => <<"plain">>
|
||||
},
|
||||
config_params => #{},
|
||||
result => {error, not_authorized}
|
||||
},
|
||||
|
||||
#{
|
||||
data => #{
|
||||
username => <<"plain">>,
|
||||
|
|
|
@ -258,20 +258,6 @@ raw_mysql_auth_config() ->
|
|||
|
||||
user_seeds() ->
|
||||
[
|
||||
#{
|
||||
data => #{
|
||||
username => "plain",
|
||||
password_hash => "plainsalt",
|
||||
salt => "salt",
|
||||
is_superuser_str => "1"
|
||||
},
|
||||
credentials => #{
|
||||
password => <<"plain">>
|
||||
},
|
||||
config_params => #{},
|
||||
result => {error, not_authorized}
|
||||
},
|
||||
|
||||
#{
|
||||
data => #{
|
||||
username => "plain",
|
||||
|
@ -332,6 +318,32 @@ user_seeds() ->
|
|||
result => {ok, #{is_superuser => true}}
|
||||
},
|
||||
|
||||
%% strip double quote support
|
||||
#{
|
||||
data => #{
|
||||
username => "sha256",
|
||||
password_hash => "ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf",
|
||||
salt => "salt",
|
||||
is_superuser_int => 1
|
||||
},
|
||||
credentials => #{
|
||||
username => <<"sha256">>,
|
||||
password => <<"sha256">>
|
||||
},
|
||||
config_params => #{
|
||||
<<"query">> =>
|
||||
<<
|
||||
"SELECT password_hash, salt, is_superuser_int as is_superuser\n"
|
||||
" FROM users where username = \"${username}\" LIMIT 1"
|
||||
>>,
|
||||
<<"password_hash_algorithm">> => #{
|
||||
<<"name">> => <<"sha256">>,
|
||||
<<"salt_position">> => <<"prefix">>
|
||||
}
|
||||
},
|
||||
result => {ok, #{is_superuser => true}}
|
||||
},
|
||||
|
||||
#{
|
||||
data => #{
|
||||
username => "sha256",
|
||||
|
|
|
@@ -320,20 +320,6 @@ raw_pgsql_auth_config() ->

user_seeds() ->
[
#{
data => #{
username => "plain",
password_hash => "plainsalt",
salt => "salt",
is_superuser_str => "1"
},
credentials => #{
password => <<"plain">>
},
config_params => #{},
result => {error, not_authorized}
},

#{
data => #{
username => "plain",

@@ -394,6 +380,32 @@ user_seeds() ->
result => {ok, #{is_superuser => true}}
},

%% strip double quote support
#{
data => #{
username => "sha256",
password_hash => "ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf",
salt => "salt",
is_superuser_int => 1
},
credentials => #{
username => <<"sha256">>,
password => <<"sha256">>
},
config_params => #{
<<"query">> =>
<<
"SELECT password_hash, salt, is_superuser_int as is_superuser\n"
" FROM users where username = \"${username}\" LIMIT 1"
>>,
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"prefix">>
}
},
result => {ok, #{is_superuser => true}}
},

#{
data => #{
username => "sha256",
@@ -161,11 +161,13 @@ t_authenticate(_Config) ->
user_seeds()
).

test_user_auth(#{
credentials := Credentials0,
config_params := SpecificConfigParams,
result := Result
}) ->
test_user_auth(
#{
credentials := Credentials0,
config_params := SpecificConfigParams,
result := Result
} = Config
) ->
AuthConfig = maps:merge(raw_redis_auth_config(), SpecificConfigParams),

{ok, _} = emqx:update_config(

@@ -183,14 +185,12 @@ test_user_auth(#{

?assertEqual(Result, emqx_access_control:authenticate(Credentials)),

AuthnResult =
case Result of
{error, _} ->
ignore;
Any ->
Any
end,
?assertEqual(AuthnResult, emqx_authn_redis:authenticate(Credentials, State)),
case maps:get(redis_result, Config, undefined) of
undefined ->
ok;
RedisResult ->
?assertEqual(RedisResult, emqx_authn_redis:authenticate(Credentials, State))
end,

emqx_authn_test_lib:delete_authenticators(
[authentication],
@@ -292,20 +292,6 @@ raw_redis_auth_config() ->

user_seeds() ->
[
#{
data => #{
password_hash => <<"plainsalt">>,
salt => <<"salt">>,
is_superuser => <<"1">>
},
credentials => #{
password => <<"plain">>
},
key => <<"mqtt_user:plain">>,
config_params => #{},
result => {error, not_authorized}
},

#{
data => #{
password_hash => <<"plainsalt">>,

@@ -478,7 +464,7 @@ user_seeds() ->
<<"cmd">> => <<"HMGET mqtt_user:${username} password_hash salt is_superuser">>,
<<"password_hash_algorithm">> => #{<<"name">> => <<"bcrypt">>}
},
result => {error, not_authorized}
result => {error, bad_username_or_password}
},

#{

@@ -547,6 +533,23 @@ user_seeds() ->
}
},
result => {ok, #{is_superuser => true}}
},

%% user not exists
#{
data => #{
password_hash => <<"plainsalt">>,
salt => <<"salt">>,
is_superuser => <<"1">>
},
credentials => #{
username => <<"not_exists">>,
password => <<"plain">>
},
key => <<"mqtt_user:plain">>,
config_params => #{},
result => {error, not_authorized},
redis_result => ignore
}
].
@@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authz, [
{description, "An OTP application"},
{vsn, "0.1.5"},
{vsn, "0.1.6"},
{registered, []},
{mod, {emqx_authz_app, []}},
{applications, [
@@ -391,14 +391,6 @@ do_authorize(
Matched ->
{Matched, Type}
catch
error:{cannot_get_variable, Name} ->
emqx_metrics_worker:inc(authz_metrics, Type, nomatch),
?SLOG(warning, #{
msg => "placeholder_interpolation_failed",
placeholder => Name,
authorize_type => Type
}),
do_authorize(Client, PubSub, Topic, Tail);
Class:Reason:Stacktrace ->
emqx_metrics_worker:inc(authz_metrics, Type, nomatch),
?SLOG(warning, #{
@@ -223,7 +223,7 @@ sources(get, _) ->
])
end;
(Source, AccIn) ->
lists:append(AccIn, [drop_invalid_certs(Source)])
lists:append(AccIn, [Source])
end,
[],
get_raw_sources()

@@ -257,7 +257,7 @@ source(get, #{bindings := #{type := Type}}) ->
}}
end;
[Source] ->
{200, drop_invalid_certs(Source)}
{200, Source}
end;
source(put, #{bindings := #{type := <<"file">>}, body := #{<<"type">> := <<"file">>} = Body}) ->
update_authz_file(Body);

@@ -511,11 +511,6 @@ update_config(Cmd, Sources) ->
}}
end.

drop_invalid_certs(#{<<"ssl">> := SSL} = Source) when SSL =/= undefined ->
Source#{<<"ssl">> => emqx_tls_lib:drop_invalid_certs(SSL)};
drop_invalid_certs(Source) ->
Source.

parameters_field() ->
[
{type,
@@ -45,7 +45,9 @@
?PH_PROTONAME,
?PH_MOUNTPOINT,
?PH_TOPIC,
?PH_ACTION
?PH_ACTION,
?PH_CERT_SUBJECT,
?PH_CERT_CN_NAME
]).

description() ->

@@ -40,7 +40,9 @@
-define(PLACEHOLDERS, [
?PH_USERNAME,
?PH_CLIENTID,
?PH_PEERHOST
?PH_PEERHOST,
?PH_CERT_CN_NAME,
?PH_CERT_SUBJECT
]).

description() ->
@@ -109,7 +109,8 @@ parse_sql(Template, ReplaceWith, PlaceHolders) ->
Template,
#{
replace_with => ReplaceWith,
placeholders => PlaceHolders
placeholders => PlaceHolders,
strip_double_quote => true
}
).

@@ -180,15 +181,15 @@ convert_client_var({dn, DN}) -> {cert_subject, DN};
convert_client_var({protocol, Proto}) -> {proto_name, Proto};
convert_client_var(Other) -> Other.

handle_var({var, Name}, undefined) ->
error({cannot_get_variable, Name});
handle_var({var, _Name}, undefined) ->
<<>>;
handle_var({var, <<"peerhost">>}, IpAddr) ->
inet_parse:ntoa(IpAddr);
handle_var(_Name, Value) ->
emqx_placeholder:bin(Value).

handle_sql_var({var, Name}, undefined) ->
error({cannot_get_variable, Name});
handle_sql_var({var, _Name}, undefined) ->
<<>>;
handle_sql_var({var, <<"peerhost">>}, IpAddr) ->
inet_parse:ntoa(IpAddr);
handle_sql_var(_Name, Value) ->
@@ -19,9 +19,12 @@
-compile(export_all).

-include("emqx_authz.hrl").
-include_lib("emqx/include/emqx.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("emqx/include/emqx_placeholder.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

all() ->
emqx_common_test_helpers:all(?MODULE).

@@ -60,10 +63,26 @@ end_per_suite(_Config) ->
meck:unload(emqx_resource),
ok.

init_per_testcase(TestCase, Config) when
TestCase =:= t_subscribe_deny_disconnect_publishes_last_will_testament;
TestCase =:= t_publish_deny_disconnect_publishes_last_will_testament
->
{ok, _} = emqx_authz:update(?CMD_REPLACE, []),
{ok, _} = emqx:update_config([authorization, deny_action], disconnect),
Config;
init_per_testcase(_, Config) ->
{ok, _} = emqx_authz:update(?CMD_REPLACE, []),
Config.

end_per_testcase(TestCase, _Config) when
TestCase =:= t_subscribe_deny_disconnect_publishes_last_will_testament;
TestCase =:= t_publish_deny_disconnect_publishes_last_will_testament
->
{ok, _} = emqx:update_config([authorization, deny_action], ignore),
ok;
end_per_testcase(_TestCase, _Config) ->
ok.

set_special_configs(emqx_authz) ->
{ok, _} = emqx:update_config([authorization, cache, enable], false),
{ok, _} = emqx:update_config([authorization, no_match], deny),

@@ -138,6 +157,15 @@ set_special_configs(_App) ->
"\n{allow,{ipaddr,\"127.0.0.1\"},all,[\"$SYS/#\",\"#\"]}."
>>
}).
-define(SOURCE7, #{
<<"type">> => <<"file">>,
<<"enable">> => true,
<<"rules">> =>
<<
"{allow,{username,\"some_client\"},publish,[\"some_client/lwt\"]}.\n"
"{deny, all}."
>>
}).

%%------------------------------------------------------------------------------
%% Testcases
@@ -286,5 +314,87 @@ t_get_enabled_authzs_some_enabled(_Config) ->
{ok, _} = emqx_authz:update(?CMD_REPLACE, [?SOURCE4]),
?assertEqual([postgresql], emqx_authz:get_enabled_authzs()).

t_subscribe_deny_disconnect_publishes_last_will_testament(_Config) ->
{ok, _} = emqx_authz:update(?CMD_REPLACE, [?SOURCE7]),
{ok, C} = emqtt:start_link([
{username, <<"some_client">>},
{will_topic, <<"some_client/lwt">>},
{will_payload, <<"should be published">>}
]),
{ok, _} = emqtt:connect(C),
ok = emqx:subscribe(<<"some_client/lwt">>),
process_flag(trap_exit, true),

try
emqtt:subscribe(C, <<"unauthorized">>),
error(should_have_disconnected)
catch
exit:{{shutdown, tcp_closed}, _} ->
ok
end,

receive
{deliver, <<"some_client/lwt">>, #message{payload = <<"should be published">>}} ->
ok
after 2_000 ->
error(lwt_not_published)
end,

ok.

t_publish_deny_disconnect_publishes_last_will_testament(_Config) ->
{ok, _} = emqx_authz:update(?CMD_REPLACE, [?SOURCE7]),
{ok, C} = emqtt:start_link([
{username, <<"some_client">>},
{will_topic, <<"some_client/lwt">>},
{will_payload, <<"should be published">>}
]),
{ok, _} = emqtt:connect(C),
ok = emqx:subscribe(<<"some_client/lwt">>),
process_flag(trap_exit, true),

%% disconnect is async
Ref = monitor(process, C),
emqtt:publish(C, <<"some/topic">>, <<"unauthorized">>),
receive
{'DOWN', Ref, process, C, _} ->
ok
after 1_000 ->
error(client_should_have_been_disconnected)
end,
receive
{deliver, <<"some_client/lwt">>, #message{payload = <<"should be published">>}} ->
ok
after 2_000 ->
error(lwt_not_published)
end,

ok.

t_publish_last_will_testament_denied_topic(_Config) ->
{ok, C} = emqtt:start_link([
{will_topic, <<"$SYS/lwt">>},
{will_payload, <<"should not be published">>}
]),
{ok, _} = emqtt:connect(C),
ok = emqx:subscribe(<<"$SYS/lwt">>),
unlink(C),
ok = snabbkaffe:start_trace(),
{true, {ok, _}} = ?wait_async_action(
exit(C, kill),
#{?snk_kind := last_will_testament_publish_denied},
1_000
),
ok = snabbkaffe:stop(),

receive
{deliver, <<"$SYS/lwt">>, #message{payload = <<"should not be published">>}} ->
error(lwt_should_not_be_published_to_forbidden_topic)
after 1_000 ->
ok
end,

ok.

stop_apps(Apps) ->
lists:foreach(fun application:stop/1, Apps).
@@ -22,6 +22,7 @@
-include("emqx_authz.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("emqx/include/emqx_placeholder.hrl").

-define(HTTP_PORT, 33333).
-define(HTTP_PATH, "/authz/[...]").

@@ -303,7 +304,7 @@ t_json_body(_Config) ->
emqx_access_control:authorize(ClientInfo, publish, <<"t">>)
).

t_form_body(_Config) ->
t_placeholder_and_body(_Config) ->
ok = setup_handler_and_config(
fun(Req0, State) ->
?assertEqual(

@@ -321,7 +322,9 @@ t_form_body(_Config) ->
<<"proto_name">> := <<"MQTT">>,
<<"mountpoint">> := <<"MOUNTPOINT">>,
<<"topic">> := <<"t">>,
<<"action">> := <<"publish">>
<<"action">> := <<"publish">>,
<<"CN">> := ?PH_CERT_CN_NAME,
<<"CS">> := ?PH_CERT_SUBJECT
},
jiffy:decode(PostVars, [return_maps])
),

@@ -336,7 +339,9 @@ t_form_body(_Config) ->
<<"proto_name">> => <<"${proto_name}">>,
<<"mountpoint">> => <<"${mountpoint}">>,
<<"topic">> => <<"${topic}">>,
<<"action">> => <<"${action}">>
<<"action">> => <<"${action}">>,
<<"CN">> => ?PH_CERT_CN_NAME,
<<"CS">> => ?PH_CERT_SUBJECT
},
<<"headers">> => #{<<"content-type">> => <<"application/x-www-form-urlencoded">>}
}

@@ -349,6 +354,48 @@ t_form_body(_Config) ->
protocol => <<"MQTT">>,
mountpoint => <<"MOUNTPOINT">>,
zone => default,
listener => {tcp, default},
cn => ?PH_CERT_CN_NAME,
dn => ?PH_CERT_SUBJECT
},

?assertEqual(
allow,
emqx_access_control:authorize(ClientInfo, publish, <<"t">>)
).

t_no_value_for_placeholder(_Config) ->
ok = setup_handler_and_config(
fun(Req0, State) ->
?assertEqual(
<<"/authz/users/">>,
cowboy_req:path(Req0)
),

{ok, RawBody, Req1} = cowboy_req:read_body(Req0),

?assertMatch(
#{
<<"mountpoint">> := <<"[]">>
},
jiffy:decode(RawBody, [return_maps])
),
{ok, ?AUTHZ_HTTP_RESP(allow, Req1), State}
end,
#{
<<"method">> => <<"post">>,
<<"body">> => #{
<<"mountpoint">> => <<"[${mountpoint}]">>
}
}
),

ClientInfo = #{
clientid => <<"client id">>,
username => <<"user name">>,
peerhost => {127, 0, 0, 1},
protocol => <<"MQTT">>,
zone => default,
listener => {tcp, default}
},
@@ -18,8 +18,8 @@
-compile(nowarn_export_all).
-compile(export_all).

-include("emqx_connector.hrl").
-include("emqx_authz.hrl").
-include_lib("emqx_connector/include/emqx_connector.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("emqx/include/emqx_placeholder.hrl").

@@ -188,6 +188,46 @@ t_lookups(_Config) ->
#{<<"filter">> => #{<<"peerhost">> => <<"${peerhost}">>}}
),

ok = emqx_authz_test_lib:test_samples(
ClientInfo,
[
{allow, subscribe, <<"a">>},
{deny, subscribe, <<"b">>}
]
),

ByCN = #{
<<"CN">> => <<"cn">>,
<<"topics">> => [<<"a">>],
<<"action">> => <<"all">>,
<<"permission">> => <<"allow">>
},

ok = setup_samples([ByCN]),
ok = setup_config(
#{<<"filter">> => #{<<"CN">> => ?PH_CERT_CN_NAME}}
),

ok = emqx_authz_test_lib:test_samples(
ClientInfo,
[
{allow, subscribe, <<"a">>},
{deny, subscribe, <<"b">>}
]
),

ByDN = #{
<<"DN">> => <<"dn">>,
<<"topics">> => [<<"a">>],
<<"action">> => <<"all">>,
<<"permission">> => <<"allow">>
},

ok = setup_samples([ByDN]),
ok = setup_config(
#{<<"filter">> => #{<<"DN">> => ?PH_CERT_SUBJECT}}
),

ok = emqx_authz_test_lib:test_samples(
ClientInfo,
[
@@ -202,6 +202,34 @@ t_lookups(_Config) ->
}
),

ok = emqx_authz_test_lib:test_samples(
ClientInfo,
[
{allow, subscribe, <<"a">>},
{deny, subscribe, <<"b">>}
]
),

%% strip double quote support

ok = init_table(),
ok = q(
<<
"INSERT INTO acl(clientid, topic, permission, action)"
"VALUES(?, ?, ?, ?)"
>>,
[<<"clientid">>, <<"a">>, <<"allow">>, <<"subscribe">>]
),

ok = setup_config(
#{
<<"query">> => <<
"SELECT permission, action, topic "
"FROM acl WHERE clientid = \"${clientid}\""
>>
}
),

ok = emqx_authz_test_lib:test_samples(
ClientInfo,
[
@@ -202,6 +202,34 @@ t_lookups(_Config) ->
}
),

ok = emqx_authz_test_lib:test_samples(
ClientInfo,
[
{allow, subscribe, <<"a">>},
{deny, subscribe, <<"b">>}
]
),

%% strip double quote support

ok = init_table(),
ok = insert(
<<
"INSERT INTO acl(clientid, topic, permission, action)"
"VALUES($1, $2, $3, $4)"
>>,
[<<"clientid">>, <<"a">>, <<"allow">>, <<"subscribe">>]
),

ok = setup_config(
#{
<<"query">> => <<
"SELECT permission, action, topic "
"FROM acl WHERE clientid = \"${clientid}\""
>>
}
),

ok = emqx_authz_test_lib:test_samples(
ClientInfo,
[
@@ -621,10 +621,9 @@ pick_bridges_by_id(Type, Name, BridgesAllNodes) ->

format_bridge_info([FirstBridge | _] = Bridges) ->
Res = maps:remove(node, FirstBridge),
NRes = emqx_connector_ssl:drop_invalid_certs(Res),
NodeStatus = collect_status(Bridges),
NodeMetrics = collect_metrics(Bridges),
NRes#{
Res#{
status => aggregate_status(NodeStatus),
node_status => NodeStatus,
metrics => aggregate_metrics(NodeMetrics),
@@ -1,6 +1,6 @@
{application, emqx_conf, [
{description, "EMQX configuration management"},
{vsn, "0.1.4"},
{vsn, "0.1.5"},
{registered, []},
{mod, {emqx_conf_app, []}},
{applications, [kernel, stdlib]},
@@ -152,11 +152,17 @@ copy_override_conf_from_core_node() ->
_ ->
[{ok, Info} | _] = lists:sort(fun conf_sort/2, Ready),
#{node := Node, conf := RawOverrideConf, tnx_id := TnxId} = Info,
Msg = #{
?SLOG(debug, #{
msg => "copy_overide_conf_from_core_node_success",
node => Node
},
?SLOG(debug, Msg),
node => Node,
cluster_override_conf_file => application:get_env(
emqx, cluster_override_conf_file
),
local_override_conf_file => application:get_env(
emqx, local_override_conf_file
),
data_dir => emqx:data_dir()
}),
ok = emqx_config:save_to_override_conf(
RawOverrideConf,
#{override_to => cluster}
@@ -535,6 +535,15 @@ fields("node") ->
desc => ?DESC(node_applications)
}
)},
{"etc_dir",
sc(
string(),
#{
desc => ?DESC(node_etc_dir),
'readOnly' => true,
deprecated => {since, "5.0.8"}
}
)},
{"cluster_call",
sc(
?R_REF("cluster_call"),
@@ -18,7 +18,6 @@

-export([
convert_certs/2,
drop_invalid_certs/1,
clear_certs/2
]).

@@ -61,28 +60,6 @@ clear_certs(RltvDir, #{ssl := OldSSL} = _Config) ->
clear_certs(_RltvDir, _) ->
ok.

drop_invalid_certs(#{<<"connector">> := Connector} = Config) when
is_map(Connector)
->
SSL = map_get_oneof([<<"ssl">>, ssl], Connector, undefined),
NewSSL = emqx_tls_lib:drop_invalid_certs(SSL),
new_ssl_config(Config, NewSSL);
drop_invalid_certs(#{connector := Connector} = Config) when
is_map(Connector)
->
SSL = map_get_oneof([<<"ssl">>, ssl], Connector, undefined),
NewSSL = emqx_tls_lib:drop_invalid_certs(SSL),
new_ssl_config(Config, NewSSL);
drop_invalid_certs(#{<<"ssl">> := SSL} = Config) ->
NewSSL = emqx_tls_lib:drop_invalid_certs(SSL),
new_ssl_config(Config, NewSSL);
drop_invalid_certs(#{ssl := SSL} = Config) ->
NewSSL = emqx_tls_lib:drop_invalid_certs(SSL),
new_ssl_config(Config, NewSSL);
%% for bridges use connector name
drop_invalid_certs(Config) ->
Config.

new_ssl_config(RltvDir, Config, SSL) ->
case emqx_tls_lib:ensure_ssl_files(RltvDir, SSL) of
{ok, NewSSL} ->
@@ -199,7 +199,7 @@ fields("ingress_local") ->
mk(
binary(),
#{
default => <<"${payload}">>,
default => undefined,
desc => ?DESC("payload")
}
)}

@@ -259,7 +259,7 @@ fields("egress_remote") ->
mk(
binary(),
#{
default => <<"${payload}">>,
default => undefined,
desc => ?DESC("payload")
}
)}
@@ -43,6 +43,8 @@
{'message.dropped', {emqx_exhook_handler, on_message_dropped, []}}
]).

-define(SERVER_FORCE_SHUTDOWN_TIMEOUT, 5000).

-endif.

-define(CMD_MOVE_FRONT, front).
@@ -483,16 +483,11 @@ err_msg(Msg) -> emqx_misc:readable_error_msg(Msg).
get_raw_config() ->
RawConfig = emqx:get_raw_config([exhook, servers], []),
Schema = #{roots => emqx_exhook_schema:fields(exhook), fields => #{}},
Conf = #{<<"servers">> => lists:map(fun drop_invalid_certs/1, RawConfig)},
Conf = #{<<"servers">> => RawConfig},
Options = #{only_fill_defaults => true},
#{<<"servers">> := Servers} = hocon_tconf:check_plain(Schema, Conf, Options),
Servers.

drop_invalid_certs(#{<<"ssl">> := SSL} = Conf) when SSL =/= undefined ->
Conf#{<<"ssl">> => emqx_tls_lib:drop_invalid_certs(SSL)};
drop_invalid_certs(Conf) ->
Conf.

position_example() ->
#{
front =>
@@ -21,6 +21,7 @@

-include("emqx_exhook.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

%% APIs
-export([start_link/0]).

@@ -297,7 +298,8 @@ handle_info(refresh_tick, State) ->
handle_info(_Info, State) ->
{noreply, State}.

terminate(_Reason, State = #{servers := Servers}) ->
terminate(Reason, State = #{servers := Servers}) ->
_ = unload_exhooks(),
_ = maps:fold(
fun(Name, _, AccIn) ->
do_unload_server(Name, AccIn)

@@ -305,7 +307,7 @@ terminate(_Reason, State = #{servers := Servers}) ->
State,
Servers
),
_ = unload_exhooks(),
?tp(info, exhook_mgr_terminated, #{reason => Reason, servers => Servers}),
ok.

code_change(_OldVsn, State, _Extra) ->
@@ -179,13 +179,16 @@ filter(Ls) ->

-spec unload(server()) -> ok.
unload(#{name := Name, options := ReqOpts, hookspec := HookSpecs}) ->
_ = do_deinit(Name, ReqOpts),
_ = may_unload_hooks(HookSpecs),
_ = do_deinit(Name, ReqOpts),
_ = emqx_exhook_sup:stop_grpc_client_channel(Name),
ok.

do_deinit(Name, ReqOpts) ->
_ = do_call(Name, undefined, 'on_provider_unloaded', #{}, ReqOpts),
%% Override the request timeout to deinit grpc server to
%% avoid emqx_exhook_mgr force killed by upper supervisor
NReqOpts = ReqOpts#{timeout => ?SERVER_FORCE_SHUTDOWN_TIMEOUT},
_ = do_call(Name, undefined, 'on_provider_unloaded', #{}, NReqOpts),
ok.

do_init(ChannName, ReqOpts) ->
@@ -16,6 +16,8 @@

-module(emqx_exhook_sup).

-include("emqx_exhook.hrl").

-behaviour(supervisor).

-export([

@@ -28,11 +30,13 @@
stop_grpc_client_channel/1
]).

-define(CHILD(Mod, Type, Args), #{
-define(DEFAULT_TIMEOUT, 5000).

-define(CHILD(Mod, Type, Args, Timeout), #{
id => Mod,
start => {Mod, start_link, Args},
type => Type,
shutdown => 15000
shutdown => Timeout
}).

%%--------------------------------------------------------------------

@@ -45,7 +49,7 @@ start_link() ->
init([]) ->
_ = emqx_exhook_metrics:init(),
_ = emqx_exhook_mgr:init_ref_counter_table(),
Mngr = ?CHILD(emqx_exhook_mgr, worker, []),
Mngr = ?CHILD(emqx_exhook_mgr, worker, [], force_shutdown_timeout()),
{ok, {{one_for_one, 10, 100}, [Mngr]}}.

%%--------------------------------------------------------------------

@@ -70,3 +74,9 @@ stop_grpc_client_channel(Name) ->
_:_:_ ->
ok
end.

%% Calculate the maximum timeout, which will help to shutdown the
%% emqx_exhook_mgr process correctly.
force_shutdown_timeout() ->
Factor = max(3, length(emqx:get_config([exhook, servers])) + 1),
Factor * ?SERVER_FORCE_SHUTDOWN_TIMEOUT.
@@ -24,6 +24,7 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("emqx/include/emqx_hooks.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

-define(DEFAULT_CLUSTER_NAME_ATOM, emqxcl).

@@ -313,6 +314,40 @@ t_cluster_name(_) ->
),
emqx_exhook_mgr:disable(<<"default">>).

t_stop_timeout(_) ->
snabbkaffe:start_trace(),
meck:new(emqx_exhook_demo_svr, [passthrough, no_history]),
meck:expect(
emqx_exhook_demo_svr,
on_provider_unloaded,
fun(Req, Md) ->
%% ensure sleep time greater than emqx_exhook_mgr shutdown timeout
timer:sleep(20000),
meck:passthrough([Req, Md])
end
),

%% stop application
application:stop(emqx_exhook),
?block_until(#{?snk_kind := exhook_mgr_terminated}, 20000),

%% all exhook hooked point should be unloaded
Mods = lists:flatten(
lists:map(
fun({hook, _, Cbs}) ->
lists:map(fun({callback, {M, _, _}, _, _}) -> M end, Cbs)
end,
ets:tab2list(emqx_hooks)
)
),
?assertEqual(false, lists:any(fun(M) -> M == emqx_exhook_handler end, Mods)),

%% ensure started for other tests
emqx_common_test_helpers:start_apps([emqx_exhook]),

snabbkaffe:stop(),
meck:unload(emqx_exhook_demo_svr).

%%--------------------------------------------------------------------
%% Cases Helpers
%%--------------------------------------------------------------------
@@ -80,7 +80,16 @@ stop() ->

stop(Name) ->
grpc:stop_server(Name),
to_atom_name(Name) ! stop.
case whereis(to_atom_name(Name)) of
undefined ->
ok;
Pid ->
Ref = erlang:monitor(process, Pid),
Pid ! stop,
receive
{'DOWN', Ref, process, Pid, _Reason} -> ok
end
end.

take() ->
to_atom_name(?NAME) ! {take, self()},
@@ -19,6 +19,7 @@

-include_lib("emqx/include/types.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

%% API
-export([

@@ -51,6 +52,9 @@
%% Internal callback
-export([wakeup_from_hib/2, recvloop/2]).

%% for channel module
-export([keepalive_stats/1]).

-record(state, {
%% TCP/SSL/UDP/DTLS Wrapped Socket
socket :: {esockd_transport, esockd:socket()} | {udp, _, _},

@@ -240,6 +244,11 @@ esockd_send(Data, #state{
esockd_send(Data, #state{socket = {esockd_transport, Sock}}) ->
esockd_transport:async_send(Sock, Data).

keepalive_stats(recv) ->
emqx_pd:get_counter(recv_pkt);
keepalive_stats(send) ->
emqx_pd:get_counter(send_pkt).

is_datadram_socket({esockd_transport, _}) -> false;
is_datadram_socket({udp, _, _}) -> true.

@@ -568,9 +577,15 @@ terminate(
channel = Channel
}
) ->
?SLOG(debug, #{msg => "conn_process_terminated", reason => Reason}),
_ = ChannMod:terminate(Reason, Channel),
_ = close_socket(State),
ClientId =
try ChannMod:info(clientid, Channel) of
Id -> Id
catch
_:_ -> undefined
end,
?tp(debug, conn_process_terminated, #{reason => Reason, clientid => ClientId}),
exit(Reason).

%%--------------------------------------------------------------------

@@ -635,28 +650,22 @@ handle_timeout(
Keepalive,
State = #state{
chann_mod = ChannMod,
socket = Socket,
channel = Channel
}
) when
Keepalive == keepalive;
Keepalive == keepalive_send
->
Stat =
StatVal =
case Keepalive of
keepalive -> recv_oct;
keepalive_send -> send_oct
keepalive -> keepalive_stats(recv);
keepalive_send -> keepalive_stats(send)
end,
case ChannMod:info(conn_state, Channel) of
disconnected ->
{ok, State};
_ ->
case esockd_getstat(Socket, [Stat]) of
{ok, [{Stat, RecvOct}]} ->
handle_timeout(TRef, {Keepalive, RecvOct}, State);
{error, Reason} ->
handle_info({sock_error, Reason}, State)
end
handle_timeout(TRef, {Keepalive, StatVal}, State)
end;
handle_timeout(
_TRef,
@ -78,7 +78,8 @@
|
|||
|
||||
-define(TIMER_TABLE, #{
|
||||
alive_timer => keepalive,
|
||||
force_timer => force_close
|
||||
force_timer => force_close,
|
||||
idle_timer => force_close_idle
|
||||
}).
|
||||
|
||||
-define(INFO_KEYS, [conninfo, conn_state, clientinfo, session, will_msg]).
|
||||
|
@ -151,14 +152,17 @@ init(
|
|||
Ctx = maps:get(ctx, Options),
|
||||
GRpcChann = maps:get(handler, Options),
|
||||
PoolName = maps:get(pool_name, Options),
|
||||
NConnInfo = default_conninfo(ConnInfo),
|
||||
IdleTimeout = emqx_gateway_utils:idle_timeout(Options),
|
||||
|
||||
NConnInfo = default_conninfo(ConnInfo#{idle_timeout => IdleTimeout}),
|
||||
ListenerId =
|
||||
case maps:get(listener, Options, undefined) of
|
||||
undefined -> undefined;
|
||||
{GwName, Type, LisName} -> emqx_gateway_utils:listener_id(GwName, Type, LisName)
|
||||
end,
|
||||
|
||||
EnableAuthn = maps:get(enable_authn, Options, true),
|
||||
DefaultClientInfo = default_clientinfo(ConnInfo),
|
||||
DefaultClientInfo = default_clientinfo(NConnInfo),
|
||||
ClientInfo = DefaultClientInfo#{
|
||||
listener => ListenerId,
|
||||
enable_authn => EnableAuthn
|
||||
|
@ -183,7 +187,9 @@ init(
|
|||
}
|
||||
)
|
||||
},
|
||||
try_dispatch(on_socket_created, wrap(Req), Channel).
|
||||
start_idle_checking_timer(
|
||||
try_dispatch(on_socket_created, wrap(Req), Channel)
|
||||
).
|
||||
|
||||
%% @private
|
||||
peercert(NoSsl, ConnInfo) when
|
||||
|
@ -217,6 +223,12 @@ socktype(dtls) -> 'DTLS'.
|
|||
address({Host, Port}) ->
|
||||
#{host => inet:ntoa(Host), port => Port}.
|
||||
|
||||
%% avoid udp connection process leak
|
||||
start_idle_checking_timer(Channel = #channel{conninfo = #{socktype := udp}}) ->
|
||||
ensure_timer(idle_timer, Channel);
|
||||
start_idle_checking_timer(Channel) ->
|
||||
Channel.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Handle incoming packet
|
||||
%%--------------------------------------------------------------------
|
||||
|
@ -285,10 +297,15 @@ handle_timeout(
|
|||
{ok, reset_timer(alive_timer, NChannel)};
|
||||
{error, timeout} ->
|
||||
Req = #{type => 'KEEPALIVE'},
|
||||
{ok, try_dispatch(on_timer_timeout, wrap(Req), Channel)}
|
||||
NChannel = remove_timer_ref(alive_timer, Channel),
|
||||
%% close connection if keepalive timeout
|
||||
Replies = [{event, disconnected}, {close, keepalive_timeout}],
|
||||
{ok, Replies, try_dispatch(on_timer_timeout, wrap(Req), NChannel)}
|
||||
end;
|
||||
handle_timeout(_TRef, force_close, Channel = #channel{closed_reason = Reason}) ->
|
||||
{shutdown, {error, {force_close, Reason}}, Channel};
|
||||
handle_timeout(_TRef, force_close_idle, Channel) ->
|
||||
{shutdown, idle_timeout, Channel};
|
||||
handle_timeout(_TRef, Msg, Channel) ->
|
||||
?SLOG(warning, #{
|
||||
msg => "unexpected_timeout_signal",
|
||||
|
@ -390,7 +407,7 @@ handle_call(
|
|||
NConnInfo = ConnInfo#{keepalive => Interval},
|
||||
NClientInfo = ClientInfo#{keepalive => Interval},
|
||||
NChannel = Channel#channel{conninfo = NConnInfo, clientinfo = NClientInfo},
|
||||
{reply, ok, ensure_keepalive(NChannel)};
|
||||
{reply, ok, [{event, updated}], ensure_keepalive(cancel_timer(idle_timer, NChannel))};
|
||||
handle_call(
|
||||
{subscribe_from_client, TopicFilter, Qos},
|
||||
_From,
|
||||
|
@ -405,21 +422,21 @@ handle_call(
|
|||
{reply, {error, ?RESP_PERMISSION_DENY, <<"Authorization deny">>}, Channel};
|
||||
_ ->
|
||||
{ok, _, NChannel} = do_subscribe([{TopicFilter, #{qos => Qos}}], Channel),
|
||||
{reply, ok, NChannel}
|
||||
{reply, ok, [{event, updated}], NChannel}
|
||||
end;
|
||||
handle_call({subscribe, Topic, SubOpts}, _From, Channel) ->
|
||||
{ok, [{NTopicFilter, NSubOpts}], NChannel} = do_subscribe([{Topic, SubOpts}], Channel),
|
||||
{reply, {ok, {NTopicFilter, NSubOpts}}, NChannel};
|
||||
{reply, {ok, {NTopicFilter, NSubOpts}}, [{event, updated}], NChannel};
|
||||
handle_call(
|
||||
{unsubscribe_from_client, TopicFilter},
|
||||
_From,
|
||||
Channel = #channel{conn_state = connected}
|
||||
) ->
|
||||
{ok, NChannel} = do_unsubscribe([{TopicFilter, #{}}], Channel),
|
||||
{reply, ok, NChannel};
|
||||
{reply, ok, [{event, updated}], NChannel};
|
||||
handle_call({unsubscribe, Topic}, _From, Channel) ->
|
||||
{ok, NChannel} = do_unsubscribe([Topic], Channel),
|
||||
{reply, ok, NChannel};
|
||||
{reply, ok, [{event, update}], NChannel};
|
||||
handle_call(subscriptions, _From, Channel = #channel{subscriptions = Subs}) ->
|
||||
{reply, {ok, maps:to_list(Subs)}, Channel};
|
||||
handle_call(
|
||||
|
@ -446,7 +463,7 @@ handle_call(
|
|||
{reply, ok, Channel}
|
||||
end;
|
||||
handle_call(kick, _From, Channel) ->
|
||||
{shutdown, kicked, ok, ensure_disconnected(kicked, Channel)};
|
||||
{reply, ok, [{event, disconnected}, {close, kicked}], Channel};
|
||||
handle_call(discard, _From, Channel) ->
|
||||
{shutdown, discarded, ok, Channel};
|
||||
handle_call(Req, _From, Channel) ->
|
||||
|
@ -648,7 +665,8 @@ ensure_keepalive(Channel = #channel{clientinfo = ClientInfo}) ->
|
|||
ensure_keepalive_timer(Interval, Channel) when Interval =< 0 ->
|
||||
Channel;
|
||||
ensure_keepalive_timer(Interval, Channel) ->
|
||||
Keepalive = emqx_keepalive:init(timer:seconds(Interval)),
|
||||
StatVal = emqx_gateway_conn:keepalive_stats(recv),
|
||||
Keepalive = emqx_keepalive:init(StatVal, timer:seconds(Interval)),
|
||||
ensure_timer(alive_timer, Channel#channel{keepalive = Keepalive}).
|
||||
|
||||
ensure_timer(Name, Channel = #channel{timers = Timers}) ->
|
||||
|
@ -666,11 +684,17 @@ ensure_timer(Name, Time, Channel = #channel{timers = Timers}) ->
|
|||
Channel#channel{timers = Timers#{Name => TRef}}.
|
||||
|
||||
reset_timer(Name, Channel) ->
|
||||
ensure_timer(Name, clean_timer(Name, Channel)).
|
||||
ensure_timer(Name, remove_timer_ref(Name, Channel)).
|
||||
|
||||
clean_timer(Name, Channel = #channel{timers = Timers}) ->
|
||||
cancel_timer(Name, Channel = #channel{timers = Timers}) ->
|
||||
emqx_misc:cancel_timer(maps:get(Name, Timers, undefined)),
|
||||
remove_timer_ref(Name, Channel).
|
||||
|
||||
remove_timer_ref(Name, Channel = #channel{timers = Timers}) ->
|
||||
Channel#channel{timers = maps:remove(Name, Timers)}.
|
||||
|
||||
interval(idle_timer, #channel{conninfo = #{idle_timeout := IdleTimeout}}) ->
|
||||
IdleTimeout;
|
||||
interval(force_timer, _) ->
|
||||
15000;
|
||||
interval(alive_timer, #channel{keepalive = Keepalive}) ->
|
||||
|
@ -725,7 +749,7 @@ enrich_clientinfo(InClientInfo = #{proto_name := ProtoName}, ClientInfo) ->
|
|||
default_conninfo(ConnInfo) ->
|
||||
ConnInfo#{
|
||||
clean_start => true,
|
||||
clientid => undefined,
|
||||
clientid => anonymous_clientid(),
|
||||
username => undefined,
|
||||
conn_props => #{},
|
||||
connected => true,
|
||||
|
@ -739,14 +763,15 @@ default_conninfo(ConnInfo) ->
|
|||
|
||||
default_clientinfo(#{
|
||||
peername := {PeerHost, _},
|
||||
sockname := {_, SockPort}
|
||||
sockname := {_, SockPort},
|
||||
clientid := ClientId
|
||||
}) ->
|
||||
#{
|
||||
zone => default,
|
||||
protocol => exproto,
|
||||
peerhost => PeerHost,
|
||||
sockport => SockPort,
|
||||
clientid => undefined,
|
||||
clientid => ClientId,
|
||||
username => undefined,
|
||||
is_bridge => false,
|
||||
is_superuser => false,
|
||||
|
@ -764,3 +789,6 @@ proto_name_to_protocol(<<>>) ->
|
|||
exproto;
|
||||
proto_name_to_protocol(ProtoName) when is_binary(ProtoName) ->
|
||||
binary_to_atom(ProtoName).
|
||||
|
||||
anonymous_clientid() ->
|
||||
iolist_to_binary(["exproto-", emqx_misc:gen_id()]).
|
||||
|
|
|
@ -56,12 +56,19 @@ start_link(Pool, Id) ->
|
|||
[]
|
||||
).
|
||||
|
||||
-spec async_call(atom(), map(), map()) -> ok.
|
||||
async_call(
|
||||
FunName,
|
||||
Req = #{conn := Conn},
|
||||
Options = #{pool_name := PoolName}
|
||||
) ->
|
||||
cast(pick(PoolName, Conn), {rpc, FunName, Req, Options, self()}).
|
||||
case pick(PoolName, Conn) of
|
||||
false ->
|
||||
reply(self(), FunName, {error, no_available_grpc_client});
|
||||
Pid when is_pid(Pid) ->
|
||||
cast(Pid, {rpc, FunName, Req, Options, self()})
|
||||
end,
|
||||
ok.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% cast, pick
|
||||
|
@ -72,6 +79,7 @@ async_call(
|
|||
cast(Deliver, Msg) ->
|
||||
gen_server:cast(Deliver, Msg).
|
||||
|
||||
-spec pick(term(), term()) -> pid() | false.
|
||||
pick(PoolName, Conn) ->
|
||||
gproc_pool:pick_worker(PoolName, Conn).
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
-compile(nowarn_export_all).
|
||||
|
||||
-include_lib("emqx/include/emqx_hooks.hrl").
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
-import(
|
||||
emqx_exproto_echo_svr,
|
||||
|
@ -38,6 +39,7 @@
|
|||
|
||||
-include_lib("emqx/include/emqx.hrl").
|
||||
-include_lib("emqx/include/emqx_mqtt.hrl").
|
||||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
-define(TCPOPTS, [binary, {active, false}]).
|
||||
-define(DTLSOPTS, [binary, {active, false}, {protocol, dtls}]).
|
||||
|
@ -62,6 +64,9 @@
|
|||
all() ->
|
||||
[{group, Name} || Name <- metrics()].
|
||||
|
||||
suite() ->
|
||||
[{timetrap, {seconds, 30}}].
|
||||
|
||||
groups() ->
|
||||
Cases = emqx_common_test_helpers:all(?MODULE),
|
||||
[{Name, Cases} || Name <- metrics()].
|
||||
|
@ -87,6 +92,7 @@ set_special_cfg(emqx_gateway) ->
|
|||
[gateway, exproto],
|
||||
#{
|
||||
server => #{bind => 9100},
|
||||
idle_timeout => 5000,
|
||||
handler => #{address => "http://127.0.0.1:9001"},
|
||||
listeners => listener_confs(LisType)
|
||||
}
|
||||
|
@ -223,14 +229,16 @@ t_acl_deny(Cfg) ->
|
|||
close(Sock).
|
||||
|
||||
t_keepalive_timeout(Cfg) ->
|
||||
ok = snabbkaffe:start_trace(),
|
||||
SockType = proplists:get_value(listener_type, Cfg),
|
||||
Sock = open(SockType),
|
||||
|
||||
ClientId1 = <<"keepalive_test_client1">>,
|
||||
Client = #{
|
||||
proto_name => <<"demo">>,
|
||||
proto_ver => <<"v0.1">>,
|
||||
clientid => <<"test_client_1">>,
|
||||
keepalive => 2
|
||||
clientid => ClientId1,
|
||||
keepalive => 5
|
||||
},
|
||||
Password = <<"123456">>,
|
||||
|
||||
|
@ -238,16 +246,42 @@ t_keepalive_timeout(Cfg) ->
|
|||
ConnAckBin = frame_connack(0),
|
||||
|
||||
send(Sock, ConnBin),
|
||||
{ok, ConnAckBin} = recv(Sock, 5000),
|
||||
{ok, ConnAckBin} = recv(Sock),
|
||||
|
||||
DisconnectBin = frame_disconnect(),
|
||||
{ok, DisconnectBin} = recv(Sock, 10000),
|
||||
|
||||
SockType =/= udp andalso
|
||||
begin
|
||||
{error, closed} = recv(Sock, 5000)
|
||||
end,
|
||||
ok.
|
||||
case SockType of
|
||||
udp ->
|
||||
%% another udp client should not affect the first
|
||||
%% udp client keepalive check
|
||||
timer:sleep(4000),
|
||||
Sock2 = open(SockType),
|
||||
ConnBin2 = frame_connect(
|
||||
Client#{clientid => <<"keepalive_test_client2">>},
|
||||
Password
|
||||
),
|
||||
send(Sock2, ConnBin2),
|
||||
%% first client will be keepalive timeouted in 6s
|
||||
?assertMatch(
|
||||
{ok, #{
|
||||
clientid := ClientId1,
|
||||
reason := {shutdown, {sock_closed, keepalive_timeout}}
|
||||
}},
|
||||
?block_until(#{?snk_kind := conn_process_terminated}, 8000)
|
||||
);
|
||||
_ ->
|
||||
?assertMatch(
|
||||
{ok, #{
|
||||
clientid := ClientId1,
|
||||
reason := {shutdown, {sock_closed, keepalive_timeout}}
|
||||
}},
|
||||
?block_until(#{?snk_kind := conn_process_terminated}, 12000)
|
||||
),
|
||||
Trace = snabbkaffe:collect_trace(),
|
||||
%% conn process should be terminated
|
||||
?assertEqual(1, length(?of_kind(conn_process_terminated, Trace))),
|
||||
%% socket port should be closed
|
||||
?assertEqual({error, closed}, recv(Sock, 5000))
|
||||
end,
|
||||
snabbkaffe:stop().
|
||||
|
||||
t_hook_connected_disconnected(Cfg) ->
|
||||
SockType = proplists:get_value(listener_type, Cfg),
|
||||
|
@ -337,6 +371,8 @@ t_hook_session_subscribed_unsubscribed(Cfg) ->
|
|||
error(hook_is_not_running)
|
||||
end,
|
||||
|
||||
send(Sock, frame_disconnect()),
|
||||
|
||||
close(Sock),
|
||||
emqx_hooks:del('session.subscribed', {?MODULE, hook_fun3}),
|
||||
emqx_hooks:del('session.unsubscribed', {?MODULE, hook_fun4}).
|
||||
|
@ -373,6 +409,48 @@ t_hook_message_delivered(Cfg) ->
|
|||
close(Sock),
|
||||
emqx_hooks:del('message.delivered', {?MODULE, hook_fun5}).
|
||||
|
||||
t_idle_timeout(Cfg) ->
|
||||
ok = snabbkaffe:start_trace(),
|
||||
SockType = proplists:get_value(listener_type, Cfg),
|
||||
Sock = open(SockType),
|
||||
|
||||
%% need to create udp client by sending something
|
||||
case SockType of
|
||||
udp ->
|
||||
%% nothing to do
|
||||
ok = meck:new(emqx_exproto_gcli, [passthrough, no_history]),
|
||||
ok = meck:expect(
|
||||
emqx_exproto_gcli,
|
||||
async_call,
|
||||
fun(FunName, _Req, _GClient) ->
|
||||
self() ! {hreply, FunName, ok},
|
||||
ok
|
||||
end
|
||||
),
|
||||
%% send request, but nobody can respond to it
|
||||
ClientId = <<"idle_test_client1">>,
|
||||
Client = #{
|
||||
proto_name => <<"demo">>,
|
||||
proto_ver => <<"v0.1">>,
|
||||
clientid => ClientId,
|
||||
keepalive => 5
|
||||
},
|
||||
Password = <<"123456">>,
|
||||
ConnBin = frame_connect(Client, Password),
|
||||
send(Sock, ConnBin),
|
||||
?assertMatch(
|
||||
{ok, #{reason := {shutdown, idle_timeout}}},
|
||||
?block_until(#{?snk_kind := conn_process_terminated}, 10000)
|
||||
),
|
||||
ok = meck:unload(emqx_exproto_gcli);
|
||||
_ ->
|
||||
?assertMatch(
|
||||
{ok, #{reason := {shutdown, idle_timeout}}},
|
||||
?block_until(#{?snk_kind := conn_process_terminated}, 10000)
|
||||
)
|
||||
end,
|
||||
snabbkaffe:stop().
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Utils
|
||||
|
||||
|
@ -422,6 +500,9 @@ send({ssl, Sock}, Bin) ->
|
|||
send({dtls, Sock}, Bin) ->
|
||||
ssl:send(Sock, Bin).
|
||||
|
||||
recv(Sock) ->
|
||||
recv(Sock, infinity).
|
||||
|
||||
recv({tcp, Sock}, Ts) ->
|
||||
gen_tcp:recv(Sock, 0, Ts);
|
||||
recv({udp, Sock}, Ts) ->
|
||||
|
|
|
@ -141,7 +141,8 @@ schema("/configs_reset/:rootname") ->
|
|||
],
|
||||
responses => #{
|
||||
200 => <<"Rest config successfully">>,
|
||||
400 => emqx_dashboard_swagger:error_codes(['NO_DEFAULT_VALUE', 'REST_FAILED'])
|
||||
400 => emqx_dashboard_swagger:error_codes(['NO_DEFAULT_VALUE', 'REST_FAILED']),
|
||||
403 => emqx_dashboard_swagger:error_codes(['REST_FAILED'])
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -160,7 +161,8 @@ schema("/configs/global_zone") ->
|
|||
'requestBody' => Schema,
|
||||
responses => #{
|
||||
200 => Schema,
|
||||
400 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED'])
|
||||
400 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED']),
|
||||
403 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED'])
|
||||
}
|
||||
}
|
||||
};
|
||||
|
@ -226,7 +228,8 @@ schema(Path) ->
|
|||
'requestBody' => Schema,
|
||||
responses => #{
|
||||
200 => Schema,
|
||||
400 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED'])
|
||||
400 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED']),
|
||||
403 => emqx_dashboard_swagger:error_codes(['UPDATE_FAILED'])
|
||||
}
|
||||
}
|
||||
}.
|
||||
|
@ -254,6 +257,8 @@ config(put, #{body := Body}, Req) ->
|
|||
case emqx_conf:update(Path, Body, ?OPTS) of
|
||||
{ok, #{raw_config := RawConf}} ->
|
||||
{200, RawConf};
|
||||
{error, {permission_denied, Reason}} ->
|
||||
{403, #{code => 'UPDATE_FAILED', message => Reason}};
|
||||
{error, Reason} ->
|
||||
{400, #{code => 'UPDATE_FAILED', message => ?ERR_MSG(Reason)}}
|
||||
end.
|
||||
|
@ -297,6 +302,8 @@ config_reset(post, _Params, Req) ->
|
|||
case emqx_conf:reset(Path, ?OPTS) of
|
||||
{ok, _} ->
|
||||
{200};
|
||||
{error, {permission_denied, Reason}} ->
|
||||
{403, #{code => 'REST_FAILED', message => Reason}};
|
||||
{error, no_default_value} ->
|
||||
{400, #{code => 'NO_DEFAULT_VALUE', message => <<"No Default Value.">>}};
|
||||
{error, Reason} ->
|
||||
|
|
|
@ -100,6 +100,68 @@ t_wss_crud_listeners_by_id(_) ->
|
|||
Type = <<"wss">>,
|
||||
crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type).
|
||||
|
||||
t_api_listeners_list_not_ready(_Config) ->
|
||||
net_kernel:start(['listeners@127.0.0.1', longnames]),
|
||||
ct:timetrap({seconds, 120}),
|
||||
snabbkaffe:fix_ct_logging(),
|
||||
Cluster = [{Name, Opts}, {Name1, Opts1}] = cluster([core, core]),
|
||||
ct:pal("Starting ~p", [Cluster]),
|
||||
Node1 = emqx_common_test_helpers:start_slave(Name, Opts),
|
||||
Node2 = emqx_common_test_helpers:start_slave(Name1, Opts1),
|
||||
try
|
||||
L1 = get_tcp_listeners(Node1),
|
||||
|
||||
%% test init_config not ready.
|
||||
_ = rpc:call(Node1, application, set_env, [emqx, init_config_load_done, false]),
|
||||
assert_config_load_not_done(Node1),
|
||||
|
||||
L2 = get_tcp_listeners(Node1),
|
||||
L3 = get_tcp_listeners(Node2),
|
||||
|
||||
Comment = #{
|
||||
node1 => rpc:call(Node1, mria_mnesia, running_nodes, []),
|
||||
node2 => rpc:call(Node2, mria_mnesia, running_nodes, [])
|
||||
},
|
||||
|
||||
?assert(length(L1) > length(L2), Comment),
|
||||
?assertEqual(length(L2), length(L3), Comment)
|
||||
after
|
||||
emqx_common_test_helpers:stop_slave(Node1),
|
||||
emqx_common_test_helpers:stop_slave(Node2)
|
||||
end.
|
||||
|
||||
get_tcp_listeners(Node) ->
|
||||
Query = #{query_string => #{<<"type">> => tcp}},
|
||||
{200, L} = rpc:call(Node, emqx_mgmt_api_listeners, list_listeners, [get, Query]),
|
||||
[#{node_status := NodeStatus}] = L,
|
||||
ct:pal("Node:~p:~p", [Node, L]),
|
||||
NodeStatus.
|
||||
|
||||
assert_config_load_not_done(Node) ->
|
||||
Done = rpc:call(Node, emqx_app, get_init_config_load_done, []),
|
||||
?assertNot(Done, #{node => Node}).
|
||||
|
||||
cluster(Specs) ->
|
||||
Env = [
|
||||
{emqx, init_config_load_done, false},
|
||||
{emqx, boot_modules, []}
|
||||
],
|
||||
emqx_common_test_helpers:emqx_cluster(Specs, [
|
||||
{env, Env},
|
||||
{apps, [emqx_conf]},
|
||||
{load_schema, false},
|
||||
{join_to, true},
|
||||
{env_handler, fun
|
||||
(emqx) ->
|
||||
application:set_env(emqx, boot_modules, []),
|
||||
%% test init_config not ready.
|
||||
application:set_env(emqx, init_config_load_done, false),
|
||||
ok;
|
||||
(_) ->
|
||||
ok
|
||||
end}
|
||||
]).
|
||||
|
||||
crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
|
||||
OriginPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]),
|
||||
NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]),
|
||||
|
|
|
@ -39,7 +39,10 @@
|
|||
sql_data/1
|
||||
]).
|
||||
|
||||
-define(EX_PLACE_HOLDER, "(\\$\\{[a-zA-Z0-9\\._]+\\}|\"\\$\\{[a-zA-Z0-9\\._]+\\}\")").
|
||||
-define(EX_PLACE_HOLDER, "(\\$\\{[a-zA-Z0-9\\._]+\\})").
|
||||
|
||||
-define(EX_PLACE_HOLDER_DOUBLE_QUOTE, "(\\$\\{[a-zA-Z0-9\\._]+\\}|\"\\$\\{[a-zA-Z0-9\\._]+\\}\")").
|
||||
|
||||
%% Space and CRLF
|
||||
-define(EX_WITHE_CHARS, "\\s").
|
||||
|
||||
|
@ -57,7 +60,8 @@
|
|||
|
||||
-type preproc_sql_opts() :: #{
|
||||
placeholders => list(binary()),
|
||||
replace_with => '?' | '$n'
|
||||
replace_with => '?' | '$n',
|
||||
strip_double_quote => boolean()
|
||||
}.
|
||||
|
||||
-type preproc_deep_opts() :: #{
|
||||
|
@ -89,7 +93,7 @@ preproc_tmpl(Str) ->
|
|||
preproc_tmpl(Str, Opts) ->
|
||||
RE = preproc_var_re(Opts),
|
||||
Tokens = re:split(Str, RE, [{return, binary}, group, trim]),
|
||||
do_preproc_tmpl(Tokens, []).
|
||||
do_preproc_tmpl(Opts, Tokens, []).
|
||||
|
||||
-spec proc_tmpl(tmpl_token(), map()) -> binary().
|
||||
proc_tmpl(Tokens, Data) ->
|
||||
|
@ -140,10 +144,11 @@ preproc_sql(Sql, ReplaceWith) when is_atom(ReplaceWith) ->
|
|||
preproc_sql(Sql, #{replace_with => ReplaceWith});
|
||||
preproc_sql(Sql, Opts) ->
|
||||
RE = preproc_var_re(Opts),
|
||||
Strip = maps:get(strip_double_quote, Opts, false),
|
||||
ReplaceWith = maps:get(replace_with, Opts, '?'),
|
||||
case re:run(Sql, RE, [{capture, all_but_first, binary}, global]) of
|
||||
{match, PlaceHolders} ->
|
||||
PhKs = [parse_nested(unwrap(Phld)) || [Phld | _] <- PlaceHolders],
|
||||
PhKs = [parse_nested(unwrap(Phld, Strip)) || [Phld | _] <- PlaceHolders],
|
||||
{replace_with(Sql, RE, ReplaceWith), [{var, Phld} || Phld <- PhKs]};
|
||||
nomatch ->
|
||||
{Sql, []}
|
||||
|
@ -234,29 +239,36 @@ get_phld_var(Fun, Data) when is_function(Fun) ->
|
|||
get_phld_var(Phld, Data) ->
|
||||
emqx_rule_maps:nested_get(Phld, Data).
|
||||
|
||||
preproc_var_re(#{placeholders := PHs}) ->
|
||||
preproc_var_re(#{placeholders := PHs, strip_double_quote := true}) ->
|
||||
Res = [ph_to_re(PH) || PH <- PHs],
|
||||
QuoteRes = ["\"" ++ Re ++ "\"" || Re <- Res],
|
||||
"(" ++ string:join(Res ++ QuoteRes, "|") ++ ")";
|
||||
preproc_var_re(#{placeholders := PHs}) ->
|
||||
"(" ++ string:join([ph_to_re(PH) || PH <- PHs], "|") ++ ")";
|
||||
preproc_var_re(#{strip_double_quote := true}) ->
|
||||
?EX_PLACE_HOLDER_DOUBLE_QUOTE;
|
||||
preproc_var_re(#{}) ->
|
||||
?EX_PLACE_HOLDER.
|
||||
|
||||
ph_to_re(VarPH) ->
|
||||
re:replace(VarPH, "[\\$\\{\\}]", "\\\\&", [global, {return, list}]).
|
||||
|
||||
do_preproc_tmpl([], Acc) ->
|
||||
do_preproc_tmpl(_Opts, [], Acc) ->
|
||||
lists:reverse(Acc);
|
||||
do_preproc_tmpl([[Str, Phld] | Tokens], Acc) ->
|
||||
do_preproc_tmpl(Opts, [[Str, Phld] | Tokens], Acc) ->
|
||||
Strip = maps:get(strip_double_quote, Opts, false),
|
||||
do_preproc_tmpl(
|
||||
Opts,
|
||||
Tokens,
|
||||
put_head(
|
||||
var,
|
||||
parse_nested(unwrap(Phld)),
|
||||
parse_nested(unwrap(Phld, Strip)),
|
||||
put_head(str, Str, Acc)
|
||||
)
|
||||
);
|
||||
do_preproc_tmpl([[Str] | Tokens], Acc) ->
|
||||
do_preproc_tmpl(Opts, [[Str] | Tokens], Acc) ->
|
||||
do_preproc_tmpl(
|
||||
Opts,
|
||||
Tokens,
|
||||
put_head(str, Str, Acc)
|
||||
).
|
||||
|
@ -293,10 +305,10 @@ parse_nested(Attr) ->
|
|||
Nested -> {path, [{key, P} || P <- Nested]}
|
||||
end.
|
||||
|
||||
unwrap(<<"${", Val/binary>>) ->
|
||||
binary:part(Val, {0, byte_size(Val) - 1});
|
||||
unwrap(<<"\"${", Val/binary>>) ->
|
||||
binary:part(Val, {0, byte_size(Val) - 2}).
|
||||
unwrap(<<"\"${", Val/binary>>, _StripDoubleQuote = true) ->
|
||||
binary:part(Val, {0, byte_size(Val) - 2});
|
||||
unwrap(<<"${", Val/binary>>, _StripDoubleQuote) ->
|
||||
binary:part(Val, {0, byte_size(Val) - 1}).
|
||||
|
||||
quote_sql(Str) ->
|
||||
quote(Str, <<"\\\\'">>).
|
||||
|
|
|
@ -150,20 +150,25 @@ t_preproc_sql6(_) ->
|
|||
emqx_placeholder:proc_sql(ParamsTokens, Selected)
|
||||
).
|
||||
|
||||
t_preproc_sql7(_) ->
|
||||
t_preproc_sql_strip_double_quote(_) ->
|
||||
Selected = #{a => <<"a">>, b => <<"b">>},
|
||||
Opts = #{replace_with => '$n', placeholders => [<<"${a}">>]},
|
||||
|
||||
%% no strip_double_quote option: "${key}" -> "value"
|
||||
{PrepareStatement, ParamsTokens} = emqx_placeholder:preproc_sql(
|
||||
<<"a:\"${a}\",b:\"${b}\"">>,
|
||||
#{
|
||||
replace_with => '$n',
|
||||
placeholders => [<<"${a}">>]
|
||||
}
|
||||
Opts
|
||||
),
|
||||
?assertEqual(<<"a:$1,b:\"${b}\"">>, PrepareStatement),
|
||||
?assertEqual(
|
||||
[<<"a">>],
|
||||
emqx_placeholder:proc_sql(ParamsTokens, Selected)
|
||||
).
|
||||
?assertEqual(<<"a:\"$1\",b:\"${b}\"">>, PrepareStatement),
|
||||
?assertEqual([<<"a">>], emqx_placeholder:proc_sql(ParamsTokens, Selected)),
|
||||
|
||||
%% strip_double_quote = true: "${key}" -> value
|
||||
{PrepareStatement1, ParamsTokens1} = emqx_placeholder:preproc_sql(
|
||||
<<"a:\"${a}\",b:\"${b}\"">>,
|
||||
Opts#{strip_double_quote => true}
|
||||
),
|
||||
?assertEqual(<<"a:$1,b:\"${b}\"">>, PrepareStatement1),
|
||||
?assertEqual([<<"a">>], emqx_placeholder:proc_sql(ParamsTokens1, Selected)).
|
||||
|
||||
t_preproc_tmpl_deep(_) ->
|
||||
Selected = #{a => <<"1">>, b => 1, c => 1.0, d => #{d1 => <<"hi">>}},
|
||||
|
|
|
@ -261,6 +261,17 @@ of the rule, then the string "undefined" is used.
|
|||
}
|
||||
}
|
||||
|
||||
rule_engine_jq_implementation_module {
|
||||
desc {
|
||||
en: "The implementation module for the jq rule engine function. The two options are jq_nif and jq_port. With the jq_nif option an Erlang NIF library is used while with the jq_port option an implementation based on Erlang port programs is used. The jq_nif option (the default option) is the fastest implementation of the two but jq_port is safer as the jq programs will not execute in the same process as the Erlang VM."
|
||||
zh: "jq 规则引擎功能的实现模块。可用的两个选项是 jq_nif 和 jq_port。jq_nif 使用 Erlang NIF 库访问 jq 库,而 jq_port 使用基于 Erlang Port 的实现。jq_nif 方式(默认选项)是这两个选项中最快的实现,但 jq_port 方式更安全,因为这种情况下 jq 程序不会在 Erlang VM 进程中执行。"
|
||||
}
|
||||
label: {
|
||||
en: "JQ Implementation Module"
|
||||
zh: "JQ 实现模块"
|
||||
}
|
||||
}
|
||||
|
||||
desc_rule_engine {
|
||||
desc {
|
||||
en: """Configuration for the EMQX Rule Engine."""
|
||||
|
|
|
@ -364,6 +364,10 @@ init([]) ->
|
|||
{write_concurrency, true},
|
||||
{read_concurrency, true}
|
||||
]),
|
||||
ok = emqx_config_handler:add_handler(
|
||||
[rule_engine, jq_implementation_module],
|
||||
emqx_rule_engine_schema
|
||||
),
|
||||
{ok, #{}}.
|
||||
|
||||
handle_call({insert_rule, Rule}, _From, State) ->
|
||||
|
|
|
@ -25,7 +25,8 @@
|
|||
namespace/0,
|
||||
roots/0,
|
||||
fields/1,
|
||||
desc/1
|
||||
desc/1,
|
||||
post_config_update/5
|
||||
]).
|
||||
|
||||
-export([validate_sql/1]).
|
||||
|
@ -49,6 +50,15 @@ fields("rule_engine") ->
|
|||
default => "10s",
|
||||
desc => ?DESC("rule_engine_jq_function_default_timeout")
|
||||
}
|
||||
)},
|
||||
{jq_implementation_module,
|
||||
?HOCON(
|
||||
hoconsc:enum([jq_nif, jq_port]),
|
||||
#{
|
||||
default => jq_nif,
|
||||
mapping => "jq.jq_implementation_module",
|
||||
desc => ?DESC("rule_engine_jq_implementation_module")
|
||||
}
|
||||
)}
|
||||
];
|
||||
fields("rules") ->
|
||||
|
@ -209,3 +219,13 @@ validate_sql(Sql) ->
|
|||
{ok, _Result} -> ok;
|
||||
{error, Reason} -> {error, Reason}
|
||||
end.
|
||||
|
||||
post_config_update(
|
||||
[rule_engine, jq_implementation_module],
|
||||
_Req,
|
||||
NewSysConf,
|
||||
_OldSysConf,
|
||||
_AppEnvs
|
||||
) ->
|
||||
jq:set_implementation_module(NewSysConf),
|
||||
ok.
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
{application, emqx_slow_subs, [
|
||||
{description, "EMQX Slow Subscribers Statistics"},
|
||||
% strict semver, bump manually!
|
||||
{vsn, "1.0.1"},
|
||||
{vsn, "1.0.2"},
|
||||
{modules, []},
|
||||
{registered, [emqx_slow_subs_sup]},
|
||||
{applications, [kernel, stdlib, emqx]},
|
||||
|
|
|
@ -166,11 +166,11 @@ init([]) ->
|
|||
expire_timer => undefined
|
||||
},
|
||||
|
||||
Enable = emqx:get_config([slow_subs, enable]),
|
||||
{ok, check_enable(Enable, InitState)}.
|
||||
Cfg = emqx:get_config([slow_subs]),
|
||||
{ok, check_enable(Cfg, InitState)}.
|
||||
|
||||
handle_call({update_settings, #{enable := Enable}}, _From, State) ->
|
||||
State2 = check_enable(Enable, State),
|
||||
handle_call({update_settings, Cfg}, _From, State) ->
|
||||
State2 = check_enable(Cfg, State),
|
||||
{reply, ok, State2};
|
||||
handle_call(clear_history, _, State) ->
|
||||
do_clear_history(),
|
||||
|
@ -206,12 +206,14 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
expire_tick() ->
|
||||
erlang:send_after(?EXPIRE_CHECK_INTERVAL, self(), ?FUNCTION_NAME).
|
||||
|
||||
load(State) ->
|
||||
load(
|
||||
#{
|
||||
top_k_num := MaxSizeT,
|
||||
stats_type := StatsType,
|
||||
threshold := Threshold
|
||||
} = emqx:get_config([slow_subs]),
|
||||
},
|
||||
State
|
||||
) ->
|
||||
MaxSize = erlang:min(MaxSizeT, ?MAX_SIZE),
|
||||
ok = emqx_hooks:put(
|
||||
'delivery.completed',
|
||||
|
@ -334,15 +336,15 @@ do_clear_history() ->
|
|||
ets:delete_all_objects(?INDEX_TAB),
|
||||
ets:delete_all_objects(?TOPK_TAB).
|
||||
|
||||
check_enable(Enable, #{enable := IsEnable} = State) ->
|
||||
check_enable(#{enable := Enable} = Cfg, #{enable := IsEnable} = State) ->
|
||||
case {IsEnable, Enable} of
|
||||
{false, true} ->
|
||||
load(State);
|
||||
load(Cfg, State);
|
||||
{true, false} ->
|
||||
unload(State);
|
||||
{true, true} ->
|
||||
S1 = unload(State),
|
||||
load(S1);
|
||||
load(Cfg, S1);
|
||||
_ ->
|
||||
State
|
||||
end.
|
||||
|
|
|
@ -26,13 +26,15 @@
|
|||
|
||||
-define(NOW, erlang:system_time(millisecond)).
|
||||
-define(CLUSTER_RPC_SHARD, emqx_cluster_rpc_shard).
|
||||
-define(LANTENCY, 101).
|
||||
|
||||
-define(BASE_CONF, <<
|
||||
""
|
||||
"\n"
|
||||
"slow_subs {\n"
|
||||
" enable = true\n"
|
||||
" top_k_num = 5,\n"
|
||||
" top_k_num = 5\n"
|
||||
" threshold = 100ms\n"
|
||||
" expire_interval = 5m\n"
|
||||
" stats_type = whole\n"
|
||||
" }"
|
||||
|
@ -64,10 +66,10 @@ end_per_suite(_Config) ->
|
|||
|
||||
init_per_testcase(t_expire, Config) ->
|
||||
{ok, _} = emqx_cluster_rpc:start_link(),
|
||||
Cfg = emqx_config:get([slow_subs]),
|
||||
emqx_slow_subs:update_settings(Cfg#{expire_interval := 1500}),
|
||||
update_config(<<"expire_interval">>, <<"1500ms">>),
|
||||
Config;
|
||||
init_per_testcase(_, Config) ->
|
||||
{ok, _} = emqx_cluster_rpc:start_link(),
|
||||
Config.
|
||||
|
||||
end_per_testcase(_, _) ->
|
||||
|
@ -84,38 +86,7 @@ end_per_testcase(_, _) ->
|
|||
%% Test Cases
|
||||
%%--------------------------------------------------------------------
|
||||
t_pub(_) ->
|
||||
%% Sub topic first
|
||||
Subs = [{<<"/test1/+">>, ?QOS_1}, {<<"/test2/+">>, ?QOS_2}],
|
||||
Clients = start_client(Subs),
|
||||
timer:sleep(1000),
|
||||
Now = ?NOW,
|
||||
%% publish
|
||||
|
||||
lists:foreach(
|
||||
fun(I) ->
|
||||
Topic = list_to_binary(io_lib:format("/test1/~p", [I])),
|
||||
Msg = emqx_message:make(undefined, ?QOS_1, Topic, <<"Hello">>),
|
||||
emqx:publish(Msg#message{timestamp = Now - 500}),
|
||||
timer:sleep(100)
|
||||
end,
|
||||
lists:seq(1, 10)
|
||||
),
|
||||
|
||||
lists:foreach(
|
||||
fun(I) ->
|
||||
Topic = list_to_binary(io_lib:format("/test2/~p", [I])),
|
||||
Msg = emqx_message:make(undefined, ?QOS_2, Topic, <<"Hello">>),
|
||||
emqx:publish(Msg#message{timestamp = Now - 500}),
|
||||
timer:sleep(100)
|
||||
end,
|
||||
lists:seq(1, 10)
|
||||
),
|
||||
|
||||
timer:sleep(1000),
|
||||
Size = ets:info(?TOPK_TAB, size),
|
||||
?assert(Size =< 10 andalso Size >= 3, io_lib:format("the size is :~p~n", [Size])),
|
||||
|
||||
[Client ! stop || Client <- Clients],
|
||||
_ = [stats_with_type(Type) || Type <- [whole, internal, response]],
|
||||
ok.
|
||||
|
||||
t_expire(_) ->
|
||||
|
@ -135,16 +106,12 @@ t_expire(_) ->
|
|||
?assertEqual(0, Size),
|
||||
ok.
|
||||
|
||||
start_client(Subs) ->
|
||||
[spawn(fun() -> client(I, Subs) end) || I <- lists:seq(1, 10)].
|
||||
start_client(Type, Subs) ->
|
||||
[spawn(fun() -> client(I, Type, Subs) end) || I <- lists:seq(1, 10)].
|
||||
|
||||
client(I, Subs) ->
|
||||
{ok, C} = emqtt:start_link([
|
||||
{host, "localhost"},
|
||||
{clientid, io_lib:format("slow_subs_~p", [I])},
|
||||
{username, <<"plain">>},
|
||||
{password, <<"plain">>}
|
||||
]),
|
||||
client(I, Type, Subs) ->
|
||||
ConnOptions = make_conn_options(Type, I),
|
||||
{ok, C} = emqtt:start_link(ConnOptions),
|
||||
{ok, _} = emqtt:connect(C),
|
||||
|
||||
Len = erlang:length(Subs),
|
||||
|
@ -155,3 +122,81 @@ client(I, Subs) ->
|
|||
stop ->
|
||||
ok
|
||||
end.
|
||||
|
||||
stats_with_type(Type) ->
|
||||
emqx_slow_subs:clear_history(),
|
||||
update_stats_type(Type),
|
||||
%% Sub topic first
|
||||
Subs = [{<<"/test1/+">>, ?QOS_1}, {<<"/test2/+">>, ?QOS_2}],
|
||||
Clients = start_client(Type, Subs),
|
||||
timer:sleep(1000),
|
||||
Now = ?NOW,
|
||||
%% publish
|
||||
|
||||
lists:foreach(
|
||||
fun(I) ->
|
||||
Topic = list_to_binary(io_lib:format("/test1/~p", [I])),
|
||||
Msg = emqx_message:make(undefined, ?QOS_1, Topic, <<"Hello">>),
|
||||
emqx:publish(Msg#message{timestamp = Now - ?LANTENCY}),
|
||||
timer:sleep(100)
|
||||
end,
|
||||
lists:seq(1, 10)
|
||||
),
|
||||
|
||||
lists:foreach(
|
||||
fun(I) ->
|
||||
Topic = list_to_binary(io_lib:format("/test2/~p", [I])),
|
||||
Msg = emqx_message:make(undefined, ?QOS_2, Topic, <<"Hello">>),
|
||||
emqx:publish(Msg#message{timestamp = Now - ?LANTENCY}),
|
||||
timer:sleep(100)
|
||||
end,
|
||||
lists:seq(1, 10)
|
||||
),
|
||||
|
||||
timer:sleep(1000),
|
||||
Size = ets:info(?TOPK_TAB, size),
|
||||
?assert(
|
||||
Size =< 10 andalso Size >= 3,
|
||||
lists:flatten(io_lib:format("with_type:~p, the size is :~p~n", [Type, Size]))
|
||||
),
|
||||
|
||||
?assert(
|
||||
lists:all(
|
||||
fun(#{timespan := Ts}) ->
|
||||
Ts >= 101 andalso Ts < ?NOW - Now
|
||||
end,
|
||||
emqx_slow_subs_api:get_history()
|
||||
)
|
||||
),
|
||||
|
||||
[Client ! stop || Client <- Clients],
|
||||
ok.
|
||||
|
||||
update_stats_type(Type) ->
|
||||
update_config(<<"stats_type">>, erlang:atom_to_binary(Type)).
|
||||
|
||||
update_config(Key, Value) ->
|
||||
Raw = #{
|
||||
<<"enable">> => true,
|
||||
<<"expire_interval">> => <<"5m">>,
|
||||
<<"stats_type">> => <<"whole">>,
|
||||
<<"threshold">> => <<"100ms">>,
|
||||
<<"top_k_num">> => 5
|
||||
},
|
||||
emqx_slow_subs:update_settings(Raw#{Key => Value}).
|
||||
|
||||
make_conn_options(response, I) ->
|
||||
[
|
||||
{msg_handler, #{
|
||||
publish => fun(_) -> timer:sleep(?LANTENCY) end,
|
||||
disconnected => fun(_) -> ok end
|
||||
}}
|
||||
| make_conn_options(whole, I)
|
||||
];
|
||||
make_conn_options(_, I) ->
|
||||
[
|
||||
{host, "localhost"},
|
||||
{clientid, io_lib:format("slow_subs_~p", [I])},
|
||||
{username, <<"plain">>},
|
||||
{password, <<"plain">>}
|
||||
].
|
||||
|
|
bin/emqx
|
@ -7,7 +7,7 @@ set -euo pipefail
|
|||
DEBUG="${DEBUG:-0}"
|
||||
[ "$DEBUG" -eq 1 ] && set -x
|
||||
|
||||
RUNNER_ROOT_DIR="$(cd "$(dirname "$(realpath "$0" || echo "$0")")"/..; pwd -P)"
|
||||
RUNNER_ROOT_DIR="$(cd "$(dirname "$(readlink "$0" || echo "$0")")"/..; pwd -P)"
|
||||
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
. "$RUNNER_ROOT_DIR"/releases/emqx_vars
|
||||
|
@ -24,9 +24,6 @@ REL_DIR="$RUNNER_ROOT_DIR/releases/$REL_VSN"
|
|||
|
||||
WHOAMI=$(whoami)
|
||||
|
||||
# Make sure log directory exists
|
||||
mkdir -p "$RUNNER_LOG_DIR"
|
||||
|
||||
# hocon tries to read environment variables starting with "EMQX_"
|
||||
export HOCON_ENV_OVERRIDE_PREFIX='EMQX_'
|
||||
|
||||
|
@ -262,6 +259,9 @@ if [ "$ES" -ne 0 ]; then
|
|||
exit $ES
|
||||
fi
|
||||
|
||||
# Make sure log directory exists
|
||||
mkdir -p "$RUNNER_LOG_DIR"
|
||||
|
||||
COMPATIBILITY_CHECK='
|
||||
io:format("BEAM_OK~n", []),
|
||||
try
|
||||
|
@ -417,7 +417,7 @@ call_hocon() {
|
|||
## and parsing HOCON config + environment variables is a non-trivial task
|
||||
CONF_KEYS=( 'node.data_dir' 'node.name' 'node.cookie' 'node.db_backend' 'cluster.proto_dist' )
|
||||
if [ "$IS_ENTERPRISE" = 'yes' ]; then
|
||||
CONF_KEYS+=( 'license.type' 'license.file' 'license.key' )
|
||||
CONF_KEYS+=( 'license.key' )
|
||||
fi
|
||||
|
||||
if [ "$IS_BOOT_COMMAND" = 'yes' ]; then
|
||||
|
@ -499,22 +499,16 @@ check_license() {
|
|||
return 0
|
||||
fi
|
||||
|
||||
file_license="${EMQX_LICENSE__FILE:-$(get_boot_config 'license.file')}"
|
||||
key_license="${EMQX_LICENSE__KEY:-$(get_boot_config 'license.key')}"
|
||||
|
||||
if [[ -n "$file_license" && ("$file_license" != "undefined") ]]; then
|
||||
call_nodetool check_license_file "$file_license"
|
||||
if [[ -n "$key_license" && ("$key_license" != "undefined") ]]; then
|
||||
call_nodetool check_license_key "$key_license"
|
||||
else
|
||||
key_license="${EMQX_LICENSE__KEY:-$(get_boot_config 'license.key')}"
|
||||
|
||||
if [[ -n "$key_license" && ("$key_license" != "undefined") ]]; then
|
||||
call_nodetool check_license_key "$key_license"
|
||||
else
|
||||
set +x
|
||||
echoerr "License not found."
|
||||
echoerr "Please specify one via EMQX_LICENSE__KEY or EMQX_LICENSE__FILE variables"
|
||||
echoerr "or via license.key|file in emqx_enterprise.conf."
|
||||
return 1
|
||||
fi
|
||||
set +x
|
||||
echoerr "License not found."
|
||||
echoerr "Please specify one via the EMQX_LICENSE__KEY variable"
|
||||
echoerr "or via license.key in emqx-enterprise.conf."
|
||||
return 1
|
||||
fi
|
||||
}
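A hedged sketch of satisfying `check_license` before boot; `EMQX_LICENSE__KEY` is the variable the function reads above, while the key value and the `start` sub-command are placeholders/assumptions:

```
# Supply the enterprise license key via the environment override read by check_license.
export EMQX_LICENSE__KEY='MjIwMTEx...'   # truncated placeholder, not a usable license
./bin/emqx start
```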
@ -25,9 +25,7 @@ main(Args) ->
|
|||
%% forward the call to hocon_cli
|
||||
hocon_cli:main(Rest);
|
||||
["check_license_key", Key] ->
|
||||
check_license(#{type => key, key => list_to_binary(Key)});
|
||||
["check_license_file", File] ->
|
||||
check_license(#{type => file, file => list_to_binary(File)});
|
||||
check_license(#{key => list_to_binary(Key)});
|
||||
_ ->
|
||||
do(Args)
|
||||
end.
|
||||
|
|
build
|
@ -231,7 +231,16 @@ make_tgz() {
|
|||
local relpath="_build/${PROFILE}/rel/emqx"
|
||||
full_vsn="$(./pkg-vsn.sh "$PROFILE" --long)"
|
||||
fi
|
||||
target_name="${PROFILE}-${full_vsn}.tar.gz"
|
||||
|
||||
case "$SYSTEM" in
|
||||
macos*)
|
||||
target_name="${PROFILE}-${full_vsn}.zip"
|
||||
;;
|
||||
*)
|
||||
target_name="${PROFILE}-${full_vsn}.tar.gz"
|
||||
;;
|
||||
esac
|
||||
|
||||
target="${pkgpath}/${target_name}"
|
||||
|
||||
src_tarball="${relpath}/emqx-${PKG_VSN}.tar.gz"
|
||||
|
@ -248,23 +257,54 @@ make_tgz() {
|
|||
## try to be portable for tar.gz packages.
|
||||
## for DEB and RPM packages the dependencies are resolved by yum and apt
|
||||
cp_dyn_libs "${tard}/emqx"
|
||||
## create tar after change dir
|
||||
## to avoid creating an extra level of 'emqx' dir in the .tar.gz file
|
||||
pushd "${tard}/emqx" >/dev/null
|
||||
$TAR -zcf "../${target_name}" -- *
|
||||
popd >/dev/null
|
||||
mv "${tard}/${target_name}" "${target}"
|
||||
case "$SYSTEM" in
|
||||
macos*)
|
||||
# if the flag to sign macos binaries is set, but developer certificate
|
||||
# or certificate password is not configured, reset the flag
|
||||
# this could happen, for example, when people submit a PR from a fork;
# in that case they cannot access secrets
|
||||
if [[ "${APPLE_SIGN_BINARIES:-0}" == 1 && \
|
||||
( "${APPLE_DEVELOPER_ID_BUNDLE:-0}" == 0 || \
|
||||
"${APPLE_DEVELOPER_ID_BUNDLE_PASSWORD:-0}" == 0 ) ]]; then
|
||||
echo "Apple developer certificate is not configured, skip signing"
|
||||
APPLE_SIGN_BINARIES=0
|
||||
fi
|
||||
if [ "${APPLE_SIGN_BINARIES:-0}" = 1 ]; then
|
||||
./scripts/macos-sign-binaries.sh "${tard}/emqx"
|
||||
fi
|
||||
## create zip after change dir
|
||||
## to avoid creating an extra level of 'emqx' dir in the .zip file
|
||||
pushd "${tard}/emqx" >/dev/null
|
||||
zip -r "../${target_name}" -- * >/dev/null
|
||||
popd >/dev/null
|
||||
mv "${tard}/${target_name}" "${target}"
|
||||
if [ "${APPLE_SIGN_BINARIES:-0}" = 1 ]; then
|
||||
# notarize the package
|
||||
# if fails, check what went wrong with this command:
|
||||
# xcrun notarytool log --apple-id <apple id> \
#   --password <apple id password> \
#   --team-id <apple team id> <submission-id>
|
||||
xcrun notarytool submit \
|
||||
--apple-id "${APPLE_ID}" \
|
||||
--password "${APPLE_ID_PASSWORD}" \
|
||||
--team-id "${APPLE_TEAM_ID}" "${target}" --wait
|
||||
fi
|
||||
# sha256sum may not be available on macos
|
||||
openssl dgst -sha256 "${target}" | cut -d ' ' -f 2 > "${target}.sha256"
|
||||
;;
|
||||
*)
|
||||
## create tar after change dir
|
||||
## to avoid creating an extra level of 'emqx' dir in the .tar.gz file
|
||||
pushd "${tard}/emqx" >/dev/null
|
||||
$TAR -zcf "../${target_name}" -- *
|
||||
popd >/dev/null
|
||||
mv "${tard}/${target_name}" "${target}"
|
||||
sha256sum "${target}" | head -c 64 > "${target}.sha256"
|
||||
;;
|
||||
esac
|
||||
log "Tarball successfully repacked: ${target}"
|
||||
log "Tarball sha256sum: $(cat "${target}.sha256")"
|
||||
log "Archive successfully repacked: ${target}"
|
||||
log "Archive sha256sum: $(cat "${target}.sha256")"
|
||||
}
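Since the macOS branch records the digest with `openssl dgst` while Linux uses `sha256sum`, here is a small sketch of verifying a downloaded archive against the generated `.sha256` file (the archive name is a placeholder):

```
# Compare the recorded digest with one computed locally; works on macOS and Linux.
pkg='emqx-enterprise-5.0.0-macos11-amd64.zip'   # placeholder archive name
expected="$(cat "${pkg}.sha256")"
actual="$(openssl dgst -sha256 "$pkg" | cut -d ' ' -f 2)"
[ "$expected" = "$actual" ] && echo "checksum OK" || { echo "checksum mismatch"; exit 1; }
```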
|
||||
|
||||
## This function builds the default docker image based on debian 11
|
||||
|
|
|
@ -0,0 +1,21 @@
|
|||
apiVersion: v2
|
||||
name: emqx-enterprise
|
||||
icon: https://github.com/emqx.png
|
||||
description: A Helm chart for EMQX
|
||||
# A chart can be either an 'application' or a 'library' chart.
|
||||
#
|
||||
# Application charts are a collection of templates that can be packaged into versioned archives
|
||||
# to be deployed.
|
||||
#
|
||||
# Library charts provide useful utilities or functions for the chart developer. They're included as
|
||||
# a dependency of application charts to inject those utilities and functions into the rendering
|
||||
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
|
||||
type: application
|
||||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
version: 5.0.0
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application.
|
||||
appVersion: 5.0.0
|
|
@ -0,0 +1,121 @@
|
|||
# Introduction
|
||||
|
||||
This chart bootstraps an emqx deployment on a Kubernetes cluster using the Helm package manager.
|
||||
|
||||
# Prerequisites
|
||||
|
||||
+ Kubernetes 1.6+
|
||||
+ Helm
|
||||
|
||||
# Installing the Chart
|
||||
|
||||
To install the chart with the release name `my-emqx`:
|
||||
|
||||
+ From github
|
||||
```
|
||||
$ git clone https://github.com/emqx/emqx.git
|
||||
$ cd emqx/deploy/charts/emqx
|
||||
$ helm install my-emqx .
|
||||
```
|
||||
|
||||
+ From chart repos
|
||||
```
|
||||
helm repo add emqx https://repos.emqx.io/charts
|
||||
helm install my-emqx emqx/emqx
|
||||
```
|
||||
> If you want to install an unstable version, you need to add `--devel` when you execute the `helm install` command.
|
||||
|
||||
# Uninstalling the Chart
|
||||
|
||||
To uninstall/delete the `my-emqx` deployment:
|
||||
|
||||
```
|
||||
$ helm del my-emqx
|
||||
```
|
||||
|
||||
# Configuration
|
||||
|
||||
The following table lists the configurable parameters of the emqx chart and their default values.
|
||||
|
||||
| Parameter | Description | Default Value |
|
||||
|--------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
|
||||
| `replicaCount` | It is recommended to have an odd number of nodes in a cluster, otherwise the emqx cluster cannot be automatically healed in case of a net-split. | 3 |
|
||||
| `image.repository` | EMQX Image name | emqx/emqx |
|
||||
| `image.pullPolicy` | The image pull policy | IfNotPresent |
|
||||
| `image.pullSecrets ` | The image pull secrets | `[]` (does not add image pull secrets to deployed pods) |
|
||||
| `envFromSecret` | The name of a secret in the same Kubernetes namespace which contains values to be added to the environment | nil |
|
||||
| `recreatePods` | Forces the recreation of pods during upgrades, which can be useful to always apply the most recent configuration. | false |
|
||||
| `podAnnotations ` | Annotations for pod | `{}` |
|
||||
| `podManagementPolicy` | To redeploy a chart with existing PVC(s), the value must be set to Parallel to avoid deadlock | `Parallel` |
|
||||
| `persistence.enabled` | Enable EMQX persistence using PVC | false |
|
||||
| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) |
|
||||
| `persistence.existingClaim` | EMQX data Persistent Volume existing claim name, evaluated as a template | "" |
|
||||
| `persistence.accessMode` | PVC Access Mode for EMQX volume | ReadWriteOnce |
|
||||
| `persistence.size` | PVC Storage Request for EMQX volume | 20Mi |
|
||||
| `initContainers` | Containers that run before the creation of EMQX containers. They can contain utilities or setup scripts. | `{}` |
|
||||
| `resources` | CPU/Memory resource requests/limits | {} |
|
||||
| `nodeSelector` | Node labels for pod assignment | `{}` |
|
||||
| `tolerations` | Toleration labels for pod assignment | `[]` |
|
||||
| `affinity` | Map of node/pod affinities | `{}` |
|
||||
| `service.type` | Kubernetes Service type. | ClusterIP |
|
||||
| `service.mqtt` | Port for MQTT. | 1883 |
|
||||
| `service.mqttssl` | Port for MQTT(SSL). | 8883 |
|
||||
| `service.mgmt` | Port for mgmt API. | 8081 |
|
||||
| `service.ws` | Port for WebSocket/HTTP. | 8083 |
|
||||
| `service.wss` | Port for WSS/HTTPS. | 8084 |
|
||||
| `service.dashboard` | Port for dashboard. | 18083 |
|
||||
| `service.nodePorts.mqtt` | Kubernetes node port for MQTT. | nil |
|
||||
| `service.nodePorts.mqttssl` | Kubernetes node port for MQTT(SSL). | nil |
|
||||
| `service.nodePorts.mgmt` | Kubernetes node port for mgmt API. | nil |
|
||||
| `service.nodePorts.ws` | Kubernetes node port for WebSocket/HTTP. | nil |
|
||||
| `service.nodePorts.wss` | Kubernetes node port for WSS/HTTPS. | nil |
|
||||
| `service.nodePorts.dashboard` | Kubernetes node port for dashboard. | nil |
|
||||
| `service.loadBalancerIP` | loadBalancerIP for Service | nil |
|
||||
| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | [] |
|
||||
| `service.externalIPs` | ExternalIPs for the service | [] |
|
||||
| `service.annotations` | Service annotations | {}(evaluated as a template) |
|
||||
| `ingress.dashboard.enabled` | Enable ingress for EMQX Dashboard | false |
|
||||
| `ingress.dashboard.ingressClassName` | Set the ingress class for EMQX Dashboard | |
|
||||
| `ingress.dashboard.path` | Ingress path for EMQX Dashboard | / |
|
||||
| `ingress.dashboard.pathType` | Ingress pathType for EMQX Dashboard | `ImplementationSpecific` |
|
||||
| `ingress.dashboard.hosts` | Ingress hosts for EMQX Dashboard | dashboard.emqx.local |
| `ingress.dashboard.tls` | Ingress tls for EMQX Dashboard | [] |
| `ingress.dashboard.annotations` | Ingress annotations for EMQX Dashboard | {} |
|
||||
| `ingress.mgmt.enabled` | Enable ingress for EMQX Mgmt API | false |
|
||||
| `ingress.mgmt.ingressClassName` | Set the ingress class for EMQX Mgmt API | |
|
||||
| `ingress.mgmt.path` | Ingress path for EMQX Mgmt API | / |
|
||||
| `ingress.mgmt.hosts` | Ingress hosts for EMQX Mgmt API | api.emqx.local |
|
||||
| `ingress.mgmt.tls` | Ingress tls for EMQX Mgmt API | [] |
|
||||
| `ingress.mgmt.annotations` | Ingress annotations for EMQX Mgmt API | {} |
|
||||
| `metrics.enabled` | If set to true, [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) needs to be installed, and emqx_prometheus needs to be enabled | false |
|
||||
| `metrics.type` | Currently only "prometheus" is supported | "prometheus" |
|
||||
| `ssl.enabled` | Enable SSL support | false |
|
||||
| `ssl.useExisting` | Use existing certificate or let cert-manager generate one | false |
|
||||
| `ssl.existingName` | Name of existing certificate | emqx-tls |
|
||||
| `ssl.dnsnames` | DNS name(s) for certificate to be generated | {} |
|
||||
| `ssl.issuer.name` | Issuer name for certificate generation | letsencrypt-dns |
|
||||
| `ssl.issuer.kind` | Issuer kind for certificate generation | ClusterIssuer |
|
||||
|
||||
## EMQX specific settings
|
||||
|
||||
The following table lists the configurable [EMQX](https://www.emqx.io/)-specific parameters of the chart and their
|
||||
default values.
|
||||
Parameter | Description | Default Value
--- | --- | ---
`emqxConfig` | Map of [configuration](https://www.emqx.io/docs/en/latest/configuration/configuration.html) items expressed as [environment variables](https://www.emqx.io/docs/en/v4.3/configuration/environment-variable.html) (prefix can be omitted) or using the configuration files [namespaced dotted notation](https://www.emqx.io/docs/en/latest/configuration/configuration.html) | `nil`
`emqxLicenseSecretName` | Name of the secret that holds the license information | `nil`
|
||||
|
||||
## SSL settings
|
||||
`cert-manager` generates secrets with certificate data using the keys `tls.crt` and `tls.key`. The Helm chart always mounts those keys as files to `/tmp/ssl/`,
which need to be explicitly configured either by changing the EMQX config file or by passing the following environment variables:
|
||||
|
||||
```
|
||||
EMQX_LISTENERS__SSL__DEFAULT__SSL_OPTIONS__CERTFILE: /tmp/ssl/tls.crt
|
||||
EMQX_LISTENERS__SSL__DEFAULT__SSL_OPTIONS__KEYFILE: /tmp/ssl/tls.key
|
||||
```
|
||||
|
||||
If you choose to use an existing certificate, make sure to update the filenames accordingly.
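For example, a minimal sketch of wiring those paths in at install time through `emqxConfig` (release name, namespace and the `ssl.enabled` flag are illustrative):

```
helm install my-emqx . \
  --namespace emqx --create-namespace \
  --set ssl.enabled=true \
  --set emqxConfig.EMQX_LISTENERS__SSL__DEFAULT__SSL_OPTIONS__CERTFILE=/tmp/ssl/tls.crt \
  --set emqxConfig.EMQX_LISTENERS__SSL__DEFAULT__SSL_OPTIONS__KEYFILE=/tmp/ssl/tls.key
```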
@ -0,0 +1,168 @@
|
|||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: {{ include "emqx.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
helm.sh/chart: {{ include "emqx.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
serviceName: {{ include "emqx.fullname" . }}-headless
|
||||
podManagementPolicy: {{ .Values.podManagementPolicy }}
|
||||
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: emqx-data
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
{{- if .Values.persistence.storageClassName }}
|
||||
storageClassName: {{ .Values.persistence.storageClassName | quote }}
|
||||
{{- end }}
|
||||
accessModes:
|
||||
- {{ .Values.persistence.accessMode | quote }}
|
||||
resources:
|
||||
requests:
|
||||
storage: {{ .Values.persistence.size | quote }}
|
||||
{{- end }}
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
replicas: {{ .Values.replicaCount }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: {{ include "emqx.name" . }}
|
||||
version: {{ .Chart.AppVersion }}
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
annotations:
|
||||
{{- with .Values.podAnnotations }}
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.recreatePods }}
|
||||
checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum | quote }}
|
||||
{{- end }}
|
||||
spec:
|
||||
volumes:
|
||||
{{- if .Values.ssl.enabled }}
|
||||
- name: ssl-cert
|
||||
secret:
|
||||
secretName: {{ include "emqx.fullname" . }}-tls
|
||||
{{- end }}
|
||||
{{- if not .Values.persistence.enabled }}
|
||||
- name: emqx-data
|
||||
emptyDir: {}
|
||||
{{- else if .Values.persistence.existingClaim }}
|
||||
- name: emqx-data
|
||||
persistentVolumeClaim:
|
||||
{{- with .Values.persistence.existingClaim }}
|
||||
claimName: {{ tpl . $ }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if .Values.emqxLicenseSecretName }}
|
||||
- name: emqx-license
|
||||
secret:
|
||||
secretName: {{ .Values.emqxLicenseSecretName }}
|
||||
{{- end }}
|
||||
{{- if eq .Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY "k8s"}}
|
||||
serviceAccountName: {{ include "emqx.fullname" . }}
|
||||
{{- end }}
|
||||
{{- if .Values.podSecurityContext.enabled }}
|
||||
securityContext: {{- omit .Values.podSecurityContext "enabled" | toYaml | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.initContainers }}
|
||||
initContainers:
|
||||
{{ toYaml .Values.initContainers | indent 8 }}
|
||||
{{- end }}
|
||||
{{- if .Values.image.pullSecrets }}
|
||||
imagePullSecrets:
|
||||
{{- range .Values.image.pullSecrets }}
|
||||
- name: {{ . }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: emqx
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.containerSecurityContext.enabled }}
|
||||
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: mqtt
|
||||
containerPort: {{ .Values.emqxConfig.EMQX_LISTENERS__TCP__DEFAULT | default 1883 }}
|
||||
- name: mqttssl
|
||||
containerPort: {{ .Values.emqxConfig.EMQX_LISTENERS__SSL__DEFAULT | default 8883 }}
|
||||
- name: ws
|
||||
containerPort: {{ .Values.emqxConfig.EMQX_LISTENERS__WS__DEFAULT | default 8083 }}
|
||||
- name: wss
|
||||
containerPort: {{ .Values.emqxConfig.EMQX_LISTENERS__WSS__DEFAULT | default 8084 }}
|
||||
- name: dashboard
|
||||
containerPort: {{ .Values.emqxConfig.EMQX_DASHBOARD__LISTENER__HTTP | default 18083 }}
|
||||
{{- if not (empty .Values.emqxConfig.EMQX_LISTENERS__TCP__DEFAULT) }}
|
||||
- name: internalmqtt
|
||||
containerPort: {{ .Values.emqxConfig.EMQX_LISTENERS__TCP__DEFAULT }}
|
||||
{{- end }}
|
||||
{{- if not (empty .Values.emqxConfig.EMQX_DASHBOARD__LISTENER__HTTPS) }}
|
||||
- name: dashboardtls
|
||||
containerPort: {{ .Values.emqxConfig.EMQX_DASHBOARD__LISTENER__HTTPS }}
|
||||
{{- end }}
|
||||
- name: ekka
|
||||
containerPort: 4370
|
||||
envFrom:
|
||||
- configMapRef:
|
||||
name: {{ include "emqx.fullname" . }}-env
|
||||
{{- if .Values.envFromSecret }}
|
||||
- secretRef:
|
||||
name: {{ .Values.envFromSecret }}
|
||||
{{- end }}
|
||||
resources:
|
||||
{{ toYaml .Values.resources | indent 12 }}
|
||||
volumeMounts:
|
||||
- name: emqx-data
|
||||
mountPath: "/opt/emqx/data"
|
||||
{{- if .Values.ssl.enabled }}
|
||||
- name: ssl-cert
|
||||
mountPath: /tmp/ssl
|
||||
readOnly: true
|
||||
{{- end}}
|
||||
{{ if .Values.emqxLicenseSecretName }}
|
||||
- name: emqx-license
|
||||
mountPath: "/opt/emqx/etc/emqx.lic"
|
||||
subPath: "emqx.lic"
|
||||
readOnly: true
|
||||
{{- end }}
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /status
|
||||
port: {{ .Values.emqxConfig.EMQX_DASHBOARD__LISTENER__HTTP | default 18083 }}
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 5
|
||||
failureThreshold: 30
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /status
|
||||
port: {{ .Values.emqxConfig.EMQX_DASHBOARD__LISTENER__HTTP | default 18083 }}
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 30
|
||||
failureThreshold: 10
|
||||
{{- with .Values.nodeSelector }}
|
||||
nodeSelector:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.affinity }}
|
||||
affinity:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
|
||||
{{- with .Values.tolerations }}
|
||||
tolerations:
|
||||
{{- toYaml . | nindent 8 }}
|
||||
{{- end }}
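A quick way to exercise the `/status` readiness endpoint defined above from a workstation, as a sketch (release and namespace names are placeholders):

```
# Forward the dashboard port of the StatefulSet and hit the probe path.
kubectl -n emqx port-forward statefulset/my-emqx 18083:18083 &
curl -fs 127.0.0.1:18083/status
```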
|
|
@ -0,0 +1,32 @@
|
|||
{{/* vim: set filetype=mustache: */}}
|
||||
{{/*
|
||||
Expand the name of the chart.
|
||||
*/}}
|
||||
{{- define "emqx.name" -}}
|
||||
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create a default fully qualified app name.
|
||||
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
|
||||
If release name contains chart name it will be used as a full name.
|
||||
*/}}
|
||||
{{- define "emqx.fullname" -}}
|
||||
{{- if .Values.fullnameOverride -}}
|
||||
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- $name := default .Chart.Name .Values.nameOverride -}}
|
||||
{{- if contains $name .Release.Name -}}
|
||||
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- else -}}
|
||||
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
|
||||
{{/*
|
||||
Create chart name and version as used by the chart label.
|
||||
*/}}
|
||||
{{- define "emqx.chart" -}}
|
||||
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
|
||||
{{- end -}}
|
|
@ -0,0 +1,16 @@
|
|||
{{- if and (.Values.ssl.enabled) (not .Values.ssl.useExisting) -}}
|
||||
---
|
||||
apiVersion: cert-manager.io/v1
|
||||
kind: Certificate
|
||||
metadata:
|
||||
name: {{ include "emqx.fullname" . }}-tls
|
||||
spec:
|
||||
secretName: {{ include "emqx.fullname" . }}-tls
|
||||
issuerRef:
|
||||
name: {{ default "letsencrypt-staging" .Values.ssl.issuer.name }}
|
||||
kind: {{ default "ClusterIssuer" .Values.ssl.issuer.kind }}
|
||||
dnsNames:
|
||||
{{- range .Values.ssl.dnsnames }}
|
||||
- {{ . }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
|
@ -0,0 +1,19 @@
|
|||
{{- if .Values.emqxConfig }}
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: {{ include "emqx.fullname" . }}-env
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
helm.sh/chart: {{ include "emqx.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
data:
|
||||
{{- range $index, $value := .Values.emqxConfig }}
|
||||
{{- if $value }}
|
||||
{{- $key := (regexReplaceAllLiteral "\\." (regexReplaceAllLiteral "EMQX[_\\.]" (upper (trimAll " " $index)) "") "__") }}
|
||||
{{ print "EMQX_" $key }}: "{{ tpl (printf "%v" $value) $ }}"
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
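To see how the regex above rewrites `emqxConfig` keys into `EMQX_*` environment variables, one hedged option is to render just this template locally (release name and the overridden key are illustrative):

```
# Render only the env ConfigMap; every emqxConfig entry should appear as an EMQX_ variable.
helm template my-emqx . \
  --set emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD=not-public \
  --show-only templates/configmap.yaml
```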
|
|
@ -0,0 +1,50 @@
|
|||
{{- if .Values.ingress.dashboard.enabled -}}
|
||||
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1
|
||||
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
|
||||
apiVersion: networking.k8s.io/v1beta1
|
||||
{{- else -}}
|
||||
apiVersion: extensions/v1beta1
|
||||
{{- end }}
|
||||
kind: Ingress
|
||||
metadata:
|
||||
name: {{ printf "%s-%s" (include "emqx.fullname" .) "dashboard" }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
helm.sh/chart: {{ include "emqx.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- if .Values.ingress.dashboard.annotations }}
|
||||
annotations:
|
||||
{{- toYaml .Values.ingress.dashboard.annotations | nindent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
{{- if and .Values.ingress.dashboard.ingressClassName (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
|
||||
ingressClassName: {{ .Values.ingress.dashboard.ingressClassName }}
|
||||
{{- end }}
|
||||
rules:
|
||||
{{- range $host := .Values.ingress.dashboard.hosts }}
|
||||
- host: {{ $host }}
|
||||
http:
|
||||
paths:
|
||||
- path: {{ $.Values.ingress.dashboard.path | default "/" }}
|
||||
{{- if (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
|
||||
pathType: {{ $.Values.ingress.dashboard.pathType | default "ImplementationSpecific" }}
|
||||
{{- end }}
|
||||
backend:
|
||||
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
|
||||
service:
|
||||
name: {{ include "emqx.fullname" $ }}
|
||||
port:
|
||||
number: {{ $.Values.service.dashboard }}
|
||||
{{- else }}
|
||||
serviceName: {{ include "emqx.fullname" $ }}
|
||||
servicePort: {{ $.Values.service.dashboard }}
|
||||
{{- end }}
|
||||
{{- end -}}
|
||||
{{- if .Values.ingress.dashboard.tls }}
|
||||
tls:
|
||||
{{- toYaml .Values.ingress.dashboard.tls | nindent 4 }}
|
||||
{{- end }}
|
||||
---
|
||||
{{- end }}
|
|
@ -0,0 +1,44 @@
|
|||
{{- if eq .Values.emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY "k8s"}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
name: {{ include "emqx.fullname" . }}
|
||||
---
|
||||
kind: Role
|
||||
{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
{{- else }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
{{- end }}
|
||||
metadata:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
name: {{ include "emqx.fullname" . }}
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- watch
|
||||
- list
|
||||
---
|
||||
kind: RoleBinding
|
||||
{{- if semverCompare ">=1.17-0" .Capabilities.KubeVersion.GitVersion }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
{{- else }}
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
{{- end }}
|
||||
metadata:
|
||||
namespace: {{ .Release.Namespace }}
|
||||
name: {{ include "emqx.fullname" . }}
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: {{ include "emqx.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: {{ include "emqx.fullname" . }}
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
|
@ -0,0 +1,19 @@
|
|||
{{- if .Values.metrics.enabled }}
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: {{ include "emqx.fullname" . }}-basic-auth
|
||||
namespace: {{ .Release.Namespace }}
|
||||
type: kubernetes.io/basic-auth
|
||||
stringData:
|
||||
{{- if empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME }}
|
||||
username: admin
|
||||
{{- else }}
|
||||
username: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_USERNAME }}
|
||||
{{- end }}
|
||||
{{- if empty .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD }}
|
||||
password: public
|
||||
{{- else }}
|
||||
password: {{ .Values.emqxConfig.EMQX_DASHBOARD__DEFAULT_PASSWORD }}
|
||||
{{- end }}
|
||||
{{- end }}
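If the metrics secret above is rendered, the stored credentials can be read back roughly like this (release name and namespace are placeholders; the secret name follows the `<fullname>-basic-auth` pattern used above):

```
kubectl -n emqx get secret my-emqx-basic-auth -o jsonpath='{.data.password}' | base64 -d
```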
|
|
@ -0,0 +1,39 @@
|
|||
{{- if and (.Values.metrics.enabled) (eq .Values.metrics.type "prometheus") }}
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
metadata:
|
||||
name: {{ include "emqx.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
helm.sh/chart: {{ include "emqx.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- if .Values.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
endpoints:
|
||||
- interval: 10s
|
||||
port: dashboard
|
||||
scheme: http
|
||||
path: /api/v5/prometheus/stats
|
||||
params:
|
||||
type:
|
||||
- prometheus
|
||||
basicAuth:
|
||||
password:
|
||||
name: {{ include "emqx.fullname" . }}-basic-auth
|
||||
key: password
|
||||
username:
|
||||
name: {{ include "emqx.fullname" . }}-basic-auth
|
||||
key: username
|
||||
jobLabel: {{ .Release.Name }}-scraping
|
||||
namespaceSelector:
|
||||
matchNames:
|
||||
- {{ .Release.Namespace }}
|
||||
selector:
|
||||
matchLabels:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
{{- end }}
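A hedged sketch of turning this ServiceMonitor on, assuming the prometheus-operator CRDs are already installed in the cluster (release name is a placeholder):

```
helm upgrade --install my-emqx . \
  --set metrics.enabled=true \
  --set metrics.type=prometheus
```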
|
|
@ -0,0 +1,149 @@
|
|||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "emqx.fullname" . }}
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
helm.sh/chart: {{ include "emqx.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
{{- if .Values.service.annotations }}
|
||||
annotations:
|
||||
{{ toYaml .Values.service.annotations | indent 4 }}
|
||||
{{- end }}
|
||||
spec:
|
||||
type: {{ .Values.service.type }}
|
||||
{{- if eq .Values.service.type "LoadBalancer" }}
|
||||
{{- if .Values.service.loadBalancerIP }}
|
||||
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.loadBalancerSourceRanges }}
|
||||
loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- if .Values.service.externalIPs }}
|
||||
externalIPs: {{- toYaml .Values.service.externalIPs | nindent 4 }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
ports:
|
||||
- name: mqtt
|
||||
port: {{ .Values.service.mqtt | default 1883 }}
|
||||
protocol: TCP
|
||||
targetPort: mqtt
|
||||
{{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.mqtt)) }}
|
||||
nodePort: {{ .Values.service.nodePorts.mqtt }}
|
||||
{{- else if eq .Values.service.type "ClusterIP" }}
|
||||
nodePort: null
|
||||
{{- end }}
|
||||
{{- if not (empty .Values.emqxConfig.EMQX_LISTENERS__TCP__DEFAULT) }}
|
||||
- name: internalmqtt
|
||||
port: {{ .Values.service.internalmqtt | default 11883 }}
|
||||
protocol: TCP
|
||||
targetPort: internalmqtt
|
||||
{{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.internalmqtt)) }}
|
||||
nodePort: {{ .Values.service.nodePorts.internalmqtt }}
|
||||
{{- else if eq .Values.service.type "ClusterIP" }}
|
||||
nodePort: null
|
||||
{{- end }}
|
||||
{{ end }}
|
||||
- name: mqttssl
|
||||
port: {{ .Values.service.mqttssl | default 8883 }}
|
||||
protocol: TCP
|
||||
targetPort: mqttssl
|
||||
{{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.mqttssl)) }}
|
||||
nodePort: {{ .Values.service.nodePorts.mqttssl }}
|
||||
{{- else if eq .Values.service.type "ClusterIP" }}
|
||||
nodePort: null
|
||||
{{- end }}
|
||||
- name: ws
|
||||
port: {{ .Values.service.ws | default 8083 }}
|
||||
protocol: TCP
|
||||
targetPort: ws
|
||||
{{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.ws)) }}
|
||||
nodePort: {{ .Values.service.nodePorts.ws }}
|
||||
{{- else if eq .Values.service.type "ClusterIP" }}
|
||||
nodePort: null
|
||||
{{- end }}
|
||||
- name: wss
|
||||
port: {{ .Values.service.wss | default 8084 }}
|
||||
protocol: TCP
|
||||
targetPort: wss
|
||||
{{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.wss)) }}
|
||||
nodePort: {{ .Values.service.nodePorts.wss }}
|
||||
{{- else if eq .Values.service.type "ClusterIP" }}
|
||||
nodePort: null
|
||||
{{- end }}
|
||||
- name: dashboard
|
||||
port: {{ .Values.service.dashboard | default 18083 }}
|
||||
protocol: TCP
|
||||
targetPort: dashboard
|
||||
{{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.dashboard)) }}
|
||||
nodePort: {{ .Values.service.nodePorts.dashboard }}
|
||||
{{- else if eq .Values.service.type "ClusterIP" }}
|
||||
nodePort: null
|
||||
{{- end }}
|
||||
{{- if not (empty .Values.service.dashboardtls) }}
|
||||
- name: dashboardtls
|
||||
port: {{ .Values.service.dashboardtls }}
|
||||
protocol: TCP
|
||||
targetPort: dashboardtls
|
||||
{{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.dashboardtls)) }}
|
||||
nodePort: {{ .Values.service.nodePorts.dashboardtls }}
|
||||
{{- else if eq .Values.service.type "ClusterIP" }}
|
||||
nodePort: null
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "emqx.fullname" . }}-headless
|
||||
namespace: {{ .Release.Namespace }}
|
||||
labels:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
helm.sh/chart: {{ include "emqx.chart" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
||||
app.kubernetes.io/managed-by: {{ .Release.Service }}
|
||||
spec:
|
||||
type: ClusterIP
|
||||
sessionAffinity: None
|
||||
clusterIP: None
|
||||
publishNotReadyAddresses: true
|
||||
ports:
|
||||
- name: mqtt
|
||||
port: {{ .Values.service.mqtt | default 1883 }}
|
||||
protocol: TCP
|
||||
targetPort: mqtt
|
||||
{{- if not (empty .Values.emqxConfig.EMQX_LISTENERS__TCP__DEFAULT) }}
|
||||
- name: internalmqtt
|
||||
port: {{ .Values.service.internalmqtt | default 11883 }}
|
||||
protocol: TCP
|
||||
targetPort: internalmqtt
|
||||
{{ end }}
|
||||
- name: mqttssl
|
||||
port: {{ .Values.service.mqttssl | default 8883 }}
|
||||
protocol: TCP
|
||||
targetPort: mqttssl
|
||||
- name: ws
|
||||
port: {{ .Values.service.ws | default 8083 }}
|
||||
protocol: TCP
|
||||
targetPort: ws
|
||||
- name: wss
|
||||
port: {{ .Values.service.wss | default 8084 }}
|
||||
protocol: TCP
|
||||
targetPort: wss
|
||||
- name: dashboard
|
||||
port: {{ .Values.service.dashboard | default 18083 }}
|
||||
protocol: TCP
|
||||
targetPort: dashboard
|
||||
- name: ekka
|
||||
port: 4370
|
||||
protocol: TCP
|
||||
targetPort: ekka
|
||||
selector:
|
||||
app.kubernetes.io/name: {{ include "emqx.name" . }}
|
||||
app.kubernetes.io/instance: {{ .Release.Name }}
|
|
@ -0,0 +1,214 @@
|
|||
## Default values for emqx.
|
||||
## This is a YAML-formatted file.
|
||||
## Declare variables to be passed into your templates.
|
||||
|
||||
## It is recommended to have an odd number of nodes in a cluster, otherwise the emqx cluster cannot be automatically healed in case of a net-split.
|
||||
replicaCount: 3
|
||||
image:
|
||||
repository: emqx/emqx-enterprise
|
||||
pullPolicy: IfNotPresent
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
##
|
||||
# pullSecrets:
|
||||
# - myRegistryKeySecretName
|
||||
|
||||
|
||||
# The name of a secret in the same kubernetes namespace which contains values to
|
||||
# be added to the environment (must be manually created)
|
||||
# This can be useful for passwords and logins, etc.
|
||||
|
||||
# envFromSecret: "emqx-secrets"
|
||||
|
||||
## Forces the recreation of pods during helm upgrades. This can be useful to update configuration values even if the container image did not change.
|
||||
recreatePods: false
|
||||
|
||||
podAnnotations: {}
|
||||
|
||||
# Pod deployment policy
|
||||
# value: OrderedReady | Parallel
|
||||
# To redeploy a chart with existing PVC(s), the value must be set to Parallel to avoid deadlock
|
||||
podManagementPolicy: Parallel
|
||||
|
||||
persistence:
|
||||
enabled: false
|
||||
size: 20Mi
|
||||
storageClassName: ""
|
||||
accessMode: ReadWriteOnce
|
||||
## Existing PersistentVolumeClaims
|
||||
## The value is evaluated as a template
|
||||
## So, for example, the name can depend on .Release or .Chart
|
||||
# existingClaim: ""
|
||||
|
||||
resources: {}
|
||||
# limits:
|
||||
# cpu: 500m
|
||||
# memory: 512Mi
|
||||
# requests:
|
||||
# cpu: 500m
|
||||
# memory: 512Mi
|
||||
|
||||
# Containers that run before the creation of EMQX containers. They can contain utilities or setup scripts.
|
||||
initContainers: {}
|
||||
# - name: sysctl
|
||||
# image: busybox
|
||||
# securityContext:
|
||||
# runAsUser: 0
|
||||
# runAsGroup: 0
|
||||
# capabilities:
|
||||
# add:
|
||||
# - SYS_ADMIN
|
||||
# drop:
|
||||
# - ALL
|
||||
# command:
|
||||
# - /bin/sh
|
||||
# - -c
|
||||
# - |
|
||||
# mount -o remount rw /proc/sys
|
||||
# sysctl -w net.core.somaxconn=65535
|
||||
# sysctl -w net.ipv4.ip_local_port_range="1024 65535"
|
||||
# sysctl -w kernel.core_uses_pid=0
|
||||
# sysctl -w net.ipv4.tcp_tw_reuse=1
|
||||
# sysctl -w fs.nr_open=1000000000
|
||||
# sysctl -w fs.file-max=1000000000
|
||||
# sysctl -w net.ipv4.ip_local_port_range='1025 65534'
|
||||
# sysctl -w net.ipv4.udp_mem='74583000 499445000 749166000'
|
||||
# sysctl -w net.ipv4.tcp_max_sync_backlog=163840
|
||||
# sysctl -w net.core.netdev_max_backlog=163840
|
||||
# sysctl -w net.core.optmem_max=16777216
|
||||
# sysctl -w net.ipv4.tcp_rmem='1024 4096 16777216'
|
||||
# sysctl -w net.ipv4.tcp_wmem='1024 4096 16777216'
|
||||
# sysctl -w net.ipv4.tcp_max_tw_buckets=1048576
|
||||
# sysctl -w net.ipv4.tcp_fin_timeout=15
|
||||
# sysctl -w net.core.rmem_default=262144000
|
||||
# sysctl -w net.core.wmem_default=262144000
|
||||
# sysctl -w net.core.rmem_max=262144000
|
||||
# sysctl -w net.core.wmem_max=262144000
|
||||
# sysctl -w net.ipv4.tcp_mem='378150000 504200000 756300000'
|
||||
# sysctl -w net.netfilter.nf_conntrack_max=1000000
|
||||
# sysctl -w net.netfilter.nf_conntrack_tcp_timeout_time_wait=30
|
||||
|
||||
## EMQX configuration item, see the documentation (https://hub.docker.com/r/emqx/emqx)
|
||||
emqxConfig:
|
||||
EMQX_CLUSTER__DISCOVERY_STRATEGY: "dns"
|
||||
EMQX_CLUSTER__DNS__NAME: "{{ .Release.Name }}-headless.{{ .Release.Namespace }}.svc.cluster.local"
|
||||
EMQX_CLUSTER__DNS__RECORD_TYPE: "srv"
|
||||
# EMQX_CLUSTER__DISCOVERY_STRATEGY: "k8s"
|
||||
# EMQX_CLUSTER__K8S__APISERVER: "https://kubernetes.default.svc:443"
|
||||
# EMQX_CLUSTER__K8S__SERVICE_NAME: "{{ .Release.Name }}-headless"
|
||||
# EMQX_CLUSTER__K8S__NAMESPACE: "{{ .Release.Namespace }}"
|
||||
## The address type is used to extract host from k8s service.
|
||||
## Value: ip | dns | hostname
|
||||
## Note:Hostname is only supported after v4.0-rc.2
|
||||
EMQX_CLUSTER__K8S__ADDRESS_TYPE: "hostname"
|
||||
EMQX_CLUSTER__K8S__SUFFIX: "svc.cluster.local"
|
||||
## if EMQX_CLUSTER__K8S__ADDRESS_TYPE eq dns
|
||||
# EMQX_CLUSTER__K8S__SUFFIX: "pod.cluster.local"
|
||||
EMQX_DASHBOARD__DEFAULT_USERNAME: "admin"
|
||||
EMQX_DASHBOARD__DEFAULT_PASSWORD: "public"
|
||||
|
||||
## EMQX Enterprise Edition requires manual creation of a Secret containing the license content. Set the name of that Secret as the value of "emqxLicenseSecretName".
|
||||
## Example:
|
||||
## kubectl create secret generic emqx-license-secret-name --from-file=/path/to/emqx.lic
|
||||
emqxLicenseSecretName:
|
||||
|
||||
service:
|
||||
## Service type
|
||||
##
|
||||
type: ClusterIP
|
||||
## Port for MQTT
|
||||
##
|
||||
mqtt: 1883
|
||||
## Port for MQTT(SSL)
|
||||
##
|
||||
mqttssl: 8883
|
||||
## Port for mgmt API
|
||||
##
|
||||
mgmt: 8081
|
||||
## Port for WebSocket/HTTP
|
||||
##
|
||||
ws: 8083
|
||||
## Port for WSS/HTTPS
|
||||
##
|
||||
wss: 8084
|
||||
## Port for dashboard
|
||||
##
|
||||
dashboard: 18083
|
||||
## Port for dashboard HTTPS
|
||||
##
|
||||
# dashboardtls: 18084
|
||||
## Specify the nodePort(s) value for the LoadBalancer and NodePort service types.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
|
||||
##
|
||||
nodePorts:
|
||||
mqtt:
|
||||
mqttssl:
|
||||
mgmt:
|
||||
ws:
|
||||
wss:
|
||||
dashboard:
|
||||
dashboardtls:
|
||||
## Set the LoadBalancer service type to internal only.
|
||||
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
|
||||
##
|
||||
# loadBalancerIP:
|
||||
## Load Balancer sources
|
||||
## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
|
||||
## Example:
|
||||
## loadBalancerSourceRanges:
|
||||
## - 10.10.10.0/24
|
||||
##
|
||||
loadBalancerSourceRanges: []
|
||||
## Set the ExternalIPs
|
||||
##
|
||||
externalIPs: []
|
||||
## Provide any additional annotations which may be required. Evaluated as a template
|
||||
##
|
||||
annotations: {}
|
||||
|
||||
nodeSelector: {}
|
||||
|
||||
tolerations: []
|
||||
|
||||
affinity: {}
|
||||
|
||||
ingress:
|
||||
## ingress for EMQX Dashboard
|
||||
dashboard:
|
||||
enabled: false
|
||||
# ingressClassName: nginx
|
||||
annotations: {}
|
||||
# kubernetes.io/ingress.class: nginx
|
||||
# kubernetes.io/tls-acme: "true"
|
||||
path: /
|
||||
pathType: ImplementationSpecific
|
||||
hosts:
|
||||
- dashboard.emqx.local
|
||||
tls: []
|
||||
|
||||
podSecurityContext:
|
||||
enabled: true
|
||||
fsGroup: 1000
|
||||
fsGroupChangePolicy: Always
|
||||
runAsUser: 1000
|
||||
supplementalGroups:
|
||||
- 1000
|
||||
|
||||
containerSecurityContext:
|
||||
enabled: true
|
||||
runAsNonRoot: true
|
||||
runAsUser: 1000
|
||||
|
||||
metrics:
|
||||
enabled: false
|
||||
type: prometheus
|
||||
|
||||
ssl:
|
||||
enabled: false
|
||||
useExisting: false
|
||||
existingName: emqx-tls
|
||||
dnsnames: {}
|
||||
issuer:
|
||||
name: letsencrypt-dns
|
||||
kind: ClusterIssuer
|
|
@ -14,8 +14,8 @@ type: application
|
|||
|
||||
# This is the chart version. This version number should be incremented each time you make changes
|
||||
# to the chart and its templates, including the app version.
|
||||
version: 5.0.7
|
||||
version: 5.0.8
|
||||
|
||||
# This is the version number of the application being deployed. This version number should be
|
||||
# incremented each time you make changes to the application.
|
||||
appVersion: 5.0.7
|
||||
appVersion: 5.0.8
|
||||
|
|
|
@ -91,7 +91,7 @@ spec:
|
|||
{{- end }}
|
||||
containers:
|
||||
- name: emqx
|
||||
image: "{{ .Values.image.repository }}:{{ .Chart.AppVersion }}"
|
||||
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
|
||||
imagePullPolicy: {{ .Values.image.pullPolicy }}
|
||||
{{- if .Values.containerSecurityContext.enabled }}
|
||||
securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }}
|
||||
|
|
|
@ -7,6 +7,8 @@ replicaCount: 3
|
|||
image:
|
||||
repository: emqx/emqx
|
||||
pullPolicy: IfNotPresent
|
||||
# Overrides the image tag whose default is the chart appVersion.
|
||||
tag: ""
|
||||
## Optionally specify an array of imagePullSecrets.
|
||||
## Secrets must be manually created in the namespace.
|
||||
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
|
||||
|
|
|
@ -1,7 +0,0 @@
|
|||
license {
|
||||
type = key
|
||||
# The default license has 1000 connections limit, it is issued on 20220419 and valid for 5 years (1825 days)
|
||||
key = "MjIwMTExCjAKMTAKRXZhbHVhdGlvbgpjb250YWN0QGVtcXguaW8KZGVmYXVsdAoyMDIyMDQxOQoxODI1CjEwMDAK.MEQCICbgRVijCQov2hrvZXR1mk9Oa+tyV1F5oJ6iOZeSHjnQAiB9dUiVeaZekDOjztk+NCWjhk4PG8tWfw2uFZWruSzD6g=="
|
||||
connection_low_watermark = 75%,
|
||||
connection_high_watermark = 80%
|
||||
}
|
|
@ -10,17 +10,6 @@ emqx_license_http_api {
|
|||
}
|
||||
}
|
||||
|
||||
desc_license_file_api {
|
||||
desc {
|
||||
en: "Upload a license file"
|
||||
zh: "上传一个许可证文件"
|
||||
}
|
||||
label: {
|
||||
en: "Update license"
|
||||
zh: "更新许可证"
|
||||
}
|
||||
}
|
||||
|
||||
desc_license_key_api {
|
||||
desc {
|
||||
en: "Update a license key"
|
||||
|
|
|
@ -2,16 +2,16 @@ emqx_license_schema {
|
|||
license_root {
|
||||
desc {
|
||||
en: "Defines the EMQX Enterprise license. \n\n"
|
||||
"A license is either a `key` or a `file`.\n"
|
||||
"When `key` and `file` are both configured, `key` is used.\n"
|
||||
"\n"
|
||||
"The default license has 1000 connections limit, it is "
|
||||
"issued on 2022-04-19 and valid for 5 years (1825 days).\n"
|
||||
"\n"
|
||||
"EMQX comes with a default trial license. For production use, please \n"
|
||||
"visit https://www.emqx.com/apply-licenses/emqx to apply."
|
||||
zh: "EMQX企业许可证。\n"
|
||||
"许可证是一个 `key` 或一个 `file`。\n"
|
||||
"当 `key` 和 `file` 同时被配置时,优先使用 `key`。\n"
|
||||
"\n"
|
||||
"EMQX 自带一个默认的试用许可证,若需要在生产环境部署,\n"
|
||||
"EMQX 自带一个默认的试用许可证,"
|
||||
"默认试用许可允许最多接入 1000 个连接,签发时间是 2022年4月19日,有效期是 5 年(1825 天)。"
|
||||
"若需要在生产环境部署,\n"
|
||||
"请访问 https://www.emqx.com/apply-licenses/emqx 来申请。\n"
|
||||
}
|
||||
label {
|
||||
|
@ -20,17 +20,6 @@ emqx_license_schema {
|
|||
}
|
||||
}
|
||||
|
||||
license_type_field {
|
||||
desc {
|
||||
en: "License type"
|
||||
zh: "许可证类型"
|
||||
}
|
||||
label {
|
||||
en: "License type"
|
||||
zh: "许可证类型"
|
||||
}
|
||||
}
|
||||
|
||||
key_field {
|
||||
desc {
|
||||
en: "License string"
|
||||
|
@ -42,17 +31,6 @@ emqx_license_schema {
|
|||
}
|
||||
}
|
||||
|
||||
file_field {
|
||||
desc {
|
||||
en: "Path to the license file"
|
||||
zh: "许可证文件的路径"
|
||||
}
|
||||
label {
|
||||
en: "Path to the license file"
|
||||
zh: "许可证文件的路径"
|
||||
}
|
||||
}
|
||||
|
||||
connection_low_watermark_field {
|
||||
desc {
|
||||
en: "Low watermark limit below which license connection quota usage alarms are deactivated"
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
{application, emqx_license, [
|
||||
{description, "EMQX License"},
|
||||
{vsn, "5.0.2"},
|
||||
{vsn, "5.0.3"},
|
||||
{modules, []},
|
||||
{registered, [emqx_license_sup]},
|
||||
{applications, [kernel, stdlib]},
|
||||
|
|
|
@ -21,11 +21,7 @@
|
|||
unload/0,
|
||||
read_license/0,
|
||||
read_license/1,
|
||||
update_file/1,
|
||||
update_file_contents/1,
|
||||
update_key/1,
|
||||
license_dir/0,
|
||||
save_and_backup_license/1
|
||||
update_key/1
|
||||
]).
|
||||
|
||||
-define(CONF_KEY_PATH, [license]).
|
||||
|
@ -57,35 +53,6 @@ unload() ->
|
|||
emqx_conf:remove_handler(?CONF_KEY_PATH),
|
||||
emqx_license_cli:unload().
|
||||
|
||||
-spec license_dir() -> file:filename().
|
||||
license_dir() ->
|
||||
filename:join([emqx:data_dir(), licenses]).
|
||||
|
||||
%% Subdirectory relative to data dir.
|
||||
-spec relative_license_path() -> file:filename().
|
||||
relative_license_path() ->
|
||||
filename:join([licenses, "emqx.lic"]).
|
||||
|
||||
-spec update_file(binary() | string()) ->
|
||||
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
|
||||
update_file(Filename) when is_binary(Filename); is_list(Filename) ->
|
||||
case file:read_file(Filename) of
|
||||
{ok, Contents} ->
|
||||
update_file_contents(Contents);
|
||||
{error, Error} ->
|
||||
{error, Error}
end.

-spec update_file_contents(binary() | string()) ->
    {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
update_file_contents(Contents) when is_binary(Contents) ->
    Result = emqx_conf:update(
        ?CONF_KEY_PATH,
        {file, Contents},
        #{rawconf_with_defaults => true, override_to => local}
    ),
    handle_config_update_result(Result).

-spec update_key(binary() | string()) ->
    {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
update_key(Value) when is_binary(Value); is_list(Value) ->
@ -147,18 +114,10 @@ del_license_hook() ->
    _ = emqx_hooks:del('client.connect', {?MODULE, check, []}),
    ok.

do_update({file, NewContents}, Conf) ->
    Res = emqx_license_proto_v2:save_and_backup_license(mria_mnesia:running_nodes(), NewContents),
    %% assert
    true = lists:all(fun(X) -> X =:= {ok, ok} end, Res),
    %% Must be relative to the data dir, since different nodes might
    %% have different data directories configured...
    LicensePath = relative_license_path(),
    maps:remove(<<"key">>, Conf#{<<"type">> => file, <<"file">> => LicensePath});
do_update({key, Content}, Conf) when is_binary(Content); is_list(Content) ->
    case emqx_license_parser:parse(Content) of
        {ok, _License} ->
            maps:remove(<<"file">>, Conf#{<<"type">> => key, <<"key">> => Content});
            Conf#{<<"key">> => Content};
        {error, Reason} ->
            erlang:throw(Reason)
    end;
@ -166,57 +125,10 @@ do_update({key, Content}, Conf) when is_binary(Content); is_list(Content) ->
do_update(_Other, Conf) ->
    Conf.

save_and_backup_license(NewLicenseKey) ->
    %% Must be relative to the data dir, since different nodes might
    %% have different data directories configured...
    CurrentLicensePath = filename:join(emqx:data_dir(), relative_license_path()),
    LicenseDir = filename:dirname(CurrentLicensePath),
    case filelib:ensure_dir(CurrentLicensePath) of
        ok -> ok;
        {error, EnsureError} -> throw({error_creating_license_dir, EnsureError})
    end,
    case file:read_file(CurrentLicensePath) of
        {ok, NewLicenseKey} ->
            %% same contents; nothing to do.
            ok;
        {ok, _OldContents} ->
            Time = calendar:system_time_to_rfc3339(erlang:system_time(second)),
            BackupPath = filename:join([
                LicenseDir,
                "emqx.lic." ++ Time ++ ".backup"
            ]),
            case file:copy(CurrentLicensePath, BackupPath) of
                {ok, _} -> ok;
                {error, CopyError} -> throw({error_backing_up_license, CopyError})
            end,
            ok;
        {error, enoent} ->
            ok;
        {error, Error} ->
            throw({error_reading_existing_license, Error})
    end,
    case file:write_file(CurrentLicensePath, NewLicenseKey) of
        ok -> ok;
        {error, WriteError} -> throw({error_writing_license, WriteError})
    end,
    ok.

check_max_clients_exceeded(MaxClients) ->
    emqx_license_resources:connection_count() > MaxClients * 1.1.

read_license(#{type := file, file := Filename}) ->
    case file:read_file(Filename) of
        {ok, Content} ->
            emqx_license_parser:parse(Content);
        {error, _} = Error ->
            %% Could be a relative path in data folder after update.
            FilenameDataDir = filename:join(emqx:data_dir(), Filename),
            case file:read_file(FilenameDataDir) of
                {ok, Content} -> emqx_license_parser:parse(Content);
                _Error -> Error
            end
    end;
read_license(#{type := key, key := Content}) ->
read_license(#{key := Content}) ->
    emqx_license_parser:parse(Content).

handle_config_update_result({error, {post_config_update, ?MODULE, Error}}) ->
@ -19,21 +19,6 @@
load() ->
    ok = emqx_ctl:register_command(license, {?MODULE, license}, []).

license(["reload"]) ->
    case emqx:get_config([license]) of
        #{file := Filename} ->
            license(["reload", Filename]);
        #{key := _Key} ->
            ?PRINT_MSG("License is not configured as a file, please specify file explicitly~n")
    end;
license(["reload", Filename]) ->
    case emqx_license:update_file(Filename) of
        {ok, Warnings} ->
            ok = print_warnings(Warnings),
            ok = ?PRINT_MSG("ok~n");
        {error, Reason} ->
            ?PRINT("Error: ~p~n", [Reason])
    end;
license(["update", EncodedLicense]) ->
    case emqx_license:update_key(EncodedLicense) of
        {ok, Warnings} ->
@ -56,8 +41,6 @@ license(_) ->
    emqx_ctl:usage(
        [
            {"license info", "Show license info"},
            {"license reload [<File>]",
                "Reload license from a file specified with an absolute path"},
            {"license update License", "Update license given as a string"}
        ]
    ).
@ -17,9 +17,7 @@
]).

-export([
    '/license'/2,
    '/license/key'/2,
    '/license/file'/2
    '/license'/2
]).

-define(BAD_REQUEST, 'BAD_REQUEST').
@ -31,9 +29,7 @@ api_spec() ->

paths() ->
    [
        "/license",
        "/license/key",
        "/license/file"
        "/license"
    ].

schema("/license") ->
@ -53,32 +49,7 @@ schema("/license") ->
                    }
                )
            }
        }
    };
schema("/license/file") ->
    #{
        'operationId' => '/license/file',
        post => #{
            tags => [<<"license">>],
            summary => <<"Upload license file">>,
            description => ?DESC("desc_license_file_api"),
            'requestBody' => emqx_dashboard_swagger:file_schema(filename),
            responses => #{
                200 => emqx_dashboard_swagger:schema_with_examples(
                    map(),
                    #{
                        sample_license_info => #{
                            value => sample_license_info_response()
                        }
                    }
                ),
                400 => emqx_dashboard_swagger:error_codes([?BAD_REQUEST], <<"Bad license file">>)
            }
        }
    };
schema("/license/key") ->
    #{
        'operationId' => '/license/key',
        },
        post => #{
            tags => [<<"license">>],
            summary => <<"Update license key">>,
@ -105,7 +76,7 @@ schema("/license/key") ->
                        }
                    }
                ),
                400 => emqx_dashboard_swagger:error_codes([?BAD_REQUEST], <<"Bad license file">>)
                400 => emqx_dashboard_swagger:error_codes([?BAD_REQUEST], <<"Bad license key">>)
            }
        }
    }.
@ -126,30 +97,12 @@ sample_license_info_response() ->
error_msg(Code, Msg) ->
    #{code => Code, message => emqx_misc:readable_error_msg(Msg)}.

%% read license info
'/license'(get, _Params) ->
    License = maps:from_list(emqx_license_checker:dump()),
    {200, License}.

'/license/file'(post, #{body := #{<<"filename">> := #{type := _} = File}}) ->
    [{_Filename, Contents}] = maps:to_list(maps:without([type], File)),
    case emqx_license:update_file_contents(Contents) of
        {error, Error} ->
            ?SLOG(error, #{
                msg => "bad_license_file",
                reason => Error
            }),
            {400, error_msg(?BAD_REQUEST, <<"Bad license file">>)};
        {ok, _} ->
            ?SLOG(info, #{
                msg => "updated_license_file"
            }),
            License = maps:from_list(emqx_license_checker:dump()),
            {200, License}
    end;
'/license/file'(post, _Params) ->
    {400, error_msg(?BAD_REQUEST, <<"Invalid request params">>)}.

'/license/key'(post, #{body := #{<<"key">> := Key}}) ->
    {200, License};
%% set/update license
'/license'(post, #{body := #{<<"key">> := Key}}) ->
    case emqx_license:update_key(Key) of
        {error, Error} ->
            ?SLOG(error, #{
@ -162,5 +115,5 @@ error_msg(Code, Msg) ->
            License = maps:from_list(emqx_license_checker:dump()),
            {200, License}
    end;
'/license/key'(post, _Params) ->
'/license'(post, _Params) ->
    {400, error_msg(?BAD_REQUEST, <<"Invalid request params">>)}.
@ -20,8 +20,7 @@
>>).

-define(LICENSE_PARSE_MODULES, [
    emqx_license_parser_v20220101,
    emqx_license_parser_legacy
    emqx_license_parser_v20220101
]).

-type license_data() :: term().
@ -1,265 +0,0 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------

-module(emqx_license_parser_legacy).

-behaviour(emqx_license_parser).

-include_lib("public_key/include/public_key.hrl").
-include("emqx_license.hrl").

-elvis([{elvis_style, atom_naming_convention, disable}]).

-define(CACERT, <<
    "-----BEGIN CERTIFICATE-----\n"
    "MIIDVDCCAjwCCQCckt8CVupoRDANBgkqhkiG9w0BAQsFADBsMQswCQYDVQQGEwJD\n"
    "TjERMA8GA1UECAwIWmhlamlhbmcxETAPBgNVBAcMCEhhbmd6aG91MQwwCgYDVQQK\n"
    "DANFTVExDDAKBgNVBAsMA0VNUTEbMBkGA1UEAwwSRU1RWCBFbnRlcnByaXNlIHY1\n"
    "MB4XDTIyMDQwODE1MTA1M1oXDTIzMDQwODE1MTA1M1owbDELMAkGA1UEBhMCQ04x\n"
    "ETAPBgNVBAgMCFpoZWppYW5nMREwDwYDVQQHDAhIYW5nemhvdTEMMAoGA1UECgwD\n"
    "RU1RMQwwCgYDVQQLDANFTVExGzAZBgNVBAMMEkVNUVggRW50ZXJwcmlzZSB2NTCC\n"
    "ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMiYB/gbxCSErWL8sNZHkP4s\n"
    "VTyeBho5T+5Uyp2S95qmcj10FBGi50ZnEN/62vMWED3HzEXsp6pq2Jk+Of3g9rSu\n"
    "63V082HzlqFNHFzUDGkEu23tWyxeEKwBGyYRLIJI1/az99Jq82Qo0UZ5ELVpouAz\n"
    "QVOKjpehHvWgEuWmPi+w1uuOieO08nO4AAOLHWcNOChgV50sl88gbz2n/kAcjqzl\n"
    "1MQXMXoRzfzseNf3bmBV0keNFOpcqePTWCeshFFVkqeKMbK5HIKsnoDSl3VtQ/KK\n"
    "iV88WpW4f0QfGGJV/gHt++4BAZS3nzxXUhGA0Tf2o7N1CHqnXuottJVcgzyIxHEC\n"
    "AwEAATANBgkqhkiG9w0BAQsFAAOCAQEANh3ofOa9Aoqb7gUoTb6dNj883aHZ4aHi\n"
    "kQVo4fVc4IH1MLVNuH/H/aqQ+YtRbbE4YT0icApJFa8qriv8afD9reh5/6ySdsms\n"
    "RAXSogCuAPk2DwT1fyQa6A45x5EBpgwW10rYhwa5JJi6YKPpWS/Uo1Fgk9YGmeW4\n"
    "FgGWYvWQHQIXhjfTC0wJPXlsDB2AB7xMINlOSfg/Bz8mhz7iOjM4pkvnTj17JrgR\n"
    "VQLAj4NFAvdLFFjhZarFtCjPiCE4gb5YZI/Os4iMenD1ZWnYy9Sy7JSNXhWda6e2\n"
    "WGl1AsyDsVPdvAzcB5ymrLnptCzZYT29PSubmCHS9nFgT6hkWCam4g==\n"
    "-----END CERTIFICATE-----"
>>).

%% emqx_license_parser callbacks
-export([
    parse/2,
    dump/1,
    customer_type/1,
    license_type/1,
    expiry_date/1,
    max_connections/1
]).

%%--------------------------------------------------------------------
%% emqx_license_parser API
%%--------------------------------------------------------------------

%% Sample parsed data:
%% #{customer => <<"EMQ X Evaluation">>,
%% email => "contact@emqx.io",
%% permits =>
%% #{customer_type => 10,
%% enabled_plugins =>
%% [emqx_backend_redis,emqx_backend_mysql,
%% emqx_backend_pgsql,emqx_backend_mongo,
%% emqx_backend_cassa,emqx_bridge_kafka,
%% emqx_bridge_rabbit],
%% max_connections => 10,type => 1},
%% product => "EMQX Enterprise",
%% validity =>
%% {{{2020,6,20},{3,2,52}},{{2049,1,1},{3,2,52}}},
%% vendor => "EMQ Technologies Co., Ltd.",
%% version => "5.0.0-alpha.1-22e2ad1c"}

parse(Contents, _PublicKey) ->
    case decode_and_verify_signature(Contents) of
        {ok, DerCert} ->
            parse_payload(DerCert);
        {error, Error} ->
            {error, Error}
    end.

dump(#{
    customer := Customer,
    email := Email,
    permits :=
        #{
            customer_type := CustomerType,
            max_connections := MaxConnections,
            type := Type
        },
    validity := {{StartAtDate, _StartAtTime}, {ExpiryAtDate, _ExpiryAtTime}}
}) ->
    {DateNow, _} = calendar:universal_time(),
    Expiry = DateNow > ExpiryAtDate,
    [
        {customer, Customer},
        {email, Email},
        {deployment, "default"},
        {max_connections, MaxConnections},
        {start_at, format_date(StartAtDate)},
        {expiry_at, format_date(ExpiryAtDate)},
        {type, format_type(Type)},
        {customer_type, CustomerType},
        {expiry, Expiry}
    ].

customer_type(#{permits := Permits}) ->
    maps:get(customer_type, Permits, ?LARGE_CUSTOMER).

license_type(#{permits := Permits}) ->
    maps:get(type, Permits, ?TRIAL).

expiry_date(#{validity := {_From, {EndDate, _EndTime}}}) ->
    EndDate.

max_connections(#{permits := Permits}) ->
    maps:get(max_connections, Permits, 0).

%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------

decode_and_verify_signature(Contents) ->
    try
        {ok, Cert, DerCert} = decode_license(Contents),
        [{'Certificate', DerCaCert, _}] = public_key:pem_decode(?CACERT),
        CaCert = public_key:pkix_decode_cert(DerCaCert, otp),
        Result = public_key:pkix_path_validation(
            CaCert,
            [DerCert],
            [{verify_fun, {fun verify_fun/3, user_state}}]
        ),
        case Result of
            {ok, _Info} ->
                {ok, Cert};
            {error, {bad_cert, Reason}} ->
                {error, Reason}
        end
    catch
        throw:bad_license_format ->
            {error, bad_license_format};
        _:_ ->
            {error, bad_certificate}
    end.

decode_license(Contents) ->
    case public_key:pem_decode(Contents) of
        [{'Certificate', DerCert, _}] ->
            Cert = public_key:pkix_decode_cert(DerCert, otp),
            {ok, Cert, DerCert};
        _ ->
            throw(bad_license_format)
    end.

parse_payload(DerCert) ->
    try
        {Start, End} = read_validity(DerCert),
        Subject = read_subject(DerCert),
        Permits = read_permits(DerCert),
        LicenseData = maps:merge(
            #{
                vendor => "EMQ Technologies Co., Ltd.",
                product => emqx_sys:sysdescr(),
                version => emqx_sys:version(),
                validity => {Start, End},
                permits => Permits
            },
            Subject
        ),
        {ok, LicenseData}
    catch
        _:_ ->
            {error, bad_license}
    end.

read_validity(#'OTPCertificate'{tbsCertificate = #'OTPTBSCertificate'{validity = Validity}}) ->
    case Validity of
        {'Validity', {utcTime, Start0}, {utcTime, End0}} ->
            {local_time(Start0), local_time(End0)};
        {'Validity', {utcTime, Start0}, {generalTime, End0}} ->
            {local_time(Start0), local_time(End0)}
    end.

local_time([Y01, Y0, Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2, $Z]) ->
    {{b2l(<<Y01, Y0, Y1, Y2>>), b2l(<<M1, M2>>), b2l(<<D1, D2>>)}, {
        b2l(<<H1, H2>>), b2l(<<Min1, Min2>>), b2l(<<S1, S2>>)
    }};
local_time([Y1, Y2, M1, M2, D1, D2, H1, H2, Min1, Min2, S1, S2, $Z]) ->
    {{b2l(<<"20", Y1, Y2>>), b2l(<<M1, M2>>), b2l(<<D1, D2>>)}, {
        b2l(<<H1, H2>>), b2l(<<Min1, Min2>>), b2l(<<S1, S2>>)
    }}.

b2l(L) -> binary_to_integer(L).

read_subject(#'OTPCertificate'{tbsCertificate = TbsCertificate}) ->
    #'OTPTBSCertificate'{subject = {rdnSequence, RDNs}} = TbsCertificate,
    read_subject(lists:flatten(RDNs), #{}).

read_subject([], Subject) ->
    Subject;
read_subject([#'AttributeTypeAndValue'{type = {2, 5, 4, 3}, value = V0} | RDNs], Subject) ->
    V = unwrap_utf8_string(V0),
    read_subject(RDNs, maps:put(customer, V, Subject));
read_subject([#'AttributeTypeAndValue'{type = {2, 5, 4, 10}, value = V0} | RDNs], Subject) ->
    V = unwrap_utf8_string(V0),
    read_subject(RDNs, maps:put(customer, V, Subject));
read_subject(
    [#'AttributeTypeAndValue'{type = {1, 2, 840, 113549, 1, 9, 1}, value = V} | RDNs],
    Subject
) ->
    read_subject(RDNs, maps:put(email, V, Subject));
read_subject([_ | RDNs], Subject) ->
    read_subject(RDNs, Subject).

read_permits(#'OTPCertificate'{tbsCertificate = #'OTPTBSCertificate'{extensions = Extensions}}) ->
    read_permits(Extensions, #{}).

read_permits([], Permits) ->
    Permits;
read_permits(
    [#'Extension'{extnID = {1, 3, 6, 1, 4, 1, 52509, 1}, extnValue = Val} | More], Permits
) ->
    MaxConns = list_to_integer(parse_utf8_string(Val)),
    read_permits(More, maps:put(max_connections, MaxConns, Permits));
read_permits(
    [#'Extension'{extnID = {1, 3, 6, 1, 4, 1, 52509, 2}, extnValue = Val} | More], Permits
) ->
    Plugins = [list_to_atom(Plugin) || Plugin <- string:tokens(parse_utf8_string(Val), ",")],
    read_permits(More, maps:put(enabled_plugins, Plugins, Permits));
read_permits(
    [#'Extension'{extnID = {1, 3, 6, 1, 4, 1, 52509, 3}, extnValue = Val} | More], Permits
) ->
    Type = list_to_integer(parse_utf8_string(Val)),
    read_permits(More, maps:put(type, Type, Permits));
read_permits(
    [#'Extension'{extnID = {1, 3, 6, 1, 4, 1, 52509, 4}, extnValue = Val} | More], Permits
) ->
    CustomerType = list_to_integer(parse_utf8_string(Val)),
    read_permits(More, maps:put(customer_type, CustomerType, Permits));
read_permits([_ | More], Permits) ->
    read_permits(More, Permits).

unwrap_utf8_string({utf8String, Str}) -> Str;
unwrap_utf8_string(Str) -> Str.

parse_utf8_string(Val) ->
    {utf8String, Str} = public_key:der_decode('DisplayText', Val),
    binary_to_list(Str).

format_date({Year, Month, Day}) ->
    iolist_to_binary(
        io_lib:format(
            "~4..0w-~2..0w-~2..0w",
            [Year, Month, Day]
        )
    ).

format_type(?OFFICIAL) -> <<"official">>;
format_type(?TRIAL) -> <<"trial">>.

%% We want to issue new CA certificates with different issuer and keep
%% validating old licenses.
verify_fun(_OTPCertificate, {bad_cert, invalid_issuer}, UserState) ->
    {valid, UserState};
%% We want to continue using the same CA certificate even after it
%% expires.
verify_fun(_OTPCertificate, {bad_cert, cert_expired}, UserState) ->
    {valid, UserState};
verify_fun(OTPCertificate, Event, State) ->
    DefaultVerifyFun = element(1, ?DEFAULT_VERIFYFUN),
    DefaultVerifyFun(OTPCertificate, Event, State).
@ -16,16 +16,15 @@
-export([roots/0, fields/1, validations/0, desc/1]).

-export([
    license_type/0,
    key_license/0,
    file_license/0
    default_license/0,
    key_license/0
]).

roots() ->
    [
        {license,
            hoconsc:mk(
                license_type(),
                key_license(),
                #{
                    desc => ?DESC(license_root)
                }
@ -34,55 +33,14 @@ roots() ->

fields(key_license) ->
    [
        {type, #{
            type => key,
            required => true,
            desc => ?DESC(license_type_field)
        }},
        {key, #{
            type => string(),
            default => default_license(),
            %% so it's not logged
            sensitive => true,
            required => true,
            desc => ?DESC(key_field)
        }},
        {file, #{
            type => string(),
            required => false,
            desc => ?DESC(file_field)
        }}
        | common_fields()
    ];
fields(file_license) ->
    [
        {type, #{
            type => file,
            required => true,
            desc => ?DESC(license_type_field)
        }},
        {key, #{
            type => string(),
            %% so it's not logged
            sensitive => true,
            required => false,
            desc => ?DESC(key_field)
        }},
        {file, #{
            type => string(),
            desc => ?DESC(file_field)
        }}
        | common_fields()
    ].

desc(key_license) ->
    "License provisioned as a string.";
desc(file_license) ->
    "License provisioned as a file.";
desc(_) ->
    undefined.

common_fields() ->
    [
        {connection_low_watermark, #{
            type => emqx_schema:percent(),
            default => "75%",
@ -95,21 +53,17 @@ common_fields() ->
        }}
    ].

desc(key_license) ->
    "License provisioned as a string.";
desc(_) ->
    undefined.

validations() ->
    [{check_license_watermark, fun check_license_watermark/1}].

license_type() ->
    hoconsc:union([
        key_license(),
        file_license()
    ]).

key_license() ->
    hoconsc:ref(?MODULE, key_license).

file_license() ->
    hoconsc:ref(?MODULE, file_license).

check_license_watermark(Conf) ->
    case hocon_maps:get("license.connection_low_watermark", Conf) of
        undefined ->
@ -121,3 +75,14 @@ check_license_watermark(Conf) ->
            false -> {bad_license_watermark, #{high => High, low => Low}}
        end
    end.

%% @doc The default license key.
%% This default license has 1000 connections limit.
%% It is issued on 2022-04-19 and valid for 5 years (1825 days)
%% NOTE: when updating a new key, the schema doc in emqx_license_schema_i18n.conf
%% should be updated accordingly
default_license() ->
    "MjIwMTExCjAKMTAKRXZhbHVhdGlvbgpjb250YWN0QGVtcXguaW8KZ"
    "GVmYXVsdAoyMDIyMDQxOQoxODI1CjEwMDAK.MEQCICbgRVijCQov2"
    "hrvZXR1mk9Oa+tyV1F5oJ6iOZeSHjnQAiB9dUiVeaZekDOjztk+NC"
    "Wjhk4PG8tWfw2uFZWruSzD6g==".
@ -11,20 +11,15 @@
-export([introduced_in/0]).

-export([
    remote_connection_counts/1,
    save_and_backup_license/2
    remote_connection_counts/1
]).

-define(TIMEOUT, 500).
-define(BACKUP_TIMEOUT, 15_000).

introduced_in() ->
    "5.0.5".
    "e5.0.0".

-spec remote_connection_counts(list(node())) -> list({atom(), term()}).
remote_connection_counts(Nodes) ->
    erpc:multicall(Nodes, emqx_license_resources, local_connection_count, [], ?TIMEOUT).

-spec save_and_backup_license(list(node()), binary()) -> list({atom(), term()}).
save_and_backup_license(Nodes, NewLicenseKey) ->
    erpc:multicall(Nodes, emqx_license, save_and_backup_license, [NewLicenseKey], ?BACKUP_TIMEOUT).
Some files were not shown because too many files have changed in this diff.