Merge branch 'master' into EMQX-871-872

This commit is contained in:
x1001100011 2021-08-09 22:02:43 -07:00
commit 2c6e8204b8
106 changed files with 3069 additions and 2250 deletions

View File

@ -1,16 +0,0 @@
# Build-env image is parameterized so CI can cross-build for each target OS.
ARG BUILD_FROM=emqx/build-env:erl23.2.7.2-emqx-2-ubuntu20.04
FROM ${BUILD_FROM}

# Profile to build: emqx (opensource) or emqx-ee (enterprise).
ARG EMQX_NAME=emqx

COPY . /emqx
WORKDIR /emqx

# Drop any stale build artifacts carried in from the build context so the
# packages are rebuilt from scratch for this profile.
RUN rm -rf _build/${EMQX_NAME}/lib _build/${EMQX_NAME}-pkg/lib

# Build the zip and native packages. On failure, dump the rebar3 crash log
# for CI diagnostics and still fail the layer: the previous
# `|| cat rebar3.crashdump` form masked failures, because `cat` exits 0
# when the crashdump exists, letting the image build from a failed compile.
RUN make ${EMQX_NAME}-zip || { cat rebar3.crashdump; exit 1; }
RUN make ${EMQX_NAME}-pkg || { cat rebar3.crashdump; exit 1; }

# Smoke-test the freshly built packages.
RUN /emqx/.ci/build_packages/tests.sh

View File

@ -73,7 +73,7 @@ emqx_test(){
fi
echo "running ${packagename} start"
running_test
run_test
echo "running ${packagename} stop"
dpkg -r "${EMQX_NAME}"
@ -99,7 +99,7 @@ emqx_test(){
fi
echo "running ${packagename} start"
running_test
run_test
echo "running ${packagename} stop"
rpm -e "${EMQX_NAME}"
@ -113,7 +113,7 @@ emqx_test(){
done
}
running_test(){
run_test(){
# sed -i '/emqx_telemetry/d' /var/lib/emqx/loaded_plugins
emqx_env_vars=$(dirname "$(readlink "$(command -v emqx)")")/../releases/emqx_vars
@ -132,7 +132,7 @@ EOF
exit 1
fi
if ! su - emqx -c "emqx start"; then
if ! emqx 'start'; then
cat /var/log/emqx/erlang.log.1 || true
cat /var/log/emqx/emqx.log.1 || true
exit 1
@ -149,7 +149,13 @@ EOF
done
pytest -v /paho-mqtt-testing/interoperability/test_client/V5/test_connect.py::test_basic
# shellcheck disable=SC2009 # pgrep does not support Extended Regular Expressions
emqx stop || kill "$(ps -ef | grep -E '\-progname\s.+emqx\s' |awk '{print $2}')"
ps -ef | grep -E '\-progname\s.+emqx\s'
if ! emqx 'stop'; then
echo "ERROR: failed_to_stop_emqx_with_the_stop_command"
cat /var/log/emqx/erlang.log.1 || true
cat /var/log/emqx/emqx.log.1 || true
exit 1
fi
if [ "$(sed -n '/^ID=/p' /etc/os-release | sed -r 's/ID=(.*)/\1/g' | sed 's/"//g')" = ubuntu ] \
|| [ "$(sed -n '/^ID=/p' /etc/os-release | sed -r 's/ID=(.*)/\1/g' | sed 's/"//g')" = debian ] ;then

View File

@ -4,6 +4,8 @@ services:
redis_server:
container_name: redis
image: redis:${REDIS_TAG}
ports:
- "6379:6379"
command:
- redis-server
- "--bind 0.0.0.0 ::"

View File

@ -25,8 +25,8 @@ jobs:
- name: set profile
id: set_profile
shell: bash
working-directory: source
run: |
cd source
vsn="$(./pkg-vsn.sh)"
pre_vsn="$(echo $vsn | grep -oE '^[0-9]+.[0-9]')"
if make emqx-ee --dry-run > /dev/null 2>&1; then
@ -38,20 +38,18 @@ jobs:
echo "::set-output name=old_vsns::$old_vsns"
echo "::set-output name=profiles::[\"emqx\", \"emqx-edge\"]"
fi
- name: get_all_deps
if: endsWith(github.repository, 'emqx')
run: |
make -C source deps-all
rm source/rebar.lock
zip -ryq source.zip source/* source/.[^.]*
- name: get_all_deps
- name: set get token
if: endsWith(github.repository, 'enterprise')
run: |
echo "https://ci%40emqx.io:${{ secrets.CI_GIT_TOKEN }}@github.com" > $HOME/.git-credentials
git config --global credential.helper store
echo "${{ secrets.CI_GIT_TOKEN }}" >> source/scripts/git-token
make -C source deps-all
zip -ryq source.zip source/* source/.[^.]*
- name: get deps
working-directory: source
run: |
make ensure-rebar3
./rebar3 as default get-deps
- name: gen zip file
run: zip -ryq source.zip source/* source/.[^.]*
- uses: actions/upload-artifact@v2
with:
name: source
@ -175,7 +173,6 @@ jobs:
cd source
make ensure-rebar3
sudo cp rebar3 /usr/local/bin/rebar3
rm -rf _build/${{ matrix.profile }}/lib
make ${{ matrix.profile }}-zip
- name: test
run: |
@ -216,8 +213,6 @@ jobs:
fail-fast: false
matrix:
profile: ${{fromJSON(needs.prepare.outputs.profiles)}}
erl_otp:
- 23.2.7.2-emqx-2
arch:
- amd64
- arm64
@ -233,6 +228,8 @@ jobs:
- centos6
- raspbian10
# - raspbian9
erl_otp:
- 23.2.7.2-emqx-2
exclude:
- os: centos6
arch: arm64
@ -254,15 +251,11 @@ jobs:
shell: bash
steps:
- name: prepare docker
run: |
mkdir -p $HOME/.docker
echo '{ "experimental": "enabled" }' | tee $HOME/.docker/config.json
echo '{ "experimental": true, "storage-driver": "overlay2", "max-concurrent-downloads": 50, "max-concurrent-uploads": 50}' | sudo tee /etc/docker/daemon.json
sudo systemctl restart docker
docker info
docker buildx create --use --name mybuild
docker run --rm --privileged tonistiigi/binfmt --install all
- uses: docker/setup-buildx-action@v1
- uses: docker/setup-qemu-action@v1
with:
image: tonistiigi/binfmt:latest
platforms: all
- uses: actions/download-artifact@v2
with:
name: source
@ -275,6 +268,7 @@ jobs:
ARCH: ${{ matrix.arch }}
SYSTEM: ${{ matrix.os }}
OLD_VSNS: ${{ needs.prepare.outputs.old_vsns }}
working-directory: source
run: |
set -e -x -u
broker=$PROFILE
@ -285,8 +279,8 @@ jobs:
export ARCH="arm"
fi
mkdir -p source/_upgrade_base
cd source/_upgrade_base
mkdir -p _upgrade_base
cd _upgrade_base
old_vsns=($(echo $OLD_VSNS | tr ' ' ' '))
for tag in ${old_vsns[@]}; do
if [ ! -z "$(echo $(curl -I -m 10 -o /dev/null -s -w %{http_code} https://s3-us-west-2.amazonaws.com/packages.emqx/$broker/$tag/$PROFILE-$SYSTEM-${tag#[e|v]}-$ARCH.zip) | grep -oE "^[23]+")" ];then
@ -301,32 +295,25 @@ jobs:
PROFILE: ${{ matrix.profile }}
ARCH: ${{ matrix.arch }}
SYSTEM: ${{ matrix.os }}
working-directory: source
run: |
set -e -u
cd source
docker buildx build --no-cache \
--platform=linux/$ARCH \
-t cross_build_emqx_for_$SYSTEM \
-f .ci/build_packages/Dockerfile \
--build-arg BUILD_FROM=emqx/build-env:$ERL_OTP-$SYSTEM \
--build-arg EMQX_NAME=$PROFILE \
--output type=tar,dest=/tmp/cross-build-$PROFILE-for-$SYSTEM.tar .
mkdir -p /tmp/packages/$PROFILE
tar -xvf /tmp/cross-build-$PROFILE-for-$SYSTEM.tar --wildcards emqx/_packages/$PROFILE/*
mv emqx/_packages/$PROFILE/* /tmp/packages/$PROFILE/
rm -rf /tmp/cross-build-$PROFILE-for-$SYSTEM.tar
docker rm -f $(docker ps -a -q)
docker volume prune -f
docker run -i --rm \
-v $(pwd):/emqx \
--workdir /emqx \
--platform linux/$ARCH \
emqx/build-env:$ERL_OTP-$SYSTEM \
bash -euc "make $PROFILE-zip || cat rebar3.crashdump; \
make $PROFILE-pkg || cat rebar3.crashdump; \
EMQX_NAME=$PROFILE && .ci/build_packages/tests.sh"
- name: create sha256
env:
PROFILE: ${{ matrix.profile}}
working-directory: source
run: |
if [ -d /tmp/packages/$PROFILE ]; then
cd /tmp/packages/$PROFILE
if [ -d _packages/$PROFILE ]; then
cd _packages/$PROFILE
for var in $(ls emqx-* ); do
bash -c "echo $(sha256sum $var | awk '{print $1}') > $var.sha256"
sudo bash -c "echo $(sha256sum $var | awk '{print $1}') > $var.sha256"
done
cd -
fi
@ -334,7 +321,7 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
name: ${{ matrix.profile }}
path: /tmp/packages/${{ matrix.profile }}/.
path: source/_packages/${{ matrix.profile }}/.
docker:
runs-on: ubuntu-20.04
@ -364,8 +351,8 @@ jobs:
env:
ERL_OTP: erl${{ matrix.erl_otp }}
PROFILE: ${{ matrix.profile }}
working-directory: source
run: |
cd source
PKG_VSN="$(./pkg-vsn.sh)"
docker buildx build --no-cache \
--platform=linux/amd64,linux/arm64 \
@ -385,8 +372,8 @@ jobs:
env:
ERL_OTP: erl${{ matrix.erl_otp }}
PROFILE: ${{ matrix.profile }}
working-directory: source
run: |
cd source
PKG_VSN="$(./pkg-vsn.sh)"
docker buildx build --no-cache \
--platform=linux/amd64,linux/arm64 \

View File

View File

@ -1,702 +1,3 @@
## master-88df1713
## NOTE: The configurations in this file will be overridden by
## `<path-to-emqx-installation>/data/emqx_overrides.conf`
##==================================================================
## Node
##==================================================================
node {
## Node name.
## See: http://erlang.org/doc/reference_manual/distributed.html
##
## @doc node.name
## ValueType: NodeName
## Default: emqx@127.0.0.1
name: "emqx@127.0.0.1"
## Cookie for distributed node communication.
##
## @doc node.cookie
## ValueType: String
## Default: emqxsecretcookie
cookie: emqxsecretcookie
## Data dir for the node
##
## @doc node.data_dir
## ValueType: Folder
## Default: "{{ platform_data_dir }}/"
data_dir: "{{ platform_data_dir }}/"
## Dir of crash dump file.
##
## @doc node.crash_dump_dir
## ValueType: Folder
## Default: "{{ platform_log_dir }}/"
crash_dump_dir: "{{ platform_log_dir }}/"
## Global GC Interval.
##
## @doc node.global_gc_interval
## ValueType: Duration
## Default: 15m
global_gc_interval: 15m
## Sets the net_kernel tick time in seconds.
## Notice that all communicating nodes are to have the same
## TickTime value specified.
##
## See: http://www.erlang.org/doc/man/kernel_app.html#net_ticktime
##
## @doc node.dist_net_ticktime
## ValueType: Number
## Default: 2m
dist_net_ticktime: 2m
## Sets the port range for the listener socket of a distributed
## Erlang node.
## Note that if there are firewalls between clustered nodes, this
## port segment for nodes communication should be allowed.
##
## See: http://www.erlang.org/doc/man/kernel_app.html
##
## @doc node.dist_listen_min
## ValueType: Integer
## Range: [1024,65535]
## Default: 6369
dist_listen_min: 6369
## Sets the port range for the listener socket of a distributed
## Erlang node.
## Note that if there are firewalls between clustered nodes, this
## port segment for nodes communication should be allowed.
##
## See: http://www.erlang.org/doc/man/kernel_app.html
##
## @doc node.dist_listen_max
## ValueType: Integer
## Range: [1024,65535]
## Default: 6369
dist_listen_max: 6369
## Sets the maximum depth of call stack back-traces in the exit
## reason element of 'EXIT' tuples.
## The flag also limits the stacktrace depth returned by
## process_info item current_stacktrace.
##
## @doc node.backtrace_depth
## ValueType: Integer
## Range: [0,1024]
## Default: 23
backtrace_depth: 23
}
##==================================================================
## Cluster
##==================================================================
cluster {
## Cluster name.
##
## @doc cluster.name
## ValueType: String
## Default: emqxcl
name: emqxcl
## Enable cluster autoheal from network partition.
##
## @doc cluster.autoheal
## ValueType: Boolean
## Default: true
autoheal: true
## Autoclean down node. A down node will be removed from the cluster
## if this value > 0.
##
## @doc cluster.autoclean
## ValueType: Duration
## Default: 5m
autoclean: 5m
## Node discovery strategy to join the cluster.
##
## @doc cluster.discovery_strategy
## ValueType: manual | static | mcast | dns | etcd | k8s
## - manual: Manual join command
## - static: Static node list
## - mcast: IP Multicast
## - dns: DNS A Record
## - etcd: etcd
## - k8s: Kubernetes
##
## Default: manual
discovery_strategy: manual
##----------------------------------------------------------------
## Cluster using static node list
##----------------------------------------------------------------
static {
## Node list of the cluster
##
## @doc cluster.static.seeds
## ValueType: Array<NodeName>
## Default: []
seeds: ["emqx1@127.0.0.1", "emqx2@127.0.0.1"]
}
##----------------------------------------------------------------
## Cluster using IP Multicast
##----------------------------------------------------------------
mcast {
## IP Multicast Address.
##
## @doc cluster.mcast.addr
## ValueType: IPAddress
## Default: "239.192.0.1"
addr: "239.192.0.1"
## Multicast Ports.
##
## @doc cluster.mcast.ports
## ValueType: Array<Port>
## Default: [4369, 4370]
ports: [4369, 4370]
## Multicast Iface.
##
## @doc cluster.mcast.iface
## ValueType: IPAddress
## Default: "0.0.0.0"
iface: "0.0.0.0"
## Multicast Ttl.
##
## @doc cluster.mcast.ttl
## ValueType: Integer
## Range: [0,255]
## Default: 255
ttl: 255
## Multicast loop.
##
## @doc cluster.mcast.loop
## ValueType: Boolean
## Default: true
loop: true
}
##----------------------------------------------------------------
## Cluster using DNS A records
##----------------------------------------------------------------
dns {
## DNS name.
##
## @doc cluster.dns.name
## ValueType: String
## Default: localhost
name: localhost
## The App name is used to build 'node.name' with IP address.
##
## @doc cluster.dns.app
## ValueType: String
## Default: emqx
app: emqx
}
##----------------------------------------------------------------
## Cluster using etcd
##----------------------------------------------------------------
etcd {
## Etcd server list, separated by ','.
##
## @doc cluster.etcd.server
## ValueType: URL
## Required: true
server: "http://127.0.0.1:2379"
## The prefix helps build nodes path in etcd. Each node in the cluster
## will create a path in etcd: v2/keys/<prefix>/<name>/<node.name>
##
## @doc cluster.etcd.prefix
## ValueType: String
## Default: emqxcl
prefix: emqxcl
## The TTL for node's path in etcd.
##
## @doc cluster.etcd.node_ttl
## ValueType: Duration
## Default: 1m
node_ttl: 1m
## Path to the file containing the user's private PEM-encoded key.
##
## @doc cluster.etcd.ssl.keyfile
## ValueType: File
## Default: "{{ platform_etc_dir }}/certs/key.pem"
ssl.keyfile: "{{ platform_etc_dir }}/certs/key.pem"
## Path to a file containing the user certificate.
##
## @doc cluster.etcd.ssl.certfile
## ValueType: File
## Default: "{{ platform_etc_dir }}/certs/cert.pem"
ssl.certfile: "{{ platform_etc_dir }}/certs/cert.pem"
## Path to the file containing PEM-encoded CA certificates. The CA certificates
## are used during server authentication and when building the client certificate chain.
##
## @doc cluster.etcd.ssl.cacertfile
## ValueType: File
## Default: "{{ platform_etc_dir }}/certs/cacert.pem"
ssl.cacertfile: "{{ platform_etc_dir }}/certs/cacert.pem"
}
##----------------------------------------------------------------
## Cluster using Kubernetes
##----------------------------------------------------------------
k8s {
## Kubernetes API server list, separated by ','.
##
## @doc cluster.k8s.apiserver
## ValueType: URL
## Required: true
apiserver: "http://10.110.111.204:8080"
## The service name helps lookup EMQ nodes in the cluster.
##
## @doc cluster.k8s.service_name
## ValueType: String
## Default: emqx
service_name: emqx
## The address type is used to extract host from k8s service.
##
## @doc cluster.k8s.address_type
## ValueType: ip | dns | hostname
## Default: ip
address_type: ip
## The app name helps build 'node.name'.
##
## @doc cluster.k8s.app_name
## ValueType: String
## Default: emqx
app_name: emqx
## The suffix added to dns and hostname get from k8s service
##
## @doc cluster.k8s.suffix
## ValueType: String
## Default: "pod.local"
suffix: "pod.local"
## Kubernetes Namespace
##
## @doc cluster.k8s.namespace
## ValueType: String
## Default: default
namespace: default
}
db_backend: mnesia
rlog: {
# role: core
# core_nodes: []
}
}
##==================================================================
## Log
##==================================================================
log {
## The primary log level
##
## - all the log messages with levels lower than this level will
## be dropped.
## - all the log messages with levels higher than this level will
## go into the log handlers. The handlers then decide to log it
## out or drop it according to the level setting of the handler.
##
## Note: Only the messages with severity level higher than or
## equal to this level will be logged.
##
## @doc log.primary_level
## ValueType: debug | info | notice | warning | error | critical | alert | emergency
## Default: warning
primary_level: warning
##----------------------------------------------------------------
## The console log handler send log messages to emqx console
##----------------------------------------------------------------
## Log to single line
## @doc log.console_handler.enable
## ValueType: Boolean
## Default: false
console_handler.enable: false
## The log level of this handler
## All the log messages with levels lower than this level will
## be dropped.
##
## @doc log.console_handler.level
## ValueType: debug | info | notice | warning | error | critical | alert | emergency
## Default: warning
console_handler.level: warning
##----------------------------------------------------------------
## The file log handlers send log messages to files
##----------------------------------------------------------------
## file_handlers.<name>
file_handlers.emqx_log: {
## The log level filter of this handler
## All the log messages with levels lower than this level will
## be dropped.
##
## @doc log.file_handlers.<name>.level
## ValueType: debug | info | notice | warning | error | critical | alert | emergency
## Default: warning
level: warning
## The log file for specified level.
##
## If `rotation` is disabled, this is the file of the log files.
##
## If `rotation` is enabled, this is the base name of the files.
## Each file in a rotated log is named <base_name>.N, where N is an integer.
##
## Note: Log files for a specific log level will only contain all the logs
## that higher than or equal to that level
##
## @doc log.file_handlers.<name>.file
## ValueType: File
## Required: true
file: "{{ platform_log_dir }}/emqx.log"
## Enables the log rotation.
## With this enabled, new log files will be created when the current
## log file is full, max to `rotation_count` files will be created.
##
## @doc log.file_handlers.<name>.rotation.enable
## ValueType: Boolean
## Default: true
rotation.enable: true
## Maximum rotation count of log files.
##
## @doc log.file_handlers.<name>.rotation.count
## ValueType: Integer
## Range: [1, 2048]
## Default: 10
rotation.count: 10
## Maximum size of each log file.
##
## If the max_size reached and `rotation` is disabled, the handler
## will stop sending log messages, if the `rotation` is enabled,
## the file rotates.
##
## @doc log.file_handlers.<name>.max_size
## ValueType: Size | infinity
## Default: 10MB
max_size: 10MB
}
## file_handlers.<name>
##
## You could also create multiple file handlers for different
## log level for example:
file_handlers.emqx_error_log: {
level: error
file: "{{ platform_log_dir }}/error.log"
}
## Timezone offset to display in logs
##
## @doc log.time_offset
## ValueType: system | utc | String
## - "system" use system zone
## - "utc" for Universal Coordinated Time (UTC)
## - "+hh:mm" or "-hh:mm" for a specified offset
## Default: system
time_offset: system
## Limits the total number of characters printed for each log event.
##
## @doc log.chars_limit
## ValueType: Integer | infinity
## Range: [0, infinity)
## Default: infinity
chars_limit: infinity
## Maximum depth for Erlang term log formatting
## and Erlang process message queue inspection.
##
## @doc log.max_depth
## ValueType: Integer | infinity
## Default: 80
max_depth: 80
## Log formatter
## @doc log.formatter
## ValueType: text | json
## Default: text
formatter: text
## Log to single line
## @doc log.single_line
## ValueType: Boolean
## Default: true
single_line: true
## The max allowed queue length before switching to sync mode.
##
## Log overload protection parameter. If the message queue grows
## larger than this value the handler switches from async to sync mode.
##
## @doc log.sync_mode_qlen
## ValueType: Integer
## Range: [0, ${log.drop_mode_qlen}]
## Default: 100
sync_mode_qlen: 100
## The max allowed queue length before switching to drop mode.
##
## Log overload protection parameter. When the message queue grows
## larger than this threshold, the handler switches to a mode in which
## it drops all new events that senders want to log.
##
## @doc log.drop_mode_qlen
## ValueType: Integer
## Range: [${log.sync_mode_qlen}, ${log.flush_qlen}]
## Default: 3000
drop_mode_qlen: 3000
## The max allowed queue length before switching to flush mode.
##
## Log overload protection parameter. If the length of the message queue
## grows larger than this threshold, a flush (delete) operation takes place.
## To flush events, the handler discards the messages in the message queue
## by receiving them in a loop without logging.
##
## @doc log.flush_qlen
## ValueType: Integer
## Range: [${log.drop_mode_qlen}, infinity)
## Default: 8000
flush_qlen: 8000
## Kill the log handler when it gets overloaded.
##
## Log overload protection parameter. It is possible that a handler,
## even if it can successfully manage peaks of high load without crashing,
## can build up a large message queue, or use a large amount of memory.
## We could kill the log handler in these cases and restart it after a
## few seconds.
##
## @doc log.overload_kill.enable
## ValueType: Boolean
## Default: true
overload_kill.enable: true
## The max allowed queue length before killing the log handler.
##
## Log overload protection parameter. This is the maximum allowed queue
## length. If the message queue grows larger than this, the handler
## process is terminated.
##
## @doc log.overload_kill.qlen
## ValueType: Integer
## Range: [0, 1048576]
## Default: 20000
overload_kill.qlen: 20000
## The max allowed memory size before killing the log handler.
##
## Log overload protection parameter. This is the maximum memory size
## that the handler process is allowed to use. If the handler grows
## larger than this, the process is terminated.
##
## @doc log.overload_kill.mem_size
## ValueType: Size
## Default: 30MB
overload_kill.mem_size: 30MB
## Restart the log handler after some seconds.
##
## Log overload protection parameter. If the handler is terminated,
## it restarts automatically after a delay specified in seconds.
##
## @doc log.overload_kill.restart_after
## ValueType: Duration
## Default: 5s
overload_kill.restart_after: 5s
## Controlling Bursts of Log Requests.
##
## Log overload protection parameter. Large bursts of log events - many
## events received by the handler under a short period of time - can
## potentially cause problems. By specifying the maximum number of events
## to be handled within a certain time frame, the handler can avoid
## choking the log with massive amounts of printouts.
##
## Note that there would be no warning if any messages were
## dropped because of burst control.
##
## @doc log.burst_limit.enable
## ValueType: Boolean
## Default: false
burst_limit.enable: false
## This config controls the maximum number of events to handle within
## a time frame. After the limit is reached, successive events are
## dropped until the end of the time frame defined by `window_time`.
##
## @doc log.burst_limit.max_count
## ValueType: Integer
## Default: 10000
burst_limit.max_count: 10000
## See the previous description of burst_limit_max_count.
##
## @doc log.burst_limit.window_time
## ValueType: duration
## Default: 1s
burst_limit.window_time: 1s
}
##==================================================================
## RPC
##==================================================================
rpc {
## RPC Mode.
##
## @doc rpc.mode
## ValueType: sync | async
## Default: async
mode: async
## Max batch size of async RPC requests.
##
## NOTE: RPC batch won't work when rpc.mode = sync
## Zero value disables rpc batching.
##
## @doc rpc.async_batch_size
## ValueType: Integer
## Range: [0, 1048576]
## Default: 0
async_batch_size: 256
## RPC port discovery
##
## The strategy for discovering the RPC listening port of
## other nodes.
##
## @doc cluster.discovery_strategy
## ValueType: manual | stateless
## - manual: discover ports by `tcp_server_port`.
## - stateless: discover ports in a stateless manner.
## If node name is `emqx<N>@127.0.0.1`, where the `<N>` is
## an integer, then the listening port will be `5370 + <N>`
##
## Default: `stateless`.
port_discovery: stateless
## TCP server port for RPC.
##
## Only takes effect when `rpc.port_discovery` = `manual`.
##
## @doc rpc.tcp_server_port
## ValueType: Integer
## Range: [1024-65535]
## Defaults: 5369
tcp_server_port: 5369
## Number of outgoing RPC connections.
##
## Set this to 1 to keep the message order sent from the same
## client.
##
## @doc rpc.tcp_client_num
## ValueType: Integer
## Range: [1, 256]
## Defaults: 1
tcp_client_num: 1
## RPC client connect timeout.
##
## @doc rpc.connect_timeout
## ValueType: Duration
## Default: 5s
connect_timeout: 5s
## TCP send timeout of RPC client and server.
##
## @doc rpc.send_timeout
## ValueType: Duration
## Default: 5s
send_timeout: 5s
## Authentication timeout
##
## @doc rpc.authentication_timeout
## ValueType: Duration
## Default: 5s
authentication_timeout: 5s
## Default receive timeout for call() functions
##
## @doc rpc.call_receive_timeout
## ValueType: Duration
## Default: 15s
call_receive_timeout: 15s
## Socket idle keepalive.
##
## @doc rpc.socket_keepalive_idle
## ValueType: Duration
## Default: 900s
socket_keepalive_idle: 900s
## TCP Keepalive probes interval.
##
## @doc rpc.socket_keepalive_interval
## ValueType: Duration
## Default: 75s
socket_keepalive_interval: 75s
## Probes lost to close the connection
##
## @doc rpc.socket_keepalive_count
## ValueType: Integer
## Default: 9
socket_keepalive_count: 9
## Size of TCP send buffer.
##
## @doc rpc.socket_sndbuf
## ValueType: Size
## Default: 1MB
socket_sndbuf: 1MB
## Size of TCP receive buffer.
##
## @doc rpc.socket_recbuf
## ValueType: Size
## Default: 1MB
socket_recbuf: 1MB
## Size of user-level software socket buffer.
##
## @doc rpc.socket_buffer
## ValueType: Size
## Default: 1MB
socket_buffer: 1MB
}
##==================================================================
## Broker
##==================================================================

View File

@ -18,7 +18,7 @@ IsQuicSupp = fun() ->
end,
Bcrypt = {bcrypt, {git, "https://github.com/emqx/erlang-bcrypt.git", {branch, "0.6.0"}}},
Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {branch, "main"}}},
Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {branch, "0.0.7"}}},
ExtraDeps = fun(C) ->
{deps, Deps0} = lists:keyfind(deps, 1, C),

View File

@ -1,6 +1,6 @@
{application, emqx,
[{id, "emqx"},
{description, "EMQ X"},
{description, "EMQ X Core"},
{vsn, "5.0.0"}, % strict semver, bump manually!
{modules, []},
{registered, []},

View File

@ -23,7 +23,6 @@
%% Start/Stop the application
-export([ start/0
, restart/1
, is_running/1
, stop/0
]).
@ -52,12 +51,6 @@
, run_fold_hook/3
]).
%% Shutdown and reboot
-export([ shutdown/0
, shutdown/1
, reboot/0
]).
%% Troubleshooting
-export([ set_debug_secret/1
]).
@ -94,19 +87,8 @@ set_debug_secret(PathToSecretFile) ->
%% @doc Start emqx application
-spec(start() -> {ok, list(atom())} | {error, term()}).
start() ->
%% Check OS
%% Check VM
%% Check Mnesia
application:ensure_all_started(?APP).
-spec(restart(string()) -> ok).
restart(ConfFile) ->
reload_config(ConfFile),
shutdown(),
ok = application:stop(mnesia),
_ = application:start(mnesia),
reboot().
%% @doc Stop emqx application.
-spec(stop() -> ok | {error, term()}).
stop() ->
@ -202,40 +184,3 @@ run_hook(HookPoint, Args) ->
-spec(run_fold_hook(emqx_hooks:hookpoint(), list(any()), any()) -> any()).
run_fold_hook(HookPoint, Args, Acc) ->
emqx_hooks:run_fold(HookPoint, Args, Acc).
%%--------------------------------------------------------------------
%% Shutdown and reboot
%%--------------------------------------------------------------------
shutdown() ->
shutdown(normal).
shutdown(Reason) ->
?LOG(critical, "emqx shutdown for ~s", [Reason]),
_ = emqx_alarm_handler:unload(),
lists:foreach(fun application:stop/1
, lists:reverse(default_started_applications())
).
reboot() ->
lists:foreach(fun application:start/1 , default_started_applications()).
default_started_applications() ->
[gproc, esockd, ranch, cowboy, ekka, quicer, emqx] ++ emqx_feature().
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
reload_config(ConfFile) ->
{ok, [Conf]} = file:consult(ConfFile),
lists:foreach(fun({App, Vals}) ->
[application:set_env(App, Par, Val) || {Par, Val} <- Vals]
end, Conf).
-ifndef(EMQX_DEP_APPS).
emqx_feature() -> [].
-else.
emqx_feature() ->
?EMQX_DEP_APPS.
-endif.

View File

@ -24,6 +24,7 @@
, get_description/0
, get_release/0
, set_init_config_load_done/0
, set_override_conf_file/1
]).
-include("emqx.hrl").
@ -46,24 +47,14 @@
start(_Type, _Args) ->
ok = maybe_load_config(),
ok = set_backtrace_depth(),
print_otp_version_warning(),
print_banner(),
%% Load application first for ekka_mnesia scanner
_ = load_ce_modules(),
ekka:start(),
ok = ekka_rlog:wait_for_shards(?EMQX_SHARDS, infinity),
false == os:getenv("EMQX_NO_QUIC")
andalso application:ensure_all_started(quicer),
ok = maybe_start_quicer(),
{ok, Sup} = emqx_sup:start_link(),
ok = start_autocluster(),
%% ok = emqx_plugins:init(),
%% _ = emqx_plugins:load(),
%% _ = start_ce_modules(),
emqx_boot:is_enabled(listeners) andalso (ok = emqx_listeners:start()),
register(emqx, self()),
ok = maybe_start_listeners(),
ok = emqx_alarm_handler:load(),
print_vsn(),
register(emqx, self()),
{ok, Sup}.
prep_stop(_State) ->
@ -79,6 +70,13 @@ stop(_State) -> ok.
set_init_config_load_done() ->
application:set_env(emqx, init_config_load_done, true).
%% @doc This API is mostly for testing.
%% The override config file is typically located in the 'data' dir when
%% it is an emqx release, but the emqx app should not have to know where the
%% 'data' dir is located.
set_override_conf_file(File) ->
application:set_env(emqx, override_conf_file, File).
maybe_load_config() ->
case application:get_env(emqx, init_config_load_done, false) of
true ->
@ -89,52 +87,31 @@ maybe_load_config() ->
emqx_config:init_load(emqx_schema, ConfFiles)
end.
set_backtrace_depth() ->
Depth = emqx_config:get([node, backtrace_depth]),
_ = erlang:system_flag(backtrace_depth, Depth),
ok.
maybe_start_listeners() ->
case emqx_boot:is_enabled(listeners) of
true ->
ok = emqx_listeners:start();
false ->
ok
end.
-ifndef(EMQX_ENTERPRISE).
load_ce_modules() ->
application:load(emqx_modules).
start_ce_modules() ->
application:ensure_all_started(emqx_modules).
-else.
load_ce_modules() ->
ok.
start_ce_modules() ->
ok.
-endif.
maybe_start_quicer() ->
case is_quicer_app_present() andalso is_quic_listener_configured() of
true -> {ok, _} = application:ensure_all_started(quicer), ok;
false -> ok
end.
%%--------------------------------------------------------------------
%% Print Banner
%%--------------------------------------------------------------------
is_quicer_app_present() ->
case application:load(quicer) of
ok -> true;
{error, {already_loaded, _}} -> true;
_ ->
?SLOG(info, #{msg => "quicer_app_not_found"}),
false
end.
-if(?OTP_RELEASE> 22).
print_otp_version_warning() -> ok.
-else.
print_otp_version_warning() ->
?ULOG("WARNING: Running on Erlang/OTP version ~p. Recommended: 23~n",
[?OTP_RELEASE]).
-endif. % OTP_RELEASE
-ifndef(TEST).
print_banner() ->
?ULOG("Starting ~s on node ~s~n", [?APP, node()]).
print_vsn() ->
?ULOG("~s ~s is running now!~n", [get_description(), get_release()]).
-else. % TEST
print_vsn() ->
ok.
print_banner() ->
ok.
-endif. % TEST
is_quic_listener_configured() ->
emqx_listeners:has_enabled_listener_conf_by_type(quic).
get_description() ->
{ok, Descr0} = application:get_key(?APP, description),
@ -163,12 +140,3 @@ get_release() ->
release_in_macro() ->
element(2, ?EMQX_RELEASE).
%%--------------------------------------------------------------------
%% Autocluster
%%--------------------------------------------------------------------
start_autocluster() ->
ekka:callback(prepare, fun emqx:shutdown/1),
ekka:callback(reboot, fun emqx:reboot/0),
_ = ekka:autocluster(?APP), %% returns 'ok' or a pid or 'any()' as in spec
ok.

View File

@ -260,7 +260,7 @@ load_hocon_file(FileName, LoadType) ->
end.
emqx_override_conf_name() ->
filename:join([?MODULE:get([node, data_dir]), "emqx_override.conf"]).
application:get_env(emqx, override_conf_file, "emqx_override.conf").
bin(Bin) when is_binary(Bin) -> Bin;
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).

View File

@ -136,11 +136,11 @@ help() ->
-spec(print(io:format()) -> ok).
print(Msg) ->
io:format(format(Msg)).
io:format("~s", [format(Msg)]).
-spec(print(io:format(), [term()]) -> ok).
print(Format, Args) ->
io:format(format(Format, Args)).
io:format("~s", [format(Format, Args)]).
-spec(usage([cmd_usage()]) -> ok).
usage(UsageList) ->

View File

@ -29,7 +29,6 @@ init([]) ->
{ok, {{one_for_one, 10, 100},
%% always start emqx_config_handler first to load the emqx.conf to emqx_config
[ child_spec(emqx_config_handler, worker)
, child_spec(emqx_global_gc, worker)
, child_spec(emqx_pool_sup, supervisor)
, child_spec(emqx_hooks, worker)
, child_spec(emqx_stats, worker)

View File

@ -34,10 +34,15 @@
, stop_listener/3
, restart_listener/1
, restart_listener/3
, has_enabled_listener_conf_by_type/1
]).
%% @doc List configured listeners.
-spec(list() -> [{ListenerId :: atom(), ListenerConf :: map()}]).
list() ->
[{listener_id(ZoneName, LName), LConf} || {ZoneName, LName, LConf} <- do_list()].
do_list() ->
Zones = maps:to_list(emqx_config:get([zones], #{})),
lists:append([list(ZoneName, ZoneConf) || {ZoneName, ZoneConf} <- Zones]).
@ -45,26 +50,19 @@ list(ZoneName, ZoneConf) ->
Listeners = maps:to_list(maps:get(listeners, ZoneConf, #{})),
[
begin
ListenerId = listener_id(ZoneName, LName),
Running = is_running(ListenerId),
Conf = merge_zone_and_listener_confs(ZoneConf, LConf),
{ListenerId, maps:put(running, Running, Conf)}
Running = is_running(listener_id(ZoneName, LName), Conf),
{ZoneName , LName, maps:put(running, Running, Conf)}
end
|| {LName, LConf} <- Listeners].
|| {LName, LConf} <- Listeners, is_map(LConf)].
-spec is_running(ListenerId :: atom()) -> boolean() | {error, no_found}.
is_running(ListenerId) ->
Zones = maps:to_list(emqx_config:get([zones], #{})),
Listeners = lists:append(
[
[{listener_id(ZoneName, LName),merge_zone_and_listener_confs(ZoneConf, LConf)}
|| {LName, LConf} <- maps:to_list(maps:get(listeners, ZoneConf, #{}))]
|| {ZoneName, ZoneConf} <- Zones]),
case proplists:get_value(ListenerId, Listeners, undefined) of
undefined ->
{error, no_found};
Conf ->
is_running(ListenerId, Conf)
case lists:filtermap(fun({_Zone, Id, #{running := IsRunning}}) ->
Id =:= ListenerId andalso {true, IsRunning}
end, do_list()) of
[IsRunning] -> IsRunning;
[] -> {error, not_found}
end.
is_running(ListenerId, #{type := tcp, bind := ListenOn})->
@ -271,9 +269,11 @@ listener_id(ZoneName, ListenerName) ->
list_to_atom(lists:append([atom_to_list(ZoneName), ":", atom_to_list(ListenerName)])).
decode_listener_id(Id) ->
case string:split(atom_to_list(Id), ":", leading) of
[Zone, Listen] -> {list_to_atom(Zone), list_to_atom(Listen)};
_ -> error({invalid_listener_id, Id})
try
[Zone, Listen] = string:split(atom_to_list(Id), ":", leading),
{list_to_existing_atom(Zone), list_to_existing_atom(Listen)}
catch
_ : _ -> error({invalid_listener_id, Id})
end.
ssl_opts(Opts) ->
@ -291,11 +291,17 @@ is_ssl(Opts) ->
emqx_map_lib:deep_get([ssl, enable], Opts, false).
foreach_listeners(Do) ->
lists:foreach(fun({ZoneName, ZoneConf}) ->
lists:foreach(fun({LName, LConf}) ->
Do(ZoneName, LName, merge_zone_and_listener_confs(ZoneConf, LConf))
end, maps:to_list(maps:get(listeners, ZoneConf, #{})))
end, maps:to_list(emqx_config:get([zones], #{}))).
lists:foreach(
fun({ZoneName, LName, LConf}) ->
Do(ZoneName, LName, LConf)
end, do_list()).
has_enabled_listener_conf_by_type(Type) ->
lists:any(
fun({_Zone, _LName, LConf}) when is_map(LConf) ->
Type =:= maps:get(type, LConf) andalso
maps:get(enabled, LConf, true)
end, do_list()).
%% merge the configs in zone and listeners in a manner that
%% all config entries in the listener are prior to the ones in the zone.

View File

@ -36,7 +36,4 @@ enrich_fmt(Fmt, Args, #{mfa := Mfa, line := Line}) ->
enrich_fmt(Fmt, Args, _) ->
{Fmt, Args}.
mfa({M, F, A}) ->
<<(atom_to_binary(M, utf8))/binary, $:,
(atom_to_binary(F, utf8))/binary, $/,
(integer_to_binary(A))/binary>>.
mfa({M, F, A}) -> atom_to_list(M) ++ ":" ++ atom_to_list(F) ++ "/" ++ integer_to_list(A).

View File

@ -24,7 +24,6 @@
-include_lib("typerefl/include/types.hrl").
-type log_level() :: debug | info | notice | warning | error | critical | alert | emergency | all.
-type duration() :: integer().
-type duration_s() :: integer().
-type duration_ms() :: integer().
@ -60,179 +59,18 @@
-behaviour(hocon_schema).
-reflect_type([ log_level/0, duration/0, duration_s/0, duration_ms/0,
-reflect_type([ duration/0, duration_s/0, duration_ms/0,
bytesize/0, wordsize/0, percent/0, file/0,
comma_separated_list/0, bar_separated_list/0, ip_port/0,
cipher/0,
comma_separated_atoms/0]).
-export([structs/0, fields/1, translations/0, translation/1]).
-export([structs/0, fields/1]).
-export([t/1, t/3, t/4, ref/1]).
-export([conf_get/2, conf_get/3, keys/2, filter/1]).
-export([ssl/1]).
%% will be used by emqx_ct_helper to find the dependent apps
-export([includes/0, extra_schema_fields/1]).
structs() -> ["cluster", "node", "rpc", "log",
"zones", "listeners", "broker",
"plugins", "sysmon", "alarm"]
++ ?MODULE:includes().
-ifndef(EMQX_EXT_SCHEMAS).
includes() -> [].
-else.
includes() ->
[FieldName || {FieldName, _SchemaMod} <- ?EMQX_EXT_SCHEMAS].
-endif.
fields("cluster") ->
[ {"name", t(atom(), "ekka.cluster_name", emqxcl)}
, {"discovery_strategy", t(union([manual, static, mcast, dns, etcd, k8s]),
undefined, manual)}
, {"autoclean", t(duration(), "ekka.cluster_autoclean", "5m")}
, {"autoheal", t(boolean(), "ekka.cluster_autoheal", true)}
, {"static", ref("static")}
, {"mcast", ref("mcast")}
, {"proto_dist", t(union([inet_tcp, inet6_tcp, inet_tls]), "ekka.proto_dist", inet_tcp)}
, {"dns", ref("dns")}
, {"etcd", ref("etcd")}
, {"k8s", ref("k8s")}
, {"db_backend", t(union([mnesia, rlog]), "ekka.db_backend", mnesia)}
, {"rlog", ref("rlog")}
];
fields("static") ->
[ {"seeds", t(hoconsc:array(string()), undefined, [])}];
fields("mcast") ->
[ {"addr", t(string(), undefined, "239.192.0.1")}
, {"ports", t(hoconsc:array(integer()), undefined, [4369, 4370])}
, {"iface", t(string(), undefined, "0.0.0.0")}
, {"ttl", t(range(0, 255), undefined, 255)}
, {"loop", t(boolean(), undefined, true)}
, {"sndbuf", t(bytesize(), undefined, "16KB")}
, {"recbuf", t(bytesize(), undefined, "16KB")}
, {"buffer", t(bytesize(), undefined, "32KB")}
];
fields("dns") ->
[ {"name", t(string(), undefined, "localhost")}
, {"app", t(string(), undefined, "emqx")}];
fields("etcd") ->
[ {"server", t(comma_separated_list())}
, {"prefix", t(string(), undefined, "emqxcl")}
, {"node_ttl", t(duration(), undefined, "1m")}
, {"ssl", ref("etcd_ssl")}
];
fields("etcd_ssl") ->
ssl(#{});
fields("k8s") ->
[ {"apiserver", t(string())}
, {"service_name", t(string(), undefined, "emqx")}
, {"address_type", t(union([ip, dns, hostname]))}
, {"app_name", t(string(), undefined, "emqx")}
, {"namespace", t(string(), undefined, "default")}
, {"suffix", t(string(), undefined, "pod.local")}
];
fields("rlog") ->
[ {"role", t(union([core, replicant]), "ekka.node_role", core)}
, {"core_nodes", t(comma_separated_atoms(), "ekka.core_nodes", [])}
];
fields("node") ->
[ {"name", hoconsc:t(string(), #{default => "emqx@127.0.0.1",
override_env => "EMQX_NODE_NAME"
})}
, {"cookie", hoconsc:t(string(), #{mapping => "vm_args.-setcookie",
default => "emqxsecretcookie",
sensitive => true,
override_env => "EMQX_NODE_COOKIE"
})}
, {"data_dir", t(string())}
, {"config_files", t(comma_separated_list())}
, {"global_gc_interval", t(duration(), undefined, "15m")}
, {"crash_dump_dir", t(file(), "vm_args.-env ERL_CRASH_DUMP", undefined)}
, {"dist_net_ticktime", t(duration(), "vm_args.-kernel net_ticktime", "2m")}
, {"dist_listen_min", t(range(1024, 65535), "kernel.inet_dist_listen_min", 6369)}
, {"dist_listen_max", t(range(1024, 65535), "kernel.inet_dist_listen_max", 6369)}
, {"backtrace_depth", t(integer(), undefined, 23)}
];
fields("rpc") ->
[ {"mode", t(union(sync, async), undefined, async)}
, {"async_batch_size", t(integer(), "gen_rpc.max_batch_size", 256)}
, {"port_discovery",t(union(manual, stateless), "gen_rpc.port_discovery", stateless)}
, {"tcp_server_port", t(integer(), "gen_rpc.tcp_server_port", 5369)}
, {"tcp_client_num", t(range(1, 256), undefined, 1)}
, {"connect_timeout", t(duration(), "gen_rpc.connect_timeout", "5s")}
, {"send_timeout", t(duration(), "gen_rpc.send_timeout", "5s")}
, {"authentication_timeout", t(duration(), "gen_rpc.authentication_timeout", "5s")}
, {"call_receive_timeout", t(duration(), "gen_rpc.call_receive_timeout", "15s")}
, {"socket_keepalive_idle", t(duration_s(), "gen_rpc.socket_keepalive_idle", "7200s")}
, {"socket_keepalive_interval", t(duration_s(), "gen_rpc.socket_keepalive_interval", "75s")}
, {"socket_keepalive_count", t(integer(), "gen_rpc.socket_keepalive_count", 9)}
, {"socket_sndbuf", t(bytesize(), "gen_rpc.socket_sndbuf", "1MB")}
, {"socket_recbuf", t(bytesize(), "gen_rpc.socket_recbuf", "1MB")}
, {"socket_buffer", t(bytesize(), "gen_rpc.socket_buffer", "1MB")}
];
fields("log") ->
[ {"primary_level", t(log_level(), undefined, warning)}
, {"console_handler", ref("console_handler")}
, {"file_handlers", ref("file_handlers")}
, {"time_offset", t(string(), undefined, "system")}
, {"chars_limit", maybe_infinity(range(1, inf))}
, {"supervisor_reports", t(union([error, progress]), undefined, error)}
, {"max_depth", t(union([infinity, integer()]),
"kernel.error_logger_format_depth", 80)}
, {"formatter", t(union([text, json]), undefined, text)}
, {"single_line", t(boolean(), undefined, true)}
, {"sync_mode_qlen", t(integer(), undefined, 100)}
, {"drop_mode_qlen", t(integer(), undefined, 3000)}
, {"flush_qlen", t(integer(), undefined, 8000)}
, {"overload_kill", ref("log_overload_kill")}
, {"burst_limit", ref("log_burst_limit")}
, {"error_logger", t(atom(), "kernel.error_logger", silent)}
];
fields("console_handler") ->
[ {"enable", t(boolean(), undefined, false)}
, {"level", t(log_level(), undefined, warning)}
];
fields("file_handlers") ->
[ {"$name", ref("log_file_handler")}
];
fields("log_file_handler") ->
[ {"level", t(log_level(), undefined, warning)}
, {"file", t(file(), undefined, undefined)}
, {"rotation", ref("log_rotation")}
, {"max_size", maybe_infinity(bytesize(), "10MB")}
];
fields("log_rotation") ->
[ {"enable", t(boolean(), undefined, true)}
, {"count", t(range(1, 2048), undefined, 10)}
];
fields("log_overload_kill") ->
[ {"enable", t(boolean(), undefined, true)}
, {"mem_size", t(bytesize(), undefined, "30MB")}
, {"qlen", t(integer(), undefined, 20000)}
, {"restart_after", t(union(duration(), infinity), undefined, "5s")}
];
fields("log_burst_limit") ->
[ {"enable", t(boolean(), undefined, true)}
, {"max_count", t(integer(), undefined, 10000)}
, {"window_time", t(duration(), undefined, "1s")}
];
structs() -> ["zones", "listeners", "broker", "plugins", "sysmon", "alarm"].
fields("stats") ->
[ {"enable", t(boolean(), undefined, true)}
@ -354,7 +192,8 @@ fields("force_gc") ->
fields("listeners") ->
[ {"$name", hoconsc:union(
[ hoconsc:ref("mqtt_tcp_listener")
[ disabled
, hoconsc:ref("mqtt_tcp_listener")
, hoconsc:ref("mqtt_ws_listener")
, hoconsc:ref("mqtt_quic_listener")
])}
@ -480,20 +319,7 @@ fields("alarm") ->
[ {"actions", t(hoconsc:array(atom()), undefined, [log, publish])}
, {"size_limit", t(integer(), undefined, 1000)}
, {"validity_period", t(duration(), undefined, "24h")}
];
fields(FieldName) ->
?MODULE:extra_schema_fields(FieldName).
-ifndef(EMQX_EXT_SCHEMAS).
%% Function extra_schema_fields/1 only terminates with explicit exception
-dialyzer([{nowarn_function, [extra_schema_fields/1]}]).
extra_schema_fields(FieldName) -> error({unknown_field, FieldName}).
-else.
extra_schema_fields(FieldName) ->
{_, Mod} = lists:keyfind(FieldName, 1, ?EMQX_EXT_SCHEMAS),
Mod:fields(FieldName).
-endif.
].
mqtt_listener() ->
base_listener() ++
@ -509,117 +335,6 @@ base_listener() ->
, {"rate_limit", ref("rate_limit")}
].
translations() -> ["ekka", "kernel", "emqx"].
translation("ekka") ->
[ {"cluster_discovery", fun tr_cluster__discovery/1}];
translation("kernel") ->
[ {"logger_level", fun tr_logger_level/1}
, {"logger", fun tr_logger/1}];
translation("emqx") ->
[ {"config_files", fun tr_config_files/1}
].
tr_config_files(Conf) ->
case conf_get("emqx.config_files", Conf) of
[_ | _] = Files ->
Files;
_ ->
case os:getenv("RUNNER_ETC_DIR") of
false ->
[filename:join([code:lib_dir(emqx), "etc", "emqx.conf"])];
Dir ->
[filename:join([Dir, "emqx.conf"])]
end
end.
tr_cluster__discovery(Conf) ->
Strategy = conf_get("cluster.discovery_strategy", Conf),
{Strategy, filter(options(Strategy, Conf))}.
tr_logger_level(Conf) -> conf_get("log.primary_level", Conf).
tr_logger(Conf) ->
CharsLimit = case conf_get("log.chars_limit", Conf) of
infinity -> unlimited;
V -> V
end,
SingleLine = conf_get("log.single_line", Conf),
FmtName = conf_get("log.formatter", Conf),
Formatter = formatter(FmtName, CharsLimit, SingleLine),
BasicConf = #{
sync_mode_qlen => conf_get("log.sync_mode_qlen", Conf),
drop_mode_qlen => conf_get("log.drop_mode_qlen", Conf),
flush_qlen => conf_get("log.flush_qlen", Conf),
overload_kill_enable => conf_get("log.overload_kill.enable", Conf),
overload_kill_qlen => conf_get("log.overload_kill.qlen", Conf),
overload_kill_mem_size => conf_get("log.overload_kill.mem_size", Conf),
overload_kill_restart_after => conf_get("log.overload_kill.restart_after", Conf),
burst_limit_enable => conf_get("log.burst_limit.enable", Conf),
burst_limit_max_count => conf_get("log.burst_limit.max_count", Conf),
burst_limit_window_time => conf_get("log.burst_limit.window_time", Conf)
},
Filters = case conf_get("log.supervisor_reports", Conf) of
error -> [{drop_progress_reports, {fun logger_filters:progress/2, stop}}];
progress -> []
end,
%% For the default logger that outputs to console
ConsoleHandler =
case conf_get("log.console_handler.enable", Conf) of
true ->
[{handler, console, logger_std_h, #{
level => conf_get("log.console_handler.level", Conf),
config => BasicConf#{type => standard_io},
formatter => Formatter,
filters => Filters
}}];
false -> []
end,
%% For the file logger
FileHandlers =
[{handler, binary_to_atom(HandlerName, latin1), logger_disk_log_h, #{
level => conf_get("level", SubConf),
config => BasicConf#{
type => case conf_get("rotation.enable", SubConf) of
true -> wrap;
_ -> halt
end,
file => conf_get("file", SubConf),
max_no_files => conf_get("rotation.count", SubConf),
max_no_bytes => conf_get("max_size", SubConf)
},
formatter => Formatter,
filters => Filters,
filesync_repeat_interval => no_repeat
}}
|| {HandlerName, SubConf} <- maps:to_list(conf_get("log.file_handlers", Conf, #{}))],
[{handler, default, undefined}] ++ ConsoleHandler ++ FileHandlers.
%% helpers
formatter(json, CharsLimit, SingleLine) ->
{emqx_logger_jsonfmt,
#{chars_limit => CharsLimit,
single_line => SingleLine
}};
formatter(text, CharsLimit, SingleLine) ->
{emqx_logger_textfmt,
#{template =>
[time," [",level,"] ",
{clientid,
[{peername,
[clientid,"@",peername," "],
[clientid, " "]}],
[{peername,
[peername," "],
[]}]},
msg,"\n"],
chars_limit => CharsLimit,
single_line => SingleLine
}}.
%% utils
-spec(conf_get(string() | [string()], hocon:config()) -> term()).
conf_get(Key, Conf) ->
@ -740,8 +455,7 @@ t(Type, Mapping, Default, OverrideEnv, Validator) ->
, validator => Validator
}).
ref(Field) ->
fun (type) -> Field; (_) -> undefined end.
ref(Field) -> hoconsc:t(hoconsc:ref(?MODULE, Field)).
maybe_disabled(T) ->
maybe_sth(disabled, T, disabled).
@ -817,37 +531,6 @@ to_erl_cipher_suite(Str) ->
Cipher -> Cipher
end.
options(static, Conf) ->
[{seeds, [to_atom(S) || S <- conf_get("cluster.static.seeds", Conf, [])]}];
options(mcast, Conf) ->
{ok, Addr} = inet:parse_address(conf_get("cluster.mcast.addr", Conf)),
{ok, Iface} = inet:parse_address(conf_get("cluster.mcast.iface", Conf)),
Ports = conf_get("cluster.mcast.ports", Conf),
[{addr, Addr}, {ports, Ports}, {iface, Iface},
{ttl, conf_get("cluster.mcast.ttl", Conf, 1)},
{loop, conf_get("cluster.mcast.loop", Conf, true)}];
options(dns, Conf) ->
[{name, conf_get("cluster.dns.name", Conf)},
{app, conf_get("cluster.dns.app", Conf)}];
options(etcd, Conf) ->
Namespace = "cluster.etcd.ssl",
SslOpts = fun(C) ->
Options = keys(Namespace, C),
lists:map(fun(Key) -> {to_atom(Key), conf_get([Namespace, Key], Conf)} end, Options) end,
[{server, conf_get("cluster.etcd.server", Conf)},
{prefix, conf_get("cluster.etcd.prefix", Conf, "emqxcl")},
{node_ttl, conf_get("cluster.etcd.node_ttl", Conf, 60)},
{ssl_options, filter(SslOpts(Conf))}];
options(k8s, Conf) ->
[{apiserver, conf_get("cluster.k8s.apiserver", Conf)},
{service_name, conf_get("cluster.k8s.service_name", Conf)},
{address_type, conf_get("cluster.k8s.address_type", Conf, ip)},
{app_name, conf_get("cluster.k8s.app_name", Conf)},
{namespace, conf_get("cluster.k8s.namespace", Conf)},
{suffix, conf_get("cluster.k8s.suffix", Conf, "")}];
options(manual, _Conf) ->
[].
to_atom(Atom) when is_atom(Atom) ->
Atom;
to_atom(Str) when is_list(Str) ->

View File

@ -67,7 +67,7 @@ init([]) ->
BrokerSup = child_spec(emqx_broker_sup, supervisor),
CMSup = child_spec(emqx_cm_sup, supervisor),
SysSup = child_spec(emqx_sys_sup, supervisor),
Childs = [KernelSup] ++
Children = [KernelSup] ++
[RouterSup || emqx_boot:is_enabled(router)] ++
[BrokerSup || emqx_boot:is_enabled(broker)] ++
[CMSup || emqx_boot:is_enabled(broker)] ++
@ -76,7 +76,7 @@ init([]) ->
intensity => 0,
period => 1
},
{ok, {SupFlags, Childs}}.
{ok, {SupFlags, Children}}.
%%--------------------------------------------------------------------
%% Internal functions

View File

@ -89,7 +89,7 @@ version() -> emqx_app:get_release().
sysdescr() -> emqx_app:get_description().
%% @doc Get sys uptime
-spec(uptime() -> string()).
-spec(uptime() -> Milliseconds :: integer()).
uptime() ->
gen_server:call(?SYS, uptime).
@ -142,7 +142,7 @@ handle_cast(Msg, State) ->
{noreply, State}.
handle_info({timeout, TRef, heartbeat}, State = #state{heartbeat = TRef}) ->
publish_any(uptime, iolist_to_binary(uptime(State))),
publish_any(uptime, integer_to_binary(uptime(State))),
publish_any(datetime, iolist_to_binary(datetime())),
{noreply, heartbeat(State)};
@ -167,22 +167,7 @@ terminate(_Reason, #state{heartbeat = TRef1, ticker = TRef2}) ->
%%-----------------------------------------------------------------------------
uptime(#state{start_time = Ts}) ->
Secs = timer:now_diff(erlang:timestamp(), Ts) div 1000000,
lists:flatten(uptime(seconds, Secs)).
uptime(seconds, Secs) when Secs < 60 ->
[integer_to_list(Secs), " seconds"];
uptime(seconds, Secs) ->
[uptime(minutes, Secs div 60), integer_to_list(Secs rem 60), " seconds"];
uptime(minutes, M) when M < 60 ->
[integer_to_list(M), " minutes, "];
uptime(minutes, M) ->
[uptime(hours, M div 60), integer_to_list(M rem 60), " minutes, "];
uptime(hours, H) when H < 24 ->
[integer_to_list(H), " hours, "];
uptime(hours, H) ->
[uptime(days, H div 24), integer_to_list(H rem 24), " hours, "];
uptime(days, D) ->
[integer_to_list(D), " days, "].
timer:now_diff(erlang:timestamp(), Ts) div 1000.
publish_any(Name, Value) ->
_ = publish(Name, Value),

View File

@ -32,25 +32,6 @@ init_per_suite(Config) ->
end_per_suite(_Config) ->
emqx_ct_helpers:stop_apps([]).
t_restart(_) ->
ConfFile = "test.config",
Data = "[{emqx_statsd,[{interval,15000},{push_gateway,\"http://127.0.0.1:9091\"}]}].",
file:write_file(ConfFile, list_to_binary(Data)),
emqx:restart(ConfFile),
file:delete(ConfFile).
t_stop_start(_) ->
emqx:stop(),
false = emqx:is_running(node()),
emqx:start(),
true = emqx:is_running(node()),
ok = emqx:shutdown(),
false = emqx:is_running(node()),
ok = emqx:reboot(),
true = emqx:is_running(node()),
ok = emqx:shutdown(for_test),
false = emqx:is_running(node()).
t_emqx_pubsub_api(_) ->
true = emqx:is_running(node()),
{ok, C} = emqtt:start_link([{host, "localhost"}, {clientid, "myclient"}]),

View File

@ -67,10 +67,12 @@ t_run_commands(_) ->
t_print(_) ->
ok = emqx_ctl:print("help"),
ok = emqx_ctl:print("~s", [help]),
ok = emqx_ctl:print("~s", [<<"~!@#$%^&*()">>]),
% - check the output of the usage
mock_print(),
?assertEqual("help", emqx_ctl:print("help")),
?assertEqual("help", emqx_ctl:print("~s", [help])),
?assertEqual("~!@#$%^&*()", emqx_ctl:print("~s", [<<"~!@#$%^&*()">>])),
unmock_print().
t_usage(_) ->

View File

@ -39,12 +39,6 @@ end_per_suite(_Config) ->
% t_sysdescr(_) ->
% error('TODO').
t_uptime(_) ->
?assertEqual(<<"1 seconds">>, iolist_to_binary(emqx_sys:uptime(seconds, 1))),
?assertEqual(<<"1 minutes, 0 seconds">>, iolist_to_binary(emqx_sys:uptime(seconds, 60))),
?assertEqual(<<"1 hours, 0 minutes, 0 seconds">>, iolist_to_binary(emqx_sys:uptime(seconds, 3600))),
?assertEqual(<<"1 days, 0 hours, 0 minutes, 0 seconds">>, iolist_to_binary(emqx_sys:uptime(seconds, 86400))).
% t_datetime(_) ->
% error('TODO').

View File

@ -114,7 +114,7 @@ postcondition(_State, {call, emqx_sys, info, []}, Info) ->
postcondition(_State, {call, emqx_sys, version, []}, Version) ->
is_list(Version);
postcondition(_State, {call, emqx_sys, uptime, []}, Uptime) ->
is_list(Uptime);
is_integer(Uptime);
postcondition(_State, {call, emqx_sys, datetime, []}, Datetime) ->
is_list(Datetime);
postcondition(_State, {call, emqx_sys, sysdescr, []}, Sysdescr) ->

View File

@ -21,6 +21,17 @@ authentication: {
# salt_field: salt
# password_hash_algorithm: sha256
# salt_position: prefix
# },
# {
# name: "authenticator 3"
# mechanism: password-based
# server_type: redis
# server: "127.0.0.1:6379"
# password: "public"
# database: 0
# query: "HMGET ${mqtt-username} password_hash salt"
# password_hash_algorithm: sha256
# salt_position: prefix
# }
]
}

View File

@ -325,6 +325,8 @@ authenticator_provider(#{mechanism := 'password-based', server_type := 'pgsql'})
emqx_authn_pgsql;
authenticator_provider(#{mechanism := 'password-based', server_type := 'mongodb'}) ->
emqx_authn_mongodb;
authenticator_provider(#{mechanism := 'password-based', server_type := 'redis'}) ->
emqx_authn_redis;
authenticator_provider(#{mechanism := 'password-based', server_type := 'http-server'}) ->
emqx_authn_http;
authenticator_provider(#{mechanism := jwt}) ->

View File

@ -32,7 +32,7 @@
-define(EXAMPLE_1, #{name => <<"example 1">>,
mechanism => <<"password-based">>,
server_type => <<"built-in-example">>,
server_type => <<"built-in-database">>,
user_id_type => <<"username">>,
password_hash_algorithm => #{
name => <<"sha256">>
@ -76,6 +76,16 @@
salt_position => <<"prefix">>
}).
-define(EXAMPLE_5, #{name => <<"example 5">>,
mechanism => <<"password-based">>,
server_type => <<"redis">>,
server => <<"127.0.0.1:6379">>,
database => 0,
query => <<"HMGET ${mqtt-username} password_hash salt">>,
password_hash_algorithm => <<"sha256">>,
salt_position => <<"prefix">>
}).
-define(ERR_RESPONSE(Desc), #{description => Desc,
content => #{
'application/json' => #{
@ -180,6 +190,10 @@ authenticators_api() ->
mongodb => #{
summary => <<"Authentication with MongoDB">>,
value => emqx_json:encode(?EXAMPLE_4)
},
redis => #{
summary => <<"Authentication with Redis">>,
value => emqx_json:encode(?EXAMPLE_5)
}
}
}
@ -192,6 +206,7 @@ authenticators_api() ->
'application/json' => #{
schema => minirest:ref(<<"returned_authenticator">>),
examples => #{
%% TODO: return full content
example1 => #{
summary => <<"Example 1">>,
value => emqx_json:encode(maps:put(id, <<"example 1">>, ?EXAMPLE_1))
@ -207,6 +222,10 @@ authenticators_api() ->
example4 => #{
summary => <<"Example 4">>,
value => emqx_json:encode(maps:put(id, <<"example 4">>, ?EXAMPLE_4))
},
example5 => #{
summary => <<"Example 4">>,
value => emqx_json:encode(maps:put(id, <<"example 5">>, ?EXAMPLE_5))
}
}
}
@ -234,6 +253,7 @@ authenticators_api() ->
, maps:put(id, <<"example 2">>, ?EXAMPLE_2)
, maps:put(id, <<"example 3">>, ?EXAMPLE_3)
, maps:put(id, <<"example 4">>, ?EXAMPLE_4)
, maps:put(id, <<"example 5">>, ?EXAMPLE_5)
])
}
}
@ -281,6 +301,10 @@ authenticators_api2() ->
example4 => #{
summary => <<"Example 4">>,
value => emqx_json:encode(maps:put(id, <<"example 4">>, ?EXAMPLE_4))
},
example5 => #{
summary => <<"Example 5">>,
value => emqx_json:encode(maps:put(id, <<"example 5">>, ?EXAMPLE_5))
}
}
}
@ -345,6 +369,10 @@ authenticators_api2() ->
example4 => #{
summary => <<"Example 4">>,
value => emqx_json:encode(maps:put(id, <<"example 4">>, ?EXAMPLE_4))
},
example5 => #{
summary => <<"Example 5">>,
value => emqx_json:encode(maps:put(id, <<"example 5">>, ?EXAMPLE_5))
}
}
}
@ -1024,6 +1052,66 @@ definitions() ->
}
},
PasswordBasedRedisDef = #{
type => object,
required => [],
properties => #{
server_type => #{
type => string,
enum => [<<"redis">>],
example => [<<"redis">>]
},
server => #{
description => <<"Mutually exclusive with the 'servers' field, only valid in standalone mode">>,
type => string,
example => <<"127.0.0.1:27017">>
},
servers => #{
description => <<"Mutually exclusive with the 'server' field, only valid in cluster and sentinel mode">>,
type => array,
items => #{
type => string
},
example => [<<"127.0.0.1:27017">>]
},
sentinel => #{
description => <<"Only valid in sentinel mode">>,
type => string
},
password => #{
type => string
},
database => #{
type => integer,
exmaple => 0
},
query => #{
type => string,
example => <<"HMGET ${mqtt-username} password_hash salt">>
},
password_hash_algorithm => #{
type => string,
enum => [<<"plain">>, <<"md5">>, <<"sha">>, <<"sha256">>, <<"sha512">>, <<"bcrypt">>],
default => <<"sha256">>,
example => <<"sha256">>
},
salt_position => #{
type => string,
enum => [<<"prefix">>, <<"suffix">>],
default => <<"prefix">>,
example => <<"prefix">>
},
pool_size => #{
type => integer,
default => 8
},
auto_reconnect => #{
type => boolean,
default => true
}
}
},
PasswordBasedHTTPServerDef = #{
type => object,
required => [ server_type
@ -1155,6 +1243,7 @@ definitions() ->
, #{<<"password_based_mysql">> => PasswordBasedMySQLDef}
, #{<<"password_based_pgsql">> => PasswordBasedPgSQLDef}
, #{<<"password_based_mongodb">> => PasswordBasedMongoDBDef}
, #{<<"password_based_redis">> => PasswordBasedRedisDef}
, #{<<"password_based_http_server">> => PasswordBasedHTTPServerDef}
, #{<<"password_hash_algorithm">> => PasswordHashAlgorithmDef}
, #{<<"ssl">> => SSLDef}

View File

@ -28,6 +28,10 @@
-export([ authenticator_name/1
]).
%% Export it for emqx_gateway_schema module
-export([ authenticators/1
]).
structs() -> [ "authentication" ].
fields("authentication") ->
@ -49,7 +53,10 @@ authenticators(type) ->
, hoconsc:ref(emqx_authn_pgsql, config)
, hoconsc:ref(emqx_authn_mongodb, standalone)
, hoconsc:ref(emqx_authn_mongodb, 'replica-set')
, hoconsc:ref(emqx_authn_mongodb, sharded)
, hoconsc:ref(emqx_authn_mongodb, 'sharded-cluster')
, hoconsc:ref(emqx_authn_redis, standalone)
, hoconsc:ref(emqx_authn_redis, cluster)
, hoconsc:ref(emqx_authn_redis, sentinel)
, hoconsc:ref(emqx_authn_http, get)
, hoconsc:ref(emqx_authn_http, post)
, hoconsc:ref(emqx_authn_jwt, 'hmac-based')

View File

@ -18,6 +18,7 @@
-export([ replace_placeholders/2
, replace_placeholder/2
, hash/4
, gen_salt/0
, bin/1
]).
@ -54,6 +55,10 @@ replace_placeholder(<<"${cert-common-name}">>, Credential) ->
replace_placeholder(Constant, _) ->
Constant.
hash(Algorithm, Password, Salt, prefix) ->
emqx_passwd:hash(Algorithm, <<Salt/binary, Password/binary>>);
hash(Algorithm, Password, Salt, suffix) ->
emqx_passwd:hash(Algorithm, <<Password/binary, Salt/binary>>).
gen_salt() ->
<<X:128/big-unsigned-integer>> = crypto:strong_rand_bytes(16),

View File

@ -41,7 +41,7 @@ structs() -> [""].
fields("") ->
[ {config, {union, [ hoconsc:t(standalone)
, hoconsc:t('replica-set')
, hoconsc:t(sharded)
, hoconsc:t('sharded-cluster')
]}}
];
@ -51,7 +51,7 @@ fields(standalone) ->
fields('replica-set') ->
common_fields() ++ emqx_connector_mongo:fields(rs);
fields(sharded) ->
fields('sharded-cluster') ->
common_fields() ++ emqx_connector_mongo:fields(sharded).
common_fields() ->

View File

@ -146,11 +146,7 @@ check_password(Password,
#{password_hash_algorithm := Algorithm,
salt_position := SaltPosition}) ->
Salt = maps:get(salt, Selected, <<>>),
Hash0 = case SaltPosition of
prefix -> emqx_passwd:hash(Algorithm, <<Salt/binary, Password/binary>>);
suffix -> emqx_passwd:hash(Algorithm, <<Password/binary, Salt/binary>>)
end,
case Hash0 =:= Hash of
case Hash =:= emqx_authn_utils:hash(Algorithm, Password, Salt, SaltPosition) of
true -> ok;
false -> {error, bad_username_or_password}
end.

View File

@ -132,11 +132,7 @@ check_password(Password,
#{password_hash_algorithm := Algorithm,
salt_position := SaltPosition}) ->
Salt = maps:get(salt, Selected, <<>>),
Hash0 = case SaltPosition of
prefix -> emqx_passwd:hash(Algorithm, <<Salt/binary, Password/binary>>);
suffix -> emqx_passwd:hash(Algorithm, <<Password/binary, Salt/binary>>)
end,
case Hash0 =:= Hash of
case Hash =:= emqx_authn_utils:hash(Algorithm, Password, Salt, SaltPosition) of
true -> ok;
false -> {error, bad_username_or_password}
end.

View File

@ -0,0 +1,222 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_authn_redis).
-include("emqx_authn.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("typerefl/include/types.hrl").
-behaviour(hocon_schema).
-export([ structs/0
, fields/1
]).
-export([ create/1
, update/2
, authenticate/2
, destroy/1
]).
%%------------------------------------------------------------------------------
%% Hocon Schema
%%------------------------------------------------------------------------------
structs() -> [""].
fields("") ->
[ {config, {union, [ hoconsc:t(standalone)
, hoconsc:t(cluster)
, hoconsc:t(sentinel)
]}}
];
fields(standalone) ->
common_fields() ++ emqx_connector_redis:fields(single);
fields(cluster) ->
common_fields() ++ emqx_connector_redis:fields(cluster);
fields(sentinel) ->
common_fields() ++ emqx_connector_redis:fields(sentinel).
common_fields() ->
[ {name, fun emqx_authn_schema:authenticator_name/1}
, {mechanism, {enum, ['password-based']}}
, {server_type, {enum, [redis]}}
, {query, fun query/1}
, {password_hash_algorithm, fun password_hash_algorithm/1}
, {salt_position, fun salt_position/1}
].
query(type) -> string();
query(nullable) -> false;
query(_) -> undefined.
password_hash_algorithm(type) -> {enum, [plain, md5, sha, sha256, sha512, bcrypt]};
password_hash_algorithm(default) -> sha256;
password_hash_algorithm(_) -> undefined.
salt_position(type) -> {enum, [prefix, suffix]};
salt_position(default) -> prefix;
salt_position(_) -> undefined.
%%------------------------------------------------------------------------------
%% APIs
%%------------------------------------------------------------------------------
create(#{ query := Query
, '_unique' := Unique
} = Config) ->
try
NQuery = parse_query(Query),
State = maps:with([ password_hash_algorithm
, salt_position
, '_unique'], Config),
NState = State#{query => NQuery},
case emqx_resource:create_local(Unique, emqx_connector_redis, Config) of
{ok, _} ->
{ok, NState};
{error, already_created} ->
{ok, NState};
{error, Reason} ->
{error, Reason}
end
catch
error:{unsupported_query, Query} ->
{error, {unsupported_query, Query}};
error:missing_password_hash ->
{error, missing_password_hash};
error:{unsupported_field, Field} ->
{error, {unsupported_field, Field}}
end.
update(Config, State) ->
case create(Config) of
{ok, NewState} ->
ok = destroy(State),
{ok, NewState};
{error, Reason} ->
{error, Reason}
end.
authenticate(#{auth_method := _}, _) ->
ignore;
authenticate(#{password := Password} = Credential,
#{ query := {Command, Key, Fields}
, '_unique' := Unique
} = State) ->
try
NKey = binary_to_list(iolist_to_binary(replace_placeholders(Key, Credential))),
case emqx_resource:query(Unique, {cmd, [Command, NKey | Fields]}) of
{ok, Values} ->
check_password(Password, merge(Fields, Values), State);
{error, Reason} ->
?LOG(error, "['~s'] Query failed: ~p", [Unique, Reason]),
ignore
end
catch
error:{cannot_get_variable, Placeholder} ->
?LOG(warning, "The following error occurred in '~s' during authentication: ~p", [Unique, {cannot_get_variable, Placeholder}]),
ignore
end.
destroy(#{'_unique' := Unique}) ->
_ = emqx_resource:remove_local(Unique),
ok.
%%------------------------------------------------------------------------------
%% Internal functions
%%------------------------------------------------------------------------------
%% Only support HGET and HMGET
parse_query(Query) ->
case string:tokens(Query, " ") of
[Command, Key, Field | Fields] when Command =:= "HGET" orelse Command =:= "HMGET" ->
NFields = [Field | Fields],
check_fields(NFields),
NKey = parse_key(Key),
{Command, NKey, NFields};
_ ->
error({unsupported_query, Query})
end.
check_fields(Fields) ->
check_fields(Fields, false).
check_fields([], false) ->
error(missing_password_hash);
check_fields([], true) ->
ok;
check_fields(["password_hash" | More], false) ->
check_fields(More, true);
check_fields(["salt" | More], HasPassHash) ->
check_fields(More, HasPassHash);
% check_fields(["is_superuser" | More], HasPassHash) ->
% check_fields(More, HasPassHash);
check_fields([Field | _], _) ->
error({unsupported_field, Field}).
parse_key(Key) ->
Tokens = re:split(Key, "(" ++ ?RE_PLACEHOLDER ++ ")", [{return, binary}, group, trim]),
parse_key(Tokens, []).
parse_key([], Acc) ->
lists:reverse(Acc);
parse_key([[Constant, Placeholder] | Tokens], Acc) ->
parse_key(Tokens, [{placeholder, Placeholder}, {constant, Constant} | Acc]);
parse_key([[Constant] | Tokens], Acc) ->
parse_key(Tokens, [{constant, Constant} | Acc]).
replace_placeholders(Key, Credential) ->
lists:map(fun({constant, Constant}) ->
Constant;
({placeholder, Placeholder}) ->
case emqx_authn_utils:replace_placeholder(Placeholder, Credential) of
undefined -> error({cannot_get_variable, Placeholder});
Value -> Value
end
end, Key).
merge(Fields, Value) when not is_list(Value) ->
merge(Fields, [Value]);
merge(Fields, Values) ->
maps:from_list(
lists:filter(fun({_, V}) ->
V =/= undefined
end, lists:zip(Fields, Values))).
check_password(undefined, _Selected, _State) ->
{error, bad_username_or_password};
check_password(Password,
#{"password_hash" := PasswordHash},
#{password_hash_algorithm := bcrypt}) ->
case {ok, PasswordHash} =:= bcrypt:hashpw(Password, PasswordHash) of
true -> ok;
false -> {error, bad_username_or_password}
end;
check_password(Password,
#{"password_hash" := PasswordHash} = Selected,
#{password_hash_algorithm := Algorithm,
salt_position := SaltPosition}) ->
Salt = maps:get("salt", Selected, <<>>),
case PasswordHash =:= emqx_authn_utils:hash(Algorithm, Password, Salt, SaltPosition) of
true -> ok;
false -> {error, bad_username_or_password}
end;
check_password(_Password, _Selected, _State) ->
ignore.

View File

@ -57,8 +57,9 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
%delete_default_app(),
emqx_ct_helpers:stop_apps([emqx_authz]).
ok = emqx_authz:update(replace, []),
emqx_ct_helpers:stop_apps([emqx_authz]),
ok.
% set_special_configs(emqx) ->
% application:set_env(emqx, allow_anonymous, true),

View File

@ -49,8 +49,10 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
emqx_authz:update(replace, []),
emqx_ct_helpers:stop_apps([emqx_authz, emqx_resource]),
meck:unload(emqx_resource).
meck:unload(emqx_resource),
ok.
%%------------------------------------------------------------------------------
%% Testcases

View File

@ -50,8 +50,10 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
emqx_authz:update(replace, []),
emqx_ct_helpers:stop_apps([emqx_authz, emqx_resource]),
meck:unload(emqx_resource).
meck:unload(emqx_resource),
ok.
-define(RULE1,[#{<<"topics">> => [<<"#">>],
<<"permission">> => <<"deny">>,

View File

@ -52,6 +52,7 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
ok = emqx_authz:update(replace, []),
emqx_ct_helpers:stop_apps([emqx_authz, emqx_resource]),
meck:unload(emqx_resource).

View File

@ -51,6 +51,7 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
ok = emqx_authz:update(replace, []),
emqx_ct_helpers:stop_apps([emqx_authz, emqx_resource]),
meck:unload(emqx_resource).

View File

@ -50,6 +50,7 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
ok = emqx_authz:update(replace, []),
emqx_ct_helpers:stop_apps([emqx_authz, emqx_resource]),
meck:unload(emqx_resource).

View File

@ -3,7 +3,7 @@
{vsn, "5.0.0"}, % strict semver, bump manually!
{modules, []},
{registered, []},
{applications, [kernel,stdlib,replayq,emqtt]},
{applications, [kernel,stdlib,replayq,emqtt,emqx]},
{mod, {emqx_bridge_mqtt_app, []}},
{env, []},
{licenses, ["Apache-2.0"]},

View File

@ -12,7 +12,8 @@
eredis,
epgsql,
mysql,
mongodb
mongodb,
emqx
]},
{env,[]},
{modules, []},

View File

@ -5,6 +5,8 @@
emqx_dashboard:{
default_username: "admin"
default_password: "public"
## notice: sample_interval must divide 60 evenly (60 div sample_interval).
sample_interval: 10s
listeners: [
{
num_acceptors: 4

View File

@ -21,3 +21,8 @@
-define(EMPTY_KEY(Key), ((Key == undefined) orelse (Key == <<>>))).
-define(DASHBOARD_SHARD, emqx_dashboard_shard).
-record(mqtt_collect, {
timestamp :: integer(),
collect
}).

View File

@ -3,7 +3,7 @@
{vsn, "5.0.0"}, % strict semver, bump manually!
{modules, []},
{registered, [emqx_dashboard_sup]},
{applications, [kernel,stdlib,mnesia,minirest]},
{applications, [kernel,stdlib,mnesia,minirest,emqx]},
{mod, {emqx_dashboard_app,[]}},
{env, []},
{licenses, ["Apache-2.0"]},

View File

@ -105,6 +105,7 @@ user_api() ->
Metadata = #{
delete => #{
description => <<"Delete dashboard users">>,
parameters => [path_param_username()],
responses => #{
<<"200">> => response_schema(<<"Delete User successfully">>),
<<"400">> => bad_request()
@ -112,6 +113,7 @@ user_api() ->
},
put => #{
description => <<"Update dashboard users">>,
parameters => [path_param_username()],
'requestBody' => request_body_schema(#{
type => object,
properties => #{
@ -127,6 +129,7 @@ user_api() ->
},
post => #{
description => <<"Create dashboard users">>,
parameters => [path_param_username()],
'requestBody' => request_body_schema(create_user),
responses => #{
<<"200">> => response_schema(<<"Create Users successfully">>),
@ -140,6 +143,7 @@ change_pwd_api() ->
Metadata = #{
put => #{
description => <<"Update dashboard users password">>,
parameters => [path_param_username()],
'requestBody' => request_body_schema(#{
type => object,
properties => #{
@ -159,6 +163,15 @@ change_pwd_api() ->
},
{"/change_pwd/:username", Metadata, change_pwd}.
path_param_username() ->
#{
name => username,
in => path,
required => true,
schema => #{type => string},
example => <<"admin">>
}.
-define(EMPTY(V), (V == undefined orelse V == <<>>)).
auth(post, Request) ->

View File

@ -0,0 +1,173 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2019 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
%% @doc Dashboard statistics collector.
%% Samples broker stats/metrics every `sample_interval' seconds, averages
%% them over one minute, and persists one #mqtt_collect{} record per minute
%% into the node-local table `emqx_collect'.  Records older than the
%% retention window are swept once a day.
-module(emqx_dashboard_collection).

-behaviour(gen_server).

-include("emqx_dashboard.hrl").
-include_lib("stdlib/include/ms_transform.hrl").

-export([ start_link/0
        ]).

%% gen_server callbacks
-export([ init/1
        , handle_call/3
        , handle_cast/2
        , handle_info/2
        , terminate/2
        , code_change/3
        ]).

-export([get_collect/0]).

-export([get_local_time/0]).

-boot_mnesia({mnesia, [boot]}).
-copy_mnesia({mnesia, [copy]}).

%% Mnesia bootstrap
-export([mnesia/1]).

-define(APP, emqx_dashboard).

%% Default sampling interval in seconds, used when `sample_interval' is
%% not configured.  Must divide 60 evenly (see count/0).
-define(DEFAULT_INTERVAL, 10). %% seconds

%% Empty per-minute accumulator: {Connections, Routes, Subscriptions}
%% sample lists.
-define(COLLECT, {[],[],[]}).

%% One day in milliseconds: cadence of the expired-data sweep.
-define(CLEAR_INTERVAL, 86400000).

%% Default retention period: seven days, in milliseconds.
-define(EXPIRE_INTERVAL, 86400000 * 7).

%% Create (boot) or copy the disc-only, local-content table holding the
%% per-minute #mqtt_collect{} records.
mnesia(boot) ->
    ok = ekka_mnesia:create_table(emqx_collect, [
            {type, set},
            {local_content, true},
            {disc_only_copies, [node()]},
            {record_name, mqtt_collect},
            {attributes, record_info(fields, mqtt_collect)}]);
mnesia(copy) ->
    mnesia:add_table_copy(emqx_collect, node(), disc_only_copies).

start_link() ->
    gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

%% @doc Live numbers for the dashboard:
%% {ReceivedPerSec, SentPerSec, Subscriptions, Connections}.
get_collect() -> gen_server:call(whereis(?MODULE), get_collect).

init([]) ->
    timer(timer:seconds(interval()), collect),
    %% First expiry sweep fires at the next local midnight, then daily.
    timer(get_today_remaining_seconds(), clear_expire_data),
    ExpireInterval = emqx_config:get([emqx_dashboard, monitor, interval], ?EXPIRE_INTERVAL),
    State = #{
        count => count(),             %% samples left before the next flush
        expire_interval => ExpireInterval,
        collect => ?COLLECT,          %% accumulated samples for this minute
        temp_collect => {0, 0, 0, 0}, %% {RecvRate, SentRate, RecvTotal, SentTotal}
        last_collects => {0, 0, 0}    %% counter totals at the previous flush
    },
    {ok, State}.

interval() ->
    emqx_config:get([?APP, sample_interval], ?DEFAULT_INTERVAL).

%% Samples per one-minute window; assumes interval() divides 60 evenly
%% (documented in the sample config).
count() ->
    60 div interval().

handle_call(get_collect, _From, State = #{temp_collect := {Received, Sent, _, _}}) ->
    {reply, {Received, Sent, collect(subscriptions), collect(connections)}, State, hibernate};
handle_call(_Req, _From, State) ->
    {reply, ok, State}.

handle_cast(_Req, State) ->
    {noreply, State}.

%% Last sample of the minute: flush the averaged window to storage and
%% reset the accumulators.
handle_info(collect, State = #{collect := Collect, count := 1, temp_collect := TempCollect, last_collects := LastCollect}) ->
    NewLastCollect = flush(collect_all(Collect), LastCollect),
    TempCollect1 = temp_collect(TempCollect),
    timer(timer:seconds(interval()), collect),
    {noreply, State#{count => count(),
                     collect => ?COLLECT,
                     temp_collect => TempCollect1,
                     last_collects => NewLastCollect}};
%% Intermediate sample: accumulate the gauges and count down.
handle_info(collect, State = #{count := Count, collect := Collect, temp_collect := TempCollect}) ->
    TempCollect1 = temp_collect(TempCollect),
    timer(timer:seconds(interval()), collect),
    {noreply, State#{count => Count - 1,
                     collect => collect_all(Collect),
                     temp_collect => TempCollect1}, hibernate};
%% Daily sweep: delete records older than the retention window.
handle_info(clear_expire_data, State = #{expire_interval := ExpireInterval}) ->
    timer(?CLEAR_INTERVAL, clear_expire_data),
    T1 = get_local_time(),
    Spec = ets:fun2ms(fun({_, T, _C} = Data) when (T1 - T) > ExpireInterval -> Data end),
    %% NOTE(review): reads/deletes go through dets here while flush/2 writes
    %% via mnesia:dirty_write — confirm both address the same disc-only table.
    Collects = dets:select(emqx_collect, Spec),
    lists:foreach(fun(Collect) ->
                      dets:delete_object(emqx_collect, Collect)
                  end, Collects),
    {noreply, State, hibernate};
handle_info(_Info, State) ->
    {noreply, State}.

terminate(_Reason, _State) ->
    ok.

code_change(_OldVsn, State, _Extra) ->
    {ok, State}.

%% Recompute per-second message rates from the counter deltas since the
%% previous sample, carrying the new totals forward.
temp_collect({_, _, Received, Sent}) ->
    Received1 = collect(received),
    Sent1 = collect(sent),
    {(Received1 - Received) div interval(),
     (Sent1 - Sent) div interval(),
     Received1,
     Sent1}.

%% Prepend the latest gauge readings to the per-minute sample lists.
collect_all({Connection, Route, Subscription}) ->
    {[collect(connections)| Connection],
     [collect(routes)| Route],
     [collect(subscriptions)| Subscription]}.

%% Gauge reads come from emqx_stats; monotonic counters from emqx_metrics.
collect(connections) ->
    emqx_stats:getstat('connections.count');
collect(routes) ->
    emqx_stats:getstat('routes.count');
collect(subscriptions) ->
    emqx_stats:getstat('subscriptions.count');
collect(received) ->
    emqx_metrics:val('messages.received');
collect(sent) ->
    emqx_metrics:val('messages.sent');
collect(dropped) ->
    emqx_metrics:val('messages.dropped').

%% Average the gauge samples, diff the counters since the previous flush,
%% and persist one record keyed by local-time milliseconds.  Returns the
%% new counter baseline {Received, Sent, Dropped}.
flush({Connection, Route, Subscription}, {Received0, Sent0, Dropped0}) ->
    Received = collect(received),
    Sent = collect(sent),
    Dropped = collect(dropped),
    Collect = {avg(Connection),
               avg(Route),
               avg(Subscription),
               diff(Received, Received0),
               diff(Sent, Sent0),
               diff(Dropped, Dropped0)},
    Ts = get_local_time(),
    _ = mnesia:dirty_write(emqx_collect, #mqtt_collect{timestamp = Ts, collect = Collect}),
    {Received, Sent, Dropped}.

avg(Items) ->
    lists:sum(Items) div count().

diff(Item0, Item1) ->
    Item0 - Item1.

%% NOTE(review): erlang:send_after/3 takes milliseconds; callers pass ms
%% values despite the parameter name `Secs'.
timer(Secs, Msg) ->
    erlang:send_after(Secs, self(), Msg).

%% Milliseconds remaining until the next local-time midnight.
get_today_remaining_seconds() ->
    ?CLEAR_INTERVAL - (get_local_time() rem ?CLEAR_INTERVAL).

%% Local wall-clock time as milliseconds since the Unix epoch
%% (second granularity, scaled by 1000).
get_local_time() ->
    (calendar:datetime_to_gregorian_seconds(calendar:local_time()) -
     calendar:datetime_to_gregorian_seconds({{1970,1,1}, {0,0,0}})) * 1000.

View File

@ -0,0 +1,205 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2019 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
%% @doc Dashboard monitor HTTP API.
%% Serves the per-minute samples persisted by emqx_dashboard_collection
%% ("/monitor") and live aggregate counters across the cluster
%% ("/monitor/current").
-module(emqx_dashboard_monitor_api).

-include("emqx_dashboard.hrl").

-behaviour(minirest_api).

%% Called on remote nodes via rpc:call/4, hence exported.
-export([ sampling/1
        , sampling/2
        , get_collect/1
        ]).

-export([api_spec/0]).

-export([counters/2, current_counters/2]).

%% Valid values of the `counter' query parameter; the order matches the
%% element positions of the stored collect tuple (see counter_index/1).
-define(COUNTERS, [ connection
                  , route
                  , subscriptions
                  , received
                  , sent
                  , dropped]).

api_spec() ->
    {[monitor_api(), monitor_current_api()], [counters_schema()]}.

%% OpenAPI metadata for GET /monitor (optional `node'/`counter' filters).
monitor_api() ->
    Metadata = #{
        get => #{
            description => <<"List monitor data">>,
            parameters => [
                #{
                    name => node,
                    in => query,
                    required => false,
                    schema => #{type => string},
                    example => node()
                },
                #{
                    name => counter,
                    in => query,
                    required => false,
                    schema => #{type => string, enum => ?COUNTERS}
                }
            ],
            responses => #{
                <<"200">> => emqx_mgmt_util:response_array_schema(<<"Monitor count data">>, counters)}}},
    {"/monitor", Metadata, counters}.

%% OpenAPI metadata for GET /monitor/current.
monitor_current_api() ->
    Metadata = #{
        get => #{
            description => <<"Current monitor data">>,
            responses => #{
                <<"200">> => emqx_mgmt_util:response_schema(<<"Current monitor data">>,
                    current_counters_schema())}}},
    {"/monitor/current", Metadata, current_counters}.

current_counters_schema() ->
    #{
        type => object,
        properties => #{
            nodes => #{
                type => integer,
                description => <<"Nodes count">>},
            connection => #{type => integer},
            sent => #{type => integer},
            received => #{type => integer},
            subscription => #{type => integer}}
    }.

%% Response schema for /monitor: one time-series per counter plus the
%% node name.
counters_schema() ->
    Node = #{
        node => #{
            type => string,
            example => node()
        }
    },
    Properties = lists:foldl(fun(K, M) -> maps:merge(M, counters_schema(K)) end, Node, ?COUNTERS),
    #{
        counters => #{
            type => object,
            properties => Properties}
    }.

counters_schema(Name) ->
    #{Name => #{
        type => array,
        items => #{
            type => object,
            properties => #{
                timestamp => #{
                    type => integer},
                count => #{
                    type => integer}}}}}.

%%%==============================================================================================
%% parameters trans

%% GET /monitor: no query parameters means full samples from every running
%% node; otherwise filter by node and/or counter.
counters(get, Request) ->
    case cowboy_req:parse_qs(Request) of
        [] ->
            Response = [sampling(Node) || Node <- ekka_mnesia:running_nodes()],
            {200, Response};
        Params ->
            lookup(Params)
    end.

%% GET /monitor/current: sum the live counters of all running nodes.
current_counters(get, _) ->
    Data = [get_collect(Node) || Node <- ekka_mnesia:running_nodes()],
    Nodes = length(ekka_mnesia:running_nodes()),
    {Received, Sent, Sub, Conn} = format_current_metrics(Data),
    Response = #{
        nodes => Nodes,
        received => Received,
        sent => Sent,
        subscription => Sub,
        connection => Conn
    },
    {200, Response}.

%%%==============================================================================================
%% api apply

%% Fold the [{BinKey, BinValue}] query pairs into an atom-keyed map and
%% dispatch on which filters are present.
lookup(Params) ->
    Fun =
        fun({K,V}, M) ->
            maps:put(binary_to_atom(K, utf8), binary_to_atom(V, utf8), M)
        end,
    lookup_(lists:foldl(Fun, #{}, Params)).

lookup_(#{node := Node, counter := Counter}) ->
    {200, sampling(Node, Counter)};
lookup_(#{node := Node}) ->
    {200, sampling(Node)};
lookup_(#{counter := Counter}) ->
    Data = [sampling(Node, Counter) || Node <- ekka_mnesia:running_nodes()],
    {200, Data}.

%% Element-wise sum of per-node {Received, Sent, Sub, Conn} tuples.
format_current_metrics(Collects) ->
    format_current_metrics(Collects, {0,0,0,0}).
format_current_metrics([], Acc) ->
    Acc;
format_current_metrics([{Received, Sent, Sub, Conn} | Collects], {Received1, Sent1, Sub1, Conn1}) ->
    format_current_metrics(Collects, {Received1 + Received, Sent1 + Sent, Sub1 + Sub, Conn1 + Conn}).

%% Live counters from the local collector, or via RPC for peers.
get_collect(Node) when Node =:= node() ->
    emqx_dashboard_collection:get_collect();
get_collect(Node) ->
    case rpc:call(Node, emqx_dashboard_collection, get_collect, []) of
        %% NOTE(review): #{} does not match the 4-tuple clause of
        %% format_current_metrics/2, so an unreachable node would crash
        %% current_counters/2 — confirm the intended fallback shape.
        {badrpc, _Reason} -> #{};
        Res -> Res
    end.

%% All samples of the last two hours (7200000 ms) for one node.
sampling(Node) when Node =:= node() ->
    Time = emqx_dashboard_collection:get_local_time() - 7200000,
    All = dets:select(emqx_collect, [{{mqtt_collect,'$1','$2'}, [{'>', '$1', Time}], ['$_']}]),
    maps:put(node, Node, format(lists:sort(All)));
sampling(Node) ->
    rpc:call(Node, ?MODULE, sampling, [Node]).

%% Same two-hour window, restricted to a single counter.
sampling(Node, Counter) when Node =:= node() ->
    Time = emqx_dashboard_collection:get_local_time() - 7200000,
    All = dets:select(emqx_collect, [{{mqtt_collect,'$1','$2'}, [{'>', '$1', Time}], ['$_']}]),
    maps:put(node, Node, format_single(lists:sort(All), Counter));
sampling(Node, Counter) ->
    rpc:call(Node, ?MODULE, sampling, [Node, Counter]).

%% Split the time-ordered records into one [[Ts, Count], ...] series per
%% counter, then convert to timestamp/count maps.
format(Collects) ->
    format(Collects, {[],[],[],[],[],[]}).
format([], {Connection, Route, Subscription, Received, Sent, Dropped}) ->
    #{
        connection => add_key(Connection),
        route => add_key(Route),
        subscriptions => add_key(Subscription),
        received => add_key(Received),
        sent => add_key(Sent),
        dropped => add_key(Dropped)
    };
format([#mqtt_collect{timestamp = Ts, collect = {C, R, S, Re, S1, D}} | Collects],
       {Connection, Route, Subscription, Received, Sent, Dropped}) ->
    format(Collects, {[[Ts, C] | Connection],
                      [[Ts, R] | Route],
                      [[Ts, S] | Subscription],
                      [[Ts, Re] | Received],
                      [[Ts, S1] | Sent],
                      [[Ts, D] | Dropped]}).

%% Restore chronological order (series were built newest-first).
add_key(Collects) ->
    lists:reverse([#{timestamp => Ts, count => C} || [Ts, C] <- Collects]).

format_single(Collects, Counter) ->
    #{Counter => format_single(Collects, counter_index(Counter), [])}.
format_single([], _Index, Acc) ->
    lists:reverse(Acc);
format_single([#mqtt_collect{timestamp = Ts, collect = Collect} | Collects], Index, Acc) ->
    format_single(Collects, Index,
                  [#{timestamp => Ts, count => erlang:element(Index, Collect)} | Acc]).

%% Position of each counter within the stored collect tuple
%% (see flush/2 in emqx_dashboard_collection).
counter_index(connection) -> 1;
counter_index(route) -> 2;
counter_index(subscriptions) -> 3;
counter_index(received) -> 4;
counter_index(sent) -> 5;
counter_index(dropped) -> 6.

View File

@ -27,6 +27,7 @@ fields("emqx_dashboard") ->
hoconsc:ref(?MODULE, "https")]))}
, {default_username, fun default_username/1}
, {default_password, fun default_password/1}
, {sample_interval, emqx_schema:t(emqx_schema:duration_s(), undefined, "10s")}
];
fields("http") ->

View File

@ -28,5 +28,5 @@ start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
init([]) ->
{ok, { {one_for_all, 10, 100}, [?CHILD(emqx_dashboard_admin)] } }.
{ok, {{one_for_all, 10, 100},
[?CHILD(emqx_dashboard_admin), ?CHILD(emqx_dashboard_collection)]}}.

View File

@ -5,7 +5,8 @@
{mod, {emqx_data_bridge_app, []}},
{applications,
[kernel,
stdlib
stdlib,
emqx
]},
{env,[]},
{modules, []},

View File

@ -3,12 +3,14 @@
##====================================================================
exhook: {
server.default: {
url: "http://127.0.0.1:9000"
#ssl: {
# cacertfile: "{{ platform_etc_dir }}/certs/cacert.pem"
# certfile: "{{ platform_etc_dir }}/certs/cert.pem"
# keyfile: "{{ platform_etc_dir }}/certs/key.pem"
#}
}
servers: [
# { name: "default"
# url: "http://127.0.0.1:9000"
# #ssl: {
# # cacertfile: "{{ platform_etc_dir }}/certs/cacert.pem"
# # certfile: "{{ platform_etc_dir }}/certs/cert.pem"
# # keyfile: "{{ platform_etc_dir }}/certs/key.pem"
# #}
# }
]
}

View File

@ -267,7 +267,7 @@ message BrokerInfo {
string sysdescr = 2;
string uptime = 3;
int64 uptime = 3;
string datetime = 4;
}

View File

@ -4,7 +4,7 @@
{modules, []},
{registered, []},
{mod, {emqx_exhook_app, []}},
{applications, [kernel,stdlib,grpc]},
{applications, [kernel,stdlib,grpc,emqx]},
{env,[]},
{licenses, ["Apache-2.0"]},
{maintainers, ["EMQ X Team <contact@emqx.io>"]},

View File

@ -40,7 +40,7 @@
list() ->
[server(Name) || Name <- running()].
-spec enable(atom()|string(), map()) -> ok | {error, term()}.
-spec enable(binary(), map()) -> ok | {error, term()}.
enable(Name, Options) ->
case lists:member(Name, running()) of
true ->
@ -55,7 +55,7 @@ enable(Name, Options) ->
end
end.
-spec disable(atom()|string()) -> ok | {error, term()}.
-spec disable(binary()) -> ok | {error, term()}.
disable(Name) ->
case server(Name) of
undefined -> {error, not_running};
@ -111,7 +111,6 @@ save(Name, ServiceState) ->
persistent_term:put(?APP, lists:reverse([Name | Saved])),
persistent_term:put({?APP, Name}, ServiceState).
-compile({inline, [unsave/1]}).
unsave(Name) ->
case persistent_term:get(?APP, []) of
[] ->
@ -122,11 +121,9 @@ unsave(Name) ->
persistent_term:erase({?APP, Name}),
ok.
-compile({inline, [running/0]}).
running() ->
persistent_term:get(?APP, []).
-compile({inline, [server/1]}).
server(Name) ->
case catch persistent_term:get({?APP, Name}) of
{'EXIT', {badarg,_}} -> undefined;

View File

@ -65,10 +65,14 @@ stop(_State) ->
%%--------------------------------------------------------------------
load_all_servers() ->
_ = maps:map(fun(Name, Options) ->
load_server(Name, Options)
end, emqx_config:get([exhook, server])),
ok.
try
lists:foreach(fun(#{name := Name} = Options) ->
load_server(Name, maps:remove(name, Options))
end, emqx_config:get([exhook, servers]))
catch
_Class : _Reason ->
ok
end, ok.
unload_all_servers() ->
emqx_exhook:disable_all().

View File

@ -29,8 +29,8 @@ cli(["server", "list"]) ->
cli(["server", "enable", Name0]) ->
if_enabled(fun() ->
Name = list_to_atom(Name0),
case maps:get(Name, emqx_config:get([exhook, server]), undefined) of
Name = iolist_to_binary(Name0),
case find_server_options(Name) of
undefined ->
emqx_ctl:print("not_found~n");
Opts ->
@ -40,7 +40,7 @@ cli(["server", "enable", Name0]) ->
cli(["server", "disable", Name]) ->
if_enabled(fun() ->
print(emqx_exhook:disable(list_to_atom(Name)))
print(emqx_exhook:disable(iolist_to_binary(Name)))
end);
cli(["server", "stats"]) ->
@ -59,6 +59,14 @@ print(ok) ->
print({error, Reason}) ->
emqx_ctl:print("~p~n", [Reason]).
find_server_options(Name) ->
Ls = emqx_config:get([exhook, servers]),
case [ E || E = #{name := N} <- Ls, N =:= Name] of
[] -> undefined;
[Options] ->
maps:remove(name, Options)
end.
%%--------------------------------------------------------------------
%% Internal funcs
%%--------------------------------------------------------------------

View File

@ -29,13 +29,11 @@
-export([structs/0, fields/1]).
-export([t/1, t/3, t/4, ref/1]).
structs() -> [server].
structs() -> [servers].
fields(server) ->
[{"$name", t(ref(server_structs))}];
fields(server_structs) ->
[ {url, t(string(), "emqx_exhook.url", "")}
fields(servers) ->
[ {name, string()}
, {url, string()}
, {ssl, t(ref(ssl_conf_group))}
];

View File

@ -84,7 +84,7 @@
%% Load/Unload APIs
%%--------------------------------------------------------------------
-spec load(atom(), options()) -> {ok, server()} | {error, term()} .
-spec load(binary(), options()) -> {ok, server()} | {error, term()} .
load(Name0, Opts0) ->
Name = to_list(Name0),
{SvrAddr, ClientOpts} = channel_opts(Opts0),
@ -160,7 +160,10 @@ do_deinit(Name) ->
ok.
do_init(ChannName) ->
Req = #{broker => maps:from_list(emqx_sys:info())},
%% BrokerInfo defined at: exhook.protos
BrokerInfo = maps:with([version, sysdescr, uptime, datetime],
maps:from_list(emqx_sys:info())),
Req = #{broker => BrokerInfo},
case do_call(ChannName, 'on_provider_loaded', Req) of
{ok, InitialResp} ->
try

View File

@ -23,7 +23,13 @@
-include_lib("common_test/include/ct.hrl").
-define(CONF_DEFAULT, <<"
exhook: { server.default: { url: \"http://127.0.0.1:9000\" } }
exhook: {
servers: [
{ name: \"default\"
url: \"http://127.0.0.1:9000\"
}
]
}
">>).
%%--------------------------------------------------------------------
@ -47,10 +53,10 @@ end_per_suite(_Cfg) ->
%%--------------------------------------------------------------------
t_noserver_nohook(_) ->
emqx_exhook:disable(default),
emqx_exhook:disable(<<"default">>),
?assertEqual([], loaded_exhook_hookpoints()),
Opts = emqx_config:get([exhook, server, default]),
ok = emqx_exhook:enable(default, Opts),
[#{name := Name} = Opts] = emqx_config:get([exhook, servers]),
ok = emqx_exhook:enable(Name, Opts),
?assertNotEqual([], loaded_exhook_hookpoints()).
t_cli_list(_) ->

View File

@ -31,7 +31,13 @@
]).
-define(CONF_DEFAULT, <<"
exhook: { server.default: { url: \"http://127.0.0.1:9000\" } }
exhook: {
servers: [
{ name: \"default\"
url: \"http://127.0.0.1:9000\"
}
]
}
">>).
-define(ALL(Vars, Types, Exprs),

View File

@ -1,82 +0,0 @@
##--------------------------------------------------------------------
## CoAP Gateway
##--------------------------------------------------------------------
## The IP and UDP port that CoAP bind with.
##
## Default: "0.0.0.0:5683"
##
## Examples:
## coap.bind.udp.x = "0.0.0.0:5683" | ":::5683" | "127.0.0.1:5683" | "::1:5683"
##
coap.bind.udp.1 = "0.0.0.0:5683"
##coap.bind.udp.2 = "0.0.0.0:6683"
## Whether to enable statistics for CoAP clients.
##
## Value: on | off
coap.enable_stats = off
##------------------------------------------------------------------------------
## DTLS options
## The DTLS port that CoAP is listening on.
##
## Default: "0.0.0.0:5684"
##
## Examples:
## coap.bind.dtls.x = "0.0.0.0:5684" | ":::5684" | "127.0.0.1:5684" | "::1:5684"
##
coap.bind.dtls.1 = "0.0.0.0:5684"
##coap.bind.dtls.2 = "0.0.0.0:6684"
## A server only does x509-path validation in mode verify_peer,
## as it then sends a certificate request to the client (this
## message is not sent if the verify option is verify_none).
## You can then also want to specify option fail_if_no_peer_cert.
## More information at: http://erlang.org/doc/man/ssl.html
##
## Value: verify_peer | verify_none
## coap.dtls.verify = verify_peer
## Private key file for DTLS
##
## Value: File
coap.dtls.keyfile = "{{ platform_etc_dir }}/certs/key.pem"
## Server certificate for DTLS.
##
## Value: File
coap.dtls.certfile = "{{ platform_etc_dir }}/certs/cert.pem"
## PEM-encoded CA certificates for DTLS
##
## Value: File
## coap.dtls.cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
## Used together with {verify, verify_peer} by an SSL server. If set to true,
## the server fails if the client does not have a certificate to send, that is,
## sends an empty certificate.
##
## Value: true | false
## coap.dtls.fail_if_no_peer_cert = false
## This is the single most important configuration option of an Erlang SSL
## application. Ciphers (and their ordering) define the way the client and
## server encrypt information over the wire, from the initial Diffie-Hellman
## key exchange, the session key encryption algorithm and the message
## digest algorithm. Selecting a good cipher suite is critical for the
## applications data security, confidentiality and performance.
##
## The cipher list above offers:
##
## A good balance between compatibility with older browsers.
## It can get stricter for Machine-To-Machine scenarios.
## Perfect Forward Secrecy.
## No old/insecure encryption and HMAC algorithms
##
## Most of it was copied from Mozilla's Server Side TLS article
##
## Value: Ciphers
coap.dtls.ciphers = "ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-DES-CBC3-SHA,ECDH-ECDSA-AES256-GCM-SHA384,ECDH-RSA-AES256-GCM-SHA384,ECDH-ECDSA-AES256-SHA384,ECDH-RSA-AES256-SHA384,DHE-DSS-AES256-GCM-SHA384,DHE-DSS-AES256-SHA256,AES256-GCM-SHA384,AES256-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDH-ECDSA-AES128-GCM-SHA256,ECDH-RSA-AES128-GCM-SHA256,ECDH-ECDSA-AES128-SHA256,ECDH-RSA-AES128-SHA256,DHE-DSS-AES128-GCM-SHA256,DHE-DSS-AES128-SHA256,AES128-GCM-SHA256,AES128-SHA256,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-DSS-AES256-SHA,ECDH-ECDSA-AES256-SHA,ECDH-RSA-AES256-SHA,AES256-SHA,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,DHE-DSS-AES128-SHA,ECDH-ECDSA-AES128-SHA,ECDH-RSA-AES128-SHA,AES128-SHA"

View File

@ -16,7 +16,17 @@ gateway: {
password: "${Packet.headers.passcode}"
}
authenticator: allow_anonymous
authentication: {
enable: true
authenticators: [
{
name: "authenticator1"
mechanism: password-based
server_type: built-in-database
user_id_type: clientid
}
]
}
listener.tcp.1: {
bind: 61613
@ -29,7 +39,7 @@ gateway: {
coap.1: {
enable_stats: false
authenticator: allow_anonymous
authentication.enable: false
heartbeat: 30s
resource: mqtt
notify_type: qos
@ -42,7 +52,7 @@ gateway: {
coap.2: {
enable_stats: false
authenticator: allow_anonymous
authentication.enable:false
heartbeat: 30s
resource: pubsub
notify_type: non
@ -114,7 +124,7 @@ gateway: {
#ssl.cacertfile:
}
authenticator: allow_anonymous
authentication.enable: false
listener.tcp.1: {
bind: 7993

View File

@ -1,149 +0,0 @@
##--------------------------------------------------------------------
## LwM2M Gateway
##--------------------------------------------------------------------
##--------------------------------------------------------------------
## Protocols
# To Limit the range of lifetime, in seconds
lwm2m.lifetime_min = 1s
lwm2m.lifetime_max = 86400s
# The time window for Q Mode, indicating that after how long time
# the downlink commands sent to the client will be cached.
#lwm2m.qmode_time_window = 22
# Auto send observe command to device. It can be configured as an ObjectList
# so that emqx will automatically observe the objects in this list.
#
# For examples: "/3/0,/3/0/1,/32976"
#
# Value: off | on | String
#lwm2m.auto_observe = off
# The topic subscribed by the lwm2m client after it is connected
# Placeholders supported:
# '%e': Endpoint Name
# '%a': IP Address
lwm2m.mountpoint = "lwm2m/%e/"
# The topic subscribed by the lwm2m client after it is connected
# Placeholders supported:
# '%e': Endpoint Name
# '%a': IP Address
lwm2m.topics.command = "dn/#"
# The topic to which the lwm2m client's response is published
lwm2m.topics.response = "up/resp"
# The topic to which the lwm2m client's notify message is published
lwm2m.topics.notify = "up/notify"
# The topic to which the lwm2m client's register message is published
lwm2m.topics.register = "up/resp"
# The topic to which the lwm2m client's update message is published
lwm2m.topics.update = "up/resp"
# When publish the update message.
#
# Can be one of:
# - contains_object_list: only if the update message contains object list
# - always: always publish the update message
#
# Defaults to contains_object_list
#lwm2m.update_msg_publish_condition = contains_object_list
# Dir where the object definition files can be found
lwm2m.xml_dir = "{{ platform_etc_dir }}/lwm2m_xml"
##--------------------------------------------------------------------
## UDP Listener options
## The IP and port of the LwM2M Gateway
##
## Default: "0.0.0.0:5683"
## Examples:
## lwm2m.bind.udp.x = "0.0.0.0:5683" | ":::5683" | "127.0.0.1:5683" | "::1:5683"
lwm2m.bind.udp.1 = "0.0.0.0:5683"
#lwm2m.bind.udp.2 = "0.0.0.0:6683"
## Socket options, used for performance tuning
##
## Examples:
## lwm2m.opts.$name = $value
## See: https://erlang.org/doc/man/gen_udp.html#type-option
lwm2m.opts.buffer = 1024KB
lwm2m.opts.recbuf = 1024KB
lwm2m.opts.sndbuf = 1024KB
lwm2m.opts.read_packets = 20
##--------------------------------------------------------------------
## DTLS Listener Options
## The DTLS port that LwM2M is listening on.
##
## Default: "0.0.0.0:5684"
##
## Examples:
## lwm2m.bind.dtls.x = "0.0.0.0:5684" | ":::5684" | "127.0.0.1:5684" | "::1:5684"
##
lwm2m.bind.dtls.1 = "0.0.0.0:5684"
#lwm2m.bind.dtls.2 = "0.0.0.0:6684"
## A server only does x509-path validation in mode verify_peer,
## as it then sends a certificate request to the client (this
## message is not sent if the verify option is verify_none).
## You can then also want to specify option fail_if_no_peer_cert.
## More information at: http://erlang.org/doc/man/ssl.html
##
## Value: verify_peer | verify_none
#lwm2m.dtls.verify = verify_peer
## Private key file for DTLS
##
## Value: File
lwm2m.dtls.keyfile = "{{ platform_etc_dir }}/certs/key.pem"
## Server certificate for DTLS.
##
## Value: File
lwm2m.dtls.certfile = "{{ platform_etc_dir }}/certs/cert.pem"
## PEM-encoded CA certificates for DTLS
##
## Value: File
#lwm2m.dtls.cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
## Used together with {verify, verify_peer} by an SSL server. If set to true,
## the server fails if the client does not have a certificate to send, that is,
## sends an empty certificate.
##
## Value: true | false
#lwm2m.dtls.fail_if_no_peer_cert = false
## This is the single most important configuration option of an Erlang SSL
## application. Ciphers (and their ordering) define the way the client and
## server encrypt information over the wire, from the initial Diffie-Hellman
## key exchange, the session key encryption algorithm and the message
## digest algorithm. Selecting a good cipher suite is critical for the
## applications data security, confidentiality and performance.
##
## The cipher list above offers:
##
## A good balance between compatibility with older browsers.
## It can get stricter for Machine-To-Machine scenarios.
## Perfect Forward Secrecy.
## No old/insecure encryption and HMAC algorithms
##
## Most of it was copied from Mozilla's Server Side TLS article
##
## Value: Ciphers
lwm2m.dtls.ciphers = "ECDHE-ECDSA-AES256-GCM-SHA384,ECDHE-RSA-AES256-GCM-SHA384,ECDHE-ECDSA-AES256-SHA384,ECDHE-RSA-AES256-SHA384,ECDHE-ECDSA-DES-CBC3-SHA,ECDH-ECDSA-AES256-GCM-SHA384,ECDH-RSA-AES256-GCM-SHA384,ECDH-ECDSA-AES256-SHA384,ECDH-RSA-AES256-SHA384,DHE-DSS-AES256-GCM-SHA384,DHE-DSS-AES256-SHA256,AES256-GCM-SHA384,AES256-SHA256,ECDHE-ECDSA-AES128-GCM-SHA256,ECDHE-RSA-AES128-GCM-SHA256,ECDHE-ECDSA-AES128-SHA256,ECDHE-RSA-AES128-SHA256,ECDH-ECDSA-AES128-GCM-SHA256,ECDH-RSA-AES128-GCM-SHA256,ECDH-ECDSA-AES128-SHA256,ECDH-RSA-AES128-SHA256,DHE-DSS-AES128-GCM-SHA256,DHE-DSS-AES128-SHA256,AES128-GCM-SHA256,AES128-SHA256,ECDHE-ECDSA-AES256-SHA,ECDHE-RSA-AES256-SHA,DHE-DSS-AES256-SHA,ECDH-ECDSA-AES256-SHA,ECDH-RSA-AES256-SHA,AES256-SHA,ECDHE-ECDSA-AES128-SHA,ECDHE-RSA-AES128-SHA,DHE-DSS-AES128-SHA,ECDH-ECDSA-AES128-SHA,ECDH-RSA-AES128-SHA,AES128-SHA"
## Ciphers for TLS PSK.
##
## Note that 'lwm2m.dtls.ciphers' and 'lwm2m.dtls.psk_ciphers' cannot
## be configured at the same time.
## See 'https://tools.ietf.org/html/rfc4279#section-2'.
#lwm2m.dtls.psk_ciphers = "PSK-AES128-CBC-SHA,PSK-AES256-CBC-SHA,PSK-3DES-EDE-CBC-SHA,PSK-RC4-SHA"

View File

@ -1,90 +0,0 @@
%%-*- mode: erlang -*-
%% emqx_coap config mapping
{mapping, "coap.bind.udp.$number", "emqx_coap.bind_udp", [
{datatype, ip},
{default, "0.0.0.0:5683"}
]}.
{mapping, "coap.enable_stats", "emqx_coap.enable_stats", [
{datatype, flag}
]}.
{mapping, "coap.bind.dtls.$number", "emqx_coap.bind_dtls", [
{datatype, ip},
{default, "0.0.0.0:5684"}
]}.
{mapping, "coap.dtls.keyfile", "emqx_coap.dtls_opts", [
{datatype, string}
]}.
{mapping, "coap.dtls.certfile", "emqx_coap.dtls_opts", [
{datatype, string}
]}.
{mapping, "coap.dtls.verify", "emqx_coap.dtls_opts", [
{default, verify_none},
{datatype, {enum, [verify_none, verify_peer]}}
]}.
{mapping, "coap.dtls.cacertfile", "emqx_coap.dtls_opts", [
{datatype, string}
]}.
{mapping, "coap.dtls.fail_if_no_peer_cert", "emqx_coap.dtls_opts", [
{datatype, {enum, [true, false]}}
]}.
{mapping, "coap.dtls.ciphers", "emqx_coap.dtls_opts", [
{datatype, string}
]}.
{translation, "emqx_coap.bind_udp", fun(Conf) ->
Options = cuttlefish_variable:filter_by_prefix("coap.bind.udp", Conf),
lists:map(fun({_, Bind}) ->
{Ip, Port} = cuttlefish_datatypes:from_string(Bind, ip),
Opts = case inet:parse_address(Ip) of
{ok, {_,_,_,_} = Address} ->
[inet, {ip, Address}];
{ok, {_,_,_,_,_,_,_,_} = Address} ->
[inet6, {ip, Address}]
end,
{Port, Opts}
end, Options)
end}.
{translation, "emqx_coap.bind_dtls", fun(Conf) ->
Options = cuttlefish_variable:filter_by_prefix("coap.bind.dtls", Conf),
lists:map(fun({_, Bind}) ->
{Ip, Port} = cuttlefish_datatypes:from_string(Bind, ip),
Opts = case inet:parse_address(Ip) of
{ok, {_,_,_,_} = Address} ->
[inet, {ip, Address}];
{ok, {_,_,_,_,_,_,_,_} = Address} ->
[inet6, {ip, Address}]
end,
{Port, Opts}
end, Options)
end}.
{translation, "emqx_coap.dtls_opts", fun(Conf) ->
Filter = fun(Opts) -> [{K, V} || {K, V} <- Opts, V =/= undefined] end,
%% Ciphers
SplitFun = fun(undefined) -> undefined; (S) -> string:tokens(S, ",") end,
Ciphers =
case cuttlefish:conf_get("coap.dtls.ciphers", Conf, undefined) of
undefined ->
lists:append([ssl:cipher_suites(all, V, openssl) || V <- ['dtlsv1.2', 'dtlsv1']]);
C ->
SplitFun(C)
end,
Filter([{verify, cuttlefish:conf_get("coap.dtls.verify", Conf, undefined)},
{keyfile, cuttlefish:conf_get("coap.dtls.keyfile", Conf, undefined)},
{certfile, cuttlefish:conf_get("coap.dtls.certfile", Conf, undefined)},
{cacertfile, cuttlefish:conf_get("coap.dtls.cacertfile", Conf, undefined)},
{fail_if_no_peer_cert, cuttlefish:conf_get("coap.dtls.fail_if_no_peer_cert", Conf, undefined)},
{ciphers, Ciphers}])
end}.

View File

@ -1,38 +0,0 @@
%%-*- mode: erlang -*-
%% gRPC endpoint URL of one named exhook server (http or https).
{mapping, "exhook.server.$name.url", "emqx_exhook.servers", [
{datatype, string}
]}.
%% PEM-encoded CA certificate bundle for verifying the https endpoint.
{mapping, "exhook.server.$name.ssl.cacertfile", "emqx_exhook.servers", [
{datatype, string}
]}.
%% Client certificate presented to the https endpoint.
{mapping, "exhook.server.$name.ssl.certfile", "emqx_exhook.servers", [
{datatype, string}
]}.
%% Private key matching the client certificate above.
{mapping, "exhook.server.$name.ssl.keyfile", "emqx_exhook.servers", [
{datatype, string}
]}.
%% Build `{ServerName, GrpcClientOpts}` pairs for every configured
%% `exhook.server.<name>` entry, keyed off its `url` setting.
{translation, "emqx_exhook.servers", fun(Conf) ->
%% Drop options whose value was never configured.
Filter = fun(Opts) -> [{K, V} || {K, V} <- Opts, V =/= undefined] end,
%% Derive scheme/host/port (and, for https, ssl options) from the URL.
%% NOTE(review): http_uri:parse/1 is deprecated and removed in recent
%% OTP releases — confirm the target OTP before reusing this schema.
ServerOptions = fun(Prefix) ->
case http_uri:parse(cuttlefish:conf_get(Prefix ++ ".url", Conf)) of
{ok, {http, _, Host, Port, _, _}} ->
[{scheme, http}, {host, Host}, {port, Port}];
{ok, {https, _, Host, Port, _, _}} ->
[{scheme, https}, {host, Host}, {port, Port},
{ssl_options,
Filter([{ssl, true},
{certfile, cuttlefish:conf_get(Prefix ++ ".ssl.certfile", Conf, undefined)},
{keyfile, cuttlefish:conf_get(Prefix ++ ".ssl.keyfile", Conf, undefined)},
{cacertfile, cuttlefish:conf_get(Prefix ++ ".ssl.cacertfile", Conf, undefined)}
])}];
%% Any unparsable or non-http(s) URL aborts the config load.
_ -> error(invalid_server_options)
end
end,
%% One entry per server that has a `url` key configured.
[{list_to_atom(Name), ServerOptions("exhook.server." ++ Name)}
|| {["exhook", "server", Name, "url"], _} <- cuttlefish_variable:filter_by_prefix("exhook.server", Conf)]
end}.

View File

@ -1,220 +0,0 @@
%% -*-: erlang -*-
%% Listen address(es) for the plain-UDP LwM2M listener ("IP:Port").
{mapping, "lwm2m.bind.udp.$number", "emqx_lwm2m.bind_udp", [
{datatype, ip},
{default, "0.0.0.0:5683"}
]}.
%% Listen address(es) for the DTLS LwM2M listener ("IP:Port").
{mapping, "lwm2m.bind.dtls.$number", "emqx_lwm2m.bind_dtls", [
{datatype, ip},
{default, "0.0.0.0:5684"}
]}.
%% Lower bound accepted for a client-requested registration lifetime.
{mapping, "lwm2m.lifetime_min", "emqx_lwm2m.lifetime_min", [
{datatype, {duration, s}},
{default, 0}
]}.
%% Upper bound accepted for a client-requested registration lifetime.
{mapping, "lwm2m.lifetime_max", "emqx_lwm2m.lifetime_max", [
{datatype, {duration, s}},
{default, 315360000} %% 10 years
]}.
%% Time window for queue-mode (Q) clients; 0 disables the window.
{mapping, "lwm2m.qmode_time_window", "emqx_lwm2m.qmode_time_window", [
{datatype, integer},
{default, 0}
]}.
%% "on"/"off" are kept for backward compatibility (v4.3.0); any other
%% value is parsed as an object list by the translation below.
{mapping, "lwm2m.auto_observe", "emqx_lwm2m.auto_observe", [
{datatype, string},
{default, "off"} %% BACKW: v4.3.0
]}.
%% Obsolete load-balancing option; only kept so the translation can
%% warn users that it no longer has any effect.
{mapping, "lwm2m.lb", "emqx_lwm2m.options", [
{datatype, atom},
{default, undefined}
]}.
%% Free-form byte-size listener options, e.g. buffer sizes.
{mapping, "lwm2m.opts.$name", "emqx_lwm2m.options", [
{datatype, bytesize}
]}.
%% Decode `lwm2m.auto_observe`: the legacy "on"/"off" switches map to
%% booleans (BACKW: v4.3.0); anything else is a comma/space separated
%% list of objects to observe.
{translation, "emqx_lwm2m.auto_observe", fun(Conf) ->
    Value = cuttlefish:conf_get("lwm2m.auto_observe", Conf, "off"),
    if
        Value =:= "on"  -> true;
        Value =:= "off" -> false;
        true            -> string:tokens(Value, ", ")
    end
end}.
%% Turn every `lwm2m.bind.udp.$number` entry into a `{Port, SocketOpts}`
%% pair for the plain-UDP LwM2M listener.
{translation, "emqx_lwm2m.bind_udp", fun(Conf) ->
    Options = cuttlefish_variable:filter_by_prefix("lwm2m.bind.udp", Conf),
    lists:map(fun({_, Bind}) ->
        %% `Bind` is an "IP:Port" string; split it with the cuttlefish
        %% `ip` datatype, then classify the address as IPv4 or IPv6.
        {Ip, Port} = cuttlefish_datatypes:from_string(Bind, ip),
        Opts = case inet:parse_address(Ip) of
                   {ok, {_,_,_,_} = Address} ->
                       [inet, {ip, Address}];
                   {ok, {_,_,_,_,_,_,_,_} = Address} ->
                       [inet6, {ip, Address}];
                   {error, _} ->
                       %% Fail the config load with a clear message instead
                       %% of crashing with a case_clause on a bad address.
                       cuttlefish:invalid("bad IP address in lwm2m.bind.udp: " ++ Ip)
               end,
        {Port, Opts}
    end, Options)
end}.
%% Turn every `lwm2m.bind.dtls.$number` entry into a `{Port, SocketOpts}`
%% pair for the DTLS LwM2M listener.
{translation, "emqx_lwm2m.bind_dtls", fun(Conf) ->
    Options = cuttlefish_variable:filter_by_prefix("lwm2m.bind.dtls", Conf),
    lists:map(fun({_, Bind}) ->
        %% `Bind` is an "IP:Port" string; split it with the cuttlefish
        %% `ip` datatype, then classify the address as IPv4 or IPv6.
        {Ip, Port} = cuttlefish_datatypes:from_string(Bind, ip),
        Opts = case inet:parse_address(Ip) of
                   {ok, {_,_,_,_} = Address} ->
                       [inet, {ip, Address}];
                   {ok, {_,_,_,_,_,_,_,_} = Address} ->
                       [inet6, {ip, Address}];
                   {error, _} ->
                       %% Fail the config load with a clear message instead
                       %% of crashing with a case_clause on a bad address.
                       cuttlefish:invalid("bad IP address in lwm2m.bind.dtls: " ++ Ip)
               end,
        {Port, Opts}
    end, Options)
end}.
%% Turn every `lwm2m.opts.<name>` entry into a `{<name>, Bytesize}` pair
%% for the listener options, and warn operators who still have the
%% long-removed `lwm2m.lb` setting in their config file.
{translation, "emqx_lwm2m.options", fun(Conf) ->
    Options = cuttlefish_variable:filter_by_prefix("lwm2m.opts", Conf),
    Opts = [{list_to_atom(Key), Value} || {[_, _, Key], Value} <- Options],
    %% `lwm2m.lb` no longer has any effect; tell the operator instead of
    %% silently ignoring it.
    case cuttlefish:conf_get("lwm2m.lb", Conf, undefined) of
        undefined -> ok;
        _ ->
            cuttlefish:warn("The 'lwm2m.lb' option has been removed since v4.2.0!")
    end,
    Opts
end}.
%% Topic prefix prepended to all LwM2M topics ("" means none).
{mapping, "lwm2m.mountpoint", "emqx_lwm2m.mountpoint", [
{datatype, string},
{default, ""}
]}.
%% Downlink command topic pattern; %e is replaced by the endpoint name.
{mapping, "lwm2m.topics.command", "emqx_lwm2m.topics", [
{datatype, string},
{default, "lwm2m/%e/dn/#"}
]}.
%% Uplink response topic pattern.
{mapping, "lwm2m.topics.response", "emqx_lwm2m.topics", [
{datatype, string},
{default, "lwm2m/%e/up/resp"}
]}.
%% Uplink notify topic pattern.
{mapping, "lwm2m.topics.notify", "emqx_lwm2m.topics", [
{datatype, string},
{default, "lwm2m/%e/up/notify"}
]}.
%% Topic used for register messages (defaults to the response topic).
{mapping, "lwm2m.topics.register", "emqx_lwm2m.topics", [
{datatype, string},
{default, "lwm2m/%e/up/resp"}
]}.
%% Topic used for update messages (defaults to the response topic).
{mapping, "lwm2m.topics.update", "emqx_lwm2m.topics", [
{datatype, string},
{default, "lwm2m/%e/up/resp"}
]}.
%% When to publish an update message: only when it carries an object
%% list, or on every update.
{mapping, "lwm2m.update_msg_publish_condition", "emqx_lwm2m.update_msg_publish_condition", [
{datatype, {enum, [contains_object_list, always]}},
{default, contains_object_list}
]}.
%% Map each `lwm2m.topics.<name>` config entry to a `{<name>, Topic}`
%% pair consumed by the LwM2M gateway.
{translation, "emqx_lwm2m.topics", fun(Conf) ->
    [{list_to_atom(Name), Topic}
     || {[_, _, Name], Topic} <- cuttlefish_variable:filter_by_prefix("lwm2m.topics", Conf)]
end}.
%% Directory containing the LwM2M object-definition XML files.
{mapping, "lwm2m.xml_dir", "emqx_lwm2m.xml_dir", [
{datatype, string}
]}.
%% Planned for removal in v5.0-alpha.1; please use lwm2m.dtls.keyfile instead.
{mapping, "lwm2m.keyfile", "emqx_lwm2m.dtls_opts", [
{datatype, string}
]}.
%% Planned for removal in v5.0-alpha.1; please use lwm2m.dtls.certfile instead.
{mapping, "lwm2m.certfile", "emqx_lwm2m.dtls_opts", [
{datatype, string}
]}.
%% Private key for the DTLS listener.
{mapping, "lwm2m.dtls.keyfile", "emqx_lwm2m.dtls_opts", [
{datatype, string}
]}.
%% Server certificate for the DTLS listener.
{mapping, "lwm2m.dtls.certfile", "emqx_lwm2m.dtls_opts", [
{datatype, string}
]}.
%% Whether the DTLS listener verifies the client certificate.
{mapping, "lwm2m.dtls.verify", "emqx_lwm2m.dtls_opts", [
{default, verify_none},
{datatype, {enum, [verify_none, verify_peer]}}
]}.
%% PEM-encoded CA certificate bundle used for peer verification.
{mapping, "lwm2m.dtls.cacertfile", "emqx_lwm2m.dtls_opts", [
{datatype, string}
]}.
%% Reject the handshake when the client presents no certificate
%% (only meaningful together with verify_peer).
{mapping, "lwm2m.dtls.fail_if_no_peer_cert", "emqx_lwm2m.dtls_opts", [
{datatype, {enum, [true, false]}}
]}.
%% Comma-separated cipher-suite names; mutually exclusive with
%% lwm2m.dtls.psk_ciphers (enforced by the translation below).
{mapping, "lwm2m.dtls.ciphers", "emqx_lwm2m.dtls_opts", [
{datatype, string}
]}.
%% Comma-separated PSK cipher-suite names; mutually exclusive with
%% lwm2m.dtls.ciphers (enforced by the translation below).
{mapping, "lwm2m.dtls.psk_ciphers", "emqx_lwm2m.dtls_opts", [
{datatype, string}
]}.
%% Assemble the DTLS option proplist for the LwM2M listener, handling
%% pre-v4.2 compatibility keys and the ciphers/psk_ciphers exclusivity.
{translation, "emqx_lwm2m.dtls_opts", fun(Conf) ->
%% Drop entries whose value was never configured.
Filter = fun(Opts) -> [{K, V} || {K, V} <- Opts, V =/= undefined] end,
%% Backward compatibility with versions before v4.2;
%% planned for removal at v5.0-alpha.1.
OldKey = cuttlefish:conf_get("lwm2m.keyfile", Conf, undefined),
OldCert = cuttlefish:conf_get("lwm2m.certfile", Conf, undefined),
%% Split a comma-separated cipher string into a list.
SplitFun = fun(undefined) -> undefined; (S) -> string:tokens(S, ",") end,
%% Regular (certificate-based) cipher suites, if configured.
Ciphers =
case cuttlefish:conf_get("lwm2m.dtls.ciphers", Conf, undefined) of
undefined ->
[];
C ->
[{ciphers, SplitFun(C)}]
end,
%% PSK cipher suites, if configured. Each OpenSSL-style PSK name is
%% mapped to its OTP ssl cipher tuple, and a user_lookup_fun is added
%% so the ssl app can resolve PSK identities via emqx_psk.
PskCiphers =
case cuttlefish:conf_get("lwm2m.dtls.psk_ciphers", Conf, undefined) of
undefined ->
[];
C2 ->
Psk = lists:map(fun("PSK-AES128-CBC-SHA") -> {psk, aes_128_cbc, sha};
("PSK-AES256-CBC-SHA") -> {psk, aes_256_cbc, sha};
("PSK-3DES-EDE-CBC-SHA") -> {psk, '3des_ede_cbc', sha};
("PSK-RC4-SHA") -> {psk, rc4_128, sha}
end, SplitFun(C2)),
[{ciphers, Psk}, {user_lookup_fun, {fun emqx_psk:lookup/3, <<>>}}]
end,
%% The two cipher families are mutually exclusive; abort config load
%% if both are set.
Ciphers /= []
andalso PskCiphers /= []
andalso cuttlefish:invalid("The 'lwm2m.dtls.ciphers' and 'lwm2m.dtls.psk_ciphers' cannot exist simultaneously."),
NCiphers = Ciphers ++ PskCiphers,
%% The new lwm2m.dtls.* keys win; the legacy top-level keys are only
%% used as fallback defaults.
Filter([{verify, cuttlefish:conf_get("lwm2m.dtls.verify", Conf, undefined)},
{keyfile, cuttlefish:conf_get("lwm2m.dtls.keyfile", Conf, OldKey)},
{certfile, cuttlefish:conf_get("lwm2m.dtls.certfile", Conf, OldCert)},
{cacertfile, cuttlefish:conf_get("lwm2m.dtls.cacertfile", Conf, undefined)},
{fail_if_no_peer_cert, cuttlefish:conf_get("lwm2m.dtls.fail_if_no_peer_cert", Conf, undefined)} | NCiphers])
end}.

View File

@ -154,8 +154,11 @@ handle_info(Info, State) ->
terminate(Reason, #state{}) ->
ets:delete(?COAP_TOPIC_TABLE),
?LOG(error, "the ~p terminate for reason ~p", [?MODULE, Reason]),
ok.
Level = case Reason =:= normal orelse Reason =:= shutdown of
true -> debug;
false -> error
end,
?SLOG(Level, #{terminate_reason => Reason}).
code_change(_OldVsn, State, _Extra) ->
{ok, State}.

View File

@ -3,7 +3,7 @@
{vsn, "0.1.0"},
{registered, []},
{mod, {emqx_gateway_app, []}},
{applications, [kernel, stdlib, grpc, lwm2m_coap]},
{applications, [kernel, stdlib, grpc, lwm2m_coap, emqx]},
{env, []},
{modules, []},
{licenses, ["Apache 2.0"]},

View File

@ -32,7 +32,7 @@
%% Gateway ID
, type := gateway_type()
%% Autenticator
, auth := emqx_authn:chain_id()
, auth := emqx_authn:chain_id() | undefined
%% The ConnectionManager PID
, cm := pid()
}.
@ -65,6 +65,8 @@
-spec authenticate(context(), emqx_types:clientinfo())
-> {ok, emqx_types:clientinfo()}
| {error, any()}.
authenticate(_Ctx = #{auth := undefined}, ClientInfo) ->
{ok, mountpoint(ClientInfo)};
authenticate(_Ctx = #{auth := ChainId}, ClientInfo0) ->
ClientInfo = ClientInfo0#{
zone => default,
@ -78,7 +80,7 @@ authenticate(_Ctx = #{auth := ChainId}, ClientInfo0) ->
{error, Reason}
end;
authenticate(_Ctx, ClientInfo) ->
{ok, ClientInfo}.
{ok, mountpoint(ClientInfo)}.
%% @doc Register the session to the cluster.
%%

View File

@ -86,8 +86,8 @@ call(Pid, Req) ->
init([Insta, Ctx0, _GwDscrptr]) ->
process_flag(trap_exit, true),
#{rawconf := RawConf} = Insta,
Ctx = do_init_context(RawConf, Ctx0),
#{id := InstaId, rawconf := RawConf} = Insta,
Ctx = do_init_context(InstaId, RawConf, Ctx0),
State = #state{
insta = Insta,
ctx = Ctx,
@ -103,16 +103,18 @@ init([Insta, Ctx0, _GwDscrptr]) ->
{ok, NState}
end.
do_init_context(RawConf, Ctx) ->
Auth = case maps:get(authenticator, RawConf, allow_anonymous) of
allow_anonymous -> allow_anonymous;
Funcs when is_list(Funcs) ->
create_authenticator_for_gateway_insta(Funcs)
do_init_context(InstaId, RawConf, Ctx) ->
Auth = case maps:get(authentication, RawConf, #{enable => false}) of
#{enable := true,
authenticators := AuthCfgs} when is_list(AuthCfgs) ->
create_authenticators_for_gateway_insta(InstaId, AuthCfgs);
_ ->
undefined
end,
Ctx#{auth => Auth}.
do_deinit_context(Ctx) ->
cleanup_authenticator_for_gateway_insta(maps:get(auth, Ctx)),
cleanup_authenticators_for_gateway_insta(maps:get(auth, Ctx)),
ok.
handle_call(info, _From, State = #state{insta = Insta}) ->
@ -213,13 +215,42 @@ code_change(_OldVsn, State, _Extra) ->
%% Internal funcs
%%--------------------------------------------------------------------
create_authenticator_for_gateway_insta(_Funcs) ->
todo.
%% @doc AuthCfgs is a array of authenticatior configurations,
%% see: emqx_authn_schema:authenticators/1
create_authenticators_for_gateway_insta(InstaId0, AuthCfgs) ->
ChainId = atom_to_binary(InstaId0, utf8),
case emqx_authn:create_chain(#{id => ChainId}) of
{ok, _ChainInfo} ->
Results = lists:map(fun(AuthCfg = #{name := Name}) ->
case emqx_authn:create_authenticator(
ChainId,
AuthCfg) of
{ok, _AuthInfo} -> ok;
{error, Reason} -> {Name, Reason}
end
end, AuthCfgs),
NResults = [ E || E <- Results, E /= ok],
NResults /= [] andalso begin
logger:error("Failed to create authenticators: ~p", [NResults]),
throw({bad_autheticators, NResults})
end, ok;
{error, Reason} ->
logger:error("Failed to create authenticator chain: ~p", [Reason]),
throw({bad_chain, {ChainId, Reason}})
end.
cleanup_authenticator_for_gateway_insta(allow_anonymouse) ->
cleanup_authenticators_for_gateway_insta(undefined) ->
ok;
cleanup_authenticator_for_gateway_insta(_ChainId) ->
todo.
cleanup_authenticators_for_gateway_insta(ChainId) ->
case emqx_authn:delete_chain(ChainId) of
ok -> ok;
{error, {not_found, _}} ->
logger:warning("Failed clean authenticator chain: ~s, "
"reason: not_found", [ChainId]);
{error, Reason} ->
logger:error("Failed clean authenticator chain: ~s, "
"reason: ~p", [ChainId, Reason])
end.
cb_insta_destroy(State = #state{insta = Insta = #{type := Type},
insta_state = InstaState}) ->

View File

@ -46,7 +46,7 @@ fields(stomp) ->
fields(stomp_structs) ->
[ {frame, t(ref(stomp_frame))}
, {clientinfo_override, t(ref(clientinfo_override))}
, {authenticator, t(union([allow_anonymous]))}
, {authentication, t(ref(authentication))}
, {listener, t(ref(tcp_listener_group))}
];
@ -97,7 +97,7 @@ fields(exproto) ->
fields(exproto_structs) ->
[ {server, t(ref(exproto_grpc_server))}
, {handler, t(ref(exproto_grpc_handler))}
, {authenticator, t(union([allow_anonymous]))}
, {authentication, t(ref(authentication))}
, {listener, t(ref(udp_tcp_listener_group))}
];
@ -111,6 +111,11 @@ fields(exproto_grpc_handler) ->
%% TODO: ssl
];
fields(authentication) ->
[ {enable, #{type => boolean(), default => false}}
, {authenticators, fun emqx_authn_schema:authenticators/1}
];
fields(clientinfo_override) ->
[ {username, t(string())}
, {password, t(string())}
@ -209,7 +214,7 @@ fields(coap) ->
fields(coap_structs) ->
[ {enable_stats, t(boolean(), undefined, true)}
, {authenticator, t(union([allow_anonymous]))}
, {authentication, t(ref(authentication))}
, {heartbeat, t(duration(), undefined, "15s")}
, {resource, t(union([mqtt, pubsub]), undefined, mqtt)}
, {notify_type, t(union([non, con, qos]), undefined, qos)}

View File

@ -109,7 +109,7 @@ format_listenon({Addr, Port}) when is_tuple(Addr) ->
-type rawconf() ::
#{ clientinfo_override => #{}
, authenticators := #{}
, authenticators := list()
, listeners => listener()
, atom() => any()
}.

View File

@ -588,7 +588,7 @@ default_conninfo(ConnInfo) ->
default_clientinfo(#{peername := {PeerHost, _},
sockname := {_, SockPort}}) ->
#{zone => external,
#{zone => default,
protocol => undefined,
peerhost => PeerHost,
sockport => SockPort,

View File

@ -119,7 +119,7 @@ init(ConnInfo = #{peername := {PeerHost, _},
EnableQoS3 = maps:get(enable_qos3, Option, true),
ClientInfo = set_peercert_infos(
Peercert,
#{ zone => undefined %% XXX:
#{ zone => default
, protocol => 'mqtt-sn'
, peerhost => PeerHost
, sockport => SockPort

View File

@ -139,9 +139,9 @@ t_auth_deny(Cfg) ->
},
Password = <<"123456">>,
ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_access_control, authenticate,
fun(_) -> {error, ?RC_NOT_AUTHORIZED} end),
ok = meck:new(emqx_gateway_ctx, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_gateway_ctx, authenticate,
fun(_, _) -> {error, ?RC_NOT_AUTHORIZED} end),
ConnBin = frame_connect(Client, Password),
ConnAckBin = frame_connack(1),
@ -152,7 +152,7 @@ t_auth_deny(Cfg) ->
SockType =/= udp andalso begin
{error, closed} = recv(Sock, 5000)
end,
meck:unload([emqx_access_control]).
meck:unload([emqx_gateway_ctx]).
t_acl_deny(Cfg) ->
SockType = proplists:get_value(listener_type, Cfg),
@ -164,8 +164,8 @@ t_acl_deny(Cfg) ->
},
Password = <<"123456">>,
ok = meck:new(emqx_access_control, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_access_control, authorize, fun(_, _, _) -> deny end),
ok = meck:new(emqx_gateway_ctx, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_gateway_ctx, authorize, fun(_, _, _, _) -> deny end),
ConnBin = frame_connect(Client, Password),
ConnAckBin = frame_connack(0),
@ -188,7 +188,7 @@ t_acl_deny(Cfg) ->
send(Sock, PubBin),
{ok, PubBinFailedAck} = recv(Sock, 5000),
meck:unload([emqx_access_control]),
meck:unload([emqx_gateway_ctx]),
send(Sock, PubBin),
{ok, PubBinSuccesAck} = recv(Sock, 5000),

View File

@ -26,7 +26,6 @@
-define(CONF_DEFAULT, <<"""
gateway: {
stomp.1: {
authenticator: allow_anonymous
clientinfo_override: {
username: \"${Packet.headers.login}\"
password: \"${Packet.headers.passcode}\"

View File

@ -0,0 +1,696 @@
## NOTE: The configurations in this file will be overridden by
## `<path-to-emqx-installation>/data/emqx_overrides.conf`
##==================================================================
## Node
##==================================================================
node {
## Node name.
## See: http://erlang.org/doc/reference_manual/distributed.html
##
## @doc node.name
## ValueType: NodeName
## Default: emqx@127.0.0.1
name: "emqx@127.0.0.1"
## Cookie for distributed node communication.
##
## @doc node.cookie
## ValueType: String
## Default: emqxsecretcookie
cookie: emqxsecretcookie
## Data dir for the node
##
## @doc node.data_dir
## ValueType: Folder
## Default: "{{ platform_data_dir }}/"
data_dir: "{{ platform_data_dir }}/"
## Dir of crash dump file.
##
## @doc node.crash_dump_dir
## ValueType: Folder
## Default: "{{ platform_log_dir }}/"
crash_dump_dir: "{{ platform_log_dir }}/"
## Global GC Interval.
##
## @doc node.global_gc_interval
## ValueType: Duration
## Default: 15m
global_gc_interval: 15m
## Sets the net_kernel tick time in seconds.
## Notice that all communicating nodes are to have the same
## TickTime value specified.
##
## See: http://www.erlang.org/doc/man/kernel_app.html#net_ticktime
##
## @doc node.dist_net_ticktime
## ValueType: Number
## Default: 2m
dist_net_ticktime: 2m
## Sets the port range for the listener socket of a distributed
## Erlang node.
## Note that if there are firewalls between clustered nodes, this
## port segment for nodes communication should be allowed.
##
## See: http://www.erlang.org/doc/man/kernel_app.html
##
## @doc node.dist_listen_min
## ValueType: Integer
## Range: [1024,65535]
## Default: 6369
dist_listen_min: 6369
## Sets the port range for the listener socket of a distributed
## Erlang node.
## Note that if there are firewalls between clustered nodes, this
## port segment for nodes communication should be allowed.
##
## See: http://www.erlang.org/doc/man/kernel_app.html
##
## @doc node.dist_listen_max
## ValueType: Integer
## Range: [1024,65535]
## Default: 6369
dist_listen_max: 6369
## Sets the maximum depth of call stack back-traces in the exit
## reason element of 'EXIT' tuples.
## The flag also limits the stacktrace depth returned by
## process_info item current_stacktrace.
##
## @doc node.backtrace_depth
## ValueType: Integer
## Range: [0,1024]
## Default: 23
backtrace_depth: 23
}
##==================================================================
## Cluster
##==================================================================
cluster {
## Cluster name.
##
## @doc cluster.name
## ValueType: String
## Default: emqxcl
name: emqxcl
## Enable cluster autoheal from network partition.
##
## @doc cluster.autoheal
## ValueType: Boolean
## Default: true
autoheal: true
## Autoclean down node. A down node will be removed from the cluster
## if this value > 0.
##
## @doc cluster.autoclean
## ValueType: Duration
## Default: 5m
autoclean: 5m
## Node discovery strategy to join the cluster.
##
## @doc cluster.discovery_strategy
## ValueType: manual | static | mcast | dns | etcd | k8s
## - manual: Manual join command
## - static: Static node list
## - mcast: IP Multicast
## - dns: DNS A Record
## - etcd: etcd
## - k8s: Kubernetes
##
## Default: manual
discovery_strategy: manual
##----------------------------------------------------------------
## Cluster using static node list
##----------------------------------------------------------------
static {
## Node list of the cluster
##
## @doc cluster.static.seeds
## ValueType: Array<NodeName>
## Default: []
seeds: ["emqx1@127.0.0.1", "emqx2@127.0.0.1"]
}
##----------------------------------------------------------------
## Cluster using IP Multicast
##----------------------------------------------------------------
mcast {
## IP Multicast Address.
##
## @doc cluster.mcast.addr
## ValueType: IPAddress
## Default: "239.192.0.1"
addr: "239.192.0.1"
## Multicast Ports.
##
## @doc cluster.mcast.ports
## ValueType: Array<Port>
## Default: [4369, 4370]
ports: [4369, 4370]
## Multicast Iface.
##
## @doc cluster.mcast.iface
## ValueType: IPAddress
## Default: "0.0.0.0"
iface: "0.0.0.0"
## Multicast Ttl.
##
## @doc cluster.mcast.ttl
## ValueType: Integer
## Range: [0,255]
## Default: 255
ttl: 255
## Multicast loop.
##
## @doc cluster.mcast.loop
## ValueType: Boolean
## Default: true
loop: true
}
##----------------------------------------------------------------
## Cluster using DNS A records
##----------------------------------------------------------------
dns {
## DNS name.
##
## @doc cluster.dns.name
## ValueType: String
## Default: localhost
name: localhost
## The App name is used to build 'node.name' with IP address.
##
## @doc cluster.dns.app
## ValueType: String
## Default: emqx
app: emqx
}
##----------------------------------------------------------------
## Cluster using etcd
##----------------------------------------------------------------
etcd {
## Etcd server list, separated by ','.
##
## @doc cluster.etcd.server
## ValueType: URL
## Required: true
server: "http://127.0.0.1:2379"
## The prefix helps build nodes path in etcd. Each node in the cluster
## will create a path in etcd: v2/keys/<prefix>/<name>/<node.name>
##
## @doc cluster.etcd.prefix
## ValueType: String
## Default: emqxcl
prefix: emqxcl
## The TTL for node's path in etcd.
##
## @doc cluster.etcd.node_ttl
## ValueType: Duration
## Default: 1m
node_ttl: 1m
## Path to the file containing the user's private PEM-encoded key.
##
## @doc cluster.etcd.ssl.keyfile
## ValueType: File
## Default: "{{ platform_etc_dir }}/certs/key.pem"
ssl.keyfile: "{{ platform_etc_dir }}/certs/key.pem"
## Path to a file containing the user certificate.
##
## @doc cluster.etcd.ssl.certfile
## ValueType: File
## Default: "{{ platform_etc_dir }}/certs/cert.pem"
ssl.certfile: "{{ platform_etc_dir }}/certs/cert.pem"
## Path to the file containing PEM-encoded CA certificates. The CA certificates
## are used during server authentication and when building the client certificate chain.
##
## @doc cluster.etcd.ssl.cacertfile
## ValueType: File
## Default: "{{ platform_etc_dir }}/certs/cacert.pem"
ssl.cacertfile: "{{ platform_etc_dir }}/certs/cacert.pem"
}
##----------------------------------------------------------------
## Cluster using Kubernetes
##----------------------------------------------------------------
k8s {
## Kubernetes API server list, separated by ','.
##
## @doc cluster.k8s.apiserver
## ValueType: URL
## Required: true
apiserver: "http://10.110.111.204:8080"
## The service name helps lookup EMQ nodes in the cluster.
##
## @doc cluster.k8s.service_name
## ValueType: String
## Default: emqx
service_name: emqx
## The address type is used to extract host from k8s service.
##
## @doc cluster.k8s.address_type
## ValueType: ip | dns | hostname
## Default: ip
address_type: ip
## The app name helps build 'node.name'.
##
## @doc cluster.k8s.app_name
## ValueType: String
## Default: emqx
app_name: emqx
## The suffix added to dns and hostname get from k8s service
##
## @doc cluster.k8s.suffix
## ValueType: String
## Default: "pod.local"
suffix: "pod.local"
## Kubernetes Namespace
##
## @doc cluster.k8s.namespace
## ValueType: String
## Default: default
namespace: default
}
db_backend: mnesia
rlog: {
# role: core
# core_nodes: []
}
}
##==================================================================
## Log
##==================================================================
log {
## The primary log level
##
## - all the log messages with levels lower than this level will
## be dropped.
## - all the log messages with levels higher than this level will
## go into the log handlers. The handlers then decide to log it
## out or drop it according to the level setting of the handler.
##
## Note: Only the messages with severity level higher than or
## equal to this level will be logged.
##
## @doc log.primary_level
## ValueType: debug | info | notice | warning | error | critical | alert | emergency
## Default: warning
primary_level: warning
##----------------------------------------------------------------
## The console log handler send log messages to emqx console
##----------------------------------------------------------------
## Log to single line
## @doc log.console_handler.enable
## ValueType: Boolean
## Default: false
console_handler.enable: false
## The log level of this handler
## All the log messages with levels lower than this level will
## be dropped.
##
## @doc log.console_handler.level
## ValueType: debug | info | notice | warning | error | critical | alert | emergency
## Default: warning
console_handler.level: warning
##----------------------------------------------------------------
## The file log handlers send log messages to files
##----------------------------------------------------------------
## file_handlers.<name>
file_handlers.emqx_log: {
## The log level filter of this handler
## All the log messages with levels lower than this level will
## be dropped.
##
## @doc log.file_handlers.<name>.level
## ValueType: debug | info | notice | warning | error | critical | alert | emergency
## Default: warning
level: warning
## The log file for specified level.
##
## If `rotation` is disabled, this is the file of the log files.
##
## If `rotation` is enabled, this is the base name of the files.
## Each file in a rotated log is named <base_name>.N, where N is an integer.
##
## Note: Log files for a specific log level will only contain all the logs
## that higher than or equal to that level
##
## @doc log.file_handlers.<name>.file
## ValueType: File
## Required: true
file: "{{ platform_log_dir }}/emqx.log"
## Enables the log rotation.
## With this enabled, new log files will be created when the current
## log file is full, max to `rotation_count` files will be created.
##
## @doc log.file_handlers.<name>.rotation.enable
## ValueType: Boolean
## Default: true
rotation.enable: true
## Maximum rotation count of log files.
##
## @doc log.file_handlers.<name>.rotation.count
## ValueType: Integer
## Range: [1, 2048]
## Default: 10
rotation.count: 10
## Maximum size of each log file.
##
## If the max_size reached and `rotation` is disabled, the handler
## will stop sending log messages, if the `rotation` is enabled,
## the file rotates.
##
## @doc log.file_handlers.<name>.max_size
## ValueType: Size | infinity
## Default: 10MB
max_size: 10MB
}
## file_handlers.<name>
##
## You could also create multiple file handlers for different
## log level for example:
file_handlers.emqx_error_log: {
level: error
file: "{{ platform_log_dir }}/error.log"
}
## Timezone offset to display in logs
##
## @doc log.time_offset
## ValueType: system | utc | String
## - "system" use system zone
## - "utc" for Universal Coordinated Time (UTC)
## - "+hh:mm" or "-hh:mm" for a specified offset
## Default: system
time_offset: system
## Limits the total number of characters printed for each log event.
##
## @doc log.chars_limit
## ValueType: Integer | infinity
## Range: [0, infinity)
## Default: infinity
chars_limit: infinity
## Maximum depth for Erlang term log formatting
## and Erlang process message queue inspection.
##
## @doc log.max_depth
## ValueType: Integer | infinity
## Default: 80
max_depth: 80
## Log formatter
## @doc log.formatter
## ValueType: text | json
## Default: text
formatter: text
## Log to single line
## @doc log.single_line
## ValueType: Boolean
## Default: true
single_line: true
## The max allowed queue length before switching to sync mode.
##
## Log overload protection parameter. If the message queue grows
## larger than this value the handler switches from async to sync mode.
##
## @doc log.sync_mode_qlen
## ValueType: Integer
## Range: [0, ${log.drop_mode_qlen}]
## Default: 100
sync_mode_qlen: 100
## The max allowed queue length before switching to drop mode.
##
## Log overload protection parameter. When the message queue grows
## larger than this threshold, the handler switches to a mode in which
## it drops all new events that senders want to log.
##
## @doc log.drop_mode_qlen
## ValueType: Integer
## Range: [${log.sync_mode_qlen}, ${log.flush_qlen}]
## Default: 3000
drop_mode_qlen: 3000
## The max allowed queue length before switching to flush mode.
##
## Log overload protection parameter. If the length of the message queue
## grows larger than this threshold, a flush (delete) operation takes place.
## To flush events, the handler discards the messages in the message queue
## by receiving them in a loop without logging.
##
## @doc log.flush_qlen
## ValueType: Integer
## Range: [${log.drop_mode_qlen}, infinity)
## Default: 8000
flush_qlen: 8000
## Kill the log handler when it gets overloaded.
##
## Log overload protection parameter. It is possible that a handler,
## even if it can successfully manage peaks of high load without crashing,
## can build up a large message queue, or use a large amount of memory.
## We could kill the log handler in these cases and restart it after a
## few seconds.
##
## @doc log.overload_kill.enable
## ValueType: Boolean
## Default: true
overload_kill.enable: true
## The max allowed queue length before killing the log handler.
##
## Log overload protection parameter. This is the maximum allowed queue
## length. If the message queue grows larger than this, the handler
## process is terminated.
##
## @doc log.overload_kill.qlen
## ValueType: Integer
## Range: [0, 1048576]
## Default: 20000
overload_kill.qlen: 20000
## The max allowed memory size before killing the log handler.
##
## Log overload protection parameter. This is the maximum memory size
## that the handler process is allowed to use. If the handler grows
## larger than this, the process is terminated.
##
## @doc log.overload_kill.mem_size
## ValueType: Size
## Default: 30MB
overload_kill.mem_size: 30MB
## Restart the log handler after a delay.
##
## Log overload protection parameter. If the handler is terminated,
## it restarts automatically after a delay specified in seconds.
##
## @doc log.overload_kill.restart_after
## ValueType: Duration
## Default: 5s
overload_kill.restart_after: 5s
## Controlling Bursts of Log Requests.
##
## Log overload protection parameter. Large bursts of log events - many
## events received by the handler under a short period of time - can
## potentially cause problems. By specifying the maximum number of events
## to be handled within a certain time frame, the handler can avoid
## choking the log with massive amounts of printouts.
##
## Note that there would be no warning if any messages were
## dropped because of burst control.
##
## @doc log.burst_limit.enable
## ValueType: Boolean
## Default: false
burst_limit.enable: false
## This config controls the maximum number of events to handle within
## a time frame. After the limit is reached, successive events are
## dropped until the end of the time frame defined by `window_time`.
##
## @doc log.burst_limit.max_count
## ValueType: Integer
## Default: 10000
burst_limit.max_count: 10000
## See the previous description of burst_limit.max_count.
##
## @doc log.burst_limit.window_time
## ValueType: duration
## Default: 1s
burst_limit.window_time: 1s
}
##==================================================================
## RPC
##==================================================================
rpc {
## RPC Mode.
##
## @doc rpc.mode
## ValueType: sync | async
## Default: async
mode: async
## Max batch size of async RPC requests.
##
## NOTE: RPC batch won't work when rpc.mode = sync
## Zero value disables rpc batching.
##
## @doc rpc.async_batch_size
## ValueType: Integer
## Range: [0, 1048576]
## Default: 0
async_batch_size: 256
## RPC port discovery
##
## The strategy for discovering the RPC listening port of
## other nodes.
##
## @doc cluster.discovery_strategy
## ValueType: manual | stateless
## - manual: discover ports by `tcp_server_port`.
## - stateless: discover ports in a stateless manner.
## If node name is `emqx<N>@127.0.0.1`, where the `<N>` is
## an integer, then the listening port will be `5370 + <N>`
##
## Default: `stateless`.
port_discovery: stateless
## TCP server port for RPC.
##
## Only takes effect when `rpc.port_discovery` = `manual`.
##
## @doc rpc.tcp_server_port
## ValueType: Integer
## Range: [1024-65535]
## Defaults: 5369
tcp_server_port: 5369
## Number of outgoing RPC connections.
##
## Set this to 1 to keep the message order sent from the same
## client.
##
## @doc rpc.tcp_client_num
## ValueType: Integer
## Range: [1, 256]
## Defaults: 1
tcp_client_num: 1
## RPC client connect timeout.
##
## @doc rpc.connect_timeout
## ValueType: Duration
## Default: 5s
connect_timeout: 5s
## TCP send timeout of RPC client and server.
##
## @doc rpc.send_timeout
## ValueType: Duration
## Default: 5s
send_timeout: 5s
## Authentication timeout
##
## @doc rpc.authentication_timeout
## ValueType: Duration
## Default: 5s
authentication_timeout: 5s
## Default receive timeout for call() functions
##
## @doc rpc.call_receive_timeout
## ValueType: Duration
## Default: 15s
call_receive_timeout: 15s
## Socket idle keepalive.
##
## @doc rpc.socket_keepalive_idle
## ValueType: Duration
## Default: 900s
socket_keepalive_idle: 900s
## TCP Keepalive probes interval.
##
## @doc rpc.socket_keepalive_interval
## ValueType: Duration
## Default: 75s
socket_keepalive_interval: 75s
## Probes lost to close the connection
##
## @doc rpc.socket_keepalive_count
## ValueType: Integer
## Default: 9
socket_keepalive_count: 9
## Size of TCP send buffer.
##
## @doc rpc.socket_sndbuf
## ValueType: Size
## Default: 1MB
socket_sndbuf: 1MB
## Size of TCP receive buffer.
##
## @doc rpc.socket_recbuf
## ValueType: Size
## Default: 1MB
socket_recbuf: 1MB
## Size of user-level software socket buffer.
##
## @doc rpc.socket_buffer
## ValueType: Size
## Default: 1MB
socket_buffer: 1MB
}

View File

@ -0,0 +1,15 @@
%% Application resource file for `emqx_machine' -- the EMQ X boot/shutdown
%% orchestrator application (see emqx_machine_app for the application callback).
{application, emqx_machine,
[{id, "emqx_machine"},
{description, "The EMQ X Machine"},
{vsn, "0.1.0"}, % strict semver, bump manually!
{modules, []},
{registered, []},
{applications, [kernel,stdlib]},
{mod, {emqx_machine_app,[]}},
{env, []},
{licenses, ["Apache-2.0"]},
{maintainers, ["EMQ X Team <contact@emqx.io>"]},
{links, [{"Homepage", "https://emqx.io/"},
{"Github", "https://github.com/emqx/emqx"}
]}
]}.

View File

@ -0,0 +1,187 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_machine).
-export([ start/0
, graceful_shutdown/0
, is_ready/0
]).
-export([ stop_apps/1
, ensure_apps_started/0
]).
-export([sorted_reboot_apps/0]).
-ifdef(TEST).
-export([sorted_reboot_apps/1]).
-endif.
-include_lib("emqx/include/logger.hrl").
%% @doc EMQ X boot entrypoint.
%% Sequence matters: signal handling is configured first, config files are
%% loaded before any app is started, and autocluster runs last so the node
%% only joins a cluster once it is fully booted.
start() ->
os:set_signal(sighup, ignore),
os:set_signal(sigterm, handle), %% default is handle
ok = set_backtrace_depth(),
ok = print_otp_version_warning(),
%% need to load some app envs
%% TODO delete it once emqx boot does not depend on modules envs
_ = load_modules(),
ok = load_config_files(),
ok = ensure_apps_started(),
_ = emqx_plugins:load(),
ok = print_vsn(),
ok = start_autocluster().
%% @doc Stop all apps and then the VM; blocks until shutdown completes.
graceful_shutdown() ->
emqx_machine_terminator:graceful_wait().

%% Apply the configured backtrace depth to the VM.
%% Crashes (badmatch) if the `backtrace_depth' app env is unset -- it has a
%% default in the emqx_machine schema ("node.backtrace_depth").
set_backtrace_depth() ->
{ok, Depth} = application:get_env(emqx_machine, backtrace_depth),
_ = erlang:system_flag(backtrace_depth, Depth),
ok.
%% @doc Return true if boot is complete.
%% Readiness is defined as the terminator process being alive, since it is
%% started by emqx_machine_sup at the end of boot.
is_ready() ->
emqx_machine_terminator:is_running().

%% Warn once at boot when running on an OTP release older than recommended.
-if(?OTP_RELEASE > 22).
print_otp_version_warning() -> ok.
-else.
print_otp_version_warning() ->
?ULOG("WARNING: Running on Erlang/OTP version ~p. Recommended: 23~n",
[?OTP_RELEASE]).
-endif. % OTP_RELEASE > 22
%% Print the banner with description and release version; silenced in tests.
-ifdef(TEST).
print_vsn() -> ok.
-else. % TEST
print_vsn() ->
?ULOG("~s ~s is running now!~n", [emqx_app:get_description(), emqx_app:get_release()]).
-endif. % TEST

%% Load (not start) the emqx_modules application so its app envs are
%% readable during boot; a no-op in the enterprise edition.
-ifndef(EMQX_ENTERPRISE).
load_modules() ->
application:load(emqx_modules).
-else.
load_modules() ->
ok.
-endif.
%% Read and apply all HOCON config files listed in the 'config_files' app env.
load_config_files() ->
%% the app env 'config_files' for the 'emqx' app should be set
%% in app.<time>.config by the boot script before starting the Erlang VM
ConfFiles = application:get_env(emqx, config_files, []),
%% emqx_machine_schema is a superset of emqx_schema
ok = emqx_config:init_load(emqx_machine_schema, ConfFiles),
%% to avoid config being loaded again when emqx app starts.
ok = emqx_app:set_init_config_load_done().

%% Register cluster-membership callbacks and kick off ekka autocluster:
%% joining a cluster stops and restarts the reboot apps (see stop_apps/1
%% and ensure_apps_started/0).
start_autocluster() ->
ekka:callback(prepare, fun ?MODULE:stop_apps/1),
ekka:callback(reboot, fun ?MODULE:ensure_apps_started/0),
_ = ekka:autocluster(emqx), %% returns 'ok' or a pid or 'any()' as in spec
ok.
%% @doc Stop all reboot apps in reverse dependency order.
%% The alarm handler is unloaded first so shutdown does not raise alarms.
stop_apps(Reason) ->
?SLOG(info, #{msg => "stopping_apps", reason => Reason}),
_ = emqx_alarm_handler:unload(),
lists:foreach(fun stop_one_app/1, lists:reverse(sorted_reboot_apps())).

%% Best-effort stop of a single app: a failure is logged, never propagated,
%% so one bad app cannot abort the whole shutdown sequence.
stop_one_app(App) ->
?SLOG(debug, #{msg => "stopping_app", app => App}),
try
_ = application:stop(App)
catch
C : E ->
?SLOG(error, #{msg => "failed_to_stop_app",
app => App,
exception => C,
reason => E})
end.
%% @doc Start all reboot apps in dependency order (deps first).
ensure_apps_started() ->
lists:foreach(fun start_one_app/1, sorted_reboot_apps()).

%% Start one app (and its OTP dependencies). Unlike stop_one_app/1 this is
%% strict: a start failure aborts boot with an error after logging.
start_one_app(App) ->
?SLOG(debug, #{msg => "starting_app", app => App}),
case application:ensure_all_started(App) of
{ok, Apps} ->
?SLOG(debug, #{msg => "started_apps", apps => Apps});
{error, Reason} ->
?SLOG(critical, #{msg => "failed_to_start_app", app => App, reason => Reason}),
%% fixed typo: was the misspelled atom 'faile_to_start_app'
error({failed_to_start_app, App, Reason})
end.
%% list of app names which should be rebooted when:
%% 1. a static config change occurs
%% 2. after joining a cluster
reboot_apps() ->
[gproc, esockd, ranch, cowboy, ekka, emqx | ?EMQX_DEP_APPS].

%% Topologically sort the reboot apps by their in-list dependencies.
sorted_reboot_apps() ->
Apps = [{App, app_deps(App)} || App <- reboot_apps()],
sorted_reboot_apps(Apps).

%% Dependencies of `App' restricted to the reboot list; an app that is not
%% loaded (get_key returns undefined) is treated as having none.
app_deps(App) ->
case application:get_key(App, applications) of
undefined -> [];
{ok, List} -> lists:filter(fun(A) -> lists:member(A, reboot_apps()) end, List)
end.
%% Topological sort of [{App, Deps}] using a digraph; deps come first in the
%% result. Raises {circular_application_dependency, Loops} on a cycle.
%% The graph is always deleted (digraph state lives in ETS, not on the heap).
sorted_reboot_apps(Apps) ->
G = digraph:new(),
try
lists:foreach(fun({App, Deps}) -> add_app(G, App, Deps) end, Apps),
case digraph_utils:topsort(G) of
Sorted when is_list(Sorted) ->
Sorted;
false ->
Loops = find_loops(G),
error({circular_application_dependency, Loops})
end
after
digraph:delete(G)
end.
%% Add `App' and its dependency edges to graph `G'.
%% Edge direction is Dep -> App so that topsort places dependencies first.
add_app(G, App, undefined) ->
?SLOG(debug, #{msg => "app_is_not_loaded", app => App}),
%% not loaded: treat as having no dependencies
add_app(G, App, []);
add_app(G, App, []) ->
%% Always add the vertex itself. Previously an app with no in-list
%% dependencies (and which no other app depended on) got no vertex at
%% all and was silently dropped from the topsort result, so it was
%% never stopped/restarted. add_vertex/2 is idempotent, so reaching
%% this clause as the recursion terminator is harmless.
_ = digraph:add_vertex(G, App),
ok;
add_app(G, App, [Dep | Deps]) ->
digraph:add_vertex(G, App),
digraph:add_vertex(G, Dep),
digraph:add_edge(G, Dep, App), %% dep -> app as dependency
add_app(G, App, Deps).
%% Collect, for every vertex that lies on a cycle, the shortest cycle
%% through it. Used only to build an informative error term when the
%% topological sort fails.
find_loops(G) ->
    Vertices = digraph:vertices(G),
    lists:foldr(
        fun(Vertex, Acc) ->
            case digraph:get_short_cycle(G, Vertex) of
                false -> Acc;
                Cycle -> [Cycle | Acc]
            end
        end, [], Vertices).

View File

@ -0,0 +1,30 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% Application callback module: runs the full emqx_machine boot sequence,
%% then starts the top-level supervisor.
-module(emqx_machine_app).
-export([ start/2
, stop/1
]).
-behaviour(application).
%% Boot everything (config load, reboot apps, autocluster) before the
%% supervisor comes up; see emqx_machine:start/0.
start(_Type, _Args) ->
ok = emqx_machine:start(),
emqx_machine_sup:start_link().
stop(_State) ->
ok.

View File

@ -0,0 +1,426 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_machine_schema).
-dialyzer(no_return).
-dialyzer(no_match).
-dialyzer(no_contracts).
-dialyzer(no_unused).
-dialyzer(no_fail_call).
-include_lib("typerefl/include/types.hrl").
-type log_level() :: debug | info | notice | warning | error | critical | alert | emergency | all.
-type file() :: string().
-type cipher() :: map().
-behaviour(hocon_schema).
-reflect_type([ log_level/0,
file/0,
cipher/0]).
-export([structs/0, fields/1, translations/0, translation/1]).
-export([t/1, t/3, t/4, ref/1]).
-export([conf_get/2, conf_get/3, keys/2, filter/1]).
%% Static apps which merge their configs into the merged emqx.conf
%% The list can not be made a dynamic read at run-time as it is used
%% by nodetool to generate app.<time>.config before EMQ X is started
-define(MERGED_CONFIGS,
[ emqx_schema
, emqx_data_bridge_schema
, emqx_retainer_schema
, emqx_statsd_schema
, emqx_authn_schema
, emqx_authz_schema
, emqx_bridge_mqtt_schema
, emqx_modules_schema
, emqx_management_schema
, emqx_dashboard_schema
, emqx_gateway_schema
, emqx_prometheus_schema
]).
%% TODO: add a test case to ensure the list elements are unique
%% Root structs: the machine-level ones plus every merged schema's roots.
structs() ->
["cluster", "node", "rpc", "log"]
++ lists:flatmap(fun(Mod) -> Mod:structs() end, ?MERGED_CONFIGS).
fields("cluster") ->
[ {"name", t(atom(), "ekka.cluster_name", emqxcl)}
, {"discovery_strategy", t(union([manual, static, mcast, dns, etcd, k8s]),
undefined, manual)}
, {"autoclean", t(emqx_schema:duration(), "ekka.cluster_autoclean", "5m")}
, {"autoheal", t(boolean(), "ekka.cluster_autoheal", true)}
, {"static", ref("static")}
, {"mcast", ref("mcast")}
, {"proto_dist", t(union([inet_tcp, inet6_tcp, inet_tls]), "ekka.proto_dist", inet_tcp)}
, {"dns", ref("dns")}
, {"etcd", ref("etcd")}
, {"k8s", ref("k8s")}
, {"db_backend", t(union([mnesia, rlog]), "ekka.db_backend", mnesia)}
, {"rlog", ref("rlog")}
];
fields("static") ->
[ {"seeds", t(hoconsc:array(string()), undefined, [])}];
fields("mcast") ->
[ {"addr", t(string(), undefined, "239.192.0.1")}
, {"ports", t(hoconsc:array(integer()), undefined, [4369, 4370])}
, {"iface", t(string(), undefined, "0.0.0.0")}
, {"ttl", t(range(0, 255), undefined, 255)}
, {"loop", t(boolean(), undefined, true)}
, {"sndbuf", t(emqx_schema:bytesize(), undefined, "16KB")}
, {"recbuf", t(emqx_schema:bytesize(), undefined, "16KB")}
, {"buffer", t(emqx_schema:bytesize(), undefined, "32KB")}
];
fields("dns") ->
[ {"name", t(string(), undefined, "localhost")}
, {"app", t(string(), undefined, "emqx")}];
fields("etcd") ->
[ {"server", t(emqx_schema:comma_separated_list())}
, {"prefix", t(string(), undefined, "emqxcl")}
, {"node_ttl", t(emqx_schema:duration(), undefined, "1m")}
, {"ssl", ref("etcd_ssl")}
];
fields("etcd_ssl") ->
emqx_schema:ssl(#{});
fields("k8s") ->
[ {"apiserver", t(string())}
, {"service_name", t(string(), undefined, "emqx")}
, {"address_type", t(union([ip, dns, hostname]))}
, {"app_name", t(string(), undefined, "emqx")}
, {"namespace", t(string(), undefined, "default")}
, {"suffix", t(string(), undefined, "pod.local")}
];
fields("rlog") ->
[ {"role", t(union([core, replicant]), "ekka.node_role", core)}
, {"core_nodes", t(emqx_schema:comma_separated_atoms(), "ekka.core_nodes", [])}
];
fields("node") ->
[ {"name", hoconsc:t(string(), #{default => "emqx@127.0.0.1",
override_env => "EMQX_NODE_NAME"
})}
, {"cookie", hoconsc:t(string(), #{mapping => "vm_args.-setcookie",
default => "emqxsecretcookie",
sensitive => true,
override_env => "EMQX_NODE_COOKIE"
})}
, {"data_dir", hoconsc:t(string(), #{nullable => false})}
, {"config_files", t(list(string()), "emqx.config_files", undefined)}
, {"global_gc_interval", t(emqx_schema:duration(), undefined, "15m")}
, {"crash_dump_dir", t(file(), "vm_args.-env ERL_CRASH_DUMP", undefined)}
, {"dist_net_ticktime", t(emqx_schema:duration(), "vm_args.-kernel net_ticktime", "2m")}
, {"dist_listen_min", t(range(1024, 65535), "kernel.inet_dist_listen_min", 6369)}
, {"dist_listen_max", t(range(1024, 65535), "kernel.inet_dist_listen_max", 6369)}
, {"backtrace_depth", t(integer(), "emqx_machine.backtrace_depth", 23)}
];
fields("rpc") ->
[ {"mode", t(union(sync, async), undefined, async)}
, {"async_batch_size", t(integer(), "gen_rpc.max_batch_size", 256)}
, {"port_discovery",t(union(manual, stateless), "gen_rpc.port_discovery", stateless)}
, {"tcp_server_port", t(integer(), "gen_rpc.tcp_server_port", 5369)}
, {"tcp_client_num", t(range(1, 256), undefined, 1)}
, {"connect_timeout", t(emqx_schema:duration(), "gen_rpc.connect_timeout", "5s")}
, {"send_timeout", t(emqx_schema:duration(), "gen_rpc.send_timeout", "5s")}
, {"authentication_timeout", t(emqx_schema:duration(), "gen_rpc.authentication_timeout", "5s")}
, {"call_receive_timeout", t(emqx_schema:duration(), "gen_rpc.call_receive_timeout", "15s")}
, {"socket_keepalive_idle", t(emqx_schema:duration_s(), "gen_rpc.socket_keepalive_idle", "7200s")}
, {"socket_keepalive_interval", t(emqx_schema:duration_s(), "gen_rpc.socket_keepalive_interval", "75s")}
, {"socket_keepalive_count", t(integer(), "gen_rpc.socket_keepalive_count", 9)}
, {"socket_sndbuf", t(emqx_schema:bytesize(), "gen_rpc.socket_sndbuf", "1MB")}
, {"socket_recbuf", t(emqx_schema:bytesize(), "gen_rpc.socket_recbuf", "1MB")}
, {"socket_buffer", t(emqx_schema:bytesize(), "gen_rpc.socket_buffer", "1MB")}
];
fields("log") ->
[ {"primary_level", t(log_level(), undefined, warning)}
, {"console_handler", ref("console_handler")}
, {"file_handlers", ref("file_handlers")}
, {"time_offset", t(string(), undefined, "system")}
, {"chars_limit", maybe_infinity(range(1, inf))}
, {"supervisor_reports", t(union([error, progress]), undefined, error)}
, {"max_depth", t(union([infinity, integer()]),
"kernel.error_logger_format_depth", 80)}
, {"formatter", t(union([text, json]), undefined, text)}
, {"single_line", t(boolean(), undefined, true)}
, {"sync_mode_qlen", t(integer(), undefined, 100)}
, {"drop_mode_qlen", t(integer(), undefined, 3000)}
, {"flush_qlen", t(integer(), undefined, 8000)}
, {"overload_kill", ref("log_overload_kill")}
, {"burst_limit", ref("log_burst_limit")}
, {"error_logger", t(atom(), "kernel.error_logger", silent)}
];
fields("console_handler") ->
[ {"enable", t(boolean(), undefined, false)}
, {"level", t(log_level(), undefined, warning)}
];
fields("file_handlers") ->
[ {"$name", ref("log_file_handler")}
];
fields("log_file_handler") ->
[ {"level", t(log_level(), undefined, warning)}
, {"file", t(file(), undefined, undefined)}
, {"rotation", ref("log_rotation")}
, {"max_size", maybe_infinity(emqx_schema:bytesize(), "10MB")}
];
fields("log_rotation") ->
[ {"enable", t(boolean(), undefined, true)}
, {"count", t(range(1, 2048), undefined, 10)}
];
fields("log_overload_kill") ->
[ {"enable", t(boolean(), undefined, true)}
, {"mem_size", t(emqx_schema:bytesize(), undefined, "30MB")}
, {"qlen", t(integer(), undefined, 20000)}
, {"restart_after", t(union(emqx_schema:duration(), infinity), undefined, "5s")}
];
fields("log_burst_limit") ->
[ {"enable", t(boolean(), undefined, true)}
, {"max_count", t(integer(), undefined, 10000)}
, {"window_time", t(emqx_schema:duration(), undefined, "1s")}
];
fields(Name) ->
find_field(Name, ?MERGED_CONFIGS).
%% Resolve struct `Name' against the merged schema modules, in order;
%% the first module whose structs/0 contains the name wins.
find_field(Name, SchemaModules) ->
    Match = lists:search(
              fun(Mod) -> lists:member(Name, Mod:structs()) end,
              SchemaModules),
    case Match of
        {value, Mod} -> Mod:fields(Name);
        false -> error({unknown_config_struct_field, Name})
    end.
%% App-env translations: computed values written into the generated config.
translations() -> ["ekka", "kernel", "emqx"].

translation("ekka") ->
[ {"cluster_discovery", fun tr_cluster__discovery/1}];
translation("kernel") ->
[ {"logger_level", fun tr_logger_level/1}
, {"logger", fun tr_logger/1}];
translation("emqx") ->
[ {"config_files", fun tr_config_files/1}
%% fixed typo: the translator was named tr_override_conf_fie
, {"override_conf_file", fun tr_override_conf_file/1}
].

%% Resolve the list of config files: explicit setting wins, otherwise fall
%% back to RUNNER_ETC_DIR (set by the boot scripts) or the emqx lib dir.
tr_config_files(Conf) ->
case conf_get("emqx.config_files", Conf) of
[_ | _] = Files ->
Files;
_ ->
case os:getenv("RUNNER_ETC_DIR") of
false ->
[filename:join([code:lib_dir(emqx), "etc", "emqx.conf"])];
Dir ->
[filename:join([Dir, "emqx.conf"])]
end
end.

%% Place the override file inside the (mandatory) node data dir.
tr_override_conf_file(Conf) ->
DataDir = conf_get("node.data_dir", Conf),
%% assert, this config is not nullable
[_ | _] = DataDir,
filename:join([DataDir, "emqx_override.conf"]).
%% Build ekka's {Strategy, Options} discovery tuple, dropping undefined opts.
%% TODO: the double underscore in the function name is historical; rename
%% together with its reference in translation("ekka").
tr_cluster__discovery(Conf) ->
Strategy = conf_get("cluster.discovery_strategy", Conf),
{Strategy, filter(options(Strategy, Conf))}.

%% Primary log level goes straight into the kernel logger_level env.
tr_logger_level(Conf) -> conf_get("log.primary_level", Conf).
%% Translate the "log" struct into the kernel `logger' handler spec list:
%% the default handler is removed, then an optional console handler and one
%% disk_log handler per entry under log.file_handlers are added.
tr_logger(Conf) ->
CharsLimit = case conf_get("log.chars_limit", Conf) of
infinity -> unlimited;
V -> V
end,
SingleLine = conf_get("log.single_line", Conf),
FmtName = conf_get("log.formatter", Conf),
Formatter = formatter(FmtName, CharsLimit, SingleLine),
%% Common overload-protection config shared by all handlers.
BasicConf = #{
sync_mode_qlen => conf_get("log.sync_mode_qlen", Conf),
drop_mode_qlen => conf_get("log.drop_mode_qlen", Conf),
flush_qlen => conf_get("log.flush_qlen", Conf),
overload_kill_enable => conf_get("log.overload_kill.enable", Conf),
overload_kill_qlen => conf_get("log.overload_kill.qlen", Conf),
overload_kill_mem_size => conf_get("log.overload_kill.mem_size", Conf),
overload_kill_restart_after => conf_get("log.overload_kill.restart_after", Conf),
burst_limit_enable => conf_get("log.burst_limit.enable", Conf),
burst_limit_max_count => conf_get("log.burst_limit.max_count", Conf),
burst_limit_window_time => conf_get("log.burst_limit.window_time", Conf)
},
%% 'error' means: drop supervisor progress reports.
Filters = case conf_get("log.supervisor_reports", Conf) of
error -> [{drop_progress_reports, {fun logger_filters:progress/2, stop}}];
progress -> []
end,
%% For the default logger that outputs to console
ConsoleHandler =
case conf_get("log.console_handler.enable", Conf) of
true ->
[{handler, console, logger_std_h, #{
level => conf_get("log.console_handler.level", Conf),
config => BasicConf#{type => standard_io},
formatter => Formatter,
filters => Filters
}}];
false -> []
end,
%% For the file logger
FileHandlers =
[{handler, binary_to_atom(HandlerName, latin1), logger_disk_log_h, #{
level => conf_get("level", SubConf),
config => BasicConf#{
%% wrap = rotate through max_no_files; halt = stop at max size
type => case conf_get("rotation.enable", SubConf) of
true -> wrap;
_ -> halt
end,
file => conf_get("file", SubConf),
max_no_files => conf_get("rotation.count", SubConf),
max_no_bytes => conf_get("max_size", SubConf)
},
formatter => Formatter,
filters => Filters,
filesync_repeat_interval => no_repeat
}}
|| {HandlerName, SubConf} <- maps:to_list(conf_get("log.file_handlers", Conf, #{}))],
%% {handler, default, undefined} removes OTP's default handler.
[{handler, default, undefined}] ++ ConsoleHandler ++ FileHandlers.
%% helpers

%% Build the {Module, Config} formatter tuple for a handler.
formatter(json, CharsLimit, SingleLine) ->
{emqx_logger_jsonfmt,
#{chars_limit => CharsLimit,
single_line => SingleLine
}};
formatter(text, CharsLimit, SingleLine) ->
{emqx_logger_textfmt,
%% Template prefixes each message with "clientid@peername " when both
%% are present, falling back to whichever one is available.
#{template =>
[time," [",level,"] ",
{clientid,
[{peername,
[clientid,"@",peername," "],
[clientid, " "]}],
[{peername,
[peername," "],
[]}]},
msg,"\n"],
chars_limit => CharsLimit,
single_line => SingleLine
}}.
%% utils

%% @doc Get a config value at `Key'. Binaries are converted to charlists
%% because the app-env consumers (ekka, kernel, ...) expect strings.
-spec(conf_get(string() | [string()], hocon:config()) -> term()).
conf_get(Key, Conf) ->
    unbinary(hocon_schema:get_value(Key, Conf)).

%% Same as conf_get/2, with a default for absent keys.
conf_get(Key, Conf, Default) ->
    unbinary(hocon_schema:get_value(Key, Conf, Default)).

%% Binaries become charlists; any other term passes through untouched.
unbinary(Bin) when is_binary(Bin) -> binary_to_list(Bin);
unbinary(Other) -> Other.
%% Drop proplist entries whose value is undefined.
filter(Opts) ->
[{K, V} || {K, V} <- Opts, V =/= undefined].

%% @private return a list of keys in a parent field
-spec(keys(string(), hocon:config()) -> [string()]).
keys(Parent, Conf) ->
[binary_to_list(B) || B <- maps:keys(conf_get(Parent, Conf, #{}))].
%% types

%% Shorthand constructors around hoconsc:t/1,2.
t(Type) -> hoconsc:t(Type).
%% Type with an app-env mapping (may be undefined) and a default.
t(Type, Mapping, Default) ->
hoconsc:t(Type, #{mapping => Mapping, default => Default}).
%% As t/3, additionally overridable via an OS environment variable.
t(Type, Mapping, Default, OverrideEnv) ->
hoconsc:t(Type, #{ mapping => Mapping
, default => Default
, override_env => OverrideEnv
}).
%% Reference to another struct defined in fields/1.
ref(Field) -> hoconsc:t(hoconsc:ref(Field)).
%% Type that also accepts the atom 'infinity'.
maybe_infinity(T) ->
maybe_sth(infinity, T, infinity).
maybe_infinity(T, Default) ->
maybe_sth(infinity, T, Default).
maybe_sth(What, Type, Default) ->
t(union([What, Type]), undefined, Default).
%% Per-strategy ekka discovery options, built from the "cluster.*" config.
options(static, Conf) ->
[{seeds, [to_atom(S) || S <- conf_get("cluster.static.seeds", Conf, [])]}];
options(mcast, Conf) ->
{ok, Addr} = inet:parse_address(conf_get("cluster.mcast.addr", Conf)),
{ok, Iface} = inet:parse_address(conf_get("cluster.mcast.iface", Conf)),
Ports = conf_get("cluster.mcast.ports", Conf),
%% NOTE(review): fallback ttl here is 1 while the schema default is 255;
%% the fallback only applies if the key is absent -- confirm intended.
[{addr, Addr}, {ports, Ports}, {iface, Iface},
{ttl, conf_get("cluster.mcast.ttl", Conf, 1)},
{loop, conf_get("cluster.mcast.loop", Conf, true)}];
options(dns, Conf) ->
[{name, conf_get("cluster.dns.name", Conf)},
{app, conf_get("cluster.dns.app", Conf)}];
options(etcd, Conf) ->
Namespace = "cluster.etcd.ssl",
%% The fun takes the config as argument but also closes over `Conf';
%% both are the same value at the single call site below.
SslOpts = fun(C) ->
Options = keys(Namespace, C),
lists:map(fun(Key) -> {to_atom(Key), conf_get([Namespace, Key], Conf)} end, Options) end,
[{server, conf_get("cluster.etcd.server", Conf)},
{prefix, conf_get("cluster.etcd.prefix", Conf, "emqxcl")},
{node_ttl, conf_get("cluster.etcd.node_ttl", Conf, 60)},
{ssl_options, filter(SslOpts(Conf))}];
options(k8s, Conf) ->
[{apiserver, conf_get("cluster.k8s.apiserver", Conf)},
{service_name, conf_get("cluster.k8s.service_name", Conf)},
{address_type, conf_get("cluster.k8s.address_type", Conf, ip)},
{app_name, conf_get("cluster.k8s.app_name", Conf)},
{namespace, conf_get("cluster.k8s.namespace", Conf)},
{suffix, conf_get("cluster.k8s.suffix", Conf, "")}];
options(manual, _Conf) ->
[].
%% Normalize an atom, charlist, or binary into an atom.
to_atom(Bin) when is_binary(Bin) ->
    binary_to_atom(Bin, utf8);
to_atom(Str) when is_list(Str) ->
    list_to_atom(Str);
to_atom(Atom) when is_atom(Atom) ->
    Atom.

View File

@ -0,0 +1,63 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% This module implements a gen_event handler which
%% swap-in replaces the default one from OTP.
%% The kill signal (sigterm) is captured so we can
%% perform graceful shutdown.
-module(emqx_machine_signal_handler).
-export([start/0, init/1, format_status/2,
handle_event/2, handle_call/2, handle_info/2,
terminate/2, code_change/3]).
-include_lib("emqx/include/logger.hrl").
%% Swap this handler in for OTP's erl_signal_handler on erl_signal_server.
start() ->
ok = gen_event:swap_sup_handler(
erl_signal_server,
{erl_signal_handler, []},
{?MODULE, []}).
%% swap_sup_handler passes {OldHandlerState, _} -- we keep no state of our own.
init({[], _}) -> {ok, #{}}.
%% sigterm triggers the graceful shutdown path (non-blocking, see
%% emqx_machine_terminator:graceful/0).
handle_event(sigterm, State) ->
?ULOG("Received terminate signal, shutting down now~n", []),
emqx_machine_terminator:graceful(),
{ok, State};
handle_event(Event, State) ->
%% delegate other events back to erl_signal_handler
%% erl_signal_handler does not make use of the State
%% so we can pass whatever from here
_ = erl_signal_handler:handle_event(Event, State),
{ok, State}.
handle_info(stop, State) ->
{ok, State};
handle_info(_Other, State) ->
{ok, State}.
handle_call(_Request, State) ->
{ok, ok, State}.
format_status(_Opt, [_Pdict,_S]) ->
ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
terminate(_Args, _State) ->
ok.

View File

@ -0,0 +1,48 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
%% @doc This supervisor manages workers which should never need a restart
%% due to config changes or when joining a cluster.
-module(emqx_machine_sup).
-behaviour(supervisor).
-export([ start_link/0
]).
-export([init/1]).
start_link() ->
supervisor:start_link({local, ?MODULE}, ?MODULE, []).
%% Two children: the global GC worker (always restarted) and the
%% terminator (transient: a normal exit is not restarted).
init([]) ->
GlobalGC = child_worker(emqx_global_gc, [], permanent),
Terminator = child_worker(emqx_machine_terminator, [], transient),
Children = [GlobalGC, Terminator],
SupFlags = #{strategy => one_for_one,
intensity => 100,
period => 10
},
{ok, {SupFlags, Children}}.
%% Build a standard worker child spec for module `M'.
child_worker(M, Args, Restart) ->
#{id => M,
start => {M, start_link, Args},
restart => Restart,
shutdown => 5000,
type => worker,
modules => [M]
}.

View File

@ -0,0 +1,107 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_machine_terminator).
-behaviour(gen_server).
-export([ start_link/0
, graceful/0
, graceful_wait/0
, is_running/0
]).
-export([init/1, format_status/2,
handle_cast/2, handle_call/3, handle_info/2,
terminate/2, code_change/3]).
-include_lib("emqx/include/logger.hrl").
-define(TERMINATOR, ?MODULE).
-define(DO_IT, graceful_shutdown).
%% @doc This API is called to shutdown the Erlang VM by RPC call from a remote shell node.
%% The shutdown of apps is delegated to a process instead of doing it in the RPC-spawned
%% process which has a remote group leader.
start_link() ->
{ok, _} = gen_server:start_link({local, ?TERMINATOR}, ?MODULE, [], []).
%% True when the terminator process is alive (used as the boot-ready flag).
is_running() -> is_pid(whereis(?TERMINATOR)).
%% @doc Call `emqx_machine_terminator' to stop applications
%% then call init:stop() to stop the beam.
graceful() ->
try
_ = gen_server:call(?TERMINATOR, ?DO_IT, infinity)
catch
_ : _ ->
%% failed to notify terminator, probably due to not started yet
%% or node is going down, either case, the caller
%% should issue a shutdown to be sure
%% NOTE: not exit_loop here because we do not want to
%% block erl_signal_server
?ELOG("Shutdown before node is ready?~n", []),
init:stop()
end,
ok.
%% @doc Shutdown the Erlang VM and wait indefinitely.
graceful_wait() ->
ok = graceful(),
exit_loop().
%% Keep requesting VM stop until the node actually goes down; this process
%% never returns.
exit_loop() ->
timer:sleep(100),
init:stop(),
exit_loop().
init(_) ->
%% Install the sigterm-capturing signal handler once we are up.
ok = emqx_machine_signal_handler:start(),
{ok, #{}}.
handle_info(_, State) ->
{noreply, State}.
handle_cast(_Cast, State) ->
{noreply, State}.
%% Stop all apps; whatever happens, the VM is stopped afterwards (`after').
handle_call(?DO_IT, _From, State) ->
try
emqx_machine:stop_apps(normal)
catch
C : E : St ->
Apps = [element(1, A) || A <- application:which_applications()],
?SLOG(error, #{msg => "failed_to_stop_apps",
exception => C,
reason => E,
stacktrace => St,
remaining_apps => Apps
})
after
init:stop()
end,
{reply, ok, State};
handle_call(_Call, _From, State) ->
{noreply, State}.
format_status(_Opt, [_Pdict,_S]) ->
ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
terminate(_Args, _State) ->
ok.

View File

@ -0,0 +1,41 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2020-2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_machine_SUITE).
-compile(export_all).
-compile(nowarn_export_all).
-include_lib("emqx/include/emqx.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
all() -> emqx_ct:all(?MODULE).
init_per_suite(Config) ->
emqx_ct_helpers:start_apps([]),
Config.
end_per_suite(_Config) ->
emqx_ct_helpers:stop_apps([]).
%% Stopping the reboot apps must bring emqx down, and ensure_apps_started/0
%% must bring it back; pattern matches act as assertions.
t_shutdown_reboot(_Config) ->
emqx_machine:stop_apps(normal),
false = emqx:is_running(node()),
emqx_machine:ensure_apps_started(),
true = emqx:is_running(node()),
ok = emqx_machine:stop_apps(for_test),
false = emqx:is_running(node()).

View File

@ -0,0 +1,60 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2021 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------
-module(emqx_machine_tests).
-include_lib("eunit/include/eunit.hrl").
%% Generator: both dependency sets must sort without error.
sorted_reboot_apps_test_() ->
Apps1 = [{1, [2, 3, 4]},
{2, [3, 4]}
],
Apps2 = [{1, [2, 3, 4]},
{2, [3, 4]},
{5, [4, 3, 2, 1, 1]}
],
[fun() -> check_order(Apps1) end,
fun() -> check_order(Apps2) end
].
sorted_reboot_apps_cycle_test() ->
Apps = [{1,[2]},{2, [1,3]}],
?assertError({circular_application_dependency, [[1, 2, 1], [2, 1, 2]]},
check_order(Apps)).
%% Verify that sorted_reboot_apps/1 returns every app exactly once and that
%% no app precedes one of its dependencies.
check_order(Apps) ->
AllApps = lists:usort(lists:append([[A | Deps] || {A, Deps} <- Apps])),
Sorted = emqx_machine:sorted_reboot_apps(Apps),
case length(AllApps) =:= length(Sorted) of
true -> ok;
false -> error({AllApps, Sorted})
end,
%% NOTE: foldr numbers from the tail, so the FIRST app in Sorted gets the
%% LARGEST index -- i.e. indices are in reverse list order. Keep this in
%% mind when reading is_sorted_before/3 below.
{_, SortedWithIndex} =
lists:foldr(fun(A, {I, Acc}) -> {I + 1, [{A, I} | Acc]} end, {1, []}, Sorted),
do_check_order(Apps, SortedWithIndex).
do_check_order([], _) -> ok;
%% Because of the reversed indices, is_sorted_before(Dep, A, ...) is true
%% when Dep appears AFTER A in start order -- so any match is a violation
%% and the passing case is the empty list.
do_check_order([{A, Deps} | Rest], Sorted) ->
case lists:filter(fun(Dep) -> is_sorted_before(Dep, A, Sorted) end, Deps) of
[] -> do_check_order(Rest, Sorted);
Bad -> throw({A, Bad})
end.
%% "before" in the reversed index space: IndexA < IndexB means A appears
%% later in the sorted list than B (see the foldr note above).
is_sorted_before(A, B, Sorted) ->
{A, IndexA} = lists:keyfind(A, 1, Sorted),
{B, IndexB} = lists:keyfind(B, 1, Sorted),
IndexA < IndexB.

View File

@ -3,7 +3,7 @@
{vsn, "5.0.0"}, % strict semver, bump manually!
{modules, []},
{registered, [emqx_management_sup]},
{applications, [kernel,stdlib,minirest]},
{applications, [kernel,stdlib,minirest,emqx]},
{mod, {emqx_mgmt_app,[]}},
{env, []},
{licenses, ["Apache-2.0"]},

View File

@ -149,7 +149,7 @@ node_info(Node) when Node =:= node() ->
max_fds => proplists:get_value(max_fds, lists:usort(lists:flatten(erlang:system_info(check_io)))),
connections => ets:info(emqx_channel, size),
node_status => 'Running',
uptime => iolist_to_binary(proplists:get_value(uptime, BrokerInfo)),
uptime => proplists:get_value(uptime, BrokerInfo),
version => iolist_to_binary(proplists:get_value(version, BrokerInfo))
};
node_info(Node) ->
@ -502,10 +502,10 @@ listener_id_filter(Identifier, Listeners) ->
-spec manage_listener(Operation :: start_listener|stop_listener|restart_listener, Param :: map()) ->
ok | {error, Reason :: term()}.
manage_listener(Operation, #{identifier := Identifier, node := Node}) when Node =:= node()->
erlang:apply(emqx_listeners, Operation, [Identifier]);
manage_listener(Operation, #{id := ID, node := Node}) when Node =:= node()->
erlang:apply(emqx_listeners, Operation, [ID]);
manage_listener(Operation, Param = #{node := Node}) ->
rpc_call(Node, restart_listener, [Operation, Param]).
rpc_call(Node, manage_listener, [Operation, Param]).
%%--------------------------------------------------------------------
%% Get Alarms

View File

@ -87,8 +87,8 @@ node_query(Node, Params, {Tab, QsSchema}, QueryFun) ->
{_, Rows} = do_query(Node, Qs, QueryFun, Start, Limit+1),
Meta = #{page => Page, limit => Limit},
NMeta = case CodCnt =:= 0 of
true -> Meta#{count => count(Tab), hasnext => length(Rows) > Limit};
_ -> Meta#{count => -1, hasnext => length(Rows) > Limit}
true -> Meta#{count => count(Tab)};
_ -> Meta#{count => length(Rows)}
end,
#{meta => NMeta, data => lists:sublist(Rows, Limit)}.
@ -120,8 +120,8 @@ cluster_query(Params, {Tab, QsSchema}, QueryFun) ->
Rows = do_cluster_query(Nodes, Qs, QueryFun, Start, Limit+1, []),
Meta = #{page => Page, limit => Limit},
NMeta = case CodCnt =:= 0 of
true -> Meta#{count => count(Tab, Nodes), hasnext => length(Rows) > Limit};
_ -> Meta#{count => -1, hasnext => length(Rows) > Limit}
true -> Meta#{count => count(Tab, Nodes)};
_ -> Meta#{count => length(Rows)}
end,
#{meta => NMeta, data => lists:sublist(Rows, Limit)}.

View File

@ -32,6 +32,7 @@
, subscriptions/2
, authz_cache/2
, subscribe/2
, unsubscribe/2
, subscribe_batch/2]).
-export([ query/3
@ -41,7 +42,7 @@
-export([do_subscribe/3]).
-define(CLIENT_QS_SCHEMA, {emqx_channel_info,
[ {<<"clientid">>, binary}
[ {<<"node">>, atom}
, {<<"username">>, binary}
, {<<"zone">>, atom}
, {<<"ip_address">>, ip}
@ -70,7 +71,8 @@ apis() ->
, client_api()
, clients_authz_cache_api()
, clients_subscriptions_api()
, subscribe_api()].
, subscribe_api()
, unsubscribe_api()].
schemas() ->
Client = #{
@ -211,22 +213,98 @@ schemas() ->
}
}
},
Subscription = #{
subscription => #{
type => object,
properties => #{
topic => #{
type => string},
qos => #{
type => integer,
enum => [0,1,2]}}}
},
[Client, AuthzCache, Subscription].
[Client, AuthzCache].
clients_api() ->
Metadata = #{
get => #{
description => <<"List clients">>,
parameters => [
#{
name => node,
in => query,
required => false,
schema => #{type => string}
},
#{
name => username,
in => query,
required => false,
schema => #{type => string}
},
#{
name => zone,
in => query,
required => false,
schema => #{type => string}
},
#{
name => ip_address,
in => query,
required => false,
schema => #{type => string}
},
#{
name => conn_state,
in => query,
required => false,
schema => #{type => string}
},
#{
name => clean_start,
in => query,
required => false,
schema => #{type => string}
},
#{
name => proto_name,
in => query,
required => false,
schema => #{type => string}
},
#{
name => proto_ver,
in => query,
required => false,
schema => #{type => string}
},
#{
name => like_clientid,
in => query,
required => false,
schema => #{type => string}
},
#{
name => like_username,
in => query,
required => false,
schema => #{type => string}
},
#{
name => gte_created_at,
in => query,
required => false,
schema => #{type => string}
},
#{
name => lte_created_at,
in => query,
required => false,
schema => #{type => string}
},
#{
name => gte_connected_at,
in => query,
required => false,
schema => #{type => string}
},
#{
name => lte_connected_at,
in => query,
required => false,
schema => #{type => string}
}
],
responses => #{
<<"200">> => emqx_mgmt_util:response_array_schema(<<"List clients 200 OK">>, client)}}},
{"/clients", Metadata, clients}.
@ -284,6 +362,15 @@ clients_authz_cache_api() ->
{"/clients/:clientid/authz_cache", Metadata, authz_cache}.
clients_subscriptions_api() ->
SubscriptionSchema = #{
type => object,
properties => #{
topic => #{
type => string},
qos => #{
type => integer,
enum => [0,1,2]}}
},
Metadata = #{
get => #{
description => <<"Get client subscriptions">>,
@ -294,10 +381,33 @@ clients_subscriptions_api() ->
required => true
}],
responses => #{
<<"200">> => emqx_mgmt_util:response_array_schema(<<"Get client subscriptions">>, subscription)}}
<<"200">> =>
emqx_mgmt_util:response_array_schema(<<"Get client subscriptions">>, SubscriptionSchema)}}
},
{"/clients/:clientid/subscriptions", Metadata, subscriptions}.
unsubscribe_api() ->
Metadata = #{
post => #{
description => <<"Unsubscribe">>,
parameters => [
#{
name => clientid,
in => path,
schema => #{type => string},
required => true
}
],
'requestBody' => emqx_mgmt_util:request_body_schema(#{
type => object,
properties => #{
topic => #{
type => string,
description => <<"Topic">>}}}),
responses => #{
<<"404">> => emqx_mgmt_util:response_error_schema(<<"Client id not found">>),
<<"200">> => emqx_mgmt_util:response_schema(<<"Unsubscribe ok">>)}}},
{"/clients/:clientid/unsubscribe", Metadata, unsubscribe}.
subscribe_api() ->
Metadata = #{
post => #{
@ -321,32 +431,14 @@ subscribe_api() ->
description => <<"QoS">>}}}),
responses => #{
<<"404">> => emqx_mgmt_util:response_error_schema(<<"Client id not found">>),
<<"200">> => emqx_mgmt_util:response_schema(<<"Subscribe ok">>)}},
delete => #{
description => <<"Unsubscribe">>,
parameters => [
#{
name => clientid,
in => path,
schema => #{type => string},
required => true
},
#{
name => topic,
in => query,
schema => #{type => string},
required => true
}
],
responses => #{
<<"404">> => emqx_mgmt_util:response_error_schema(<<"Client id not found">>),
<<"200">> => emqx_mgmt_util:response_schema(<<"Unsubscribe ok">>)}}},
<<"200">> => emqx_mgmt_util:response_schema(<<"Subscribe ok">>)}}},
{"/clients/:clientid/subscribe", Metadata, subscribe}.
%%%==============================================================================================
%% parameters trans
clients(get, _Request) ->
list(#{}).
clients(get, Request) ->
Params = cowboy_req:parse_qs(Request),
list(Params).
client(get, Request) ->
ClientID = cowboy_req:binding(clientid, Request),
@ -370,11 +462,13 @@ subscribe(post, Request) ->
TopicInfo = emqx_json:decode(Body, [return_maps]),
Topic = maps:get(<<"topic">>, TopicInfo),
Qos = maps:get(<<"qos">>, TopicInfo, 0),
subscribe(#{clientid => ClientID, topic => Topic, qos => Qos});
subscribe(#{clientid => ClientID, topic => Topic, qos => Qos}).
subscribe(delete, Request) ->
unsubscribe(post, Request) ->
ClientID = cowboy_req:binding(clientid, Request),
#{topic := Topic} = cowboy_req:match_qs([topic], Request),
{ok, Body, _} = cowboy_req:read_body(Request),
TopicInfo = emqx_json:decode(Body, [return_maps]),
Topic = maps:get(<<"topic">>, TopicInfo),
unsubscribe(#{clientid => ClientID, topic => Topic}).
%% TODO: batch
@ -402,7 +496,7 @@ subscriptions(get, Request) ->
%% api apply
list(Params) ->
Response = emqx_mgmt_api:cluster_query(maps:to_list(Params), ?CLIENT_QS_SCHEMA, ?query_fun),
Response = emqx_mgmt_api:cluster_query(Params, ?CLIENT_QS_SCHEMA, ?query_fun),
{200, Response}.
lookup(#{clientid := ClientID}) ->
@ -495,6 +589,7 @@ format_channel_info({_, ClientInfo, ClientStats}) ->
, conn_props
, peercert
, sockstate
, subscriptions
, receive_maximum
, protocol
, is_superuser

View File

@ -35,7 +35,7 @@ api_spec() ->
{
[
listeners_api(),
restart_listeners_api(),
listener_api(),
nodes_listeners_api(),
nodes_listener_api(),
manage_listeners_api(),
@ -53,21 +53,21 @@ listener_schema() ->
type => string,
description => <<"Node">>,
example => node()},
identifier => #{
id => #{
type => string,
description => <<"Identifier">>},
acceptors => #{
type => integer,
description => <<"Number of Acceptor proce">>},
description => <<"Number of Acceptor process">>},
max_conn => #{
type => integer,
description => <<"Maximum number of allowed connection">>},
type => #{
type => string,
description => <<"Plugin decription">>},
description => <<"Listener type">>},
listen_on => #{
type => string,
description => <<"Litening port">>},
description => <<"Listening port">>},
running => #{
type => boolean,
description => <<"Open or close">>},
@ -84,24 +84,24 @@ listeners_api() ->
emqx_mgmt_util:response_array_schema(<<"List all listeners">>, listener)}}},
{"/listeners", Metadata, listeners}.
restart_listeners_api() ->
listener_api() ->
Metadata = #{
get => #{
description => <<"List listeners by listener ID">>,
parameters => [param_path_identifier()],
parameters => [param_path_id()],
responses => #{
<<"404">> =>
emqx_mgmt_util:response_error_schema(<<"Listener id not found">>, ['BAD_LISTENER_ID']),
<<"200">> =>
emqx_mgmt_util:response_array_schema(<<"List listener info ok">>, listener)}}},
{"/listeners/:identifier", Metadata, listener}.
{"/listeners/:id", Metadata, listener}.
manage_listeners_api() ->
Metadata = #{
get => #{
description => <<"Restart listeners in cluster">>,
parameters => [
param_path_identifier(),
param_path_id(),
param_path_operation()],
responses => #{
<<"500">> =>
@ -114,15 +114,15 @@ manage_listeners_api() ->
['BAD_REQUEST']),
<<"200">> =>
emqx_mgmt_util:response_schema(<<"Operation success">>)}}},
{"/listeners/:identifier/:operation", Metadata, manage_listeners}.
{"/listeners/:id/:operation", Metadata, manage_listeners}.
manage_nodes_listeners_api() ->
Metadata = #{
get => #{
put => #{
description => <<"Restart listeners in cluster">>,
parameters => [
param_path_node(),
param_path_identifier(),
param_path_id(),
param_path_operation()],
responses => #{
<<"500">> =>
@ -135,20 +135,20 @@ manage_nodes_listeners_api() ->
['BAD_REQUEST']),
<<"200">> =>
emqx_mgmt_util:response_schema(<<"Operation success">>)}}},
{"/node/:node/listeners/:identifier/:operation", Metadata, manage_nodes_listeners}.
{"/node/:node/listeners/:id/:operation", Metadata, manage_nodes_listeners}.
nodes_listeners_api() ->
Metadata = #{
get => #{
description => <<"Get listener info in one node">>,
parameters => [param_path_node(), param_path_identifier()],
parameters => [param_path_node(), param_path_id()],
responses => #{
<<"404">> =>
emqx_mgmt_util:response_error_schema(<<"Node name or listener id not found">>,
['BAD_NODE_NAME', 'BAD_LISTENER_ID']),
<<"200">> =>
emqx_mgmt_util:response_schema(<<"Get listener info ok">>, listener)}}},
{"/nodes/:node/listeners/:identifier", Metadata, node_listener}.
{"/nodes/:node/listeners/:id", Metadata, node_listener}.
nodes_listener_api() ->
Metadata = #{
@ -172,10 +172,10 @@ param_path_node() ->
example => node()
}.
param_path_identifier() ->
param_path_id() ->
{Example,_} = hd(emqx_mgmt:list_listeners(node())),
#{
name => identifier,
name => id,
in => path,
schema => #{type => string},
required => true,
@ -199,8 +199,8 @@ listeners(get, _Request) ->
list().
listener(get, Request) ->
ListenerID = binary_to_atom(cowboy_req:binding(identifier, Request)),
get_listeners(#{identifier => ListenerID}).
ID = binary_to_atom(cowboy_req:binding(id, Request)),
get_listeners(#{id => ID}).
node_listeners(get, Request) ->
Node = binary_to_atom(cowboy_req:binding(node, Request)),
@ -208,19 +208,19 @@ node_listeners(get, Request) ->
node_listener(get, Request) ->
Node = binary_to_atom(cowboy_req:binding(node, Request)),
ListenerID = binary_to_atom(cowboy_req:binding(identifier, Request)),
get_listeners(#{node => Node, identifier => ListenerID}).
ID = binary_to_atom(cowboy_req:binding(id, Request)),
get_listeners(#{node => Node, id => ID}).
manage_listeners(_, Request) ->
Identifier = binary_to_atom(cowboy_req:binding(identifier, Request)),
ID = binary_to_atom(cowboy_req:binding(id, Request)),
Operation = binary_to_atom(cowboy_req:binding(operation, Request)),
manage(Operation, #{identifier => Identifier}).
manage(Operation, #{id => ID}).
manage_nodes_listeners(_, Request) ->
Node = binary_to_atom(cowboy_req:binding(node, Request)),
Identifier = binary_to_atom(cowboy_req:binding(identifier, Request)),
ID = binary_to_atom(cowboy_req:binding(id, Request)),
Operation = binary_to_atom(cowboy_req:binding(operation, Request)),
manage(Operation, #{identifier => Identifier, node => Node}).
manage(Operation, #{id => ID, node => Node}).
%%%==============================================================================================
@ -231,8 +231,8 @@ list() ->
get_listeners(Param) ->
case list_listener(Param) of
{error, not_found} ->
Identifier = maps:get(identifier, Param),
Reason = list_to_binary(io_lib:format("Error listener identifier ~p", [Identifier])),
ID = maps:get(id, Param),
Reason = list_to_binary(io_lib:format("Error listener id ~p", [ID])),
{404, #{code => 'BAD_LISTENER_ID', message => Reason}};
{error, nodedown} ->
Node = maps:get(node, Param),
@ -240,8 +240,8 @@ get_listeners(Param) ->
Response = #{code => 'BAD_NODE_NAME', message => Reason},
{404, Response};
[] ->
Identifier = maps:get(identifier, Param),
Reason = list_to_binary(io_lib:format("Error listener identifier ~p", [Identifier])),
ID = maps:get(id, Param),
Reason = list_to_binary(io_lib:format("Error listener id ~p", [ID])),
{404, #{code => 'BAD_LISTENER_ID', message => Reason}};
Data ->
{200, Data}
@ -252,8 +252,8 @@ manage(Operation0, Param) ->
Operation = maps:get(Operation0, OperationMap),
case list_listener(Param) of
{error, not_found} ->
Identifier = maps:get(identifier, Param),
Reason = list_to_binary(io_lib:format("Error listener identifier ~p", [Identifier])),
ID = maps:get(id, Param),
Reason = list_to_binary(io_lib:format("Error listener id ~p", [ID])),
{404, #{code => 'BAD_LISTENER_ID', message => Reason}};
{error, nodedown} ->
Node = maps:get(node, Param),
@ -261,8 +261,8 @@ manage(Operation0, Param) ->
Response = #{code => 'BAD_NODE_NAME', message => Reason},
{404, Response};
[] ->
Identifier = maps:get(identifier, Param),
Reason = list_to_binary(io_lib:format("Error listener identifier ~p", [Identifier])),
ID = maps:get(id, Param),
Reason = list_to_binary(io_lib:format("Error listener id ~p", [ID])),
{404, #{code => 'RESOURCE_NOT_FOUND', message => Reason}};
ListenersOrSingleListener ->
manage_(Operation, ListenersOrSingleListener)
@ -278,14 +278,14 @@ manage_(Operation, Listeners) when is_list(Listeners) ->
Errors ->
case lists:filter(fun({error, {already_started, _}}) -> false; (_) -> true end, Results) of
[] ->
Identifier = maps:get(identifier, hd(Listeners)),
Message = list_to_binary(io_lib:format("Already Started: ~s", [Identifier])),
ID = maps:get(id, hd(Listeners)),
Message = list_to_binary(io_lib:format("Already Started: ~s", [ID])),
{400, #{code => 'BAD_REQUEST', message => Message}};
_ ->
case lists:filter(fun({error,not_found}) -> false; (_) -> true end, Results) of
[] ->
Identifier = maps:get(identifier, hd(Listeners)),
Message = list_to_binary(io_lib:format("Already Stoped: ~s", [Identifier])),
ID = maps:get(id, hd(Listeners)),
Message = list_to_binary(io_lib:format("Already Stopped: ~s", [ID])),
{400, #{code => 'BAD_REQUEST', message => Message}};
_ ->
Reason = list_to_binary(io_lib:format("~p", [Errors])),
@ -299,9 +299,9 @@ manage_(Operation, Listeners) when is_list(Listeners) ->
list_listener(Params) ->
format(list_listener_(Params)).
list_listener_(#{node := Node, identifier := Identifier}) ->
list_listener_(#{node := Node, id := Identifier}) ->
emqx_mgmt:get_listener(Node, Identifier);
list_listener_(#{identifier := Identifier}) ->
list_listener_(#{id := Identifier}) ->
emqx_mgmt:list_listeners_by_id(Identifier);
list_listener_(#{node := Node}) ->
emqx_mgmt:list_listeners(Node);
@ -314,9 +314,9 @@ format(Listeners) when is_list(Listeners) ->
format({error, Reason}) ->
{error, Reason};
format({Identifier, Conf}) ->
format({ID, Conf}) ->
#{
identifier => Identifier,
id => ID,
node => maps:get(node, Conf),
acceptors => maps:get(acceptors, Conf),
max_conn => maps:get(max_connections, Conf),

View File

@ -82,8 +82,8 @@ node_schema() ->
type => integer,
description => <<"Number of used processes">>},
uptime => #{
type => string,
description => <<"EMQ X Broker runtime">>},
type => integer,
description => <<"EMQ X Broker runtime, millisecond">>},
version => #{
type => string,
description => <<"EMQ X Broker version">>},

View File

@ -26,36 +26,32 @@
api_spec() ->
{
[publish_api(), publish_batch_api()],
[publish_api(), publish_bulk_api()],
[message_schema()]
}.
publish_api() ->
Schema = #{
type => object,
properties => maps:without([id], message_properties())
},
MeteData = #{
post => #{
description => <<"Publish">>,
'requestBody' => #{
content => #{
'application/json' => #{
schema => #{
type => object,
properties => maps:with([id], message_properties())}}}},
'requestBody' => emqx_mgmt_util:request_body_schema(Schema),
responses => #{
<<"200">> => emqx_mgmt_util:response_schema(<<"publish ok">>, message)}}},
{"/publish", MeteData, publish}.
publish_batch_api() ->
publish_bulk_api() ->
Schema = #{
type => object,
properties => maps:without([id], message_properties())
},
MeteData = #{
post => #{
description => <<"publish">>,
'requestBody' => #{
content => #{
'application/json' => #{
schema => #{
type => array,
items => #{
type => object,
properties => maps:with([id], message_properties())}}}}},
'requestBody' => emqx_mgmt_util:request_body_array_schema(Schema),
responses => #{
<<"200">> => emqx_mgmt_util:response_array_schema(<<"publish ok">>, message)}}},
{"/publish/bulk", MeteData, publish_batch}.

View File

@ -101,8 +101,8 @@ t_clients(_) ->
?assertEqual(AfterSubTopic, Topic),
?assertEqual(AfterSubQos, Qos),
%% delete /clients/:clientid/subscribe
UnSubscribeQuery = "topic=" ++ binary_to_list(Topic),
{ok, _} = emqx_mgmt_api_test_util:request_api(delete, SubscribePath, UnSubscribeQuery, AuthHeader),
%% post /clients/:clientid/unsubscribe
UnSubscribePath = emqx_mgmt_api_test_util:api_path(["clients", binary_to_list(ClientId1), "unsubscribe"]),
{ok, _} = emqx_mgmt_api_test_util:request_api(post, UnSubscribePath, "", AuthHeader, SubscribeBody),
timer:sleep(100),
?assertEqual([], emqx_mgmt:lookup_subscriptions(Client1)).

View File

@ -49,36 +49,41 @@ t_list_node_listeners(_) ->
t_get_listeners(_) ->
LocalListener = emqx_mgmt_api_listeners:format(hd(emqx_mgmt:list_listeners())),
Identifier = maps:get(identifier, LocalListener),
Path = emqx_mgmt_api_test_util:api_path(["listeners", atom_to_list(Identifier)]),
ID = maps:get(id, LocalListener),
Path = emqx_mgmt_api_test_util:api_path(["listeners", atom_to_list(ID)]),
get_api(Path).
t_get_node_listeners(_) ->
LocalListener = emqx_mgmt_api_listeners:format(hd(emqx_mgmt:list_listeners())),
Identifier = maps:get(identifier, LocalListener),
ID = maps:get(id, LocalListener),
Path = emqx_mgmt_api_test_util:api_path(
["nodes", atom_to_binary(node(), utf8), "listeners", atom_to_list(Identifier)]),
["nodes", atom_to_binary(node(), utf8), "listeners", atom_to_list(ID)]),
get_api(Path).
t_stop_listener(_) ->
LocalListener = emqx_mgmt_api_listeners:format(hd(emqx_mgmt:list_listeners())),
Identifier = maps:get(identifier, LocalListener),
Path = emqx_mgmt_api_test_util:api_path(["listeners", atom_to_list(Identifier), "stop"]),
t_manage_listener(_) ->
ID = "default:mqtt_tcp",
manage_listener(ID, "stop", false),
manage_listener(ID, "start", true),
manage_listener(ID, "restart", true).
manage_listener(ID, Operation, Running) ->
Path = emqx_mgmt_api_test_util:api_path(["listeners", ID, Operation]),
{ok, _} = emqx_mgmt_api_test_util:request_api(get, Path),
GetPath = emqx_mgmt_api_test_util:api_path(["listeners", atom_to_list(Identifier)]),
timer:sleep(500),
GetPath = emqx_mgmt_api_test_util:api_path(["listeners", ID]),
{ok, ListenersResponse} = emqx_mgmt_api_test_util:request_api(get, GetPath),
Listeners = emqx_json:decode(ListenersResponse, [return_maps]),
[listener_stats(Listener, false) || Listener <- Listeners].
[listener_stats(Listener, Running) || Listener <- Listeners].
get_api(Path) ->
{ok, ListenersData} = emqx_mgmt_api_test_util:request_api(get, Path),
LocalListeners = emqx_mgmt_api_listeners:format(emqx_mgmt:list_listeners()),
case emqx_json:decode(ListenersData, [return_maps]) of
[Listener] ->
Identifier = binary_to_atom(maps:get(<<"identifier">>, Listener), utf8),
ID = binary_to_atom(maps:get(<<"id">>, Listener), utf8),
Filter =
fun(Local) ->
maps:get(identifier, Local) =:= Identifier
maps:get(id, Local) =:= ID
end,
LocalListener = hd(lists:filter(Filter, LocalListeners)),
comparison_listener(LocalListener, Listener);
@ -86,28 +91,28 @@ get_api(Path) ->
?assertEqual(erlang:length(LocalListeners), erlang:length(Listeners)),
Fun =
fun(LocalListener) ->
Identifier = maps:get(identifier, LocalListener),
IdentifierBinary = atom_to_binary(Identifier, utf8),
ID = maps:get(id, LocalListener),
IDBinary = atom_to_binary(ID, utf8),
Filter =
fun(Listener) ->
maps:get(<<"identifier">>, Listener) =:= IdentifierBinary
maps:get(<<"id">>, Listener) =:= IDBinary
end,
Listener = hd(lists:filter(Filter, Listeners)),
comparison_listener(LocalListener, Listener)
end,
lists:foreach(Fun, LocalListeners);
Listener when is_map(Listener) ->
Identifier = binary_to_atom(maps:get(<<"identifier">>, Listener), utf8),
ID = binary_to_atom(maps:get(<<"id">>, Listener), utf8),
Filter =
fun(Local) ->
maps:get(identifier, Local) =:= Identifier
maps:get(id, Local) =:= ID
end,
LocalListener = hd(lists:filter(Filter, LocalListeners)),
comparison_listener(LocalListener, Listener)
end.
comparison_listener(Local, Response) ->
?assertEqual(maps:get(identifier, Local), binary_to_atom(maps:get(<<"identifier">>, Response))),
?assertEqual(maps:get(id, Local), binary_to_atom(maps:get(<<"id">>, Response))),
?assertEqual(maps:get(node, Local), binary_to_atom(maps:get(<<"node">>, Response))),
?assertEqual(maps:get(acceptors, Local), maps:get(<<"acceptors">>, Response)),
?assertEqual(maps:get(max_conn, Local), maps:get(<<"max_conn">>, Response)),

View File

@ -2,7 +2,7 @@
[{description, "EMQ X Modules"},
{vsn, "5.0.0"},
{modules, []},
{applications, [kernel,stdlib]},
{applications, [kernel,stdlib,emqx]},
{mod, {emqx_modules_app, []}},
{registered, [emqx_modules_sup]},
{env, []}

View File

@ -3,7 +3,7 @@
{vsn, "5.0.0"}, % strict semver, bump manually!
{modules, []},
{registered, [emqx_prometheus_sup]},
{applications, [kernel,stdlib,prometheus]},
{applications, [kernel,stdlib,prometheus,emqx]},
{mod, {emqx_prometheus_app,[]}},
{env, []},
{licenses, ["Apache-2.0"]},

View File

@ -8,7 +8,8 @@
stdlib,
gproc,
hocon,
jsx
jsx,
emqx
]},
{env,[]},
{modules, []},

View File

@ -3,7 +3,7 @@
{vsn, "5.0.0"}, % strict semver, bump manually!
{modules, []},
{registered, [emqx_retainer_sup]},
{applications, [kernel,stdlib]},
{applications, [kernel,stdlib,emqx]},
{mod, {emqx_retainer_app,[]}},
{env, []},
{licenses, ["Apache-2.0"]},

View File

@ -3,7 +3,7 @@
{vsn, "5.0.0"},
{registered, []},
{applications,
[kernel,stdlib]},
[kernel,stdlib,emqx]},
{env,[]},
{modules, []},
{licenses, ["Apache 2.0"]},

View File

@ -6,7 +6,8 @@
{applications,
[kernel,
stdlib,
estatsd
estatsd,
emqx
]},
{env,[]},
{modules, []},

Some files were not shown because too many files have changed in this diff Show More