Merge branch 'emqx:master' into feat_round_robin_per_node

Benjamin Krenn 2022-08-04 11:02:51 +02:00 committed by GitHub
commit 5fa2a2aa44
196 changed files with 3957 additions and 1653 deletions

View File

@ -26,6 +26,8 @@ up:
-f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
up -d --build
down:
@ -39,6 +41,8 @@ down:
-f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
down
ct:

View File

@ -1,8 +1,8 @@
version: '3.9'
services:
redis_server:
container_name: redis
redis_sentinel_server:
container_name: redis-sentinel
image: redis:${REDIS_TAG}
volumes:
- ./redis/:/data/conf

View File

@ -1,8 +1,8 @@
version: '3.9'
services:
redis_server:
container_name: redis
redis_sentinel_server_tls:
container_name: redis-sentinel-tls
image: redis:${REDIS_TAG}
volumes:
- ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt

View File

@ -30,24 +30,12 @@ defaults
##----------------------------------------------------------------
## API
##----------------------------------------------------------------
frontend emqx_mgmt
mode tcp
option tcplog
bind *:18083
default_backend emqx_mgmt_back
frontend emqx_dashboard
mode tcp
option tcplog
bind *:18083
default_backend emqx_dashboard_back
backend emqx_mgmt_back
mode http
# balance static-rr
server emqx-1 node1.emqx.io:18083
server emqx-2 node2.emqx.io:18083
backend emqx_dashboard_back
mode http
# balance static-rr

View File

@ -16,11 +16,15 @@ case $key in
shift # past argument
shift # past value
;;
-t|--tls-enabled)
-t)
tls="$2"
shift # past argument
shift # past value
;;
--tls-enabled)
tls=1
shift # past argument
;;
*)
shift # past argument
;;

View File

@ -27,4 +27,3 @@ ok
+ POST `/counter`
Increment the counter by one

View File

@ -3,7 +3,7 @@
{erl_opts, [debug_info]}.
{deps,
[
{minirest, {git, "https://github.com/emqx/minirest.git", {tag, "0.3.6"}}}
{minirest, {git, "https://github.com/emqx/minirest.git", {tag, "1.3.6"}}}
]}.
{shell, [

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, http_server,
[{description, "An OTP application"},
{vsn, "0.1.0"},
[{description, "An HTTP server application"},
{vsn, "0.2.0"},
{registered, []},
% {mod, {http_server_app, []}},
{modules, []},

View File

@ -10,51 +10,107 @@
stop/0
]).
-rest_api(#{
name => get_counter,
method => 'GET',
path => "/counter",
func => get_counter,
descr => "Check counter"
}).
-rest_api(#{
name => add_counter,
method => 'POST',
path => "/counter",
func => add_counter,
descr => "Counter plus one"
}).
-behavior(minirest_api).
-export([
get_counter/2,
add_counter/2
]).
-export([api_spec/0]).
-export([counter/2]).
api_spec() ->
{
[counter_api()],
[]
}.
counter_api() ->
MetaData = #{
get => #{
description => "Get counter",
summary => "Get counter",
responses => #{
200 => #{
content => #{
'application/json' =>
#{
type => object,
properties => #{
code => #{type => integer, example => 0},
data => #{type => integer, example => 0}
}
}
}
}
}
},
post => #{
description => "Add counter",
summary => "Add counter",
'requestBody' => #{
content => #{
'application/json' => #{
schema =>
#{
type => object,
properties => #{
payload => #{type => string, example => <<"sample payload">>},
id => #{type => integer, example => 0}
}
}
}
}
},
responses => #{
200 => #{
content => #{
'application/json' =>
#{
type => object,
properties => #{
code => #{type => integer, example => 0}
}
}
}
}
}
}
},
{"/counter", MetaData, counter}.
counter(get, _Params) ->
V = ets:info(relup_test_message, size),
{200, #{<<"content-type">> => <<"text/plain">>}, #{<<"code">> => 0, <<"data">> => V}};
counter(post, #{body := Params}) ->
case Params of
#{<<"payload">> := _, <<"id">> := Id} ->
ets:insert(relup_test_message, {Id, maps:remove(<<"id">>, Params)}),
{200, #{<<"code">> => 0}};
_ ->
io:format("discarded: ~p\n", [Params]),
{200, #{<<"code">> => -1}}
end.
start() ->
application:ensure_all_started(minirest),
_ = spawn(fun ets_owner/0),
Handlers = [{"/", minirest:handler(#{modules => [?MODULE]})}],
Dispatch = [{"/[...]", minirest, Handlers}],
minirest:start_http(?MODULE, #{socket_opts => [inet, {port, 7077}]}, Dispatch).
RanchOptions = #{
max_connections => 512,
num_acceptors => 4,
socket_opts => [{send_timeout, 5000}, {port, 7077}, {backlog, 512}]
},
Minirest = #{
base_path => "",
modules => [?MODULE],
dispatch => [{"/[...]", ?MODULE, []}],
protocol => http,
ranch_options => RanchOptions,
middlewares => [cowboy_router, cowboy_handler]
},
Res = minirest:start(?MODULE, Minirest),
minirest:update_dispatch(?MODULE),
Res.
stop() ->
ets:delete(relup_test_message),
minirest:stop_http(?MODULE).
get_counter(_Binding, _Params) ->
V = ets:info(relup_test_message, size),
return({ok, V}).
add_counter(_Binding, Params) ->
case lists:keymember(<<"payload">>, 1, Params) of
true ->
{value, {<<"id">>, ID}, Params1} = lists:keytake(<<"id">>, 1, Params),
ets:insert(relup_test_message, {ID, Params1});
_ ->
io:format("discarded: ~p\n", [Params]),
ok
end,
return().
minirest:stop(?MODULE).
ets_owner() ->
ets:new(relup_test_message, [named_table, public]),

View File

@ -143,7 +143,6 @@ jobs:
- 24.2.1-1
os:
- macos-11
- macos-10.15
runs-on: ${{ matrix.os }}
steps:
- uses: actions/download-artifact@v2

View File

@ -76,7 +76,7 @@ jobs:
- uses: actions/upload-artifact@v2
with:
name: ${{ matrix.profile}}-${{ matrix.otp }}-${{ matrix.os }}
path: _packages/${{ matrix.profile}}/*.tar.gz
path: _packages/${{ matrix.profile}}/*
- uses: actions/upload-artifact@v2
with:
name: "${{ matrix.profile }}_schema_dump"
@ -120,7 +120,7 @@ jobs:
- uses: actions/upload-artifact@v2
with:
name: windows
path: _packages/${{ matrix.profile}}/*.tar.gz
path: _packages/${{ matrix.profile}}/*
mac:
strategy:
@ -133,7 +133,6 @@ jobs:
- 24.2.1-1
macos:
- macos-11
- macos-10.15
runs-on: ${{ matrix.macos }}
@ -196,7 +195,7 @@ jobs:
- uses: actions/upload-artifact@v2
with:
name: macos
path: _packages/**/*.tar.gz
path: _packages/**/*
spellcheck:
needs: linux

View File

@ -157,6 +157,10 @@ jobs:
if: matrix.discovery == 'k8s'
run: |
helm install emqx \
--set emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY="k8s" \
--set emqxConfig.EMQX_CLUSTER__K8S__APISERVER="https://kubernetes.default.svc:443" \
--set emqxConfig.EMQX_CLUSTER__K8S__SERVICE_NAME="emqx-headless" \
--set emqxConfig.EMQX_CLUSTER__K8S__NAMESPACE="default" \
--set image.repository=$TARGET \
--set image.pullPolicy=Never \
--set emqxAclConfig="" \
@ -173,8 +177,8 @@ jobs:
run: |
helm install emqx \
--set emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY="dns" \
--set emqxConfig.EMQX_CLUSTER__DNS__NAME="emqx-headless.default.svc.cluster.local" \
--set emqxConfig.EMQX_CLUSTER__DNS__RECORD_TYPE="srv" \
--set emqxConfig.EMQX_CLUSTER__DNS__NAME="emqx-headless.default.svc.cluster.local" \
--set image.repository=$TARGET \
--set image.pullPolicy=Never \
--set emqxAclConfig="" \

View File

@ -118,6 +118,8 @@ jobs:
-f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
-f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
-f .ci/docker-compose-file/docker-compose.yaml \
up -d --build

View File

@ -1,3 +1,15 @@
# 5.0.5
## Bug fixes
* Allow changing the license type from key to file (and vice-versa). [#8598](https://github.com/emqx/emqx/pull/8598)
## Enhancements
* The license is now copied to all nodes in the cluster when it's reloaded. [#8598](https://github.com/emqx/emqx/pull/8598)
* Added an HTTP API to manage licenses. [#8610](https://github.com/emqx/emqx/pull/8610)
* Updated the `/nodes` API `node_status` field from `Running/Stopped` to `running/stopped`. [#8642](https://github.com/emqx/emqx/pull/8642)
# 5.0.4
## Bug fixes
@ -7,37 +19,63 @@
Prior to this change, the webhook only checked the connectivity of the TCP port using `gen_tcp:connect/2`, so
if it was an HTTPS server, we did not check whether the TLS handshake was successful (see the sketch after this list).
[commits/6b45d2ea](https://github.com/emqx/emqx/commit/6b45d2ea9fde6d3b4a5b007f7a8c5a1c573d141e)
* The `create_at` field of rules is missing after emqx restarts. [commits/5fc09e6b](https://github.com/emqx/emqx/commit/5fc09e6b950c340243d7be627a0ce1700691221c)
* The `created_at` field of rules is missing after emqx restarts. [commits/5fc09e6b](https://github.com/emqx/emqx/commit/5fc09e6b950c340243d7be627a0ce1700691221c)
* The rule engine's jq function now works even when the path to the EMQX install dir contains spaces [jq#35](https://github.com/emqx/jq/pull/35) [#8455](https://github.com/emqx/emqx/pull/8455)
* Avoid applying any ACL checks on superusers [#8452](https://github.com/emqx/emqx/pull/8452)
* Fix statistics-related system topic name error
* Fix AuthN JWKS SSL schema by using the schema in `emqx_schema`. [#8458](https://github.com/emqx/emqx/pull/8458)
* The `sentinel` field is now required when AuthN/AuthZ Redis uses sentinel mode. [#8458](https://github.com/emqx/emqx/pull/8458)
* Fix bad swagger format. [#8517](https://github.com/emqx/emqx/pull/8517)
* Fix `chars_limit` not working when `formatter` is `json`. [#8518](http://github.com/emqx/emqx/pull/8518)
* Ensure that exhook dispatches client events sequentially. [#8530](https://github.com/emqx/emqx/pull/8530)
* Avoid using RocksDB backend for persistent sessions when such backend is unavailable. [#8528](https://github.com/emqx/emqx/pull/8528)
* Fix AuthN `cert_subject` and `cert_common_name` placeholder rendering failure. [#8531](https://github.com/emqx/emqx/pull/8531)
* Support listening on an IPv6 address, e.g. `[::1]:1883` or `::1:1883`. [#8547](https://github.com/emqx/emqx/pull/8547)
* GET `/rules` now supports pagination and fuzzy search. [#8472](https://github.com/emqx/emqx/pull/8472)
**‼️ Note**: The previous API returned a plain array: `[RuleObj1,RuleObj2]`; after this update it returns
`{"data": [RuleObj1,RuleObj2], "meta":{"count":2, "limit":100, "page":1}}`,
which carries the paging meta information.
* Fix the issue that webhook leaks TCP connections. [ehttpc#34](https://github.com/emqx/ehttpc/pull/34), [#8580](https://github.com/emqx/emqx/pull/8580)
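Below is a minimal Erlang sketch (not the actual EMQX webhook code; the `probe_*` names, arguments, and options are illustrative, and the `ssl` application is assumed to be started) of why a plain `gen_tcp:connect/4` probe reports success against an HTTPS endpoint even when TLS is broken, while an `ssl:connect/4` probe also exercises the handshake:

```erlang
%% Sketch only: contrasts a TCP-level reachability probe with one that
%% also completes the TLS handshake. Host, Port and Timeout are examples.
probe_tcp_only(Host, Port, Timeout) ->
    case gen_tcp:connect(Host, Port, [binary, {active, false}], Timeout) of
        {ok, Sock} ->
            %% Only proves the TCP port accepts connections.
            gen_tcp:close(Sock),
            ok;
        {error, Reason} ->
            {error, Reason}
    end.

probe_tls(Host, Port, Timeout) ->
    %% Also performs the TLS handshake, so certificate or TLS configuration
    %% problems surface here (verification options shown are illustrative).
    case ssl:connect(Host, Port, [binary, {active, false}, {verify, verify_none}], Timeout) of
        {ok, TlsSock} ->
            ssl:close(TlsSock),
            ok;
        {error, Reason} ->
            {error, Reason}
    end.
```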
## Enhancements
* Improve the dashboard listener startup log: the listener name is no longer concatenated with the port information,
and the colon (:) is no longer displayed when no IP is specified. [#8480](https://github.com/emqx/emqx/pull/8480)
* Remove `/configs/listeners` API, use `/listeners/` instead. [#8485](https://github.com/emqx/emqx/pull/8485)
* Optimize performance of builtin database operations in processes with long message queues [#8439](https://github.com/emqx/emqx/pull/8439)
* Improve authentication tracing. [#8554](https://github.com/emqx/emqx/pull/8554)
* Standardize the `/listeners` and `/gateway/<name>/listeners` API fields.
This introduces some incompatible changes; see [#8571](https://github.com/emqx/emqx/pull/8571)
# 5.0.3
## Bug fixes
* Websocket listener failed to read headers `X-Forwared-For` and `X-Forwarded-Port` [8415](https://github.com/emqx/emqx/pull/8415)
* Deleted `cluster_singleton` from MQTT bridge config document. This config is no longer applicable in 5.0 [8407](https://github.com/emqx/emqx/pull/8407)
* Fix `emqx/emqx:latest` docker image publish to use the Erlang flavor, but not Elixir flavor [8414](https://github.com/emqx/emqx/pull/8414)
* Changed the `exp` field in JWT auth to be optional rather than required to fix backwards compatability with 4.X releases. [8425](https://github.com/emqx/emqx/pull/8425)
* Websocket listener failed to read headers `X-Forwarded-For` and `X-Forwarded-Port` [#8415](https://github.com/emqx/emqx/pull/8415)
* Deleted `cluster_singleton` from MQTT bridge config document. This config is no longer applicable in 5.0 [#8407](https://github.com/emqx/emqx/pull/8407)
* Fix `emqx/emqx:latest` docker image publish to use the Erlang flavor, but not Elixir flavor [#8414](https://github.com/emqx/emqx/pull/8414)
* Changed the `exp` field in JWT auth to be optional rather than required to fix backwards compatibility with 4.X releases. [#8425](https://github.com/emqx/emqx/pull/8425)
## Enhancements
* Improve the speed of dashboard's HTTP API routing rule generation, which sometimes causes timeout [8438](https://github.com/emqx/emqx/pull/8438)
* Improve the speed of dashboard's HTTP API routing rule generation, which sometimes causes timeout [#8438](https://github.com/emqx/emqx/pull/8438)
# 5.0.2
Announcemnet: EMQX team has decided to stop supporting relup for opensouce edition.
Going forward, it will be an enterprise only feature.
Announcement: EMQX team has decided to stop supporting relup for opensource edition.
Going forward, it will be an enterprise-only feature.
Main reason: relup requires carefully crafted upgrade instructions from ALL previous versions.
For example, 4.3 is now at 4.3.16; we have `4.3.0->4.3.16`, `4.3.1->4.3.16`, ... 16 such upgrade paths in total to maintain.
This had been the biggest obstacle for EMQX team to act agile enough in deliverying enhancements and fixes.
This had been the biggest obstacle for the EMQX team to be agile enough in delivering enhancements and fixes.
## Enhancements
## Bug fixes
* Fixed a typo in `bin/emqx` which affects MacOs release when trying to enable Erlang distribution over TLS [8398](https://github.com/emqx/emqx/pull/8398)
* Restricted shell was accidentally disabled in 5.0.1, it has been added back. [8396](https://github.com/emqx/emqx/pull/8396)
* Fixed a typo in `bin/emqx` which affects the macOS release when trying to enable Erlang distribution over TLS [#8398](https://github.com/emqx/emqx/pull/8398)
* Restricted shell was accidentally disabled in 5.0.1, it has been added back. [#8396](https://github.com/emqx/emqx/pull/8396)
# 5.0.1
@ -66,25 +104,25 @@ Exceptions:
## Enhancements
* Removed management API auth for prometheus scraping endpoint /api/v5/prometheus/stats [8299](https://github.com/emqx/emqx/pull/8299)
* Added more TCP options for exhook (gRPC) connections. [8317](https://github.com/emqx/emqx/pull/8317)
* HTTP Servers used for authentication and authorization will now indicate the result via the response body. [8374](https://github.com/emqx/emqx/pull/8374) [8377](https://github.com/emqx/emqx/pull/8377)
* Bulk subscribe/unsubscribe APIs [8356](https://github.com/emqx/emqx/pull/8356)
* Added exclusive subscription [8315](https://github.com/emqx/emqx/pull/8315)
* Provide authentication counter metrics [8352](https://github.com/emqx/emqx/pull/8352) [8375](https://github.com/emqx/emqx/pull/8375)
* Do not allow admin user self-deletion [8286](https://github.com/emqx/emqx/pull/8286)
* After restart, ensure to copy `cluster-override.conf` from the clustered node which has the greatest `tnxid`. [8333](https://github.com/emqx/emqx/pull/8333)
* Removed management API auth for prometheus scraping endpoint /api/v5/prometheus/stats [#8299](https://github.com/emqx/emqx/pull/8299)
* Added more TCP options for exhook (gRPC) connections. [#8317](https://github.com/emqx/emqx/pull/8317)
* HTTP Servers used for authentication and authorization will now indicate the result via the response body. [#8374](https://github.com/emqx/emqx/pull/8374) [#8377](https://github.com/emqx/emqx/pull/8377)
* Bulk subscribe/unsubscribe APIs [#8356](https://github.com/emqx/emqx/pull/8356)
* Added exclusive subscription [#8315](https://github.com/emqx/emqx/pull/8315)
* Provide authentication counter metrics [#8352](https://github.com/emqx/emqx/pull/8352) [#8375](https://github.com/emqx/emqx/pull/8375)
* Do not allow admin user self-deletion [#8286](https://github.com/emqx/emqx/pull/8286)
* After restart, ensure that `cluster-override.conf` is copied from the clustered node with the greatest `tnxid`. [#8333](https://github.com/emqx/emqx/pull/8333)
## Bug fixes
* A bug fix ported from 4.x: allow deleting subscriptions from `client.subscribe` hookpoint callback result. [8304](https://github.com/emqx/emqx/pull/8304) [8347](https://github.com/emqx/emqx/pull/8377)
* Fixed Erlang distribution over TLS [8309](https://github.com/emqx/emqx/pull/8309)
* Made possible to override authentication configs from environment variables [8323](https://github.com/emqx/emqx/pull/8309)
* Made authentication passwords in Mnesia database backward compatible to 4.x, so we can support data migration better. [8351](https://github.com/emqx/emqx/pull/8351)
* Fix plugins upload for rpm/deb installations [8379](https://github.com/emqx/emqx/pull/8379)
* Sync data/authz/acl.conf and data/certs from clustered nodes after a new node joins the cluster [8369](https://github.com/emqx/emqx/pull/8369)
* Ensure auto-retry of failed resources [8371](https://github.com/emqx/emqx/pull/8371)
* Fix the issue that the count of `packets.connack.auth_error` is inaccurate when the client uses a protocol version below MQTT v5.0 to access [8178](https://github.com/emqx/emqx/pull/8178)
* A bug fix ported from 4.x: allow deleting subscriptions from `client.subscribe` hookpoint callback result. [#8304](https://github.com/emqx/emqx/pull/8304) [#8347](https://github.com/emqx/emqx/pull/8377)
* Fixed Erlang distribution over TLS [#8309](https://github.com/emqx/emqx/pull/8309)
* Made it possible to override authentication configs from environment variables [#8323](https://github.com/emqx/emqx/pull/8309)
* Made authentication passwords in Mnesia database backward compatible to 4.x, so we can support data migration better. [#8351](https://github.com/emqx/emqx/pull/8351)
* Fix plugins upload for rpm/deb installations [#8379](https://github.com/emqx/emqx/pull/8379)
* Sync data/authz/acl.conf and data/certs from clustered nodes after a new node joins the cluster [#8369](https://github.com/emqx/emqx/pull/8369)
* Ensure auto-retry of failed resources [#8371](https://github.com/emqx/emqx/pull/8371)
* Fix the issue that the count of `packets.connack.auth_error` is inaccurate when the client connects with a protocol version below MQTT v5.0 [#8178](https://github.com/emqx/emqx/pull/8178)
## Others

View File

@ -1,7 +1,7 @@
Source code in this repository is variously licensed under below licenses.
For EMQX Community Edition: Apache License 2.0, see APL.txt,
For EMQX: Apache License 2.0, see APL.txt,
which applies to all source files except for lib-ee sub-directory.
For EMQX Enterprise Edition (since version 5.0): Business Source License 1.1,
For EMQX Enterprise (since version 5.0): Business Source License 1.1,
see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory.

View File

@ -7,7 +7,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d
export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.0.2
export EMQX_DASHBOARD_VERSION ?= v1.0.5
export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT)
@ -249,3 +249,4 @@ $(foreach tt,$(ALL_ELIXIR_TGZS),$(eval $(call gen-elixir-tgz-target,$(tt))))
fmt: $(REBAR)
@./scripts/erlfmt -w '{apps,lib-ee}/*/{src,include,test}/**/*.{erl,hrl,app.src}'
@./scripts/erlfmt -w 'rebar.config.erl'
@mix format

View File

@ -4,10 +4,10 @@
[![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx)
[![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master)
[![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx)
[![Slack](https://img.shields.io/badge/Slack-EMQ%20X-39AE85?logo=slack)](https://slack-invite.emqx.io/)
[![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/)
[![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES)
[![Twitter](https://img.shields.io/badge/Twitter-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech)
[![Community](https://img.shields.io/badge/Community-EMQ%20X-yellow)](https://askemq.com)
[![Community](https://img.shields.io/badge/Community-EMQX-yellow)](https://askemq.com)
[![YouTube](https://img.shields.io/badge/Subscribe-EMQ%20中文-FF0000?logo=youtube)](https://www.youtube.com/channel/UCir_r04HIsLjf2qqyZ4A8Cg)

View File

@ -4,7 +4,7 @@
[![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx)
[![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master)
[![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx)
[![Slack](https://img.shields.io/badge/Slack-EMQ%20X-39AE85?logo=slack)](https://slack-invite.emqx.io/)
[![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/)
[![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES)
[![Twitter](https://img.shields.io/badge/Twitter-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech)
[![YouTube](https://img.shields.io/badge/Subscribe-EMQ-FF0000?logo=youtube)](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q)

View File

@ -4,10 +4,10 @@
[![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx)
[![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master)
[![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx)
[![Slack](https://img.shields.io/badge/Slack-EMQ%20X-39AE85?logo=slack)](https://slack-invite.emqx.io/)
[![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/)
[![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES)
[![Twitter](https://img.shields.io/badge/Follow-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech)
[![Community](https://img.shields.io/badge/Community-EMQ%20X-yellow?logo=github)](https://github.com/emqx/emqx/discussions)
[![Community](https://img.shields.io/badge/Community-EMQX-yellow?logo=github)](https://github.com/emqx/emqx/discussions)
[![YouTube](https://img.shields.io/badge/Subscribe-EMQ-FF0000?logo=youtube)](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q)
[![The best IoT MQTT open source team looks forward to your joining](https://assets.emqx.com/images/github_readme_en_bg.png)](https://www.emqx.com/en/careers)

View File

@ -4,7 +4,7 @@
[![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx)
[![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master)
[![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx)
[![Slack](https://img.shields.io/badge/Slack-EMQ%20X-39AE85?logo=slack)](https://slack-invite.emqx.io/)
[![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/)
[![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES)
[![Twitter](https://img.shields.io/badge/Follow-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech)
[![YouTube](https://img.shields.io/badge/Subscribe-EMQ-FF0000?logo=youtube)](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q)

View File

@ -66,6 +66,9 @@ Cygwin is what we tested with.
Start (restart) the CMD or PowerShell console and execute `which bash`; it should
print out `/usr/bin/bash`
NOTE: Make sure Cygwin's bin dir is added before `C:\Windows\system32` in `Path`,
otherwise the build scripts may end up using binaries from WSL instead of Cygwin.
### Other tools
Some of the unix world tools are required to build EMQX. Including:

View File

@ -89,10 +89,10 @@ the check/consume will succeed, but it will be forced to wait for a short period
}
}
per_client {
client {
desc {
en: """The rate limit for each user of the bucket, this field is not required"""
zh: """对桶的每个使用者的速率控制设置,这个不是必须的"""
en: """The rate limit for each user of the bucket"""
zh: """对桶的每个使用者的速率控制设置"""
}
label: {
en: """Per Client"""
@ -124,20 +124,6 @@ the check/consume will succeed, but it will be forced to wait for a short period
}
}
batch {
desc {
en: """The batch limiter.
This is used for EMQX internal batch operation
e.g. limit the retainer's deliver rate"""
zh: """批量操作速率控制器。
这是给 EMQX 内部的批量操作使用的,比如用来控制保留消息的派发速率"""
}
label: {
en: """Batch"""
zh: """批量操作"""
}
}
message_routing {
desc {
en: """The message routing limiter.
@ -193,4 +179,12 @@ Once the limit is reached, the restricted client will be slow down even be hung
zh: """流入字节率"""
}
}
internal {
desc {
en: """Limiter for EMQX internal app."""
zh: """EMQX 内部功能所用限制器。"""
}
}
}

View File

@ -17,6 +17,19 @@
-ifndef(EMQX_AUTHENTICATION_HRL).
-define(EMQX_AUTHENTICATION_HRL, true).
-include_lib("emqx/include/logger.hrl").
-define(AUTHN_TRACE_TAG, "AUTHN").
-define(TRACE_AUTHN_PROVIDER(Msg), ?TRACE_AUTHN_PROVIDER(Msg, #{})).
-define(TRACE_AUTHN_PROVIDER(Msg, Meta), ?TRACE_AUTHN_PROVIDER(debug, Msg, Meta)).
-define(TRACE_AUTHN_PROVIDER(Level, Msg, Meta),
?TRACE_AUTHN(Level, Msg, (Meta)#{provider => ?MODULE})
).
-define(TRACE_AUTHN(Msg, Meta), ?TRACE_AUTHN(debug, Msg, Meta)).
-define(TRACE_AUTHN(Level, Msg, Meta), ?TRACE(Level, ?AUTHN_TRACE_TAG, Msg, Meta)).
%% config root name all auth providers have to agree on.
-define(EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, "authentication").
-define(EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM, authentication).

View File

@ -32,7 +32,7 @@
%% `apps/emqx/src/bpapi/README.md'
%% Community edition
-define(EMQX_RELEASE_CE, "5.0.3").
-define(EMQX_RELEASE_CE, "5.0.4").
%% Enterprise edition
-define(EMQX_RELEASE_EE, "5.0.0-alpha.1").

View File

@ -42,17 +42,21 @@
-define(TRACE_FILTER, emqx_trace_filter).
-define(TRACE(Tag, Msg, Meta), ?TRACE(debug, Tag, Msg, Meta)).
%% Only evaluate when necessary
%% Always debug the trace events.
-define(TRACE(Tag, Msg, Meta), begin
case persistent_term:get(?TRACE_FILTER, undefined) of
undefined -> ok;
-define(TRACE(Level, Tag, Msg, Meta), begin
case persistent_term:get(?TRACE_FILTER, []) of
[] -> ok;
List -> emqx_trace:log(List, Msg, Meta#{trace_tag => Tag})
%% We can't bind the filter list to a variable because it would pollute the calling scope.
%% We also don't want to wrap the macro body in a fun
%% because this adds overhead to the happy path.
%% So evaluate `persistent_term:get` twice.
_ -> emqx_trace:log(persistent_term:get(?TRACE_FILTER, []), Msg, (Meta)#{trace_tag => Tag})
end,
?SLOG(
debug,
(emqx_trace_formatter:format_meta(Meta))#{msg => Msg, tag => Tag},
Level,
(emqx_trace_formatter:format_meta_map(Meta))#{msg => Msg, tag => Tag},
#{is_trace => false}
)
end).

View File

@ -14,6 +14,7 @@
{emqx_gateway_cm,1}.
{emqx_gateway_http,1}.
{emqx_license,1}.
{emqx_license,2}.
{emqx_management,1}.
{emqx_management,2}.
{emqx_mgmt_api_plugins,1}.

View File

@ -27,9 +27,9 @@
{jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.3"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.1"}}},
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.3"}}},
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.28.3"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.29.0"}}},
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}

View File

@ -24,7 +24,7 @@ IsQuicSupp = fun() ->
end,
Bcrypt = {bcrypt, {git, "https://github.com/emqx/erlang-bcrypt.git", {tag, "0.6.0"}}},
Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.14"}}}.
Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.16"}}}.
ExtraDeps = fun(C) ->
{deps, Deps0} = lists:keyfind(deps, 1, C),

View File

@ -3,7 +3,7 @@
{id, "emqx"},
{description, "EMQX Core"},
% strict semver, bump manually!
{vsn, "5.0.4"},
{vsn, "5.0.5"},
{modules, []},
{registered, []},
{applications, [

View File

@ -101,6 +101,14 @@
-define(CHAINS_TAB, emqx_authn_chains).
-define(TRACE_RESULT(Label, Result, Reason), begin
?TRACE_AUTHN(Label, #{
result => (Result),
reason => (Reason)
}),
Result
end).
-type chain_name() :: atom().
-type authenticator_id() :: binary().
-type position() :: front | rear | {before, authenticator_id()} | {'after', authenticator_id()}.
@ -216,14 +224,14 @@ when
authenticate(#{enable_authn := false}, _AuthResult) ->
inc_authenticate_metric('authentication.success.anonymous'),
ignore;
?TRACE_RESULT("authentication_result", ignore, enable_authn_false);
authenticate(#{listener := Listener, protocol := Protocol} = Credential, _AuthResult) ->
case get_authenticators(Listener, global_chain(Protocol)) of
{ok, ChainName, Authenticators} ->
case get_enabled(Authenticators) of
[] ->
inc_authenticate_metric('authentication.success.anonymous'),
ignore;
?TRACE_RESULT("authentication_result", ignore, empty_chain);
NAuthenticators ->
Result = do_authenticate(ChainName, NAuthenticators, Credential),
@ -235,11 +243,11 @@ authenticate(#{listener := Listener, protocol := Protocol} = Credential, _AuthRe
_ ->
ok
end,
Result
?TRACE_RESULT("authentication_result", Result, chain_result)
end;
none ->
inc_authenticate_metric('authentication.success.anonymous'),
ignore
?TRACE_RESULT("authentication_result", ignore, no_chain)
end.
get_authenticators(Listener, Global) ->
@ -626,11 +634,11 @@ handle_create_authenticator(Chain, Config, Providers) ->
do_authenticate(_ChainName, [], _) ->
{stop, {error, not_authorized}};
do_authenticate(
ChainName, [#authenticator{id = ID, provider = Provider, state = State} | More], Credential
ChainName, [#authenticator{id = ID} = Authenticator | More], Credential
) ->
MetricsID = metrics_id(ChainName, ID),
emqx_metrics_worker:inc(authn_metrics, MetricsID, total),
try Provider:authenticate(Credential, State) of
try authenticate_with_provider(Authenticator, Credential) of
ignore ->
ok = emqx_metrics_worker:inc(authn_metrics, MetricsID, nomatch),
do_authenticate(ChainName, More, Credential);
@ -651,8 +659,7 @@ do_authenticate(
{stop, Result}
catch
Class:Reason:Stacktrace ->
?SLOG(warning, #{
msg => "unexpected_error_in_authentication",
?TRACE_AUTHN(warning, "authenticator_error", #{
exception => Class,
reason => Reason,
stacktrace => Stacktrace,
@ -662,6 +669,14 @@ do_authenticate(
do_authenticate(ChainName, More, Credential)
end.
authenticate_with_provider(#authenticator{id = ID, provider = Provider, state = State}, Credential) ->
AuthnResult = Provider:authenticate(Credential, State),
?TRACE_AUTHN("authenticator_result", #{
authenticator => ID,
result => AuthnResult
}),
AuthnResult.
reply(Reply, State) ->
{reply, Reply, State}.

View File

@ -252,11 +252,12 @@ init(
<<>> -> undefined;
MP -> MP
end,
ListenerId = emqx_listeners:listener_id(Type, Listener),
ClientInfo = set_peercert_infos(
Peercert,
#{
zone => Zone,
listener => emqx_listeners:listener_id(Type, Listener),
listener => ListenerId,
protocol => Protocol,
peerhost => PeerHost,
sockport => SockPort,
@ -278,7 +279,9 @@ init(
outbound => #{}
},
auth_cache = #{},
quota = emqx_limiter_container:get_limiter_by_names([?LIMITER_ROUTING], LimiterCfg),
quota = emqx_limiter_container:get_limiter_by_types(
ListenerId, [?LIMITER_ROUTING], LimiterCfg
),
timers = #{},
conn_state = idle,
takeover = false,
@ -354,7 +357,7 @@ handle_in(?CONNECT_PACKET(ConnPkt) = Packet, Channel) ->
},
case authenticate(?CONNECT_PACKET(NConnPkt), NChannel1) of
{ok, Properties, NChannel2} ->
process_connect(Properties, ensure_connected(NChannel2));
process_connect(Properties, NChannel2);
{continue, Properties, NChannel2} ->
handle_out(auth, {?RC_CONTINUE_AUTHENTICATION, Properties}, NChannel2);
{error, ReasonCode} ->
@ -378,7 +381,7 @@ handle_in(
{ok, NProperties, NChannel} ->
case ConnState of
connecting ->
process_connect(NProperties, ensure_connected(NChannel));
process_connect(NProperties, NChannel);
_ ->
handle_out(
auth,
@ -608,7 +611,7 @@ process_connect(
case emqx_cm:open_session(CleanStart, ClientInfo, ConnInfo) of
{ok, #{session := Session, present := false}} ->
NChannel = Channel#channel{session = Session},
handle_out(connack, {?RC_SUCCESS, sp(false), AckProps}, NChannel);
handle_out(connack, {?RC_SUCCESS, sp(false), AckProps}, ensure_connected(NChannel));
{ok, #{session := Session, present := true, pendings := Pendings}} ->
Pendings1 = lists:usort(lists:append(Pendings, emqx_misc:drain_deliver())),
NChannel = Channel#channel{
@ -616,7 +619,7 @@ process_connect(
resuming = true,
pendings = Pendings1
},
handle_out(connack, {?RC_SUCCESS, sp(true), AckProps}, NChannel);
handle_out(connack, {?RC_SUCCESS, sp(true), AckProps}, ensure_connected(NChannel));
{error, client_id_unavailable} ->
handle_out(connack, ?RC_CLIENT_IDENTIFIER_NOT_VALID, Channel);
{error, Reason} ->
@ -1199,9 +1202,6 @@ handle_call(
disconnect_and_shutdown(takenover, AllPendings, Channel);
handle_call(list_authz_cache, Channel) ->
{reply, emqx_authz_cache:list_authz_cache(), Channel};
handle_call({quota, Bucket}, #channel{quota = Quota} = Channel) ->
Quota2 = emqx_limiter_container:update_by_name(message_routing, Bucket, Quota),
reply(ok, Channel#channel{quota = Quota2});
handle_call(
{keepalive, Interval},
Channel = #channel{

View File

@ -556,10 +556,12 @@ save_to_override_conf(RawConf, Opts) ->
add_handlers() ->
ok = emqx_config_logger:add_handler(),
emqx_sys_mon:add_handler(),
ok.
remove_handlers() ->
ok = emqx_config_logger:remove_handler(),
emqx_sys_mon:remove_handler(),
ok.
load_hocon_file(FileName, LoadType) ->

View File

@ -321,7 +321,7 @@ init_state(
},
LimiterTypes = [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN],
Limiter = emqx_limiter_container:get_limiter_by_names(LimiterTypes, LimiterCfg),
Limiter = emqx_limiter_container:get_limiter_by_types(Listener, LimiterTypes, LimiterCfg),
FrameOpts = #{
strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]),
@ -672,12 +672,6 @@ handle_call(_From, info, State) ->
{reply, info(State), State};
handle_call(_From, stats, State) ->
{reply, stats(State), State};
handle_call(_From, {ratelimit, Changes}, State = #state{limiter = Limiter}) ->
Fun = fun({Type, Bucket}, Acc) ->
emqx_limiter_container:update_by_name(Type, Bucket, Acc)
end,
Limiter2 = lists:foldl(Fun, Limiter, Changes),
{reply, ok, State#state{limiter = Limiter2}};
handle_call(_From, Req, State = #state{channel = Channel}) ->
case emqx_channel:handle_call(Req, Channel) of
{reply, Reply, NChannel} ->

View File

@ -19,12 +19,13 @@
-behaviour(esockd_generic_limiter).
%% API
-export([new_create_options/2, create/1, delete/1, consume/2]).
-export([new_create_options/3, create/1, delete/1, consume/2]).
-type create_options() :: #{
module := ?MODULE,
id := emqx_limiter_schema:limiter_id(),
type := emqx_limiter_schema:limiter_type(),
bucket := emqx_limiter_schema:bucket_name()
bucket := hocons:config()
}.
%%--------------------------------------------------------------------
@ -32,15 +33,16 @@
%%--------------------------------------------------------------------
-spec new_create_options(
emqx_limiter_schema:limiter_id(),
emqx_limiter_schema:limiter_type(),
emqx_limiter_schema:bucket_name()
hocons:config()
) -> create_options().
new_create_options(Type, BucketName) ->
#{module => ?MODULE, type => Type, bucket => BucketName}.
new_create_options(Id, Type, BucketCfg) ->
#{module => ?MODULE, id => Id, type => Type, bucket => BucketCfg}.
-spec create(create_options()) -> esockd_generic_limiter:limiter().
create(#{module := ?MODULE, type := Type, bucket := BucketName}) ->
{ok, Limiter} = emqx_limiter_server:connect(Type, BucketName),
create(#{module := ?MODULE, id := Id, type := Type, bucket := BucketCfg}) ->
{ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfg),
#{module => ?MODULE, name => Type, limiter => Limiter}.
delete(_GLimiter) ->

View File

@ -22,10 +22,8 @@
%% API
-export([
new/0, new/1, new/2,
get_limiter_by_names/2,
get_limiter_by_types/3,
add_new/3,
update_by_name/3,
set_retry_context/2,
check/3,
retry/2,
@ -48,10 +46,10 @@
}.
-type future() :: pos_integer().
-type limiter_id() :: emqx_limiter_schema:limiter_id().
-type limiter_type() :: emqx_limiter_schema:limiter_type().
-type limiter() :: emqx_htb_limiter:limiter().
-type retry_context() :: emqx_htb_limiter:retry_context().
-type bucket_name() :: emqx_limiter_schema:bucket_name().
-type millisecond() :: non_neg_integer().
-type check_result() ::
{ok, container()}
@ -64,46 +62,24 @@
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
-spec new() -> container().
new() ->
new([]).
%% @doc generate default data according to the type of limiter
-spec new(list(limiter_type())) -> container().
new(Types) ->
new(Types, #{}).
-spec new(
list(limiter_type()),
#{limiter_type() => emqx_limiter_schema:bucket_name()}
) -> container().
new(Types, Names) ->
get_limiter_by_names(Types, Names).
%% @doc generate a container
%% according to the type of limiter and the bucket name configuration of the limiter
%% @end
-spec get_limiter_by_names(
-spec get_limiter_by_types(
limiter_id() | {atom(), atom()},
list(limiter_type()),
#{limiter_type() => emqx_limiter_schema:bucket_name()}
#{limiter_type() => hocons:config()}
) -> container().
get_limiter_by_names(Types, BucketNames) ->
get_limiter_by_types({Type, Listener}, Types, BucketCfgs) ->
Id = emqx_listeners:listener_id(Type, Listener),
get_limiter_by_types(Id, Types, BucketCfgs);
get_limiter_by_types(Id, Types, BucketCfgs) ->
Init = fun(Type, Acc) ->
{ok, Limiter} = emqx_limiter_server:connect(Type, BucketNames),
{ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs),
add_new(Type, Limiter, Acc)
end,
lists:foldl(Init, #{retry_ctx => undefined}, Types).
%% @doc add the specified type of limiter to the container
-spec update_by_name(
limiter_type(),
bucket_name() | #{limiter_type() => bucket_name()},
container()
) -> container().
update_by_name(Type, Buckets, Container) ->
{ok, Limiter} = emqx_limiter_server:connect(Type, Buckets),
add_new(Type, Limiter, Container).
-spec add_new(limiter_type(), limiter(), container()) -> container().
add_new(Type, Limiter, Container) ->
Container#{

View File

@ -24,11 +24,9 @@
%% API
-export([
start_link/0,
find_bucket/1,
find_bucket/2,
insert_bucket/2,
insert_bucket/3,
make_path/2,
delete_bucket/2,
post_config_update/5
]).
@ -50,20 +48,19 @@
format_status/2
]).
-export_type([path/0]).
-type path() :: list(atom()).
-type limiter_id() :: emqx_limiter_schema:limiter_id().
-type limiter_type() :: emqx_limiter_schema:limiter_type().
-type bucket_name() :: emqx_limiter_schema:bucket_name().
-type uid() :: {limiter_id(), limiter_type()}.
%% counter record in ets table
-record(bucket, {
path :: path(),
uid :: uid(),
bucket :: bucket_ref()
}).
-type bucket_ref() :: emqx_limiter_bucket_ref:bucket_ref().
-define(UID(Id, Type), {Id, Type}).
-define(TAB, emqx_limiter_counters).
%%--------------------------------------------------------------------
@ -85,14 +82,10 @@ restart_server(Type) ->
stop_server(Type) ->
emqx_limiter_server_sup:stop(Type).
-spec find_bucket(limiter_type(), bucket_name()) ->
-spec find_bucket(limiter_id(), limiter_type()) ->
{ok, bucket_ref()} | undefined.
find_bucket(Type, BucketName) ->
find_bucket(make_path(Type, BucketName)).
-spec find_bucket(path()) -> {ok, bucket_ref()} | undefined.
find_bucket(Path) ->
case ets:lookup(?TAB, Path) of
find_bucket(Id, Type) ->
case ets:lookup(?TAB, ?UID(Id, Type)) of
[#bucket{bucket = Bucket}] ->
{ok, Bucket};
_ ->
@ -100,20 +93,19 @@ find_bucket(Path) ->
end.
-spec insert_bucket(
limiter_id(),
limiter_type(),
bucket_name(),
bucket_ref()
) -> boolean().
insert_bucket(Type, BucketName, Bucket) ->
inner_insert_bucket(make_path(Type, BucketName), Bucket).
insert_bucket(Id, Type, Bucket) ->
ets:insert(
?TAB,
#bucket{uid = ?UID(Id, Type), bucket = Bucket}
).
-spec insert_bucket(path(), bucket_ref()) -> true.
insert_bucket(Path, Bucket) ->
inner_insert_bucket(Path, Bucket).
-spec make_path(limiter_type(), bucket_name()) -> path().
make_path(Type, BucketName) ->
[Type | BucketName].
-spec delete_bucket(limiter_id(), limiter_type()) -> true.
delete_bucket(Id, Type) ->
ets:delete(?TAB, ?UID(Id, Type)).
post_config_update([limiter, Type], _Config, NewConf, _OldConf, _AppEnvs) ->
Config = maps:get(Type, NewConf),
@ -159,7 +151,7 @@ init([]) ->
set,
public,
named_table,
{keypos, #bucket.path},
{keypos, #bucket.uid},
{write_concurrency, true},
{read_concurrency, true},
{heir, erlang:whereis(emqx_limiter_sup), none}
@ -266,9 +258,3 @@ format_status(_Opt, Status) ->
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
-spec inner_insert_bucket(path(), bucket_ref()) -> true.
inner_insert_bucket(Path, Bucket) ->
ets:insert(
?TAB,
#bucket{path = Path, bucket = Bucket}
).

View File

@ -41,8 +41,10 @@
| message_in
| connection
| message_routing
| batch.
%% internal limiter for unclassified resources
| internal.
-type limiter_id() :: atom().
-type bucket_name() :: atom().
-type rate() :: infinity | float().
-type burst_rate() :: 0 | float().
@ -76,7 +78,7 @@
bucket_name/0
]).
-export_type([limiter_type/0, bucket_path/0]).
-export_type([limiter_id/0, limiter_type/0, bucket_path/0]).
-define(UNIT_TIME_IN_MS, 1000).
@ -87,52 +89,50 @@ roots() -> [limiter].
fields(limiter) ->
[
{Type,
?HOCON(?R_REF(limiter_opts), #{
?HOCON(?R_REF(node_opts), #{
desc => ?DESC(Type),
default => make_limiter_default(Type)
default => #{}
})}
|| Type <- types()
] ++
[
{client,
?HOCON(
?R_REF(client_fields),
#{
desc => ?DESC(client),
default => maps:from_list([
{erlang:atom_to_binary(Type), #{}}
|| Type <- types()
])
}
)}
];
fields(limiter_opts) ->
fields(node_opts) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
{burst,
?HOCON(burst_rate(), #{
desc => ?DESC(burst),
default => 0
})},
{bucket,
?HOCON(
?MAP("bucket_name", ?R_REF(bucket_opts)),
#{
desc => ?DESC(bucket_cfg),
default => #{<<"default">> => #{}},
example => #{
<<"mybucket-name">> => #{
<<"rate">> => <<"infinity">>,
<<"capcity">> => <<"infinity">>,
<<"initial">> => <<"100">>,
<<"per_client">> => #{<<"rate">> => <<"infinity">>}
}
}
}
)}
})}
];
fields(client_fields) ->
[
{Type,
?HOCON(?R_REF(client_opts), #{
desc => ?DESC(Type),
default => #{}
})}
|| Type <- types()
];
fields(bucket_opts) ->
[
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
{capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => "infinity"})},
{initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})},
{per_client,
?HOCON(
?R_REF(client_bucket),
#{
default => #{},
desc => ?DESC(per_client)
}
)}
{initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})}
];
fields(client_bucket) ->
fields(client_opts) ->
[
{rate, ?HOCON(rate(), #{default => "infinity", desc => ?DESC(rate)})},
{initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})},
@ -177,16 +177,30 @@ fields(client_bucket) ->
default => force
}
)}
].
];
fields(listener_fields) ->
bucket_fields([bytes_in, message_in, connection, message_routing], listener_client_fields);
fields(listener_client_fields) ->
client_fields([bytes_in, message_in, connection, message_routing]);
fields(Type) ->
bucket_field(Type).
desc(limiter) ->
"Settings for the rate limiter.";
desc(limiter_opts) ->
"Settings for the limiter.";
desc(node_opts) ->
"Settings for the limiter of the node level.";
desc(bucket_opts) ->
"Settings for the bucket.";
desc(client_bucket) ->
"Settings for the client bucket.";
desc(client_opts) ->
"Settings for the client in bucket level.";
desc(client_fields) ->
"Fields of the client level.";
desc(listener_fields) ->
"Fields of the listener.";
desc(listener_client_fields) ->
"Fields of the client level of the listener.";
desc(internal) ->
"Internal limiter.";
desc(_) ->
undefined.
@ -202,7 +216,7 @@ get_bucket_cfg_path(Type, BucketName) ->
[limiter, Type, bucket, BucketName].
types() ->
[bytes_in, message_in, connection, message_routing, batch].
[bytes_in, message_in, connection, message_routing, internal].
%%--------------------------------------------------------------------
%% Internal functions
@ -322,16 +336,44 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
make_limiter_default(connection) ->
bucket_field(Type) when is_atom(Type) ->
fields(bucket_opts) ++
[
{client,
?HOCON(
?R_REF(?MODULE, client_opts),
#{
<<"rate">> => <<"1000/s">>,
<<"bucket">> => #{
<<"default">> =>
desc => ?DESC(client),
required => false
}
)}
].
bucket_fields(Types, ClientRef) ->
[
{Type,
?HOCON(?R_REF(?MODULE, bucket_opts), #{
desc => ?DESC(?MODULE, Type),
required => false
})}
|| Type <- Types
] ++
[
{client,
?HOCON(
?R_REF(?MODULE, ClientRef),
#{
<<"rate">> => <<"1000/s">>,
<<"capacity">> => 1000
desc => ?DESC(client),
required => false
}
}
};
make_limiter_default(_) ->
#{}.
)}
].
client_fields(Types) ->
[
{Type,
?HOCON(?R_REF(client_opts), #{
desc => ?DESC(Type),
required => false
})}
|| Type <- Types
].

View File

@ -42,11 +42,13 @@
-export([
start_link/2,
connect/2,
connect/3,
add_bucket/3,
del_bucket/2,
get_initial_val/1,
whereis/1,
info/1,
name/1,
get_initial_val/1,
restart/1,
update_config/2
]).
@ -73,16 +75,17 @@
-type state() :: #{
type := limiter_type(),
root := undefined | root(),
root := root(),
buckets := buckets(),
%% current counter to alloc
counter := undefined | counters:counters_ref(),
index := index()
counter := counters:counters_ref(),
index := 0 | index()
}.
-type buckets() :: #{bucket_name() => bucket()}.
-type limiter_type() :: emqx_limiter_schema:limiter_type().
-type bucket_name() :: emqx_limiter_schema:bucket_name().
-type limiter_id() :: emqx_limiter_schema:limiter_id().
-type rate() :: decimal().
-type flow() :: decimal().
-type capacity() :: decimal().
@ -94,7 +97,7 @@
%% minimum coefficient for overloaded limiter
-define(OVERLOAD_MIN_ALLOC, 0.3).
-define(CURRYING(X, F2), fun(Y) -> F2(X, Y) end).
-define(COUNTER_SIZE, 8).
-export_type([index/0]).
-import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]).
@ -105,39 +108,49 @@
%% API
%%--------------------------------------------------------------------
-spec connect(
limiter_id(),
limiter_type(),
bucket_name() | #{limiter_type() => bucket_name() | undefined}
) ->
{ok, emqx_htb_limiter:limiter()} | {error, _}.
%% If no bucket path is set in config, there will be no limit
connect(_Type, undefined) ->
connect(_Id, _Type, undefined) ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
connect(Type, BucketName) when is_atom(BucketName) ->
case get_bucket_cfg(Type, BucketName) of
undefined ->
?SLOG(error, #{msg => "bucket_config_not_found", type => Type, bucket => BucketName}),
{error, config_not_found};
connect(Id, Type, Cfg) ->
case find_limiter_cfg(Type, Cfg) of
{undefined, _} ->
{ok, emqx_htb_limiter:make_infinity_limiter()};
{
#{
rate := BucketRate,
capacity := BucketSize,
per_client := #{rate := CliRate, capacity := CliSize} = Cfg
capacity := BucketSize
},
#{rate := CliRate, capacity := CliSize} = ClientCfg
} ->
case emqx_limiter_manager:find_bucket(Type, BucketName) of
case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} ->
{ok,
if
CliRate < BucketRate orelse CliSize < BucketSize ->
emqx_htb_limiter:make_token_bucket_limiter(Cfg, Bucket);
emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket);
true ->
emqx_htb_limiter:make_ref_limiter(Cfg, Bucket)
emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)
end};
undefined ->
?SLOG(error, #{msg => "bucket_not_found", type => Type, bucket => BucketName}),
?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}),
{error, invalid_bucket}
end
end;
connect(Type, Paths) ->
connect(Type, maps:get(Type, Paths, undefined)).
end.
-spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok.
add_bucket(_Id, _Type, undefined) ->
ok;
add_bucket(Id, Type, Cfg) ->
?CALL(Type, {add_bucket, Id, Cfg}).
-spec del_bucket(limiter_id(), limiter_type()) -> ok.
del_bucket(Id, Type) ->
?CALL(Type, {del_bucket, Id}).
-spec info(limiter_type()) -> state() | {error, _}.
info(Type) ->
@ -213,6 +226,12 @@ handle_call(restart, _From, #{type := Type}) ->
handle_call({update_config, Type, Config}, _From, #{type := Type}) ->
NewState = init_tree(Type, Config),
{reply, ok, NewState};
handle_call({add_bucket, Id, Cfg}, _From, State) ->
NewState = do_add_bucket(Id, Cfg, State),
{reply, ok, NewState};
handle_call({del_bucket, Id}, _From, State) ->
NewState = do_del_bucket(Id, State),
{reply, ok, NewState};
handle_call(Req, _From, State) ->
?SLOG(error, #{msg => "unexpected_call", call => Req}),
{reply, ignored, State}.
@ -456,24 +475,14 @@ init_tree(Type) when is_atom(Type) ->
Cfg = emqx:get_config([limiter, Type]),
init_tree(Type, Cfg).
init_tree(Type, #{bucket := Buckets} = Cfg) ->
State = #{
init_tree(Type, Cfg) ->
#{
type => Type,
root => undefined,
counter => undefined,
index => 1,
root => make_root(Cfg),
counter => counters:new(?COUNTER_SIZE, [write_concurrency]),
index => 0,
buckets => #{}
},
Root = make_root(Cfg),
{CounterNum, DelayBuckets} = make_bucket(maps:to_list(Buckets), Type, Cfg, 1, []),
State2 = State#{
root := Root,
counter := counters:new(CounterNum, [write_concurrency])
},
lists:foldl(fun(F, Acc) -> F(Acc) end, State2, DelayBuckets).
}.
-spec make_root(hocons:config()) -> root().
make_root(#{rate := Rate, burst := Burst}) ->
@ -484,79 +493,50 @@ make_root(#{rate := Rate, burst := Burst}) ->
produced => 0.0
}.
make_bucket([{Name, Conf} | T], Type, GlobalCfg, CounterNum, DelayBuckets) ->
Path = emqx_limiter_manager:make_path(Type, Name),
Rate = get_counter_rate(Conf, GlobalCfg),
#{capacity := Capacity} = Conf,
Initial = get_initial_val(Conf),
CounterNum2 = CounterNum + 1,
InitFun = fun(#{name := BucketName} = Bucket, #{buckets := Buckets} = State) ->
{Counter, Idx, State2} = alloc_counter(Path, Rate, Initial, State),
Bucket2 = Bucket#{counter := Counter, index := Idx},
State2#{buckets := Buckets#{BucketName => Bucket2}}
end,
do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) ->
case maps:get(Id, Buckets, undefined) of
undefined ->
make_bucket(Id, Cfg, State);
Bucket ->
Bucket2 = Bucket#{rate := Rate, capacity := Capacity},
State#{buckets := Buckets#{Id := Bucket2}}
end.
make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) ->
make_bucket(Id, Cfg, State#{
counter => counters:new(?COUNTER_SIZE, [write_concurrency]),
index => 0
});
make_bucket(
Id,
#{rate := Rate, capacity := Capacity} = Cfg,
#{type := Type, counter := Counter, index := Index, buckets := Buckets} = State
) ->
NewIndex = Index + 1,
Initial = get_initial_val(Cfg),
Bucket = #{
name => Name,
name => Id,
rate => Rate,
obtained => Initial,
correction => 0,
capacity => Capacity,
counter => undefined,
index => undefined
counter => Counter,
index => NewIndex
},
_ = put_to_counter(Counter, NewIndex, Initial),
Ref = emqx_limiter_bucket_ref:new(Counter, NewIndex, Rate),
emqx_limiter_manager:insert_bucket(Id, Type, Ref),
State#{buckets := Buckets#{Id => Bucket}, index := NewIndex}.
DelayInit = ?CURRYING(Bucket, InitFun),
make_bucket(
T,
Type,
GlobalCfg,
CounterNum2,
[DelayInit | DelayBuckets]
);
make_bucket([], _Type, _Global, CounterNum, DelayBuckets) ->
{CounterNum, DelayBuckets}.
-spec alloc_counter(emqx_limiter_manager:path(), rate(), capacity(), state()) ->
{counters:counters_ref(), pos_integer(), state()}.
alloc_counter(
Path,
Rate,
Initial,
#{counter := Counter, index := Index} = State
) ->
case emqx_limiter_manager:find_bucket(Path) of
{ok, #{
counter := ECounter,
index := EIndex
}} when ECounter =/= undefined ->
init_counter(Path, ECounter, EIndex, Rate, Initial, State);
do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) ->
case maps:get(Id, Buckets, undefined) of
undefined ->
State;
_ ->
init_counter(
Path,
Counter,
Index,
Rate,
Initial,
State#{index := Index + 1}
)
emqx_limiter_manager:delete_bucket(Id, Type),
State#{buckets := maps:remove(Id, Buckets)}
end.
init_counter(Path, Counter, Index, Rate, Initial, State) ->
_ = put_to_counter(Counter, Index, Initial),
Ref = emqx_limiter_bucket_ref:new(Counter, Index, Rate),
emqx_limiter_manager:insert_bucket(Path, Ref),
{Counter, Index, State}.
%% @doc find first limited node
get_counter_rate(#{rate := Rate}, _GlobalCfg) when Rate =/= infinity ->
Rate;
get_counter_rate(_Cfg, #{rate := Rate}) when Rate =/= infinity ->
Rate;
get_counter_rate(_Cfg, _GlobalCfg) ->
emqx_limiter_schema:infinity_value().
-spec get_initial_val(hocons:config()) -> decimal().
get_initial_val(
#{
@ -587,8 +567,21 @@ call(Type, Msg) ->
gen_server:call(Pid, Msg)
end.
-spec get_bucket_cfg(limiter_type(), bucket_name()) ->
undefined | limiter_not_started | hocons:config().
get_bucket_cfg(Type, Bucket) ->
Path = emqx_limiter_schema:get_bucket_cfg_path(Type, Bucket),
emqx:get_config(Path, undefined).
find_limiter_cfg(Type, #{rate := _} = Cfg) ->
{Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))};
find_limiter_cfg(Type, Cfg) ->
{
maps:get(Type, Cfg, undefined),
find_client_cfg(Type, emqx_map_lib:deep_get([client, Type], Cfg, undefined))
}.
find_client_cfg(Type, BucketCfg) ->
NodeCfg = emqx:get_config([limiter, client, Type], undefined),
merge_client_cfg(NodeCfg, BucketCfg).
merge_client_cfg(undefined, BucketCfg) ->
BucketCfg;
merge_client_cfg(NodeCfg, undefined) ->
NodeCfg;
merge_client_cfg(NodeCfg, BucketCfg) ->
maps:merge(NodeCfg, BucketCfg).

View File

@ -54,7 +54,7 @@
-export([pre_config_update/3, post_config_update/5]).
-export([format_addr/1]).
-export([format_bind/1]).
-define(CONF_KEY_PATH, [listeners, '?', '?']).
-define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]).
@ -201,14 +201,14 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
?tp(listener_started, #{type => Type, bind => Bind}),
console_print(
"Listener ~ts on ~ts started.~n",
[listener_id(Type, ListenerName), format_addr(Bind)]
[listener_id(Type, ListenerName), format_bind(Bind)]
),
ok;
{error, {already_started, Pid}} ->
{error, {already_started, Pid}};
{error, Reason} ->
ListenerId = listener_id(Type, ListenerName),
BindStr = format_addr(Bind),
BindStr = format_bind(Bind),
?ELOG(
"Failed to start listener ~ts on ~ts: ~0p.~n",
[ListenerId, BindStr, Reason]
@ -261,30 +261,37 @@ stop_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
ok ->
console_print(
"Listener ~ts on ~ts stopped.~n",
[listener_id(Type, ListenerName), format_addr(Bind)]
[listener_id(Type, ListenerName), format_bind(Bind)]
),
ok;
{error, not_found} ->
?ELOG(
"Failed to stop listener ~ts on ~ts: ~0p~n",
[listener_id(Type, ListenerName), format_addr(Bind), already_stopped]
[listener_id(Type, ListenerName), format_bind(Bind), already_stopped]
),
ok;
{error, Reason} ->
?ELOG(
"Failed to stop listener ~ts on ~ts: ~0p~n",
[listener_id(Type, ListenerName), format_addr(Bind), Reason]
[listener_id(Type, ListenerName), format_bind(Bind), Reason]
),
{error, Reason}
end.
-spec do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}.
do_stop_listener(Type, ListenerName, #{bind := ListenOn}) when Type == tcp; Type == ssl ->
esockd:close(listener_id(Type, ListenerName), ListenOn);
do_stop_listener(Type, ListenerName, _Conf) when Type == ws; Type == wss ->
cowboy:stop_listener(listener_id(Type, ListenerName));
do_stop_listener(quic, ListenerName, _Conf) ->
quicer:stop_listener(listener_id(quic, ListenerName)).
do_stop_listener(Type, ListenerName, #{bind := ListenOn} = Conf) when Type == tcp; Type == ssl ->
Id = listener_id(Type, ListenerName),
del_limiter_bucket(Id, Conf),
esockd:close(Id, ListenOn);
do_stop_listener(Type, ListenerName, Conf) when Type == ws; Type == wss ->
Id = listener_id(Type, ListenerName),
del_limiter_bucket(Id, Conf),
cowboy:stop_listener(Id);
do_stop_listener(quic, ListenerName, Conf) ->
Id = listener_id(quic, ListenerName),
del_limiter_bucket(Id, Conf),
quicer:stop_listener(Id).
-ifndef(TEST).
console_print(Fmt, Args) -> ?ULOG(Fmt, Args).
@ -300,10 +307,12 @@ do_start_listener(_Type, _ListenerName, #{enabled := false}) ->
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
Type == tcp; Type == ssl
->
Id = listener_id(Type, ListenerName),
add_limiter_bucket(Id, Opts),
esockd:open(
listener_id(Type, ListenerName),
Id,
ListenOn,
merge_default(esockd_opts(Type, Opts)),
merge_default(esockd_opts(Id, Type, Opts)),
{emqx_connection, start_link, [
#{
listener => {Type, ListenerName},
@ -318,6 +327,7 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
Type == ws; Type == wss
->
Id = listener_id(Type, ListenerName),
add_limiter_bucket(Id, Opts),
RanchOpts = ranch_opts(Type, ListenOn, Opts),
WsOpts = ws_opts(Type, ListenerName, Opts),
case Type of
@ -352,8 +362,10 @@ do_start_listener(quic, ListenerName, #{bind := ListenOn} = Opts) ->
limiter => limiter(Opts)
},
StreamOpts = [{stream_callback, emqx_quic_stream}],
Id = listener_id(quic, ListenerName),
add_limiter_bucket(Id, Opts),
quicer:start_listener(
listener_id(quic, ListenerName),
Id,
port(ListenOn),
{ListenOpts, ConnectionOpts, StreamOpts}
);
@ -410,16 +422,18 @@ post_config_update([listeners, Type, Name], {action, _Action, _}, NewConf, OldCo
post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) ->
ok.
esockd_opts(Type, Opts0) ->
esockd_opts(ListenerId, Type, Opts0) ->
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
Limiter = limiter(Opts0),
Opts2 =
case maps:get(connection, Limiter, undefined) of
undefined ->
Opts1;
BucketName ->
BucketCfg ->
Opts1#{
limiter => emqx_esockd_htb_limiter:new_create_options(connection, BucketName)
limiter => emqx_esockd_htb_limiter:new_create_options(
ListenerId, connection, BucketCfg
)
}
end,
Opts3 = Opts2#{
@ -492,17 +506,32 @@ merge_default(Options) ->
[{tcp_options, ?MQTT_SOCKOPTS} | Options]
end.
format_addr(Port) when is_integer(Port) ->
-spec format_bind(
integer() | {tuple(), integer()} | string() | binary()
) -> io_lib:chars().
format_bind(Port) when is_integer(Port) ->
io_lib:format(":~w", [Port]);
%% Print only the port number when bound on all interfaces
format_addr({{0, 0, 0, 0}, Port}) ->
format_addr(Port);
format_addr({{0, 0, 0, 0, 0, 0, 0, 0}, Port}) ->
format_addr(Port);
format_addr({Addr, Port}) when is_list(Addr) ->
format_bind({{0, 0, 0, 0}, Port}) ->
format_bind(Port);
format_bind({{0, 0, 0, 0, 0, 0, 0, 0}, Port}) ->
format_bind(Port);
format_bind({Addr, Port}) when is_list(Addr) ->
io_lib:format("~ts:~w", [Addr, Port]);
format_addr({Addr, Port}) when is_tuple(Addr) ->
io_lib:format("~ts:~w", [inet:ntoa(Addr), Port]).
format_bind({Addr, Port}) when is_tuple(Addr), tuple_size(Addr) == 4 ->
io_lib:format("~ts:~w", [inet:ntoa(Addr), Port]);
format_bind({Addr, Port}) when is_tuple(Addr), tuple_size(Addr) == 8 ->
io_lib:format("[~ts]:~w", [inet:ntoa(Addr), Port]);
%% Support string and binary types for Port or IP:Port
format_bind(Str) when is_list(Str) ->
case emqx_schema:to_ip_port(Str) of
{ok, {Ip, Port}} ->
format_bind({Ip, Port});
{error, _} ->
format_bind(list_to_integer(Str))
end;
format_bind(Bin) when is_binary(Bin) ->
format_bind(binary_to_list(Bin)).
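%% Illustration only: expected (flattened) results of format_bind/1 given the
%% clauses above; the sample addresses and port are assumed values.
%%   format_bind(1883)                             -> ":1883"
%%   format_bind({{0, 0, 0, 0}, 1883})             -> ":1883"
%%   format_bind({{127, 0, 0, 1}, 1883})           -> "127.0.0.1:1883"
%%   format_bind({{0, 0, 0, 0, 0, 0, 0, 1}, 1883}) -> "[::1]:1883"
%%   format_bind("127.0.0.1:1883")                 -> "127.0.0.1:1883"
%%   format_bind(<<"1883">>)                       -> ":1883"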
listener_id(Type, ListenerName) ->
list_to_atom(lists:append([str(Type), ":", str(ListenerName)])).
@ -524,6 +553,27 @@ zone(Opts) ->
limiter(Opts) ->
maps:get(limiter, Opts, #{}).
add_limiter_bucket(Id, #{limiter := Limiter}) ->
maps:fold(
fun(Type, Cfg, _) ->
emqx_limiter_server:add_bucket(Id, Type, Cfg)
end,
ok,
maps:without([client], Limiter)
);
add_limiter_bucket(_Id, _Cfg) ->
ok.
del_limiter_bucket(Id, #{limiter := Limiters}) ->
lists:foreach(
fun(Type) ->
emqx_limiter_server:del_bucket(Id, Type)
end,
maps:keys(Limiters)
);
del_limiter_bucket(_Id, _Cfg) ->
ok.
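%% Illustration only: with an assumed listener config such as
%%   Opts = #{limiter => #{connection => #{rate => 1000, capacity => 1000}}},
%% add_limiter_bucket('tcp:default', Opts) registers one bucket per limiter
%% type except `client', i.e. it calls
%%   emqx_limiter_server:add_bucket('tcp:default', connection,
%%                                  #{rate => 1000, capacity => 1000}).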
enable_authn(Opts) ->
maps:get(enable_authn, Opts, true).

View File

@ -69,9 +69,10 @@ best_effort_json(Input, Opts) ->
jsx:encode(JsonReady, Opts).
-spec format(logger:log_event(), config()) -> iodata().
format(#{level := Level, msg := Msg, meta := Meta}, Config0) when is_map(Config0) ->
format(#{level := Level, msg := Msg, meta := Meta} = Event, Config0) when is_map(Config0) ->
Config = add_default_config(Config0),
[format(Msg, Meta#{level => Level}, Config), "\n"].
MsgBin = format(Msg, Meta#{level => Level}, Config),
logger_formatter:format(Event#{msg => {string, MsgBin}}, Config).
format(Msg, Meta, Config) ->
Data0 =

View File

@ -35,6 +35,8 @@
current_sysmem_percent/0
]).
-export([update/1]).
%% gen_server callbacks
-export([
init/1,
@ -52,6 +54,9 @@
start_link() ->
gen_server:start_link({local, ?OS_MON}, ?MODULE, [], []).
update(OS) ->
erlang:send(?MODULE, {monitor_conf_update, OS}).
%%--------------------------------------------------------------------
%% API
%%--------------------------------------------------------------------
@ -87,18 +92,24 @@ current_sysmem_percent() ->
init([]) ->
%% memsup is not reliable, ignore
memsup:set_sysmem_high_watermark(1.0),
SysHW = init_os_monitor(),
_ = start_mem_check_timer(),
_ = start_cpu_check_timer(),
{ok, #{sysmem_high_watermark => SysHW}}.
init_os_monitor() ->
init_os_monitor(emqx:get_config([sysmon, os])).
init_os_monitor(OS) ->
#{
sysmem_high_watermark := SysHW,
procmem_high_watermark := PHW,
mem_check_interval := MCI
} = emqx:get_config([sysmon, os]),
} = OS,
set_procmem_high_watermark(PHW),
set_mem_check_interval(MCI),
ok = update_mem_alarm_status(SysHW),
_ = start_mem_check_timer(),
_ = start_cpu_check_timer(),
{ok, #{sysmem_high_watermark => SysHW}}.
SysHW.
handle_call(get_sysmem_high_watermark, _From, #{sysmem_high_watermark := HWM} = State) ->
{reply, HWM, State};
@ -147,6 +158,9 @@ handle_info({timeout, _Timer, cpu_check}, State) ->
end,
ok = start_cpu_check_timer(),
{noreply, State};
handle_info({monitor_conf_update, OS}, _State) ->
SysHW = init_os_monitor(OS),
{noreply, #{sysmem_high_watermark => SysHW}};
handle_info(Info, State) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}),
{noreply, State}.

View File

@ -1635,10 +1635,15 @@ base_listener(Bind) ->
)},
{"limiter",
sc(
map("ratelimit_name", emqx_limiter_schema:bucket_name()),
?R_REF(
emqx_limiter_schema,
listener_fields
),
#{
desc => ?DESC(base_listener_limiter),
default => #{<<"connection">> => <<"default">>}
default => #{
<<"connection">> => #{<<"rate">> => <<"1000/s">>, <<"capacity">> => 1000}
}
}
)},
{"enable_authn",
@ -2129,9 +2134,13 @@ to_comma_separated_atoms(Str) ->
to_bar_separated_list(Str) ->
{ok, string:tokens(Str, "| ")}.
%% @doc support the following format:
%% - 127.0.0.1:1883
%% - ::1:1883
%% - [::1]:1883
to_ip_port(Str) ->
case string:tokens(Str, ": ") of
[Ip, Port] ->
case split_ip_port(Str) of
{Ip, Port} ->
PortVal = list_to_integer(Port),
case inet:parse_address(Ip) of
{ok, R} ->
@ -2149,6 +2158,26 @@ to_ip_port(Str) ->
{error, Str}
end.
split_ip_port(Str0) ->
Str = re:replace(Str0, " ", "", [{return, list}, global]),
case lists:split(string:rchr(Str, $:), Str) of
%% no port
{[], Str} ->
error;
{IpPlusColon, PortString} ->
IpStr0 = lists:droplast(IpPlusColon),
case IpStr0 of
%% drop head/tail brackets
[$[ | S] ->
case lists:last(S) of
$] -> {lists:droplast(S), PortString};
_ -> error
end;
_ ->
{IpStr0, PortString}
end
end.
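%% Illustration only: expected results of to_ip_port/1 for the formats
%% documented above (addresses and port are assumed sample values):
%%   to_ip_port("127.0.0.1:1883") -> {ok, {{127, 0, 0, 1}, 1883}}
%%   to_ip_port("::1:1883")       -> {ok, {{0, 0, 0, 0, 0, 0, 0, 1}, 1883}}
%%   to_ip_port("[::1]:1883")     -> {ok, {{0, 0, 0, 0, 0, 0, 0, 1}, 1883}}
%% A string without a ':' separator makes split_ip_port/1 return `error'
%% and falls through to the error clause of to_ip_port/1.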
to_erl_cipher_suite(Str) ->
case ssl:str_to_suite(Str) of
{error, Reason} -> error({invalid_cipher, Reason});

View File

@ -333,7 +333,7 @@ publish(brokers, Nodes) ->
safe_publish(<<"$SYS/brokers">>, #{retain => true}, Payload);
publish(stats, Stats) ->
[
safe_publish(systop(lists:concat(['stats/', Stat])), integer_to_binary(Val))
safe_publish(systop(stats_topic(Stat)), integer_to_binary(Val))
|| {Stat, Val} <- Stats, is_atom(Stat), is_integer(Val)
];
publish(metrics, Metrics) ->
@ -351,7 +351,13 @@ publish(Event, Payload) when
safe_publish(Topic, emqx_json:encode(Payload)).
metric_topic(Name) ->
lists:concat(["metrics/", string:replace(atom_to_list(Name), ".", "/", all)]).
translate_topic("metrics/", Name).
stats_topic(Name) ->
translate_topic("stats/", Name).
translate_topic(Prefix, Name) ->
lists:concat([Prefix, string:replace(atom_to_list(Name), ".", "/", all)]).
safe_publish(Topic, Payload) ->
safe_publish(Topic, #{}, Payload).

View File

@ -35,32 +35,52 @@
terminate/2,
code_change/3
]).
-export([add_handler/0, remove_handler/0, post_config_update/5]).
-export([update/1]).
-define(SYSMON, ?MODULE).
-define(SYSMON_CONF_ROOT, [sysmon]).
%% @doc Start the system monitor.
-spec start_link() -> startlink_ret().
start_link() ->
gen_server:start_link({local, ?SYSMON}, ?MODULE, [], []).
add_handler() ->
ok = emqx_config_handler:add_handler(?SYSMON_CONF_ROOT, ?MODULE),
ok.
remove_handler() ->
ok = emqx_config_handler:remove_handler(?SYSMON_CONF_ROOT),
ok.
post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) ->
#{os := OS1, vm := VM1} = OldConf,
#{os := OS2, vm := VM2} = NewConf,
VM1 =/= VM2 andalso ?MODULE:update(VM2),
OS1 =/= OS2 andalso emqx_os_mon:update(OS2),
ok.
update(VM) ->
erlang:send(?MODULE, {monitor_conf_update, VM}).
%%--------------------------------------------------------------------
%% gen_server callbacks
%%--------------------------------------------------------------------
init([]) ->
_ = erlang:system_monitor(self(), sysm_opts()),
emqx_logger:set_proc_metadata(#{sysmon => true}),
init_system_monitor(),
%% Monitor cluster partition event
ekka:monitor(partition, fun handle_partition_event/1),
{ok, start_timer(#{timer => undefined, events => []})}.
start_timer(State) ->
State#{timer := emqx_misc:start_timer(timer:seconds(2), reset)}.
sysm_opts() ->
sysm_opts(maps:to_list(emqx:get_config([sysmon, vm])), []).
sysm_opts(VM) ->
sysm_opts(maps:to_list(VM), []).
sysm_opts([], Acc) ->
Acc;
sysm_opts([{_, disabled} | Opts], Acc) ->
@ -176,12 +196,16 @@ handle_info({monitor, SusPid, busy_dist_port, Port}, State) ->
);
handle_info({timeout, _Ref, reset}, State) ->
{noreply, State#{events := []}, hibernate};
handle_info({monitor_conf_update, VM}, State) ->
init_system_monitor(VM),
{noreply, State#{events := []}, hibernate};
handle_info(Info, State) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}),
{noreply, State}.
terminate(_Reason, #{timer := TRef}) ->
emqx_misc:cancel_timer(TRef).
emqx_misc:cancel_timer(TRef),
ok.
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
@ -237,3 +261,11 @@ safe_publish(Event, WarnMsg) ->
sysmon_msg(Topic, Payload) ->
Msg = emqx_message:make(?SYSMON, Topic, Payload),
emqx_message:set_flag(sys, Msg).
init_system_monitor() ->
VM = emqx:get_config([sysmon, vm]),
init_system_monitor(VM).
init_system_monitor(VM) ->
_ = erlang:system_monitor(self(), sysm_opts(VM)),
ok.

View File

@ -92,15 +92,16 @@ unsubscribe(<<"$SYS/", _/binary>>, _SubOpts) ->
unsubscribe(Topic, SubOpts) ->
?TRACE("UNSUBSCRIBE", "unsubscribe", #{topic => Topic, sub_opts => SubOpts}).
log(List, Msg, Meta0) ->
Meta =
case logger:get_process_metadata() of
undefined -> Meta0;
ProcMeta -> maps:merge(ProcMeta, Meta0)
end,
Log = #{level => debug, meta => Meta, msg => Msg},
log(List, Msg, Meta) ->
Log = #{level => debug, meta => enrich_meta(Meta), msg => Msg},
log_filter(List, Log).
enrich_meta(Meta) ->
case logger:get_process_metadata() of
undefined -> Meta;
ProcMeta -> maps:merge(ProcMeta, Meta)
end.
log_filter([], _Log) ->
ok;
log_filter([{Id, FilterFun, Filter, Name} | Rest], Log0) ->

View File

@ -16,7 +16,7 @@
-module(emqx_trace_formatter).
-export([format/2]).
-export([format_meta/1]).
-export([format_meta_map/1]).
%%%-----------------------------------------------------------------
%%% API
@ -31,32 +31,39 @@ format(
ClientId = to_iolist(maps:get(clientid, Meta, "")),
Peername = maps:get(peername, Meta, ""),
MetaBin = format_meta(Meta, PEncode),
[Time, " [", Tag, "] ", ClientId, "@", Peername, " msg: ", Msg, MetaBin, "\n"];
[Time, " [", Tag, "] ", ClientId, "@", Peername, " msg: ", Msg, ", ", MetaBin, "\n"];
format(Event, Config) ->
emqx_logger_textfmt:format(Event, Config).
format_meta(Meta) ->
format_meta_map(Meta) ->
Encode = emqx_trace_handler:payload_encode(),
do_format_meta(Meta, Encode).
format_meta_map(Meta, Encode).
format_meta(Meta0, Encode) ->
Meta1 = #{packet := Packet0, payload := Payload0} = do_format_meta(Meta0, Encode),
Packet = enrich(", packet: ", Packet0),
Payload = enrich(", payload: ", Payload0),
Meta2 = maps:without([msg, clientid, peername, packet, payload, trace_tag], Meta1),
case Meta2 =:= #{} of
true -> [Packet, Payload];
false -> [Packet, ", ", map_to_iolist(Meta2), Payload]
format_meta_map(Meta, Encode) ->
format_meta_map(Meta, Encode, [{packet, fun format_packet/2}, {payload, fun format_payload/2}]).
format_meta_map(Meta, _Encode, []) ->
Meta;
format_meta_map(Meta, Encode, [{Name, FormatFun} | Rest]) ->
case Meta of
#{Name := Value} ->
NewMeta = Meta#{Name => FormatFun(Value, Encode)},
format_meta_map(NewMeta, Encode, Rest);
#{} ->
format_meta_map(Meta, Encode, Rest)
end.
enrich(_, "") -> "";
enrich(Key, IoData) -> [Key, IoData].
format_meta(Meta0, Encode) ->
Meta1 = maps:without([msg, clientid, peername, trace_tag], Meta0),
Meta2 = format_meta_map(Meta1, Encode),
kvs_to_iolist(lists:sort(fun compare_meta_kvs/2, maps:to_list(Meta2))).
do_format_meta(Meta, Encode) ->
Meta#{
packet => format_packet(maps:get(packet, Meta, undefined), Encode),
payload => format_payload(maps:get(payload, Meta, undefined), Encode)
}.
%% packet always goes first; payload always goes last
compare_meta_kvs(KV1, KV2) -> weight(KV1) =< weight(KV2).
weight({packet, _}) -> {0, packet};
weight({payload, _}) -> {2, payload};
weight({K, _}) -> {1, K}.
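%% Illustration only: with an assumed meta list, the sort above yields
%%   lists:sort(fun compare_meta_kvs/2, [{payload, P}, {topic, T}, {packet, Pkt}])
%%     -> [{packet, Pkt}, {topic, T}, {payload, P}]
%% i.e. packet first, payload last, remaining keys in key order in between.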
format_packet(undefined, _) -> "";
format_packet(Packet, Encode) -> emqx_packet:format(Packet, Encode).
@ -69,14 +76,14 @@ format_payload(_, hidden) -> "******".
to_iolist(Atom) when is_atom(Atom) -> atom_to_list(Atom);
to_iolist(Int) when is_integer(Int) -> integer_to_list(Int);
to_iolist(Float) when is_float(Float) -> float_to_list(Float, [{decimals, 2}]);
to_iolist(SubMap) when is_map(SubMap) -> ["[", map_to_iolist(SubMap), "]"];
to_iolist(SubMap) when is_map(SubMap) -> ["[", kvs_to_iolist(maps:to_list(SubMap)), "]"];
to_iolist(Char) -> emqx_logger_textfmt:try_format_unicode(Char).
map_to_iolist(Map) ->
kvs_to_iolist(KVs) ->
lists:join(
",",
", ",
lists:map(
fun({K, V}) -> [to_iolist(K), ": ", to_iolist(V)] end,
maps:to_list(Map)
KVs
)
).

View File

@ -86,7 +86,7 @@ handle_info({timeout, _Timer, check}, State) ->
},
Message
);
_Precent ->
_Percent ->
ok
end,
_ = start_check_timer(),

View File

@ -273,7 +273,7 @@ check_origin_header(Req, #{listener := {Type, Listener}} = Opts) ->
end.
websocket_init([Req, Opts]) ->
#{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts,
#{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener} = ListenerCfg} = Opts,
case check_max_connection(Type, Listener) of
allow ->
{Peername, PeerCert} = get_peer_info(Type, Listener, Req, Opts),
@ -287,8 +287,10 @@ websocket_init([Req, Opts]) ->
ws_cookie => WsCookie,
conn_mod => ?MODULE
},
Limiter = emqx_limiter_container:get_limiter_by_names(
[?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg
Limiter = emqx_limiter_container:get_limiter_by_types(
ListenerCfg,
[?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN],
LimiterCfg
),
MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback),
FrameOpts = #{
@ -487,9 +489,6 @@ handle_call(From, info, State) ->
handle_call(From, stats, State) ->
gen_server:reply(From, stats(State)),
return(State);
handle_call(_From, {ratelimit, Type, Bucket}, State = #state{limiter = Limiter}) ->
Limiter2 = emqx_limiter_container:update_by_name(Type, Bucket, Limiter),
{reply, ok, State#state{limiter = Limiter2}};
handle_call(From, Req, State = #state{channel = Channel}) ->
case emqx_channel:handle_call(Req, Channel) of
{reply, Reply, NChannel} ->

View File

@ -131,16 +131,23 @@ storage_properties(_, Backend) when ?IS_ETS(Backend) ->
storage_properties(_, _) ->
[].
%% Dialyzer sees the compiled literal in
%% `mria:rocksdb_backend_available/0' and complains about the
%% complementary match arm...
-dialyzer({no_match, table_type/1}).
-spec table_type(atom()) -> mria_table_type().
table_type(Table) ->
DiscPersistence = emqx_config:get([?cfg_root, on_disc]),
RamCache = get_overlayed(Table, ram_cache),
case {DiscPersistence, RamCache} of
{true, true} ->
RocksDBAvailable = mria:rocksdb_backend_available(),
case {DiscPersistence, RamCache, RocksDBAvailable} of
{true, true, _} ->
disc_copies;
{true, false} ->
{true, false, true} ->
rocksdb_copies;
{false, _} ->
{true, false, false} ->
disc_copies;
{false, _, _} ->
ram_copies
end.
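%% Summary of the selection above (informal, derived from the clauses):
%%   on_disc = true,  ram_cache = true                      -> disc_copies
%%   on_disc = true,  ram_cache = false, rocksdb available  -> rocksdb_copies
%%   on_disc = true,  ram_cache = false, no rocksdb         -> disc_copies
%%   on_disc = false                                        -> ram_copies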

View File

@ -33,18 +33,6 @@ force_gc_conf() ->
force_shutdown_conf() ->
#{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}.
rate_limit_conf() ->
#{
conn_bytes_in => ["100KB", "10s"],
conn_messages_in => ["100", "10s"],
max_conn_rate => 1000,
quota =>
#{
conn_messages_routing => infinity,
overall_messages_routing => infinity
}
}.
rpc_conf() ->
#{
async_batch_size => 256,
@ -173,27 +161,9 @@ listeners_conf() ->
limiter_conf() ->
Make = fun() ->
#{
bucket =>
#{
default =>
#{
capacity => infinity,
initial => 0,
rate => infinity,
per_client =>
#{
capacity => infinity,
divisible => false,
failure_strategy => force,
initial => 0,
low_watermark => 0,
max_retry_time => 5000,
rate => infinity
}
}
},
burst => 0,
rate => infinity
rate => infinity,
capacity => infinity
}
end,
@ -202,7 +172,7 @@ limiter_conf() ->
Acc#{Name => Make()}
end,
#{},
[bytes_in, message_in, message_routing, connection, batch]
[bytes_in, message_in, message_routing, connection, internal]
).
stats_conf() ->
@ -213,7 +183,6 @@ zone_conf() ->
basic_conf() ->
#{
rate_limit => rate_limit_conf(),
force_gc => force_gc_conf(),
force_shutdown => force_shutdown_conf(),
mqtt => mqtt_conf(),
@ -274,10 +243,9 @@ end_per_suite(_Config) ->
emqx_banned
]).
init_per_testcase(TestCase, Config) ->
init_per_testcase(_TestCase, Config) ->
OldConf = set_test_listener_confs(),
emqx_common_test_helpers:start_apps([]),
check_modify_limiter(TestCase),
[{config, OldConf} | Config].
end_per_testcase(_TestCase, Config) ->
@ -285,41 +253,6 @@ end_per_testcase(_TestCase, Config) ->
emqx_common_test_helpers:stop_apps([]),
Config.
check_modify_limiter(TestCase) ->
Checks = [t_quota_qos0, t_quota_qos1, t_quota_qos2],
case lists:member(TestCase, Checks) of
true ->
modify_limiter();
_ ->
ok
end.
%% per_client: rate 5/1s, capacity 5
%% aggregated: rate 10/1s, capacity 10
modify_limiter() ->
Limiter = emqx_config:get([limiter]),
#{message_routing := #{bucket := Bucket} = Routing} = Limiter,
#{default := #{per_client := Client} = Default} = Bucket,
Client2 = Client#{
rate := 5,
initial := 0,
capacity := 5,
low_watermark := 1
},
Default2 = Default#{
per_client := Client2,
rate => 10,
initial => 0,
capacity => 10
},
Bucket2 = Bucket#{default := Default2},
Routing2 = Routing#{bucket := Bucket2},
emqx_config:put([limiter], Limiter#{message_routing := Routing2}),
emqx_limiter_manager:restart_server(message_routing),
timer:sleep(100),
ok.
%%--------------------------------------------------------------------
%% Test cases for channel info/stats/caps
%%--------------------------------------------------------------------
@ -729,6 +662,7 @@ t_process_unsubscribe(_) ->
t_quota_qos0(_) ->
esockd_limiter:start_link(),
add_bucket(),
Cnter = counters:new(1, []),
ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end),
ok = meck:expect(
@ -755,10 +689,12 @@ t_quota_qos0(_) ->
ok = meck:expect(emqx_metrics, inc, fun(_) -> ok end),
ok = meck:expect(emqx_metrics, inc, fun(_, _) -> ok end),
del_bucket(),
esockd_limiter:stop().
t_quota_qos1(_) ->
esockd_limiter:start_link(),
add_bucket(),
ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end),
Chann = channel(#{conn_state => connected, quota => quota()}),
Pub = ?PUBLISH_PACKET(?QOS_1, <<"topic">>, 1, <<"payload">>),
@ -769,10 +705,12 @@ t_quota_qos1(_) ->
{ok, ?PUBACK_PACKET(1, ?RC_SUCCESS), Chann4} = emqx_channel:handle_in(Pub, Chann3),
%% Quota in overall
{ok, ?PUBACK_PACKET(1, ?RC_QUOTA_EXCEEDED), _} = emqx_channel:handle_in(Pub, Chann4),
del_bucket(),
esockd_limiter:stop().
t_quota_qos2(_) ->
esockd_limiter:start_link(),
add_bucket(),
ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end),
Chann = channel(#{conn_state => connected, quota => quota()}),
Pub1 = ?PUBLISH_PACKET(?QOS_2, <<"topic">>, 1, <<"payload">>),
@ -786,6 +724,7 @@ t_quota_qos2(_) ->
{ok, ?PUBREC_PACKET(3, ?RC_SUCCESS), Chann4} = emqx_channel:handle_in(Pub3, Chann3),
%% Quota in overall
{ok, ?PUBREC_PACKET(4, ?RC_QUOTA_EXCEEDED), _} = emqx_channel:handle_in(Pub4, Chann4),
del_bucket(),
esockd_limiter:stop().
%%--------------------------------------------------------------------
@ -952,12 +891,6 @@ t_handle_call_takeover_end(_) ->
{shutdown, takenover, [], _, _Chan} =
emqx_channel:handle_call({takeover, 'end'}, channel()).
t_handle_call_quota(_) ->
{reply, ok, _Chan} = emqx_channel:handle_call(
{quota, default},
channel()
).
t_handle_call_unexpected(_) ->
{reply, ignored, _Chan} = emqx_channel:handle_call(unexpected_req, channel()).
@ -1176,7 +1109,7 @@ t_ws_cookie_init(_) ->
ConnInfo,
#{
zone => default,
limiter => limiter_cfg(),
limiter => undefined,
listener => {tcp, default}
}
),
@ -1210,7 +1143,7 @@ channel(InitFields) ->
ConnInfo,
#{
zone => default,
limiter => limiter_cfg(),
limiter => undefined,
listener => {tcp, default}
}
),
@ -1270,9 +1203,31 @@ session(InitFields) when is_map(InitFields) ->
%% conn: 5/s; overall: 10/s
quota() ->
emqx_limiter_container:get_limiter_by_names([message_routing], limiter_cfg()).
emqx_limiter_container:get_limiter_by_types(?MODULE, [message_routing], limiter_cfg()).
limiter_cfg() -> #{message_routing => default}.
limiter_cfg() ->
Client = #{
rate => 5,
initial => 0,
capacity => 5,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{
message_routing => bucket_cfg(),
client => #{message_routing => Client}
}.
bucket_cfg() ->
#{rate => 10, initial => 0, capacity => 10}.
add_bucket() ->
emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()).
del_bucket() ->
emqx_limiter_server:del_bucket(?MODULE, message_routing).
v4(Channel) ->
ConnInfo = emqx_channel:info(conninfo, Channel),

View File

@ -44,6 +44,7 @@
client_ssl_twoway/1,
ensure_mnesia_stopped/0,
ensure_quic_listener/2,
is_all_tcp_servers_available/1,
is_tcp_server_available/2,
is_tcp_server_available/3,
load_config/2,
@ -432,6 +433,18 @@ load_config(SchemaModule, Config, Opts) ->
load_config(SchemaModule, Config) ->
load_config(SchemaModule, Config, #{raw_with_default => false}).
-spec is_all_tcp_servers_available(Servers) -> Result when
Servers :: [{Host, Port}],
Host :: inet:socket_address() | inet:hostname(),
Port :: inet:port_number(),
Result :: boolean().
is_all_tcp_servers_available(Servers) ->
Fun =
fun({Host, Port}) ->
is_tcp_server_available(Host, Port)
end,
lists:all(Fun, Servers).
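%% Usage sketch (hosts and ports are assumed sample values):
%%   is_all_tcp_servers_available([{"localhost", 1883}, {"localhost", 8883}])
%% returns true only if every listed endpoint accepts a TCP connection.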
-spec is_tcp_server_available(
Host :: inet:socket_address() | inet:hostname(),
Port :: inet:port_number()
@ -582,6 +595,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
EnvHandler = maps:get(env_handler, Opts, fun(_) -> ok end),
ConfigureGenRpc = maps:get(configure_gen_rpc, Opts, true),
LoadSchema = maps:get(load_schema, Opts, true),
SchemaMod = maps:get(schema_mod, Opts, emqx_schema),
LoadApps = maps:get(load_apps, Opts, [gen_rpc, emqx, ekka, mria] ++ Apps),
Env = maps:get(env, Opts, []),
Conf = maps:get(conf, Opts, []),
@ -617,7 +631,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
%% Otherwise, the configuration gets loaded and all env presets from the env handler are lost
LoadSchema andalso
begin
emqx_config:init_load(emqx_schema),
emqx_config:init_load(SchemaMod),
application:set_env(emqx, init_config_load_done, true)
end,

View File

@ -78,6 +78,7 @@ end_per_suite(_Config) ->
init_per_testcase(TestCase, Config) when
TestCase =/= t_ws_pingreq_before_connected
->
add_bucket(),
ok = meck:expect(emqx_transport, wait, fun(Sock) -> {ok, Sock} end),
ok = meck:expect(emqx_transport, type, fun(_Sock) -> tcp end),
ok = meck:expect(
@ -104,9 +105,11 @@ init_per_testcase(TestCase, Config) when
_ -> Config
end;
init_per_testcase(_, Config) ->
add_bucket(),
Config.
end_per_testcase(TestCase, Config) ->
del_bucket(),
case erlang:function_exported(?MODULE, TestCase, 2) of
true -> ?MODULE:TestCase('end', Config);
false -> ok
@ -291,11 +294,6 @@ t_handle_call(_) ->
?assertMatch({ok, _St}, handle_msg({event, undefined}, St)),
?assertMatch({reply, _Info, _NSt}, handle_call(self(), info, St)),
?assertMatch({reply, _Stats, _NSt}, handle_call(self(), stats, St)),
?assertMatch({reply, ok, _NSt}, handle_call(self(), {ratelimit, []}, St)),
?assertMatch(
{reply, ok, _NSt},
handle_call(self(), {ratelimit, [{bytes_in, default}]}, St)
),
?assertEqual({reply, ignored, St}, handle_call(self(), for_testing, St)),
?assertMatch(
{stop, {shutdown, kicked}, ok, _NSt},
@ -704,7 +702,34 @@ handle_msg(Msg, St) -> emqx_connection:handle_msg(Msg, St).
handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St).
limiter_cfg() -> #{}.
-define(LIMITER_ID, 'tcp:default').
init_limiter() ->
emqx_limiter_container:get_limiter_by_names([bytes_in, message_in], limiter_cfg()).
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()).
limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Cfg = bucket_cfg(),
Client = #{
rate => Infinity,
initial => 0,
capacity => Infinity,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() ->
Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).

View File

@ -24,48 +24,7 @@
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-define(BASE_CONF, <<
""
"\n"
"limiter {\n"
" bytes_in {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" message_in {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" connection {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" message_routing {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" batch {\n"
" bucket.retainer {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"}\n"
"\n"
""
>>).
-define(BASE_CONF, <<"">>).
-record(client, {
counter :: counters:counter_ref(),
@ -97,6 +56,9 @@ end_per_suite(_Config) ->
init_per_testcase(_TestCase, Config) ->
Config.
end_per_testcase(_TestCase, Config) ->
Config.
load_conf() ->
emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF).
@ -116,12 +78,12 @@ t_consume(_) ->
failure_strategy := force
}
end,
Case = fun() ->
Client = connect(default),
Case = fun(BucketCfg) ->
Client = connect(BucketCfg),
{ok, L2} = emqx_htb_limiter:consume(50, Client),
{ok, _L3} = emqx_htb_limiter:consume(150, L2)
end,
with_per_client(default, Cfg, Case).
with_per_client(Cfg, Case).
t_retry(_) ->
Cfg = fun(Cfg) ->
@ -133,15 +95,15 @@ t_retry(_) ->
failure_strategy := force
}
end,
Case = fun() ->
Client = connect(default),
{ok, Client} = emqx_htb_limiter:retry(Client),
{_, _, Retry, L2} = emqx_htb_limiter:check(150, Client),
Case = fun(BucketCfg) ->
Client = connect(BucketCfg),
{ok, Client2} = emqx_htb_limiter:retry(Client),
{_, _, Retry, L2} = emqx_htb_limiter:check(150, Client2),
L3 = emqx_htb_limiter:set_retry(Retry, L2),
timer:sleep(500),
{ok, _L4} = emqx_htb_limiter:retry(L3)
end,
with_per_client(default, Cfg, Case).
with_per_client(Cfg, Case).
t_restore(_) ->
Cfg = fun(Cfg) ->
@ -153,15 +115,15 @@ t_restore(_) ->
failure_strategy := force
}
end,
Case = fun() ->
Client = connect(default),
Case = fun(BucketCfg) ->
Client = connect(BucketCfg),
{_, _, Retry, L2} = emqx_htb_limiter:check(150, Client),
timer:sleep(200),
{ok, L3} = emqx_htb_limiter:check(Retry, L2),
Available = emqx_htb_limiter:available(L3),
?assert(Available >= 50)
end,
with_per_client(default, Cfg, Case).
with_per_client(Cfg, Case).
t_max_retry_time(_) ->
Cfg = fun(Cfg) ->
@ -172,15 +134,15 @@ t_max_retry_time(_) ->
failure_strategy := drop
}
end,
Case = fun() ->
Client = connect(default),
Case = fun(BucketCfg) ->
Client = connect(BucketCfg),
Begin = ?NOW,
Result = emqx_htb_limiter:consume(101, Client),
?assertMatch({drop, _}, Result),
Time = ?NOW - Begin,
?assert(Time >= 500 andalso Time < 550)
end,
with_per_client(default, Cfg, Case).
with_per_client(Cfg, Case).
t_divisible(_) ->
Cfg = fun(Cfg) ->
@ -191,8 +153,8 @@ t_divisible(_) ->
capacity := 600
}
end,
Case = fun() ->
Client = connect(default),
Case = fun(BucketCfg) ->
Client = connect(BucketCfg),
Result = emqx_htb_limiter:check(1000, Client),
?assertMatch(
{partial, 400,
@ -206,7 +168,7 @@ t_divisible(_) ->
Result
)
end,
with_per_client(default, Cfg, Case).
with_per_client(Cfg, Case).
t_low_watermark(_) ->
Cfg = fun(Cfg) ->
@ -217,8 +179,8 @@ t_low_watermark(_) ->
capacity := 1000
}
end,
Case = fun() ->
Client = connect(default),
Case = fun(BucketCfg) ->
Client = connect(BucketCfg),
Result = emqx_htb_limiter:check(500, Client),
?assertMatch({ok, _}, Result),
{_, Client2} = Result,
@ -233,28 +195,21 @@ t_low_watermark(_) ->
Result2
)
end,
with_per_client(default, Cfg, Case).
with_per_client(Cfg, Case).
t_infinity_client(_) ->
Fun = fun(#{per_client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := infinity,
capacity := infinity
},
Cli2 = Cli#{rate := infinity, capacity := infinity},
Bucket2#{per_client := Cli2}
end,
Case = fun() ->
Client = connect(default),
Fun = fun(Cfg) -> Cfg end,
Case = fun(Cfg) ->
Client = connect(Cfg),
InfVal = emqx_limiter_schema:infinity_value(),
?assertMatch(#{bucket := #{rate := InfVal}}, Client),
Result = emqx_htb_limiter:check(100000, Client),
?assertEqual({ok, Client}, Result)
end,
with_bucket(default, Fun, Case).
with_per_client(Fun, Case).
t_try_restore_agg(_) ->
Fun = fun(#{per_client := Cli} = Bucket) ->
Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := 1,
capacity := 200,
@ -267,20 +222,20 @@ t_try_restore_agg(_) ->
max_retry_time := 100,
failure_strategy := force
},
Bucket2#{per_client := Cli2}
Bucket2#{client := Cli2}
end,
Case = fun() ->
Client = connect(default),
Case = fun(Cfg) ->
Client = connect(Cfg),
{_, _, Retry, L2} = emqx_htb_limiter:check(150, Client),
timer:sleep(200),
{ok, L3} = emqx_htb_limiter:check(Retry, L2),
Available = emqx_htb_limiter:available(L3),
?assert(Available >= 50)
end,
with_bucket(default, Fun, Case).
with_bucket(Fun, Case).
t_short_board(_) ->
Fun = fun(#{per_client := Cli} = Bucket) ->
Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := ?RATE("100/1s"),
initial := 0,
@ -291,18 +246,18 @@ t_short_board(_) ->
capacity := 600,
initial := 600
},
Bucket2#{per_client := Cli2}
Bucket2#{client := Cli2}
end,
Case = fun() ->
Case = fun(Cfg) ->
Counter = counters:new(1, []),
start_client(default, ?NOW + 2000, Counter, 20),
start_client(Cfg, ?NOW + 2000, Counter, 20),
timer:sleep(2100),
check_average_rate(Counter, 2, 100)
end,
with_bucket(default, Fun, Case).
with_bucket(Fun, Case).
t_rate(_) ->
Fun = fun(#{per_client := Cli} = Bucket) ->
Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := ?RATE("100/100ms"),
initial := 0,
@ -313,10 +268,10 @@ t_rate(_) ->
capacity := infinity,
initial := 0
},
Bucket2#{per_client := Cli2}
Bucket2#{client := Cli2}
end,
Case = fun() ->
Client = connect(default),
Case = fun(Cfg) ->
Client = connect(Cfg),
Ts1 = erlang:system_time(millisecond),
C1 = emqx_htb_limiter:available(Client),
timer:sleep(1000),
@ -326,11 +281,11 @@ t_rate(_) ->
Inc = C2 - C1,
?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate")
end,
with_bucket(default, Fun, Case).
with_bucket(Fun, Case).
t_capacity(_) ->
Capacity = 600,
Fun = fun(#{per_client := Cli} = Bucket) ->
Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := ?RATE("100/100ms"),
initial := 0,
@ -341,25 +296,25 @@ t_capacity(_) ->
capacity := infinity,
initial := 0
},
Bucket2#{per_client := Cli2}
Bucket2#{client := Cli2}
end,
Case = fun() ->
Client = connect(default),
Case = fun(Cfg) ->
Client = connect(Cfg),
timer:sleep(1000),
C1 = emqx_htb_limiter:available(Client),
?assertEqual(Capacity, C1, "test bucket capacity")
end,
with_bucket(default, Fun, Case).
with_bucket(Fun, Case).
%%--------------------------------------------------------------------
%% Test Cases Global Level
%%--------------------------------------------------------------------
t_collaborative_alloc(_) ->
GlobalMod = fun(Cfg) ->
Cfg#{rate := ?RATE("600/1s")}
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
end,
Bucket1 = fun(#{per_client := Cli} = Bucket) ->
Bucket1 = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := ?RATE("400/1s"),
initial := 0,
@ -370,7 +325,7 @@ t_collaborative_alloc(_) ->
capacity := 100,
initial := 100
},
Bucket2#{per_client := Cli2}
Bucket2#{client := Cli2}
end,
Bucket2 = fun(Bucket) ->
@ -381,8 +336,8 @@ t_collaborative_alloc(_) ->
Case = fun() ->
C1 = counters:new(1, []),
C2 = counters:new(1, []),
start_client(b1, ?NOW + 2000, C1, 20),
start_client(b2, ?NOW + 2000, C2, 30),
start_client({b1, Bucket1}, ?NOW + 2000, C1, 20),
start_client({b2, Bucket2}, ?NOW + 2000, C2, 30),
timer:sleep(2100),
check_average_rate(C1, 2, 300),
check_average_rate(C2, 2, 300)
@ -395,14 +350,16 @@ t_collaborative_alloc(_) ->
).
t_burst(_) ->
GlobalMod = fun(Cfg) ->
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{
message_routing := MR#{
rate := ?RATE("200/1s"),
burst := ?RATE("400/1s")
}
}
end,
Bucket = fun(#{per_client := Cli} = Bucket) ->
Bucket = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := ?RATE("200/1s"),
initial := 0,
@ -413,16 +370,16 @@ t_burst(_) ->
capacity := 200,
divisible := true
},
Bucket2#{per_client := Cli2}
Bucket2#{client := Cli2}
end,
Case = fun() ->
C1 = counters:new(1, []),
C2 = counters:new(1, []),
C3 = counters:new(1, []),
start_client(b1, ?NOW + 2000, C1, 20),
start_client(b2, ?NOW + 2000, C2, 30),
start_client(b3, ?NOW + 2000, C3, 30),
start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
start_client({b2, Bucket}, ?NOW + 2000, C2, 30),
start_client({b3, Bucket}, ?NOW + 2000, C3, 30),
timer:sleep(2100),
Total = lists:sum([counters:get(X, 1) || X <- [C1, C2, C3]]),
@ -436,11 +393,11 @@ t_burst(_) ->
).
t_limit_global_with_unlimit_other(_) ->
GlobalMod = fun(Cfg) ->
Cfg#{rate := ?RATE("600/1s")}
GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
end,
Bucket = fun(#{per_client := Cli} = Bucket) ->
Bucket = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{
rate := infinity,
initial := 0,
@ -451,12 +408,12 @@ t_limit_global_with_unlimit_other(_) ->
capacity := infinity,
initial := 0
},
Bucket2#{per_client := Cli2}
Bucket2#{client := Cli2}
end,
Case = fun() ->
C1 = counters:new(1, []),
start_client(b1, ?NOW + 2000, C1, 20),
start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
timer:sleep(2100),
check_average_rate(C1, 2, 600)
end,
@ -470,28 +427,6 @@ t_limit_global_with_unlimit_other(_) ->
%%--------------------------------------------------------------------
%% Test Cases container
%%--------------------------------------------------------------------
t_new_container(_) ->
C1 = emqx_limiter_container:new(),
C2 = emqx_limiter_container:new([message_routing]),
C3 = emqx_limiter_container:update_by_name(message_routing, default, C1),
?assertMatch(
#{
message_routing := _,
retry_ctx := undefined,
{retry, message_routing} := _
},
C2
),
?assertMatch(
#{
message_routing := _,
retry_ctx := undefined,
{retry, message_routing} := _
},
C3
),
ok.
t_check_container(_) ->
Cfg = fun(Cfg) ->
Cfg#{
@ -500,10 +435,11 @@ t_check_container(_) ->
capacity := 1000
}
end,
Case = fun() ->
C1 = emqx_limiter_container:new(
Case = fun(#{client := Client} = BucketCfg) ->
C1 = emqx_limiter_container:get_limiter_by_types(
?MODULE,
[message_routing],
#{message_routing => default}
#{message_routing => BucketCfg, client => #{message_routing => Client}}
),
{ok, C2} = emqx_limiter_container:check(1000, message_routing, C1),
{pause, Pause, C3} = emqx_limiter_container:check(1000, message_routing, C2),
@ -514,7 +450,39 @@ t_check_container(_) ->
RetryData = emqx_limiter_container:get_retry_context(C5),
?assertEqual(Context, RetryData)
end,
with_per_client(default, Cfg, Case).
with_per_client(Cfg, Case).
%%--------------------------------------------------------------------
%% Test Override
%%--------------------------------------------------------------------
t_bucket_no_client(_) ->
Rate = ?RATE("1/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := Rate}}}
end,
BucketMod = fun(Bucket) ->
maps:remove(client, Bucket)
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := Rate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
t_bucket_client(_) ->
GlobalRate = ?RATE("1/s"),
BucketRate = ?RATE("10/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}}
end,
BucketMod = fun(#{client := Client} = Bucket) ->
Bucket#{client := Client#{rate := BucketRate}}
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := BucketRate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
%%--------------------------------------------------------------------
%% Test Cases misc
@ -607,19 +575,23 @@ t_schema_unit(_) ->
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
start_client(Name, EndTime, Counter, Number) ->
start_client(Cfg, EndTime, Counter, Number) ->
lists:foreach(
fun(_) ->
spawn(fun() ->
start_client(Name, EndTime, Counter)
do_start_client(Cfg, EndTime, Counter)
end)
end,
lists:seq(1, Number)
).
start_client(Name, EndTime, Counter) ->
#{per_client := PerClient} =
emqx_config:get([limiter, message_routing, bucket, Name]),
do_start_client({Name, CfgFun}, EndTime, Counter) ->
do_start_client(Name, CfgFun(make_limiter_cfg()), EndTime, Counter);
do_start_client(Cfg, EndTime, Counter) ->
do_start_client(?MODULE, Cfg, EndTime, Counter).
do_start_client(Name, Cfg, EndTime, Counter) ->
#{client := PerClient} = Cfg,
#{rate := Rate} = PerClient,
Client = #client{
start = ?NOW,
@ -627,7 +599,7 @@ start_client(Name, EndTime, Counter) ->
counter = Counter,
obtained = 0,
rate = Rate,
client = connect(Name)
client = connect(Name, Cfg)
},
client_loop(Client).
@ -711,35 +683,50 @@ to_rate(Str) ->
{ok, Rate} = emqx_limiter_schema:to_rate(Str),
Rate.
with_global(Modifier, BuckeTemps, Case) ->
Fun = fun(Cfg) ->
#{bucket := #{default := BucketCfg}} = Cfg2 = Modifier(Cfg),
Fun = fun({Name, BMod}, Acc) ->
Acc#{Name => BMod(BucketCfg)}
end,
Buckets = lists:foldl(Fun, #{}, BuckeTemps),
Cfg2#{bucket := Buckets}
end,
with_global(Modifier, Buckets, Case) ->
with_config([limiter], Modifier, Buckets, Case).
with_config([limiter, message_routing], Fun, Case).
with_bucket(Modifier, Case) ->
Cfg = Modifier(make_limiter_cfg()),
add_bucket(Cfg),
Case(Cfg),
del_bucket().
with_bucket(Bucket, Modifier, Case) ->
Path = [limiter, message_routing, bucket, Bucket],
with_config(Path, Modifier, Case).
with_per_client(Modifier, Case) ->
#{client := Client} = Cfg = make_limiter_cfg(),
Cfg2 = Cfg#{client := Modifier(Client)},
add_bucket(Cfg2),
Case(Cfg2),
del_bucket().
with_per_client(Bucket, Modifier, Case) ->
Path = [limiter, message_routing, bucket, Bucket, per_client],
with_config(Path, Modifier, Case).
with_config(Path, Modifier, Case) ->
with_config(Path, Modifier, Buckets, Case) ->
Cfg = emqx_config:get(Path),
NewCfg = Modifier(Cfg),
ct:pal("test with config:~p~n", [NewCfg]),
emqx_config:put(Path, NewCfg),
emqx_limiter_server:restart(message_routing),
timer:sleep(500),
BucketCfg = make_limiter_cfg(),
lists:foreach(
fun
({Name, BucketFun}) ->
add_bucket(Name, BucketFun(BucketCfg));
(BucketFun) ->
add_bucket(BucketFun(BucketCfg))
end,
Buckets
),
DelayReturn = delay_return(Case),
lists:foreach(
fun
({Name, _Cfg}) ->
del_bucket(Name);
(_Cfg) ->
del_bucket()
end,
Buckets
),
emqx_config:put(Path, Cfg),
emqx_limiter_server:restart(message_routing),
DelayReturn().
delay_return(Case) ->
@ -751,10 +738,40 @@ delay_return(Case) ->
fun() -> erlang:raise(Type, Reason, Trace) end
end.
connect(Name) ->
{ok, Limiter} = emqx_limiter_server:connect(message_routing, Name),
connect({Name, CfgFun}) ->
connect(Name, CfgFun(make_limiter_cfg()));
connect(Cfg) ->
connect(?MODULE, Cfg).
connect(Name, Cfg) ->
{ok, Limiter} = emqx_limiter_server:connect(Name, message_routing, Cfg),
Limiter.
make_limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Client = #{
rate => Infinity,
initial => 0,
capacity => Infinity,
low_watermark => 0,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{client => Client, rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket(Cfg) ->
add_bucket(?MODULE, Cfg).
add_bucket(Name, Cfg) ->
emqx_limiter_server:add_bucket(Name, message_routing, Cfg).
del_bucket() ->
del_bucket(?MODULE).
del_bucket(Name) ->
emqx_limiter_server:del_bucket(Name, message_routing).
check_average_rate(Counter, Second, Rate) ->
Cost = counters:get(Counter, 1),
PerSec = Cost / Second,

View File

@ -59,6 +59,7 @@ init_per_testcase(TestCase, Config) when
TestCase =/= t_ws_pingreq_before_connected,
TestCase =/= t_ws_non_check_origin
->
add_bucket(),
%% Meck Cm
ok = meck:new(emqx_cm, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
@ -96,6 +97,7 @@ init_per_testcase(TestCase, Config) when
| Config
];
init_per_testcase(t_ws_non_check_origin, Config) ->
add_bucket(),
ok = emqx_common_test_helpers:start_apps([]),
PrevConfig = emqx_config:get_listener_conf(ws, default, [websocket]),
emqx_config:put_listener_conf(ws, default, [websocket, check_origin_enable], false),
@ -105,6 +107,7 @@ init_per_testcase(t_ws_non_check_origin, Config) ->
| Config
];
init_per_testcase(_, Config) ->
add_bucket(),
PrevConfig = emqx_config:get_listener_conf(ws, default, [websocket]),
ok = emqx_common_test_helpers:start_apps([]),
[
@ -119,6 +122,7 @@ end_per_testcase(TestCase, _Config) when
TestCase =/= t_ws_non_check_origin,
TestCase =/= t_ws_pingreq_before_connected
->
del_bucket(),
lists:foreach(
fun meck:unload/1,
[
@ -131,11 +135,13 @@ end_per_testcase(TestCase, _Config) when
]
);
end_per_testcase(t_ws_non_check_origin, Config) ->
del_bucket(),
PrevConfig = ?config(prev_config, Config),
emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
emqx_common_test_helpers:stop_apps([]),
ok;
end_per_testcase(_, Config) ->
del_bucket(),
PrevConfig = ?config(prev_config, Config),
emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
emqx_common_test_helpers:stop_apps([]),
@ -501,15 +507,12 @@ t_handle_timeout_emit_stats(_) ->
?assertEqual(undefined, ?ws_conn:info(stats_timer, St)).
t_ensure_rate_limit(_) ->
%% XXX In the future, the limiter should provide an API for config updates
Path = [limiter, bytes_in, bucket, default, per_client],
PerClient = emqx_config:get(Path),
{ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
emqx_config:put(Path, PerClient#{rate := Rate}),
emqx_limiter_server:restart(bytes_in),
timer:sleep(100),
Limiter = init_limiter(),
Limiter = init_limiter(#{
bytes_in => bucket_cfg(),
message_in => bucket_cfg(),
client => #{bytes_in => client_cfg(Rate)}
}),
St = st(#{limiter => Limiter}),
%% must be bigger than the value in emqx_ratelimit_SUITE
@ -522,11 +525,7 @@ t_ensure_rate_limit(_) ->
St
),
?assertEqual(blocked, ?ws_conn:info(sockstate, St1)),
?assertEqual([{active, false}], ?ws_conn:info(postponed, St1)),
emqx_config:put(Path, PerClient),
emqx_limiter_server:restart(bytes_in),
timer:sleep(100).
?assertEqual([{active, false}], ?ws_conn:info(postponed, St1)).
t_parse_incoming(_) ->
{Packets, St} = ?ws_conn:parse_incoming(<<48, 3>>, [], st()),
@ -691,7 +690,44 @@ ws_client(State) ->
ct:fail(ws_timeout)
end.
limiter_cfg() -> #{bytes_in => default, message_in => default}.
-define(LIMITER_ID, 'ws:default').
init_limiter() ->
emqx_limiter_container:get_limiter_by_names([bytes_in, message_in], limiter_cfg()).
init_limiter(limiter_cfg()).
init_limiter(LimiterCfg) ->
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg).
limiter_cfg() ->
Cfg = bucket_cfg(),
Client = client_cfg(),
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
client_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
client_cfg(Infinity).
client_cfg(Rate) ->
Infinity = emqx_limiter_schema:infinity_value(),
#{
rate => Rate,
initial => 0,
capacity => Infinity,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() ->
Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).

View File

@ -36,6 +36,6 @@
-type authenticator_id() :: binary().
-endif.
-define(RESOURCE_GROUP, <<"emqx_authn">>).
-endif.

View File

@ -33,14 +33,8 @@
% Swagger
-define(API_TAGS_GLOBAL, [
?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY,
<<"authentication config(global)">>
]).
-define(API_TAGS_SINGLE, [
?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY,
<<"authentication config(single listener)">>
]).
-define(API_TAGS_GLOBAL, [<<"Authentication">>]).
-define(API_TAGS_SINGLE, [<<"Listener authentication">>]).
-export([
api_spec/0,

View File

@ -29,15 +29,8 @@
-define(NOT_FOUND, 'NOT_FOUND').
% Swagger
-define(API_TAGS_GLOBAL, [
?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY,
<<"authentication config(global)">>
]).
-define(API_TAGS_SINGLE, [
?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY,
<<"authentication config(single listener)">>
]).
-define(API_TAGS_GLOBAL, [<<"Authentication">>]).
-define(API_TAGS_SINGLE, [<<"Listener authentication">>]).
-export([
api_spec/0,
@ -66,15 +59,7 @@ schema("/authentication/:id/import_users") ->
tags => ?API_TAGS_GLOBAL,
description => ?DESC(authentication_id_import_users_post),
parameters => [emqx_authn_api:param_auth_id()],
'requestBody' => #{
content => #{
'multipart/form-data' => #{
schema => #{
filename => file
}
}
}
},
'requestBody' => emqx_dashboard_swagger:file_schema(filename),
responses => #{
204 => <<"Users imported">>,
400 => error_codes([?BAD_REQUEST], <<"Bad Request">>),
@ -89,15 +74,7 @@ schema("/listeners/:listener_id/authentication/:id/import_users") ->
tags => ?API_TAGS_SINGLE,
description => ?DESC(listeners_listener_id_authentication_id_import_users_post),
parameters => [emqx_authn_api:param_listener_id(), emqx_authn_api:param_auth_id()],
'requestBody' => #{
content => #{
'multipart/form-data' => #{
schema => #{
filename => file
}
}
}
},
'requestBody' => emqx_dashboard_swagger:file_schema(filename),
responses => #{
204 => <<"Users imported">>,
400 => error_codes([?BAD_REQUEST], <<"Bad Request">>),

View File

@ -33,7 +33,8 @@
bin/1,
ensure_apps_started/1,
cleanup_resources/0,
make_resource_id/1
make_resource_id/1,
without_password/1
]).
-define(AUTHN_PLACEHOLDERS, [
@ -117,21 +118,21 @@ parse_sql(Template, ReplaceWith) ->
render_deep(Template, Credential) ->
emqx_placeholder:proc_tmpl_deep(
Template,
Credential,
mapping_credential(Credential),
#{return => full_binary, var_trans => fun handle_var/2}
).
render_str(Template, Credential) ->
emqx_placeholder:proc_tmpl(
Template,
Credential,
mapping_credential(Credential),
#{return => full_binary, var_trans => fun handle_var/2}
).
render_sql_params(ParamList, Credential) ->
emqx_placeholder:proc_tmpl(
ParamList,
Credential,
mapping_credential(Credential),
#{return => rawlist, var_trans => fun handle_sql_var/2}
).
@ -199,10 +200,23 @@ make_resource_id(Name) ->
NameBin = bin(Name),
emqx_resource:generate_id(NameBin).
without_password(Credential) ->
without_password(Credential, [password, <<"password">>]).
%%--------------------------------------------------------------------
%% Internal functions
%%--------------------------------------------------------------------
without_password(Credential, []) ->
Credential;
without_password(Credential, [Name | Rest]) ->
case maps:is_key(Name, Credential) of
true ->
without_password(Credential#{Name => <<"[password]">>}, Rest);
false ->
without_password(Credential, Rest)
end.
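%% Illustration only (assumed credential values): both the atom and the
%% binary key are masked, e.g.
%%   without_password(#{username => <<"u1">>, password => <<"secret">>})
%%     -> #{username => <<"u1">>, password => <<"[password]">>}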
handle_var({var, Name}, undefined) ->
error({cannot_get_variable, Name});
handle_var({var, <<"peerhost">>}, PeerHost) ->
@ -216,3 +230,8 @@ handle_sql_var({var, <<"peerhost">>}, PeerHost) ->
emqx_placeholder:bin(inet:ntoa(PeerHost));
handle_sql_var(_, Value) ->
emqx_placeholder:sql_data(Value).
mapping_credential(C = #{cn := CN, dn := DN}) ->
C#{cert_common_name => CN, cert_subject => DN};
mapping_credential(C) ->
C.

View File

@ -331,7 +331,10 @@ check_client_first_message(Bin, _Cache, #{iteration_count := IterationCount} = S
{continue, ServerFirstMessage, Cache};
ignore ->
ignore;
{error, _Reason} ->
{error, Reason} ->
?TRACE_AUTHN_PROVIDER("check_client_first_message_error", #{
reason => Reason
}),
{error, not_authorized}
end.
@ -344,7 +347,10 @@ check_client_final_message(Bin, #{is_superuser := IsSuperuser} = Cache, #{algori
of
{ok, ServerFinalMessage} ->
{ok, #{is_superuser => IsSuperuser}, ServerFinalMessage};
{error, _Reason} ->
{error, Reason} ->
?TRACE_AUTHN_PROVIDER("check_client_final_message_error", #{
reason => Reason
}),
{error, not_authorized}
end.

View File

@ -188,23 +188,22 @@ authenticate(
} = State
) ->
Request = generate_request(Credential, State),
case emqx_resource:query(ResourceId, {Method, Request, RequestTimeout}) of
Response = emqx_resource:query(ResourceId, {Method, Request, RequestTimeout}),
?TRACE_AUTHN_PROVIDER("http_response", #{
request => request_for_log(Credential, State),
response => response_for_log(Response),
resource => ResourceId
}),
case Response of
{ok, 204, _Headers} ->
{ok, #{is_superuser => false}};
{ok, 200, Headers, Body} ->
handle_response(Headers, Body);
{ok, _StatusCode, _Headers} = Response ->
log_response(ResourceId, Response),
ignore;
{ok, _StatusCode, _Headers, _Body} = Response ->
log_response(ResourceId, Response),
ignore;
{error, Reason} ->
?SLOG(error, #{
msg => "http_server_query_failed",
resource => ResourceId,
reason => Reason
}),
{error, _Reason} ->
ignore
end.
@ -296,7 +295,8 @@ parse_config(
cow_qs:parse_qs(to_bin(Query))
),
body_template => emqx_authn_utils:parse_deep(maps:get(body, Config, #{})),
request_timeout => RequestTimeout
request_timeout => RequestTimeout,
url => RawUrl
},
{Config#{base_url => BaseUrl, pool_type => random}, State}.
@ -379,11 +379,6 @@ parse_body(<<"application/x-www-form-urlencoded", _/binary>>, Body) ->
parse_body(ContentType, _) ->
{error, {unsupported_content_type, ContentType}}.
may_append_body(Output, {ok, _, _, Body}) ->
Output#{body => Body};
may_append_body(Output, {ok, _, _}) ->
Output.
uri_encode(T) ->
emqx_http_lib:uri_encode(to_list(T)).
@ -391,26 +386,33 @@ encode_path(Path) ->
Parts = string:split(Path, "/", all),
lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]).
log_response(ResourceId, Other) ->
Output = may_append_body(#{resource => ResourceId}, Other),
case erlang:element(2, Other) of
Code5xx when Code5xx >= 500 andalso Code5xx < 600 ->
?SLOG(error, Output#{
msg => "http_server_error",
code => Code5xx
});
Code4xx when Code4xx >= 400 andalso Code4xx < 500 ->
?SLOG(warning, Output#{
msg => "refused_by_http_server",
code => Code4xx
});
OtherCode ->
?SLOG(error, Output#{
msg => "undesired_response_code",
code => OtherCode
})
request_for_log(Credential, #{url := Url} = State) ->
SafeCredential = emqx_authn_utils:without_password(Credential),
case generate_request(SafeCredential, State) of
{PathQuery, Headers} ->
#{
method => post,
base_url => Url,
path_query => PathQuery,
headers => Headers
};
{PathQuery, Headers, Body} ->
#{
method => post,
base_url => Url,
path_query => PathQuery,
headers => Headers,
body => Body
}
end.
response_for_log({ok, StatusCode, Headers}) ->
#{status => StatusCode, headers => Headers};
response_for_log({ok, StatusCode, Headers, Body}) ->
#{status => StatusCode, headers => Headers, body => Body};
response_for_log({error, Error}) ->
#{error => Error}.
to_list(A) when is_atom(A) ->
atom_to_list(A);
to_list(B) when is_binary(B) ->

View File

@ -75,26 +75,11 @@ fields('jwks') ->
{pool_size, fun emqx_connector_schema_lib:pool_size/1},
{refresh_interval, fun refresh_interval/1},
{ssl, #{
type => hoconsc:union([
hoconsc:ref(?MODULE, ssl_enable),
hoconsc:ref(?MODULE, ssl_disable)
]),
desc => ?DESC(ssl),
type => hoconsc:ref(emqx_schema, "ssl_client_opts"),
default => #{<<"enable">> => false},
required => false
desc => ?DESC("ssl")
}}
] ++ common_fields();
fields(ssl_enable) ->
[
{enable, #{type => true, desc => ?DESC(enable)}},
{cacertfile, fun cacertfile/1},
{certfile, fun certfile/1},
{keyfile, fun keyfile/1},
{verify, fun verify/1},
{server_name_indication, fun server_name_indication/1}
];
fields(ssl_disable) ->
[{enable, #{type => false, desc => ?DESC(enable)}}].
] ++ common_fields().
desc('hmac-based') ->
?DESC('hmac-based');
@ -147,27 +132,6 @@ refresh_interval(default) -> 300;
refresh_interval(validator) -> [fun(I) -> I > 0 end];
refresh_interval(_) -> undefined.
cacertfile(type) -> string();
cacertfile(desc) -> ?DESC(?FUNCTION_NAME);
cacertfile(_) -> undefined.
certfile(type) -> string();
certfile(desc) -> ?DESC(?FUNCTION_NAME);
certfile(_) -> undefined.
keyfile(type) -> string();
keyfile(desc) -> ?DESC(?FUNCTION_NAME);
keyfile(_) -> undefined.
verify(type) -> hoconsc:enum([verify_peer, verify_none]);
verify(desc) -> ?DESC(?FUNCTION_NAME);
verify(default) -> verify_none;
verify(_) -> undefined.
server_name_indication(type) -> string();
server_name_indication(desc) -> ?DESC(?FUNCTION_NAME);
server_name_indication(_) -> undefined.
verify_claims(type) ->
list();
verify_claims(desc) ->
@ -263,8 +227,7 @@ authenticate(
) ->
case emqx_resource:query(ResourceId, get_jwks) of
{error, Reason} ->
?SLOG(error, #{
msg => "get_jwks_failed",
?TRACE_AUTHN_PROVIDER(error, "get_jwks_failed", #{
resource => ResourceId,
reason => Reason
}),
@ -386,10 +349,17 @@ verify(undefined, _, _, _) ->
ignore;
verify(JWT, JWKs, VerifyClaims, AclClaimName) ->
case do_verify(JWT, JWKs, VerifyClaims) of
{ok, Extra} -> {ok, acl(Extra, AclClaimName)};
{error, {missing_claim, _}} -> {error, bad_username_or_password};
{error, invalid_signature} -> ignore;
{error, {claims, _}} -> {error, bad_username_or_password}
{ok, Extra} ->
{ok, acl(Extra, AclClaimName)};
{error, {missing_claim, Claim}} ->
?TRACE_AUTHN_PROVIDER("missing_jwt_claim", #{jwt => JWT, claim => Claim}),
{error, bad_username_or_password};
{error, invalid_signature} ->
?TRACE_AUTHN_PROVIDER("invalid_jwt_signature", #{jwks => JWKs, jwt => JWT}),
ignore;
{error, {claims, Claims}} ->
?TRACE_AUTHN_PROVIDER("invalid_jwt_claims", #{jwt => JWT, claims => Claims}),
{error, bad_username_or_password}
end.
acl(Claims, AclClaimName) ->
@ -407,11 +377,11 @@ acl(Claims, AclClaimName) ->
end,
maps:merge(emqx_authn_utils:is_superuser(Claims), Acl).
do_verify(_JWS, [], _VerifyClaims) ->
do_verify(_JWT, [], _VerifyClaims) ->
{error, invalid_signature};
do_verify(JWS, [JWK | More], VerifyClaims) ->
try jose_jws:verify(JWK, JWS) of
{true, Payload, _JWS} ->
do_verify(JWT, [JWK | More], VerifyClaims) ->
try jose_jws:verify(JWK, JWT) of
{true, Payload, _JWT} ->
Claims0 = emqx_json:decode(Payload, [return_maps]),
Claims = try_convert_to_int(Claims0, [<<"exp">>, <<"iat">>, <<"nbf">>]),
case verify_claims(Claims, VerifyClaims) of
@ -421,11 +391,11 @@ do_verify(JWS, [JWK | More], VerifyClaims) ->
{error, Reason}
end;
{false, _, _} ->
do_verify(JWS, More, VerifyClaims)
do_verify(JWT, More, VerifyClaims)
catch
_:_Reason ->
?TRACE("JWT", "authn_jwt_invalid_signature", #{jwk => JWK, jws => JWS}),
{error, invalid_signature}
_:Reason ->
?TRACE_AUTHN_PROVIDER("jwt_verify_error", #{jwk => JWK, jwt => JWT, reason => Reason}),
do_verify(JWT, More, VerifyClaims)
end.
verify_claims(Claims, VerifyClaims0) ->

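A pattern repeated across the authn providers in this commit is replacing ad-hoc `?SLOG(error, #{msg => ...})` calls with `?TRACE_AUTHN_PROVIDER(...)`, invoked with a message, a message plus metadata, or a level plus message plus metadata. The macro definition itself is not part of this diff; purely as a hypothetical sketch, modelled on the three-argument `?TRACE(Tag, Msg, Meta)` call visible in the removed code above, the idea is:

%% Hypothetical sketch only -- not the actual emqx_authn header definition.
%% The intent: provider failures become trace events tagged for the
%% authentication subsystem instead of plain error log lines.
-define(TRACE_AUTHN_PROVIDER(Msg), ?TRACE_AUTHN_PROVIDER(Msg, #{})).
-define(TRACE_AUTHN_PROVIDER(Msg, Meta), ?TRACE("AUTHN", Msg, Meta)).
-define(TRACE_AUTHN_PROVIDER(Level, Msg, Meta), ?TRACE(Level, "AUTHN", Msg, Meta)).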
View File

@ -17,6 +17,7 @@
-module(emqx_authn_mnesia).
-include("emqx_authn.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("stdlib/include/ms_transform.hrl").
-include_lib("hocon/include/hoconsc.hrl").
@ -158,6 +159,7 @@ authenticate(
UserID = get_user_identity(Credential, Type),
case mnesia:dirty_read(?TAB, {UserGroup, UserID}) of
[] ->
?TRACE_AUTHN_PROVIDER("user_not_found"),
ignore;
[#user_info{password_hash = PasswordHash, salt = Salt, is_superuser = IsSuperuser}] ->
case
@ -165,8 +167,10 @@ authenticate(
Algorithm, Salt, PasswordHash, Password
)
of
true -> {ok, #{is_superuser => IsSuperuser}};
false -> {error, bad_username_or_password}
true ->
{ok, #{is_superuser => IsSuperuser}};
false ->
{error, bad_username_or_password}
end
end.

View File

@ -167,8 +167,7 @@ authenticate(
undefined ->
ignore;
{error, Reason} ->
?SLOG(error, #{
msg => "mongodb_query_failed",
?TRACE_AUTHN_PROVIDER(error, "mongodb_query_failed", #{
resource => ResourceId,
collection => Collection,
filter => Filter,
@ -180,11 +179,11 @@ authenticate(
ok ->
{ok, is_superuser(Doc, State)};
{error, {cannot_find_password_hash_field, PasswordHashField}} ->
?SLOG(error, #{
msg => "cannot_find_password_hash_field",
?TRACE_AUTHN_PROVIDER(error, "cannot_find_password_hash_field", #{
resource => ResourceId,
collection => Collection,
filter => Filter,
document => Doc,
password_hash_field => PasswordHashField
}),
ignore;

View File

@ -130,8 +130,7 @@ authenticate(
{error, Reason}
end;
{error, Reason} ->
?SLOG(error, #{
msg => "mysql_query_failed",
?TRACE_AUTHN_PROVIDER(error, "mysql_query_failed", #{
resource => ResourceId,
tmpl_token => TmplToken,
params => Params,

View File

@ -133,8 +133,7 @@ authenticate(
{error, Reason}
end;
{error, Reason} ->
?SLOG(error, #{
msg => "postgresql_query_failed",
?TRACE_AUTHN_PROVIDER(error, "postgresql_query_failed", #{
resource => ResourceId,
params => Params,
reason => Reason

View File

@ -128,13 +128,14 @@ authenticate(#{auth_method := _}, _) ->
authenticate(
#{password := Password} = Credential,
#{
cmd := {Command, KeyTemplate, Fields},
cmd := {CommandName, KeyTemplate, Fields},
resource_id := ResourceId,
password_hash_algorithm := Algorithm
}
) ->
NKey = emqx_authn_utils:render_str(KeyTemplate, Credential),
case emqx_resource:query(ResourceId, {cmd, [Command, NKey | Fields]}) of
Command = [CommandName, NKey | Fields],
case emqx_resource:query(ResourceId, {cmd, Command}) of
{ok, []} ->
ignore;
{ok, Values} ->
@ -150,8 +151,7 @@ authenticate(
{error, Reason}
end;
{error, Reason} ->
?SLOG(error, #{
msg => "redis_query_failed",
?TRACE_AUTHN_PROVIDER(error, "redis_query_failed", #{
resource => ResourceId,
cmd => Command,
keys => NKey,

View File

@ -34,7 +34,9 @@
password => <<"plain">>,
peerhost => {127, 0, 0, 1},
listener => 'tcp:default',
protocol => mqtt
protocol => mqtt,
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>
}).
-define(SERVER_RESPONSE_JSON(Result), ?SERVER_RESPONSE_JSON(Result, false)).
@ -517,7 +519,9 @@ samples() ->
<<"username">> := <<"plain">>,
<<"password">> := <<"plain">>,
<<"clientid">> := <<"clienta">>,
<<"peerhost">> := <<"127.0.0.1">>
<<"peerhost">> := <<"127.0.0.1">>,
<<"cert_subject">> := <<"cert_subject_data">>,
<<"cert_common_name">> := <<"cert_common_name_data">>
} = jiffy:decode(RawBody, [return_maps]),
Req = cowboy_req:reply(
200,
@ -534,7 +538,9 @@ samples() ->
<<"clientid">> => ?PH_CLIENTID,
<<"username">> => ?PH_USERNAME,
<<"password">> => ?PH_PASSWORD,
<<"peerhost">> => ?PH_PEERHOST
<<"peerhost">> => ?PH_PEERHOST,
<<"cert_subject">> => ?PH_CERT_SUBJECT,
<<"cert_common_name">> => ?PH_CERT_CN_NAME
}
},
result => {ok, #{is_superuser => false, user_property => #{}}}

View File

@ -345,6 +345,33 @@ user_seeds() ->
result => {ok, #{is_superuser => true}}
},
#{
data => #{
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>,
password_hash =>
<<"ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf">>,
salt => <<"salt">>,
is_superuser => 1
},
credentials => #{
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>,
password => <<"sha256">>
},
config_params => #{
<<"filter">> => #{
<<"cert_subject">> => <<"${cert_subject}">>,
<<"cert_common_name">> => <<"${cert_common_name}">>
},
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"prefix">>
}
},
result => {ok, #{is_superuser => true}}
},
#{
data => #{
username => <<"bcrypt">>,

View File

@ -318,6 +318,36 @@ user_seeds() ->
result => {ok, #{is_superuser => true}}
},
#{
data => #{
username => "sha256",
password_hash => "ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf",
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>,
salt => "salt",
is_superuser_int => 1
},
credentials => #{
clientid => <<"sha256">>,
password => <<"sha256">>,
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>
},
config_params => #{
<<"query">> =>
<<
"SELECT password_hash, salt, is_superuser_int as is_superuser\n"
" FROM users where cert_subject = ${cert_subject} AND \n"
" cert_common_name = ${cert_common_name} LIMIT 1"
>>,
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"prefix">>
}
},
result => {ok, #{is_superuser => true}}
},
#{
data => #{
username => <<"bcrypt">>,
@ -433,14 +463,24 @@ init_seeds() ->
" username VARCHAR(255),\n"
" password_hash VARCHAR(255),\n"
" salt VARCHAR(255),\n"
" cert_subject VARCHAR(255),\n"
" cert_common_name VARCHAR(255),\n"
" is_superuser_str VARCHAR(255),\n"
" is_superuser_int TINYINT)"
),
Fields = [username, password_hash, salt, is_superuser_str, is_superuser_int],
Fields = [
username,
password_hash,
salt,
cert_subject,
cert_common_name,
is_superuser_str,
is_superuser_int
],
InsertQuery =
"INSERT INTO users(username, password_hash, salt, "
" is_superuser_str, is_superuser_int) VALUES(?, ?, ?, ?, ?)",
"INSERT INTO users(username, password_hash, salt, cert_subject, cert_common_name,"
" is_superuser_str, is_superuser_int) VALUES(?, ?, ?, ?, ?, ?, ?)",
lists:foreach(
fun(#{data := Values}) ->

View File

@ -380,6 +380,36 @@ user_seeds() ->
result => {ok, #{is_superuser => true}}
},
#{
data => #{
username => "sha256",
password_hash => "ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf",
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>,
salt => "salt",
is_superuser_int => 1
},
credentials => #{
clientid => <<"sha256">>,
password => <<"sha256">>,
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>
},
config_params => #{
<<"query">> =>
<<
"SELECT password_hash, salt, is_superuser_int as is_superuser\n"
" FROM users where cert_subject = ${cert_subject} AND \n"
" cert_common_name = ${cert_common_name} LIMIT 1"
>>,
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"prefix">>
}
},
result => {ok, #{is_superuser => true}}
},
#{
data => #{
username => <<"bcrypt">>,
@ -474,6 +504,8 @@ init_seeds() ->
" username varchar(255),\n"
" password_hash varchar(255),\n"
" salt varchar(255),\n"
" cert_subject varchar(255),\n"
" cert_common_name varchar(255),\n"
" is_superuser_str varchar(255),\n"
" is_superuser_int smallint,\n"
" is_superuser_bool boolean)"
@ -487,12 +519,21 @@ init_seeds() ->
).
create_user(Values) ->
Fields = [username, password_hash, salt, is_superuser_str, is_superuser_int, is_superuser_bool],
Fields = [
username,
password_hash,
salt,
cert_subject,
cert_common_name,
is_superuser_str,
is_superuser_int,
is_superuser_bool
],
InsertQuery =
"INSERT INTO users(username, password_hash, salt,"
"INSERT INTO users(username, password_hash, salt, cert_subject, cert_common_name, "
"is_superuser_str, is_superuser_int, is_superuser_bool) "
"VALUES($1, $2, $3, $4, $5, $6)",
"VALUES($1, $2, $3, $4, $5, $6, $7, $8)",
Params = [maps:get(F, Values, null) || F <- Fields],
{ok, 1} = q(InsertQuery, Params),

View File

@ -475,6 +475,52 @@ user_seeds() ->
}
},
result => {ok, #{is_superuser => true}}
},
#{
data => #{
password_hash =>
<<"a3c7f6b085c3e5897ffb9b86f18a9d905063f8550a74444b5892e193c1b50428">>,
is_superuser => <<"1">>
},
credentials => #{
clientid => <<"sha256_no_salt">>,
cn => <<"cert_common_name">>,
dn => <<"cert_subject_name">>,
password => <<"sha256_no_salt">>
},
key => <<"mqtt_user:cert_common_name">>,
config_params => #{
<<"cmd">> => <<"HMGET mqtt_user:${cert_common_name} password_hash is_superuser">>,
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"disable">>
}
},
result => {ok, #{is_superuser => true}}
},
#{
data => #{
password_hash =>
<<"a3c7f6b085c3e5897ffb9b86f18a9d905063f8550a74444b5892e193c1b50428">>,
is_superuser => <<"1">>
},
credentials => #{
clientid => <<"sha256_no_salt">>,
cn => <<"cert_common_name">>,
dn => <<"cert_subject_name">>,
password => <<"sha256_no_salt">>
},
key => <<"mqtt_user:cert_subject_name">>,
config_params => #{
<<"cmd">> => <<"HMGET mqtt_user:${cert_subject} password_hash is_superuser">>,
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"disable">>
}
},
result => {ok, #{is_superuser => true}}
}
].
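To make the new seeds concrete: following the `Command = [CommandName, NKey | Fields]` construction from the Redis provider change earlier in this diff, the seed whose `key` is `mqtt_user:cert_subject_name` should end up issuing a query roughly like the sketch below (the exact string/binary representation of the command tokens is not guaranteed; `ResourceId` stands for the connector's resource id):

%% Sketch: the key template "mqtt_user:${cert_subject}" is rendered from the
%% client's certificate data before the command is sent to Redis.
emqx_resource:query(ResourceId, {cmd, [<<"HMGET">>, <<"mqtt_user:cert_subject_name">>, <<"password_hash">>, <<"is_superuser">>]}).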

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_authz, [
{description, "An OTP application"},
{vsn, "0.1.2"},
{vsn, "0.1.3"},
{registered, []},
{mod, {emqx_authz_app, []}},
{applications, [

View File

@ -53,11 +53,12 @@
-type sources() :: [source()].
-define(METRIC_SUPERUSER, 'authorization.superuser').
-define(METRIC_ALLOW, 'authorization.matched.allow').
-define(METRIC_DENY, 'authorization.matched.deny').
-define(METRIC_NOMATCH, 'authorization.nomatch').
-define(METRICS, [?METRIC_ALLOW, ?METRIC_DENY, ?METRIC_NOMATCH]).
-define(METRICS, [?METRIC_SUPERUSER, ?METRIC_ALLOW, ?METRIC_DENY, ?METRIC_NOMATCH]).
-define(IS_ENABLED(Enable), ((Enable =:= true) or (Enable =:= <<"true">>))).
@ -308,6 +309,30 @@ authorize(
Topic,
DefaultResult,
Sources
) ->
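    %% A client already flagged as superuser bypasses every authorization source:
    %% log the allow, bump 'authorization.superuser', and stop with allow.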
case maps:get(is_superuser, Client, false) of
true ->
log_allowed(#{
username => Username,
ipaddr => IpAddress,
topic => Topic,
is_superuser => true
}),
emqx_metrics:inc(?METRIC_SUPERUSER),
{stop, allow};
false ->
authorize_non_superuser(Client, PubSub, Topic, DefaultResult, Sources)
end.
authorize_non_superuser(
#{
username := Username,
peerhost := IpAddress
} = Client,
PubSub,
Topic,
DefaultResult,
Sources
) ->
case do_authorize(Client, PubSub, Topic, sources_with_defaults(Sources)) of
{{matched, allow}, AuthzSource} ->
@ -315,8 +340,7 @@ authorize(
'client.check_authz_complete',
[Client, PubSub, Topic, allow, AuthzSource]
),
?SLOG(info, #{
msg => "authorization_permission_allowed",
log_allowed(#{
username => Username,
ipaddr => IpAddress,
topic => Topic,
@ -356,6 +380,9 @@ authorize(
{stop, DefaultResult}
end.
log_allowed(Meta) ->
?SLOG(info, Meta#{msg => "authorization_permission_allowed"}).
do_authorize(_Client, _PubSub, _Topic, []) ->
nomatch;
do_authorize(Client, PubSub, Topic, [#{enable := false} | Rest]) ->

View File

@ -50,6 +50,8 @@
aggregate_metrics/1
]).
-define(TAGS, [<<"Authorization">>]).
api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
@ -70,6 +72,7 @@ schema("/authorization/sources") ->
get =>
#{
description => ?DESC(authorization_sources_get),
tags => ?TAGS,
responses =>
#{
200 => mk(
@ -81,6 +84,7 @@ schema("/authorization/sources") ->
post =>
#{
description => ?DESC(authorization_sources_post),
tags => ?TAGS,
'requestBody' => mk(
hoconsc:union(authz_sources_type_refs()),
#{desc => ?DESC(source_config)}
@ -101,6 +105,7 @@ schema("/authorization/sources/:type") ->
get =>
#{
description => ?DESC(authorization_sources_type_get),
tags => ?TAGS,
parameters => parameters_field(),
responses =>
#{
@ -114,6 +119,7 @@ schema("/authorization/sources/:type") ->
put =>
#{
description => ?DESC(authorization_sources_type_put),
tags => ?TAGS,
parameters => parameters_field(),
'requestBody' => mk(hoconsc:union(authz_sources_type_refs())),
responses =>
@ -125,6 +131,7 @@ schema("/authorization/sources/:type") ->
delete =>
#{
description => ?DESC(authorization_sources_type_delete),
tags => ?TAGS,
parameters => parameters_field(),
responses =>
#{
@ -139,6 +146,7 @@ schema("/authorization/sources/:type/status") ->
get =>
#{
description => ?DESC(authorization_sources_type_status_get),
tags => ?TAGS,
parameters => parameters_field(),
responses =>
#{
@ -159,6 +167,7 @@ schema("/authorization/sources/:type/move") ->
post =>
#{
description => ?DESC(authorization_sources_type_move_post),
tags => ?TAGS,
parameters => parameters_field(),
'requestBody' =>
emqx_dashboard_swagger:schema_with_examples(
@ -564,6 +573,10 @@ authz_sources_type_refs() ->
bin(Term) -> erlang:iolist_to_binary(io_lib:format("~p", [Term])).
status_metrics_example() ->
#{
'metrics_example' => #{
summary => <<"Showing a typical metrics example">>,
value =>
#{
resource_metrics => #{
matched => 0,
@ -617,6 +630,8 @@ status_metrics_example() ->
status => connected
}
]
}
}
}.
create_authz_file(Body) ->

View File

@ -84,8 +84,6 @@ t_ok(_Config) ->
<<"rules">> => <<"{allow, {user, \"username\"}, publish, [\"t\"]}.">>
}),
io:format("~p", [emqx_authz:acl_conf_file()]),
?assertEqual(
allow,
emqx_access_control:authorize(ClientInfo, publish, <<"t">>)
@ -96,6 +94,31 @@ t_ok(_Config) ->
emqx_access_control:authorize(ClientInfo, subscribe, <<"t">>)
).
t_superuser(_Config) ->
ClientInfo = #{
clientid => <<"clientid">>,
username => <<"username">>,
is_superuser => true,
peerhost => {127, 0, 0, 1},
zone => default,
listener => {tcp, default}
},
    %% ACL rules do not apply to a superuser,
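    %% so the deny rule configured below must be ignored for this client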
ok = setup_config(?RAW_SOURCE#{
<<"rules">> => <<"{deny, {user, \"username\"}, publish, [\"t\"]}.">>
}),
?assertEqual(
allow,
emqx_access_control:authorize(ClientInfo, publish, <<"t">>)
),
?assertEqual(
allow,
emqx_access_control:authorize(ClientInfo, subscribe, <<"t">>)
).
t_invalid_file(_Config) ->
?assertMatch(
{error, bad_acl_file_content},

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_auto_subscribe, [
{description, "An OTP application"},
{vsn, "0.1.0"},
{vsn, "0.1.1"},
{registered, []},
{mod, {emqx_auto_subscribe_app, []}},
{applications, [

View File

@ -44,12 +44,14 @@ schema("/mqtt/auto_subscribe") ->
'operationId' => auto_subscribe,
get => #{
description => ?DESC(list_auto_subscribe_api),
tags => [<<"Auto subscribe">>],
responses => #{
200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe")
}
},
put => #{
description => ?DESC(update_auto_subscribe_api),
tags => [<<"Auto subscribe">>],
'requestBody' => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"),
responses => #{
200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"),

View File

@ -127,6 +127,17 @@ HTTP 请求的正文。</br>
}
}
config_max_retries {
desc {
en: """HTTP request max retry times if failed."""
zh: """HTTP 请求失败最大重试次数"""
}
label: {
en: "HTTP Request Max Retries"
zh: "HTTP 请求重试次数"
}
}
desc_type {
desc {
en: """The Bridge Type"""

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*-
{application, emqx_bridge, [
{description, "An OTP application"},
{vsn, "0.1.0"},
{vsn, "0.1.1"},
{registered, []},
{mod, {emqx_bridge_app, []}},
{applications, [

View File

@ -225,7 +225,6 @@ info_example_basic(webhook, _) ->
request_timeout => <<"15s">>,
connect_timeout => <<"15s">>,
max_retries => 3,
retry_interval => <<"10s">>,
pool_type => <<"random">>,
pool_size => 4,
enable_pipelining => 100,

View File

@ -238,7 +238,8 @@ parse_confs(
method := Method,
body := Body,
headers := Headers,
request_timeout := ReqTimeout
request_timeout := ReqTimeout,
max_retries := Retry
} = Conf
) ->
{BaseUrl, Path} = parse_url(Url),
@ -251,7 +252,8 @@ parse_confs(
method => Method,
body => Body,
headers => Headers,
request_timeout => ReqTimeout
request_timeout => ReqTimeout,
max_retries => Retry
}
};
parse_confs(Type, Name, #{connector := ConnId, direction := Direction} = Conf) when

View File

@ -14,7 +14,46 @@ namespace() -> "bridge".
roots() -> [].
fields("config") ->
basic_config() ++
basic_config() ++ request_config();
fields("post") ->
[
type_field(),
name_field()
] ++ fields("config");
fields("put") ->
fields("config");
fields("get") ->
emqx_bridge_schema:metrics_status_fields() ++ fields("post").
desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for WebHook using `", string:to_upper(Method), "` method."];
desc(_) ->
undefined.
basic_config() ->
[
{enable,
mk(
boolean(),
#{
desc => ?DESC("config_enable"),
default => true
}
)},
{direction,
mk(
egress,
#{
desc => ?DESC("config_direction"),
default => egress
}
)}
] ++
proplists:delete(base_url, emqx_connector_http:fields(config)).
request_config() ->
[
{url,
mk(
@ -59,6 +98,14 @@ fields("config") ->
desc => ?DESC("config_body")
}
)},
{max_retries,
mk(
non_neg_integer(),
#{
default => 2,
desc => ?DESC("config_max_retries")
}
)},
{request_timeout,
mk(
emqx_schema:duration_ms(),
@ -67,44 +114,7 @@ fields("config") ->
desc => ?DESC("config_request_timeout")
}
)}
];
fields("post") ->
[
type_field(),
name_field()
] ++ fields("config");
fields("put") ->
fields("config");
fields("get") ->
emqx_bridge_schema:metrics_status_fields() ++ fields("post").
desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for WebHook using `", string:to_upper(Method), "` method."];
desc(_) ->
undefined.
basic_config() ->
[
{enable,
mk(
boolean(),
#{
desc => ?DESC("config_enable"),
default => true
}
)},
{direction,
mk(
egress,
#{
desc => ?DESC("config_direction"),
default => egress
}
)}
] ++
proplists:delete(base_url, emqx_connector_http:fields(config)).
].
%%======================================================================================

View File

@ -1039,12 +1039,18 @@ Defaults to: <code>system</code>.
common_handler_chars_limit {
desc {
en: """Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated."""
zh: """设置单个日志消息的最大长度。 如果超过此长度则日志消息将被截断。最小可设置的长度为100。"""
en: """
Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated.
NOTE: If the formatter is JSON, restricting the character limit may truncate the message and produce incomplete JSON data, which is not recommended.
"""
zh: """
设置单个日志消息的最大长度。 如果超过此长度则日志消息将被截断。最小可设置的长度为100。
注意:如果日志格式为 JSON,限制字符长度可能会导致截断不完整的 JSON 数据。
"""
}
label {
en: "Single Log Max Length"
zh: "单个日志最大长度"
zh: "单条日志长度限制"
}
}

View File

@ -262,6 +262,8 @@ fast_forward_to_commit(Node, ToTnxId) ->
%% @private
init([Node, RetryMs]) ->
%% Workaround for https://github.com/emqx/mria/issues/94:
_ = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], 1000),
_ = mria:wait_for_tables([?CLUSTER_MFA, ?CLUSTER_COMMIT]),
{ok, _} = mnesia:subscribe({table, ?CLUSTER_MFA, simple}),
State = #{node => Node, retry_interval => RetryMs},

View File

@ -1,6 +1,6 @@
{application, emqx_conf, [
{description, "EMQX configuration management"},
{vsn, "0.1.1"},
{vsn, "0.1.2"},
{registered, []},
{mod, {emqx_conf_app, []}},
{applications, [kernel, stdlib]},

View File

@ -41,17 +41,6 @@ base URL 只包含host和port。</br>
}
}
retry_interval {
desc {
en: "Interval between retries."
zh: "重试之间的间隔时间。"
}
label: {
en: "Retry Interval"
zh: "重试间隔"
}
}
pool_type {
desc {
en: "The type of the pool. Can be one of `random`, `hash`."
@ -76,8 +65,8 @@ base URL 只包含host和port。</br>
enable_pipelining {
desc {
en: "Whether to send HTTP requests continuously, when set to 0, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request."
zh: "是否连续发送 HTTP 请求,当设置为 0 时,表示每次发送完成 HTTP 请求后都需要等待服务器返回,再继续发送下一个请求。"
en: "A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request."
zh: "正整数,设置最大可发送的异步 HTTP 请求数量。当设置为 1 时,表示每次发送完成 HTTP 请求后都需要等待服务器返回,再继续发送下一个请求。"
}
label: {
en: "HTTP Pipelineing"

View File

@ -88,22 +88,6 @@ fields(config) ->
desc => ?DESC("connect_timeout")
}
)},
{max_retries,
sc(
non_neg_integer(),
#{
default => 5,
desc => ?DESC("max_retries")
}
)},
{retry_interval,
sc(
emqx_schema:duration(),
#{
default => "1s",
desc => ?DESC("retry_interval")
}
)},
{pool_type,
sc(
pool_type(),
@ -147,6 +131,14 @@ fields("request") ->
{path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})},
{body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})},
{headers, hoconsc:mk(map(), #{required => false, desc => ?DESC("headers")})},
{max_retries,
sc(
non_neg_integer(),
#{
required => false,
desc => ?DESC("max_retries")
}
)},
{request_timeout,
sc(
emqx_schema:duration_ms(),
@ -182,8 +174,6 @@ on_start(
path := BasePath
},
connect_timeout := ConnectTimeout,
max_retries := MaxRetries,
retry_interval := RetryInterval,
pool_type := PoolType,
pool_size := PoolSize
} = Config
@ -206,8 +196,6 @@ on_start(
{host, Host},
{port, Port},
{connect_timeout, ConnectTimeout},
{retry, MaxRetries},
{retry_timeout, RetryInterval},
{keepalive, 30000},
{pool_type, PoolType},
{pool_size, PoolSize},
@ -247,17 +235,23 @@ on_query(InstId, {send_message, Msg}, AfterQuery, State) ->
path := Path,
body := Body,
headers := Headers,
request_timeout := Timeout
request_timeout := Timeout,
max_retries := Retry
} = process_request(Request, Msg),
on_query(InstId, {Method, {Path, Headers, Body}, Timeout}, AfterQuery, State)
on_query(
InstId,
{undefined, Method, {Path, Headers, Body}, Timeout, Retry},
AfterQuery,
State
)
end;
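%% When the caller does not specify them, default to a 5000 ms request timeout
%% and 2 retries.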
on_query(InstId, {Method, Request}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, 5000}, AfterQuery, State);
on_query(InstId, {undefined, Method, Request, 5000, 2}, AfterQuery, State);
on_query(InstId, {Method, Request, Timeout}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, Timeout}, AfterQuery, State);
on_query(InstId, {undefined, Method, Request, Timeout, 2}, AfterQuery, State);
on_query(
InstId,
{KeyOrNum, Method, Request, Timeout},
{KeyOrNum, Method, Request, Timeout, Retry},
AfterQuery,
#{pool_name := PoolName, base_path := BasePath} = State
) ->
@ -275,7 +269,8 @@ on_query(
end,
Method,
NRequest,
Timeout
Timeout,
Retry
)
of
{error, Reason} ->
@ -368,7 +363,8 @@ preprocess_request(
path => emqx_plugin_libs_rule:preproc_tmpl(Path),
body => emqx_plugin_libs_rule:preproc_tmpl(Body),
headers => preproc_headers(Headers),
request_timeout => maps:get(request_timeout, Req, 30000)
request_timeout => maps:get(request_timeout, Req, 30000),
max_retries => maps:get(max_retries, Req, 2)
}.
preproc_headers(Headers) when is_map(Headers) ->

View File

@ -90,6 +90,7 @@ fields(sentinel) ->
}},
{sentinel, #{
type => string(),
required => true,
desc => ?DESC("sentinel_desc")
}}
] ++

View File

@ -23,8 +23,10 @@
-include_lib("emqx/include/emqx.hrl").
-include_lib("stdlib/include/assert.hrl").
-define(REDIS_HOST, "redis").
-define(REDIS_PORT, 6379).
-define(REDIS_SINGLE_HOST, "redis").
-define(REDIS_SINGLE_PORT, 6379).
-define(REDIS_SENTINEL_HOST, "redis-sentinel").
-define(REDIS_SENTINEL_PORT, 26379).
-define(REDIS_RESOURCE_MOD, emqx_connector_redis).
all() ->
@ -34,7 +36,14 @@ groups() ->
[].
init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?REDIS_HOST, ?REDIS_PORT) of
case
emqx_common_test_helpers:is_all_tcp_servers_available(
[
{?REDIS_SINGLE_HOST, ?REDIS_SINGLE_PORT},
{?REDIS_SENTINEL_HOST, ?REDIS_SENTINEL_PORT}
]
)
of
true ->
ok = emqx_common_test_helpers:start_apps([emqx_conf]),
ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_connector]),
@ -141,20 +150,35 @@ redis_config_cluster() ->
redis_config_sentinel() ->
redis_config_base("sentinel", "servers").
-define(REDIS_CONFIG_BASE(MaybeSentinel),
"" ++
"\n" ++
" auto_reconnect = true\n" ++
" database = 1\n" ++
" pool_size = 8\n" ++
" redis_type = ~s\n" ++
MaybeSentinel ++
" password = public\n" ++
" ~s = \"~s:~b\"\n" ++
" " ++
""
).
redis_config_base(Type, ServerKey) ->
case Type of
"sentinel" ->
Host = ?REDIS_SENTINEL_HOST,
Port = ?REDIS_SENTINEL_PORT,
MaybeSentinel = " sentinel = mymaster\n";
_ ->
Host = ?REDIS_SINGLE_HOST,
Port = ?REDIS_SINGLE_PORT,
MaybeSentinel = ""
end,
RawConfig = list_to_binary(
io_lib:format(
""
"\n"
" auto_reconnect = true\n"
" database = 1\n"
" pool_size = 8\n"
" redis_type = ~s\n"
" password = public\n"
" ~s = \"~s:~b\"\n"
" "
"",
[Type, ServerKey, ?REDIS_HOST, ?REDIS_PORT]
?REDIS_CONFIG_BASE(MaybeSentinel),
[Type, ServerKey, Host, Port]
)
),

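For the sentinel case, the helper above should render a configuration string along these lines (host and port come from the new ?REDIS_SENTINEL_* macros; leading whitespace approximated):

auto_reconnect = true
database = 1
pool_size = 8
redis_type = sentinel
sentinel = mymaster
password = public
servers = "redis-sentinel:26379"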
View File

@ -1,7 +1,7 @@
dashboard {
listeners.http {
bind: 18083
bind = 18083
}
default_username: "admin"
default_password: "public"
default_username = "admin"
default_password = "public"
}

View File

@ -2,7 +2,7 @@
{application, emqx_dashboard, [
{description, "EMQX Web Dashboard"},
% strict semver, bump manually!
{vsn, "5.0.2"},
{vsn, "5.0.4"},
{modules, []},
{registered, [emqx_dashboard_sup]},
{applications, [kernel, stdlib, mnesia, minirest, emqx]},

View File

@ -1,13 +1,7 @@
%% -*- mode: erlang -*-
%% Unless you know what you are doing, DO NOT edit manually!!
{VSN,
[{"5.0.0",
[{load_module,emqx_dashboard,brutal_purge,soft_purge,[]},
{load_module,emqx_dashboard_api,brutal_purge,soft_purge,[]},
{load_module,emqx_dashboard_token,brutal_purge,soft_purge,[]}]},
{<<".*">>,[]}],
[{"5.0.0",
[{load_module,emqx_dashboard,brutal_purge,soft_purge,[]},
{load_module,emqx_dashboard_api,brutal_purge,soft_purge,[]},
{load_module,emqx_dashboard_token,brutal_purge,soft_purge,[]}]},
{<<".*">>,[]}]}.
%% we should always restart the dashboard to make sure the API rules/swagger are updated
[{<<".*">>,[{restart_application, emqx_dashboard}]}],
[{<<".*">>,[{restart_application, emqx_dashboard}]}]
}.

View File

@ -92,7 +92,7 @@ start_listeners(Listeners) ->
case minirest:start(Name, RanchOptions, Minirest) of
{ok, _} ->
?ULOG("Listener ~ts on ~ts started.~n", [
Name, emqx_listeners:format_addr(Bind)
Name, emqx_listeners:format_bind(Bind)
]),
Acc;
{error, _Reason} ->
@ -114,7 +114,7 @@ stop_listeners(Listeners) ->
case minirest:stop(Name) of
ok ->
?ULOG("Stop listener ~ts on ~ts successfully.~n", [
Name, emqx_listeners:format_addr(Port)
Name, emqx_listeners:format_bind(Port)
]);
{error, not_found} ->
?SLOG(warning, #{msg => "stop_listener_failed", name => Name, port => Port})
@ -159,7 +159,7 @@ listeners(Listeners) ->
maps:get(enable, Conf) andalso
begin
{Conf1, Bind} = ip_port(Conf),
{true, {listener_name(Protocol, Conf1), Protocol, Bind, ranch_opts(Conf1)}}
{true, {listener_name(Protocol), Protocol, Bind, ranch_opts(Conf1)}}
end
end,
maps:to_list(Listeners)
@ -208,19 +208,8 @@ ranch_opts(Options) ->
filter_false(_K, false, S) -> S;
filter_false(K, V, S) -> [{K, V} | S].
listener_name(Protocol, #{port := Port, ip := IP}) ->
Name =
"dashboard:" ++
atom_to_list(Protocol) ++ ":" ++
inet:ntoa(IP) ++ ":" ++
integer_to_list(Port),
list_to_atom(Name);
listener_name(Protocol, #{port := Port}) ->
Name =
"dashboard:" ++
atom_to_list(Protocol) ++ ":" ++
integer_to_list(Port),
list_to_atom(Name).
listener_name(Protocol) ->
list_to_atom(atom_to_list(Protocol) ++ ":dashboard").
authorize(Req) ->
case cowboy_req:parse_header(<<"authorization">>, Req) of

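For reference, with the simplified listener_name/1 above, dashboard listener names no longer encode the bind address; evaluated by hand this gives, for example:

%% listener_name(http)  =:= 'http:dashboard'
%% listener_name(https) =:= 'https:dashboard'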
View File

@ -180,7 +180,6 @@ field(username_in_path) ->
{username,
mk(binary(), #{
desc => ?DESC(username),
'maxLength' => 100,
example => <<"admin">>,
in => path,
required => true

View File

@ -32,14 +32,22 @@ admins(["add", Username, Password, Desc]) ->
{ok, _} ->
emqx_ctl:print("ok~n");
{error, Reason} ->
emqx_ctl:print("Error: ~p~n", [Reason])
print_error(Reason)
end;
admins(["passwd", Username, Password]) ->
Status = emqx_dashboard_admin:change_password(bin(Username), bin(Password)),
emqx_ctl:print("~p~n", [Status]);
case emqx_dashboard_admin:change_password(bin(Username), bin(Password)) of
{ok, _} ->
emqx_ctl:print("ok~n");
{error, Reason} ->
print_error(Reason)
end;
admins(["del", Username]) ->
Status = emqx_dashboard_admin:remove_user(bin(Username)),
emqx_ctl:print("~p~n", [Status]);
case emqx_dashboard_admin:remove_user(bin(Username)) of
{ok, _} ->
emqx_ctl:print("ok~n");
{error, Reason} ->
print_error(Reason)
end;
admins(_) ->
emqx_ctl:usage(
[
@ -53,3 +61,9 @@ unload() ->
emqx_ctl:unregister_command(admins).
bin(S) -> iolist_to_binary(S).
print_error(Reason) when is_binary(Reason) ->
emqx_ctl:print("Error: ~s~n", [Reason]).
%% There may be more error types in the future, but only binary is produced for now,
%% so the catch-all clause below is commented out to keep dialyzer happy.
% print_error(Reason) ->
% emqx_ctl:print("Error: ~p~n", [Reason]).

View File

@ -51,6 +51,7 @@ schema("/error_codes") ->
get => #{
security => [],
description => <<"API Error Codes">>,
tags => [<<"Error codes">>],
responses => #{
200 => hoconsc:array(hoconsc:ref(?MODULE, error_code))
}
@ -62,6 +63,7 @@ schema("/error_codes/:code") ->
get => #{
security => [],
description => <<"API Error Codes">>,
tags => [<<"Error codes">>],
parameters => [
{code,
hoconsc:mk(hoconsc:enum(emqx_dashboard_error_code:all()), #{

View File

@ -38,7 +38,12 @@
]).
is_ready(Timeout) ->
ready =:= gen_server:call(?MODULE, is_ready, Timeout).
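    %% A timeout of the gen_server call is no longer fatal to the caller;
    %% report the dashboard as not ready instead.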
try
ready =:= gen_server:call(?MODULE, is_ready, Timeout)
catch
exit:{timeout, _} ->
false
end.
start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

View File

@ -43,5 +43,6 @@ check_dispatch_ready(Env) ->
true;
true ->
            %% dashboard should always be ready; if it is not, is_ready/1 will block until it is.
            emqx_dashboard_listener:is_ready(timer:seconds(15))
            %% if it is not ready within the timeout, the dashboard returns 503.
emqx_dashboard_listener:is_ready(timer:seconds(20))
end.

View File

@ -115,13 +115,16 @@ granularity_adapter(List) ->
%% Get the current rate. Not the current sampler data.
current_rate() ->
Fun =
fun(Node, Cluster) ->
fun
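            %% Once a {badrpc, _} error has replaced the map accumulator,
            %% pass it through unchanged and skip merging the remaining nodes.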
(Node, Cluster) when is_map(Cluster) ->
case current_rate(Node) of
{ok, CurrentRate} ->
merge_cluster_rate(CurrentRate, Cluster);
{badrpc, Reason} ->
{badrpc, {Node, Reason}}
end
end;
(_Node, Error) ->
Error
end,
case lists:foldl(Fun, #{}, mria_mnesia:cluster_nodes(running)) of
{badrpc, Reason} ->

View File

@ -37,7 +37,7 @@ schema("/monitor") ->
#{
'operationId' => monitor,
get => #{
tags => [dashboard],
tags => [<<"Metrics">>],
desc => <<"List monitor data.">>,
parameters => [parameter_latest()],
responses => #{
@ -50,7 +50,7 @@ schema("/monitor/nodes/:node") ->
#{
'operationId' => monitor,
get => #{
tags => [dashboard],
tags => [<<"Metrics">>],
desc => <<"List the monitor data on the node.">>,
parameters => [parameter_node(), parameter_latest()],
responses => #{
@ -63,7 +63,7 @@ schema("/monitor_current") ->
#{
'operationId' => monitor_current,
get => #{
tags => [dashboard],
tags => [<<"Metrics">>],
desc => <<"Current status. Gauge and rate.">>,
responses => #{
200 => hoconsc:mk(hoconsc:ref(sampler_current), #{})
@ -74,7 +74,7 @@ schema("/monitor_current/nodes/:node") ->
#{
'operationId' => monitor_current,
get => #{
tags => [dashboard],
tags => [<<"Metrics">>],
desc => <<"Node current status. Gauge and rate.">>,
parameters => [parameter_node()],
responses => #{
