Merge branch 'emqx:master' into feat_round_robin_per_node

Benjamin Krenn 2022-08-04 11:02:51 +02:00 committed by GitHub
commit 5fa2a2aa44
196 changed files with 3957 additions and 1653 deletions


@@ -26,6 +26,8 @@ up:
     -f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
     -f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
     -f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
+    -f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
+    -f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
     up -d --build

 down:
@@ -39,6 +41,8 @@ down:
     -f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
     -f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
     -f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
+    -f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
+    -f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
     down

 ct:


@@ -1,8 +1,8 @@
 version: '3.9'

 services:
-  redis_server:
-    container_name: redis
+  redis_sentinel_server:
+    container_name: redis-sentinel
     image: redis:${REDIS_TAG}
     volumes:
      - ./redis/:/data/conf


@@ -1,8 +1,8 @@
 version: '3.9'

 services:
-  redis_server:
-    container_name: redis
+  redis_sentinel_server_tls:
+    container_name: redis-sentinel-tls
    image: redis:${REDIS_TAG}
    volumes:
     - ../../apps/emqx/etc/certs/cacert.pem:/etc/certs/ca.crt


@@ -30,24 +30,12 @@ defaults
 ##----------------------------------------------------------------
 ## API
 ##----------------------------------------------------------------
-frontend emqx_mgmt
-    mode tcp
-    option tcplog
-    bind *:18083
-    default_backend emqx_mgmt_back
-
 frontend emqx_dashboard
     mode tcp
     option tcplog
     bind *:18083
     default_backend emqx_dashboard_back

-backend emqx_mgmt_back
-    mode http
-    # balance static-rr
-    server emqx-1 node1.emqx.io:18083
-    server emqx-2 node2.emqx.io:18083
-
 backend emqx_dashboard_back
     mode http
     # balance static-rr


@@ -16,11 +16,15 @@ case $key in
         shift # past argument
         shift # past value
         ;;
-    -t|--tls-enabled)
+    -t)
         tls="$2"
         shift # past argument
         shift # past value
         ;;
+    --tls-enabled)
+        tls=1
+        shift # past argument
+        ;;
     *)
         shift # past argument
         ;;


@@ -27,4 +27,3 @@ ok
 + POST `/counter`
   计数器加一


@@ -3,7 +3,7 @@
 {erl_opts, [debug_info]}.
 {deps,
     [
-        {minirest, {git, "https://github.com/emqx/minirest.git", {tag, "0.3.6"}}}
+        {minirest, {git, "https://github.com/emqx/minirest.git", {tag, "1.3.6"}}}
     ]}.
 {shell, [


@@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, http_server,
-    [{description, "An OTP application"},
-        {vsn, "0.1.0"},
+    [{description, "An HTTP server application"},
+        {vsn, "0.2.0"},
         {registered, []},
         % {mod, {http_server_app, []}},
         {modules, []},


@@ -10,51 +10,107 @@
     stop/0
 ]).

--rest_api(#{
-    name => get_counter,
-    method => 'GET',
-    path => "/counter",
-    func => get_counter,
-    descr => "Check counter"
-}).
--rest_api(#{
-    name => add_counter,
-    method => 'POST',
-    path => "/counter",
-    func => add_counter,
-    descr => "Counter plus one"
-}).
--export([
-    get_counter/2,
-    add_counter/2
-]).
+-behavior(minirest_api).
+
+-export([api_spec/0]).
+-export([counter/2]).
+
+api_spec() ->
+    {
+        [counter_api()],
+        []
+    }.
+
+counter_api() ->
+    MetaData = #{
+        get => #{
+            description => "Get counter",
+            summary => "Get counter",
+            responses => #{
+                200 => #{
+                    content => #{
+                        'application/json' =>
+                            #{
+                                type => object,
+                                properties => #{
+                                    code => #{type => integer, example => 0},
+                                    data => #{type => integer, example => 0}
+                                }
+                            }
+                    }
+                }
+            }
+        },
+        post => #{
+            description => "Add counter",
+            summary => "Add counter",
+            'requestBody' => #{
+                content => #{
+                    'application/json' => #{
+                        schema =>
+                            #{
+                                type => object,
+                                properties => #{
+                                    payload => #{type => string, example => <<"sample payload">>},
+                                    id => #{type => integer, example => 0}
+                                }
+                            }
+                    }
+                }
+            },
+            responses => #{
+                200 => #{
+                    content => #{
+                        'application/json' =>
+                            #{
+                                type => object,
+                                properties => #{
+                                    code => #{type => integer, example => 0}
+                                }
+                            }
+                    }
+                }
+            }
+        }
+    },
+    {"/counter", MetaData, counter}.
+
+counter(get, _Params) ->
+    V = ets:info(relup_test_message, size),
+    {200, #{<<"content-type">> => <<"text/plain">>}, #{<<"code">> => 0, <<"data">> => V}};
+counter(post, #{body := Params}) ->
+    case Params of
+        #{<<"payload">> := _, <<"id">> := Id} ->
+            ets:insert(relup_test_message, {Id, maps:remove(<<"id">>, Params)}),
+            {200, #{<<"code">> => 0}};
+        _ ->
+            io:format("discarded: ~p\n", [Params]),
+            {200, #{<<"code">> => -1}}
+    end.

 start() ->
     application:ensure_all_started(minirest),
     _ = spawn(fun ets_owner/0),
-    Handlers = [{"/", minirest:handler(#{modules => [?MODULE]})}],
-    Dispatch = [{"/[...]", minirest, Handlers}],
-    minirest:start_http(?MODULE, #{socket_opts => [inet, {port, 7077}]}, Dispatch).
+    RanchOptions = #{
+        max_connections => 512,
+        num_acceptors => 4,
+        socket_opts => [{send_timeout, 5000}, {port, 7077}, {backlog, 512}]
+    },
+    Minirest = #{
+        base_path => "",
+        modules => [?MODULE],
+        dispatch => [{"/[...]", ?MODULE, []}],
+        protocol => http,
+        ranch_options => RanchOptions,
+        middlewares => [cowboy_router, cowboy_handler]
+    },
+    Res = minirest:start(?MODULE, Minirest),
+    minirest:update_dispatch(?MODULE),
+    Res.

 stop() ->
     ets:delete(relup_test_message),
-    minirest:stop_http(?MODULE).
-
-get_counter(_Binding, _Params) ->
-    V = ets:info(relup_test_message, size),
-    return({ok, V}).
-
-add_counter(_Binding, Params) ->
-    case lists:keymember(<<"payload">>, 1, Params) of
-        true ->
-            {value, {<<"id">>, ID}, Params1} = lists:keytake(<<"id">>, 1, Params),
-            ets:insert(relup_test_message, {ID, Params1});
-        _ ->
-            io:format("discarded: ~p\n", [Params]),
-            ok
-    end,
-    return().
+    minirest:stop(?MODULE).

 ets_owner() ->
     ets:new(relup_test_message, [named_table, public]),
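The rewrite above moves the relup test HTTP server from minirest 0.x `-rest_api` attributes to the minirest 1.x `minirest_api` behaviour, where a single `counter/2` callback dispatches on the HTTP method. A minimal sketch of exercising the new handler from an Erlang shell (it assumes the `http_server` test app and minirest 1.3.x are on the code path; prompt numbers are illustrative):

%% start the test server (also spawns the owner of the relup_test_message ETS table)
1> http_server:start().
%% a POST body carrying payload + id is stored and acknowledged with code 0
2> http_server:counter(post, #{body => #{<<"payload">> => <<"sample payload">>, <<"id">> => 1}}).
{200,#{<<"code">> => 0}}
%% GET reports the number of stored messages
3> http_server:counter(get, #{}).
{200,#{<<"content-type">> => <<"text/plain">>},#{<<"code">> => 0,<<"data">> => 1}}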


@@ -143,7 +143,6 @@ jobs:
           - 24.2.1-1
         os:
           - macos-11
-          - macos-10.15
     runs-on: ${{ matrix.os }}
     steps:
       - uses: actions/download-artifact@v2


@@ -76,7 +76,7 @@ jobs:
       - uses: actions/upload-artifact@v2
         with:
           name: ${{ matrix.profile}}-${{ matrix.otp }}-${{ matrix.os }}
-          path: _packages/${{ matrix.profile}}/*.tar.gz
+          path: _packages/${{ matrix.profile}}/*
       - uses: actions/upload-artifact@v2
         with:
           name: "${{ matrix.profile }}_schema_dump"
@@ -120,7 +120,7 @@ jobs:
       - uses: actions/upload-artifact@v2
         with:
           name: windows
-          path: _packages/${{ matrix.profile}}/*.tar.gz
+          path: _packages/${{ matrix.profile}}/*

   mac:
     strategy:
@@ -133,7 +133,6 @@ jobs:
           - 24.2.1-1
         macos:
           - macos-11
-          - macos-10.15
     runs-on: ${{ matrix.macos }}
@@ -196,7 +195,7 @@ jobs:
       - uses: actions/upload-artifact@v2
         with:
           name: macos
-          path: _packages/**/*.tar.gz
+          path: _packages/**/*

   spellcheck:
     needs: linux


@@ -157,6 +157,10 @@ jobs:
         if: matrix.discovery == 'k8s'
         run: |
           helm install emqx \
+              --set emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY="k8s" \
+              --set emqxConfig.EMQX_CLUSTER__K8S__APISERVER="https://kubernetes.default.svc:443" \
+              --set emqxConfig.EMQX_CLUSTER__K8S__SERVICE_NAME="emqx-headless" \
+              --set emqxConfig.EMQX_CLUSTER__K8S__NAMESPACE="default" \
               --set image.repository=$TARGET \
              --set image.pullPolicy=Never \
              --set emqxAclConfig="" \
@@ -173,8 +177,8 @@ jobs:
         run: |
           helm install emqx \
              --set emqxConfig.EMQX_CLUSTER__DISCOVERY_STRATEGY="dns" \
+              --set emqxConfig.EMQX_CLUSTER__DNS__NAME="emqx-headless.default.svc.cluster.local" \
              --set emqxConfig.EMQX_CLUSTER__DNS__RECORD_TYPE="srv" \
-              --set emqxConfig.EMQX_CLUSTER__DNS__NAME="emqx-headless.default.svc.cluster.local" \
              --set image.repository=$TARGET \
              --set image.pullPolicy=Never \
              --set emqxAclConfig="" \


@@ -118,6 +118,8 @@ jobs:
             -f .ci/docker-compose-file/docker-compose-pgsql-tls.yaml \
             -f .ci/docker-compose-file/docker-compose-redis-single-tcp.yaml \
             -f .ci/docker-compose-file/docker-compose-redis-single-tls.yaml \
+            -f .ci/docker-compose-file/docker-compose-redis-sentinel-tcp.yaml \
+            -f .ci/docker-compose-file/docker-compose-redis-sentinel-tls.yaml \
             -f .ci/docker-compose-file/docker-compose.yaml \
             up -d --build


@@ -1,3 +1,15 @@
+# 5.0.5
+
+## Bug fixes
+
+* Allow changing the license type from key to file (and vice-versa). [#8598](https://github.com/emqx/emqx/pull/8598)
+
+## Enhancements
+
+* The license is now copied to all nodes in the cluster when it's reloaded. [#8598](https://github.com/emqx/emqx/pull/8598)
+* Added a HTTP API to manage licenses. [#8610](https://github.com/emqx/emqx/pull/8610)
+* Updated `/nodes` API node_status from `Running/Stopped` to `running/stopped`. [#8642](https://github.com/emqx/emqx/pull/8642)
+
 # 5.0.4

 ## Bug fixes
@@ -7,37 +19,63 @@
   Prior to this change, the webhook only checks the connectivity of the TCP port using `gen_tcp:connect/2`, so
   if it's a HTTPs server, we didn't check if TLS handshake was successful.
   [commits/6b45d2ea](https://github.com/emqx/emqx/commit/6b45d2ea9fde6d3b4a5b007f7a8c5a1c573d141e)
-* The `create_at` field of rules is missing after emqx restarts. [commits/5fc09e6b](https://github.com/emqx/emqx/commit/5fc09e6b950c340243d7be627a0ce1700691221c)
+* The `created_at` field of rules is missing after emqx restarts. [commits/5fc09e6b](https://github.com/emqx/emqx/commit/5fc09e6b950c340243d7be627a0ce1700691221c)
+* The rule engine's jq function now works even when the path to the EMQX install dir contains spaces [jq#35](https://github.com/emqx/jq/pull/35) [#8455](https://github.com/emqx/emqx/pull/8455)
+* Avoid applying any ACL checks on superusers [#8452](https://github.com/emqx/emqx/pull/8452)
+* Fix statistics related system topic name error
+* Fix AuthN JWKS SSL schema. Using schema in `emqx_schema`. [#8458](https://github.com/emqx/emqx/pull/8458)
+* `sentinel` field should be required when AuthN/AuthZ Redis using sentinel mode. [#8458](https://github.com/emqx/emqx/pull/8458)
+* Fix bad swagger format. [#8517](https://github.com/emqx/emqx/pull/8517)
+* Fix `chars_limit` is not working when `formatter` is `json`. [#8518](http://github.com/emqx/emqx/pull/8518)
+* Ensuring that exhook dispatches the client events are sequential. [#8530](https://github.com/emqx/emqx/pull/8530)
+* Avoid using RocksDB backend for persistent sessions when such backend is unavailable. [#8528](https://github.com/emqx/emqx/pull/8528)
+* Fix AuthN `cert_subject` and `cert_common_name` placeholder rendering failure. [#8531](https://github.com/emqx/emqx/pull/8531)
+* Support listen on an IPv6 address, e.g: [::1]:1883 or ::1:1883. [#8547](https://github.com/emqx/emqx/pull/8547)
+* GET '/rules' support for pagination and fuzzy search. [#8472](https://github.com/emqx/emqx/pull/8472)
+  **‼️ Note** : The previous API only returns array: `[RuleObj1,RuleObj2]`, after updating, it will become
+  `{"data": [RuleObj1,RuleObj2], "meta":{"count":2, "limit":100, "page":1}`,
+  which will carry the paging meta information.
+* Fix the issue that webhook leaks TCP connections. [ehttpc#34](https://github.com/emqx/ehttpc/pull/34), [#8580](https://github.com/emqx/emqx/pull/8580)
+
+## Enhancements
+
+* Improve the dashboard listener startup log, the listener name is no longer spliced with port information,
+  and the colon(:) is no longer displayed when IP is not specified. [#8480](https://github.com/emqx/emqx/pull/8480)
+* Remove `/configs/listeners` API, use `/listeners/` instead. [#8485](https://github.com/emqx/emqx/pull/8485)
+* Optimize performance of builtin database operations in processes with long message queue [#8439](https://github.com/emqx/emqx/pull/8439)
+* Improve authentication tracing. [#8554](https://github.com/emqx/emqx/pull/8554)
+* Standardize the '/listeners' and `/gateway/<name>/listeners` API fields.
+  It will introduce some incompatible updates, see [#8571](https://github.com/emqx/emqx/pull/8571)

 # 5.0.3

 ## Bug fixes

-* Websocket listener failed to read headers `X-Forwared-For` and `X-Forwarded-Port` [8415](https://github.com/emqx/emqx/pull/8415)
+* Websocket listener failed to read headers `X-Forwarded-For` and `X-Forwarded-Port` [#8415](https://github.com/emqx/emqx/pull/8415)
-* Deleted `cluster_singleton` from MQTT bridge config document. This config is no longer applicable in 5.0 [8407](https://github.com/emqx/emqx/pull/8407)
+* Deleted `cluster_singleton` from MQTT bridge config document. This config is no longer applicable in 5.0 [#8407](https://github.com/emqx/emqx/pull/8407)
-* Fix `emqx/emqx:latest` docker image publish to use the Erlang flavor, but not Elixir flavor [8414](https://github.com/emqx/emqx/pull/8414)
+* Fix `emqx/emqx:latest` docker image publish to use the Erlang flavor, but not Elixir flavor [#8414](https://github.com/emqx/emqx/pull/8414)
-* Changed the `exp` field in JWT auth to be optional rather than required to fix backwards compatability with 4.X releases. [8425](https://github.com/emqx/emqx/pull/8425)
+* Changed the `exp` field in JWT auth to be optional rather than required to fix backwards compatability with 4.X releases. [#8425](https://github.com/emqx/emqx/pull/8425)

 ## Enhancements

-* Improve the speed of dashboard's HTTP API routing rule generation, which sometimes causes timeout [8438](https://github.com/emqx/emqx/pull/8438)
+* Improve the speed of dashboard's HTTP API routing rule generation, which sometimes causes timeout [#8438](https://github.com/emqx/emqx/pull/8438)

 # 5.0.2

-Announcemnet: EMQX team has decided to stop supporting relup for opensouce edition.
+Announcement: EMQX team has decided to stop supporting relup for opensource edition.
-Going forward, it will be an enterprise only feature.
+Going forward, it will be an enterprise-only feature.

 Main reason: relup requires carefully crafted upgrade instructions from ALL previous versions.
 For example, 4.3 is now at 4.3.16, we have `4.3.0->4.3.16`, `4.3.1->4.3.16`, ... 16 such upgrade paths in total to maintain.
-This had been the biggest obstacle for EMQX team to act agile enough in deliverying enhancements and fixes.
+This had been the biggest obstacle for EMQX team to act agile enough in delivering enhancements and fixes.

 ## Enhancements

 ## Bug fixes

-* Fixed a typo in `bin/emqx` which affects MacOs release when trying to enable Erlang distribution over TLS [8398](https://github.com/emqx/emqx/pull/8398)
+* Fixed a typo in `bin/emqx` which affects MacOs release when trying to enable Erlang distribution over TLS [#8398](https://github.com/emqx/emqx/pull/8398)
-* Restricted shell was accidentally disabled in 5.0.1, it has been added back. [8396](https://github.com/emqx/emqx/pull/8396)
+* Restricted shell was accidentally disabled in 5.0.1, it has been added back. [#8396](https://github.com/emqx/emqx/pull/8396)

 # 5.0.1

@@ -66,25 +104,25 @@ Exceptions:

 ## Enhancements

-* Removed management API auth for prometheus scraping endpoint /api/v5/prometheus/stats [8299](https://github.com/emqx/emqx/pull/8299)
+* Removed management API auth for prometheus scraping endpoint /api/v5/prometheus/stats [#8299](https://github.com/emqx/emqx/pull/8299)
-* Added more TCP options for exhook (gRPC) connections. [8317](https://github.com/emqx/emqx/pull/8317)
+* Added more TCP options for exhook (gRPC) connections. [#8317](https://github.com/emqx/emqx/pull/8317)
-* HTTP Servers used for authentication and authorization will now indicate the result via the response body. [8374](https://github.com/emqx/emqx/pull/8374) [8377](https://github.com/emqx/emqx/pull/8377)
+* HTTP Servers used for authentication and authorization will now indicate the result via the response body. [#8374](https://github.com/emqx/emqx/pull/8374) [#8377](https://github.com/emqx/emqx/pull/8377)
-* Bulk subscribe/unsubscribe APIs [8356](https://github.com/emqx/emqx/pull/8356)
+* Bulk subscribe/unsubscribe APIs [#8356](https://github.com/emqx/emqx/pull/8356)
-* Added exclusive subscription [8315](https://github.com/emqx/emqx/pull/8315)
+* Added exclusive subscription [#8315](https://github.com/emqx/emqx/pull/8315)
-* Provide authentication counter metrics [8352](https://github.com/emqx/emqx/pull/8352) [8375](https://github.com/emqx/emqx/pull/8375)
+* Provide authentication counter metrics [#8352](https://github.com/emqx/emqx/pull/8352) [#8375](https://github.com/emqx/emqx/pull/8375)
-* Do not allow admin user self-deletion [8286](https://github.com/emqx/emqx/pull/8286)
+* Do not allow admin user self-deletion [#8286](https://github.com/emqx/emqx/pull/8286)
-* After restart, ensure to copy `cluster-override.conf` from the clustered node which has the greatest `tnxid`. [8333](https://github.com/emqx/emqx/pull/8333)
+* After restart, ensure to copy `cluster-override.conf` from the clustered node which has the greatest `tnxid`. [#8333](https://github.com/emqx/emqx/pull/8333)

 ## Bug fixes

-* A bug fix ported from 4.x: allow deleting subscriptions from `client.subscribe` hookpoint callback result. [8304](https://github.com/emqx/emqx/pull/8304) [8347](https://github.com/emqx/emqx/pull/8377)
+* A bug fix ported from 4.x: allow deleting subscriptions from `client.subscribe` hookpoint callback result. [#8304](https://github.com/emqx/emqx/pull/8304) [#8347](https://github.com/emqx/emqx/pull/8377)
-* Fixed Erlang distribution over TLS [8309](https://github.com/emqx/emqx/pull/8309)
+* Fixed Erlang distribution over TLS [#8309](https://github.com/emqx/emqx/pull/8309)
-* Made possible to override authentication configs from environment variables [8323](https://github.com/emqx/emqx/pull/8309)
+* Made possible to override authentication configs from environment variables [#8323](https://github.com/emqx/emqx/pull/8309)
-* Made authentication passwords in Mnesia database backward compatible to 4.x, so we can support data migration better. [8351](https://github.com/emqx/emqx/pull/8351)
+* Made authentication passwords in Mnesia database backward compatible to 4.x, so we can support data migration better. [#8351](https://github.com/emqx/emqx/pull/8351)
-* Fix plugins upload for rpm/deb installations [8379](https://github.com/emqx/emqx/pull/8379)
+* Fix plugins upload for rpm/deb installations [#8379](https://github.com/emqx/emqx/pull/8379)
-* Sync data/authz/acl.conf and data/certs from clustered nodes after a new node joins the cluster [8369](https://github.com/emqx/emqx/pull/8369)
+* Sync data/authz/acl.conf and data/certs from clustered nodes after a new node joins the cluster [#8369](https://github.com/emqx/emqx/pull/8369)
-* Ensure auto-retry of failed resources [8371](https://github.com/emqx/emqx/pull/8371)
+* Ensure auto-retry of failed resources [#8371](https://github.com/emqx/emqx/pull/8371)
-* Fix the issue that the count of `packets.connack.auth_error` is inaccurate when the client uses a protocol version below MQTT v5.0 to access [8178](https://github.com/emqx/emqx/pull/8178)
+* Fix the issue that the count of `packets.connack.auth_error` is inaccurate when the client uses a protocol version below MQTT v5.0 to access [#8178](https://github.com/emqx/emqx/pull/8178)

 ## Others


@@ -1,7 +1,7 @@
 Source code in this repository is variously licensed under below licenses.

-For EMQX Community Edition: Apache License 2.0, see APL.txt,
+For EMQX: Apache License 2.0, see APL.txt,
 which applies to all source files except for lib-ee sub-directory.

-For EMQX Enterprise Edition (since version 5.0): Business Source License 1.1,
+For EMQX Enterprise (since version 5.0): Business Source License 1.1,
 see lib-ee/BSL.txt, which applies to source code in lib-ee sub-directory.


@@ -7,7 +7,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d
 export EMQX_DEFAULT_RUNNER = debian:11-slim
 export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
 export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
-export EMQX_DASHBOARD_VERSION ?= v1.0.2
+export EMQX_DASHBOARD_VERSION ?= v1.0.5
 export EMQX_REL_FORM ?= tgz
 export QUICER_DOWNLOAD_FROM_RELEASE = 1
 ifeq ($(OS),Windows_NT)
@@ -249,3 +249,4 @@ $(foreach tt,$(ALL_ELIXIR_TGZS),$(eval $(call gen-elixir-tgz-target,$(tt))))
 fmt: $(REBAR)
 	@./scripts/erlfmt -w '{apps,lib-ee}/*/{src,include,test}/**/*.{erl,hrl,app.src}'
 	@./scripts/erlfmt -w 'rebar.config.erl'
+	@mix format


@@ -4,10 +4,10 @@
 [![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx)
 [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master)
 [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx)
-[![Slack](https://img.shields.io/badge/Slack-EMQ%20X-39AE85?logo=slack)](https://slack-invite.emqx.io/)
+[![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/)
 [![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES)
 [![Twitter](https://img.shields.io/badge/Twitter-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech)
-[![Community](https://img.shields.io/badge/Community-EMQ%20X-yellow)](https://askemq.com)
+[![Community](https://img.shields.io/badge/Community-EMQX-yellow)](https://askemq.com)
 [![YouTube](https://img.shields.io/badge/Subscribe-EMQ%20中文-FF0000?logo=youtube)](https://www.youtube.com/channel/UCir_r04HIsLjf2qqyZ4A8Cg)


@@ -4,7 +4,7 @@
 [![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx)
 [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master)
 [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx)
-[![Slack](https://img.shields.io/badge/Slack-EMQ%20X-39AE85?logo=slack)](https://slack-invite.emqx.io/)
+[![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/)
 [![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES)
 [![Twitter](https://img.shields.io/badge/Twitter-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech)
 [![YouTube](https://img.shields.io/badge/Subscribe-EMQ-FF0000?logo=youtube)](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q)


@@ -4,10 +4,10 @@
 [![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx)
 [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master)
 [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx)
-[![Slack](https://img.shields.io/badge/Slack-EMQ%20X-39AE85?logo=slack)](https://slack-invite.emqx.io/)
+[![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/)
 [![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES)
 [![Twitter](https://img.shields.io/badge/Follow-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech)
-[![Community](https://img.shields.io/badge/Community-EMQ%20X-yellow?logo=github)](https://github.com/emqx/emqx/discussions)
+[![Community](https://img.shields.io/badge/Community-EMQX-yellow?logo=github)](https://github.com/emqx/emqx/discussions)
 [![YouTube](https://img.shields.io/badge/Subscribe-EMQ-FF0000?logo=youtube)](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q)

 [![The best IoT MQTT open source team looks forward to your joining](https://assets.emqx.com/images/github_readme_en_bg.png)](https://www.emqx.com/en/careers)


@@ -4,7 +4,7 @@
 [![Build Status](https://img.shields.io/travis/emqx/emqx?label=Build)](https://travis-ci.org/emqx/emqx)
 [![Coverage Status](https://img.shields.io/coveralls/github/emqx/emqx/master?label=Coverage)](https://coveralls.io/github/emqx/emqx?branch=master)
 [![Docker Pulls](https://img.shields.io/docker/pulls/emqx/emqx?label=Docker%20Pulls)](https://hub.docker.com/r/emqx/emqx)
-[![Slack](https://img.shields.io/badge/Slack-EMQ%20X-39AE85?logo=slack)](https://slack-invite.emqx.io/)
+[![Slack](https://img.shields.io/badge/Slack-EMQ-39AE85?logo=slack)](https://slack-invite.emqx.io/)
 [![Discord](https://img.shields.io/discord/931086341838622751?label=Discord&logo=discord)](https://discord.gg/xYGf3fQnES)
 [![Twitter](https://img.shields.io/badge/Follow-EMQ-1DA1F2?logo=twitter)](https://twitter.com/EMQTech)
 [![YouTube](https://img.shields.io/badge/Subscribe-EMQ-FF0000?logo=youtube)](https://www.youtube.com/channel/UC5FjR77ErAxvZENEWzQaO5Q)


@@ -66,6 +66,9 @@ Cygwin is what we tested with.
 Start (restart) CMD or powershell console and execute `which bash`, it should
 print out `/usr/bin/bash`

+NOTE: Make sure cygwin's bin dir is added before `C:\Windows\system32` in `Path`,
+otherwise the build scripts may end up using binaries from wsl instead of cygwin.
+
 ### Other tools

 Some of the unix world tools are required to build EMQX. Including:


@@ -89,10 +89,10 @@ the check/consume will succeed, but it will be forced to wait for a short period
         }
     }

-    per_client {
+    client {
         desc {
-            en: """The rate limit for each user of the bucket, this field is not required"""
-            zh: """对桶的每个使用者的速率控制设置,这个不是必须的"""
+            en: """The rate limit for each user of the bucket"""
+            zh: """对桶的每个使用者的速率控制设置"""
         }
         label: {
             en: """Per Client"""
@@ -124,20 +124,6 @@ the check/consume will succeed, but it will be forced to wait for a short period
         }
     }

-    batch {
-        desc {
-            en: """The batch limiter.
-This is used for EMQX internal batch operation
-e.g. limit the retainer's deliver rate"""
-            zh: """批量操作速率控制器。
-这是给 EMQX 内部的批量操作使用的,比如用来控制保留消息的派发速率"""
-        }
-        label: {
-            en: """Batch"""
-            zh: """批量操作"""
-        }
-    }
-
     message_routing {
         desc {
             en: """The message routing limiter.
@@ -193,4 +179,12 @@ Once the limit is reached, the restricted client will be slow down even be hung
             zh: """流入字节率"""
         }
     }
+
+    internal {
+        desc {
+            en: """Limiter for EMQX internal app."""
+            zh: """EMQX 内部功能所用限制器。"""
+        }
+    }
 }


@@ -17,6 +17,19 @@
 -ifndef(EMQX_AUTHENTICATION_HRL).
 -define(EMQX_AUTHENTICATION_HRL, true).

+-include_lib("emqx/include/logger.hrl").
+
+-define(AUTHN_TRACE_TAG, "AUTHN").
+
+-define(TRACE_AUTHN_PROVIDER(Msg), ?TRACE_AUTHN_PROVIDER(Msg, #{})).
+-define(TRACE_AUTHN_PROVIDER(Msg, Meta), ?TRACE_AUTHN_PROVIDER(debug, Msg, Meta)).
+-define(TRACE_AUTHN_PROVIDER(Level, Msg, Meta),
+    ?TRACE_AUTHN(Level, Msg, (Meta)#{provider => ?MODULE})
+).
+
+-define(TRACE_AUTHN(Msg, Meta), ?TRACE_AUTHN(debug, Msg, Meta)).
+-define(TRACE_AUTHN(Level, Msg, Meta), ?TRACE(Level, ?AUTHN_TRACE_TAG, Msg, Meta)).
+
 %% config root name all auth providers have to agree on.
 -define(EMQX_AUTHENTICATION_CONFIG_ROOT_NAME, "authentication").
 -define(EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM, authentication).


@@ -32,7 +32,7 @@
 %% `apps/emqx/src/bpapi/README.md'

 %% Community edition
--define(EMQX_RELEASE_CE, "5.0.3").
+-define(EMQX_RELEASE_CE, "5.0.4").

 %% Enterprise edition
 -define(EMQX_RELEASE_EE, "5.0.0-alpha.1").


@@ -42,17 +42,21 @@
 -define(TRACE_FILTER, emqx_trace_filter).

+-define(TRACE(Tag, Msg, Meta), ?TRACE(debug, Tag, Msg, Meta)).
+
 %% Only evaluate when necessary
-%% Always debug the trace events.
--define(TRACE(Tag, Msg, Meta), begin
-    case persistent_term:get(?TRACE_FILTER, undefined) of
-        undefined -> ok;
+-define(TRACE(Level, Tag, Msg, Meta), begin
+    case persistent_term:get(?TRACE_FILTER, []) of
         [] -> ok;
-        List -> emqx_trace:log(List, Msg, Meta#{trace_tag => Tag})
+        %% We can't bind filter list to a variablebecause we pollute the calling scope with it.
+        %% We also don't want to wrap the macro body in a fun
+        %% beacause this adds overhead to the happy path.
+        %% So evaluate `persistent_term:get` twice.
+        _ -> emqx_trace:log(persistent_term:get(?TRACE_FILTER, []), Msg, (Meta)#{trace_tag => Tag})
     end,
     ?SLOG(
-        debug,
+        Level,
-        (emqx_trace_formatter:format_meta(Meta))#{msg => Msg, tag => Tag},
+        (emqx_trace_formatter:format_meta_map(Meta))#{msg => Msg, tag => Tag},
         #{is_trace => false}
     )
 end).
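The reworked `?TRACE` macro keeps a 3-arity form that defaults to `debug` and adds a 4-arity form that forwards the level to `?SLOG`. A hedged usage sketch (module name and include paths are illustrative; it assumes the header is reachable as `emqx/include/trace.hrl` and that `emqx/include/logger.hrl` provides `?SLOG`):

-module(trace_usage_example).

-include_lib("emqx/include/logger.hrl").
-include_lib("emqx/include/trace.hrl").

-export([handle_failure/1]).

handle_failure(Reason) ->
    %% old-style call site keeps working: traced (and logged) at debug level
    ?TRACE("EXAMPLE", "operation_started", #{}),
    %% new 4-arity form: same trace tag, but the ?SLOG entry is a warning
    ?TRACE(warning, "EXAMPLE", "operation_failed", #{reason => Reason}).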


@@ -14,6 +14,7 @@
 {emqx_gateway_cm,1}.
 {emqx_gateway_http,1}.
 {emqx_license,1}.
+{emqx_license,2}.
 {emqx_management,1}.
 {emqx_management,2}.
 {emqx_mgmt_api_plugins,1}.


@@ -27,9 +27,9 @@
     {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
     {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.3"}}},
-    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.1"}}},
+    {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.3"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
-    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.28.3"}}},
+    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.29.0"}}},
     {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
     {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
     {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}


@@ -24,7 +24,7 @@ IsQuicSupp = fun() ->
 end,

 Bcrypt = {bcrypt, {git, "https://github.com/emqx/erlang-bcrypt.git", {tag, "0.6.0"}}},
-Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.14"}}}.
+Quicer = {quicer, {git, "https://github.com/emqx/quic.git", {tag, "0.0.16"}}}.

 ExtraDeps = fun(C) ->
     {deps, Deps0} = lists:keyfind(deps, 1, C),


@@ -3,7 +3,7 @@
     {id, "emqx"},
     {description, "EMQX Core"},
     % strict semver, bump manually!
-    {vsn, "5.0.4"},
+    {vsn, "5.0.5"},
     {modules, []},
     {registered, []},
     {applications, [


@@ -101,6 +101,14 @@
 -define(CHAINS_TAB, emqx_authn_chains).

+-define(TRACE_RESULT(Label, Result, Reason), begin
+    ?TRACE_AUTHN(Label, #{
+        result => (Result),
+        reason => (Reason)
+    }),
+    Result
+end).
+
 -type chain_name() :: atom().
 -type authenticator_id() :: binary().
 -type position() :: front | rear | {before, authenticator_id()} | {'after', authenticator_id()}.
@@ -216,14 +224,14 @@ when
 authenticate(#{enable_authn := false}, _AuthResult) ->
     inc_authenticate_metric('authentication.success.anonymous'),
-    ignore;
+    ?TRACE_RESULT("authentication_result", ignore, enable_authn_false);
 authenticate(#{listener := Listener, protocol := Protocol} = Credential, _AuthResult) ->
     case get_authenticators(Listener, global_chain(Protocol)) of
         {ok, ChainName, Authenticators} ->
             case get_enabled(Authenticators) of
                 [] ->
                     inc_authenticate_metric('authentication.success.anonymous'),
-                    ignore;
+                    ?TRACE_RESULT("authentication_result", ignore, empty_chain);
                 NAuthenticators ->
                     Result = do_authenticate(ChainName, NAuthenticators, Credential),
@@ -235,11 +243,11 @@ authenticate(#{listener := Listener, protocol := Protocol} = Credential, _AuthRe
                         _ ->
                             ok
                     end,
-                    Result
+                    ?TRACE_RESULT("authentication_result", Result, chain_result)
             end;
         none ->
             inc_authenticate_metric('authentication.success.anonymous'),
-            ignore
+            ?TRACE_RESULT("authentication_result", ignore, no_chain)
     end.

 get_authenticators(Listener, Global) ->
@@ -626,11 +634,11 @@ handle_create_authenticator(Chain, Config, Providers) ->
 do_authenticate(_ChainName, [], _) ->
     {stop, {error, not_authorized}};
 do_authenticate(
-    ChainName, [#authenticator{id = ID, provider = Provider, state = State} | More], Credential
+    ChainName, [#authenticator{id = ID} = Authenticator | More], Credential
 ) ->
     MetricsID = metrics_id(ChainName, ID),
     emqx_metrics_worker:inc(authn_metrics, MetricsID, total),
-    try Provider:authenticate(Credential, State) of
+    try authenticate_with_provider(Authenticator, Credential) of
         ignore ->
             ok = emqx_metrics_worker:inc(authn_metrics, MetricsID, nomatch),
             do_authenticate(ChainName, More, Credential);
@@ -651,8 +659,7 @@ do_authenticate(
             {stop, Result}
     catch
         Class:Reason:Stacktrace ->
-            ?SLOG(warning, #{
-                msg => "unexpected_error_in_authentication",
+            ?TRACE_AUTHN(warning, "authenticator_error", #{
                 exception => Class,
                 reason => Reason,
                 stacktrace => Stacktrace,
@@ -662,6 +669,14 @@ do_authenticate(
             do_authenticate(ChainName, More, Credential)
     end.

+authenticate_with_provider(#authenticator{id = ID, provider = Provider, state = State}, Credential) ->
+    AuthnResult = Provider:authenticate(Credential, State),
+    ?TRACE_AUTHN("authenticator_result", #{
+        authenticator => ID,
+        result => AuthnResult
+    }),
+    AuthnResult.
+
 reply(Reply, State) ->
     {reply, Reply, State}.
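For reference, the new `?TRACE_RESULT(Label, Result, Reason)` macro introduced above is result-transparent: it emits a `?TRACE_AUTHN` event and still evaluates to `Result`, which is why call sites such as `?TRACE_RESULT("authentication_result", ignore, empty_chain)` can replace a bare `ignore`. A rough expansion sketch for illustration only (after macro substitution):

%% ?TRACE_RESULT("authentication_result", ignore, empty_chain) expands to roughly:
begin
    ?TRACE_AUTHN("authentication_result", #{
        result => (ignore),
        reason => (empty_chain)
    }),
    ignore
end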


@@ -252,11 +252,12 @@ init(
             <<>> -> undefined;
             MP -> MP
         end,
+    ListenerId = emqx_listeners:listener_id(Type, Listener),
     ClientInfo = set_peercert_infos(
         Peercert,
         #{
             zone => Zone,
-            listener => emqx_listeners:listener_id(Type, Listener),
+            listener => ListenerId,
             protocol => Protocol,
             peerhost => PeerHost,
             sockport => SockPort,
@@ -278,7 +279,9 @@ init(
             outbound => #{}
         },
         auth_cache = #{},
-        quota = emqx_limiter_container:get_limiter_by_names([?LIMITER_ROUTING], LimiterCfg),
+        quota = emqx_limiter_container:get_limiter_by_types(
+            ListenerId, [?LIMITER_ROUTING], LimiterCfg
+        ),
         timers = #{},
         conn_state = idle,
         takeover = false,
@@ -354,7 +357,7 @@ handle_in(?CONNECT_PACKET(ConnPkt) = Packet, Channel) ->
     },
     case authenticate(?CONNECT_PACKET(NConnPkt), NChannel1) of
         {ok, Properties, NChannel2} ->
-            process_connect(Properties, ensure_connected(NChannel2));
+            process_connect(Properties, NChannel2);
         {continue, Properties, NChannel2} ->
             handle_out(auth, {?RC_CONTINUE_AUTHENTICATION, Properties}, NChannel2);
         {error, ReasonCode} ->
@@ -378,7 +381,7 @@ handle_in(
         {ok, NProperties, NChannel} ->
             case ConnState of
                 connecting ->
-                    process_connect(NProperties, ensure_connected(NChannel));
+                    process_connect(NProperties, NChannel);
                 _ ->
                     handle_out(
                         auth,
@@ -608,7 +611,7 @@ process_connect(
     case emqx_cm:open_session(CleanStart, ClientInfo, ConnInfo) of
         {ok, #{session := Session, present := false}} ->
             NChannel = Channel#channel{session = Session},
-            handle_out(connack, {?RC_SUCCESS, sp(false), AckProps}, NChannel);
+            handle_out(connack, {?RC_SUCCESS, sp(false), AckProps}, ensure_connected(NChannel));
         {ok, #{session := Session, present := true, pendings := Pendings}} ->
             Pendings1 = lists:usort(lists:append(Pendings, emqx_misc:drain_deliver())),
             NChannel = Channel#channel{
@@ -616,7 +619,7 @@ process_connect(
                 resuming = true,
                 pendings = Pendings1
             },
-            handle_out(connack, {?RC_SUCCESS, sp(true), AckProps}, NChannel);
+            handle_out(connack, {?RC_SUCCESS, sp(true), AckProps}, ensure_connected(NChannel));
         {error, client_id_unavailable} ->
             handle_out(connack, ?RC_CLIENT_IDENTIFIER_NOT_VALID, Channel);
         {error, Reason} ->
@@ -1199,9 +1202,6 @@ handle_call(
     disconnect_and_shutdown(takenover, AllPendings, Channel);
 handle_call(list_authz_cache, Channel) ->
     {reply, emqx_authz_cache:list_authz_cache(), Channel};
-handle_call({quota, Bucket}, #channel{quota = Quota} = Channel) ->
-    Quota2 = emqx_limiter_container:update_by_name(message_routing, Bucket, Quota),
-    reply(ok, Channel#channel{quota = Quota2});
 handle_call(
     {keepalive, Interval},
     Channel = #channel{


@@ -556,10 +556,12 @@ save_to_override_conf(RawConf, Opts) ->
 add_handlers() ->
     ok = emqx_config_logger:add_handler(),
+    emqx_sys_mon:add_handler(),
     ok.

 remove_handlers() ->
     ok = emqx_config_logger:remove_handler(),
+    emqx_sys_mon:remove_handler(),
     ok.

 load_hocon_file(FileName, LoadType) ->


@@ -321,7 +321,7 @@ init_state(
     },

     LimiterTypes = [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN],
-    Limiter = emqx_limiter_container:get_limiter_by_names(LimiterTypes, LimiterCfg),
+    Limiter = emqx_limiter_container:get_limiter_by_types(Listener, LimiterTypes, LimiterCfg),

     FrameOpts = #{
         strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]),
@@ -672,12 +672,6 @@ handle_call(_From, info, State) ->
     {reply, info(State), State};
 handle_call(_From, stats, State) ->
     {reply, stats(State), State};
-handle_call(_From, {ratelimit, Changes}, State = #state{limiter = Limiter}) ->
-    Fun = fun({Type, Bucket}, Acc) ->
-        emqx_limiter_container:update_by_name(Type, Bucket, Acc)
-    end,
-    Limiter2 = lists:foldl(Fun, Limiter, Changes),
-    {reply, ok, State#state{limiter = Limiter2}};
 handle_call(_From, Req, State = #state{channel = Channel}) ->
     case emqx_channel:handle_call(Req, Channel) of
         {reply, Reply, NChannel} ->


@@ -19,12 +19,13 @@
 -behaviour(esockd_generic_limiter).

 %% API
--export([new_create_options/2, create/1, delete/1, consume/2]).
+-export([new_create_options/3, create/1, delete/1, consume/2]).

 -type create_options() :: #{
     module := ?MODULE,
+    id := emqx_limiter_schema:limiter_id(),
     type := emqx_limiter_schema:limiter_type(),
-    bucket := emqx_limiter_schema:bucket_name()
+    bucket := hocons:config()
 }.

 %%--------------------------------------------------------------------
@@ -32,15 +33,16 @@
 %%--------------------------------------------------------------------

 -spec new_create_options(
+    emqx_limiter_schema:limiter_id(),
     emqx_limiter_schema:limiter_type(),
-    emqx_limiter_schema:bucket_name()
+    hocons:config()
 ) -> create_options().
-new_create_options(Type, BucketName) ->
-    #{module => ?MODULE, type => Type, bucket => BucketName}.
+new_create_options(Id, Type, BucketCfg) ->
+    #{module => ?MODULE, id => Id, type => Type, bucket => BucketCfg}.

 -spec create(create_options()) -> esockd_generic_limiter:limiter().
-create(#{module := ?MODULE, type := Type, bucket := BucketName}) ->
-    {ok, Limiter} = emqx_limiter_server:connect(Type, BucketName),
+create(#{module := ?MODULE, id := Id, type := Type, bucket := BucketCfg}) ->
+    {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfg),
     #{module => ?MODULE, name => Type, limiter => Limiter}.

 delete(_GLimiter) ->


@@ -22,10 +22,8 @@
 %% API
 -export([
-    new/0, new/1, new/2,
-    get_limiter_by_names/2,
+    get_limiter_by_types/3,
     add_new/3,
-    update_by_name/3,
     set_retry_context/2,
     check/3,
     retry/2,
@@ -48,10 +46,10 @@
 }.

 -type future() :: pos_integer().
+-type limiter_id() :: emqx_limiter_schema:limiter_id().
 -type limiter_type() :: emqx_limiter_schema:limiter_type().
 -type limiter() :: emqx_htb_limiter:limiter().
 -type retry_context() :: emqx_htb_limiter:retry_context().
--type bucket_name() :: emqx_limiter_schema:bucket_name().
 -type millisecond() :: non_neg_integer().

 -type check_result() ::
     {ok, container()}
@@ -64,46 +62,24 @@
 %%--------------------------------------------------------------------
 %% API
 %%--------------------------------------------------------------------
--spec new() -> container().
-new() ->
-    new([]).
-
-%% @doc generate default data according to the type of limiter
--spec new(list(limiter_type())) -> container().
-new(Types) ->
-    new(Types, #{}).
-
--spec new(
-    list(limiter_type()),
-    #{limiter_type() => emqx_limiter_schema:bucket_name()}
-) -> container().
-new(Types, Names) ->
-    get_limiter_by_names(Types, Names).
-
 %% @doc generate a container
 %% according to the type of limiter and the bucket name configuration of the limiter
 %% @end
--spec get_limiter_by_names(
+-spec get_limiter_by_types(
+    limiter_id() | {atom(), atom()},
     list(limiter_type()),
-    #{limiter_type() => emqx_limiter_schema:bucket_name()}
+    #{limiter_type() => hocons:config()}
 ) -> container().
-get_limiter_by_names(Types, BucketNames) ->
+get_limiter_by_types({Type, Listener}, Types, BucketCfgs) ->
+    Id = emqx_listeners:listener_id(Type, Listener),
+    get_limiter_by_types(Id, Types, BucketCfgs);
+get_limiter_by_types(Id, Types, BucketCfgs) ->
     Init = fun(Type, Acc) ->
-        {ok, Limiter} = emqx_limiter_server:connect(Type, BucketNames),
+        {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs),
         add_new(Type, Limiter, Acc)
     end,
     lists:foldl(Init, #{retry_ctx => undefined}, Types).

-%% @doc add the specified type of limiter to the container
--spec update_by_name(
-    limiter_type(),
-    bucket_name() | #{limiter_type() => bucket_name()},
-    container()
-) -> container().
-update_by_name(Type, Buckets, Container) ->
-    {ok, Limiter} = emqx_limiter_server:connect(Type, Buckets),
-    add_new(Type, Limiter, Container).
-
 -spec add_new(limiter_type(), limiter(), container()) -> container().
 add_new(Type, Limiter, Container) ->
     Container#{
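The container API change above replaces bucket-name lookups with listener-scoped ones. A hedged before/after sketch (identifiers are illustrative; actually running it requires the emqx limiter supervision tree to be up, and an empty config map simply falls back to defaults):

%% old API: limiters were resolved by bucket name
%% Container0 = emqx_limiter_container:get_limiter_by_names(
%%     [bytes_in, message_in], #{bytes_in => default, message_in => default}).

%% new API: limiters are resolved by listener id + type; the {Type, Listener}
%% tuple form is converted to a listener id such as 'tcp:default' internally.
Container = emqx_limiter_container:get_limiter_by_types(
    {tcp, default}, [bytes_in, message_in], #{}
).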


@@ -24,11 +24,9 @@
 %% API
 -export([
     start_link/0,
-    find_bucket/1,
     find_bucket/2,
-    insert_bucket/2,
     insert_bucket/3,
-    make_path/2,
+    delete_bucket/2,
     post_config_update/5
 ]).
@@ -50,20 +48,19 @@
     format_status/2
 ]).

--export_type([path/0]).
--type path() :: list(atom()).
+-type limiter_id() :: emqx_limiter_schema:limiter_id().
 -type limiter_type() :: emqx_limiter_schema:limiter_type().
--type bucket_name() :: emqx_limiter_schema:bucket_name().
+-type uid() :: {limiter_id(), limiter_type()}.

 %% counter record in ets table
 -record(bucket, {
-    path :: path(),
+    uid :: uid(),
     bucket :: bucket_ref()
 }).

 -type bucket_ref() :: emqx_limiter_bucket_ref:bucket_ref().

+-define(UID(Id, Type), {Id, Type}).
 -define(TAB, emqx_limiter_counters).

 %%--------------------------------------------------------------------
@@ -85,14 +82,10 @@ restart_server(Type) ->
 stop_server(Type) ->
     emqx_limiter_server_sup:stop(Type).

--spec find_bucket(limiter_type(), bucket_name()) ->
+-spec find_bucket(limiter_id(), limiter_type()) ->
     {ok, bucket_ref()} | undefined.
-find_bucket(Type, BucketName) ->
-    find_bucket(make_path(Type, BucketName)).
-
--spec find_bucket(path()) -> {ok, bucket_ref()} | undefined.
-find_bucket(Path) ->
-    case ets:lookup(?TAB, Path) of
+find_bucket(Id, Type) ->
+    case ets:lookup(?TAB, ?UID(Id, Type)) of
         [#bucket{bucket = Bucket}] ->
             {ok, Bucket};
         _ ->
@@ -100,20 +93,19 @@ find_bucket(Path) ->
     end.

 -spec insert_bucket(
+    limiter_id(),
     limiter_type(),
-    bucket_name(),
     bucket_ref()
 ) -> boolean().
-insert_bucket(Type, BucketName, Bucket) ->
-    inner_insert_bucket(make_path(Type, BucketName), Bucket).
-
--spec insert_bucket(path(), bucket_ref()) -> true.
-insert_bucket(Path, Bucket) ->
-    inner_insert_bucket(Path, Bucket).
+insert_bucket(Id, Type, Bucket) ->
+    ets:insert(
+        ?TAB,
+        #bucket{uid = ?UID(Id, Type), bucket = Bucket}
+    ).
+
+-spec delete_bucket(limiter_id(), limiter_type()) -> true.
+delete_bucket(Type, Id) ->
+    ets:delete(?TAB, ?UID(Id, Type)).

--spec make_path(limiter_type(), bucket_name()) -> path().
-make_path(Type, BucketName) ->
-    [Type | BucketName].
-
 post_config_update([limiter, Type], _Config, NewConf, _OldConf, _AppEnvs) ->
     Config = maps:get(Type, NewConf),
@@ -159,7 +151,7 @@ init([]) ->
         set,
         public,
         named_table,
-        {keypos, #bucket.path},
+        {keypos, #bucket.uid},
         {write_concurrency, true},
         {read_concurrency, true},
         {heir, erlang:whereis(emqx_limiter_sup), none}
@@ -266,9 +258,3 @@ format_status(_Opt, Status) ->
 %%--------------------------------------------------------------------
 %% Internal functions
 %%--------------------------------------------------------------------
--spec inner_insert_bucket(path(), bucket_ref()) -> true.
-inner_insert_bucket(Path, Bucket) ->
-    ets:insert(
-        ?TAB,
-        #bucket{path = Path, bucket = Bucket}
-    ).


@@ -41,8 +41,10 @@
     | message_in
     | connection
     | message_routing
-    | batch.
+    %% internal limiter for unclassified resources
+    | internal.

+-type limiter_id() :: atom().
 -type bucket_name() :: atom().
 -type rate() :: infinity | float().
 -type burst_rate() :: 0 | float().
@@ -76,7 +78,7 @@
     bucket_name/0
 ]).

--export_type([limiter_type/0, bucket_path/0]).
+-export_type([limiter_id/0, limiter_type/0, bucket_path/0]).

 -define(UNIT_TIME_IN_MS, 1000).
@@ -87,52 +89,50 @@ roots() -> [limiter].
 fields(limiter) ->
     [
         {Type,
-            ?HOCON(?R_REF(limiter_opts), #{
+            ?HOCON(?R_REF(node_opts), #{
                 desc => ?DESC(Type),
-                default => make_limiter_default(Type)
+                default => #{}
             })}
      || Type <- types()
+    ] ++
+        [
+            {client,
+                ?HOCON(
+                    ?R_REF(client_fields),
+                    #{
+                        desc => ?DESC(client),
+                        default => maps:from_list([
+                            {erlang:atom_to_binary(Type), #{}}
+                         || Type <- types()
+                        ])
+                    }
+                )}
     ];
-fields(limiter_opts) ->
+fields(node_opts) ->
     [
         {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
         {burst,
             ?HOCON(burst_rate(), #{
                 desc => ?DESC(burst),
                 default => 0
-            })},
-        {bucket,
-            ?HOCON(
-                ?MAP("bucket_name", ?R_REF(bucket_opts)),
-                #{
-                    desc => ?DESC(bucket_cfg),
-                    default => #{<<"default">> => #{}},
-                    example => #{
-                        <<"mybucket-name">> => #{
-                            <<"rate">> => <<"infinity">>,
-                            <<"capcity">> => <<"infinity">>,
-                            <<"initial">> => <<"100">>,
-                            <<"per_client">> => #{<<"rate">> => <<"infinity">>}
-                        }
-                    }
-                }
-            )}
+            })}
+    ];
+fields(client_fields) ->
+    [
+        {Type,
+            ?HOCON(?R_REF(client_opts), #{
+                desc => ?DESC(Type),
+                default => #{}
+            })}
+     || Type <- types()
     ];
 fields(bucket_opts) ->
     [
         {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
         {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => "infinity"})},
-        {initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})},
-        {per_client,
-            ?HOCON(
-                ?R_REF(client_bucket),
-                #{
-                    default => #{},
-                    desc => ?DESC(per_client)
-                }
-            )}
+        {initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})}
     ];
-fields(client_bucket) ->
+fields(client_opts) ->
     [
         {rate, ?HOCON(rate(), #{default => "infinity", desc => ?DESC(rate)})},
         {initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})},
@@ -177,16 +177,30 @@ fields(client_bucket) ->
                 default => force
             }
         )}
-    ].
+    ];
+fields(listener_fields) ->
+    bucket_fields([bytes_in, message_in, connection, message_routing], listener_client_fields);
+fields(listener_client_fields) ->
+    client_fields([bytes_in, message_in, connection, message_routing]);
+fields(Type) ->
+    bucket_field(Type).

 desc(limiter) ->
     "Settings for the rate limiter.";
-desc(limiter_opts) ->
-    "Settings for the limiter.";
+desc(node_opts) ->
+    "Settings for the limiter of the node level.";
 desc(bucket_opts) ->
     "Settings for the bucket.";
-desc(client_bucket) ->
-    "Settings for the client bucket.";
+desc(client_opts) ->
+    "Settings for the client in bucket level.";
+desc(client_fields) ->
+    "Fields of the client level.";
+desc(listener_fields) ->
+    "Fields of the listener.";
+desc(listener_client_fields) ->
+    "Fields of the client level of the listener.";
+desc(internal) ->
+    "Internal limiter.";
 desc(_) ->
     undefined.
@@ -202,7 +216,7 @@ get_bucket_cfg_path(Type, BucketName) ->
     [limiter, Type, bucket, BucketName].

 types() ->
-    [bytes_in, message_in, connection, message_routing, batch].
+    [bytes_in, message_in, connection, message_routing, internal].

 %%--------------------------------------------------------------------
%% Internal functions %% Internal functions
@ -322,16 +336,44 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE; apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit). apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
make_limiter_default(connection) -> bucket_field(Type) when is_atom(Type) ->
fields(bucket_opts) ++
[
{client,
?HOCON(
?R_REF(?MODULE, client_opts),
#{ #{
<<"rate">> => <<"1000/s">>, desc => ?DESC(client),
<<"bucket">> => #{ required => false
<<"default">> => }
)}
].
bucket_fields(Types, ClientRef) ->
[
{Type,
?HOCON(?R_REF(?MODULE, bucket_opts), #{
desc => ?DESC(?MODULE, Type),
required => false
})}
|| Type <- Types
] ++
[
{client,
?HOCON(
?R_REF(?MODULE, ClientRef),
#{ #{
<<"rate">> => <<"1000/s">>, desc => ?DESC(client),
<<"capacity">> => 1000 required => false
} }
} )}
}; ].
make_limiter_default(_) ->
#{}. client_fields(Types) ->
[
{Type,
?HOCON(?R_REF(client_opts), #{
desc => ?DESC(Type),
required => false
})}
|| Type <- Types
].
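To illustrate the reshaped schema (node_opts at the limiter root, per-listener buckets described by listener_fields, client overrides under a nested client section), a raw listener limiter config now looks roughly like the map below. The values are illustrative and reuse only formats that already appear in this changeset:

#{
    <<"connection">> => #{<<"rate">> => <<"1000/s">>, <<"capacity">> => 1000},
    <<"bytes_in">> => #{<<"rate">> => <<"infinity">>, <<"capacity">> => <<"infinity">>},
    <<"client">> => #{
        <<"bytes_in">> => #{<<"rate">> => <<"1000/s">>}
    }
}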

View File

@ -42,11 +42,13 @@
-export([ -export([
start_link/2, start_link/2,
connect/2, connect/3,
add_bucket/3,
del_bucket/2,
get_initial_val/1,
whereis/1, whereis/1,
info/1, info/1,
name/1, name/1,
get_initial_val/1,
restart/1, restart/1,
update_config/2 update_config/2
]). ]).
@ -73,16 +75,17 @@
-type state() :: #{ -type state() :: #{
type := limiter_type(), type := limiter_type(),
root := undefined | root(), root := root(),
buckets := buckets(), buckets := buckets(),
%% current counter to alloc %% current counter to alloc
counter := undefined | counters:counters_ref(), counter := counters:counters_ref(),
index := index() index := 0 | index()
}. }.
-type buckets() :: #{bucket_name() => bucket()}. -type buckets() :: #{bucket_name() => bucket()}.
-type limiter_type() :: emqx_limiter_schema:limiter_type(). -type limiter_type() :: emqx_limiter_schema:limiter_type().
-type bucket_name() :: emqx_limiter_schema:bucket_name(). -type bucket_name() :: emqx_limiter_schema:bucket_name().
-type limiter_id() :: emqx_limiter_schema:limiter_id().
-type rate() :: decimal(). -type rate() :: decimal().
-type flow() :: decimal(). -type flow() :: decimal().
-type capacity() :: decimal(). -type capacity() :: decimal().
@ -94,7 +97,7 @@
%% minimum coefficient for overloaded limiter %% minimum coefficient for overloaded limiter
-define(OVERLOAD_MIN_ALLOC, 0.3). -define(OVERLOAD_MIN_ALLOC, 0.3).
-define(CURRYING(X, F2), fun(Y) -> F2(X, Y) end). -define(COUNTER_SIZE, 8).
-export_type([index/0]). -export_type([index/0]).
-import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]). -import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]).
@ -105,39 +108,49 @@
%% API %% API
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-spec connect( -spec connect(
limiter_id(),
limiter_type(), limiter_type(),
bucket_name() | #{limiter_type() => bucket_name() | undefined} bucket_name() | #{limiter_type() => bucket_name() | undefined}
) -> ) ->
{ok, emqx_htb_limiter:limiter()} | {error, _}. {ok, emqx_htb_limiter:limiter()} | {error, _}.
%% If no bucket path is set in config, there will be no limit %% If no bucket path is set in config, there will be no limit
connect(_Type, undefined) -> connect(_Id, _Type, undefined) ->
{ok, emqx_htb_limiter:make_infinity_limiter()}; {ok, emqx_htb_limiter:make_infinity_limiter()};
connect(Type, BucketName) when is_atom(BucketName) -> connect(Id, Type, Cfg) ->
case get_bucket_cfg(Type, BucketName) of case find_limiter_cfg(Type, Cfg) of
undefined -> {undefined, _} ->
?SLOG(error, #{msg => "bucket_config_not_found", type => Type, bucket => BucketName}), {ok, emqx_htb_limiter:make_infinity_limiter()};
{error, config_not_found}; {
#{ #{
rate := BucketRate, rate := BucketRate,
capacity := BucketSize, capacity := BucketSize
per_client := #{rate := CliRate, capacity := CliSize} = Cfg },
#{rate := CliRate, capacity := CliSize} = ClientCfg
} -> } ->
case emqx_limiter_manager:find_bucket(Type, BucketName) of case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} -> {ok, Bucket} ->
{ok, {ok,
if if
CliRate < BucketRate orelse CliSize < BucketSize -> CliRate < BucketRate orelse CliSize < BucketSize ->
emqx_htb_limiter:make_token_bucket_limiter(Cfg, Bucket); emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket);
true -> true ->
emqx_htb_limiter:make_ref_limiter(Cfg, Bucket) emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)
end}; end};
undefined -> undefined ->
?SLOG(error, #{msg => "bucket_not_found", type => Type, bucket => BucketName}), ?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}),
{error, invalid_bucket} {error, invalid_bucket}
end end
end; end.
connect(Type, Paths) ->
connect(Type, maps:get(Type, Paths, undefined)). -spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok.
add_bucket(_Id, _Type, undefined) ->
ok;
add_bucket(Id, Type, Cfg) ->
?CALL(Type, {add_bucket, Id, Cfg}).
-spec del_bucket(limiter_id(), limiter_type()) -> ok.
del_bucket(Id, Type) ->
?CALL(Type, {del_bucket, Id}).
-spec info(limiter_type()) -> state() | {error, _}. -spec info(limiter_type()) -> state() | {error, _}.
info(Type) -> info(Type) ->
@ -213,6 +226,12 @@ handle_call(restart, _From, #{type := Type}) ->
handle_call({update_config, Type, Config}, _From, #{type := Type}) -> handle_call({update_config, Type, Config}, _From, #{type := Type}) ->
NewState = init_tree(Type, Config), NewState = init_tree(Type, Config),
{reply, ok, NewState}; {reply, ok, NewState};
handle_call({add_bucket, Id, Cfg}, _From, State) ->
NewState = do_add_bucket(Id, Cfg, State),
{reply, ok, NewState};
handle_call({del_bucket, Id}, _From, State) ->
NewState = do_del_bucket(Id, State),
{reply, ok, NewState};
handle_call(Req, _From, State) -> handle_call(Req, _From, State) ->
?SLOG(error, #{msg => "unexpected_call", call => Req}), ?SLOG(error, #{msg => "unexpected_call", call => Req}),
{reply, ignored, State}. {reply, ignored, State}.
@ -456,24 +475,14 @@ init_tree(Type) when is_atom(Type) ->
Cfg = emqx:get_config([limiter, Type]), Cfg = emqx:get_config([limiter, Type]),
init_tree(Type, Cfg). init_tree(Type, Cfg).
init_tree(Type, #{bucket := Buckets} = Cfg) -> init_tree(Type, Cfg) ->
State = #{ #{
type => Type, type => Type,
root => undefined, root => make_root(Cfg),
counter => undefined, counter => counters:new(?COUNTER_SIZE, [write_concurrency]),
index => 1, index => 0,
buckets => #{} buckets => #{}
}, }.
Root = make_root(Cfg),
{CounterNum, DelayBuckets} = make_bucket(maps:to_list(Buckets), Type, Cfg, 1, []),
State2 = State#{
root := Root,
counter := counters:new(CounterNum, [write_concurrency])
},
lists:foldl(fun(F, Acc) -> F(Acc) end, State2, DelayBuckets).
-spec make_root(hocons:config()) -> root(). -spec make_root(hocons:config()) -> root().
make_root(#{rate := Rate, burst := Burst}) -> make_root(#{rate := Rate, burst := Burst}) ->
@ -484,79 +493,50 @@ make_root(#{rate := Rate, burst := Burst}) ->
produced => 0.0 produced => 0.0
}. }.
make_bucket([{Name, Conf} | T], Type, GlobalCfg, CounterNum, DelayBuckets) -> do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) ->
Path = emqx_limiter_manager:make_path(Type, Name), case maps:get(Id, Buckets, undefined) of
Rate = get_counter_rate(Conf, GlobalCfg), undefined ->
#{capacity := Capacity} = Conf, make_bucket(Id, Cfg, State);
Initial = get_initial_val(Conf), Bucket ->
CounterNum2 = CounterNum + 1, Bucket2 = Bucket#{rate := Rate, capacity := Capacity},
InitFun = fun(#{name := BucketName} = Bucket, #{buckets := Buckets} = State) -> State#{buckets := Buckets#{Id := Bucket2}}
{Counter, Idx, State2} = alloc_counter(Path, Rate, Initial, State), end.
Bucket2 = Bucket#{counter := Counter, index := Idx},
State2#{buckets := Buckets#{BucketName => Bucket2}}
end,
make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) ->
make_bucket(Id, Cfg, State#{
counter => counters:new(?COUNTER_SIZE, [write_concurrency]),
index => 0
});
make_bucket(
Id,
#{rate := Rate, capacity := Capacity} = Cfg,
#{type := Type, counter := Counter, index := Index, buckets := Buckets} = State
) ->
NewIndex = Index + 1,
Initial = get_initial_val(Cfg),
Bucket = #{ Bucket = #{
name => Name, name => Id,
rate => Rate, rate => Rate,
obtained => Initial, obtained => Initial,
correction => 0, correction => 0,
capacity => Capacity, capacity => Capacity,
counter => undefined, counter => Counter,
index => undefined index => NewIndex
}, },
_ = put_to_counter(Counter, NewIndex, Initial),
Ref = emqx_limiter_bucket_ref:new(Counter, NewIndex, Rate),
emqx_limiter_manager:insert_bucket(Id, Type, Ref),
State#{buckets := Buckets#{Id => Bucket}, index := NewIndex}.
DelayInit = ?CURRYING(Bucket, InitFun), do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) ->
case maps:get(Id, Buckets, undefined) of
make_bucket( undefined ->
T, State;
Type,
GlobalCfg,
CounterNum2,
[DelayInit | DelayBuckets]
);
make_bucket([], _Type, _Global, CounterNum, DelayBuckets) ->
{CounterNum, DelayBuckets}.
-spec alloc_counter(emqx_limiter_manager:path(), rate(), capacity(), state()) ->
{counters:counters_ref(), pos_integer(), state()}.
alloc_counter(
Path,
Rate,
Initial,
#{counter := Counter, index := Index} = State
) ->
case emqx_limiter_manager:find_bucket(Path) of
{ok, #{
counter := ECounter,
index := EIndex
}} when ECounter =/= undefined ->
init_counter(Path, ECounter, EIndex, Rate, Initial, State);
_ -> _ ->
init_counter( emqx_limiter_manager:delete_bucket(Id, Type),
Path, State#{buckets := maps:remove(Id, Buckets)}
Counter,
Index,
Rate,
Initial,
State#{index := Index + 1}
)
end. end.
init_counter(Path, Counter, Index, Rate, Initial, State) ->
_ = put_to_counter(Counter, Index, Initial),
Ref = emqx_limiter_bucket_ref:new(Counter, Index, Rate),
emqx_limiter_manager:insert_bucket(Path, Ref),
{Counter, Index, State}.
%% @doc find first limited node
get_counter_rate(#{rate := Rate}, _GlobalCfg) when Rate =/= infinity ->
Rate;
get_counter_rate(_Cfg, #{rate := Rate}) when Rate =/= infinity ->
Rate;
get_counter_rate(_Cfg, _GlobalCfg) ->
emqx_limiter_schema:infinity_value().
-spec get_initial_val(hocons:config()) -> decimal(). -spec get_initial_val(hocons:config()) -> decimal().
get_initial_val( get_initial_val(
#{ #{
@ -587,8 +567,21 @@ call(Type, Msg) ->
gen_server:call(Pid, Msg) gen_server:call(Pid, Msg)
end. end.
-spec get_bucket_cfg(limiter_type(), bucket_name()) -> find_limiter_cfg(Type, #{rate := _} = Cfg) ->
undefined | limiter_not_started | hocons:config(). {Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))};
get_bucket_cfg(Type, Bucket) -> find_limiter_cfg(Type, Cfg) ->
Path = emqx_limiter_schema:get_bucket_cfg_path(Type, Bucket), {
emqx:get_config(Path, undefined). maps:get(Type, Cfg, undefined),
find_client_cfg(Type, emqx_map_lib:deep_get([client, Type], Cfg, undefined))
}.
find_client_cfg(Type, BucketCfg) ->
NodeCfg = emqx:get_config([limiter, client, Type], undefined),
merge_client_cfg(NodeCfg, BucketCfg).
merge_client_cfg(undefined, BucketCfg) ->
BucketCfg;
merge_client_cfg(NodeCfg, undefined) ->
NodeCfg;
merge_client_cfg(NodeCfg, BucketCfg) ->
maps:merge(NodeCfg, BucketCfg).
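A minimal sketch of the new bucket lifecycle from a caller's point of view: register a bucket under a limiter id, connect a per-client limiter against it, consume, then drop the bucket. The id and the numbers are illustrative (they mirror the test configs later in this changeset) and assume the limiter server for the type is running:

demo_bucket_lifecycle() ->
    Id = 'tcp:default',
    Bucket = #{rate => 10, initial => 0, capacity => 10},
    Client = #{rate => 5, initial => 0, capacity => 5, low_watermark => 1,
               divisible => false, max_retry_time => timer:seconds(5),
               failure_strategy => force},
    ok = emqx_limiter_server:add_bucket(Id, message_routing, Bucket),
    {ok, Limiter} = emqx_limiter_server:connect(Id, message_routing,
        #{message_routing => Bucket, client => #{message_routing => Client}}),
    %% may pause briefly while the client bucket fills
    _ = emqx_htb_limiter:consume(1, Limiter),
    ok = emqx_limiter_server:del_bucket(Id, message_routing).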

View File

@ -54,7 +54,7 @@
-export([pre_config_update/3, post_config_update/5]). -export([pre_config_update/3, post_config_update/5]).
-export([format_addr/1]). -export([format_bind/1]).
-define(CONF_KEY_PATH, [listeners, '?', '?']). -define(CONF_KEY_PATH, [listeners, '?', '?']).
-define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]). -define(TYPES_STRING, ["tcp", "ssl", "ws", "wss", "quic"]).
@ -201,14 +201,14 @@ start_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
?tp(listener_started, #{type => Type, bind => Bind}), ?tp(listener_started, #{type => Type, bind => Bind}),
console_print( console_print(
"Listener ~ts on ~ts started.~n", "Listener ~ts on ~ts started.~n",
[listener_id(Type, ListenerName), format_addr(Bind)] [listener_id(Type, ListenerName), format_bind(Bind)]
), ),
ok; ok;
{error, {already_started, Pid}} -> {error, {already_started, Pid}} ->
{error, {already_started, Pid}}; {error, {already_started, Pid}};
{error, Reason} -> {error, Reason} ->
ListenerId = listener_id(Type, ListenerName), ListenerId = listener_id(Type, ListenerName),
BindStr = format_addr(Bind), BindStr = format_bind(Bind),
?ELOG( ?ELOG(
"Failed to start listener ~ts on ~ts: ~0p.~n", "Failed to start listener ~ts on ~ts: ~0p.~n",
[ListenerId, BindStr, Reason] [ListenerId, BindStr, Reason]
@ -261,30 +261,37 @@ stop_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
ok -> ok ->
console_print( console_print(
"Listener ~ts on ~ts stopped.~n", "Listener ~ts on ~ts stopped.~n",
[listener_id(Type, ListenerName), format_addr(Bind)] [listener_id(Type, ListenerName), format_bind(Bind)]
), ),
ok; ok;
{error, not_found} -> {error, not_found} ->
?ELOG( ?ELOG(
"Failed to stop listener ~ts on ~ts: ~0p~n", "Failed to stop listener ~ts on ~ts: ~0p~n",
[listener_id(Type, ListenerName), format_addr(Bind), already_stopped] [listener_id(Type, ListenerName), format_bind(Bind), already_stopped]
), ),
ok; ok;
{error, Reason} -> {error, Reason} ->
?ELOG( ?ELOG(
"Failed to stop listener ~ts on ~ts: ~0p~n", "Failed to stop listener ~ts on ~ts: ~0p~n",
[listener_id(Type, ListenerName), format_addr(Bind), Reason] [listener_id(Type, ListenerName), format_bind(Bind), Reason]
), ),
{error, Reason} {error, Reason}
end. end.
-spec do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}. -spec do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}.
do_stop_listener(Type, ListenerName, #{bind := ListenOn}) when Type == tcp; Type == ssl ->
esockd:close(listener_id(Type, ListenerName), ListenOn); do_stop_listener(Type, ListenerName, #{bind := ListenOn} = Conf) when Type == tcp; Type == ssl ->
do_stop_listener(Type, ListenerName, _Conf) when Type == ws; Type == wss -> Id = listener_id(Type, ListenerName),
cowboy:stop_listener(listener_id(Type, ListenerName)); del_limiter_bucket(Id, Conf),
do_stop_listener(quic, ListenerName, _Conf) -> esockd:close(Id, ListenOn);
quicer:stop_listener(listener_id(quic, ListenerName)). do_stop_listener(Type, ListenerName, Conf) when Type == ws; Type == wss ->
Id = listener_id(Type, ListenerName),
del_limiter_bucket(Id, Conf),
cowboy:stop_listener(Id);
do_stop_listener(quic, ListenerName, Conf) ->
Id = listener_id(quic, ListenerName),
del_limiter_bucket(Id, Conf),
quicer:stop_listener(Id).
-ifndef(TEST). -ifndef(TEST).
console_print(Fmt, Args) -> ?ULOG(Fmt, Args). console_print(Fmt, Args) -> ?ULOG(Fmt, Args).
@ -300,10 +307,12 @@ do_start_listener(_Type, _ListenerName, #{enabled := false}) ->
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
Type == tcp; Type == ssl Type == tcp; Type == ssl
-> ->
Id = listener_id(Type, ListenerName),
add_limiter_bucket(Id, Opts),
esockd:open( esockd:open(
listener_id(Type, ListenerName), Id,
ListenOn, ListenOn,
merge_default(esockd_opts(Type, Opts)), merge_default(esockd_opts(Id, Type, Opts)),
{emqx_connection, start_link, [ {emqx_connection, start_link, [
#{ #{
listener => {Type, ListenerName}, listener => {Type, ListenerName},
@ -318,6 +327,7 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
Type == ws; Type == wss Type == ws; Type == wss
-> ->
Id = listener_id(Type, ListenerName), Id = listener_id(Type, ListenerName),
add_limiter_bucket(Id, Opts),
RanchOpts = ranch_opts(Type, ListenOn, Opts), RanchOpts = ranch_opts(Type, ListenOn, Opts),
WsOpts = ws_opts(Type, ListenerName, Opts), WsOpts = ws_opts(Type, ListenerName, Opts),
case Type of case Type of
@ -352,8 +362,10 @@ do_start_listener(quic, ListenerName, #{bind := ListenOn} = Opts) ->
limiter => limiter(Opts) limiter => limiter(Opts)
}, },
StreamOpts = [{stream_callback, emqx_quic_stream}], StreamOpts = [{stream_callback, emqx_quic_stream}],
Id = listener_id(quic, ListenerName),
add_limiter_bucket(Id, Opts),
quicer:start_listener( quicer:start_listener(
listener_id(quic, ListenerName), Id,
port(ListenOn), port(ListenOn),
{ListenOpts, ConnectionOpts, StreamOpts} {ListenOpts, ConnectionOpts, StreamOpts}
); );
@ -410,16 +422,18 @@ post_config_update([listeners, Type, Name], {action, _Action, _}, NewConf, OldCo
post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) -> post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) ->
ok. ok.
esockd_opts(Type, Opts0) -> esockd_opts(ListenerId, Type, Opts0) ->
Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0), Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
Limiter = limiter(Opts0), Limiter = limiter(Opts0),
Opts2 = Opts2 =
case maps:get(connection, Limiter, undefined) of case maps:get(connection, Limiter, undefined) of
undefined -> undefined ->
Opts1; Opts1;
BucketName -> BucketCfg ->
Opts1#{ Opts1#{
limiter => emqx_esockd_htb_limiter:new_create_options(connection, BucketName) limiter => emqx_esockd_htb_limiter:new_create_options(
ListenerId, connection, BucketCfg
)
} }
end, end,
Opts3 = Opts2#{ Opts3 = Opts2#{
@ -492,17 +506,32 @@ merge_default(Options) ->
[{tcp_options, ?MQTT_SOCKOPTS} | Options] [{tcp_options, ?MQTT_SOCKOPTS} | Options]
end. end.
format_addr(Port) when is_integer(Port) -> -spec format_bind(
integer() | {tuple(), integer()} | string() | binary()
) -> io_lib:chars().
format_bind(Port) when is_integer(Port) ->
io_lib:format(":~w", [Port]); io_lib:format(":~w", [Port]);
%% Print only the port number when bound on all interfaces %% Print only the port number when bound on all interfaces
format_addr({{0, 0, 0, 0}, Port}) -> format_bind({{0, 0, 0, 0}, Port}) ->
format_addr(Port); format_bind(Port);
format_addr({{0, 0, 0, 0, 0, 0, 0, 0}, Port}) -> format_bind({{0, 0, 0, 0, 0, 0, 0, 0}, Port}) ->
format_addr(Port); format_bind(Port);
format_addr({Addr, Port}) when is_list(Addr) -> format_bind({Addr, Port}) when is_list(Addr) ->
io_lib:format("~ts:~w", [Addr, Port]); io_lib:format("~ts:~w", [Addr, Port]);
format_addr({Addr, Port}) when is_tuple(Addr) -> format_bind({Addr, Port}) when is_tuple(Addr), tuple_size(Addr) == 4 ->
io_lib:format("~ts:~w", [inet:ntoa(Addr), Port]). io_lib:format("~ts:~w", [inet:ntoa(Addr), Port]);
format_bind({Addr, Port}) when is_tuple(Addr), tuple_size(Addr) == 8 ->
io_lib:format("[~ts]:~w", [inet:ntoa(Addr), Port]);
%% Support string and binary types for Port or IP:Port
format_bind(Str) when is_list(Str) ->
case emqx_schema:to_ip_port(Str) of
{ok, {Ip, Port}} ->
format_bind({Ip, Port});
{error, _} ->
format_bind(list_to_integer(Str))
end;
format_bind(Bin) when is_binary(Bin) ->
format_bind(binary_to_list(Bin)).
listener_id(Type, ListenerName) -> listener_id(Type, ListenerName) ->
list_to_atom(lists:append([str(Type), ":", str(ListenerName)])). list_to_atom(lists:append([str(Type), ":", str(ListenerName)])).
@ -524,6 +553,27 @@ zone(Opts) ->
limiter(Opts) -> limiter(Opts) ->
maps:get(limiter, Opts, #{}). maps:get(limiter, Opts, #{}).
add_limiter_bucket(Id, #{limiter := Limiter}) ->
maps:fold(
fun(Type, Cfg, _) ->
emqx_limiter_server:add_bucket(Id, Type, Cfg)
end,
ok,
maps:without([client], Limiter)
);
add_limiter_bucket(_Id, _Cfg) ->
ok.
del_limiter_bucket(Id, #{limiter := Limiters}) ->
lists:foreach(
fun(Type) ->
emqx_limiter_server:del_bucket(Id, Type)
end,
maps:keys(Limiters)
);
del_limiter_bucket(_Id, _Cfg) ->
ok.
enable_authn(Opts) -> enable_authn(Opts) ->
maps:get(enable_authn, Opts, true). maps:get(enable_authn, Opts, true).
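The widened format_bind/1 above accepts integers, ip/port tuples, strings and binaries. For quick reference, the intended results (io_lib:chars() flattened to strings here) are:

%% format_bind(1883)                      -> ":1883"
%% format_bind({{0,0,0,0}, 1883})         -> ":1883"
%% format_bind({{127,0,0,1}, 1883})       -> "127.0.0.1:1883"
%% format_bind({{0,0,0,0,0,0,0,1}, 8883}) -> "[::1]:8883"
%% format_bind("127.0.0.1:1883")          -> "127.0.0.1:1883"
%% format_bind(<<"1883">>)                -> ":1883"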

View File

@ -69,9 +69,10 @@ best_effort_json(Input, Opts) ->
jsx:encode(JsonReady, Opts). jsx:encode(JsonReady, Opts).
-spec format(logger:log_event(), config()) -> iodata(). -spec format(logger:log_event(), config()) -> iodata().
format(#{level := Level, msg := Msg, meta := Meta}, Config0) when is_map(Config0) -> format(#{level := Level, msg := Msg, meta := Meta} = Event, Config0) when is_map(Config0) ->
Config = add_default_config(Config0), Config = add_default_config(Config0),
[format(Msg, Meta#{level => Level}, Config), "\n"]. MsgBin = format(Msg, Meta#{level => Level}, Config),
logger_formatter:format(Event#{msg => {string, MsgBin}}, Config).
format(Msg, Meta, Config) -> format(Msg, Meta, Config) ->
Data0 = Data0 =

View File

@ -35,6 +35,8 @@
current_sysmem_percent/0 current_sysmem_percent/0
]). ]).
-export([update/1]).
%% gen_server callbacks %% gen_server callbacks
-export([ -export([
init/1, init/1,
@ -52,6 +54,9 @@
start_link() -> start_link() ->
gen_server:start_link({local, ?OS_MON}, ?MODULE, [], []). gen_server:start_link({local, ?OS_MON}, ?MODULE, [], []).
update(OS) ->
erlang:send(?MODULE, {monitor_conf_update, OS}).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% API %% API
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -87,18 +92,24 @@ current_sysmem_percent() ->
init([]) -> init([]) ->
%% memsup is not reliable, ignore %% memsup is not reliable, ignore
memsup:set_sysmem_high_watermark(1.0), memsup:set_sysmem_high_watermark(1.0),
SysHW = init_os_monitor(),
_ = start_mem_check_timer(),
_ = start_cpu_check_timer(),
{ok, #{sysmem_high_watermark => SysHW}}.
init_os_monitor() ->
init_os_monitor(emqx:get_config([sysmon, os])).
init_os_monitor(OS) ->
#{ #{
sysmem_high_watermark := SysHW, sysmem_high_watermark := SysHW,
procmem_high_watermark := PHW, procmem_high_watermark := PHW,
mem_check_interval := MCI mem_check_interval := MCI
} = emqx:get_config([sysmon, os]), } = OS,
set_procmem_high_watermark(PHW), set_procmem_high_watermark(PHW),
set_mem_check_interval(MCI), set_mem_check_interval(MCI),
ok = update_mem_alarm_status(SysHW), ok = update_mem_alarm_status(SysHW),
_ = start_mem_check_timer(), SysHW.
_ = start_cpu_check_timer(),
{ok, #{sysmem_high_watermark => SysHW}}.
handle_call(get_sysmem_high_watermark, _From, #{sysmem_high_watermark := HWM} = State) -> handle_call(get_sysmem_high_watermark, _From, #{sysmem_high_watermark := HWM} = State) ->
{reply, HWM, State}; {reply, HWM, State};
@ -147,6 +158,9 @@ handle_info({timeout, _Timer, cpu_check}, State) ->
end, end,
ok = start_cpu_check_timer(), ok = start_cpu_check_timer(),
{noreply, State}; {noreply, State};
handle_info({monitor_conf_update, OS}, _State) ->
SysHW = init_os_monitor(OS),
{noreply, #{sysmem_high_watermark => SysHW}};
handle_info(Info, State) -> handle_info(Info, State) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}), ?SLOG(error, #{msg => "unexpected_info", info => Info}),
{noreply, State}. {noreply, State}.
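A minimal sketch of the new hot-update path: on a sysmon.os config change the handler passes the whole map to update/1, which re-runs init_os_monitor/1 inside the server. The field names match init_os_monitor/1 above; the values are purely illustrative:

%% illustrative values only
emqx_os_mon:update(#{
    sysmem_high_watermark => 0.7,
    procmem_high_watermark => 0.05,
    mem_check_interval => 60000
}).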

View File

@ -1635,10 +1635,15 @@ base_listener(Bind) ->
)}, )},
{"limiter", {"limiter",
sc( sc(
map("ratelimit_name", emqx_limiter_schema:bucket_name()), ?R_REF(
emqx_limiter_schema,
listener_fields
),
#{ #{
desc => ?DESC(base_listener_limiter), desc => ?DESC(base_listener_limiter),
default => #{<<"connection">> => <<"default">>} default => #{
<<"connection">> => #{<<"rate">> => <<"1000/s">>, <<"capacity">> => 1000}
}
} }
)}, )},
{"enable_authn", {"enable_authn",
@ -2129,9 +2134,13 @@ to_comma_separated_atoms(Str) ->
to_bar_separated_list(Str) -> to_bar_separated_list(Str) ->
{ok, string:tokens(Str, "| ")}. {ok, string:tokens(Str, "| ")}.
%% @doc Supports the following formats (see the parse examples below):
%% - 127.0.0.1:1883
%% - ::1:1883
%% - [::1]:1883
to_ip_port(Str) -> to_ip_port(Str) ->
case string:tokens(Str, ": ") of case split_ip_port(Str) of
[Ip, Port] -> {Ip, Port} ->
PortVal = list_to_integer(Port), PortVal = list_to_integer(Port),
case inet:parse_address(Ip) of case inet:parse_address(Ip) of
{ok, R} -> {ok, R} ->
@ -2149,6 +2158,26 @@ to_ip_port(Str) ->
{error, Str} {error, Str}
end. end.
split_ip_port(Str0) ->
Str = re:replace(Str0, " ", "", [{return, list}, global]),
case lists:split(string:rchr(Str, $:), Str) of
%% no port
{[], Str} ->
error;
{IpPlusColon, PortString} ->
IpStr0 = lists:droplast(IpPlusColon),
case IpStr0 of
%% drop head/tail brackets
[$[ | S] ->
case lists:last(S) of
$] -> {lists:droplast(S), PortString};
_ -> error
end;
_ ->
{IpStr0, PortString}
end
end.
to_erl_cipher_suite(Str) -> to_erl_cipher_suite(Str) ->
case ssl:str_to_suite(Str) of case ssl:str_to_suite(Str) of
{error, Reason} -> error({invalid_cipher, Reason}); {error, Reason} -> error({invalid_cipher, Reason});
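Expected parses for the formats listed in the doc comment above, derived from the clauses in this hunk (the error shape for an input without a port comes from the unchanged tail of to_ip_port/1):

%% to_ip_port("127.0.0.1:1883") -> {ok, {{127,0,0,1}, 1883}}
%% to_ip_port("[::1]:1883")     -> {ok, {{0,0,0,0,0,0,0,1}, 1883}}
%% to_ip_port("::1:1883")       -> {ok, {{0,0,0,0,0,0,0,1}, 1883}}
%% to_ip_port("1883")           -> {error, "1883"}    %% no host:port separator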

View File

@ -333,7 +333,7 @@ publish(brokers, Nodes) ->
safe_publish(<<"$SYS/brokers">>, #{retain => true}, Payload); safe_publish(<<"$SYS/brokers">>, #{retain => true}, Payload);
publish(stats, Stats) -> publish(stats, Stats) ->
[ [
safe_publish(systop(lists:concat(['stats/', Stat])), integer_to_binary(Val)) safe_publish(systop(stats_topic(Stat)), integer_to_binary(Val))
|| {Stat, Val} <- Stats, is_atom(Stat), is_integer(Val) || {Stat, Val} <- Stats, is_atom(Stat), is_integer(Val)
]; ];
publish(metrics, Metrics) -> publish(metrics, Metrics) ->
@ -351,7 +351,13 @@ publish(Event, Payload) when
safe_publish(Topic, emqx_json:encode(Payload)). safe_publish(Topic, emqx_json:encode(Payload)).
metric_topic(Name) -> metric_topic(Name) ->
lists:concat(["metrics/", string:replace(atom_to_list(Name), ".", "/", all)]). translate_topic("metrics/", Name).
stats_topic(Name) ->
translate_topic("stats/", Name).
translate_topic(Prefix, Name) ->
lists:concat([Prefix, string:replace(atom_to_list(Name), ".", "/", all)]).
safe_publish(Topic, Payload) -> safe_publish(Topic, Payload) ->
safe_publish(Topic, #{}, Payload). safe_publish(Topic, #{}, Payload).
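Both helpers now go through translate_topic/2, which turns the dots of a metric or stat name into topic levels. For example (the names are illustrative; results are iolists shown flattened):

%% metric_topic('messages.received') -> "metrics/messages/received"
%% stats_topic('connections.count')  -> "stats/connections/count"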

View File

@ -35,32 +35,52 @@
terminate/2, terminate/2,
code_change/3 code_change/3
]). ]).
-export([add_handler/0, remove_handler/0, post_config_update/5]).
-export([update/1]).
-define(SYSMON, ?MODULE). -define(SYSMON, ?MODULE).
-define(SYSMON_CONF_ROOT, [sysmon]).
%% @doc Start the system monitor. %% @doc Start the system monitor.
-spec start_link() -> startlink_ret(). -spec start_link() -> startlink_ret().
start_link() -> start_link() ->
gen_server:start_link({local, ?SYSMON}, ?MODULE, [], []). gen_server:start_link({local, ?SYSMON}, ?MODULE, [], []).
add_handler() ->
ok = emqx_config_handler:add_handler(?SYSMON_CONF_ROOT, ?MODULE),
ok.
remove_handler() ->
ok = emqx_config_handler:remove_handler(?SYSMON_CONF_ROOT),
ok.
post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) ->
#{os := OS1, vm := VM1} = OldConf,
#{os := OS2, vm := VM2} = NewConf,
VM1 =/= VM2 andalso ?MODULE:update(VM2),
OS1 =/= OS2 andalso emqx_os_mon:update(OS2),
ok.
update(VM) ->
erlang:send(?MODULE, {monitor_conf_update, VM}).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% gen_server callbacks %% gen_server callbacks
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
init([]) -> init([]) ->
_ = erlang:system_monitor(self(), sysm_opts()),
emqx_logger:set_proc_metadata(#{sysmon => true}), emqx_logger:set_proc_metadata(#{sysmon => true}),
init_system_monitor(),
%% Monitor cluster partition event %% Monitor cluster partition event
ekka:monitor(partition, fun handle_partition_event/1), ekka:monitor(partition, fun handle_partition_event/1),
{ok, start_timer(#{timer => undefined, events => []})}. {ok, start_timer(#{timer => undefined, events => []})}.
start_timer(State) -> start_timer(State) ->
State#{timer := emqx_misc:start_timer(timer:seconds(2), reset)}. State#{timer := emqx_misc:start_timer(timer:seconds(2), reset)}.
sysm_opts() -> sysm_opts(VM) ->
sysm_opts(maps:to_list(emqx:get_config([sysmon, vm])), []). sysm_opts(maps:to_list(VM), []).
sysm_opts([], Acc) -> sysm_opts([], Acc) ->
Acc; Acc;
sysm_opts([{_, disabled} | Opts], Acc) -> sysm_opts([{_, disabled} | Opts], Acc) ->
@ -176,12 +196,16 @@ handle_info({monitor, SusPid, busy_dist_port, Port}, State) ->
); );
handle_info({timeout, _Ref, reset}, State) -> handle_info({timeout, _Ref, reset}, State) ->
{noreply, State#{events := []}, hibernate}; {noreply, State#{events := []}, hibernate};
handle_info({monitor_conf_update, VM}, State) ->
init_system_monitor(VM),
{noreply, State#{events := []}, hibernate};
handle_info(Info, State) -> handle_info(Info, State) ->
?SLOG(error, #{msg => "unexpected_info", info => Info}), ?SLOG(error, #{msg => "unexpected_info", info => Info}),
{noreply, State}. {noreply, State}.
terminate(_Reason, #{timer := TRef}) -> terminate(_Reason, #{timer := TRef}) ->
emqx_misc:cancel_timer(TRef). emqx_misc:cancel_timer(TRef),
ok.
code_change(_OldVsn, State, _Extra) -> code_change(_OldVsn, State, _Extra) ->
{ok, State}. {ok, State}.
@ -237,3 +261,11 @@ safe_publish(Event, WarnMsg) ->
sysmon_msg(Topic, Payload) -> sysmon_msg(Topic, Payload) ->
Msg = emqx_message:make(?SYSMON, Topic, Payload), Msg = emqx_message:make(?SYSMON, Topic, Payload),
emqx_message:set_flag(sys, Msg). emqx_message:set_flag(sys, Msg).
init_system_monitor() ->
VM = emqx:get_config([sysmon, vm]),
init_system_monitor(VM).
init_system_monitor(VM) ->
_ = erlang:system_monitor(self(), sysm_opts(VM)),
ok.

View File

@ -92,15 +92,16 @@ unsubscribe(<<"$SYS/", _/binary>>, _SubOpts) ->
unsubscribe(Topic, SubOpts) -> unsubscribe(Topic, SubOpts) ->
?TRACE("UNSUBSCRIBE", "unsubscribe", #{topic => Topic, sub_opts => SubOpts}). ?TRACE("UNSUBSCRIBE", "unsubscribe", #{topic => Topic, sub_opts => SubOpts}).
log(List, Msg, Meta0) -> log(List, Msg, Meta) ->
Meta = Log = #{level => debug, meta => enrich_meta(Meta), msg => Msg},
case logger:get_process_metadata() of
undefined -> Meta0;
ProcMeta -> maps:merge(ProcMeta, Meta0)
end,
Log = #{level => debug, meta => Meta, msg => Msg},
log_filter(List, Log). log_filter(List, Log).
enrich_meta(Meta) ->
case logger:get_process_metadata() of
undefined -> Meta;
ProcMeta -> maps:merge(ProcMeta, Meta)
end.
log_filter([], _Log) -> log_filter([], _Log) ->
ok; ok;
log_filter([{Id, FilterFun, Filter, Name} | Rest], Log0) -> log_filter([{Id, FilterFun, Filter, Name} | Rest], Log0) ->

View File

@ -16,7 +16,7 @@
-module(emqx_trace_formatter). -module(emqx_trace_formatter).
-export([format/2]). -export([format/2]).
-export([format_meta/1]). -export([format_meta_map/1]).
%%%----------------------------------------------------------------- %%%-----------------------------------------------------------------
%%% API %%% API
@ -31,32 +31,39 @@ format(
ClientId = to_iolist(maps:get(clientid, Meta, "")), ClientId = to_iolist(maps:get(clientid, Meta, "")),
Peername = maps:get(peername, Meta, ""), Peername = maps:get(peername, Meta, ""),
MetaBin = format_meta(Meta, PEncode), MetaBin = format_meta(Meta, PEncode),
[Time, " [", Tag, "] ", ClientId, "@", Peername, " msg: ", Msg, MetaBin, "\n"]; [Time, " [", Tag, "] ", ClientId, "@", Peername, " msg: ", Msg, ", ", MetaBin, "\n"];
format(Event, Config) -> format(Event, Config) ->
emqx_logger_textfmt:format(Event, Config). emqx_logger_textfmt:format(Event, Config).
format_meta(Meta) -> format_meta_map(Meta) ->
Encode = emqx_trace_handler:payload_encode(), Encode = emqx_trace_handler:payload_encode(),
do_format_meta(Meta, Encode). format_meta_map(Meta, Encode).
format_meta(Meta0, Encode) -> format_meta_map(Meta, Encode) ->
Meta1 = #{packet := Packet0, payload := Payload0} = do_format_meta(Meta0, Encode), format_meta_map(Meta, Encode, [{packet, fun format_packet/2}, {payload, fun format_payload/2}]).
Packet = enrich(", packet: ", Packet0),
Payload = enrich(", payload: ", Payload0), format_meta_map(Meta, _Encode, []) ->
Meta2 = maps:without([msg, clientid, peername, packet, payload, trace_tag], Meta1), Meta;
case Meta2 =:= #{} of format_meta_map(Meta, Encode, [{Name, FormatFun} | Rest]) ->
true -> [Packet, Payload]; case Meta of
false -> [Packet, ", ", map_to_iolist(Meta2), Payload] #{Name := Value} ->
NewMeta = Meta#{Name => FormatFun(Value, Encode)},
format_meta_map(NewMeta, Encode, Rest);
#{} ->
format_meta_map(Meta, Encode, Rest)
end. end.
enrich(_, "") -> ""; format_meta(Meta0, Encode) ->
enrich(Key, IoData) -> [Key, IoData]. Meta1 = maps:without([msg, clientid, peername, trace_tag], Meta0),
Meta2 = format_meta_map(Meta1, Encode),
kvs_to_iolist(lists:sort(fun compare_meta_kvs/2, maps:to_list(Meta2))).
do_format_meta(Meta, Encode) -> %% packet always goes first; payload always goes last
Meta#{ compare_meta_kvs(KV1, KV2) -> weight(KV1) =< weight(KV2).
packet => format_packet(maps:get(packet, Meta, undefined), Encode),
payload => format_payload(maps:get(payload, Meta, undefined), Encode) weight({packet, _}) -> {0, packet};
}. weight({payload, _}) -> {2, payload};
weight({K, _}) -> {1, K}.
format_packet(undefined, _) -> ""; format_packet(undefined, _) -> "";
format_packet(Packet, Encode) -> emqx_packet:format(Packet, Encode). format_packet(Packet, Encode) -> emqx_packet:format(Packet, Encode).
@ -69,14 +76,14 @@ format_payload(_, hidden) -> "******".
to_iolist(Atom) when is_atom(Atom) -> atom_to_list(Atom); to_iolist(Atom) when is_atom(Atom) -> atom_to_list(Atom);
to_iolist(Int) when is_integer(Int) -> integer_to_list(Int); to_iolist(Int) when is_integer(Int) -> integer_to_list(Int);
to_iolist(Float) when is_float(Float) -> float_to_list(Float, [{decimals, 2}]); to_iolist(Float) when is_float(Float) -> float_to_list(Float, [{decimals, 2}]);
to_iolist(SubMap) when is_map(SubMap) -> ["[", map_to_iolist(SubMap), "]"]; to_iolist(SubMap) when is_map(SubMap) -> ["[", kvs_to_iolist(maps:to_list(SubMap)), "]"];
to_iolist(Char) -> emqx_logger_textfmt:try_format_unicode(Char). to_iolist(Char) -> emqx_logger_textfmt:try_format_unicode(Char).
map_to_iolist(Map) -> kvs_to_iolist(KVs) ->
lists:join( lists:join(
", ", ", ",
lists:map( lists:map(
fun({K, V}) -> [to_iolist(K), ": ", to_iolist(V)] end, fun({K, V}) -> [to_iolist(K), ": ", to_iolist(V)] end,
maps:to_list(Map) KVs
) )
). ).
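With compare_meta_kvs/2 the packet always renders first and the payload last, with the remaining keys ordered by name in between. A small illustrative sort:

%% lists:sort(fun compare_meta_kvs/2,
%%            [{payload, P}, {clientid, C}, {packet, Pkt}, {qos, 1}])
%% => [{packet, Pkt}, {clientid, C}, {qos, 1}, {payload, P}]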

View File

@ -86,7 +86,7 @@ handle_info({timeout, _Timer, check}, State) ->
}, },
Message Message
); );
_Precent -> _Percent ->
ok ok
end, end,
_ = start_check_timer(), _ = start_check_timer(),

View File

@ -273,7 +273,7 @@ check_origin_header(Req, #{listener := {Type, Listener}} = Opts) ->
end. end.
websocket_init([Req, Opts]) -> websocket_init([Req, Opts]) ->
#{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts, #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener} = ListenerCfg} = Opts,
case check_max_connection(Type, Listener) of case check_max_connection(Type, Listener) of
allow -> allow ->
{Peername, PeerCert} = get_peer_info(Type, Listener, Req, Opts), {Peername, PeerCert} = get_peer_info(Type, Listener, Req, Opts),
@ -287,8 +287,10 @@ websocket_init([Req, Opts]) ->
ws_cookie => WsCookie, ws_cookie => WsCookie,
conn_mod => ?MODULE conn_mod => ?MODULE
}, },
Limiter = emqx_limiter_container:get_limiter_by_names( Limiter = emqx_limiter_container:get_limiter_by_types(
[?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg ListenerCfg,
[?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN],
LimiterCfg
), ),
MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback), MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback),
FrameOpts = #{ FrameOpts = #{
@ -487,9 +489,6 @@ handle_call(From, info, State) ->
handle_call(From, stats, State) -> handle_call(From, stats, State) ->
gen_server:reply(From, stats(State)), gen_server:reply(From, stats(State)),
return(State); return(State);
handle_call(_From, {ratelimit, Type, Bucket}, State = #state{limiter = Limiter}) ->
Limiter2 = emqx_limiter_container:update_by_name(Type, Bucket, Limiter),
{reply, ok, State#state{limiter = Limiter2}};
handle_call(From, Req, State = #state{channel = Channel}) -> handle_call(From, Req, State = #state{channel = Channel}) ->
case emqx_channel:handle_call(Req, Channel) of case emqx_channel:handle_call(Req, Channel) of
{reply, Reply, NChannel} -> {reply, Reply, NChannel} ->

View File

@ -131,16 +131,23 @@ storage_properties(_, Backend) when ?IS_ETS(Backend) ->
storage_properties(_, _) -> storage_properties(_, _) ->
[]. [].
%% Dialyzer sees the compiled literal in
%% `mria:rocksdb_backend_available/0' and complains about the
%% complementary match arm...
-dialyzer({no_match, table_type/1}).
-spec table_type(atom()) -> mria_table_type(). -spec table_type(atom()) -> mria_table_type().
table_type(Table) -> table_type(Table) ->
DiscPersistence = emqx_config:get([?cfg_root, on_disc]), DiscPersistence = emqx_config:get([?cfg_root, on_disc]),
RamCache = get_overlayed(Table, ram_cache), RamCache = get_overlayed(Table, ram_cache),
case {DiscPersistence, RamCache} of RocksDBAvailable = mria:rocksdb_backend_available(),
{true, true} -> case {DiscPersistence, RamCache, RocksDBAvailable} of
{true, true, _} ->
disc_copies; disc_copies;
{true, false} -> {true, false, true} ->
rocksdb_copies; rocksdb_copies;
{false, _} -> {true, false, false} ->
disc_copies;
{false, _, _} ->
ram_copies ram_copies
end. end.

View File

@ -33,18 +33,6 @@ force_gc_conf() ->
force_shutdown_conf() -> force_shutdown_conf() ->
#{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}. #{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}.
rate_limit_conf() ->
#{
conn_bytes_in => ["100KB", "10s"],
conn_messages_in => ["100", "10s"],
max_conn_rate => 1000,
quota =>
#{
conn_messages_routing => infinity,
overall_messages_routing => infinity
}
}.
rpc_conf() -> rpc_conf() ->
#{ #{
async_batch_size => 256, async_batch_size => 256,
@ -173,27 +161,9 @@ listeners_conf() ->
limiter_conf() -> limiter_conf() ->
Make = fun() -> Make = fun() ->
#{ #{
bucket =>
#{
default =>
#{
capacity => infinity,
initial => 0,
rate => infinity,
per_client =>
#{
capacity => infinity,
divisible => false,
failure_strategy => force,
initial => 0,
low_watermark => 0,
max_retry_time => 5000,
rate => infinity
}
}
},
burst => 0, burst => 0,
rate => infinity rate => infinity,
capacity => infinity
} }
end, end,
@ -202,7 +172,7 @@ limiter_conf() ->
Acc#{Name => Make()} Acc#{Name => Make()}
end, end,
#{}, #{},
[bytes_in, message_in, message_routing, connection, batch] [bytes_in, message_in, message_routing, connection, internal]
). ).
stats_conf() -> stats_conf() ->
@ -213,7 +183,6 @@ zone_conf() ->
basic_conf() -> basic_conf() ->
#{ #{
rate_limit => rate_limit_conf(),
force_gc => force_gc_conf(), force_gc => force_gc_conf(),
force_shutdown => force_shutdown_conf(), force_shutdown => force_shutdown_conf(),
mqtt => mqtt_conf(), mqtt => mqtt_conf(),
@ -274,10 +243,9 @@ end_per_suite(_Config) ->
emqx_banned emqx_banned
]). ]).
init_per_testcase(TestCase, Config) -> init_per_testcase(_TestCase, Config) ->
OldConf = set_test_listener_confs(), OldConf = set_test_listener_confs(),
emqx_common_test_helpers:start_apps([]), emqx_common_test_helpers:start_apps([]),
check_modify_limiter(TestCase),
[{config, OldConf} | Config]. [{config, OldConf} | Config].
end_per_testcase(_TestCase, Config) -> end_per_testcase(_TestCase, Config) ->
@ -285,41 +253,6 @@ end_per_testcase(_TestCase, Config) ->
emqx_common_test_helpers:stop_apps([]), emqx_common_test_helpers:stop_apps([]),
Config. Config.
check_modify_limiter(TestCase) ->
Checks = [t_quota_qos0, t_quota_qos1, t_quota_qos2],
case lists:member(TestCase, Checks) of
true ->
modify_limiter();
_ ->
ok
end.
%% per_client 5/1s,5
%% aggregated 10/1s,10
modify_limiter() ->
Limiter = emqx_config:get([limiter]),
#{message_routing := #{bucket := Bucket} = Routing} = Limiter,
#{default := #{per_client := Client} = Default} = Bucket,
Client2 = Client#{
rate := 5,
initial := 0,
capacity := 5,
low_watermark := 1
},
Default2 = Default#{
per_client := Client2,
rate => 10,
initial => 0,
capacity => 10
},
Bucket2 = Bucket#{default := Default2},
Routing2 = Routing#{bucket := Bucket2},
emqx_config:put([limiter], Limiter#{message_routing := Routing2}),
emqx_limiter_manager:restart_server(message_routing),
timer:sleep(100),
ok.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Test cases for channel info/stats/caps %% Test cases for channel info/stats/caps
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -729,6 +662,7 @@ t_process_unsubscribe(_) ->
t_quota_qos0(_) -> t_quota_qos0(_) ->
esockd_limiter:start_link(), esockd_limiter:start_link(),
add_bucket(),
Cnter = counters:new(1, []), Cnter = counters:new(1, []),
ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end), ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end),
ok = meck:expect( ok = meck:expect(
@ -755,10 +689,12 @@ t_quota_qos0(_) ->
ok = meck:expect(emqx_metrics, inc, fun(_) -> ok end), ok = meck:expect(emqx_metrics, inc, fun(_) -> ok end),
ok = meck:expect(emqx_metrics, inc, fun(_, _) -> ok end), ok = meck:expect(emqx_metrics, inc, fun(_, _) -> ok end),
del_bucket(),
esockd_limiter:stop(). esockd_limiter:stop().
t_quota_qos1(_) -> t_quota_qos1(_) ->
esockd_limiter:start_link(), esockd_limiter:start_link(),
add_bucket(),
ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end), ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end),
Chann = channel(#{conn_state => connected, quota => quota()}), Chann = channel(#{conn_state => connected, quota => quota()}),
Pub = ?PUBLISH_PACKET(?QOS_1, <<"topic">>, 1, <<"payload">>), Pub = ?PUBLISH_PACKET(?QOS_1, <<"topic">>, 1, <<"payload">>),
@ -769,10 +705,12 @@ t_quota_qos1(_) ->
{ok, ?PUBACK_PACKET(1, ?RC_SUCCESS), Chann4} = emqx_channel:handle_in(Pub, Chann3), {ok, ?PUBACK_PACKET(1, ?RC_SUCCESS), Chann4} = emqx_channel:handle_in(Pub, Chann3),
%% Quota in overall %% Quota in overall
{ok, ?PUBACK_PACKET(1, ?RC_QUOTA_EXCEEDED), _} = emqx_channel:handle_in(Pub, Chann4), {ok, ?PUBACK_PACKET(1, ?RC_QUOTA_EXCEEDED), _} = emqx_channel:handle_in(Pub, Chann4),
del_bucket(),
esockd_limiter:stop(). esockd_limiter:stop().
t_quota_qos2(_) -> t_quota_qos2(_) ->
esockd_limiter:start_link(), esockd_limiter:start_link(),
add_bucket(),
ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end), ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end),
Chann = channel(#{conn_state => connected, quota => quota()}), Chann = channel(#{conn_state => connected, quota => quota()}),
Pub1 = ?PUBLISH_PACKET(?QOS_2, <<"topic">>, 1, <<"payload">>), Pub1 = ?PUBLISH_PACKET(?QOS_2, <<"topic">>, 1, <<"payload">>),
@ -786,6 +724,7 @@ t_quota_qos2(_) ->
{ok, ?PUBREC_PACKET(3, ?RC_SUCCESS), Chann4} = emqx_channel:handle_in(Pub3, Chann3), {ok, ?PUBREC_PACKET(3, ?RC_SUCCESS), Chann4} = emqx_channel:handle_in(Pub3, Chann3),
%% Quota in overall %% Quota in overall
{ok, ?PUBREC_PACKET(4, ?RC_QUOTA_EXCEEDED), _} = emqx_channel:handle_in(Pub4, Chann4), {ok, ?PUBREC_PACKET(4, ?RC_QUOTA_EXCEEDED), _} = emqx_channel:handle_in(Pub4, Chann4),
del_bucket(),
esockd_limiter:stop(). esockd_limiter:stop().
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -952,12 +891,6 @@ t_handle_call_takeover_end(_) ->
{shutdown, takenover, [], _, _Chan} = {shutdown, takenover, [], _, _Chan} =
emqx_channel:handle_call({takeover, 'end'}, channel()). emqx_channel:handle_call({takeover, 'end'}, channel()).
t_handle_call_quota(_) ->
{reply, ok, _Chan} = emqx_channel:handle_call(
{quota, default},
channel()
).
t_handle_call_unexpected(_) -> t_handle_call_unexpected(_) ->
{reply, ignored, _Chan} = emqx_channel:handle_call(unexpected_req, channel()). {reply, ignored, _Chan} = emqx_channel:handle_call(unexpected_req, channel()).
@ -1176,7 +1109,7 @@ t_ws_cookie_init(_) ->
ConnInfo, ConnInfo,
#{ #{
zone => default, zone => default,
limiter => limiter_cfg(), limiter => undefined,
listener => {tcp, default} listener => {tcp, default}
} }
), ),
@ -1210,7 +1143,7 @@ channel(InitFields) ->
ConnInfo, ConnInfo,
#{ #{
zone => default, zone => default,
limiter => limiter_cfg(), limiter => undefined,
listener => {tcp, default} listener => {tcp, default}
} }
), ),
@ -1270,9 +1203,31 @@ session(InitFields) when is_map(InitFields) ->
%% conn: 5/s; overall: 10/s %% conn: 5/s; overall: 10/s
quota() -> quota() ->
emqx_limiter_container:get_limiter_by_names([message_routing], limiter_cfg()). emqx_limiter_container:get_limiter_by_types(?MODULE, [message_routing], limiter_cfg()).
limiter_cfg() -> #{message_routing => default}. limiter_cfg() ->
Client = #{
rate => 5,
initial => 0,
capacity => 5,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{
message_routing => bucket_cfg(),
client => #{message_routing => Client}
}.
bucket_cfg() ->
#{rate => 10, initial => 0, capacity => 10}.
add_bucket() ->
emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()).
del_bucket() ->
emqx_limiter_server:del_bucket(?MODULE, message_routing).
v4(Channel) -> v4(Channel) ->
ConnInfo = emqx_channel:info(conninfo, Channel), ConnInfo = emqx_channel:info(conninfo, Channel),

View File

@ -44,6 +44,7 @@
client_ssl_twoway/1, client_ssl_twoway/1,
ensure_mnesia_stopped/0, ensure_mnesia_stopped/0,
ensure_quic_listener/2, ensure_quic_listener/2,
is_all_tcp_servers_available/1,
is_tcp_server_available/2, is_tcp_server_available/2,
is_tcp_server_available/3, is_tcp_server_available/3,
load_config/2, load_config/2,
@ -432,6 +433,18 @@ load_config(SchemaModule, Config, Opts) ->
load_config(SchemaModule, Config) -> load_config(SchemaModule, Config) ->
load_config(SchemaModule, Config, #{raw_with_default => false}). load_config(SchemaModule, Config, #{raw_with_default => false}).
-spec is_all_tcp_servers_available(Servers) -> Result when
Servers :: [{Host, Port}],
Host :: inet:socket_address() | inet:hostname(),
Port :: inet:port_number(),
Result :: boolean().
is_all_tcp_servers_available(Servers) ->
Fun =
fun({Host, Port}) ->
is_tcp_server_available(Host, Port)
end,
lists:all(Fun, Servers).
-spec is_tcp_server_available( -spec is_tcp_server_available(
Host :: inet:socket_address() | inet:hostname(), Host :: inet:socket_address() | inet:hostname(),
Port :: inet:port_number() Port :: inet:port_number()
@ -582,6 +595,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
EnvHandler = maps:get(env_handler, Opts, fun(_) -> ok end), EnvHandler = maps:get(env_handler, Opts, fun(_) -> ok end),
ConfigureGenRpc = maps:get(configure_gen_rpc, Opts, true), ConfigureGenRpc = maps:get(configure_gen_rpc, Opts, true),
LoadSchema = maps:get(load_schema, Opts, true), LoadSchema = maps:get(load_schema, Opts, true),
SchemaMod = maps:get(schema_mod, Opts, emqx_schema),
LoadApps = maps:get(load_apps, Opts, [gen_rpc, emqx, ekka, mria] ++ Apps), LoadApps = maps:get(load_apps, Opts, [gen_rpc, emqx, ekka, mria] ++ Apps),
Env = maps:get(env, Opts, []), Env = maps:get(env, Opts, []),
Conf = maps:get(conf, Opts, []), Conf = maps:get(conf, Opts, []),
@ -617,7 +631,7 @@ setup_node(Node, Opts) when is_map(Opts) ->
%% Otherwise, configuration get's loaded and all preset env in envhandler is lost %% Otherwise, configuration get's loaded and all preset env in envhandler is lost
LoadSchema andalso LoadSchema andalso
begin begin
emqx_config:init_load(emqx_schema), emqx_config:init_load(SchemaMod),
application:set_env(emqx, init_config_load_done, true) application:set_env(emqx, init_config_load_done, true)
end, end,

View File

@ -78,6 +78,7 @@ end_per_suite(_Config) ->
init_per_testcase(TestCase, Config) when init_per_testcase(TestCase, Config) when
TestCase =/= t_ws_pingreq_before_connected TestCase =/= t_ws_pingreq_before_connected
-> ->
add_bucket(),
ok = meck:expect(emqx_transport, wait, fun(Sock) -> {ok, Sock} end), ok = meck:expect(emqx_transport, wait, fun(Sock) -> {ok, Sock} end),
ok = meck:expect(emqx_transport, type, fun(_Sock) -> tcp end), ok = meck:expect(emqx_transport, type, fun(_Sock) -> tcp end),
ok = meck:expect( ok = meck:expect(
@ -104,9 +105,11 @@ init_per_testcase(TestCase, Config) when
_ -> Config _ -> Config
end; end;
init_per_testcase(_, Config) -> init_per_testcase(_, Config) ->
add_bucket(),
Config. Config.
end_per_testcase(TestCase, Config) -> end_per_testcase(TestCase, Config) ->
del_bucket(),
case erlang:function_exported(?MODULE, TestCase, 2) of case erlang:function_exported(?MODULE, TestCase, 2) of
true -> ?MODULE:TestCase('end', Config); true -> ?MODULE:TestCase('end', Config);
false -> ok false -> ok
@ -291,11 +294,6 @@ t_handle_call(_) ->
?assertMatch({ok, _St}, handle_msg({event, undefined}, St)), ?assertMatch({ok, _St}, handle_msg({event, undefined}, St)),
?assertMatch({reply, _Info, _NSt}, handle_call(self(), info, St)), ?assertMatch({reply, _Info, _NSt}, handle_call(self(), info, St)),
?assertMatch({reply, _Stats, _NSt}, handle_call(self(), stats, St)), ?assertMatch({reply, _Stats, _NSt}, handle_call(self(), stats, St)),
?assertMatch({reply, ok, _NSt}, handle_call(self(), {ratelimit, []}, St)),
?assertMatch(
{reply, ok, _NSt},
handle_call(self(), {ratelimit, [{bytes_in, default}]}, St)
),
?assertEqual({reply, ignored, St}, handle_call(self(), for_testing, St)), ?assertEqual({reply, ignored, St}, handle_call(self(), for_testing, St)),
?assertMatch( ?assertMatch(
{stop, {shutdown, kicked}, ok, _NSt}, {stop, {shutdown, kicked}, ok, _NSt},
@ -704,7 +702,34 @@ handle_msg(Msg, St) -> emqx_connection:handle_msg(Msg, St).
handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St). handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St).
limiter_cfg() -> #{}. -define(LIMITER_ID, 'tcp:default').
init_limiter() -> init_limiter() ->
emqx_limiter_container:get_limiter_by_names([bytes_in, message_in], limiter_cfg()). emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()).
limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Cfg = bucket_cfg(),
Client = #{
rate => Infinity,
initial => 0,
capacity => Infinity,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() ->
Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).

View File

@ -24,48 +24,7 @@
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl"). -include_lib("common_test/include/ct.hrl").
-define(BASE_CONF, << -define(BASE_CONF, <<"">>).
""
"\n"
"limiter {\n"
" bytes_in {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" message_in {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" connection {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" message_routing {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" batch {\n"
" bucket.retainer {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"}\n"
"\n"
""
>>).
-record(client, { -record(client, {
counter :: counters:counter_ref(), counter :: counters:counter_ref(),
@ -97,6 +56,9 @@ end_per_suite(_Config) ->
init_per_testcase(_TestCase, Config) -> init_per_testcase(_TestCase, Config) ->
Config. Config.
end_per_testcase(_TestCase, Config) ->
Config.
load_conf() -> load_conf() ->
emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF). emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF).
@ -116,12 +78,12 @@ t_consume(_) ->
failure_strategy := force failure_strategy := force
} }
end, end,
Case = fun() -> Case = fun(BucketCfg) ->
Client = connect(default), Client = connect(BucketCfg),
{ok, L2} = emqx_htb_limiter:consume(50, Client), {ok, L2} = emqx_htb_limiter:consume(50, Client),
{ok, _L3} = emqx_htb_limiter:consume(150, L2) {ok, _L3} = emqx_htb_limiter:consume(150, L2)
end, end,
with_per_client(default, Cfg, Case). with_per_client(Cfg, Case).
t_retry(_) -> t_retry(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
@ -133,15 +95,15 @@ t_retry(_) ->
failure_strategy := force failure_strategy := force
} }
end, end,
Case = fun() -> Case = fun(BucketCfg) ->
Client = connect(default), Client = connect(BucketCfg),
{ok, Client} = emqx_htb_limiter:retry(Client), {ok, Client2} = emqx_htb_limiter:retry(Client),
{_, _, Retry, L2} = emqx_htb_limiter:check(150, Client), {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client2),
L3 = emqx_htb_limiter:set_retry(Retry, L2), L3 = emqx_htb_limiter:set_retry(Retry, L2),
timer:sleep(500), timer:sleep(500),
{ok, _L4} = emqx_htb_limiter:retry(L3) {ok, _L4} = emqx_htb_limiter:retry(L3)
end, end,
with_per_client(default, Cfg, Case). with_per_client(Cfg, Case).
t_restore(_) -> t_restore(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
@ -153,15 +115,15 @@ t_restore(_) ->
failure_strategy := force failure_strategy := force
} }
end, end,
Case = fun() -> Case = fun(BucketCfg) ->
Client = connect(default), Client = connect(BucketCfg),
{_, _, Retry, L2} = emqx_htb_limiter:check(150, Client), {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client),
timer:sleep(200), timer:sleep(200),
{ok, L3} = emqx_htb_limiter:check(Retry, L2), {ok, L3} = emqx_htb_limiter:check(Retry, L2),
Available = emqx_htb_limiter:available(L3), Available = emqx_htb_limiter:available(L3),
?assert(Available >= 50) ?assert(Available >= 50)
end, end,
with_per_client(default, Cfg, Case). with_per_client(Cfg, Case).
t_max_retry_time(_) -> t_max_retry_time(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
@ -172,15 +134,15 @@ t_max_retry_time(_) ->
failure_strategy := drop failure_strategy := drop
} }
end, end,
Case = fun() -> Case = fun(BucketCfg) ->
Client = connect(default), Client = connect(BucketCfg),
Begin = ?NOW, Begin = ?NOW,
Result = emqx_htb_limiter:consume(101, Client), Result = emqx_htb_limiter:consume(101, Client),
?assertMatch({drop, _}, Result), ?assertMatch({drop, _}, Result),
Time = ?NOW - Begin, Time = ?NOW - Begin,
?assert(Time >= 500 andalso Time < 550) ?assert(Time >= 500 andalso Time < 550)
end, end,
with_per_client(default, Cfg, Case). with_per_client(Cfg, Case).
t_divisible(_) -> t_divisible(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
@ -191,8 +153,8 @@ t_divisible(_) ->
capacity := 600 capacity := 600
} }
end, end,
Case = fun() -> Case = fun(BucketCfg) ->
Client = connect(default), Client = connect(BucketCfg),
Result = emqx_htb_limiter:check(1000, Client), Result = emqx_htb_limiter:check(1000, Client),
?assertMatch( ?assertMatch(
{partial, 400, {partial, 400,
@ -206,7 +168,7 @@ t_divisible(_) ->
Result Result
) )
end, end,
with_per_client(default, Cfg, Case). with_per_client(Cfg, Case).
t_low_watermark(_) -> t_low_watermark(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
@ -217,8 +179,8 @@ t_low_watermark(_) ->
capacity := 1000 capacity := 1000
} }
end, end,
Case = fun() -> Case = fun(BucketCfg) ->
Client = connect(default), Client = connect(BucketCfg),
Result = emqx_htb_limiter:check(500, Client), Result = emqx_htb_limiter:check(500, Client),
?assertMatch({ok, _}, Result), ?assertMatch({ok, _}, Result),
{_, Client2} = Result, {_, Client2} = Result,
@ -233,28 +195,21 @@ t_low_watermark(_) ->
Result2 Result2
) )
end, end,
with_per_client(default, Cfg, Case). with_per_client(Cfg, Case).
t_infinity_client(_) -> t_infinity_client(_) ->
Fun = fun(#{per_client := Cli} = Bucket) -> Fun = fun(Cfg) -> Cfg end,
Bucket2 = Bucket#{ Case = fun(Cfg) ->
rate := infinity, Client = connect(Cfg),
capacity := infinity
},
Cli2 = Cli#{rate := infinity, capacity := infinity},
Bucket2#{per_client := Cli2}
end,
Case = fun() ->
Client = connect(default),
InfVal = emqx_limiter_schema:infinity_value(), InfVal = emqx_limiter_schema:infinity_value(),
?assertMatch(#{bucket := #{rate := InfVal}}, Client), ?assertMatch(#{bucket := #{rate := InfVal}}, Client),
Result = emqx_htb_limiter:check(100000, Client), Result = emqx_htb_limiter:check(100000, Client),
?assertEqual({ok, Client}, Result) ?assertEqual({ok, Client}, Result)
end, end,
with_bucket(default, Fun, Case). with_per_client(Fun, Case).
t_try_restore_agg(_) -> t_try_restore_agg(_) ->
Fun = fun(#{per_client := Cli} = Bucket) -> Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := 1, rate := 1,
capacity := 200, capacity := 200,
@ -267,20 +222,20 @@ t_try_restore_agg(_) ->
max_retry_time := 100, max_retry_time := 100,
failure_strategy := force failure_strategy := force
}, },
Bucket2#{per_client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun() -> Case = fun(Cfg) ->
Client = connect(default), Client = connect(Cfg),
{_, _, Retry, L2} = emqx_htb_limiter:check(150, Client), {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client),
timer:sleep(200), timer:sleep(200),
{ok, L3} = emqx_htb_limiter:check(Retry, L2), {ok, L3} = emqx_htb_limiter:check(Retry, L2),
Available = emqx_htb_limiter:available(L3), Available = emqx_htb_limiter:available(L3),
?assert(Available >= 50) ?assert(Available >= 50)
end, end,
with_bucket(default, Fun, Case). with_bucket(Fun, Case).
t_short_board(_) -> t_short_board(_) ->
Fun = fun(#{per_client := Cli} = Bucket) -> Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("100/1s"), rate := ?RATE("100/1s"),
initial := 0, initial := 0,
@ -291,18 +246,18 @@ t_short_board(_) ->
capacity := 600, capacity := 600,
initial := 600 initial := 600
}, },
Bucket2#{per_client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun() -> Case = fun(Cfg) ->
Counter = counters:new(1, []), Counter = counters:new(1, []),
start_client(default, ?NOW + 2000, Counter, 20), start_client(Cfg, ?NOW + 2000, Counter, 20),
timer:sleep(2100), timer:sleep(2100),
check_average_rate(Counter, 2, 100) check_average_rate(Counter, 2, 100)
end, end,
with_bucket(default, Fun, Case). with_bucket(Fun, Case).
t_rate(_) -> t_rate(_) ->
Fun = fun(#{per_client := Cli} = Bucket) -> Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("100/100ms"), rate := ?RATE("100/100ms"),
initial := 0, initial := 0,
@ -313,10 +268,10 @@ t_rate(_) ->
capacity := infinity, capacity := infinity,
initial := 0 initial := 0
}, },
Bucket2#{per_client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun() -> Case = fun(Cfg) ->
Client = connect(default), Client = connect(Cfg),
Ts1 = erlang:system_time(millisecond), Ts1 = erlang:system_time(millisecond),
C1 = emqx_htb_limiter:available(Client), C1 = emqx_htb_limiter:available(Client),
timer:sleep(1000), timer:sleep(1000),
@ -326,11 +281,11 @@ t_rate(_) ->
Inc = C2 - C1, Inc = C2 - C1,
?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate") ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate")
end, end,
with_bucket(default, Fun, Case). with_bucket(Fun, Case).
t_capacity(_) -> t_capacity(_) ->
Capacity = 600, Capacity = 600,
Fun = fun(#{per_client := Cli} = Bucket) -> Fun = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("100/100ms"), rate := ?RATE("100/100ms"),
initial := 0, initial := 0,
@ -341,25 +296,25 @@ t_capacity(_) ->
capacity := infinity, capacity := infinity,
initial := 0 initial := 0
}, },
Bucket2#{per_client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun() -> Case = fun(Cfg) ->
Client = connect(default), Client = connect(Cfg),
timer:sleep(1000), timer:sleep(1000),
C1 = emqx_htb_limiter:available(Client), C1 = emqx_htb_limiter:available(Client),
?assertEqual(Capacity, C1, "test bucket capacity") ?assertEqual(Capacity, C1, "test bucket capacity")
end, end,
with_bucket(default, Fun, Case). with_bucket(Fun, Case).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Test Cases Global Level %% Test Cases Global Level
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
t_collaborative_alloc(_) -> t_collaborative_alloc(_) ->
GlobalMod = fun(Cfg) -> GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{rate := ?RATE("600/1s")} Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
end, end,
Bucket1 = fun(#{per_client := Cli} = Bucket) -> Bucket1 = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("400/1s"), rate := ?RATE("400/1s"),
initial := 0, initial := 0,
@ -370,7 +325,7 @@ t_collaborative_alloc(_) ->
capacity := 100, capacity := 100,
initial := 100 initial := 100
}, },
Bucket2#{per_client := Cli2} Bucket2#{client := Cli2}
end, end,
Bucket2 = fun(Bucket) -> Bucket2 = fun(Bucket) ->
@ -381,8 +336,8 @@ t_collaborative_alloc(_) ->
Case = fun() -> Case = fun() ->
C1 = counters:new(1, []), C1 = counters:new(1, []),
C2 = counters:new(1, []), C2 = counters:new(1, []),
start_client(b1, ?NOW + 2000, C1, 20), start_client({b1, Bucket1}, ?NOW + 2000, C1, 20),
start_client(b2, ?NOW + 2000, C2, 30), start_client({b2, Bucket2}, ?NOW + 2000, C2, 30),
timer:sleep(2100), timer:sleep(2100),
check_average_rate(C1, 2, 300), check_average_rate(C1, 2, 300),
check_average_rate(C2, 2, 300) check_average_rate(C2, 2, 300)
@ -395,14 +350,16 @@ t_collaborative_alloc(_) ->
). ).
t_burst(_) -> t_burst(_) ->
GlobalMod = fun(Cfg) -> GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{ Cfg#{
message_routing := MR#{
rate := ?RATE("200/1s"), rate := ?RATE("200/1s"),
burst := ?RATE("400/1s") burst := ?RATE("400/1s")
} }
}
end, end,
Bucket = fun(#{per_client := Cli} = Bucket) -> Bucket = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := ?RATE("200/1s"), rate := ?RATE("200/1s"),
initial := 0, initial := 0,
@ -413,16 +370,16 @@ t_burst(_) ->
capacity := 200, capacity := 200,
divisible := true divisible := true
}, },
Bucket2#{per_client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun() -> Case = fun() ->
C1 = counters:new(1, []), C1 = counters:new(1, []),
C2 = counters:new(1, []), C2 = counters:new(1, []),
C3 = counters:new(1, []), C3 = counters:new(1, []),
start_client(b1, ?NOW + 2000, C1, 20), start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
start_client(b2, ?NOW + 2000, C2, 30), start_client({b2, Bucket}, ?NOW + 2000, C2, 30),
start_client(b3, ?NOW + 2000, C3, 30), start_client({b3, Bucket}, ?NOW + 2000, C3, 30),
timer:sleep(2100), timer:sleep(2100),
Total = lists:sum([counters:get(X, 1) || X <- [C1, C2, C3]]), Total = lists:sum([counters:get(X, 1) || X <- [C1, C2, C3]]),
@ -436,11 +393,11 @@ t_burst(_) ->
). ).
t_limit_global_with_unlimit_other(_) -> t_limit_global_with_unlimit_other(_) ->
GlobalMod = fun(Cfg) -> GlobalMod = fun(#{message_routing := MR} = Cfg) ->
Cfg#{rate := ?RATE("600/1s")} Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
end, end,
Bucket = fun(#{per_client := Cli} = Bucket) -> Bucket = fun(#{client := Cli} = Bucket) ->
Bucket2 = Bucket#{ Bucket2 = Bucket#{
rate := infinity, rate := infinity,
initial := 0, initial := 0,
@ -451,12 +408,12 @@ t_limit_global_with_unlimit_other(_) ->
capacity := infinity, capacity := infinity,
initial := 0 initial := 0
}, },
Bucket2#{per_client := Cli2} Bucket2#{client := Cli2}
end, end,
Case = fun() -> Case = fun() ->
C1 = counters:new(1, []), C1 = counters:new(1, []),
start_client(b1, ?NOW + 2000, C1, 20), start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
timer:sleep(2100), timer:sleep(2100),
check_average_rate(C1, 2, 600) check_average_rate(C1, 2, 600)
end, end,
@ -470,28 +427,6 @@ t_limit_global_with_unlimit_other(_) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Test Cases container %% Test Cases container
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
t_new_container(_) ->
C1 = emqx_limiter_container:new(),
C2 = emqx_limiter_container:new([message_routing]),
C3 = emqx_limiter_container:update_by_name(message_routing, default, C1),
?assertMatch(
#{
message_routing := _,
retry_ctx := undefined,
{retry, message_routing} := _
},
C2
),
?assertMatch(
#{
message_routing := _,
retry_ctx := undefined,
{retry, message_routing} := _
},
C3
),
ok.
t_check_container(_) -> t_check_container(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
Cfg#{ Cfg#{
@ -500,10 +435,11 @@ t_check_container(_) ->
capacity := 1000 capacity := 1000
} }
end, end,
Case = fun() -> Case = fun(#{client := Client} = BucketCfg) ->
C1 = emqx_limiter_container:new( C1 = emqx_limiter_container:get_limiter_by_types(
?MODULE,
[message_routing], [message_routing],
#{message_routing => default} #{message_routing => BucketCfg, client => #{message_routing => Client}}
), ),
{ok, C2} = emqx_limiter_container:check(1000, message_routing, C1), {ok, C2} = emqx_limiter_container:check(1000, message_routing, C1),
{pause, Pause, C3} = emqx_limiter_container:check(1000, message_routing, C2), {pause, Pause, C3} = emqx_limiter_container:check(1000, message_routing, C2),
@ -514,7 +450,39 @@ t_check_container(_) ->
RetryData = emqx_limiter_container:get_retry_context(C5), RetryData = emqx_limiter_container:get_retry_context(C5),
?assertEqual(Context, RetryData) ?assertEqual(Context, RetryData)
end, end,
with_per_client(default, Cfg, Case). with_per_client(Cfg, Case).
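For reference, a minimal sketch of the container API exercised above, assuming the message_routing limiter server is running as it is in this suite; the limiter id my_listener and the function name container_sketch/0 are placeholders, and the config shape mirrors make_limiter_cfg/0 defined later in this file:

container_sketch() ->
    Id = my_listener,
    Infinity = emqx_limiter_schema:infinity_value(),
    BucketCfg = #{rate => Infinity, initial => 0, capacity => Infinity},
    ClientCfg = BucketCfg#{
        low_watermark => 0,
        divisible => false,
        max_retry_time => timer:seconds(5),
        failure_strategy => force
    },
    %% register the bucket on the limiter server under the id and type
    emqx_limiter_server:add_bucket(Id, message_routing, BucketCfg),
    %% build a per-connection container for the wanted types
    Container = emqx_limiter_container:get_limiter_by_types(
        Id,
        [message_routing],
        #{message_routing => BucketCfg, client => #{message_routing => ClientCfg}}
    ),
    {ok, _Container1} = emqx_limiter_container:check(1, message_routing, Container),
    emqx_limiter_server:del_bucket(Id, message_routing).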
%%--------------------------------------------------------------------
%% Test Override
%%--------------------------------------------------------------------
t_bucket_no_client(_) ->
Rate = ?RATE("1/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := Rate}}}
end,
BucketMod = fun(Bucket) ->
maps:remove(client, Bucket)
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := Rate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
t_bucket_client(_) ->
GlobalRate = ?RATE("1/s"),
BucketRate = ?RATE("10/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}}
end,
BucketMod = fun(#{client := Client} = Bucket) ->
Bucket#{client := Client#{rate := BucketRate}}
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := BucketRate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Test Cases misc %% Test Cases misc
@ -607,19 +575,23 @@ t_schema_unit(_) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%%% Internal functions %%% Internal functions
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
start_client(Name, EndTime, Counter, Number) -> start_client(Cfg, EndTime, Counter, Number) ->
lists:foreach( lists:foreach(
fun(_) -> fun(_) ->
spawn(fun() -> spawn(fun() ->
start_client(Name, EndTime, Counter) do_start_client(Cfg, EndTime, Counter)
end) end)
end, end,
lists:seq(1, Number) lists:seq(1, Number)
). ).
start_client(Name, EndTime, Counter) -> do_start_client({Name, CfgFun}, EndTime, Counter) ->
#{per_client := PerClient} = do_start_client(Name, CfgFun(make_limiter_cfg()), EndTime, Counter);
emqx_config:get([limiter, message_routing, bucket, Name]), do_start_client(Cfg, EndTime, Counter) ->
do_start_client(?MODULE, Cfg, EndTime, Counter).
do_start_client(Name, Cfg, EndTime, Counter) ->
#{client := PerClient} = Cfg,
#{rate := Rate} = PerClient, #{rate := Rate} = PerClient,
Client = #client{ Client = #client{
start = ?NOW, start = ?NOW,
@ -627,7 +599,7 @@ start_client(Name, EndTime, Counter) ->
counter = Counter, counter = Counter,
obtained = 0, obtained = 0,
rate = Rate, rate = Rate,
client = connect(Name) client = connect(Name, Cfg)
}, },
client_loop(Client). client_loop(Client).
@ -711,35 +683,50 @@ to_rate(Str) ->
{ok, Rate} = emqx_limiter_schema:to_rate(Str), {ok, Rate} = emqx_limiter_schema:to_rate(Str),
Rate. Rate.
with_global(Modifier, BuckeTemps, Case) -> with_global(Modifier, Buckets, Case) ->
Fun = fun(Cfg) -> with_config([limiter], Modifier, Buckets, Case).
#{bucket := #{default := BucketCfg}} = Cfg2 = Modifier(Cfg),
Fun = fun({Name, BMod}, Acc) ->
Acc#{Name => BMod(BucketCfg)}
end,
Buckets = lists:foldl(Fun, #{}, BuckeTemps),
Cfg2#{bucket := Buckets}
end,
with_config([limiter, message_routing], Fun, Case). with_bucket(Modifier, Case) ->
Cfg = Modifier(make_limiter_cfg()),
add_bucket(Cfg),
Case(Cfg),
del_bucket().
with_bucket(Bucket, Modifier, Case) -> with_per_client(Modifier, Case) ->
Path = [limiter, message_routing, bucket, Bucket], #{client := Client} = Cfg = make_limiter_cfg(),
with_config(Path, Modifier, Case). Cfg2 = Cfg#{client := Modifier(Client)},
add_bucket(Cfg2),
Case(Cfg2),
del_bucket().
with_per_client(Bucket, Modifier, Case) -> with_config(Path, Modifier, Buckets, Case) ->
Path = [limiter, message_routing, bucket, Bucket, per_client],
with_config(Path, Modifier, Case).
with_config(Path, Modifier, Case) ->
Cfg = emqx_config:get(Path), Cfg = emqx_config:get(Path),
NewCfg = Modifier(Cfg), NewCfg = Modifier(Cfg),
ct:pal("test with config:~p~n", [NewCfg]),
emqx_config:put(Path, NewCfg), emqx_config:put(Path, NewCfg),
emqx_limiter_server:restart(message_routing), emqx_limiter_server:restart(message_routing),
timer:sleep(500), timer:sleep(500),
BucketCfg = make_limiter_cfg(),
lists:foreach(
fun
({Name, BucketFun}) ->
add_bucket(Name, BucketFun(BucketCfg));
(BucketFun) ->
add_bucket(BucketFun(BucketCfg))
end,
Buckets
),
DelayReturn = delay_return(Case), DelayReturn = delay_return(Case),
lists:foreach(
fun
({Name, _Cfg}) ->
del_bucket(Name);
(_Cfg) ->
del_bucket()
end,
Buckets
),
emqx_config:put(Path, Cfg), emqx_config:put(Path, Cfg),
emqx_limiter_server:restart(message_routing),
DelayReturn(). DelayReturn().
delay_return(Case) -> delay_return(Case) ->
@ -751,10 +738,40 @@ delay_return(Case) ->
fun() -> erlang:raise(Type, Reason, Trace) end fun() -> erlang:raise(Type, Reason, Trace) end
end. end.
connect(Name) -> connect({Name, CfgFun}) ->
{ok, Limiter} = emqx_limiter_server:connect(message_routing, Name), connect(Name, CfgFun(make_limiter_cfg()));
connect(Cfg) ->
connect(?MODULE, Cfg).
connect(Name, Cfg) ->
{ok, Limiter} = emqx_limiter_server:connect(Name, message_routing, Cfg),
Limiter. Limiter.
make_limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Client = #{
rate => Infinity,
initial => 0,
capacity => Infinity,
low_watermark => 0,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{client => Client, rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket(Cfg) ->
add_bucket(?MODULE, Cfg).
add_bucket(Name, Cfg) ->
emqx_limiter_server:add_bucket(Name, message_routing, Cfg).
del_bucket() ->
del_bucket(?MODULE).
del_bucket(Name) ->
emqx_limiter_server:del_bucket(Name, message_routing).
check_average_rate(Counter, Second, Rate) -> check_average_rate(Counter, Second, Rate) ->
Cost = counters:get(Counter, 1), Cost = counters:get(Counter, 1),
PerSec = Cost / Second, PerSec = Cost / Second,

View File

@ -59,6 +59,7 @@ init_per_testcase(TestCase, Config) when
TestCase =/= t_ws_pingreq_before_connected, TestCase =/= t_ws_pingreq_before_connected,
TestCase =/= t_ws_non_check_origin TestCase =/= t_ws_non_check_origin
-> ->
add_bucket(),
%% Meck Cm %% Meck Cm
ok = meck:new(emqx_cm, [passthrough, no_history, no_link]), ok = meck:new(emqx_cm, [passthrough, no_history, no_link]),
ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end), ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
@ -96,6 +97,7 @@ init_per_testcase(TestCase, Config) when
| Config | Config
]; ];
init_per_testcase(t_ws_non_check_origin, Config) -> init_per_testcase(t_ws_non_check_origin, Config) ->
add_bucket(),
ok = emqx_common_test_helpers:start_apps([]), ok = emqx_common_test_helpers:start_apps([]),
PrevConfig = emqx_config:get_listener_conf(ws, default, [websocket]), PrevConfig = emqx_config:get_listener_conf(ws, default, [websocket]),
emqx_config:put_listener_conf(ws, default, [websocket, check_origin_enable], false), emqx_config:put_listener_conf(ws, default, [websocket, check_origin_enable], false),
@ -105,6 +107,7 @@ init_per_testcase(t_ws_non_check_origin, Config) ->
| Config | Config
]; ];
init_per_testcase(_, Config) -> init_per_testcase(_, Config) ->
add_bucket(),
PrevConfig = emqx_config:get_listener_conf(ws, default, [websocket]), PrevConfig = emqx_config:get_listener_conf(ws, default, [websocket]),
ok = emqx_common_test_helpers:start_apps([]), ok = emqx_common_test_helpers:start_apps([]),
[ [
@ -119,6 +122,7 @@ end_per_testcase(TestCase, _Config) when
TestCase =/= t_ws_non_check_origin, TestCase =/= t_ws_non_check_origin,
TestCase =/= t_ws_pingreq_before_connected TestCase =/= t_ws_pingreq_before_connected
-> ->
del_bucket(),
lists:foreach( lists:foreach(
fun meck:unload/1, fun meck:unload/1,
[ [
@ -131,11 +135,13 @@ end_per_testcase(TestCase, _Config) when
] ]
); );
end_per_testcase(t_ws_non_check_origin, Config) -> end_per_testcase(t_ws_non_check_origin, Config) ->
del_bucket(),
PrevConfig = ?config(prev_config, Config), PrevConfig = ?config(prev_config, Config),
emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig), emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
emqx_common_test_helpers:stop_apps([]), emqx_common_test_helpers:stop_apps([]),
ok; ok;
end_per_testcase(_, Config) -> end_per_testcase(_, Config) ->
del_bucket(),
PrevConfig = ?config(prev_config, Config), PrevConfig = ?config(prev_config, Config),
emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig), emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
emqx_common_test_helpers:stop_apps([]), emqx_common_test_helpers:stop_apps([]),
@ -501,15 +507,12 @@ t_handle_timeout_emit_stats(_) ->
?assertEqual(undefined, ?ws_conn:info(stats_timer, St)). ?assertEqual(undefined, ?ws_conn:info(stats_timer, St)).
t_ensure_rate_limit(_) -> t_ensure_rate_limit(_) ->
%% XXX In the future, limiter should provide API for config update
Path = [limiter, bytes_in, bucket, default, per_client],
PerClient = emqx_config:get(Path),
{ok, Rate} = emqx_limiter_schema:to_rate("50MB"), {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
emqx_config:put(Path, PerClient#{rate := Rate}), Limiter = init_limiter(#{
emqx_limiter_server:restart(bytes_in), bytes_in => bucket_cfg(),
timer:sleep(100), message_in => bucket_cfg(),
client => #{bytes_in => client_cfg(Rate)}
Limiter = init_limiter(), }),
St = st(#{limiter => Limiter}), St = st(#{limiter => Limiter}),
%% must bigger than value in emqx_ratelimit_SUITE %% must bigger than value in emqx_ratelimit_SUITE
@ -522,11 +525,7 @@ t_ensure_rate_limit(_) ->
St St
), ),
?assertEqual(blocked, ?ws_conn:info(sockstate, St1)), ?assertEqual(blocked, ?ws_conn:info(sockstate, St1)),
?assertEqual([{active, false}], ?ws_conn:info(postponed, St1)), ?assertEqual([{active, false}], ?ws_conn:info(postponed, St1)).
emqx_config:put(Path, PerClient),
emqx_limiter_server:restart(bytes_in),
timer:sleep(100).
t_parse_incoming(_) -> t_parse_incoming(_) ->
{Packets, St} = ?ws_conn:parse_incoming(<<48, 3>>, [], st()), {Packets, St} = ?ws_conn:parse_incoming(<<48, 3>>, [], st()),
@ -691,7 +690,44 @@ ws_client(State) ->
ct:fail(ws_timeout) ct:fail(ws_timeout)
end. end.
limiter_cfg() -> #{bytes_in => default, message_in => default}. -define(LIMITER_ID, 'ws:default').
init_limiter() -> init_limiter() ->
emqx_limiter_container:get_limiter_by_names([bytes_in, message_in], limiter_cfg()). init_limiter(limiter_cfg()).
init_limiter(LimiterCfg) ->
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg).
limiter_cfg() ->
Cfg = bucket_cfg(),
Client = client_cfg(),
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
client_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
client_cfg(Infinity).
client_cfg(Rate) ->
Infinity = emqx_limiter_schema:infinity_value(),
#{
rate => Rate,
initial => 0,
capacity => Infinity,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() ->
Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).

View File

@ -36,6 +36,6 @@
-type authenticator_id() :: binary(). -type authenticator_id() :: binary().
-endif.
-define(RESOURCE_GROUP, <<"emqx_authn">>). -define(RESOURCE_GROUP, <<"emqx_authn">>).
-endif.

View File

@ -33,14 +33,8 @@
% Swagger % Swagger
-define(API_TAGS_GLOBAL, [ -define(API_TAGS_GLOBAL, [<<"Authentication">>]).
?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY, -define(API_TAGS_SINGLE, [<<"Listener authentication">>]).
<<"authentication config(global)">>
]).
-define(API_TAGS_SINGLE, [
?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY,
<<"authentication config(single listener)">>
]).
-export([ -export([
api_spec/0, api_spec/0,

View File

@ -29,15 +29,8 @@
-define(NOT_FOUND, 'NOT_FOUND'). -define(NOT_FOUND, 'NOT_FOUND').
% Swagger % Swagger
-define(API_TAGS_GLOBAL, [<<"Authentication">>]).
-define(API_TAGS_GLOBAL, [ -define(API_TAGS_SINGLE, [<<"Listener authentication">>]).
?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY,
<<"authentication config(global)">>
]).
-define(API_TAGS_SINGLE, [
?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_BINARY,
<<"authentication config(single listener)">>
]).
-export([ -export([
api_spec/0, api_spec/0,
@ -66,15 +59,7 @@ schema("/authentication/:id/import_users") ->
tags => ?API_TAGS_GLOBAL, tags => ?API_TAGS_GLOBAL,
description => ?DESC(authentication_id_import_users_post), description => ?DESC(authentication_id_import_users_post),
parameters => [emqx_authn_api:param_auth_id()], parameters => [emqx_authn_api:param_auth_id()],
'requestBody' => #{ 'requestBody' => emqx_dashboard_swagger:file_schema(filename),
content => #{
'multipart/form-data' => #{
schema => #{
filename => file
}
}
}
},
responses => #{ responses => #{
204 => <<"Users imported">>, 204 => <<"Users imported">>,
400 => error_codes([?BAD_REQUEST], <<"Bad Request">>), 400 => error_codes([?BAD_REQUEST], <<"Bad Request">>),
@ -89,15 +74,7 @@ schema("/listeners/:listener_id/authentication/:id/import_users") ->
tags => ?API_TAGS_SINGLE, tags => ?API_TAGS_SINGLE,
description => ?DESC(listeners_listener_id_authentication_id_import_users_post), description => ?DESC(listeners_listener_id_authentication_id_import_users_post),
parameters => [emqx_authn_api:param_listener_id(), emqx_authn_api:param_auth_id()], parameters => [emqx_authn_api:param_listener_id(), emqx_authn_api:param_auth_id()],
'requestBody' => #{ 'requestBody' => emqx_dashboard_swagger:file_schema(filename),
content => #{
'multipart/form-data' => #{
schema => #{
filename => file
}
}
}
},
responses => #{ responses => #{
204 => <<"Users imported">>, 204 => <<"Users imported">>,
400 => error_codes([?BAD_REQUEST], <<"Bad Request">>), 400 => error_codes([?BAD_REQUEST], <<"Bad Request">>),

View File

@ -33,7 +33,8 @@
bin/1, bin/1,
ensure_apps_started/1, ensure_apps_started/1,
cleanup_resources/0, cleanup_resources/0,
make_resource_id/1 make_resource_id/1,
without_password/1
]). ]).
-define(AUTHN_PLACEHOLDERS, [ -define(AUTHN_PLACEHOLDERS, [
@ -117,21 +118,21 @@ parse_sql(Template, ReplaceWith) ->
render_deep(Template, Credential) -> render_deep(Template, Credential) ->
emqx_placeholder:proc_tmpl_deep( emqx_placeholder:proc_tmpl_deep(
Template, Template,
Credential, mapping_credential(Credential),
#{return => full_binary, var_trans => fun handle_var/2} #{return => full_binary, var_trans => fun handle_var/2}
). ).
render_str(Template, Credential) -> render_str(Template, Credential) ->
emqx_placeholder:proc_tmpl( emqx_placeholder:proc_tmpl(
Template, Template,
Credential, mapping_credential(Credential),
#{return => full_binary, var_trans => fun handle_var/2} #{return => full_binary, var_trans => fun handle_var/2}
). ).
render_sql_params(ParamList, Credential) -> render_sql_params(ParamList, Credential) ->
emqx_placeholder:proc_tmpl( emqx_placeholder:proc_tmpl(
ParamList, ParamList,
Credential, mapping_credential(Credential),
#{return => rawlist, var_trans => fun handle_sql_var/2} #{return => rawlist, var_trans => fun handle_sql_var/2}
). ).
@ -199,10 +200,23 @@ make_resource_id(Name) ->
NameBin = bin(Name), NameBin = bin(Name),
emqx_resource:generate_id(NameBin). emqx_resource:generate_id(NameBin).
without_password(Credential) ->
without_password(Credential, [password, <<"password">>]).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Internal functions %% Internal functions
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
without_password(Credential, []) ->
Credential;
without_password(Credential, [Name | Rest]) ->
case maps:is_key(Name, Credential) of
true ->
without_password(Credential#{Name => <<"[password]">>}, Rest);
false ->
without_password(Credential, Rest)
end.
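A small illustration of the intended effect, using made-up credential values: without_password/1 masks both the atom and the binary password key before a credential is logged, and credentials carrying neither key pass through unchanged.

%% expected behaviour of the helper above (illustrative values)
#{username := <<"u1">>, password := <<"[password]">>} =
    without_password(#{username => <<"u1">>, password => <<"secret">>}).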
handle_var({var, Name}, undefined) -> handle_var({var, Name}, undefined) ->
error({cannot_get_variable, Name}); error({cannot_get_variable, Name});
handle_var({var, <<"peerhost">>}, PeerHost) -> handle_var({var, <<"peerhost">>}, PeerHost) ->
@ -216,3 +230,8 @@ handle_sql_var({var, <<"peerhost">>}, PeerHost) ->
emqx_placeholder:bin(inet:ntoa(PeerHost)); emqx_placeholder:bin(inet:ntoa(PeerHost));
handle_sql_var(_, Value) -> handle_sql_var(_, Value) ->
emqx_placeholder:sql_data(Value). emqx_placeholder:sql_data(Value).
mapping_credential(C = #{cn := CN, dn := DN}) ->
C#{cert_common_name => CN, cert_subject => DN};
mapping_credential(C) ->
C.
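Similarly, a brief illustration (values are made up) of the credential mapping added above: the TLS peer certificate fields cn/dn are copied onto the placeholder names, so ${cert_common_name} and ${cert_subject} can be rendered in templates, while credentials without cn/dn are returned unchanged.

%% illustrative input/output of mapping_credential/1
#{cert_common_name := <<"client0">>, cert_subject := <<"CN=client0,O=EMQX">>} =
    mapping_credential(#{cn => <<"client0">>, dn => <<"CN=client0,O=EMQX">>}).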

View File

@ -331,7 +331,10 @@ check_client_first_message(Bin, _Cache, #{iteration_count := IterationCount} = S
{continue, ServerFirstMessage, Cache}; {continue, ServerFirstMessage, Cache};
ignore -> ignore ->
ignore; ignore;
{error, _Reason} -> {error, Reason} ->
?TRACE_AUTHN_PROVIDER("check_client_first_message_error", #{
reason => Reason
}),
{error, not_authorized} {error, not_authorized}
end. end.
@ -344,7 +347,10 @@ check_client_final_message(Bin, #{is_superuser := IsSuperuser} = Cache, #{algori
of of
{ok, ServerFinalMessage} -> {ok, ServerFinalMessage} ->
{ok, #{is_superuser => IsSuperuser}, ServerFinalMessage}; {ok, #{is_superuser => IsSuperuser}, ServerFinalMessage};
{error, _Reason} -> {error, Reason} ->
?TRACE_AUTHN_PROVIDER("check_client_final_message_error", #{
reason => Reason
}),
{error, not_authorized} {error, not_authorized}
end. end.

View File

@ -188,23 +188,22 @@ authenticate(
} = State } = State
) -> ) ->
Request = generate_request(Credential, State), Request = generate_request(Credential, State),
case emqx_resource:query(ResourceId, {Method, Request, RequestTimeout}) of Response = emqx_resource:query(ResourceId, {Method, Request, RequestTimeout}),
?TRACE_AUTHN_PROVIDER("http_response", #{
request => request_for_log(Credential, State),
response => response_for_log(Response),
resource => ResourceId
}),
case Response of
{ok, 204, _Headers} -> {ok, 204, _Headers} ->
{ok, #{is_superuser => false}}; {ok, #{is_superuser => false}};
{ok, 200, Headers, Body} -> {ok, 200, Headers, Body} ->
handle_response(Headers, Body); handle_response(Headers, Body);
{ok, _StatusCode, _Headers} = Response -> {ok, _StatusCode, _Headers} = Response ->
log_response(ResourceId, Response),
ignore; ignore;
{ok, _StatusCode, _Headers, _Body} = Response -> {ok, _StatusCode, _Headers, _Body} = Response ->
log_response(ResourceId, Response),
ignore; ignore;
{error, Reason} -> {error, _Reason} ->
?SLOG(error, #{
msg => "http_server_query_failed",
resource => ResourceId,
reason => Reason
}),
ignore ignore
end. end.
@ -296,7 +295,8 @@ parse_config(
cow_qs:parse_qs(to_bin(Query)) cow_qs:parse_qs(to_bin(Query))
), ),
body_template => emqx_authn_utils:parse_deep(maps:get(body, Config, #{})), body_template => emqx_authn_utils:parse_deep(maps:get(body, Config, #{})),
request_timeout => RequestTimeout request_timeout => RequestTimeout,
url => RawUrl
}, },
{Config#{base_url => BaseUrl, pool_type => random}, State}. {Config#{base_url => BaseUrl, pool_type => random}, State}.
@ -379,11 +379,6 @@ parse_body(<<"application/x-www-form-urlencoded", _/binary>>, Body) ->
parse_body(ContentType, _) -> parse_body(ContentType, _) ->
{error, {unsupported_content_type, ContentType}}. {error, {unsupported_content_type, ContentType}}.
may_append_body(Output, {ok, _, _, Body}) ->
Output#{body => Body};
may_append_body(Output, {ok, _, _}) ->
Output.
uri_encode(T) -> uri_encode(T) ->
emqx_http_lib:uri_encode(to_list(T)). emqx_http_lib:uri_encode(to_list(T)).
@ -391,26 +386,33 @@ encode_path(Path) ->
Parts = string:split(Path, "/", all), Parts = string:split(Path, "/", all),
lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]). lists:flatten(["/" ++ Part || Part <- lists:map(fun uri_encode/1, Parts)]).
log_response(ResourceId, Other) -> request_for_log(Credential, #{url := Url} = State) ->
Output = may_append_body(#{resource => ResourceId}, Other), SafeCredential = emqx_authn_utils:without_password(Credential),
case erlang:element(2, Other) of case generate_request(SafeCredential, State) of
Code5xx when Code5xx >= 500 andalso Code5xx < 600 -> {PathQuery, Headers} ->
?SLOG(error, Output#{ #{
msg => "http_server_error", method => post,
code => Code5xx base_url => Url,
}); path_query => PathQuery,
Code4xx when Code4xx >= 400 andalso Code4xx < 500 -> headers => Headers
?SLOG(warning, Output#{ };
msg => "refused_by_http_server", {PathQuery, Headers, Body} ->
code => Code4xx #{
}); method => post,
OtherCode -> base_url => Url,
?SLOG(error, Output#{ path_query => PathQuery,
msg => "undesired_response_code", headers => Headers,
code => OtherCode body => Body
}) }
end. end.
response_for_log({ok, StatusCode, Headers}) ->
#{status => StatusCode, headers => Headers};
response_for_log({ok, StatusCode, Headers, Body}) ->
#{status => StatusCode, headers => Headers, body => Body};
response_for_log({error, Error}) ->
#{error => Error}.
to_list(A) when is_atom(A) -> to_list(A) when is_atom(A) ->
atom_to_list(A); atom_to_list(A);
to_list(B) when is_binary(B) -> to_list(B) when is_binary(B) ->

View File

@ -75,26 +75,11 @@ fields('jwks') ->
{pool_size, fun emqx_connector_schema_lib:pool_size/1}, {pool_size, fun emqx_connector_schema_lib:pool_size/1},
{refresh_interval, fun refresh_interval/1}, {refresh_interval, fun refresh_interval/1},
{ssl, #{ {ssl, #{
type => hoconsc:union([ type => hoconsc:ref(emqx_schema, "ssl_client_opts"),
hoconsc:ref(?MODULE, ssl_enable),
hoconsc:ref(?MODULE, ssl_disable)
]),
desc => ?DESC(ssl),
default => #{<<"enable">> => false}, default => #{<<"enable">> => false},
required => false desc => ?DESC("ssl")
}} }}
] ++ common_fields(); ] ++ common_fields().
fields(ssl_enable) ->
[
{enable, #{type => true, desc => ?DESC(enable)}},
{cacertfile, fun cacertfile/1},
{certfile, fun certfile/1},
{keyfile, fun keyfile/1},
{verify, fun verify/1},
{server_name_indication, fun server_name_indication/1}
];
fields(ssl_disable) ->
[{enable, #{type => false, desc => ?DESC(enable)}}].
desc('hmac-based') -> desc('hmac-based') ->
?DESC('hmac-based'); ?DESC('hmac-based');
@ -147,27 +132,6 @@ refresh_interval(default) -> 300;
refresh_interval(validator) -> [fun(I) -> I > 0 end]; refresh_interval(validator) -> [fun(I) -> I > 0 end];
refresh_interval(_) -> undefined. refresh_interval(_) -> undefined.
cacertfile(type) -> string();
cacertfile(desc) -> ?DESC(?FUNCTION_NAME);
cacertfile(_) -> undefined.
certfile(type) -> string();
certfile(desc) -> ?DESC(?FUNCTION_NAME);
certfile(_) -> undefined.
keyfile(type) -> string();
keyfile(desc) -> ?DESC(?FUNCTION_NAME);
keyfile(_) -> undefined.
verify(type) -> hoconsc:enum([verify_peer, verify_none]);
verify(desc) -> ?DESC(?FUNCTION_NAME);
verify(default) -> verify_none;
verify(_) -> undefined.
server_name_indication(type) -> string();
server_name_indication(desc) -> ?DESC(?FUNCTION_NAME);
server_name_indication(_) -> undefined.
verify_claims(type) -> verify_claims(type) ->
list(); list();
verify_claims(desc) -> verify_claims(desc) ->
@ -263,8 +227,7 @@ authenticate(
) -> ) ->
case emqx_resource:query(ResourceId, get_jwks) of case emqx_resource:query(ResourceId, get_jwks) of
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{ ?TRACE_AUTHN_PROVIDER(error, "get_jwks_failed", #{
msg => "get_jwks_failed",
resource => ResourceId, resource => ResourceId,
reason => Reason reason => Reason
}), }),
@ -386,10 +349,17 @@ verify(undefined, _, _, _) ->
ignore; ignore;
verify(JWT, JWKs, VerifyClaims, AclClaimName) -> verify(JWT, JWKs, VerifyClaims, AclClaimName) ->
case do_verify(JWT, JWKs, VerifyClaims) of case do_verify(JWT, JWKs, VerifyClaims) of
{ok, Extra} -> {ok, acl(Extra, AclClaimName)}; {ok, Extra} ->
{error, {missing_claim, _}} -> {error, bad_username_or_password}; {ok, acl(Extra, AclClaimName)};
{error, invalid_signature} -> ignore; {error, {missing_claim, Claim}} ->
{error, {claims, _}} -> {error, bad_username_or_password} ?TRACE_AUTHN_PROVIDER("missing_jwt_claim", #{jwt => JWT, claim => Claim}),
{error, bad_username_or_password};
{error, invalid_signature} ->
?TRACE_AUTHN_PROVIDER("invalid_jwt_signature", #{jwks => JWKs, jwt => JWT}),
ignore;
{error, {claims, Claims}} ->
?TRACE_AUTHN_PROVIDER("invalid_jwt_claims", #{jwt => JWT, claims => Claims}),
{error, bad_username_or_password}
end. end.
acl(Claims, AclClaimName) -> acl(Claims, AclClaimName) ->
@ -407,11 +377,11 @@ acl(Claims, AclClaimName) ->
end, end,
maps:merge(emqx_authn_utils:is_superuser(Claims), Acl). maps:merge(emqx_authn_utils:is_superuser(Claims), Acl).
do_verify(_JWS, [], _VerifyClaims) -> do_verify(_JWT, [], _VerifyClaims) ->
{error, invalid_signature}; {error, invalid_signature};
do_verify(JWS, [JWK | More], VerifyClaims) -> do_verify(JWT, [JWK | More], VerifyClaims) ->
try jose_jws:verify(JWK, JWS) of try jose_jws:verify(JWK, JWT) of
{true, Payload, _JWS} -> {true, Payload, _JWT} ->
Claims0 = emqx_json:decode(Payload, [return_maps]), Claims0 = emqx_json:decode(Payload, [return_maps]),
Claims = try_convert_to_int(Claims0, [<<"exp">>, <<"iat">>, <<"nbf">>]), Claims = try_convert_to_int(Claims0, [<<"exp">>, <<"iat">>, <<"nbf">>]),
case verify_claims(Claims, VerifyClaims) of case verify_claims(Claims, VerifyClaims) of
@ -421,11 +391,11 @@ do_verify(JWS, [JWK | More], VerifyClaims) ->
{error, Reason} {error, Reason}
end; end;
{false, _, _} -> {false, _, _} ->
do_verify(JWS, More, VerifyClaims) do_verify(JWT, More, VerifyClaims)
catch catch
_:_Reason -> _:Reason ->
?TRACE("JWT", "authn_jwt_invalid_signature", #{jwk => JWK, jws => JWS}), ?TRACE_AUTHN_PROVIDER("jwt_verify_error", #{jwk => JWK, jwt => JWT, reason => Reason}),
{error, invalid_signature} do_verify(JWT, More, VerifyClaims)
end. end.
verify_claims(Claims, VerifyClaims0) -> verify_claims(Claims, VerifyClaims0) ->

View File

@ -17,6 +17,7 @@
-module(emqx_authn_mnesia). -module(emqx_authn_mnesia).
-include("emqx_authn.hrl"). -include("emqx_authn.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("stdlib/include/ms_transform.hrl"). -include_lib("stdlib/include/ms_transform.hrl").
-include_lib("hocon/include/hoconsc.hrl"). -include_lib("hocon/include/hoconsc.hrl").
@ -158,6 +159,7 @@ authenticate(
UserID = get_user_identity(Credential, Type), UserID = get_user_identity(Credential, Type),
case mnesia:dirty_read(?TAB, {UserGroup, UserID}) of case mnesia:dirty_read(?TAB, {UserGroup, UserID}) of
[] -> [] ->
?TRACE_AUTHN_PROVIDER("user_not_found"),
ignore; ignore;
[#user_info{password_hash = PasswordHash, salt = Salt, is_superuser = IsSuperuser}] -> [#user_info{password_hash = PasswordHash, salt = Salt, is_superuser = IsSuperuser}] ->
case case
@ -165,8 +167,10 @@ authenticate(
Algorithm, Salt, PasswordHash, Password Algorithm, Salt, PasswordHash, Password
) )
of of
true -> {ok, #{is_superuser => IsSuperuser}}; true ->
false -> {error, bad_username_or_password} {ok, #{is_superuser => IsSuperuser}};
false ->
{error, bad_username_or_password}
end end
end. end.

View File

@ -167,8 +167,7 @@ authenticate(
undefined -> undefined ->
ignore; ignore;
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{ ?TRACE_AUTHN_PROVIDER(error, "mongodb_query_failed", #{
msg => "mongodb_query_failed",
resource => ResourceId, resource => ResourceId,
collection => Collection, collection => Collection,
filter => Filter, filter => Filter,
@ -180,11 +179,11 @@ authenticate(
ok -> ok ->
{ok, is_superuser(Doc, State)}; {ok, is_superuser(Doc, State)};
{error, {cannot_find_password_hash_field, PasswordHashField}} -> {error, {cannot_find_password_hash_field, PasswordHashField}} ->
?SLOG(error, #{ ?TRACE_AUTHN_PROVIDER(error, "cannot_find_password_hash_field", #{
msg => "cannot_find_password_hash_field",
resource => ResourceId, resource => ResourceId,
collection => Collection, collection => Collection,
filter => Filter, filter => Filter,
document => Doc,
password_hash_field => PasswordHashField password_hash_field => PasswordHashField
}), }),
ignore; ignore;

View File

@ -130,8 +130,7 @@ authenticate(
{error, Reason} {error, Reason}
end; end;
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{ ?TRACE_AUTHN_PROVIDER(error, "mysql_query_failed", #{
msg => "mysql_query_failed",
resource => ResourceId, resource => ResourceId,
tmpl_token => TmplToken, tmpl_token => TmplToken,
params => Params, params => Params,

View File

@ -133,8 +133,7 @@ authenticate(
{error, Reason} {error, Reason}
end; end;
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{ ?TRACE_AUTHN_PROVIDER(error, "postgresql_query_failed", #{
msg => "postgresql_query_failed",
resource => ResourceId, resource => ResourceId,
params => Params, params => Params,
reason => Reason reason => Reason

View File

@ -128,13 +128,14 @@ authenticate(#{auth_method := _}, _) ->
authenticate( authenticate(
#{password := Password} = Credential, #{password := Password} = Credential,
#{ #{
cmd := {Command, KeyTemplate, Fields}, cmd := {CommandName, KeyTemplate, Fields},
resource_id := ResourceId, resource_id := ResourceId,
password_hash_algorithm := Algorithm password_hash_algorithm := Algorithm
} }
) -> ) ->
NKey = emqx_authn_utils:render_str(KeyTemplate, Credential), NKey = emqx_authn_utils:render_str(KeyTemplate, Credential),
case emqx_resource:query(ResourceId, {cmd, [Command, NKey | Fields]}) of Command = [CommandName, NKey | Fields],
case emqx_resource:query(ResourceId, {cmd, Command}) of
{ok, []} -> {ok, []} ->
ignore; ignore;
{ok, Values} -> {ok, Values} ->
@ -150,8 +151,7 @@ authenticate(
{error, Reason} {error, Reason}
end; end;
{error, Reason} -> {error, Reason} ->
?SLOG(error, #{ ?TRACE_AUTHN_PROVIDER(error, "redis_query_failed", #{
msg => "redis_query_failed",
resource => ResourceId, resource => ResourceId,
cmd => Command, cmd => Command,
keys => NKey, keys => NKey,

View File

@ -34,7 +34,9 @@
password => <<"plain">>, password => <<"plain">>,
peerhost => {127, 0, 0, 1}, peerhost => {127, 0, 0, 1},
listener => 'tcp:default', listener => 'tcp:default',
protocol => mqtt protocol => mqtt,
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>
}). }).
-define(SERVER_RESPONSE_JSON(Result), ?SERVER_RESPONSE_JSON(Result, false)). -define(SERVER_RESPONSE_JSON(Result), ?SERVER_RESPONSE_JSON(Result, false)).
@ -517,7 +519,9 @@ samples() ->
<<"username">> := <<"plain">>, <<"username">> := <<"plain">>,
<<"password">> := <<"plain">>, <<"password">> := <<"plain">>,
<<"clientid">> := <<"clienta">>, <<"clientid">> := <<"clienta">>,
<<"peerhost">> := <<"127.0.0.1">> <<"peerhost">> := <<"127.0.0.1">>,
<<"cert_subject">> := <<"cert_subject_data">>,
<<"cert_common_name">> := <<"cert_common_name_data">>
} = jiffy:decode(RawBody, [return_maps]), } = jiffy:decode(RawBody, [return_maps]),
Req = cowboy_req:reply( Req = cowboy_req:reply(
200, 200,
@ -534,7 +538,9 @@ samples() ->
<<"clientid">> => ?PH_CLIENTID, <<"clientid">> => ?PH_CLIENTID,
<<"username">> => ?PH_USERNAME, <<"username">> => ?PH_USERNAME,
<<"password">> => ?PH_PASSWORD, <<"password">> => ?PH_PASSWORD,
<<"peerhost">> => ?PH_PEERHOST <<"peerhost">> => ?PH_PEERHOST,
<<"cert_subject">> => ?PH_CERT_SUBJECT,
<<"cert_common_name">> => ?PH_CERT_CN_NAME
} }
}, },
result => {ok, #{is_superuser => false, user_property => #{}}} result => {ok, #{is_superuser => false, user_property => #{}}}

View File

@ -345,6 +345,33 @@ user_seeds() ->
result => {ok, #{is_superuser => true}} result => {ok, #{is_superuser => true}}
}, },
#{
data => #{
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>,
password_hash =>
<<"ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf">>,
salt => <<"salt">>,
is_superuser => 1
},
credentials => #{
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>,
password => <<"sha256">>
},
config_params => #{
<<"filter">> => #{
<<"cert_subject">> => <<"${cert_subject}">>,
<<"cert_common_name">> => <<"${cert_common_name}">>
},
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"prefix">>
}
},
result => {ok, #{is_superuser => true}}
},
#{ #{
data => #{ data => #{
username => <<"bcrypt">>, username => <<"bcrypt">>,

View File

@ -318,6 +318,36 @@ user_seeds() ->
result => {ok, #{is_superuser => true}} result => {ok, #{is_superuser => true}}
}, },
#{
data => #{
username => "sha256",
password_hash => "ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf",
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>,
salt => "salt",
is_superuser_int => 1
},
credentials => #{
clientid => <<"sha256">>,
password => <<"sha256">>,
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>
},
config_params => #{
<<"query">> =>
<<
"SELECT password_hash, salt, is_superuser_int as is_superuser\n"
" FROM users where cert_subject = ${cert_subject} AND \n"
" cert_common_name = ${cert_common_name} LIMIT 1"
>>,
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"prefix">>
}
},
result => {ok, #{is_superuser => true}}
},
#{ #{
data => #{ data => #{
username => <<"bcrypt">>, username => <<"bcrypt">>,
@ -433,14 +463,24 @@ init_seeds() ->
" username VARCHAR(255),\n" " username VARCHAR(255),\n"
" password_hash VARCHAR(255),\n" " password_hash VARCHAR(255),\n"
" salt VARCHAR(255),\n" " salt VARCHAR(255),\n"
" cert_subject VARCHAR(255),\n"
" cert_common_name VARCHAR(255),\n"
" is_superuser_str VARCHAR(255),\n" " is_superuser_str VARCHAR(255),\n"
" is_superuser_int TINYINT)" " is_superuser_int TINYINT)"
), ),
Fields = [username, password_hash, salt, is_superuser_str, is_superuser_int], Fields = [
username,
password_hash,
salt,
cert_subject,
cert_common_name,
is_superuser_str,
is_superuser_int
],
InsertQuery = InsertQuery =
"INSERT INTO users(username, password_hash, salt, " "INSERT INTO users(username, password_hash, salt, cert_subject, cert_common_name,"
" is_superuser_str, is_superuser_int) VALUES(?, ?, ?, ?, ?)", " is_superuser_str, is_superuser_int) VALUES(?, ?, ?, ?, ?, ?, ?)",
lists:foreach( lists:foreach(
fun(#{data := Values}) -> fun(#{data := Values}) ->

View File

@ -380,6 +380,36 @@ user_seeds() ->
result => {ok, #{is_superuser => true}} result => {ok, #{is_superuser => true}}
}, },
#{
data => #{
username => "sha256",
password_hash => "ac63a624e7074776d677dd61a003b8c803eb11db004d0ec6ae032a5d7c9c5caf",
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>,
salt => "salt",
is_superuser_int => 1
},
credentials => #{
clientid => <<"sha256">>,
password => <<"sha256">>,
cert_subject => <<"cert_subject_data">>,
cert_common_name => <<"cert_common_name_data">>
},
config_params => #{
<<"query">> =>
<<
"SELECT password_hash, salt, is_superuser_int as is_superuser\n"
" FROM users where cert_subject = ${cert_subject} AND \n"
" cert_common_name = ${cert_common_name} LIMIT 1"
>>,
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"prefix">>
}
},
result => {ok, #{is_superuser => true}}
},
#{ #{
data => #{ data => #{
username => <<"bcrypt">>, username => <<"bcrypt">>,
@ -474,6 +504,8 @@ init_seeds() ->
" username varchar(255),\n" " username varchar(255),\n"
" password_hash varchar(255),\n" " password_hash varchar(255),\n"
" salt varchar(255),\n" " salt varchar(255),\n"
" cert_subject varchar(255),\n"
" cert_common_name varchar(255),\n"
" is_superuser_str varchar(255),\n" " is_superuser_str varchar(255),\n"
" is_superuser_int smallint,\n" " is_superuser_int smallint,\n"
" is_superuser_bool boolean)" " is_superuser_bool boolean)"
@ -487,12 +519,21 @@ init_seeds() ->
). ).
create_user(Values) -> create_user(Values) ->
Fields = [username, password_hash, salt, is_superuser_str, is_superuser_int, is_superuser_bool], Fields = [
username,
password_hash,
salt,
cert_subject,
cert_common_name,
is_superuser_str,
is_superuser_int,
is_superuser_bool
],
InsertQuery = InsertQuery =
"INSERT INTO users(username, password_hash, salt," "INSERT INTO users(username, password_hash, salt, cert_subject, cert_common_name, "
"is_superuser_str, is_superuser_int, is_superuser_bool) " "is_superuser_str, is_superuser_int, is_superuser_bool) "
"VALUES($1, $2, $3, $4, $5, $6)", "VALUES($1, $2, $3, $4, $5, $6, $7, $8)",
Params = [maps:get(F, Values, null) || F <- Fields], Params = [maps:get(F, Values, null) || F <- Fields],
{ok, 1} = q(InsertQuery, Params), {ok, 1} = q(InsertQuery, Params),

View File

@ -475,6 +475,52 @@ user_seeds() ->
} }
}, },
result => {ok, #{is_superuser => true}} result => {ok, #{is_superuser => true}}
},
#{
data => #{
password_hash =>
<<"a3c7f6b085c3e5897ffb9b86f18a9d905063f8550a74444b5892e193c1b50428">>,
is_superuser => <<"1">>
},
credentials => #{
clientid => <<"sha256_no_salt">>,
cn => <<"cert_common_name">>,
dn => <<"cert_subject_name">>,
password => <<"sha256_no_salt">>
},
key => <<"mqtt_user:cert_common_name">>,
config_params => #{
<<"cmd">> => <<"HMGET mqtt_user:${cert_common_name} password_hash is_superuser">>,
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"disable">>
}
},
result => {ok, #{is_superuser => true}}
},
#{
data => #{
password_hash =>
<<"a3c7f6b085c3e5897ffb9b86f18a9d905063f8550a74444b5892e193c1b50428">>,
is_superuser => <<"1">>
},
credentials => #{
clientid => <<"sha256_no_salt">>,
cn => <<"cert_common_name">>,
dn => <<"cert_subject_name">>,
password => <<"sha256_no_salt">>
},
key => <<"mqtt_user:cert_subject_name">>,
config_params => #{
<<"cmd">> => <<"HMGET mqtt_user:${cert_subject} password_hash is_superuser">>,
<<"password_hash_algorithm">> => #{
<<"name">> => <<"sha256">>,
<<"salt_position">> => <<"disable">>
}
},
result => {ok, #{is_superuser => true}}
} }
]. ].

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_authz, [ {application, emqx_authz, [
{description, "An OTP application"}, {description, "An OTP application"},
{vsn, "0.1.2"}, {vsn, "0.1.3"},
{registered, []}, {registered, []},
{mod, {emqx_authz_app, []}}, {mod, {emqx_authz_app, []}},
{applications, [ {applications, [

View File

@ -53,11 +53,12 @@
-type sources() :: [source()]. -type sources() :: [source()].
-define(METRIC_SUPERUSER, 'authorization.superuser').
-define(METRIC_ALLOW, 'authorization.matched.allow'). -define(METRIC_ALLOW, 'authorization.matched.allow').
-define(METRIC_DENY, 'authorization.matched.deny'). -define(METRIC_DENY, 'authorization.matched.deny').
-define(METRIC_NOMATCH, 'authorization.nomatch'). -define(METRIC_NOMATCH, 'authorization.nomatch').
-define(METRICS, [?METRIC_ALLOW, ?METRIC_DENY, ?METRIC_NOMATCH]). -define(METRICS, [?METRIC_SUPERUSER, ?METRIC_ALLOW, ?METRIC_DENY, ?METRIC_NOMATCH]).
-define(IS_ENABLED(Enable), ((Enable =:= true) or (Enable =:= <<"true">>))). -define(IS_ENABLED(Enable), ((Enable =:= true) or (Enable =:= <<"true">>))).
@ -308,6 +309,30 @@ authorize(
Topic, Topic,
DefaultResult, DefaultResult,
Sources Sources
) ->
case maps:get(is_superuser, Client, false) of
true ->
log_allowed(#{
username => Username,
ipaddr => IpAddress,
topic => Topic,
is_superuser => true
}),
emqx_metrics:inc(?METRIC_SUPERUSER),
{stop, allow};
false ->
authorize_non_superuser(Client, PubSub, Topic, DefaultResult, Sources)
end.
authorize_non_superuser(
#{
username := Username,
peerhost := IpAddress
} = Client,
PubSub,
Topic,
DefaultResult,
Sources
) -> ) ->
case do_authorize(Client, PubSub, Topic, sources_with_defaults(Sources)) of case do_authorize(Client, PubSub, Topic, sources_with_defaults(Sources)) of
{{matched, allow}, AuthzSource} -> {{matched, allow}, AuthzSource} ->
@ -315,8 +340,7 @@ authorize(
'client.check_authz_complete', 'client.check_authz_complete',
[Client, PubSub, Topic, allow, AuthzSource] [Client, PubSub, Topic, allow, AuthzSource]
), ),
?SLOG(info, #{ log_allowed(#{
msg => "authorization_permission_allowed",
username => Username, username => Username,
ipaddr => IpAddress, ipaddr => IpAddress,
topic => Topic, topic => Topic,
@ -356,6 +380,9 @@ authorize(
{stop, DefaultResult} {stop, DefaultResult}
end. end.
log_allowed(Meta) ->
?SLOG(info, Meta#{msg => "authorization_permission_allowed"}).
do_authorize(_Client, _PubSub, _Topic, []) -> do_authorize(_Client, _PubSub, _Topic, []) ->
nomatch; nomatch;
do_authorize(Client, PubSub, Topic, [#{enable := false} | Rest]) -> do_authorize(Client, PubSub, Topic, [#{enable := false} | Rest]) ->

View File

@ -50,6 +50,8 @@
aggregate_metrics/1 aggregate_metrics/1
]). ]).
-define(TAGS, [<<"Authorization">>]).
api_spec() -> api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
@ -70,6 +72,7 @@ schema("/authorization/sources") ->
get => get =>
#{ #{
description => ?DESC(authorization_sources_get), description => ?DESC(authorization_sources_get),
tags => ?TAGS,
responses => responses =>
#{ #{
200 => mk( 200 => mk(
@ -81,6 +84,7 @@ schema("/authorization/sources") ->
post => post =>
#{ #{
description => ?DESC(authorization_sources_post), description => ?DESC(authorization_sources_post),
tags => ?TAGS,
'requestBody' => mk( 'requestBody' => mk(
hoconsc:union(authz_sources_type_refs()), hoconsc:union(authz_sources_type_refs()),
#{desc => ?DESC(source_config)} #{desc => ?DESC(source_config)}
@ -101,6 +105,7 @@ schema("/authorization/sources/:type") ->
get => get =>
#{ #{
description => ?DESC(authorization_sources_type_get), description => ?DESC(authorization_sources_type_get),
tags => ?TAGS,
parameters => parameters_field(), parameters => parameters_field(),
responses => responses =>
#{ #{
@ -114,6 +119,7 @@ schema("/authorization/sources/:type") ->
put => put =>
#{ #{
description => ?DESC(authorization_sources_type_put), description => ?DESC(authorization_sources_type_put),
tags => ?TAGS,
parameters => parameters_field(), parameters => parameters_field(),
'requestBody' => mk(hoconsc:union(authz_sources_type_refs())), 'requestBody' => mk(hoconsc:union(authz_sources_type_refs())),
responses => responses =>
@ -125,6 +131,7 @@ schema("/authorization/sources/:type") ->
delete => delete =>
#{ #{
description => ?DESC(authorization_sources_type_delete), description => ?DESC(authorization_sources_type_delete),
tags => ?TAGS,
parameters => parameters_field(), parameters => parameters_field(),
responses => responses =>
#{ #{
@ -139,6 +146,7 @@ schema("/authorization/sources/:type/status") ->
get => get =>
#{ #{
description => ?DESC(authorization_sources_type_status_get), description => ?DESC(authorization_sources_type_status_get),
tags => ?TAGS,
parameters => parameters_field(), parameters => parameters_field(),
responses => responses =>
#{ #{
@ -159,6 +167,7 @@ schema("/authorization/sources/:type/move") ->
post => post =>
#{ #{
description => ?DESC(authorization_sources_type_move_post), description => ?DESC(authorization_sources_type_move_post),
tags => ?TAGS,
parameters => parameters_field(), parameters => parameters_field(),
'requestBody' => 'requestBody' =>
emqx_dashboard_swagger:schema_with_examples( emqx_dashboard_swagger:schema_with_examples(
@ -564,6 +573,10 @@ authz_sources_type_refs() ->
bin(Term) -> erlang:iolist_to_binary(io_lib:format("~p", [Term])). bin(Term) -> erlang:iolist_to_binary(io_lib:format("~p", [Term])).
status_metrics_example() -> status_metrics_example() ->
#{
'metrics_example' => #{
summary => <<"Showing a typical metrics example">>,
value =>
#{ #{
resource_metrics => #{ resource_metrics => #{
matched => 0, matched => 0,
@ -617,6 +630,8 @@ status_metrics_example() ->
status => connected status => connected
} }
] ]
}
}
}. }.
create_authz_file(Body) -> create_authz_file(Body) ->

View File

@ -84,8 +84,6 @@ t_ok(_Config) ->
<<"rules">> => <<"{allow, {user, \"username\"}, publish, [\"t\"]}.">> <<"rules">> => <<"{allow, {user, \"username\"}, publish, [\"t\"]}.">>
}), }),
io:format("~p", [emqx_authz:acl_conf_file()]),
?assertEqual( ?assertEqual(
allow, allow,
emqx_access_control:authorize(ClientInfo, publish, <<"t">>) emqx_access_control:authorize(ClientInfo, publish, <<"t">>)
@ -96,6 +94,31 @@ t_ok(_Config) ->
emqx_access_control:authorize(ClientInfo, subscribe, <<"t">>) emqx_access_control:authorize(ClientInfo, subscribe, <<"t">>)
). ).
t_superuser(_Config) ->
ClientInfo = #{
clientid => <<"clientid">>,
username => <<"username">>,
is_superuser => true,
peerhost => {127, 0, 0, 1},
zone => default,
listener => {tcp, default}
},
%% no rules apply to superuser
ok = setup_config(?RAW_SOURCE#{
<<"rules">> => <<"{deny, {user, \"username\"}, publish, [\"t\"]}.">>
}),
?assertEqual(
allow,
emqx_access_control:authorize(ClientInfo, publish, <<"t">>)
),
?assertEqual(
allow,
emqx_access_control:authorize(ClientInfo, subscribe, <<"t">>)
).
t_invalid_file(_Config) -> t_invalid_file(_Config) ->
?assertMatch( ?assertMatch(
{error, bad_acl_file_content}, {error, bad_acl_file_content},

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_auto_subscribe, [ {application, emqx_auto_subscribe, [
{description, "An OTP application"}, {description, "An OTP application"},
{vsn, "0.1.0"}, {vsn, "0.1.1"},
{registered, []}, {registered, []},
{mod, {emqx_auto_subscribe_app, []}}, {mod, {emqx_auto_subscribe_app, []}},
{applications, [ {applications, [

View File

@ -44,12 +44,14 @@ schema("/mqtt/auto_subscribe") ->
'operationId' => auto_subscribe, 'operationId' => auto_subscribe,
get => #{ get => #{
description => ?DESC(list_auto_subscribe_api), description => ?DESC(list_auto_subscribe_api),
tags => [<<"Auto subscribe">>],
responses => #{ responses => #{
200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe") 200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe")
} }
}, },
put => #{ put => #{
description => ?DESC(update_auto_subscribe_api), description => ?DESC(update_auto_subscribe_api),
tags => [<<"Auto subscribe">>],
'requestBody' => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"), 'requestBody' => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"),
responses => #{ responses => #{
200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"), 200 => hoconsc:ref(emqx_auto_subscribe_schema, "auto_subscribe"),

View File

@ -127,6 +127,17 @@ HTTP 请求的正文。</br>
} }
} }
config_max_retries {
desc {
en: """HTTP request max retry times if failed."""
zh: """HTTP 请求失败最大重试次数"""
}
label: {
en: "HTTP Request Max Retries"
zh: "HTTP 请求重试次数"
}
}
desc_type { desc_type {
desc { desc {
en: """The Bridge Type""" en: """The Bridge Type"""

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_bridge, [ {application, emqx_bridge, [
{description, "An OTP application"}, {description, "An OTP application"},
{vsn, "0.1.0"}, {vsn, "0.1.1"},
{registered, []}, {registered, []},
{mod, {emqx_bridge_app, []}}, {mod, {emqx_bridge_app, []}},
{applications, [ {applications, [

View File

@ -225,7 +225,6 @@ info_example_basic(webhook, _) ->
request_timeout => <<"15s">>, request_timeout => <<"15s">>,
connect_timeout => <<"15s">>, connect_timeout => <<"15s">>,
max_retries => 3, max_retries => 3,
retry_interval => <<"10s">>,
pool_type => <<"random">>, pool_type => <<"random">>,
pool_size => 4, pool_size => 4,
enable_pipelining => 100, enable_pipelining => 100,

View File

@ -238,7 +238,8 @@ parse_confs(
method := Method, method := Method,
body := Body, body := Body,
headers := Headers, headers := Headers,
request_timeout := ReqTimeout request_timeout := ReqTimeout,
max_retries := Retry
} = Conf } = Conf
) -> ) ->
{BaseUrl, Path} = parse_url(Url), {BaseUrl, Path} = parse_url(Url),
@ -251,7 +252,8 @@ parse_confs(
method => Method, method => Method,
body => Body, body => Body,
headers => Headers, headers => Headers,
request_timeout => ReqTimeout request_timeout => ReqTimeout,
max_retries => Retry
} }
}; };
parse_confs(Type, Name, #{connector := ConnId, direction := Direction} = Conf) when parse_confs(Type, Name, #{connector := ConnId, direction := Direction} = Conf) when

View File

@ -14,7 +14,46 @@ namespace() -> "bridge".
roots() -> []. roots() -> [].
fields("config") -> fields("config") ->
basic_config() ++ basic_config() ++ request_config();
fields("post") ->
[
type_field(),
name_field()
] ++ fields("config");
fields("put") ->
fields("config");
fields("get") ->
emqx_bridge_schema:metrics_status_fields() ++ fields("post").
desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for WebHook using `", string:to_upper(Method), "` method."];
desc(_) ->
undefined.
basic_config() ->
[
{enable,
mk(
boolean(),
#{
desc => ?DESC("config_enable"),
default => true
}
)},
{direction,
mk(
egress,
#{
desc => ?DESC("config_direction"),
default => egress
}
)}
] ++
proplists:delete(base_url, emqx_connector_http:fields(config)).
request_config() ->
[ [
{url, {url,
mk( mk(
@ -59,6 +98,14 @@ fields("config") ->
desc => ?DESC("config_body") desc => ?DESC("config_body")
} }
)}, )},
{max_retries,
mk(
non_neg_integer(),
#{
default => 2,
desc => ?DESC("config_max_retries")
}
)},
{request_timeout, {request_timeout,
mk( mk(
emqx_schema:duration_ms(), emqx_schema:duration_ms(),
@ -67,44 +114,7 @@ fields("config") ->
desc => ?DESC("config_request_timeout") desc => ?DESC("config_request_timeout")
} }
)} )}
]; ].
fields("post") ->
[
type_field(),
name_field()
] ++ fields("config");
fields("put") ->
fields("config");
fields("get") ->
emqx_bridge_schema:metrics_status_fields() ++ fields("post").
desc("config") ->
?DESC("desc_config");
desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" ->
["Configuration for WebHook using `", string:to_upper(Method), "` method."];
desc(_) ->
undefined.
basic_config() ->
[
{enable,
mk(
boolean(),
#{
desc => ?DESC("config_enable"),
default => true
}
)},
{direction,
mk(
egress,
#{
desc => ?DESC("config_direction"),
default => egress
}
)}
] ++
proplists:delete(base_url, emqx_connector_http:fields(config)).
%%====================================================================================== %%======================================================================================

View File

@ -1039,12 +1039,18 @@ Defaults to: <code>system</code>.
common_handler_chars_limit { common_handler_chars_limit {
desc { desc {
en: """Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated.""" en: """
zh: """设置单个日志消息的最大长度。 如果超过此长度则日志消息将被截断。最小可设置的长度为100。""" Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated.
NOTE: When the formatter is JSON, restricting the character limit may leave the log message as truncated, incomplete JSON data, which is not recommended.
"""
zh: """
设置单个日志消息的最大长度。 如果超过此长度则日志消息将被截断。最小可设置的长度为100。
注意:如果日志格式为 JSON,限制字符长度可能会导致截断不完整的 JSON 数据。
"""
} }
label { label {
en: "Single Log Max Length" en: "Single Log Max Length"
zh: "单个日志最大长度" zh: "单条日志长度限制"
} }
} }
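A hedged sketch of where this limit is configured. The log.console_handler path and the formatter key are assumptions; only the chars_limit field name and the JSON caveat come from the description above.

    log.console_handler {
      enable = true
      formatter = text     # prefer text over json when chars_limit may truncate messages
      chars_limit = 8192   # messages longer than this are truncated
    }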

View File

@ -262,6 +262,8 @@ fast_forward_to_commit(Node, ToTnxId) ->
%% @private %% @private
init([Node, RetryMs]) -> init([Node, RetryMs]) ->
%% Workaround for https://github.com/emqx/mria/issues/94:
_ = mria_rlog:wait_for_shards([?CLUSTER_RPC_SHARD], 1000),
_ = mria:wait_for_tables([?CLUSTER_MFA, ?CLUSTER_COMMIT]), _ = mria:wait_for_tables([?CLUSTER_MFA, ?CLUSTER_COMMIT]),
{ok, _} = mnesia:subscribe({table, ?CLUSTER_MFA, simple}), {ok, _} = mnesia:subscribe({table, ?CLUSTER_MFA, simple}),
State = #{node => Node, retry_interval => RetryMs}, State = #{node => Node, retry_interval => RetryMs},

View File

@ -1,6 +1,6 @@
{application, emqx_conf, [ {application, emqx_conf, [
{description, "EMQX configuration management"}, {description, "EMQX configuration management"},
{vsn, "0.1.1"}, {vsn, "0.1.2"},
{registered, []}, {registered, []},
{mod, {emqx_conf_app, []}}, {mod, {emqx_conf_app, []}},
{applications, [kernel, stdlib]}, {applications, [kernel, stdlib]},

View File

@ -41,17 +41,6 @@ base URL 只包含host和port。</br>
} }
} }
retry_interval {
desc {
en: "Interval between retries."
zh: "重试之间的间隔时间。"
}
label: {
en: "Retry Interval"
zh: "重试间隔"
}
}
pool_type { pool_type {
desc { desc {
en: "The type of the pool. Can be one of `random`, `hash`." en: "The type of the pool. Can be one of `random`, `hash`."
@ -76,8 +65,8 @@ base URL 只包含host和port。</br>
enable_pipelining { enable_pipelining {
desc { desc {
en: "Whether to send HTTP requests continuously, when set to 0, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request." en: "A positive integer. Whether to send HTTP requests continuously, when set to 1, it means that after each HTTP request is sent, you need to wait for the server to return and then continue to send the next request."
zh: "是否连续发送 HTTP 请求,当设置为 0 时,表示每次发送完成 HTTP 请求后都需要等待服务器返回,再继续发送下一个请求。" zh: "正整数,设置最大可发送的异步 HTTP 请求数量。当设置为 1 时,表示每次发送完成 HTTP 请求后都需要等待服务器返回,再继续发送下一个请求。"
} }
label: { label: {
en: "HTTP Pipelineing" en: "HTTP Pipelineing"

View File

@ -88,22 +88,6 @@ fields(config) ->
desc => ?DESC("connect_timeout") desc => ?DESC("connect_timeout")
} }
)}, )},
{max_retries,
sc(
non_neg_integer(),
#{
default => 5,
desc => ?DESC("max_retries")
}
)},
{retry_interval,
sc(
emqx_schema:duration(),
#{
default => "1s",
desc => ?DESC("retry_interval")
}
)},
{pool_type, {pool_type,
sc( sc(
pool_type(), pool_type(),
@ -147,6 +131,14 @@ fields("request") ->
{path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})}, {path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})},
{body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})}, {body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})},
{headers, hoconsc:mk(map(), #{required => false, desc => ?DESC("headers")})}, {headers, hoconsc:mk(map(), #{required => false, desc => ?DESC("headers")})},
{max_retries,
sc(
non_neg_integer(),
#{
required => false,
desc => ?DESC("max_retries")
}
)},
{request_timeout, {request_timeout,
sc( sc(
emqx_schema:duration_ms(), emqx_schema:duration_ms(),
@ -182,8 +174,6 @@ on_start(
path := BasePath path := BasePath
}, },
connect_timeout := ConnectTimeout, connect_timeout := ConnectTimeout,
max_retries := MaxRetries,
retry_interval := RetryInterval,
pool_type := PoolType, pool_type := PoolType,
pool_size := PoolSize pool_size := PoolSize
} = Config } = Config
@ -206,8 +196,6 @@ on_start(
{host, Host}, {host, Host},
{port, Port}, {port, Port},
{connect_timeout, ConnectTimeout}, {connect_timeout, ConnectTimeout},
{retry, MaxRetries},
{retry_timeout, RetryInterval},
{keepalive, 30000}, {keepalive, 30000},
{pool_type, PoolType}, {pool_type, PoolType},
{pool_size, PoolSize}, {pool_size, PoolSize},
@ -247,17 +235,23 @@ on_query(InstId, {send_message, Msg}, AfterQuery, State) ->
path := Path, path := Path,
body := Body, body := Body,
headers := Headers, headers := Headers,
request_timeout := Timeout request_timeout := Timeout,
max_retries := Retry
} = process_request(Request, Msg), } = process_request(Request, Msg),
on_query(InstId, {Method, {Path, Headers, Body}, Timeout}, AfterQuery, State)
end;
on_query(InstId, {Method, Request}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, 5000}, AfterQuery, State);
on_query(InstId, {Method, Request, Timeout}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, Timeout}, AfterQuery, State);
on_query( on_query(
InstId, InstId,
{KeyOrNum, Method, Request, Timeout}, {undefined, Method, {Path, Headers, Body}, Timeout, Retry},
AfterQuery,
State
)
end;
on_query(InstId, {Method, Request}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, 5000, 2}, AfterQuery, State);
on_query(InstId, {Method, Request, Timeout}, AfterQuery, State) ->
on_query(InstId, {undefined, Method, Request, Timeout, 2}, AfterQuery, State);
on_query(
InstId,
{KeyOrNum, Method, Request, Timeout, Retry},
AfterQuery, AfterQuery,
#{pool_name := PoolName, base_path := BasePath} = State #{pool_name := PoolName, base_path := BasePath} = State
) -> ) ->
@ -275,7 +269,8 @@ on_query(
end, end,
Method, Method,
NRequest, NRequest,
Timeout Timeout,
Retry
) )
of of
{error, Reason} -> {error, Reason} ->
@ -368,7 +363,8 @@ preprocess_request(
path => emqx_plugin_libs_rule:preproc_tmpl(Path), path => emqx_plugin_libs_rule:preproc_tmpl(Path),
body => emqx_plugin_libs_rule:preproc_tmpl(Body), body => emqx_plugin_libs_rule:preproc_tmpl(Body),
headers => preproc_headers(Headers), headers => preproc_headers(Headers),
request_timeout => maps:get(request_timeout, Req, 30000) request_timeout => maps:get(request_timeout, Req, 30000),
max_retries => maps:get(max_retries, Req, 2)
}. }.
preproc_headers(Headers) when is_map(Headers) -> preproc_headers(Headers) when is_map(Headers) ->

View File

@ -90,6 +90,7 @@ fields(sentinel) ->
}}, }},
{sentinel, #{ {sentinel, #{
type => string(), type => string(),
required => true,
desc => ?DESC("sentinel_desc") desc => ?DESC("sentinel_desc")
}} }}
] ++ ] ++

View File

@ -23,8 +23,10 @@
-include_lib("emqx/include/emqx.hrl"). -include_lib("emqx/include/emqx.hrl").
-include_lib("stdlib/include/assert.hrl"). -include_lib("stdlib/include/assert.hrl").
-define(REDIS_HOST, "redis"). -define(REDIS_SINGLE_HOST, "redis").
-define(REDIS_PORT, 6379). -define(REDIS_SINGLE_PORT, 6379).
-define(REDIS_SENTINEL_HOST, "redis-sentinel").
-define(REDIS_SENTINEL_PORT, 26379).
-define(REDIS_RESOURCE_MOD, emqx_connector_redis). -define(REDIS_RESOURCE_MOD, emqx_connector_redis).
all() -> all() ->
@ -34,7 +36,14 @@ groups() ->
[]. [].
init_per_suite(Config) -> init_per_suite(Config) ->
case emqx_common_test_helpers:is_tcp_server_available(?REDIS_HOST, ?REDIS_PORT) of case
emqx_common_test_helpers:is_all_tcp_servers_available(
[
{?REDIS_SINGLE_HOST, ?REDIS_SINGLE_PORT},
{?REDIS_SENTINEL_HOST, ?REDIS_SENTINEL_PORT}
]
)
of
true -> true ->
ok = emqx_common_test_helpers:start_apps([emqx_conf]), ok = emqx_common_test_helpers:start_apps([emqx_conf]),
ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_connector]), ok = emqx_connector_test_helpers:start_apps([emqx_resource, emqx_connector]),
@ -141,20 +150,35 @@ redis_config_cluster() ->
redis_config_sentinel() -> redis_config_sentinel() ->
redis_config_base("sentinel", "servers"). redis_config_base("sentinel", "servers").
-define(REDIS_CONFIG_BASE(MaybeSentinel),
"" ++
"\n" ++
" auto_reconnect = true\n" ++
" database = 1\n" ++
" pool_size = 8\n" ++
" redis_type = ~s\n" ++
MaybeSentinel ++
" password = public\n" ++
" ~s = \"~s:~b\"\n" ++
" " ++
""
).
redis_config_base(Type, ServerKey) -> redis_config_base(Type, ServerKey) ->
case Type of
"sentinel" ->
Host = ?REDIS_SENTINEL_HOST,
Port = ?REDIS_SENTINEL_PORT,
MaybeSentinel = " sentinel = mymaster\n";
_ ->
Host = ?REDIS_SINGLE_HOST,
Port = ?REDIS_SINGLE_PORT,
MaybeSentinel = ""
end,
RawConfig = list_to_binary( RawConfig = list_to_binary(
io_lib:format( io_lib:format(
"" ?REDIS_CONFIG_BASE(MaybeSentinel),
"\n" [Type, ServerKey, Host, Port]
" auto_reconnect = true\n"
" database = 1\n"
" pool_size = 8\n"
" redis_type = ~s\n"
" password = public\n"
" ~s = \"~s:~b\"\n"
" "
"",
[Type, ServerKey, ?REDIS_HOST, ?REDIS_PORT]
) )
), ),
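For reference, substituting Type = "sentinel" and ServerKey = "servers" into the macro above renders roughly this HOCON fragment:

    auto_reconnect = true
    database = 1
    pool_size = 8
    redis_type = sentinel
    sentinel = mymaster
    password = public
    servers = "redis-sentinel:26379"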

View File

@ -1,7 +1,7 @@
dashboard { dashboard {
listeners.http { listeners.http {
bind: 18083 bind = 18083
} }
default_username: "admin" default_username = "admin"
default_password: "public" default_password = "public"
} }

View File

@ -2,7 +2,7 @@
{application, emqx_dashboard, [ {application, emqx_dashboard, [
{description, "EMQX Web Dashboard"}, {description, "EMQX Web Dashboard"},
% strict semver, bump manually! % strict semver, bump manually!
{vsn, "5.0.2"}, {vsn, "5.0.4"},
{modules, []}, {modules, []},
{registered, [emqx_dashboard_sup]}, {registered, [emqx_dashboard_sup]},
{applications, [kernel, stdlib, mnesia, minirest, emqx]}, {applications, [kernel, stdlib, mnesia, minirest, emqx]},

View File

@ -1,13 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
%% Unless you know what you are doing, DO NOT edit manually!! %% Unless you know what you are doing, DO NOT edit manually!!
{VSN, {VSN,
[{"5.0.0", %% we should always restart dashboard to make sure api rules/swagger is updated
[{load_module,emqx_dashboard,brutal_purge,soft_purge,[]}, [{<<".*">>,[{restart_application, emqx_dashboard}]}],
{load_module,emqx_dashboard_api,brutal_purge,soft_purge,[]}, [{<<".*">>,[{restart_application, emqx_dashboard}]}]
{load_module,emqx_dashboard_token,brutal_purge,soft_purge,[]}]}, }.
{<<".*">>,[]}],
[{"5.0.0",
[{load_module,emqx_dashboard,brutal_purge,soft_purge,[]},
{load_module,emqx_dashboard_api,brutal_purge,soft_purge,[]},
{load_module,emqx_dashboard_token,brutal_purge,soft_purge,[]}]},
{<<".*">>,[]}]}.

View File

@ -92,7 +92,7 @@ start_listeners(Listeners) ->
case minirest:start(Name, RanchOptions, Minirest) of case minirest:start(Name, RanchOptions, Minirest) of
{ok, _} -> {ok, _} ->
?ULOG("Listener ~ts on ~ts started.~n", [ ?ULOG("Listener ~ts on ~ts started.~n", [
Name, emqx_listeners:format_addr(Bind) Name, emqx_listeners:format_bind(Bind)
]), ]),
Acc; Acc;
{error, _Reason} -> {error, _Reason} ->
@ -114,7 +114,7 @@ stop_listeners(Listeners) ->
case minirest:stop(Name) of case minirest:stop(Name) of
ok -> ok ->
?ULOG("Stop listener ~ts on ~ts successfully.~n", [ ?ULOG("Stop listener ~ts on ~ts successfully.~n", [
Name, emqx_listeners:format_addr(Port) Name, emqx_listeners:format_bind(Port)
]); ]);
{error, not_found} -> {error, not_found} ->
?SLOG(warning, #{msg => "stop_listener_failed", name => Name, port => Port}) ?SLOG(warning, #{msg => "stop_listener_failed", name => Name, port => Port})
@ -159,7 +159,7 @@ listeners(Listeners) ->
maps:get(enable, Conf) andalso maps:get(enable, Conf) andalso
begin begin
{Conf1, Bind} = ip_port(Conf), {Conf1, Bind} = ip_port(Conf),
{true, {listener_name(Protocol, Conf1), Protocol, Bind, ranch_opts(Conf1)}} {true, {listener_name(Protocol), Protocol, Bind, ranch_opts(Conf1)}}
end end
end, end,
maps:to_list(Listeners) maps:to_list(Listeners)
@ -208,19 +208,8 @@ ranch_opts(Options) ->
filter_false(_K, false, S) -> S; filter_false(_K, false, S) -> S;
filter_false(K, V, S) -> [{K, V} | S]. filter_false(K, V, S) -> [{K, V} | S].
listener_name(Protocol, #{port := Port, ip := IP}) -> listener_name(Protocol) ->
Name = list_to_atom(atom_to_list(Protocol) ++ ":dashboard").
"dashboard:" ++
atom_to_list(Protocol) ++ ":" ++
inet:ntoa(IP) ++ ":" ++
integer_to_list(Port),
list_to_atom(Name);
listener_name(Protocol, #{port := Port}) ->
Name =
"dashboard:" ++
atom_to_list(Protocol) ++ ":" ++
integer_to_list(Port),
list_to_atom(Name).
authorize(Req) -> authorize(Req) ->
case cowboy_req:parse_header(<<"authorization">>, Req) of case cowboy_req:parse_header(<<"authorization">>, Req) of

View File

@ -180,7 +180,6 @@ field(username_in_path) ->
{username, {username,
mk(binary(), #{ mk(binary(), #{
desc => ?DESC(username), desc => ?DESC(username),
'maxLength' => 100,
example => <<"admin">>, example => <<"admin">>,
in => path, in => path,
required => true required => true

View File

@ -32,14 +32,22 @@ admins(["add", Username, Password, Desc]) ->
{ok, _} -> {ok, _} ->
emqx_ctl:print("ok~n"); emqx_ctl:print("ok~n");
{error, Reason} -> {error, Reason} ->
emqx_ctl:print("Error: ~p~n", [Reason]) print_error(Reason)
end; end;
admins(["passwd", Username, Password]) -> admins(["passwd", Username, Password]) ->
Status = emqx_dashboard_admin:change_password(bin(Username), bin(Password)), case emqx_dashboard_admin:change_password(bin(Username), bin(Password)) of
emqx_ctl:print("~p~n", [Status]); {ok, _} ->
emqx_ctl:print("ok~n");
{error, Reason} ->
print_error(Reason)
end;
admins(["del", Username]) -> admins(["del", Username]) ->
Status = emqx_dashboard_admin:remove_user(bin(Username)), case emqx_dashboard_admin:remove_user(bin(Username)) of
emqx_ctl:print("~p~n", [Status]); {ok, _} ->
emqx_ctl:print("ok~n");
{error, Reason} ->
print_error(Reason)
end;
admins(_) -> admins(_) ->
emqx_ctl:usage( emqx_ctl:usage(
[ [
@ -53,3 +61,9 @@ unload() ->
emqx_ctl:unregister_command(admins). emqx_ctl:unregister_command(admins).
bin(S) -> iolist_to_binary(S). bin(S) -> iolist_to_binary(S).
print_error(Reason) when is_binary(Reason) ->
emqx_ctl:print("Error: ~s~n", [Reason]).
%% There may be more error types in the future, but only binary is produced for now, so the catch-all clause below is commented out to keep dialyzer happy.
% print_error(Reason) ->
% emqx_ctl:print("Error: ~p~n", [Reason]).
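With this change the three subcommands behave consistently from the console: emqx ctl admins add <username> <password> <description>, emqx ctl admins passwd <username> <password> and emqx ctl admins del <username> each print ok on success and Error: <reason> on failure. The emqx ctl entry point is assumed here; the argument shapes and the messages are taken from the clauses above.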

View File

@ -51,6 +51,7 @@ schema("/error_codes") ->
get => #{ get => #{
security => [], security => [],
description => <<"API Error Codes">>, description => <<"API Error Codes">>,
tags => [<<"Error codes">>],
responses => #{ responses => #{
200 => hoconsc:array(hoconsc:ref(?MODULE, error_code)) 200 => hoconsc:array(hoconsc:ref(?MODULE, error_code))
} }
@ -62,6 +63,7 @@ schema("/error_codes/:code") ->
get => #{ get => #{
security => [], security => [],
description => <<"API Error Codes">>, description => <<"API Error Codes">>,
tags => [<<"Error codes">>],
parameters => [ parameters => [
{code, {code,
hoconsc:mk(hoconsc:enum(emqx_dashboard_error_code:all()), #{ hoconsc:mk(hoconsc:enum(emqx_dashboard_error_code:all()), #{

View File

@ -38,7 +38,12 @@
]). ]).
is_ready(Timeout) -> is_ready(Timeout) ->
ready =:= gen_server:call(?MODULE, is_ready, Timeout). try
ready =:= gen_server:call(?MODULE, is_ready, Timeout)
catch
exit:{timeout, _} ->
false
end.
start_link() -> start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

View File

@ -43,5 +43,6 @@ check_dispatch_ready(Env) ->
true; true;
true -> true ->
%% dashboard should always be ready, if not, is_ready/1 will block until ready. %% dashboard should always be ready, if not, is_ready/1 will block until ready.
emqx_dashboard_listener:is_ready(timer:seconds(15)) %% if not ready, dashboard will return 503.
emqx_dashboard_listener:is_ready(timer:seconds(20))
end. end.

View File

@ -115,13 +115,16 @@ granularity_adapter(List) ->
%% Get the current rate. Not the current sampler data. %% Get the current rate. Not the current sampler data.
current_rate() -> current_rate() ->
Fun = Fun =
fun(Node, Cluster) -> fun
(Node, Cluster) when is_map(Cluster) ->
case current_rate(Node) of case current_rate(Node) of
{ok, CurrentRate} -> {ok, CurrentRate} ->
merge_cluster_rate(CurrentRate, Cluster); merge_cluster_rate(CurrentRate, Cluster);
{badrpc, Reason} -> {badrpc, Reason} ->
{badrpc, {Node, Reason}} {badrpc, {Node, Reason}}
end end;
(_Node, Error) ->
Error
end, end,
case lists:foldl(Fun, #{}, mria_mnesia:cluster_nodes(running)) of case lists:foldl(Fun, #{}, mria_mnesia:cluster_nodes(running)) of
{badrpc, Reason} -> {badrpc, Reason} ->

View File

@ -37,7 +37,7 @@ schema("/monitor") ->
#{ #{
'operationId' => monitor, 'operationId' => monitor,
get => #{ get => #{
tags => [dashboard], tags => [<<"Metrics">>],
desc => <<"List monitor data.">>, desc => <<"List monitor data.">>,
parameters => [parameter_latest()], parameters => [parameter_latest()],
responses => #{ responses => #{
@ -50,7 +50,7 @@ schema("/monitor/nodes/:node") ->
#{ #{
'operationId' => monitor, 'operationId' => monitor,
get => #{ get => #{
tags => [dashboard], tags => [<<"Metrics">>],
desc => <<"List the monitor data on the node.">>, desc => <<"List the monitor data on the node.">>,
parameters => [parameter_node(), parameter_latest()], parameters => [parameter_node(), parameter_latest()],
responses => #{ responses => #{
@ -63,7 +63,7 @@ schema("/monitor_current") ->
#{ #{
'operationId' => monitor_current, 'operationId' => monitor_current,
get => #{ get => #{
tags => [dashboard], tags => [<<"Metrics">>],
desc => <<"Current status. Gauge and rate.">>, desc => <<"Current status. Gauge and rate.">>,
responses => #{ responses => #{
200 => hoconsc:mk(hoconsc:ref(sampler_current), #{}) 200 => hoconsc:mk(hoconsc:ref(sampler_current), #{})
@ -74,7 +74,7 @@ schema("/monitor_current/nodes/:node") ->
#{ #{
'operationId' => monitor_current, 'operationId' => monitor_current,
get => #{ get => #{
tags => [dashboard], tags => [<<"Metrics">>],
desc => <<"Node current status. Gauge and rate.">>, desc => <<"Node current status. Gauge and rate.">>,
parameters => [parameter_node()], parameters => [parameter_node()],
responses => #{ responses => #{

Some files were not shown because too many files have changed in this diff.