Merge pull request #8716 from emqx/merge_master_into_ee5.0

Merge master into ee5.0
Committed by Xinyu Liu on 2022-08-15 08:55:31 +08:00 (via GitHub)
commit 166e10b3c0
103 changed files with 2419 additions and 1001 deletions


@ -27,4 +27,3 @@ ok
 + POST `/counter`
 计数器加一
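For context, the counter endpoints in this demo README can be exercised with plain curl once the app is running; a sketch assuming the port 7077 configured in the demo module shown later in this diff:

```shell
# POST stores a message and bumps the counter (id/payload fields as expected by counter/2)
curl -X POST -H 'Content-Type: application/json' \
     -d '{"id": 1, "payload": "hello"}' \
     http://127.0.0.1:7077/counter

# GET returns the current count
curl http://127.0.0.1:7077/counter
```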


@ -3,7 +3,7 @@
 {erl_opts, [debug_info]}.
 {deps,
  [
-  {minirest, {git, "https://github.com/emqx/minirest.git", {tag, "0.3.6"}}}
+  {minirest, {git, "https://github.com/emqx/minirest.git", {tag, "1.3.6"}}}
 ]}.
 {shell, [


@ -1,7 +1,7 @@
 %% -*- mode: erlang -*-
 {application, http_server,
-    [{description, "An OTP application"},
-     {vsn, "0.1.0"},
+    [{description, "An HTTP server application"},
+     {vsn, "0.2.0"},
      {registered, []},
      % {mod, {http_server_app, []}},
      {modules, []},


@ -10,51 +10,107 @@
     stop/0
 ]).

--rest_api(#{
-    name => get_counter,
-    method => 'GET',
-    path => "/counter",
-    func => get_counter,
-    descr => "Check counter"
-}).
-
--rest_api(#{
-    name => add_counter,
-    method => 'POST',
-    path => "/counter",
-    func => add_counter,
-    descr => "Counter plus one"
-}).
-
--export([
-    get_counter/2,
-    add_counter/2
-]).
+-behavior(minirest_api).
+
+-export([api_spec/0]).
+-export([counter/2]).
+
+api_spec() ->
+    {
+        [counter_api()],
+        []
+    }.
+
+counter_api() ->
+    MetaData = #{
+        get => #{
+            description => "Get counter",
+            summary => "Get counter",
+            responses => #{
+                200 => #{
+                    content => #{
+                        'application/json' =>
+                            #{
+                                type => object,
+                                properties => #{
+                                    code => #{type => integer, example => 0},
+                                    data => #{type => integer, example => 0}
+                                }
+                            }
+                    }
+                }
+            }
+        },
+        post => #{
+            description => "Add counter",
+            summary => "Add counter",
+            'requestBody' => #{
+                content => #{
+                    'application/json' => #{
+                        schema =>
+                            #{
+                                type => object,
+                                properties => #{
+                                    payload => #{type => string, example => <<"sample payload">>},
+                                    id => #{type => integer, example => 0}
+                                }
+                            }
+                    }
+                }
+            },
+            responses => #{
+                200 => #{
+                    content => #{
+                        'application/json' =>
+                            #{
+                                type => object,
+                                properties => #{
+                                    code => #{type => integer, example => 0}
+                                }
+                            }
+                    }
+                }
+            }
+        }
+    },
+    {"/counter", MetaData, counter}.
+
+counter(get, _Params) ->
+    V = ets:info(relup_test_message, size),
+    {200, #{<<"content-type">> => <<"text/plain">>}, #{<<"code">> => 0, <<"data">> => V}};
+counter(post, #{body := Params}) ->
+    case Params of
+        #{<<"payload">> := _, <<"id">> := Id} ->
+            ets:insert(relup_test_message, {Id, maps:remove(<<"id">>, Params)}),
+            {200, #{<<"code">> => 0}};
+        _ ->
+            io:format("discarded: ~p\n", [Params]),
+            {200, #{<<"code">> => -1}}
+    end.

 start() ->
     application:ensure_all_started(minirest),
     _ = spawn(fun ets_owner/0),
-    Handlers = [{"/", minirest:handler(#{modules => [?MODULE]})}],
-    Dispatch = [{"/[...]", minirest, Handlers}],
-    minirest:start_http(?MODULE, #{socket_opts => [inet, {port, 7077}]}, Dispatch).
+    RanchOptions = #{
+        max_connections => 512,
+        num_acceptors => 4,
+        socket_opts => [{send_timeout, 5000}, {port, 7077}, {backlog, 512}]
+    },
+    Minirest = #{
+        base_path => "",
+        modules => [?MODULE],
+        dispatch => [{"/[...]", ?MODULE, []}],
+        protocol => http,
+        ranch_options => RanchOptions,
+        middlewares => [cowboy_router, cowboy_handler]
+    },
+    Res = minirest:start(?MODULE, Minirest),
+    minirest:update_dispatch(?MODULE),
+    Res.

 stop() ->
     ets:delete(relup_test_message),
-    minirest:stop_http(?MODULE).
-
-get_counter(_Binding, _Params) ->
-    V = ets:info(relup_test_message, size),
-    return({ok, V}).
-
-add_counter(_Binding, Params) ->
-    case lists:keymember(<<"payload">>, 1, Params) of
-        true ->
-            {value, {<<"id">>, ID}, Params1} = lists:keytake(<<"id">>, 1, Params),
-            ets:insert(relup_test_message, {ID, Params1});
-        _ ->
-            io:format("discarded: ~p\n", [Params]),
-            ok
-    end,
-    return().
+    minirest:stop(?MODULE).

 ets_owner() ->
     ets:new(relup_test_message, [named_table, public]),


@ -194,7 +194,7 @@ jobs:
         ./emqx/bin/emqx start || cat emqx/log/erlang.log.1
         ready='no'
         for i in {1..18}; do
-          if curl -fs 127.0.0.1:18083/api/v5/status > /dev/null; then
+          if curl -fs 127.0.0.1:18083/status > /dev/null; then
             ready='yes'
             break
           fi


@ -178,7 +178,7 @@ jobs:
         ./emqx/bin/emqx start || cat emqx/log/erlang.log.1
         ready='no'
         for i in {1..30}; do
-          if curl -fs 127.0.0.1:18083/api/v5/status > /dev/null; then
+          if curl -fs 127.0.0.1:18083/status > /dev/null; then
             ready='yes'
             break
           fi


@ -145,6 +145,10 @@ jobs:
fail-fast: false fail-fast: false
matrix: matrix:
app_name: ${{ fromJson(needs.prepare.outputs.fast_ct_apps) }} app_name: ${{ fromJson(needs.prepare.outputs.fast_ct_apps) }}
profile:
- emqx
- emqx-enterprise
runs-on: aws-amd64 runs-on: aws-amd64
container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04" container: "ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-ubuntu20.04"
defaults: defaults:
@ -163,13 +167,35 @@ jobs:
# produces <app-name>.coverdata # produces <app-name>.coverdata
- name: run common test - name: run common test
working-directory: source working-directory: source
env:
PROFILE: ${{ matrix.profile }}
WHICH_APP: ${{ matrix.app_name }}
run: | run: |
make ${{ matrix.app_name }}-ct if [ "$PROFILE" = 'emqx-enterprise' ]; then
- uses: actions/upload-artifact@v1 COMPILE_FLAGS="$(grep -R "EMQX_RELEASE_EDITION" "$WHICH_APP" | wc -l || true)"
if [ "$COMPILE_FLAGS" -gt 0 ]; then
# need to clean first because the default profile was
make clean
make "${WHICH_APP}-ct"
else
echo "skip_common_test_run_for_app ${WHICH_APP}-ct"
fi
else
case "$WHICH_APP" in
lib-ee/*)
echo "skip_opensource_edition_test_for_lib-ee"
;;
*)
make "${WHICH_APP}-ct"
;;
esac
fi
- uses: actions/upload-artifact@v3
with: with:
name: coverdata name: coverdata
path: source/_build/test/cover path: source/_build/test/cover
- uses: actions/upload-artifact@v1 if-no-files-found: warn # do not fail if no coverdata found
- uses: actions/upload-artifact@v3
if: failure() if: failure()
with: with:
name: logs_${{ matrix.otp_release }} name: logs_${{ matrix.otp_release }}

.gitignore

@ -68,3 +68,4 @@ apps/emqx/test/emqx_static_checks_data/master.bpapi
 # rendered configurations
 *.conf.rendered
 lux_logs/
+.ci/docker-compose-file/redis/*.log


@ -1,3 +1,19 @@
+# 5.0.5
+
+## Bug fixes
+
+* Allow changing the license type from key to file (and vice versa). [#8598](https://github.com/emqx/emqx/pull/8598)
+* Add back the HTTP connector config keys `max_retries` and `retry_interval` as deprecated fields. [#8672](https://github.com/emqx/emqx/issues/8672)
+  Their removal caused an upgrade failure in 5.0.4, because nodes would fail to boot on configs created by an older version.
+
+## Enhancements
+
+* The license is now copied to all nodes in the cluster when it is reloaded. [#8598](https://github.com/emqx/emqx/pull/8598)
+* Added an HTTP API to manage licenses. [#8610](https://github.com/emqx/emqx/pull/8610)
+* Updated the `/nodes` API `node_status` values from `Running/Stopped` to `running/stopped`. [#8642](https://github.com/emqx/emqx/pull/8642)
+* Improve handling of placeholder interpolation errors. [#8635](https://github.com/emqx/emqx/pull/8635)
+* Better logging on unknown object IDs. [#8670](https://github.com/emqx/emqx/pull/8670)
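A quick way to see the renamed `node_status` values mentioned above (illustrative only; `$API_KEY`/`$SECRET` stand for a dashboard API key, and `jq` is assumed to be installed):

```shell
curl -s -u "$API_KEY:$SECRET" http://127.0.0.1:18083/api/v5/nodes | jq '.[].node_status'
# "running"
```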
 # 5.0.4

 ## Bug fixes
@ -34,6 +50,7 @@
 * Improve authentication tracing. [#8554](https://github.com/emqx/emqx/pull/8554)
 * Standardize the '/listeners' and `/gateway/<name>/listeners` API fields.
   It will introduce some incompatible updates, see [#8571](https://github.com/emqx/emqx/pull/8571)
+* Add option to perform GC on connection process after TLS/SSL handshake is performed. [#8637](https://github.com/emqx/emqx/pull/8637)

 # 5.0.3


@ -1,4 +1,3 @@
$(shell $(CURDIR)/scripts/git-hooks-init.sh)
REBAR = $(CURDIR)/rebar3 REBAR = $(CURDIR)/rebar3
BUILD = $(CURDIR)/build BUILD = $(CURDIR)/build
SCRIPTS = $(CURDIR)/scripts SCRIPTS = $(CURDIR)/scripts
@ -7,7 +6,8 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-17:1.13.4-24.2.1-1-d
export EMQX_DEFAULT_RUNNER = debian:11-slim export EMQX_DEFAULT_RUNNER = debian:11-slim
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh) export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh) export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
export EMQX_DASHBOARD_VERSION ?= v1.0.5 export EMQX_DASHBOARD_VERSION ?= v1.0.6
export EMQX_EE_DASHBOARD_VERSION ?= e1.0.0
export EMQX_REL_FORM ?= tgz export EMQX_REL_FORM ?= tgz
export QUICER_DOWNLOAD_FROM_RELEASE = 1 export QUICER_DOWNLOAD_FROM_RELEASE = 1
ifeq ($(OS),Windows_NT) ifeq ($(OS),Windows_NT)
@ -30,6 +30,13 @@ export REBAR_GIT_CLONE_OPTIONS += --depth=1
.PHONY: default .PHONY: default
default: $(REBAR) $(PROFILE) default: $(REBAR) $(PROFILE)
.PHONY: prepare
prepare: FORCE
@$(SCRIPTS)/git-hooks-init.sh # this is no longer needed since 5.0 but we keep it anyway
@$(SCRIPTS)/prepare-build-deps.sh
FORCE:
.PHONY: all .PHONY: all
all: $(REBAR) $(PROFILES) all: $(REBAR) $(PROFILES)
@ -53,11 +60,7 @@ ensure-mix-rebar: $(REBAR)
mix-deps-get: $(ELIXIR_COMMON_DEPS) mix-deps-get: $(ELIXIR_COMMON_DEPS)
@mix deps.get @mix deps.get
$(REBAR): ensure-rebar3 $(REBAR): prepare ensure-rebar3
.PHONY: get-dashboard
get-dashboard:
@$(SCRIPTS)/get-dashboard.sh
.PHONY: eunit .PHONY: eunit
eunit: $(REBAR) conf-segs eunit: $(REBAR) conf-segs
@ -75,13 +78,14 @@ ct: $(REBAR) conf-segs
static_checks: static_checks:
@$(REBAR) as check do dialyzer, xref, ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE) @$(REBAR) as check do dialyzer, xref, ct --suite apps/emqx/test/emqx_static_checks --readable $(CT_READABLE)
APPS=$(shell $(CURDIR)/scripts/find-apps.sh) APPS=$(shell $(SCRIPTS)/find-apps.sh)
## app/name-ct targets are intended for local tests hence cover is not enabled ## app/name-ct targets are intended for local tests hence cover is not enabled
.PHONY: $(APPS:%=%-ct) .PHONY: $(APPS:%=%-ct)
define gen-app-ct-target define gen-app-ct-target
$1-ct: $(REBAR) conf-segs $1-ct: $(REBAR)
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(subst /,-,$1) --suite $(shell $(CURDIR)/scripts/find-suites.sh $1) @$(SCRIPTS)/pre-compile.sh $(PROFILE)
@ENABLE_COVER_COMPILE=1 $(REBAR) ct --name $(CT_NODE_NAME) -c -v --cover_export_name $(subst /,-,$1) --suite $(shell $(SCRIPTS)/find-suites.sh $1)
endef endef
$(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app)))) $(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app))))
@ -89,7 +93,7 @@ $(foreach app,$(APPS),$(eval $(call gen-app-ct-target,$(app))))
.PHONY: $(APPS:%=%-prop) .PHONY: $(APPS:%=%-prop)
define gen-app-prop-target define gen-app-prop-target
$1-prop: $1-prop:
$(REBAR) proper -d test/props -v -m $(shell $(CURDIR)/scripts/find-props.sh $1) $(REBAR) proper -d test/props -v -m $(shell $(SCRIPTS)/find-props.sh $1)
endef endef
$(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app)))) $(foreach app,$(APPS),$(eval $(call gen-app-prop-target,$(app))))
@ -111,7 +115,8 @@ cover: $(REBAR)
coveralls: $(REBAR) coveralls: $(REBAR)
@ENABLE_COVER_COMPILE=1 $(REBAR) as test coveralls send @ENABLE_COVER_COMPILE=1 $(REBAR) as test coveralls send
COMMON_DEPS := $(REBAR) prepare-build-deps get-dashboard conf-segs COMMON_DEPS := $(REBAR)
ELIXIR_COMMON_DEPS := ensure-hex ensure-mix-rebar3 ensure-mix-rebar ELIXIR_COMMON_DEPS := ensure-hex ensure-mix-rebar3 ensure-mix-rebar
.PHONY: $(REL_PROFILES) .PHONY: $(REL_PROFILES)
@ -147,6 +152,7 @@ deps-all: $(REBAR) $(PROFILES:%=deps-%)
## which may not have the right credentials ## which may not have the right credentials
.PHONY: $(PROFILES:%=deps-%) .PHONY: $(PROFILES:%=deps-%)
$(PROFILES:%=deps-%): $(COMMON_DEPS) $(PROFILES:%=deps-%): $(COMMON_DEPS)
@$(SCRIPTS)/pre-compile.sh $(@:deps-%=%)
@$(REBAR) as $(@:deps-%=%) get-deps @$(REBAR) as $(@:deps-%=%) get-deps
@rm -f rebar.lock @rm -f rebar.lock
@ -167,7 +173,7 @@ $(REL_PROFILES:%=%-rel) $(PKG_PROFILES:%=%-rel): $(COMMON_DEPS)
.PHONY: $(REL_PROFILES:%=%-relup-downloads) .PHONY: $(REL_PROFILES:%=%-relup-downloads)
define download-relup-packages define download-relup-packages
$1-relup-downloads: $1-relup-downloads:
@if [ "$${EMQX_RELUP}" = "true" ]; then $(CURDIR)/scripts/relup-build/download-base-packages.sh $1; fi @if [ "$${EMQX_RELUP}" = "true" ]; then $(SCRIPTS)/relup-build/download-base-packages.sh $1; fi
endef endef
ALL_ZIPS = $(REL_PROFILES) ALL_ZIPS = $(REL_PROFILES)
$(foreach zt,$(ALL_ZIPS),$(eval $(call download-relup-packages,$(zt)))) $(foreach zt,$(ALL_ZIPS),$(eval $(call download-relup-packages,$(zt))))
@ -216,11 +222,8 @@ $(foreach zt,$(ALL_DOCKERS),$(eval $(call gen-docker-target,$(zt))))
.PHONY: .PHONY:
conf-segs: conf-segs:
@scripts/merge-config.escript @$(SCRIPTS)/merge-config.escript
@scripts/merge-i18n.escript @$(SCRIPTS)/merge-i18n.escript
prepare-build-deps:
@scripts/prepare-build-deps.sh
## elixir target is to create release packages using Elixir's Mix ## elixir target is to create release packages using Elixir's Mix
.PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir) .PHONY: $(REL_PROFILES:%=%-elixir) $(PKG_PROFILES:%=%-elixir)
@ -247,6 +250,6 @@ $(foreach tt,$(ALL_ELIXIR_TGZS),$(eval $(call gen-elixir-tgz-target,$(tt))))
.PHONY: fmt .PHONY: fmt
fmt: $(REBAR) fmt: $(REBAR)
@./scripts/erlfmt -w '{apps,lib-ee}/*/{src,include,test}/**/*.{erl,hrl,app.src}' @$(SCRIPTS)/erlfmt -w '{apps,lib-ee}/*/{src,include,test}/**/*.{erl,hrl,app.src}'
@./scripts/erlfmt -w 'rebar.config.erl' @$(SCRIPTS)/erlfmt -w 'rebar.config.erl'
@mix format @mix format


@ -24,7 +24,7 @@ EMQX 自 2013 年在 GitHub 发布开源版本以来,获得了来自 50 多个
 #### EMQX Cloud

-使用 EMQX 最简单的方式是在 EMQX Cloud 上创建完全托管的 MQTT 服务。[免费试用 EMQX Cloud](https://www.emqx.com/zh/signup?continue=https%3A%2F%2Fcloud.emqx.com%2Fconsole%2F),无需绑定信用卡。
+使用 EMQX 最简单的方式是在 EMQX Cloud 上创建完全托管的 MQTT 服务。[免费试用 EMQX Cloud](https://www.emqx.com/zh/signup?utm_source=github.com&utm_medium=referral&utm_campaign=emqx-readme-to-cloud&continue=https://cloud.emqx.com/console/deployments/0?oper=new),无需绑定信用卡。

 #### 使用 Docker 运行 EMQX


@ -15,7 +15,7 @@ English | [简体中文](./README-CN.md) | [日本語](./README-JP.md) | [рус
 EMQX is the most scalable and popular open-source MQTT broker with a high performance that connects 100M+ IoT devices in 1 cluster at 1ms latency. Move and process millions of MQTT messages per second.

-The EMQX v5.0 has been verified in [test scenarios](https://www.emqx.com/en/blog/reaching-100m-mqtt-connections-with-emqx-5-0) to scale to 100 million concurrent device connections, which is a critically important milestone for IoT designers. It also comes with plenty of exciting new features and huge performance improvements, including a more powerful rule engine, enhanced security management, Mria database extension, and much more to enhance the scalability of IoT applications.
+The EMQX v5.0 has been verified in [test scenarios](https://www.emqx.com/en/blog/reaching-100m-mqtt-connections-with-emqx-5-0) to scale to 100 million concurrent device connections, which is a critically important milestone for IoT designers. It also comes with plenty of exciting new features and huge performance improvements, including a more powerful [rule engine](https://www.emqx.com/en/solutions/iot-rule-engine), enhanced security management, Mria database extension, and much more to enhance the scalability of IoT applications.

 During the last several years, EMQX has gained popularity among IoT companies and is used by more than 20,000 global users from over 50 countries, with more than 100 million IoT device connections supported worldwide.
@ -25,7 +25,7 @@ For more information, please visit [EMQX homepage](https://www.emqx.io/).
#### EMQX Cloud #### EMQX Cloud
The simplest way to set up EMQX is to create a managed deployment with EMQX Cloud. You can [try EMQX Cloud for free](https://www.emqx.com/en/signup?continue=https%3A%2F%2Fcloud-intl.emqx.com%2Fconsole%2F), no credit card required. The simplest way to set up EMQX is to create a managed deployment with EMQX Cloud. You can [try EMQX Cloud for free](https://www.emqx.com/en/signup?utm_source=github.com&utm_medium=referral&utm_campaign=emqx-readme-to-cloud&continue=https://cloud-intl.emqx.com/console/deployments/0?oper=new), no credit card required.
#### Run EMQX using Docker #### Run EMQX using Docker


@ -89,10 +89,10 @@ the check/consume will succeed, but it will be forced to wait for a short period
} }
} }
per_client { client {
desc { desc {
en: """The rate limit for each user of the bucket, this field is not required""" en: """The rate limit for each user of the bucket"""
zh: """对桶的每个使用者的速率控制设置,这个不是必须的""" zh: """对桶的每个使用者的速率控制设置"""
} }
label: { label: {
en: """Per Client""" en: """Per Client"""
@ -124,20 +124,6 @@ the check/consume will succeed, but it will be forced to wait for a short period
} }
} }
batch {
desc {
en: """The batch limiter.
This is used for EMQX internal batch operation
e.g. limit the retainer's deliver rate"""
zh: """批量操作速率控制器。
这是给 EMQX 内部的批量操作使用的,比如用来控制保留消息的派发速率"""
}
label: {
en: """Batch"""
zh: """批量操作"""
}
}
message_routing { message_routing {
desc { desc {
en: """The message routing limiter. en: """The message routing limiter.
@ -193,4 +179,12 @@ Once the limit is reached, the restricted client will be slow down even be hung
zh: """流入字节率""" zh: """流入字节率"""
} }
} }
internal {
desc {
en: """Limiter for EMQX internal app."""
zh: """EMQX 内部功能所用限制器。"""
}
}
} }
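Read together with the reworked limiter schema later in this PR, the renamed `client` section and the new `internal` type suggest configs of roughly this shape. This is only a sketch: field names follow the new schema fields shown in this diff (node-level `rate`/`burst`, per-client `client` sections), while the concrete values and paths are illustrative assumptions, not taken from the change set.

```hocon
limiter {
  # node-level limit for one limiter type (node_opts: rate/burst)
  message_routing { rate = "2000/s", burst = 0 }

  # per-client defaults (client_opts), one sub-section per limiter type
  client.message_routing { rate = "100/s" }
}
```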


@ -1115,6 +1115,7 @@ special characters are allowed.
en: """Dispatch strategy for shared subscription. en: """Dispatch strategy for shared subscription.
- `random`: dispatch the message to a random selected subscriber - `random`: dispatch the message to a random selected subscriber
- `round_robin`: select the subscribers in a round-robin manner - `round_robin`: select the subscribers in a round-robin manner
- `round_robin_per_group`: select the subscribers in round-robin fashion within each shared subscriber group
- `sticky`: always use the last selected subscriber to dispatch, - `sticky`: always use the last selected subscriber to dispatch,
until the subscriber disconnects. until the subscriber disconnects.
- `hash`: select the subscribers by the hash of `clientIds` - `hash`: select the subscribers by the hash of `clientIds`
@ -1124,6 +1125,7 @@ subscriber was not found, send to a random subscriber cluster-wide
cn: """共享订阅的分发策略名称。 cn: """共享订阅的分发策略名称。
- `random`: 随机选择一个组内成员; - `random`: 随机选择一个组内成员;
- `round_robin`: 循环选择下一个成员; - `round_robin`: 循环选择下一个成员;
- `round_robin_per_group`: 在共享组内循环选择下一个成员;
- `sticky`: 使用上一次选中的成员; - `sticky`: 使用上一次选中的成员;
- `hash`: 根据 ClientID 哈希映射到一个成员; - `hash`: 根据 ClientID 哈希映射到一个成员;
- `local`: 随机分发到节点本地成成员,如果本地成员不存在,则随机分发 - `local`: 随机分发到节点本地成成员,如果本地成员不存在,则随机分发
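For reference, the new strategy added above would be enabled with something like the following. This is a sketch only; it assumes the strategy key sits under the `broker` root as in the existing 5.0 config layout.

```hocon
broker {
  shared_subscription_strategy = round_robin_per_group
}
```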
@ -1841,6 +1843,23 @@ Maximum time duration allowed for the handshake to complete
} }
} }
server_ssl_opts_schema_gc_after_handshake {
desc {
en: """
Memory usage tuning. If enabled, will immediately perform a garbage collection after
the TLS/SSL handshake.
"""
zh: """
内存使用调优。如果启用将在TLS/SSL握手完成后立即执行垃圾回收。
TLS/SSL握手建立后立即进行GC。
"""
}
label: {
en: "Perform GC after handshake"
zh: "握手后执行GC"
}
}
fields_listeners_tcp { fields_listeners_tcp {
desc { desc {
en: """ en: """
@ -1948,11 +1967,10 @@ Path to the secret key file.
 fields_mqtt_quic_listener_idle_timeout {
     desc {
         en: """
-Close transport-layer connections from the clients that have not sent MQTT CONNECT
-message within this interval.
+How long a connection can go idle before it is gracefully shut down. 0 to disable
 """
         zh: """
-关闭在此间隔内未发送 MQTT CONNECT 消息的客户端的传输层连接。
+一个连接在被关闭之前可以空闲多长时间。0表示禁用
 """
     }
     label: {
@ -1961,6 +1979,36 @@ message within this interval.
} }
} }
fields_mqtt_quic_listener_handshake_idle_timeout {
desc {
en: """
How long a handshake can idle before it is discarded.
"""
zh: """
一个握手在被丢弃之前可以空闲多长时间。
"""
}
label: {
en: "Handshake Idle Timeout"
zh: "握手发呆超时时间"
}
}
fields_mqtt_quic_listener_keep_alive_interval {
desc {
en: """
How often to send PING frames to keep a connection alive. 0 means disabled.
"""
zh: """
发送 PING 帧的频率,以保活连接. 设为0禁用
"""
}
label: {
en: "Keep Alive Interval"
zh: "PING 保活频率"
}
}
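The three QUIC timers and the TLS GC switch documented above map onto listener options along these lines. The option names come from this change set; the listener names, bind addresses, and values are illustrative assumptions only.

```hocon
listeners.quic.default {
  bind = "0.0.0.0:14567"
  idle_timeout = "60s"             # close connections idle longer than this (0 disables)
  handshake_idle_timeout = "10s"   # discard handshakes that stall longer than this
  keep_alive_interval = "30s"      # how often to send PING frames (0 disables)
}

listeners.ssl.default {
  bind = "0.0.0.0:8883"
  ssl_options.gc_after_handshake = true   # run GC right after the TLS handshake
}
```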
base_listener_bind { base_listener_bind {
desc { desc {
en: """ en: """


@ -32,7 +32,7 @@
 %% `apps/emqx/src/bpapi/README.md'

 %% Community edition
--define(EMQX_RELEASE_CE, "5.0.4").
+-define(EMQX_RELEASE_CE, "5.0.5-beta.1").

 %% Enterprise edition
 -define(EMQX_RELEASE_EE, "5.0.0-beta.1").


@ -26,10 +26,10 @@
 {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}},
 {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
 {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
-{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.3"}}},
+{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}},
 {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.3"}}},
 {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
-{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.29.0"}}},
+{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.30.0"}}},
 {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
 {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
 {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}


@ -252,11 +252,12 @@ init(
<<>> -> undefined; <<>> -> undefined;
MP -> MP MP -> MP
end, end,
ListenerId = emqx_listeners:listener_id(Type, Listener),
ClientInfo = set_peercert_infos( ClientInfo = set_peercert_infos(
Peercert, Peercert,
#{ #{
zone => Zone, zone => Zone,
listener => emqx_listeners:listener_id(Type, Listener), listener => ListenerId,
protocol => Protocol, protocol => Protocol,
peerhost => PeerHost, peerhost => PeerHost,
sockport => SockPort, sockport => SockPort,
@ -278,7 +279,9 @@ init(
outbound => #{} outbound => #{}
}, },
auth_cache = #{}, auth_cache = #{},
quota = emqx_limiter_container:get_limiter_by_names([?LIMITER_ROUTING], LimiterCfg), quota = emqx_limiter_container:get_limiter_by_types(
ListenerId, [?LIMITER_ROUTING], LimiterCfg
),
timers = #{}, timers = #{},
conn_state = idle, conn_state = idle,
takeover = false, takeover = false,
@ -354,7 +357,7 @@ handle_in(?CONNECT_PACKET(ConnPkt) = Packet, Channel) ->
}, },
case authenticate(?CONNECT_PACKET(NConnPkt), NChannel1) of case authenticate(?CONNECT_PACKET(NConnPkt), NChannel1) of
{ok, Properties, NChannel2} -> {ok, Properties, NChannel2} ->
process_connect(Properties, ensure_connected(NChannel2)); process_connect(Properties, NChannel2);
{continue, Properties, NChannel2} -> {continue, Properties, NChannel2} ->
handle_out(auth, {?RC_CONTINUE_AUTHENTICATION, Properties}, NChannel2); handle_out(auth, {?RC_CONTINUE_AUTHENTICATION, Properties}, NChannel2);
{error, ReasonCode} -> {error, ReasonCode} ->
@ -378,7 +381,7 @@ handle_in(
{ok, NProperties, NChannel} -> {ok, NProperties, NChannel} ->
case ConnState of case ConnState of
connecting -> connecting ->
process_connect(NProperties, ensure_connected(NChannel)); process_connect(NProperties, NChannel);
_ -> _ ->
handle_out( handle_out(
auth, auth,
@ -608,7 +611,7 @@ process_connect(
case emqx_cm:open_session(CleanStart, ClientInfo, ConnInfo) of case emqx_cm:open_session(CleanStart, ClientInfo, ConnInfo) of
{ok, #{session := Session, present := false}} -> {ok, #{session := Session, present := false}} ->
NChannel = Channel#channel{session = Session}, NChannel = Channel#channel{session = Session},
handle_out(connack, {?RC_SUCCESS, sp(false), AckProps}, NChannel); handle_out(connack, {?RC_SUCCESS, sp(false), AckProps}, ensure_connected(NChannel));
{ok, #{session := Session, present := true, pendings := Pendings}} -> {ok, #{session := Session, present := true, pendings := Pendings}} ->
Pendings1 = lists:usort(lists:append(Pendings, emqx_misc:drain_deliver())), Pendings1 = lists:usort(lists:append(Pendings, emqx_misc:drain_deliver())),
NChannel = Channel#channel{ NChannel = Channel#channel{
@ -616,7 +619,7 @@ process_connect(
resuming = true, resuming = true,
pendings = Pendings1 pendings = Pendings1
}, },
handle_out(connack, {?RC_SUCCESS, sp(true), AckProps}, NChannel); handle_out(connack, {?RC_SUCCESS, sp(true), AckProps}, ensure_connected(NChannel));
{error, client_id_unavailable} -> {error, client_id_unavailable} ->
handle_out(connack, ?RC_CLIENT_IDENTIFIER_NOT_VALID, Channel); handle_out(connack, ?RC_CLIENT_IDENTIFIER_NOT_VALID, Channel);
{error, Reason} -> {error, Reason} ->
@ -1199,9 +1202,6 @@ handle_call(
disconnect_and_shutdown(takenover, AllPendings, Channel); disconnect_and_shutdown(takenover, AllPendings, Channel);
handle_call(list_authz_cache, Channel) -> handle_call(list_authz_cache, Channel) ->
{reply, emqx_authz_cache:list_authz_cache(), Channel}; {reply, emqx_authz_cache:list_authz_cache(), Channel};
handle_call({quota, Bucket}, #channel{quota = Quota} = Channel) ->
Quota2 = emqx_limiter_container:update_by_name(message_routing, Bucket, Quota),
reply(ok, Channel#channel{quota = Quota2});
handle_call( handle_call(
{keepalive, Interval}, {keepalive, Interval},
Channel = #channel{ Channel = #channel{


@ -321,7 +321,7 @@ init_state(
}, },
LimiterTypes = [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterTypes = [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN],
Limiter = emqx_limiter_container:get_limiter_by_names(LimiterTypes, LimiterCfg), Limiter = emqx_limiter_container:get_limiter_by_types(Listener, LimiterTypes, LimiterCfg),
FrameOpts = #{ FrameOpts = #{
strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]), strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]),
@ -672,12 +672,6 @@ handle_call(_From, info, State) ->
{reply, info(State), State}; {reply, info(State), State};
handle_call(_From, stats, State) -> handle_call(_From, stats, State) ->
{reply, stats(State), State}; {reply, stats(State), State};
handle_call(_From, {ratelimit, Changes}, State = #state{limiter = Limiter}) ->
Fun = fun({Type, Bucket}, Acc) ->
emqx_limiter_container:update_by_name(Type, Bucket, Acc)
end,
Limiter2 = lists:foldl(Fun, Limiter, Changes),
{reply, ok, State#state{limiter = Limiter2}};
handle_call(_From, Req, State = #state{channel = Channel}) -> handle_call(_From, Req, State = #state{channel = Channel}) ->
case emqx_channel:handle_call(Req, Channel) of case emqx_channel:handle_call(Req, Channel) of
{reply, Reply, NChannel} -> {reply, Reply, NChannel} ->
@ -714,8 +708,6 @@ handle_timeout(
TRef, TRef,
keepalive, keepalive,
State = #state{ State = #state{
transport = Transport,
socket = Socket,
channel = Channel channel = Channel
} }
) -> ) ->
@ -723,12 +715,9 @@ handle_timeout(
disconnected -> disconnected ->
{ok, State}; {ok, State};
_ -> _ ->
case Transport:getstat(Socket, [recv_oct]) of %% recv_pkt: valid MQTT message
{ok, [{recv_oct, RecvOct}]} -> RecvCnt = emqx_pd:get_counter(recv_pkt),
handle_timeout(TRef, {keepalive, RecvOct}, State); handle_timeout(TRef, {keepalive, RecvCnt}, State)
{error, Reason} ->
handle_info({sock_error, Reason}, State)
end
end; end;
handle_timeout(TRef, Msg, State) -> handle_timeout(TRef, Msg, State) ->
with_channel(handle_timeout, [TRef, Msg], State). with_channel(handle_timeout, [TRef, Msg], State).


@ -19,12 +19,13 @@
-behaviour(esockd_generic_limiter). -behaviour(esockd_generic_limiter).
%% API %% API
-export([new_create_options/2, create/1, delete/1, consume/2]). -export([new_create_options/3, create/1, delete/1, consume/2]).
-type create_options() :: #{ -type create_options() :: #{
module := ?MODULE, module := ?MODULE,
id := emqx_limiter_schema:limiter_id(),
type := emqx_limiter_schema:limiter_type(), type := emqx_limiter_schema:limiter_type(),
bucket := emqx_limiter_schema:bucket_name() bucket := hocons:config()
}. }.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -32,15 +33,16 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-spec new_create_options( -spec new_create_options(
emqx_limiter_schema:limiter_id(),
emqx_limiter_schema:limiter_type(), emqx_limiter_schema:limiter_type(),
emqx_limiter_schema:bucket_name() hocons:config()
) -> create_options(). ) -> create_options().
new_create_options(Type, BucketName) -> new_create_options(Id, Type, BucketCfg) ->
#{module => ?MODULE, type => Type, bucket => BucketName}. #{module => ?MODULE, id => Id, type => Type, bucket => BucketCfg}.
-spec create(create_options()) -> esockd_generic_limiter:limiter(). -spec create(create_options()) -> esockd_generic_limiter:limiter().
create(#{module := ?MODULE, type := Type, bucket := BucketName}) -> create(#{module := ?MODULE, id := Id, type := Type, bucket := BucketCfg}) ->
{ok, Limiter} = emqx_limiter_server:connect(Type, BucketName), {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfg),
#{module => ?MODULE, name => Type, limiter => Limiter}. #{module => ?MODULE, name => Type, limiter => Limiter}.
delete(_GLimiter) -> delete(_GLimiter) ->


@ -22,10 +22,8 @@
%% API %% API
-export([ -export([
new/0, new/1, new/2, get_limiter_by_types/3,
get_limiter_by_names/2,
add_new/3, add_new/3,
update_by_name/3,
set_retry_context/2, set_retry_context/2,
check/3, check/3,
retry/2, retry/2,
@ -48,10 +46,10 @@
}. }.
-type future() :: pos_integer(). -type future() :: pos_integer().
-type limiter_id() :: emqx_limiter_schema:limiter_id().
-type limiter_type() :: emqx_limiter_schema:limiter_type(). -type limiter_type() :: emqx_limiter_schema:limiter_type().
-type limiter() :: emqx_htb_limiter:limiter(). -type limiter() :: emqx_htb_limiter:limiter().
-type retry_context() :: emqx_htb_limiter:retry_context(). -type retry_context() :: emqx_htb_limiter:retry_context().
-type bucket_name() :: emqx_limiter_schema:bucket_name().
-type millisecond() :: non_neg_integer(). -type millisecond() :: non_neg_integer().
-type check_result() :: -type check_result() ::
{ok, container()} {ok, container()}
@ -64,46 +62,24 @@
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% API %% API
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-spec new() -> container().
new() ->
new([]).
%% @doc generate default data according to the type of limiter
-spec new(list(limiter_type())) -> container().
new(Types) ->
new(Types, #{}).
-spec new(
list(limiter_type()),
#{limiter_type() => emqx_limiter_schema:bucket_name()}
) -> container().
new(Types, Names) ->
get_limiter_by_names(Types, Names).
%% @doc generate a container %% @doc generate a container
%% according to the type of limiter and the bucket name configuration of the limiter %% according to the type of limiter and the bucket name configuration of the limiter
%% @end %% @end
-spec get_limiter_by_names( -spec get_limiter_by_types(
limiter_id() | {atom(), atom()},
list(limiter_type()), list(limiter_type()),
#{limiter_type() => emqx_limiter_schema:bucket_name()} #{limiter_type() => hocons:config()}
) -> container(). ) -> container().
get_limiter_by_names(Types, BucketNames) -> get_limiter_by_types({Type, Listener}, Types, BucketCfgs) ->
Id = emqx_listeners:listener_id(Type, Listener),
get_limiter_by_types(Id, Types, BucketCfgs);
get_limiter_by_types(Id, Types, BucketCfgs) ->
Init = fun(Type, Acc) -> Init = fun(Type, Acc) ->
{ok, Limiter} = emqx_limiter_server:connect(Type, BucketNames), {ok, Limiter} = emqx_limiter_server:connect(Id, Type, BucketCfgs),
add_new(Type, Limiter, Acc) add_new(Type, Limiter, Acc)
end, end,
lists:foldl(Init, #{retry_ctx => undefined}, Types). lists:foldl(Init, #{retry_ctx => undefined}, Types).
%% @doc add the specified type of limiter to the container
-spec update_by_name(
limiter_type(),
bucket_name() | #{limiter_type() => bucket_name()},
container()
) -> container().
update_by_name(Type, Buckets, Container) ->
{ok, Limiter} = emqx_limiter_server:connect(Type, Buckets),
add_new(Type, Limiter, Container).
-spec add_new(limiter_type(), limiter(), container()) -> container(). -spec add_new(limiter_type(), limiter(), container()) -> container().
add_new(Type, Limiter, Container) -> add_new(Type, Limiter, Container) ->
Container#{ Container#{


@ -24,11 +24,9 @@
%% API %% API
-export([ -export([
start_link/0, start_link/0,
find_bucket/1,
find_bucket/2, find_bucket/2,
insert_bucket/2,
insert_bucket/3, insert_bucket/3,
make_path/2, delete_bucket/2,
post_config_update/5 post_config_update/5
]). ]).
@ -50,20 +48,19 @@
format_status/2 format_status/2
]). ]).
-export_type([path/0]). -type limiter_id() :: emqx_limiter_schema:limiter_id().
-type path() :: list(atom()).
-type limiter_type() :: emqx_limiter_schema:limiter_type(). -type limiter_type() :: emqx_limiter_schema:limiter_type().
-type bucket_name() :: emqx_limiter_schema:bucket_name(). -type uid() :: {limiter_id(), limiter_type()}.
%% counter record in ets table %% counter record in ets table
-record(bucket, { -record(bucket, {
path :: path(), uid :: uid(),
bucket :: bucket_ref() bucket :: bucket_ref()
}). }).
-type bucket_ref() :: emqx_limiter_bucket_ref:bucket_ref(). -type bucket_ref() :: emqx_limiter_bucket_ref:bucket_ref().
-define(UID(Id, Type), {Id, Type}).
-define(TAB, emqx_limiter_counters). -define(TAB, emqx_limiter_counters).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -85,14 +82,10 @@ restart_server(Type) ->
stop_server(Type) -> stop_server(Type) ->
emqx_limiter_server_sup:stop(Type). emqx_limiter_server_sup:stop(Type).
-spec find_bucket(limiter_type(), bucket_name()) -> -spec find_bucket(limiter_id(), limiter_type()) ->
{ok, bucket_ref()} | undefined. {ok, bucket_ref()} | undefined.
find_bucket(Type, BucketName) -> find_bucket(Id, Type) ->
find_bucket(make_path(Type, BucketName)). case ets:lookup(?TAB, ?UID(Id, Type)) of
-spec find_bucket(path()) -> {ok, bucket_ref()} | undefined.
find_bucket(Path) ->
case ets:lookup(?TAB, Path) of
[#bucket{bucket = Bucket}] -> [#bucket{bucket = Bucket}] ->
{ok, Bucket}; {ok, Bucket};
_ -> _ ->
@ -100,20 +93,19 @@ find_bucket(Path) ->
end. end.
-spec insert_bucket( -spec insert_bucket(
limiter_id(),
limiter_type(), limiter_type(),
bucket_name(),
bucket_ref() bucket_ref()
) -> boolean(). ) -> boolean().
insert_bucket(Type, BucketName, Bucket) -> insert_bucket(Id, Type, Bucket) ->
inner_insert_bucket(make_path(Type, BucketName), Bucket). ets:insert(
?TAB,
#bucket{uid = ?UID(Id, Type), bucket = Bucket}
).
-spec insert_bucket(path(), bucket_ref()) -> true. -spec delete_bucket(limiter_id(), limiter_type()) -> true.
insert_bucket(Path, Bucket) -> delete_bucket(Type, Id) ->
inner_insert_bucket(Path, Bucket). ets:delete(?TAB, ?UID(Id, Type)).
-spec make_path(limiter_type(), bucket_name()) -> path().
make_path(Type, BucketName) ->
[Type | BucketName].
post_config_update([limiter, Type], _Config, NewConf, _OldConf, _AppEnvs) -> post_config_update([limiter, Type], _Config, NewConf, _OldConf, _AppEnvs) ->
Config = maps:get(Type, NewConf), Config = maps:get(Type, NewConf),
@ -159,7 +151,7 @@ init([]) ->
set, set,
public, public,
named_table, named_table,
{keypos, #bucket.path}, {keypos, #bucket.uid},
{write_concurrency, true}, {write_concurrency, true},
{read_concurrency, true}, {read_concurrency, true},
{heir, erlang:whereis(emqx_limiter_sup), none} {heir, erlang:whereis(emqx_limiter_sup), none}
@ -266,9 +258,3 @@ format_status(_Opt, Status) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Internal functions %% Internal functions
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-spec inner_insert_bucket(path(), bucket_ref()) -> true.
inner_insert_bucket(Path, Bucket) ->
ets:insert(
?TAB,
#bucket{path = Path, bucket = Bucket}
).


@ -41,8 +41,10 @@
| message_in | message_in
| connection | connection
| message_routing | message_routing
| batch. %% internal limiter for unclassified resources
| internal.
-type limiter_id() :: atom().
-type bucket_name() :: atom(). -type bucket_name() :: atom().
-type rate() :: infinity | float(). -type rate() :: infinity | float().
-type burst_rate() :: 0 | float(). -type burst_rate() :: 0 | float().
@ -76,7 +78,7 @@
bucket_name/0 bucket_name/0
]). ]).
-export_type([limiter_type/0, bucket_path/0]). -export_type([limiter_id/0, limiter_type/0, bucket_path/0]).
-define(UNIT_TIME_IN_MS, 1000). -define(UNIT_TIME_IN_MS, 1000).
@ -87,52 +89,50 @@ roots() -> [limiter].
fields(limiter) -> fields(limiter) ->
[ [
{Type, {Type,
?HOCON(?R_REF(limiter_opts), #{ ?HOCON(?R_REF(node_opts), #{
desc => ?DESC(Type), desc => ?DESC(Type),
default => make_limiter_default(Type) default => #{}
})} })}
|| Type <- types() || Type <- types()
] ++
[
{client,
?HOCON(
?R_REF(client_fields),
#{
desc => ?DESC(client),
default => maps:from_list([
{erlang:atom_to_binary(Type), #{}}
|| Type <- types()
])
}
)}
]; ];
fields(limiter_opts) -> fields(node_opts) ->
[ [
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})}, {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
{burst, {burst,
?HOCON(burst_rate(), #{ ?HOCON(burst_rate(), #{
desc => ?DESC(burst), desc => ?DESC(burst),
default => 0 default => 0
})}, })}
{bucket, ];
?HOCON( fields(client_fields) ->
?MAP("bucket_name", ?R_REF(bucket_opts)), [
#{ {Type,
desc => ?DESC(bucket_cfg), ?HOCON(?R_REF(client_opts), #{
default => #{<<"default">> => #{}}, desc => ?DESC(Type),
example => #{ default => #{}
<<"mybucket-name">> => #{ })}
<<"rate">> => <<"infinity">>, || Type <- types()
<<"capcity">> => <<"infinity">>,
<<"initial">> => <<"100">>,
<<"per_client">> => #{<<"rate">> => <<"infinity">>}
}
}
}
)}
]; ];
fields(bucket_opts) -> fields(bucket_opts) ->
[ [
{rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})}, {rate, ?HOCON(rate(), #{desc => ?DESC(rate), default => "infinity"})},
{capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => "infinity"})}, {capacity, ?HOCON(capacity(), #{desc => ?DESC(capacity), default => "infinity"})},
{initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})}, {initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})}
{per_client,
?HOCON(
?R_REF(client_bucket),
#{
default => #{},
desc => ?DESC(per_client)
}
)}
]; ];
fields(client_bucket) -> fields(client_opts) ->
[ [
{rate, ?HOCON(rate(), #{default => "infinity", desc => ?DESC(rate)})}, {rate, ?HOCON(rate(), #{default => "infinity", desc => ?DESC(rate)})},
{initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})}, {initial, ?HOCON(initial(), #{default => "0", desc => ?DESC(initial)})},
@ -177,16 +177,30 @@ fields(client_bucket) ->
default => force default => force
} }
)} )}
]. ];
fields(listener_fields) ->
bucket_fields([bytes_in, message_in, connection, message_routing], listener_client_fields);
fields(listener_client_fields) ->
client_fields([bytes_in, message_in, connection, message_routing]);
fields(Type) ->
bucket_field(Type).
desc(limiter) -> desc(limiter) ->
"Settings for the rate limiter."; "Settings for the rate limiter.";
desc(limiter_opts) -> desc(node_opts) ->
"Settings for the limiter."; "Settings for the limiter of the node level.";
desc(bucket_opts) -> desc(bucket_opts) ->
"Settings for the bucket."; "Settings for the bucket.";
desc(client_bucket) -> desc(client_opts) ->
"Settings for the client bucket."; "Settings for the client in bucket level.";
desc(client_fields) ->
"Fields of the client level.";
desc(listener_fields) ->
"Fields of the listener.";
desc(listener_client_fields) ->
"Fields of the client level of the listener.";
desc(internal) ->
"Internal limiter.";
desc(_) -> desc(_) ->
undefined. undefined.
@ -202,7 +216,7 @@ get_bucket_cfg_path(Type, BucketName) ->
[limiter, Type, bucket, BucketName]. [limiter, Type, bucket, BucketName].
types() -> types() ->
[bytes_in, message_in, connection, message_routing, batch]. [bytes_in, message_in, connection, message_routing, internal].
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Internal functions %% Internal functions
@ -322,16 +336,44 @@ apply_unit("mb", Val) -> Val * ?KILOBYTE * ?KILOBYTE;
apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE; apply_unit("gb", Val) -> Val * ?KILOBYTE * ?KILOBYTE * ?KILOBYTE;
apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit). apply_unit(Unit, _) -> throw("invalid unit:" ++ Unit).
make_limiter_default(connection) -> bucket_field(Type) when is_atom(Type) ->
fields(bucket_opts) ++
[
{client,
?HOCON(
?R_REF(?MODULE, client_opts),
#{ #{
<<"rate">> => <<"1000/s">>, desc => ?DESC(client),
<<"bucket">> => #{ required => false
<<"default">> => }
)}
].
bucket_fields(Types, ClientRef) ->
[
{Type,
?HOCON(?R_REF(?MODULE, bucket_opts), #{
desc => ?DESC(?MODULE, Type),
required => false
})}
|| Type <- Types
] ++
[
{client,
?HOCON(
?R_REF(?MODULE, ClientRef),
#{ #{
<<"rate">> => <<"1000/s">>, desc => ?DESC(client),
<<"capacity">> => 1000 required => false
} }
} )}
}; ].
make_limiter_default(_) ->
#{}. client_fields(Types) ->
[
{Type,
?HOCON(?R_REF(client_opts), #{
desc => ?DESC(Type),
required => false
})}
|| Type <- Types
].


@ -42,11 +42,13 @@
-export([ -export([
start_link/2, start_link/2,
connect/2, connect/3,
add_bucket/3,
del_bucket/2,
get_initial_val/1,
whereis/1, whereis/1,
info/1, info/1,
name/1, name/1,
get_initial_val/1,
restart/1, restart/1,
update_config/2 update_config/2
]). ]).
@ -73,16 +75,17 @@
-type state() :: #{ -type state() :: #{
type := limiter_type(), type := limiter_type(),
root := undefined | root(), root := root(),
buckets := buckets(), buckets := buckets(),
%% current counter to alloc %% current counter to alloc
counter := undefined | counters:counters_ref(), counter := counters:counters_ref(),
index := index() index := 0 | index()
}. }.
-type buckets() :: #{bucket_name() => bucket()}. -type buckets() :: #{bucket_name() => bucket()}.
-type limiter_type() :: emqx_limiter_schema:limiter_type(). -type limiter_type() :: emqx_limiter_schema:limiter_type().
-type bucket_name() :: emqx_limiter_schema:bucket_name(). -type bucket_name() :: emqx_limiter_schema:bucket_name().
-type limiter_id() :: emqx_limiter_schema:limiter_id().
-type rate() :: decimal(). -type rate() :: decimal().
-type flow() :: decimal(). -type flow() :: decimal().
-type capacity() :: decimal(). -type capacity() :: decimal().
@ -94,7 +97,7 @@
%% minimum coefficient for overloaded limiter %% minimum coefficient for overloaded limiter
-define(OVERLOAD_MIN_ALLOC, 0.3). -define(OVERLOAD_MIN_ALLOC, 0.3).
-define(CURRYING(X, F2), fun(Y) -> F2(X, Y) end). -define(COUNTER_SIZE, 8).
-export_type([index/0]). -export_type([index/0]).
-import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]). -import(emqx_limiter_decimal, [add/2, sub/2, mul/2, put_to_counter/3]).
@ -105,39 +108,49 @@
%% API %% API
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-spec connect( -spec connect(
limiter_id(),
limiter_type(), limiter_type(),
bucket_name() | #{limiter_type() => bucket_name() | undefined} bucket_name() | #{limiter_type() => bucket_name() | undefined}
) -> ) ->
{ok, emqx_htb_limiter:limiter()} | {error, _}. {ok, emqx_htb_limiter:limiter()} | {error, _}.
%% If no bucket path is set in config, there will be no limit %% If no bucket path is set in config, there will be no limit
connect(_Type, undefined) -> connect(_Id, _Type, undefined) ->
{ok, emqx_htb_limiter:make_infinity_limiter()}; {ok, emqx_htb_limiter:make_infinity_limiter()};
connect(Type, BucketName) when is_atom(BucketName) -> connect(Id, Type, Cfg) ->
case get_bucket_cfg(Type, BucketName) of case find_limiter_cfg(Type, Cfg) of
undefined -> {undefined, _} ->
?SLOG(error, #{msg => "bucket_config_not_found", type => Type, bucket => BucketName}), {ok, emqx_htb_limiter:make_infinity_limiter()};
{error, config_not_found}; {
#{ #{
rate := BucketRate, rate := BucketRate,
capacity := BucketSize, capacity := BucketSize
per_client := #{rate := CliRate, capacity := CliSize} = Cfg },
#{rate := CliRate, capacity := CliSize} = ClientCfg
} -> } ->
case emqx_limiter_manager:find_bucket(Type, BucketName) of case emqx_limiter_manager:find_bucket(Id, Type) of
{ok, Bucket} -> {ok, Bucket} ->
{ok, {ok,
if if
CliRate < BucketRate orelse CliSize < BucketSize -> CliRate < BucketRate orelse CliSize < BucketSize ->
emqx_htb_limiter:make_token_bucket_limiter(Cfg, Bucket); emqx_htb_limiter:make_token_bucket_limiter(ClientCfg, Bucket);
true -> true ->
emqx_htb_limiter:make_ref_limiter(Cfg, Bucket) emqx_htb_limiter:make_ref_limiter(ClientCfg, Bucket)
end}; end};
undefined -> undefined ->
?SLOG(error, #{msg => "bucket_not_found", type => Type, bucket => BucketName}), ?SLOG(error, #{msg => "bucket_not_found", type => Type, id => Id}),
{error, invalid_bucket} {error, invalid_bucket}
end end
end; end.
connect(Type, Paths) ->
connect(Type, maps:get(Type, Paths, undefined)). -spec add_bucket(limiter_id(), limiter_type(), hocons:config() | undefined) -> ok.
add_bucket(_Id, _Type, undefine) ->
ok;
add_bucket(Id, Type, Cfg) ->
?CALL(Type, {add_bucket, Id, Cfg}).
-spec del_bucket(limiter_id(), limiter_type()) -> ok.
del_bucket(Id, Type) ->
?CALL(Type, {del_bucket, Id}).
-spec info(limiter_type()) -> state() | {error, _}. -spec info(limiter_type()) -> state() | {error, _}.
info(Type) -> info(Type) ->
@ -213,6 +226,12 @@ handle_call(restart, _From, #{type := Type}) ->
handle_call({update_config, Type, Config}, _From, #{type := Type}) -> handle_call({update_config, Type, Config}, _From, #{type := Type}) ->
NewState = init_tree(Type, Config), NewState = init_tree(Type, Config),
{reply, ok, NewState}; {reply, ok, NewState};
handle_call({add_bucket, Id, Cfg}, _From, State) ->
NewState = do_add_bucket(Id, Cfg, State),
{reply, ok, NewState};
handle_call({del_bucket, Id}, _From, State) ->
NewState = do_del_bucket(Id, State),
{reply, ok, NewState};
handle_call(Req, _From, State) -> handle_call(Req, _From, State) ->
?SLOG(error, #{msg => "unexpected_call", call => Req}), ?SLOG(error, #{msg => "unexpected_call", call => Req}),
{reply, ignored, State}. {reply, ignored, State}.
@ -456,24 +475,14 @@ init_tree(Type) when is_atom(Type) ->
Cfg = emqx:get_config([limiter, Type]), Cfg = emqx:get_config([limiter, Type]),
init_tree(Type, Cfg). init_tree(Type, Cfg).
init_tree(Type, #{bucket := Buckets} = Cfg) -> init_tree(Type, Cfg) ->
State = #{ #{
type => Type, type => Type,
root => undefined, root => make_root(Cfg),
counter => undefined, counter => counters:new(?COUNTER_SIZE, [write_concurrency]),
index => 1, index => 0,
buckets => #{} buckets => #{}
}, }.
Root = make_root(Cfg),
{CounterNum, DelayBuckets} = make_bucket(maps:to_list(Buckets), Type, Cfg, 1, []),
State2 = State#{
root := Root,
counter := counters:new(CounterNum, [write_concurrency])
},
lists:foldl(fun(F, Acc) -> F(Acc) end, State2, DelayBuckets).
-spec make_root(hocons:confg()) -> root(). -spec make_root(hocons:confg()) -> root().
make_root(#{rate := Rate, burst := Burst}) -> make_root(#{rate := Rate, burst := Burst}) ->
@ -484,79 +493,50 @@ make_root(#{rate := Rate, burst := Burst}) ->
produced => 0.0 produced => 0.0
}. }.
make_bucket([{Name, Conf} | T], Type, GlobalCfg, CounterNum, DelayBuckets) -> do_add_bucket(Id, #{rate := Rate, capacity := Capacity} = Cfg, #{buckets := Buckets} = State) ->
Path = emqx_limiter_manager:make_path(Type, Name), case maps:get(Id, Buckets, undefined) of
Rate = get_counter_rate(Conf, GlobalCfg), undefined ->
#{capacity := Capacity} = Conf, make_bucket(Id, Cfg, State);
Initial = get_initial_val(Conf), Bucket ->
CounterNum2 = CounterNum + 1, Bucket2 = Bucket#{rate := Rate, capacity := Capacity},
InitFun = fun(#{name := BucketName} = Bucket, #{buckets := Buckets} = State) -> State#{buckets := Buckets#{Id := Bucket2}}
{Counter, Idx, State2} = alloc_counter(Path, Rate, Initial, State), end.
Bucket2 = Bucket#{counter := Counter, index := Idx},
State2#{buckets := Buckets#{BucketName => Bucket2}}
end,
make_bucket(Id, Cfg, #{index := ?COUNTER_SIZE} = State) ->
make_bucket(Id, Cfg, State#{
counter => counters:new(?COUNTER_SIZE, [write_concurrency]),
index => 0
});
make_bucket(
Id,
#{rate := Rate, capacity := Capacity} = Cfg,
#{type := Type, counter := Counter, index := Index, buckets := Buckets} = State
) ->
NewIndex = Index + 1,
Initial = get_initial_val(Cfg),
Bucket = #{ Bucket = #{
name => Name, name => Id,
rate => Rate, rate => Rate,
obtained => Initial, obtained => Initial,
correction => 0, correction => 0,
capacity => Capacity, capacity => Capacity,
counter => undefined, counter => Counter,
index => undefined index => NewIndex
}, },
_ = put_to_counter(Counter, NewIndex, Initial),
Ref = emqx_limiter_bucket_ref:new(Counter, NewIndex, Rate),
emqx_limiter_manager:insert_bucket(Id, Type, Ref),
State#{buckets := Buckets#{Id => Bucket}, index := NewIndex}.
DelayInit = ?CURRYING(Bucket, InitFun), do_del_bucket(Id, #{type := Type, buckets := Buckets} = State) ->
case maps:get(Id, Buckets, undefined) of
make_bucket( undefined ->
T, State;
Type,
GlobalCfg,
CounterNum2,
[DelayInit | DelayBuckets]
);
make_bucket([], _Type, _Global, CounterNum, DelayBuckets) ->
{CounterNum, DelayBuckets}.
-spec alloc_counter(emqx_limiter_manager:path(), rate(), capacity(), state()) ->
{counters:counters_ref(), pos_integer(), state()}.
alloc_counter(
Path,
Rate,
Initial,
#{counter := Counter, index := Index} = State
) ->
case emqx_limiter_manager:find_bucket(Path) of
{ok, #{
counter := ECounter,
index := EIndex
}} when ECounter =/= undefined ->
init_counter(Path, ECounter, EIndex, Rate, Initial, State);
_ -> _ ->
init_counter( emqx_limiter_manager:delete_bucket(Id, Type),
Path, State#{buckets := maps:remove(Id, Buckets)}
Counter,
Index,
Rate,
Initial,
State#{index := Index + 1}
)
end. end.
init_counter(Path, Counter, Index, Rate, Initial, State) ->
_ = put_to_counter(Counter, Index, Initial),
Ref = emqx_limiter_bucket_ref:new(Counter, Index, Rate),
emqx_limiter_manager:insert_bucket(Path, Ref),
{Counter, Index, State}.
%% @doc find first limited node
get_counter_rate(#{rate := Rate}, _GlobalCfg) when Rate =/= infinity ->
Rate;
get_counter_rate(_Cfg, #{rate := Rate}) when Rate =/= infinity ->
Rate;
get_counter_rate(_Cfg, _GlobalCfg) ->
emqx_limiter_schema:infinity_value().
-spec get_initial_val(hocons:config()) -> decimal(). -spec get_initial_val(hocons:config()) -> decimal().
get_initial_val( get_initial_val(
#{ #{
@ -587,8 +567,21 @@ call(Type, Msg) ->
gen_server:call(Pid, Msg) gen_server:call(Pid, Msg)
end. end.
-spec get_bucket_cfg(limiter_type(), bucket_name()) -> find_limiter_cfg(Type, #{rate := _} = Cfg) ->
undefined | limiter_not_started | hocons:config(). {Cfg, find_client_cfg(Type, maps:get(client, Cfg, undefined))};
get_bucket_cfg(Type, Bucket) -> find_limiter_cfg(Type, Cfg) ->
Path = emqx_limiter_schema:get_bucket_cfg_path(Type, Bucket), {
emqx:get_config(Path, undefined). maps:get(Type, Cfg, undefined),
find_client_cfg(Type, emqx_map_lib:deep_get([client, Type], Cfg, undefined))
}.
find_client_cfg(Type, BucketCfg) ->
NodeCfg = emqx:get_config([limiter, client, Type], undefined),
merge_client_cfg(NodeCfg, BucketCfg).
merge_client_cfg(undefined, BucketCfg) ->
BucketCfg;
merge_client_cfg(NodeCfg, undefined) ->
NodeCfg;
merge_client_cfg(NodeCfg, BucketCfg) ->
maps:merge(NodeCfg, BucketCfg).


@ -279,12 +279,19 @@ stop_listener(Type, ListenerName, #{bind := Bind} = Conf) ->
end. end.
-spec do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}. -spec do_stop_listener(atom(), atom(), map()) -> ok | {error, term()}.
do_stop_listener(Type, ListenerName, #{bind := ListenOn}) when Type == tcp; Type == ssl ->
esockd:close(listener_id(Type, ListenerName), ListenOn); do_stop_listener(Type, ListenerName, #{bind := ListenOn} = Conf) when Type == tcp; Type == ssl ->
do_stop_listener(Type, ListenerName, _Conf) when Type == ws; Type == wss -> Id = listener_id(Type, ListenerName),
cowboy:stop_listener(listener_id(Type, ListenerName)); del_limiter_bucket(Id, Conf),
do_stop_listener(quic, ListenerName, _Conf) -> esockd:close(Id, ListenOn);
quicer:stop_listener(listener_id(quic, ListenerName)). do_stop_listener(Type, ListenerName, Conf) when Type == ws; Type == wss ->
Id = listener_id(Type, ListenerName),
del_limiter_bucket(Id, Conf),
cowboy:stop_listener(Id);
do_stop_listener(quic, ListenerName, Conf) ->
Id = listener_id(quic, ListenerName),
del_limiter_bucket(Id, Conf),
quicer:stop_listener(Id).
-ifndef(TEST). -ifndef(TEST).
console_print(Fmt, Args) -> ?ULOG(Fmt, Args). console_print(Fmt, Args) -> ?ULOG(Fmt, Args).
@ -300,10 +307,12 @@ do_start_listener(_Type, _ListenerName, #{enabled := false}) ->
do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
Type == tcp; Type == ssl Type == tcp; Type == ssl
-> ->
Id = listener_id(Type, ListenerName),
add_limiter_bucket(Id, Opts),
esockd:open( esockd:open(
listener_id(Type, ListenerName), Id,
ListenOn, ListenOn,
merge_default(esockd_opts(Type, Opts)), merge_default(esockd_opts(Id, Type, Opts)),
{emqx_connection, start_link, [ {emqx_connection, start_link, [
#{ #{
listener => {Type, ListenerName}, listener => {Type, ListenerName},
@ -318,6 +327,7 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
Type == ws; Type == wss Type == ws; Type == wss
-> ->
Id = listener_id(Type, ListenerName), Id = listener_id(Type, ListenerName),
add_limiter_bucket(Id, Opts),
RanchOpts = ranch_opts(Type, ListenOn, Opts), RanchOpts = ranch_opts(Type, ListenOn, Opts),
WsOpts = ws_opts(Type, ListenerName, Opts), WsOpts = ws_opts(Type, ListenerName, Opts),
case Type of case Type of
@ -325,23 +335,31 @@ do_start_listener(Type, ListenerName, #{bind := ListenOn} = Opts) when
         wss -> cowboy:start_tls(Id, RanchOpts, WsOpts)
     end;
 %% Start MQTT/QUIC listener
-do_start_listener(quic, ListenerName, #{bind := ListenOn} = Opts) ->
+do_start_listener(quic, ListenerName, #{bind := Bind} = Opts) ->
+    ListenOn =
+        case Bind of
+            {Addr, Port} when tuple_size(Addr) == 4 ->
+                %% IPv4
+                lists:flatten(io_lib:format("~ts:~w", [inet:ntoa(Addr), Port]));
+            {Addr, Port} when tuple_size(Addr) == 8 ->
+                %% IPv6
+                lists:flatten(io_lib:format("[~ts]:~w", [inet:ntoa(Addr), Port]));
+            Port ->
+                Port
+        end,
     case [A || {quicer, _, _} = A <- application:which_applications()] of
         [_] ->
             DefAcceptors = erlang:system_info(schedulers_online) * 8,
-            IdleTimeout = timer:seconds(maps:get(idle_timeout, Opts)),
             ListenOpts = [
                 {cert, maps:get(certfile, Opts)},
                 {key, maps:get(keyfile, Opts)},
                 {alpn, ["mqtt"]},
                 {conn_acceptors, lists:max([DefAcceptors, maps:get(acceptors, Opts, 0)])},
-                {keep_alive_interval_ms, ceil(IdleTimeout / 3)},
-                {server_resumption_level, 2},
-                {idle_timeout_ms,
-                    lists:max([
-                        emqx_config:get_zone_conf(zone(Opts), [mqtt, idle_timeout]) * 3,
-                        IdleTimeout
-                    ])}
+                {keep_alive_interval_ms, maps:get(keep_alive_interval, Opts, 0)},
+                {idle_timeout_ms, maps:get(idle_timeout, Opts, 0)},
+                {handshake_idle_timeout_ms, maps:get(handshake_idle_timeout, Opts, 10000)},
+                {server_resumption_level, 2}
             ],
             ConnectionOpts = #{
                 conn_callback => emqx_quic_connection,
@ -352,9 +370,11 @@ do_start_listener(quic, ListenerName, #{bind := ListenOn} = Opts) ->
                 limiter => limiter(Opts)
             },
             StreamOpts = [{stream_callback, emqx_quic_stream}],
+            Id = listener_id(quic, ListenerName),
+            add_limiter_bucket(Id, Opts),
             quicer:start_listener(
-                listener_id(quic, ListenerName),
-                port(ListenOn),
+                Id,
+                ListenOn,
                 {ListenOpts, ConnectionOpts, StreamOpts}
             );
         [] ->
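Note: the reworked quic clause turns the configured bind into the host:port string that quicer expects. For reference, the same formatting expressions evaluated on their own (standard library only; the sample addresses and port are made up):

    %% IPv4 bind tuple -> "host:port"
    lists:flatten(io_lib:format("~ts:~w", [inet:ntoa({127, 0, 0, 1}), 14567])).
    %% => "127.0.0.1:14567"
    %% IPv6 bind tuple -> "[host]:port"
    lists:flatten(io_lib:format("[~ts]:~w", [inet:ntoa({0, 0, 0, 0, 0, 0, 0, 1}), 14567])).
    %% => "[::1]:14567"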
@ -410,16 +430,18 @@ post_config_update([listeners, Type, Name], {action, _Action, _}, NewConf, OldCo
 post_config_update(_Path, _Request, _NewConf, _OldConf, _AppEnvs) ->
     ok.
 
-esockd_opts(Type, Opts0) ->
+esockd_opts(ListenerId, Type, Opts0) ->
     Opts1 = maps:with([acceptors, max_connections, proxy_protocol, proxy_protocol_timeout], Opts0),
     Limiter = limiter(Opts0),
     Opts2 =
         case maps:get(connection, Limiter, undefined) of
             undefined ->
                 Opts1;
-            BucketName ->
+            BucketCfg ->
                 Opts1#{
-                    limiter => emqx_esockd_htb_limiter:new_create_options(connection, BucketName)
+                    limiter => emqx_esockd_htb_limiter:new_create_options(
+                        ListenerId, connection, BucketCfg
+                    )
                 }
         end,
     Opts3 = Opts2#{
@ -468,9 +490,6 @@ ip_port(Port) when is_integer(Port) ->
 ip_port({Addr, Port}) ->
     [{ip, Addr}, {port, Port}].
 
-port(Port) when is_integer(Port) -> Port;
-port({_Addr, Port}) when is_integer(Port) -> Port.
 
 esockd_access_rules(StrRules) ->
     Access = fun(S) ->
         [A, CIDR] = string:tokens(S, " "),
@ -539,6 +558,27 @@ zone(Opts) ->
 limiter(Opts) ->
     maps:get(limiter, Opts, #{}).
 
+add_limiter_bucket(Id, #{limiter := Limiter}) ->
+    maps:fold(
+        fun(Type, Cfg, _) ->
+            emqx_limiter_server:add_bucket(Id, Type, Cfg)
+        end,
+        ok,
+        maps:without([client], Limiter)
+    );
+add_limiter_bucket(_Id, _Cfg) ->
+    ok.
+
+del_limiter_bucket(Id, #{limiter := Limiters}) ->
+    lists:foreach(
+        fun(Type) ->
+            emqx_limiter_server:del_bucket(Id, Type)
+        end,
+        maps:keys(Limiters)
+    );
+del_limiter_bucket(_Id, _Cfg) ->
+    ok.
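These two helpers tie each listener to its own limiter buckets, one per limiter type, keyed by the listener id. A minimal sketch of that lifecycle (the listener id and rates are made up; the per-client section is deliberately skipped, mirroring the code above):

    Id = 'tcp:demo',
    Limiter = #{
        connection => #{rate => 1000, initial => 0, capacity => 1000},
        client => #{connection => #{rate => 100}}
    },
    %% on start: register one bucket per limiter type
    maps:fold(
        fun(Type, Cfg, _) -> emqx_limiter_server:add_bucket(Id, Type, Cfg) end,
        ok,
        maps:without([client], Limiter)
    ),
    %% ... listener serves traffic ...
    %% on stop: drop the same buckets again
    lists:foreach(
        fun(Type) -> emqx_limiter_server:del_bucket(Id, Type) end,
        maps:keys(maps:without([client], Limiter))
    ).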
enable_authn(Opts) ->
    maps:get(enable_authn, Opts, true).

View File

@ -867,11 +867,27 @@ fields("mqtt_quic_listener") ->
         {"ciphers", ciphers_schema(quic)},
         {"idle_timeout",
             sc(
-                duration(),
+                duration_ms(),
                 #{
-                    default => "15s",
+                    default => "0",
                     desc => ?DESC(fields_mqtt_quic_listener_idle_timeout)
                 }
+            )},
+        {"handshake_idle_timeout",
+            sc(
+                duration_ms(),
+                #{
+                    default => "10s",
+                    desc => ?DESC(fields_mqtt_quic_listener_handshake_idle_timeout)
+                }
+            )},
+        {"keep_alive_interval",
+            sc(
+                duration_ms(),
+                #{
+                    default => 0,
+                    desc => ?DESC(fields_mqtt_quic_listener_keep_alive_interval)
+                }
             )}
     ] ++ base_listener(14567);
 fields("ws_opts") ->
@ -905,7 +921,7 @@ fields("ws_opts") ->
                 duration(),
                 #{
                     default => "7200s",
-                    desc => ?DESC(fields_mqtt_quic_listener_idle_timeout)
+                    desc => ?DESC(fields_ws_opts_idle_timeout)
                 }
             )},
         {"max_frame_size",
@ -1160,7 +1176,15 @@ fields("broker") ->
             )},
         {"shared_subscription_strategy",
             sc(
-                hoconsc:enum([random, round_robin, sticky, local, hash_topic, hash_clientid]),
+                hoconsc:enum([
+                    random,
+                    round_robin,
+                    round_robin_per_group,
+                    sticky,
+                    local,
+                    hash_topic,
+                    hash_clientid
+                ]),
                 #{
                     default => round_robin,
                     desc => ?DESC(broker_shared_subscription_strategy)
@ -1200,7 +1224,15 @@ fields("shared_subscription_group") ->
     [
         {"strategy",
             sc(
-                hoconsc:enum([random, round_robin, sticky, local, hash_topic, hash_clientid]),
+                hoconsc:enum([
+                    random,
+                    round_robin,
+                    round_robin_per_group,
+                    sticky,
+                    local,
+                    hash_topic,
+                    hash_clientid
+                ]),
                 #{
                     default => random,
                     desc => ?DESC(shared_subscription_strategy_enum)
@ -1619,10 +1651,15 @@ base_listener(Bind) ->
             )},
         {"limiter",
             sc(
-                map("ratelimit_name", emqx_limiter_schema:bucket_name()),
+                ?R_REF(
+                    emqx_limiter_schema,
+                    listener_fields
+                ),
                 #{
                     desc => ?DESC(base_listener_limiter),
-                    default => #{<<"connection">> => <<"default">>}
+                    default => #{
+                        <<"connection">> => #{<<"rate">> => <<"1000/s">>, <<"capacity">> => 1000}
+                    }
                 }
             )},
         {"enable_authn",
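In short, a listener no longer refers to a shared, named bucket; it carries its own bucket settings. The two default values side by side (taken from the schema defaults in this hunk):

    %% old default: reference to a named bucket
    #{<<"connection">> => <<"default">>}.
    %% new default: the bucket config is embedded in the listener
    #{<<"connection">> => #{<<"rate">> => <<"1000/s">>, <<"capacity">> => 1000}}.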
@ -1948,6 +1985,14 @@ server_ssl_opts_schema(Defaults, IsRanchListener) ->
                 }
             )}
             || IsRanchListener
+        ] ++
+            [
+                {"gc_after_handshake",
+                    sc(boolean(), #{
+                        default => false,
+                        desc => ?DESC(server_ssl_opts_schema_gc_after_handshake)
+                    })}
+                || not IsRanchListener
+            ]
     ].

View File

@ -72,6 +72,7 @@
 -type strategy() ::
     random
     | round_robin
+    | round_robin_per_group
     | sticky
     | local
     %% same as hash_clientid, backward compatible
@ -81,6 +82,7 @@
 -define(SERVER, ?MODULE).
 -define(TAB, emqx_shared_subscription).
+-define(SHARED_SUBS_ROUND_ROBIN_COUNTER, emqx_shared_subscriber_round_robin_counter).
 -define(SHARED_SUBS, emqx_shared_subscriber).
 -define(ALIVE_SUBS, emqx_alive_shared_subscribers).
 -define(SHARED_SUB_QOS1_DISPATCH_TIMEOUT_SECONDS, 5).
@ -315,7 +317,14 @@ do_pick_subscriber(Group, Topic, round_robin, _ClientId, _SourceTopic, Count) ->
             N -> (N + 1) rem Count
         end,
     _ = erlang:put({shared_sub_round_robin, Group, Topic}, Rem),
-    Rem + 1.
+    Rem + 1;
+do_pick_subscriber(Group, Topic, round_robin_per_group, _ClientId, _SourceTopic, Count) ->
+    %% Reset the counter to 1 once it would exceed the subscriber count, so it never grows
+    %% larger than the current number of subscribers.
+    %% If no counter exists yet for the group/topic (e.g. after a configuration change),
+    %% create one starting at 0.
+    ets:update_counter(?SHARED_SUBS_ROUND_ROBIN_COUNTER, {Group, Topic}, {2, 1, Count, 1}, {
+        {Group, Topic}, 0
+    }).
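The single ets:update_counter/4 call above does all of the wrap-around bookkeeping for round_robin_per_group. A self-contained illustration of the same update spec (plain OTP; the table name, key and subscriber count are made up):

    %% {2, 1, Count, 1}: bump element 2 by 1; once the result would exceed Count, reset it to 1.
    %% The 4th argument is the default object inserted when the key does not exist yet.
    Tab = ets:new(rr_counter_demo, [set, public]),
    Key = {<<"group1">>, <<"t/1">>},
    Count = 2,
    Picks = [ets:update_counter(Tab, Key, {2, 1, Count, 1}, {Key, 0}) || _ <- lists:seq(1, 5)],
    %% Picks =:= [1, 2, 1, 2, 1] -- subscribers are picked in turn, and the counter never passes Count
    true = ets:delete(Tab).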
subscribers(Group, Topic) ->
    ets:select(?TAB, [{{emqx_shared_subscription, Group, Topic, '$1'}, [], ['$1']}]).
@ -330,6 +339,7 @@ init([]) ->
     {atomic, PMon} = mria:transaction(?SHARED_SUB_SHARD, fun init_monitors/0),
     ok = emqx_tables:new(?SHARED_SUBS, [protected, bag]),
     ok = emqx_tables:new(?ALIVE_SUBS, [protected, set, {read_concurrency, true}]),
+    ok = emqx_tables:new(?SHARED_SUBS_ROUND_ROBIN_COUNTER, [public, set, {write_concurrency, true}]),
     {ok, update_stats(#state{pmon = PMon})}.
init_monitors() -> init_monitors() ->
@ -348,12 +358,14 @@ handle_call({subscribe, Group, Topic, SubPid}, _From, State = #state{pmon = PMon
         false -> ok = emqx_router:do_add_route(Topic, {Group, node()})
     end,
     ok = maybe_insert_alive_tab(SubPid),
+    ok = maybe_insert_round_robin_count({Group, Topic}),
     true = ets:insert(?SHARED_SUBS, {{Group, Topic}, SubPid}),
     {reply, ok, update_stats(State#state{pmon = emqx_pmon:monitor(SubPid, PMon)})};
 handle_call({unsubscribe, Group, Topic, SubPid}, _From, State) ->
     mria:dirty_delete_object(?TAB, record(Group, Topic, SubPid)),
     true = ets:delete_object(?SHARED_SUBS, {{Group, Topic}, SubPid}),
     delete_route_if_needed({Group, Topic}),
+    maybe_delete_round_robin_count({Group, Topic}),
     {reply, ok, State};
 handle_call(Req, _From, State) ->
     ?SLOG(error, #{msg => "unexpected_call", req => Req}),
@ -395,6 +407,25 @@ code_change(_OldVsn, State, _Extra) ->
%% Internal functions
%%--------------------------------------------------------------------
maybe_insert_round_robin_count({Group, _Topic} = GroupTopic) ->
strategy(Group) =:= round_robin_per_group andalso
ets:insert(?SHARED_SUBS_ROUND_ROBIN_COUNTER, {GroupTopic, 0}),
ok.
maybe_delete_round_robin_count({Group, _Topic} = GroupTopic) ->
strategy(Group) =:= round_robin_per_group andalso
if_no_more_subscribers(GroupTopic, fun() ->
ets:delete(?SHARED_SUBS_ROUND_ROBIN_COUNTER, GroupTopic)
end),
ok.
if_no_more_subscribers(GroupTopic, Fn) ->
case ets:member(?SHARED_SUBS, GroupTopic) of
true -> ok;
false -> Fn()
end,
ok.
%% keep track of alive remote pids
maybe_insert_alive_tab(Pid) when ?IS_LOCAL_PID(Pid) -> ok;
maybe_insert_alive_tab(Pid) when is_pid(Pid) ->
@ -407,6 +438,7 @@ cleanup_down(SubPid) ->
         fun(Record = #emqx_shared_subscription{topic = Topic, group = Group}) ->
             ok = mria:dirty_delete_object(?TAB, Record),
             true = ets:delete_object(?SHARED_SUBS, {{Group, Topic}, SubPid}),
+            maybe_delete_round_robin_count({Group, Topic}),
             delete_route_if_needed({Group, Topic})
         end,
         mnesia:dirty_match_object(#emqx_shared_subscription{_ = '_', subpid = SubPid})
@ -430,8 +462,7 @@ is_alive_sub(Pid) when ?IS_LOCAL_PID(Pid) ->
 is_alive_sub(Pid) ->
     [] =/= ets:lookup(?ALIVE_SUBS, Pid).
 
-delete_route_if_needed({Group, Topic}) ->
-    case ets:member(?SHARED_SUBS, {Group, Topic}) of
-        true -> ok;
-        false -> ok = emqx_router:do_delete_route(Topic, {Group, node()})
-    end.
+delete_route_if_needed({Group, Topic} = GroupTopic) ->
+    if_no_more_subscribers(GroupTopic, fun() ->
+        ok = emqx_router:do_delete_route(Topic, {Group, node()})
+    end).

View File

@ -273,7 +273,7 @@ check_origin_header(Req, #{listener := {Type, Listener}} = Opts) ->
 end.
 
 websocket_init([Req, Opts]) ->
-    #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts,
+    #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener} = ListenerCfg} = Opts,
     case check_max_connection(Type, Listener) of
         allow ->
             {Peername, PeerCert} = get_peer_info(Type, Listener, Req, Opts),
@ -287,8 +287,10 @@ websocket_init([Req, Opts]) ->
                 ws_cookie => WsCookie,
                 conn_mod => ?MODULE
             },
-            Limiter = emqx_limiter_container:get_limiter_by_names(
-                [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg
+            Limiter = emqx_limiter_container:get_limiter_by_types(
+                ListenerCfg,
+                [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN],
+                LimiterCfg
             ),
             MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback),
             FrameOpts = #{
@ -487,9 +489,6 @@ handle_call(From, info, State) ->
 handle_call(From, stats, State) ->
     gen_server:reply(From, stats(State)),
     return(State);
-handle_call(_From, {ratelimit, Type, Bucket}, State = #state{limiter = Limiter}) ->
-    Limiter2 = emqx_limiter_container:update_by_name(Type, Bucket, Limiter),
-    {reply, ok, State#state{limiter = Limiter2}};
 handle_call(From, Req, State = #state{channel = Channel}) ->
     case emqx_channel:handle_call(Req, Channel) of
         {reply, Reply, NChannel} ->

View File

@ -33,18 +33,6 @@ force_gc_conf() ->
force_shutdown_conf() ->
    #{enable => true, max_heap_size => 4194304, max_message_queue_len => 1000}.
rate_limit_conf() ->
#{
conn_bytes_in => ["100KB", "10s"],
conn_messages_in => ["100", "10s"],
max_conn_rate => 1000,
quota =>
#{
conn_messages_routing => infinity,
overall_messages_routing => infinity
}
}.
rpc_conf() ->
    #{
        async_batch_size => 256,
@ -173,27 +161,9 @@ listeners_conf() ->
 limiter_conf() ->
     Make = fun() ->
         #{
-            bucket =>
-                #{
-                    default =>
-                        #{
-                            capacity => infinity,
-                            initial => 0,
-                            rate => infinity,
-                            per_client =>
-                                #{
-                                    capacity => infinity,
-                                    divisible => false,
-                                    failure_strategy => force,
-                                    initial => 0,
-                                    low_watermark => 0,
-                                    max_retry_time => 5000,
-                                    rate => infinity
-                                }
-                        }
-                },
             burst => 0,
-            rate => infinity
+            rate => infinity,
+            capacity => infinity
         }
     end,
@ -202,7 +172,7 @@ limiter_conf() ->
             Acc#{Name => Make()}
         end,
         #{},
-        [bytes_in, message_in, message_routing, connection, batch]
+        [bytes_in, message_in, message_routing, connection, internal]
     ).
 
 stats_conf() ->
@ -213,7 +183,6 @@ zone_conf() ->
 basic_conf() ->
     #{
-        rate_limit => rate_limit_conf(),
         force_gc => force_gc_conf(),
         force_shutdown => force_shutdown_conf(),
         mqtt => mqtt_conf(),
@ -274,10 +243,9 @@ end_per_suite(_Config) ->
        emqx_banned
    ]).
-init_per_testcase(TestCase, Config) ->
+init_per_testcase(_TestCase, Config) ->
     OldConf = set_test_listener_confs(),
     emqx_common_test_helpers:start_apps([]),
-    check_modify_limiter(TestCase),
     [{config, OldConf} | Config].
end_per_testcase(_TestCase, Config) ->
@ -285,41 +253,6 @@ end_per_testcase(_TestCase, Config) ->
    emqx_common_test_helpers:stop_apps([]),
    Config.
check_modify_limiter(TestCase) ->
Checks = [t_quota_qos0, t_quota_qos1, t_quota_qos2],
case lists:member(TestCase, Checks) of
true ->
modify_limiter();
_ ->
ok
end.
%% per_client 5/1s,5
%% aggregated 10/1s,10
modify_limiter() ->
Limiter = emqx_config:get([limiter]),
#{message_routing := #{bucket := Bucket} = Routing} = Limiter,
#{default := #{per_client := Client} = Default} = Bucket,
Client2 = Client#{
rate := 5,
initial := 0,
capacity := 5,
low_watermark := 1
},
Default2 = Default#{
per_client := Client2,
rate => 10,
initial => 0,
capacity => 10
},
Bucket2 = Bucket#{default := Default2},
Routing2 = Routing#{bucket := Bucket2},
emqx_config:put([limiter], Limiter#{message_routing := Routing2}),
emqx_limiter_manager:restart_server(message_routing),
timer:sleep(100),
ok.
%%--------------------------------------------------------------------
%% Test cases for channel info/stats/caps
%%--------------------------------------------------------------------
@ -729,6 +662,7 @@ t_process_unsubscribe(_) ->
 t_quota_qos0(_) ->
     esockd_limiter:start_link(),
+    add_bucket(),
     Cnter = counters:new(1, []),
     ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end),
     ok = meck:expect(
@ -755,10 +689,12 @@ t_quota_qos0(_) ->
     ok = meck:expect(emqx_metrics, inc, fun(_) -> ok end),
     ok = meck:expect(emqx_metrics, inc, fun(_, _) -> ok end),
+    del_bucket(),
     esockd_limiter:stop().
 t_quota_qos1(_) ->
     esockd_limiter:start_link(),
+    add_bucket(),
     ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end),
     Chann = channel(#{conn_state => connected, quota => quota()}),
     Pub = ?PUBLISH_PACKET(?QOS_1, <<"topic">>, 1, <<"payload">>),
@ -769,10 +705,12 @@ t_quota_qos1(_) ->
     {ok, ?PUBACK_PACKET(1, ?RC_SUCCESS), Chann4} = emqx_channel:handle_in(Pub, Chann3),
     %% Quota in overall
     {ok, ?PUBACK_PACKET(1, ?RC_QUOTA_EXCEEDED), _} = emqx_channel:handle_in(Pub, Chann4),
+    del_bucket(),
     esockd_limiter:stop().
 t_quota_qos2(_) ->
     esockd_limiter:start_link(),
+    add_bucket(),
     ok = meck:expect(emqx_broker, publish, fun(_) -> [{node(), <<"topic">>, {ok, 4}}] end),
     Chann = channel(#{conn_state => connected, quota => quota()}),
     Pub1 = ?PUBLISH_PACKET(?QOS_2, <<"topic">>, 1, <<"payload">>),
@ -786,6 +724,7 @@ t_quota_qos2(_) ->
     {ok, ?PUBREC_PACKET(3, ?RC_SUCCESS), Chann4} = emqx_channel:handle_in(Pub3, Chann3),
     %% Quota in overall
     {ok, ?PUBREC_PACKET(4, ?RC_QUOTA_EXCEEDED), _} = emqx_channel:handle_in(Pub4, Chann4),
+    del_bucket(),
     esockd_limiter:stop().
%%--------------------------------------------------------------------
@ -952,12 +891,6 @@ t_handle_call_takeover_end(_) ->
    {shutdown, takenover, [], _, _Chan} =
        emqx_channel:handle_call({takeover, 'end'}, channel()).
t_handle_call_quota(_) ->
{reply, ok, _Chan} = emqx_channel:handle_call(
{quota, default},
channel()
).
t_handle_call_unexpected(_) ->
    {reply, ignored, _Chan} = emqx_channel:handle_call(unexpected_req, channel()).
@ -1176,7 +1109,7 @@ t_ws_cookie_init(_) ->
         ConnInfo,
         #{
             zone => default,
-            limiter => limiter_cfg(),
+            limiter => undefined,
             listener => {tcp, default}
         }
     ),
@ -1210,7 +1143,7 @@ channel(InitFields) ->
         ConnInfo,
         #{
             zone => default,
-            limiter => limiter_cfg(),
+            limiter => undefined,
             listener => {tcp, default}
         }
     ),
@ -1270,9 +1203,31 @@ session(InitFields) when is_map(InitFields) ->
 %% conn: 5/s; overall: 10/s
 quota() ->
-    emqx_limiter_container:get_limiter_by_names([message_routing], limiter_cfg()).
+    emqx_limiter_container:get_limiter_by_types(?MODULE, [message_routing], limiter_cfg()).
 
-limiter_cfg() -> #{message_routing => default}.
+limiter_cfg() ->
+    Client = #{
+        rate => 5,
+        initial => 0,
+        capacity => 5,
+        low_watermark => 1,
+        divisible => false,
+        max_retry_time => timer:seconds(5),
+        failure_strategy => force
+    },
+    #{
+        message_routing => bucket_cfg(),
+        client => #{message_routing => Client}
+    }.
+
+bucket_cfg() ->
+    #{rate => 10, initial => 0, capacity => 10}.
add_bucket() ->
emqx_limiter_server:add_bucket(?MODULE, message_routing, bucket_cfg()).
del_bucket() ->
emqx_limiter_server:del_bucket(?MODULE, message_routing).
v4(Channel) ->
    ConnInfo = emqx_channel:info(conninfo, Channel),

View File

@ -78,6 +78,7 @@ end_per_suite(_Config) ->
 init_per_testcase(TestCase, Config) when
     TestCase =/= t_ws_pingreq_before_connected
 ->
+    add_bucket(),
     ok = meck:expect(emqx_transport, wait, fun(Sock) -> {ok, Sock} end),
     ok = meck:expect(emqx_transport, type, fun(_Sock) -> tcp end),
     ok = meck:expect(
@ -104,9 +105,11 @@ init_per_testcase(TestCase, Config) when
         _ -> Config
     end;
 init_per_testcase(_, Config) ->
+    add_bucket(),
     Config.
 
 end_per_testcase(TestCase, Config) ->
+    del_bucket(),
     case erlang:function_exported(?MODULE, TestCase, 2) of
         true -> ?MODULE:TestCase('end', Config);
         false -> ok
@ -291,11 +294,6 @@ t_handle_call(_) ->
     ?assertMatch({ok, _St}, handle_msg({event, undefined}, St)),
     ?assertMatch({reply, _Info, _NSt}, handle_call(self(), info, St)),
     ?assertMatch({reply, _Stats, _NSt}, handle_call(self(), stats, St)),
-    ?assertMatch({reply, ok, _NSt}, handle_call(self(), {ratelimit, []}, St)),
-    ?assertMatch(
-        {reply, ok, _NSt},
-        handle_call(self(), {ratelimit, [{bytes_in, default}]}, St)
-    ),
     ?assertEqual({reply, ignored, St}, handle_call(self(), for_testing, St)),
     ?assertMatch(
         {stop, {shutdown, kicked}, ok, _NSt},
@ -318,11 +316,6 @@ t_handle_timeout(_) ->
         emqx_connection:handle_timeout(TRef, keepalive, State)
     ),
-    ok = meck:expect(emqx_transport, getstat, fun(_Sock, _Options) -> {error, for_testing} end),
-    ?assertMatch(
-        {stop, {shutdown, for_testing}, _NState},
-        emqx_connection:handle_timeout(TRef, keepalive, State)
-    ),
     ?assertMatch({ok, _NState}, emqx_connection:handle_timeout(TRef, undefined, State)).
t_parse_incoming(_) -> t_parse_incoming(_) ->
@ -704,7 +697,34 @@ handle_msg(Msg, St) -> emqx_connection:handle_msg(Msg, St).
 handle_call(Pid, Call, St) -> emqx_connection:handle_call(Pid, Call, St).
 
-limiter_cfg() -> #{}.
+-define(LIMITER_ID, 'tcp:default').
 
 init_limiter() ->
-    emqx_limiter_container:get_limiter_by_names([bytes_in, message_in], limiter_cfg()).
+    emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], limiter_cfg()).
limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Cfg = bucket_cfg(),
Client = #{
rate => Infinity,
initial => 0,
capacity => Infinity,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() ->
Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).

View File

@ -24,48 +24,7 @@
-include_lib("eunit/include/eunit.hrl"). -include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl"). -include_lib("common_test/include/ct.hrl").
--define(BASE_CONF, <<
+-define(BASE_CONF, <<"">>).
""
"\n"
"limiter {\n"
" bytes_in {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" message_in {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" connection {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" message_routing {\n"
" bucket.default {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"\n"
" batch {\n"
" bucket.retainer {\n"
" rate = infinity\n"
" capacity = infinity\n"
" }\n"
" }\n"
"}\n"
"\n"
""
>>).
-record(client, {
    counter :: counters:counter_ref(),
@ -97,6 +56,9 @@ end_per_suite(_Config) ->
 init_per_testcase(_TestCase, Config) ->
     Config.
 
+end_per_testcase(_TestCase, Config) ->
+    Config.
+
 load_conf() ->
     emqx_common_test_helpers:load_config(emqx_limiter_schema, ?BASE_CONF).
@ -116,12 +78,12 @@ t_consume(_) ->
             failure_strategy := force
         }
     end,
-    Case = fun() ->
-        Client = connect(default),
+    Case = fun(BucketCfg) ->
+        Client = connect(BucketCfg),
         {ok, L2} = emqx_htb_limiter:consume(50, Client),
         {ok, _L3} = emqx_htb_limiter:consume(150, L2)
     end,
-    with_per_client(default, Cfg, Case).
+    with_per_client(Cfg, Case).
t_retry(_) ->
    Cfg = fun(Cfg) ->
@ -133,15 +95,15 @@ t_retry(_) ->
             failure_strategy := force
         }
     end,
-    Case = fun() ->
-        Client = connect(default),
-        {ok, Client} = emqx_htb_limiter:retry(Client),
-        {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client),
+    Case = fun(BucketCfg) ->
+        Client = connect(BucketCfg),
+        {ok, Client2} = emqx_htb_limiter:retry(Client),
+        {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client2),
         L3 = emqx_htb_limiter:set_retry(Retry, L2),
         timer:sleep(500),
         {ok, _L4} = emqx_htb_limiter:retry(L3)
     end,
-    with_per_client(default, Cfg, Case).
+    with_per_client(Cfg, Case).
t_restore(_) -> t_restore(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
@ -153,15 +115,15 @@ t_restore(_) ->
failure_strategy := force failure_strategy := force
} }
end, end,
-    Case = fun() ->
-        Client = connect(default),
+    Case = fun(BucketCfg) ->
+        Client = connect(BucketCfg),
         {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client),
         timer:sleep(200),
         {ok, L3} = emqx_htb_limiter:check(Retry, L2),
         Avaiable = emqx_htb_limiter:available(L3),
         ?assert(Avaiable >= 50)
     end,
-    with_per_client(default, Cfg, Case).
+    with_per_client(Cfg, Case).
t_max_retry_time(_) -> t_max_retry_time(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
@ -172,15 +134,15 @@ t_max_retry_time(_) ->
failure_strategy := drop failure_strategy := drop
} }
end, end,
-    Case = fun() ->
-        Client = connect(default),
+    Case = fun(BucketCfg) ->
+        Client = connect(BucketCfg),
         Begin = ?NOW,
         Result = emqx_htb_limiter:consume(101, Client),
         ?assertMatch({drop, _}, Result),
         Time = ?NOW - Begin,
         ?assert(Time >= 500 andalso Time < 550)
     end,
-    with_per_client(default, Cfg, Case).
+    with_per_client(Cfg, Case).
t_divisible(_) -> t_divisible(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
@ -191,8 +153,8 @@ t_divisible(_) ->
capacity := 600 capacity := 600
} }
end, end,
-    Case = fun() ->
-        Client = connect(default),
+    Case = fun(BucketCfg) ->
+        Client = connect(BucketCfg),
         Result = emqx_htb_limiter:check(1000, Client),
         ?assertMatch(
             {partial, 400,
@ -206,7 +168,7 @@ t_divisible(_) ->
             Result
         )
     end,
-    with_per_client(default, Cfg, Case).
+    with_per_client(Cfg, Case).
t_low_watermark(_) -> t_low_watermark(_) ->
Cfg = fun(Cfg) -> Cfg = fun(Cfg) ->
@ -217,8 +179,8 @@ t_low_watermark(_) ->
capacity := 1000 capacity := 1000
} }
end, end,
-    Case = fun() ->
-        Client = connect(default),
+    Case = fun(BucketCfg) ->
+        Client = connect(BucketCfg),
         Result = emqx_htb_limiter:check(500, Client),
         ?assertMatch({ok, _}, Result),
         {_, Client2} = Result,
@ -233,28 +195,21 @@ t_low_watermark(_) ->
             Result2
         )
     end,
-    with_per_client(default, Cfg, Case).
+    with_per_client(Cfg, Case).
 t_infinity_client(_) ->
-    Fun = fun(#{per_client := Cli} = Bucket) ->
-        Bucket2 = Bucket#{
-            rate := infinity,
-            capacity := infinity
-        },
-        Cli2 = Cli#{rate := infinity, capacity := infinity},
-        Bucket2#{per_client := Cli2}
-    end,
-    Case = fun() ->
-        Client = connect(default),
+    Fun = fun(Cfg) -> Cfg end,
+    Case = fun(Cfg) ->
+        Client = connect(Cfg),
         InfVal = emqx_limiter_schema:infinity_value(),
         ?assertMatch(#{bucket := #{rate := InfVal}}, Client),
         Result = emqx_htb_limiter:check(100000, Client),
         ?assertEqual({ok, Client}, Result)
     end,
-    with_bucket(default, Fun, Case).
+    with_per_client(Fun, Case).
 t_try_restore_agg(_) ->
-    Fun = fun(#{per_client := Cli} = Bucket) ->
+    Fun = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := 1,
             capacity := 200,
@ -267,20 +222,20 @@ t_try_restore_agg(_) ->
             max_retry_time := 100,
             failure_strategy := force
         },
-        Bucket2#{per_client := Cli2}
+        Bucket2#{client := Cli2}
     end,
-    Case = fun() ->
-        Client = connect(default),
+    Case = fun(Cfg) ->
+        Client = connect(Cfg),
         {_, _, Retry, L2} = emqx_htb_limiter:check(150, Client),
         timer:sleep(200),
         {ok, L3} = emqx_htb_limiter:check(Retry, L2),
         Avaiable = emqx_htb_limiter:available(L3),
         ?assert(Avaiable >= 50)
     end,
-    with_bucket(default, Fun, Case).
+    with_bucket(Fun, Case).
 t_short_board(_) ->
-    Fun = fun(#{per_client := Cli} = Bucket) ->
+    Fun = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := ?RATE("100/1s"),
             initial := 0,
@ -291,18 +246,18 @@ t_short_board(_) ->
             capacity := 600,
             initial := 600
         },
-        Bucket2#{per_client := Cli2}
+        Bucket2#{client := Cli2}
     end,
-    Case = fun() ->
+    Case = fun(Cfg) ->
         Counter = counters:new(1, []),
-        start_client(default, ?NOW + 2000, Counter, 20),
+        start_client(Cfg, ?NOW + 2000, Counter, 20),
         timer:sleep(2100),
         check_average_rate(Counter, 2, 100)
     end,
-    with_bucket(default, Fun, Case).
+    with_bucket(Fun, Case).
 t_rate(_) ->
-    Fun = fun(#{per_client := Cli} = Bucket) ->
+    Fun = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := ?RATE("100/100ms"),
             initial := 0,
@ -313,10 +268,10 @@ t_rate(_) ->
             capacity := infinity,
             initial := 0
         },
-        Bucket2#{per_client := Cli2}
+        Bucket2#{client := Cli2}
     end,
-    Case = fun() ->
-        Client = connect(default),
+    Case = fun(Cfg) ->
+        Client = connect(Cfg),
         Ts1 = erlang:system_time(millisecond),
         C1 = emqx_htb_limiter:available(Client),
         timer:sleep(1000),
@ -326,11 +281,11 @@ t_rate(_) ->
         Inc = C2 - C1,
         ?assert(in_range(Inc, ShouldInc - 100, ShouldInc + 100), "test bucket rate")
     end,
-    with_bucket(default, Fun, Case).
+    with_bucket(Fun, Case).
 t_capacity(_) ->
     Capacity = 600,
-    Fun = fun(#{per_client := Cli} = Bucket) ->
+    Fun = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := ?RATE("100/100ms"),
             initial := 0,
@ -341,25 +296,25 @@ t_capacity(_) ->
             capacity := infinity,
             initial := 0
         },
-        Bucket2#{per_client := Cli2}
+        Bucket2#{client := Cli2}
     end,
-    Case = fun() ->
-        Client = connect(default),
+    Case = fun(Cfg) ->
+        Client = connect(Cfg),
         timer:sleep(1000),
         C1 = emqx_htb_limiter:available(Client),
         ?assertEqual(Capacity, C1, "test bucket capacity")
     end,
-    with_bucket(default, Fun, Case).
+    with_bucket(Fun, Case).
%%--------------------------------------------------------------------
%% Test Cases Global Level
%%--------------------------------------------------------------------
 t_collaborative_alloc(_) ->
-    GlobalMod = fun(Cfg) ->
-        Cfg#{rate := ?RATE("600/1s")}
+    GlobalMod = fun(#{message_routing := MR} = Cfg) ->
+        Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
     end,
-    Bucket1 = fun(#{per_client := Cli} = Bucket) ->
+    Bucket1 = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := ?RATE("400/1s"),
             initial := 0,
@ -370,7 +325,7 @@ t_collaborative_alloc(_) ->
             capacity := 100,
             initial := 100
         },
-        Bucket2#{per_client := Cli2}
+        Bucket2#{client := Cli2}
     end,
     Bucket2 = fun(Bucket) ->
@ -381,8 +336,8 @@ t_collaborative_alloc(_) ->
     Case = fun() ->
         C1 = counters:new(1, []),
         C2 = counters:new(1, []),
-        start_client(b1, ?NOW + 2000, C1, 20),
-        start_client(b2, ?NOW + 2000, C2, 30),
+        start_client({b1, Bucket1}, ?NOW + 2000, C1, 20),
+        start_client({b2, Bucket2}, ?NOW + 2000, C2, 30),
         timer:sleep(2100),
         check_average_rate(C1, 2, 300),
         check_average_rate(C2, 2, 300)
@ -395,14 +350,16 @@ t_collaborative_alloc(_) ->
). ).
 t_burst(_) ->
-    GlobalMod = fun(Cfg) ->
+    GlobalMod = fun(#{message_routing := MR} = Cfg) ->
         Cfg#{
+            message_routing := MR#{
                 rate := ?RATE("200/1s"),
                 burst := ?RATE("400/1s")
+            }
         }
     end,
-    Bucket = fun(#{per_client := Cli} = Bucket) ->
+    Bucket = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := ?RATE("200/1s"),
             initial := 0,
@ -413,16 +370,16 @@ t_burst(_) ->
             capacity := 200,
             divisible := true
         },
-        Bucket2#{per_client := Cli2}
+        Bucket2#{client := Cli2}
     end,
     Case = fun() ->
         C1 = counters:new(1, []),
         C2 = counters:new(1, []),
         C3 = counters:new(1, []),
-        start_client(b1, ?NOW + 2000, C1, 20),
-        start_client(b2, ?NOW + 2000, C2, 30),
-        start_client(b3, ?NOW + 2000, C3, 30),
+        start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
+        start_client({b2, Bucket}, ?NOW + 2000, C2, 30),
+        start_client({b3, Bucket}, ?NOW + 2000, C3, 30),
         timer:sleep(2100),
         Total = lists:sum([counters:get(X, 1) || X <- [C1, C2, C3]]),
@ -436,11 +393,11 @@ t_burst(_) ->
). ).
 t_limit_global_with_unlimit_other(_) ->
-    GlobalMod = fun(Cfg) ->
-        Cfg#{rate := ?RATE("600/1s")}
+    GlobalMod = fun(#{message_routing := MR} = Cfg) ->
+        Cfg#{message_routing := MR#{rate := ?RATE("600/1s")}}
     end,
-    Bucket = fun(#{per_client := Cli} = Bucket) ->
+    Bucket = fun(#{client := Cli} = Bucket) ->
         Bucket2 = Bucket#{
             rate := infinity,
             initial := 0,
@ -451,12 +408,12 @@ t_limit_global_with_unlimit_other(_) ->
             capacity := infinity,
             initial := 0
         },
-        Bucket2#{per_client := Cli2}
+        Bucket2#{client := Cli2}
     end,
     Case = fun() ->
         C1 = counters:new(1, []),
-        start_client(b1, ?NOW + 2000, C1, 20),
+        start_client({b1, Bucket}, ?NOW + 2000, C1, 20),
         timer:sleep(2100),
         check_average_rate(C1, 2, 600)
     end,
@ -470,28 +427,6 @@ t_limit_global_with_unlimit_other(_) ->
%%--------------------------------------------------------------------
%% Test Cases container
%%--------------------------------------------------------------------
t_new_container(_) ->
C1 = emqx_limiter_container:new(),
C2 = emqx_limiter_container:new([message_routing]),
C3 = emqx_limiter_container:update_by_name(message_routing, default, C1),
?assertMatch(
#{
message_routing := _,
retry_ctx := undefined,
{retry, message_routing} := _
},
C2
),
?assertMatch(
#{
message_routing := _,
retry_ctx := undefined,
{retry, message_routing} := _
},
C3
),
ok.
t_check_container(_) ->
    Cfg = fun(Cfg) ->
        Cfg#{
@ -500,10 +435,11 @@ t_check_container(_) ->
             capacity := 1000
         }
     end,
-    Case = fun() ->
-        C1 = emqx_limiter_container:new(
+    Case = fun(#{client := Client} = BucketCfg) ->
+        C1 = emqx_limiter_container:get_limiter_by_types(
+            ?MODULE,
             [message_routing],
-            #{message_routing => default}
+            #{message_routing => BucketCfg, client => #{message_routing => Client}}
         ),
         {ok, C2} = emqx_limiter_container:check(1000, message_routing, C1),
         {pause, Pause, C3} = emqx_limiter_container:check(1000, message_routing, C2),
@ -514,7 +450,39 @@ t_check_container(_) ->
         RetryData = emqx_limiter_container:get_retry_context(C5),
         ?assertEqual(Context, RetryData)
     end,
-    with_per_client(default, Cfg, Case).
+    with_per_client(Cfg, Case).
%%--------------------------------------------------------------------
%% Test Override
%%--------------------------------------------------------------------
t_bucket_no_client(_) ->
Rate = ?RATE("1/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := Rate}}}
end,
BucketMod = fun(Bucket) ->
maps:remove(client, Bucket)
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := Rate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
t_bucket_client(_) ->
GlobalRate = ?RATE("1/s"),
BucketRate = ?RATE("10/s"),
GlobalMod = fun(#{client := #{message_routing := MR} = Client} = Cfg) ->
Cfg#{client := Client#{message_routing := MR#{rate := GlobalRate}}}
end,
BucketMod = fun(#{client := Client} = Bucket) ->
Bucket#{client := Client#{rate := BucketRate}}
end,
Case = fun() ->
Limiter = connect(BucketMod(make_limiter_cfg())),
?assertMatch(#{rate := BucketRate}, Limiter)
end,
with_global(GlobalMod, [BucketMod], Case).
%%--------------------------------------------------------------------
%% Test Cases misc
@ -607,19 +575,23 @@ t_schema_unit(_) ->
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
-start_client(Name, EndTime, Counter, Number) ->
+start_client(Cfg, EndTime, Counter, Number) ->
     lists:foreach(
         fun(_) ->
             spawn(fun() ->
-                start_client(Name, EndTime, Counter)
+                do_start_client(Cfg, EndTime, Counter)
             end)
         end,
         lists:seq(1, Number)
     ).
 
-start_client(Name, EndTime, Counter) ->
-    #{per_client := PerClient} =
-        emqx_config:get([limiter, message_routing, bucket, Name]),
+do_start_client({Name, CfgFun}, EndTime, Counter) ->
+    do_start_client(Name, CfgFun(make_limiter_cfg()), EndTime, Counter);
+do_start_client(Cfg, EndTime, Counter) ->
+    do_start_client(?MODULE, Cfg, EndTime, Counter).
+
+do_start_client(Name, Cfg, EndTime, Counter) ->
+    #{client := PerClient} = Cfg,
     #{rate := Rate} = PerClient,
     Client = #client{
         start = ?NOW,
@ -627,7 +599,7 @@ start_client(Name, EndTime, Counter) ->
         counter = Counter,
         obtained = 0,
         rate = Rate,
-        client = connect(Name)
+        client = connect(Name, Cfg)
     },
     client_loop(Client).
@ -711,35 +683,50 @@ to_rate(Str) ->
{ok, Rate} = emqx_limiter_schema:to_rate(Str), {ok, Rate} = emqx_limiter_schema:to_rate(Str),
Rate. Rate.
-with_global(Modifier, BuckeTemps, Case) ->
-    Fun = fun(Cfg) ->
-        #{bucket := #{default := BucketCfg}} = Cfg2 = Modifier(Cfg),
-        Fun = fun({Name, BMod}, Acc) ->
-            Acc#{Name => BMod(BucketCfg)}
-        end,
-        Buckets = lists:foldl(Fun, #{}, BuckeTemps),
-        Cfg2#{bucket := Buckets}
-    end,
-    with_config([limiter, message_routing], Fun, Case).
+with_global(Modifier, Buckets, Case) ->
+    with_config([limiter], Modifier, Buckets, Case).
 
-with_bucket(Bucket, Modifier, Case) ->
-    Path = [limiter, message_routing, bucket, Bucket],
-    with_config(Path, Modifier, Case).
+with_bucket(Modifier, Case) ->
+    Cfg = Modifier(make_limiter_cfg()),
+    add_bucket(Cfg),
+    Case(Cfg),
+    del_bucket().
 
-with_per_client(Bucket, Modifier, Case) ->
-    Path = [limiter, message_routing, bucket, Bucket, per_client],
-    with_config(Path, Modifier, Case).
+with_per_client(Modifier, Case) ->
+    #{client := Client} = Cfg = make_limiter_cfg(),
+    Cfg2 = Cfg#{client := Modifier(Client)},
+    add_bucket(Cfg2),
+    Case(Cfg2),
+    del_bucket().
 
-with_config(Path, Modifier, Case) ->
+with_config(Path, Modifier, Buckets, Case) ->
     Cfg = emqx_config:get(Path),
     NewCfg = Modifier(Cfg),
+    ct:pal("test with config:~p~n", [NewCfg]),
     emqx_config:put(Path, NewCfg),
     emqx_limiter_server:restart(message_routing),
     timer:sleep(500),
+    BucketCfg = make_limiter_cfg(),
+    lists:foreach(
+        fun
+            ({Name, BucketFun}) ->
+                add_bucket(Name, BucketFun(BucketCfg));
+            (BucketFun) ->
+                add_bucket(BucketFun(BucketCfg))
+        end,
+        Buckets
+    ),
     DelayReturn = delay_return(Case),
+    lists:foreach(
+        fun
+            ({Name, _Cfg}) ->
+                del_bucket(Name);
+            (_Cfg) ->
+                del_bucket()
+        end,
+        Buckets
+    ),
     emqx_config:put(Path, Cfg),
+    emqx_limiter_server:restart(message_routing),
     DelayReturn().
delay_return(Case) -> delay_return(Case) ->
@ -751,10 +738,40 @@ delay_return(Case) ->
fun() -> erlang:raise(Type, Reason, Trace) end fun() -> erlang:raise(Type, Reason, Trace) end
end. end.
-connect(Name) ->
-    {ok, Limiter} = emqx_limiter_server:connect(message_routing, Name),
+connect({Name, CfgFun}) ->
+    connect(Name, CfgFun(make_limiter_cfg()));
+connect(Cfg) ->
+    connect(?MODULE, Cfg).
+
+connect(Name, Cfg) ->
+    {ok, Limiter} = emqx_limiter_server:connect(Name, message_routing, Cfg),
     Limiter.
make_limiter_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
Client = #{
rate => Infinity,
initial => 0,
capacity => Infinity,
low_watermark => 0,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{client => Client, rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket(Cfg) ->
add_bucket(?MODULE, Cfg).
add_bucket(Name, Cfg) ->
emqx_limiter_server:add_bucket(Name, message_routing, Cfg).
del_bucket() ->
del_bucket(?MODULE).
del_bucket(Name) ->
emqx_limiter_server:del_bucket(Name, message_routing).
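Taken together, these helpers replace the old config-path based setup. A hedged sketch of one full cycle using them (the bucket name is made up; return shapes of connect/consume are as used elsewhere in this suite):

    Cfg = make_limiter_cfg(),
    add_bucket(demo_bucket, Cfg),
    {ok, Limiter0} = emqx_limiter_server:connect(demo_bucket, message_routing, Cfg),
    {ok, _Limiter1} = emqx_htb_limiter:consume(1, Limiter0),
    del_bucket(demo_bucket).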
check_average_rate(Counter, Second, Rate) ->
    Cost = counters:get(Counter, 1),
    PerSec = Cost / Second,

View File

@ -141,3 +141,38 @@ bad_tls_version_test() ->
        validate(Sc, #{<<"versions">> => [<<"foo">>]})
    ),
    ok.
ssl_opts_gc_after_handshake_test_rancher_listener_test() ->
Sc = emqx_schema:server_ssl_opts_schema(
#{
gc_after_handshake => false
},
_IsRanchListener = true
),
?assertThrow(
{_Sc, [
#{
kind := validation_error,
reason := unknown_fields,
unknown := <<"gc_after_handshake">>
}
]},
validate(Sc, #{<<"gc_after_handshake">> => true})
),
ok.
ssl_opts_gc_after_handshake_test_not_rancher_listener_test() ->
Sc = emqx_schema:server_ssl_opts_schema(
#{
gc_after_handshake => false
},
_IsRanchListener = false
),
Checked = validate(Sc, #{<<"gc_after_handshake">> => <<"true">>}),
?assertMatch(
#{
gc_after_handshake := true
},
Checked
),
ok.

View File

@ -195,6 +195,161 @@ t_round_robin(_) ->
ok = ensure_config(round_robin, true), ok = ensure_config(round_robin, true),
test_two_messages(round_robin). test_two_messages(round_robin).
t_round_robin_per_group(_) ->
ok = ensure_config(round_robin_per_group, true),
test_two_messages(round_robin_per_group).
%% this would fail if executed with the standard round_robin strategy
t_round_robin_per_group_even_distribution_one_group(_) ->
ok = ensure_config(round_robin_per_group, true),
Topic = <<"foo/bar">>,
Group = <<"group1">>,
{ok, ConnPid1} = emqtt:start_link([{clientid, <<"C0">>}]),
{ok, ConnPid2} = emqtt:start_link([{clientid, <<"C1">>}]),
{ok, _} = emqtt:connect(ConnPid1),
{ok, _} = emqtt:connect(ConnPid2),
emqtt:subscribe(ConnPid1, {<<"$share/", Group/binary, "/", Topic/binary>>, 0}),
emqtt:subscribe(ConnPid2, {<<"$share/", Group/binary, "/", Topic/binary>>, 0}),
%% publisher with persistent connection
{ok, PublisherPid} = emqtt:start_link(),
{ok, _} = emqtt:connect(PublisherPid),
lists:foreach(
fun(I) ->
Message = erlang:integer_to_binary(I),
emqtt:publish(PublisherPid, Topic, Message)
end,
lists:seq(0, 9)
),
AllReceivedMessages = lists:map(
fun(#{client_pid := SubscriberPid, payload := Payload}) -> {SubscriberPid, Payload} end,
lists:reverse(recv_msgs(10))
),
MessagesReceivedSubscriber1 = lists:filter(
fun({P, _Payload}) -> P == ConnPid1 end, AllReceivedMessages
),
MessagesReceivedSubscriber2 = lists:filter(
fun({P, _Payload}) -> P == ConnPid2 end, AllReceivedMessages
),
emqtt:stop(ConnPid1),
emqtt:stop(ConnPid2),
emqtt:stop(PublisherPid),
%% ensure each subscriber received 5 messages in alternating fashion
%% one receives all even and the other all uneven payloads
?assertEqual(
[
{ConnPid1, <<"0">>},
{ConnPid1, <<"2">>},
{ConnPid1, <<"4">>},
{ConnPid1, <<"6">>},
{ConnPid1, <<"8">>}
],
MessagesReceivedSubscriber1
),
?assertEqual(
[
{ConnPid2, <<"1">>},
{ConnPid2, <<"3">>},
{ConnPid2, <<"5">>},
{ConnPid2, <<"7">>},
{ConnPid2, <<"9">>}
],
MessagesReceivedSubscriber2
),
ok.
t_round_robin_per_group_even_distribution_two_groups(_) ->
ok = ensure_config(round_robin_per_group, true),
Topic = <<"foo/bar">>,
{ok, ConnPid1} = emqtt:start_link([{clientid, <<"C0">>}]),
{ok, ConnPid2} = emqtt:start_link([{clientid, <<"C1">>}]),
{ok, ConnPid3} = emqtt:start_link([{clientid, <<"C2">>}]),
{ok, ConnPid4} = emqtt:start_link([{clientid, <<"C3">>}]),
ConnPids = [ConnPid1, ConnPid2, ConnPid3, ConnPid4],
lists:foreach(fun(P) -> emqtt:connect(P) end, ConnPids),
%% group1 subscribers
emqtt:subscribe(ConnPid1, {<<"$share/group1/", Topic/binary>>, 0}),
emqtt:subscribe(ConnPid2, {<<"$share/group1/", Topic/binary>>, 0}),
%% group2 subscribers
emqtt:subscribe(ConnPid3, {<<"$share/group2/", Topic/binary>>, 0}),
emqtt:subscribe(ConnPid4, {<<"$share/group2/", Topic/binary>>, 0}),
publish_fire_and_forget(10, Topic),
AllReceivedMessages = lists:map(
fun(#{client_pid := SubscriberPid, payload := Payload}) -> {SubscriberPid, Payload} end,
lists:reverse(recv_msgs(20))
),
MessagesReceivedSubscriber1 = lists:filter(
fun({P, _Payload}) -> P == ConnPid1 end, AllReceivedMessages
),
MessagesReceivedSubscriber2 = lists:filter(
fun({P, _Payload}) -> P == ConnPid2 end, AllReceivedMessages
),
MessagesReceivedSubscriber3 = lists:filter(
fun({P, _Payload}) -> P == ConnPid3 end, AllReceivedMessages
),
MessagesReceivedSubscriber4 = lists:filter(
fun({P, _Payload}) -> P == ConnPid4 end, AllReceivedMessages
),
lists:foreach(fun(P) -> emqtt:stop(P) end, ConnPids),
%% ensure each subscriber received 5 messages in alternating fashion in each group
%% subscriber 1 and 3 should receive all even messages
%% subscriber 2 and 4 should receive all uneven messages
?assertEqual(
[
{ConnPid3, <<"0">>},
{ConnPid3, <<"2">>},
{ConnPid3, <<"4">>},
{ConnPid3, <<"6">>},
{ConnPid3, <<"8">>}
],
MessagesReceivedSubscriber3
),
?assertEqual(
[
{ConnPid2, <<"1">>},
{ConnPid2, <<"3">>},
{ConnPid2, <<"5">>},
{ConnPid2, <<"7">>},
{ConnPid2, <<"9">>}
],
MessagesReceivedSubscriber2
),
?assertEqual(
[
{ConnPid4, <<"1">>},
{ConnPid4, <<"3">>},
{ConnPid4, <<"5">>},
{ConnPid4, <<"7">>},
{ConnPid4, <<"9">>}
],
MessagesReceivedSubscriber4
),
?assertEqual(
[
{ConnPid1, <<"0">>},
{ConnPid1, <<"2">>},
{ConnPid1, <<"4">>},
{ConnPid1, <<"6">>},
{ConnPid1, <<"8">>}
],
MessagesReceivedSubscriber1
),
ok.
t_sticky(_) -> t_sticky(_) ->
ok = ensure_config(sticky, true), ok = ensure_config(sticky, true),
test_two_messages(sticky). test_two_messages(sticky).
@ -292,7 +447,7 @@ test_two_messages(Strategy, Group) ->
     emqtt:subscribe(ConnPid2, {<<"$share/", Group/binary, "/", Topic/binary>>, 0}),
     Message1 = emqx_message:make(ClientId1, 0, Topic, <<"hello1">>),
-    Message2 = emqx_message:make(ClientId1, 0, Topic, <<"hello2">>),
+    Message2 = emqx_message:make(ClientId2, 0, Topic, <<"hello2">>),
     ct:sleep(100),
     emqx:publish(Message1),
@ -307,6 +462,7 @@ test_two_messages(Strategy, Group) ->
     case Strategy of
         sticky -> ?assertEqual(UsedSubPid1, UsedSubPid2);
         round_robin -> ?assertNotEqual(UsedSubPid1, UsedSubPid2);
+        round_robin_per_group -> ?assertNotEqual(UsedSubPid1, UsedSubPid2);
         hash -> ?assertEqual(UsedSubPid1, UsedSubPid2);
         _ -> ok
     end,
@ -348,7 +504,8 @@ t_per_group_config(_) ->
     ok = ensure_group_config(#{
         <<"local_group">> => local,
         <<"round_robin_group">> => round_robin,
-        <<"sticky_group">> => sticky
+        <<"sticky_group">> => sticky,
+        <<"round_robin_per_group_group">> => round_robin_per_group
     }),
    %% Each test is repeated 4 times because random strategy may technically pass the test
    %% so we run 8 tests to make random pass in only 1/256 runs
@ -360,7 +517,9 @@ t_per_group_config(_) ->
     test_two_messages(sticky, <<"sticky_group">>),
     test_two_messages(sticky, <<"sticky_group">>),
     test_two_messages(round_robin, <<"round_robin_group">>),
-    test_two_messages(round_robin, <<"round_robin_group">>).
+    test_two_messages(round_robin, <<"round_robin_group">>),
+    test_two_messages(round_robin_per_group, <<"round_robin_per_group_group">>),
+    test_two_messages(round_robin_per_group, <<"round_robin_per_group_group">>).
t_local(_) -> t_local(_) ->
GroupConfig = #{ GroupConfig = #{
@ -482,6 +641,9 @@ ensure_config(Strategy, AckEnabled) ->
     emqx_config:put([broker, shared_dispatch_ack_enabled], AckEnabled),
     ok.
 
+ensure_node_config(Node, Strategy) ->
+    rpc:call(Node, emqx_config, force_put, [[broker, shared_subscription_strategy], Strategy]).
+
 ensure_group_config(Group2Strategy) ->
     lists:foreach(
         fun({Group, Strategy}) ->
@ -505,6 +667,19 @@ ensure_group_config(Node, Group2Strategy) ->
        maps:to_list(Group2Strategy)
    ).
publish_fire_and_forget(Count, Topic) when Count > 1 ->
lists:foreach(
fun(I) ->
Message = erlang:integer_to_binary(I),
{ok, PublisherPid} = emqtt:start_link(),
{ok, _} = emqtt:connect(PublisherPid),
emqtt:publish(PublisherPid, Topic, Message),
emqtt:stop(PublisherPid),
ct:sleep(50)
end,
lists:seq(0, Count - 1)
).
subscribed(Group, Topic, Pid) ->
    lists:member(Pid, emqx_shared_sub:subscribers(Group, Topic)).

View File

@ -59,6 +59,7 @@ init_per_testcase(TestCase, Config) when
     TestCase =/= t_ws_pingreq_before_connected,
     TestCase =/= t_ws_non_check_origin
 ->
+    add_bucket(),
     %% Meck Cm
     ok = meck:new(emqx_cm, [passthrough, no_history, no_link]),
     ok = meck:expect(emqx_cm, mark_channel_connected, fun(_) -> ok end),
@ -96,6 +97,7 @@ init_per_testcase(TestCase, Config) when
         | Config
     ];
 init_per_testcase(t_ws_non_check_origin, Config) ->
+    add_bucket(),
     ok = emqx_common_test_helpers:start_apps([]),
     PrevConfig = emqx_config:get_listener_conf(ws, default, [websocket]),
     emqx_config:put_listener_conf(ws, default, [websocket, check_origin_enable], false),
@ -105,6 +107,7 @@ init_per_testcase(t_ws_non_check_origin, Config) ->
| Config | Config
]; ];
init_per_testcase(_, Config) -> init_per_testcase(_, Config) ->
add_bucket(),
PrevConfig = emqx_config:get_listener_conf(ws, default, [websocket]), PrevConfig = emqx_config:get_listener_conf(ws, default, [websocket]),
ok = emqx_common_test_helpers:start_apps([]), ok = emqx_common_test_helpers:start_apps([]),
[ [
@ -119,6 +122,7 @@ end_per_testcase(TestCase, _Config) when
TestCase =/= t_ws_non_check_origin, TestCase =/= t_ws_non_check_origin,
TestCase =/= t_ws_pingreq_before_connected TestCase =/= t_ws_pingreq_before_connected
-> ->
del_bucket(),
lists:foreach( lists:foreach(
fun meck:unload/1, fun meck:unload/1,
[ [
@ -131,11 +135,13 @@ end_per_testcase(TestCase, _Config) when
] ]
); );
end_per_testcase(t_ws_non_check_origin, Config) -> end_per_testcase(t_ws_non_check_origin, Config) ->
del_bucket(),
PrevConfig = ?config(prev_config, Config), PrevConfig = ?config(prev_config, Config),
emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig), emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
emqx_common_test_helpers:stop_apps([]), emqx_common_test_helpers:stop_apps([]),
ok; ok;
end_per_testcase(_, Config) -> end_per_testcase(_, Config) ->
del_bucket(),
PrevConfig = ?config(prev_config, Config), PrevConfig = ?config(prev_config, Config),
emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig), emqx_config:put_listener_conf(ws, default, [websocket], PrevConfig),
emqx_common_test_helpers:stop_apps([]), emqx_common_test_helpers:stop_apps([]),
@ -501,15 +507,12 @@ t_handle_timeout_emit_stats(_) ->
?assertEqual(undefined, ?ws_conn:info(stats_timer, St)). ?assertEqual(undefined, ?ws_conn:info(stats_timer, St)).
t_ensure_rate_limit(_) -> t_ensure_rate_limit(_) ->
%% XXX In the future, limiter should provide API for config update
Path = [limiter, bytes_in, bucket, default, per_client],
PerClient = emqx_config:get(Path),
{ok, Rate} = emqx_limiter_schema:to_rate("50MB"), {ok, Rate} = emqx_limiter_schema:to_rate("50MB"),
emqx_config:put(Path, PerClient#{rate := Rate}), Limiter = init_limiter(#{
emqx_limiter_server:restart(bytes_in), bytes_in => bucket_cfg(),
timer:sleep(100), message_in => bucket_cfg(),
client => #{bytes_in => client_cfg(Rate)}
Limiter = init_limiter(), }),
St = st(#{limiter => Limiter}), St = st(#{limiter => Limiter}),
%% must bigger than value in emqx_ratelimit_SUITE %% must bigger than value in emqx_ratelimit_SUITE
@ -522,11 +525,7 @@ t_ensure_rate_limit(_) ->
St St
), ),
?assertEqual(blocked, ?ws_conn:info(sockstate, St1)), ?assertEqual(blocked, ?ws_conn:info(sockstate, St1)),
?assertEqual([{active, false}], ?ws_conn:info(postponed, St1)), ?assertEqual([{active, false}], ?ws_conn:info(postponed, St1)).
emqx_config:put(Path, PerClient),
emqx_limiter_server:restart(bytes_in),
timer:sleep(100).
t_parse_incoming(_) -> t_parse_incoming(_) ->
{Packets, St} = ?ws_conn:parse_incoming(<<48, 3>>, [], st()), {Packets, St} = ?ws_conn:parse_incoming(<<48, 3>>, [], st()),
@ -691,7 +690,44 @@ ws_client(State) ->
ct:fail(ws_timeout) ct:fail(ws_timeout)
end. end.
limiter_cfg() -> #{bytes_in => default, message_in => default}. -define(LIMITER_ID, 'ws:default').
init_limiter() -> init_limiter() ->
emqx_limiter_container:get_limiter_by_names([bytes_in, message_in], limiter_cfg()). init_limiter(limiter_cfg()).
init_limiter(LimiterCfg) ->
emqx_limiter_container:get_limiter_by_types(?LIMITER_ID, [bytes_in, message_in], LimiterCfg).
limiter_cfg() ->
Cfg = bucket_cfg(),
Client = client_cfg(),
#{bytes_in => Cfg, message_in => Cfg, client => #{bytes_in => Client, message_in => Client}}.
client_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
client_cfg(Infinity).
client_cfg(Rate) ->
Infinity = emqx_limiter_schema:infinity_value(),
#{
rate => Rate,
initial => 0,
capacity => Infinity,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
}.
bucket_cfg() ->
Infinity = emqx_limiter_schema:infinity_value(),
#{rate => Infinity, initial => 0, capacity => Infinity}.
add_bucket() ->
Cfg = bucket_cfg(),
emqx_limiter_server:add_bucket(?LIMITER_ID, bytes_in, Cfg),
emqx_limiter_server:add_bucket(?LIMITER_ID, message_in, Cfg).
del_bucket() ->
emqx_limiter_server:del_bucket(?LIMITER_ID, bytes_in),
emqx_limiter_server:del_bucket(?LIMITER_ID, message_in).

View File

@ -38,4 +38,8 @@
-define(RESOURCE_GROUP, <<"emqx_authn">>). -define(RESOURCE_GROUP, <<"emqx_authn">>).
-define(WITH_SUCCESSFUL_RENDER(Code),
emqx_authn_utils:with_successful_render(?MODULE, fun() -> Code end)
).
-endif. -endif.

View File

@ -34,7 +34,8 @@
ensure_apps_started/1, ensure_apps_started/1,
cleanup_resources/0, cleanup_resources/0,
make_resource_id/1, make_resource_id/1,
without_password/1 without_password/1,
with_successful_render/2
]). ]).
-define(AUTHN_PLACEHOLDERS, [ -define(AUTHN_PLACEHOLDERS, [
@ -135,6 +136,18 @@ render_sql_params(ParamList, Credential) ->
#{return => rawlist, var_trans => fun handle_sql_var/2} #{return => rawlist, var_trans => fun handle_sql_var/2}
). ).
with_successful_render(Provider, Fun) when is_function(Fun, 0) ->
try
Fun()
catch
error:{cannot_get_variable, Name} ->
?TRACE_AUTHN(error, "placeholder_interpolation_failed", #{
provider => Provider,
placeholder => Name
}),
ignore
end.
%% true %% true
is_superuser(#{<<"is_superuser">> := <<"true">>}) -> is_superuser(#{<<"is_superuser">> := <<"true">>}) ->
#{is_superuser => true}; #{is_superuser => true};

View File

@ -187,6 +187,8 @@ authenticate(
request_timeout := RequestTimeout request_timeout := RequestTimeout
} = State } = State
) -> ) ->
?WITH_SUCCESSFUL_RENDER(
begin
Request = generate_request(Credential, State), Request = generate_request(Credential, State),
Response = emqx_resource:query(ResourceId, {Method, Request, RequestTimeout}), Response = emqx_resource:query(ResourceId, {Method, Request, RequestTimeout}),
?TRACE_AUTHN_PROVIDER("http_response", #{ ?TRACE_AUTHN_PROVIDER("http_response", #{
@ -205,7 +207,9 @@ authenticate(
ignore; ignore;
{error, _Reason} -> {error, _Reason} ->
ignore ignore
end. end
end
).
destroy(#{resource_id := ResourceId}) -> destroy(#{resource_id := ResourceId}) ->
_ = emqx_resource:remove_local(ResourceId), _ = emqx_resource:remove_local(ResourceId),

View File

@ -162,6 +162,8 @@ authenticate(
resource_id := ResourceId resource_id := ResourceId
} = State } = State
) -> ) ->
?WITH_SUCCESSFUL_RENDER(
begin
Filter = emqx_authn_utils:render_deep(FilterTemplate, Credential), Filter = emqx_authn_utils:render_deep(FilterTemplate, Credential),
case emqx_resource:query(ResourceId, {find_one, Collection, Filter, #{}}) of case emqx_resource:query(ResourceId, {find_one, Collection, Filter, #{}}) of
{ok, undefined} -> {ok, undefined} ->
@ -190,7 +192,9 @@ authenticate(
{error, Reason} -> {error, Reason} ->
{error, Reason} {error, Reason}
end end
end. end
end
).
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
%% Internal functions %% Internal functions

View File

@ -113,6 +113,8 @@ authenticate(
password_hash_algorithm := Algorithm password_hash_algorithm := Algorithm
} }
) -> ) ->
?WITH_SUCCESSFUL_RENDER(
begin
Params = emqx_authn_utils:render_sql_params(TmplToken, Credential), Params = emqx_authn_utils:render_sql_params(TmplToken, Credential),
case emqx_resource:query(ResourceId, {prepared_query, ?PREPARE_KEY, Params, Timeout}) of case emqx_resource:query(ResourceId, {prepared_query, ?PREPARE_KEY, Params, Timeout}) of
{ok, _Columns, []} -> {ok, _Columns, []} ->
@ -138,7 +140,9 @@ authenticate(
reason => Reason reason => Reason
}), }),
ignore ignore
end. end
end
).
parse_config( parse_config(
#{ #{

View File

@ -115,6 +115,8 @@ authenticate(
password_hash_algorithm := Algorithm password_hash_algorithm := Algorithm
} }
) -> ) ->
?WITH_SUCCESSFUL_RENDER(
begin
Params = emqx_authn_utils:render_sql_params(PlaceHolders, Credential), Params = emqx_authn_utils:render_sql_params(PlaceHolders, Credential),
case emqx_resource:query(ResourceId, {prepared_query, ResourceId, Params}) of case emqx_resource:query(ResourceId, {prepared_query, ResourceId, Params}) of
{ok, _Columns, []} -> {ok, _Columns, []} ->
@ -139,7 +141,9 @@ authenticate(
reason => Reason reason => Reason
}), }),
ignore ignore
end. end
end
).
parse_config( parse_config(
#{ #{

View File

@ -133,6 +133,8 @@ authenticate(
password_hash_algorithm := Algorithm password_hash_algorithm := Algorithm
} }
) -> ) ->
?WITH_SUCCESSFUL_RENDER(
begin
NKey = emqx_authn_utils:render_str(KeyTemplate, Credential), NKey = emqx_authn_utils:render_str(KeyTemplate, Credential),
Command = [CommandName, NKey | Fields], Command = [CommandName, NKey | Fields],
case emqx_resource:query(ResourceId, {cmd, Command}) of case emqx_resource:query(ResourceId, {cmd, Command}) of
@ -159,7 +161,9 @@ authenticate(
reason => Reason reason => Reason
}), }),
ignore ignore
end. end
end
).
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
%% Internal functions %% Internal functions

View File

@ -247,6 +247,27 @@ t_update(_Config) ->
emqx_access_control:authenticate(?CREDENTIALS) emqx_access_control:authenticate(?CREDENTIALS)
). ).
t_interpolation_error(_Config) ->
{ok, _} = emqx:update_config(
?PATH,
{create_authenticator, ?GLOBAL, raw_http_auth_config()}
),
Headers = #{<<"content-type">> => <<"application/json">>},
Response = ?SERVER_RESPONSE_JSON(allow),
ok = emqx_authn_http_test_server:set_handler(
fun(Req0, State) ->
Req = cowboy_req:reply(200, Headers, Response, Req0),
{ok, Req, State}
end
),
?assertMatch(
?EXCEPTION_DENY,
emqx_access_control:authenticate(maps:without([username], ?CREDENTIALS))
).
t_is_superuser(_Config) -> t_is_superuser(_Config) ->
Config = raw_http_auth_config(), Config = raw_http_auth_config(),
{ok, _} = emqx:update_config( {ok, _} = emqx:update_config(
@ -410,6 +431,26 @@ samples() ->
result => {ok, #{is_superuser => false, user_property => #{}}} result => {ok, #{is_superuser => false, user_property => #{}}}
}, },
%% simple get request, no username
#{
handler => fun(Req0, State) ->
#{
username := <<"plain">>,
password := <<"plain">>
} = cowboy_req:match_qs([username, password], Req0),
Req = cowboy_req:reply(
200,
#{<<"content-type">> => <<"application/json">>},
jiffy:encode(#{result => allow, is_superuser => false}),
Req0
),
{ok, Req, State}
end,
config_params => #{},
result => {ok, #{is_superuser => false, user_property => #{}}}
},
%% get request with json body response %% get request with json body response
#{ #{
handler => fun(Req0, State) -> handler => fun(Req0, State) ->

View File

@ -288,6 +288,20 @@ raw_mongo_auth_config() ->
user_seeds() -> user_seeds() ->
[ [
#{
data => #{
username => <<"plain">>,
password_hash => <<"plainsalt">>,
salt => <<"salt">>,
is_superuser => <<"1">>
},
credentials => #{
password => <<"plain">>
},
config_params => #{},
result => {error, not_authorized}
},
#{ #{
data => #{ data => #{
username => <<"plain">>, username => <<"plain">>,

View File

@ -258,6 +258,20 @@ raw_mysql_auth_config() ->
user_seeds() -> user_seeds() ->
[ [
#{
data => #{
username => "plain",
password_hash => "plainsalt",
salt => "salt",
is_superuser_str => "1"
},
credentials => #{
password => <<"plain">>
},
config_params => #{},
result => {error, not_authorized}
},
#{ #{
data => #{ data => #{
username => "plain", username => "plain",

View File

@ -320,6 +320,20 @@ raw_pgsql_auth_config() ->
user_seeds() -> user_seeds() ->
[ [
#{
data => #{
username => "plain",
password_hash => "plainsalt",
salt => "salt",
is_superuser_str => "1"
},
credentials => #{
password => <<"plain">>
},
config_params => #{},
result => {error, not_authorized}
},
#{ #{
data => #{ data => #{
username => "plain", username => "plain",

View File

@ -280,6 +280,20 @@ raw_redis_auth_config() ->
user_seeds() -> user_seeds() ->
[ [
#{
data => #{
password_hash => <<"plainsalt">>,
salt => <<"salt">>,
is_superuser => <<"1">>
},
credentials => #{
password => <<"plain">>
},
key => <<"mqtt_user:plain">>,
config_params => #{},
result => {error, not_authorized}
},
#{ #{
data => #{ data => #{
password_hash => <<"plainsalt">>, password_hash => <<"plainsalt">>,

View File

@ -402,6 +402,14 @@ do_authorize(
Matched -> Matched ->
{Matched, Type} {Matched, Type}
catch catch
error:{cannot_get_variable, Name} ->
emqx_metrics_worker:inc(authz_metrics, Type, nomatch),
?SLOG(warning, #{
msg => "placeholder_interpolation_failed",
placeholder => Name,
authorize_type => Type
}),
do_authorize(Client, PubSub, Topic, Tail);
Class:Reason:Stacktrace -> Class:Reason:Stacktrace ->
emqx_metrics_worker:inc(authz_metrics, Type, nomatch), emqx_metrics_worker:inc(authz_metrics, Type, nomatch),
?SLOG(warning, #{ ?SLOG(warning, #{

View File

@ -180,15 +180,15 @@ convert_client_var({dn, DN}) -> {cert_subject, DN};
convert_client_var({protocol, Proto}) -> {proto_name, Proto}; convert_client_var({protocol, Proto}) -> {proto_name, Proto};
convert_client_var(Other) -> Other. convert_client_var(Other) -> Other.
handle_var({var, _Name}, undefined) -> handle_var({var, Name}, undefined) ->
"undefined"; error({cannot_get_variable, Name});
handle_var({var, <<"peerhost">>}, IpAddr) -> handle_var({var, <<"peerhost">>}, IpAddr) ->
inet_parse:ntoa(IpAddr); inet_parse:ntoa(IpAddr);
handle_var(_Name, Value) -> handle_var(_Name, Value) ->
emqx_placeholder:bin(Value). emqx_placeholder:bin(Value).
handle_sql_var({var, _Name}, undefined) -> handle_sql_var({var, Name}, undefined) ->
"undefined"; error({cannot_get_variable, Name});
handle_sql_var({var, <<"peerhost">>}, IpAddr) -> handle_sql_var({var, <<"peerhost">>}, IpAddr) ->
inet_parse:ntoa(IpAddr); inet_parse:ntoa(IpAddr);
handle_sql_var(_Name, Value) -> handle_sql_var(_Name, Value) ->

View File

@ -51,7 +51,9 @@ basic_config() ->
} }
)} )}
] ++ webhook_creation_opts() ++ ] ++ webhook_creation_opts() ++
proplists:delete(base_url, emqx_connector_http:fields(config)). proplists:delete(
max_retries, proplists:delete(base_url, emqx_connector_http:fields(config))
).
request_config() -> request_config() ->
[ [

View File

@ -90,6 +90,16 @@ fields(config) ->
desc => ?DESC("connect_timeout") desc => ?DESC("connect_timeout")
} }
)}, )},
{max_retries,
sc(
non_neg_integer(),
#{deprecated => {since, "5.0.4"}}
)},
{retry_interval,
sc(
emqx_schema:duration(),
#{deprecated => {since, "5.0.4"}}
)},
{pool_type, {pool_type,
sc( sc(
pool_type(), pool_type(),

View File

@ -142,11 +142,14 @@ from_binary(Bin) -> binary_to_term(Bin).
%% @doc Estimate the size of a message. %% @doc Estimate the size of a message.
%% Count only the topic length + payload size %% Count only the topic length + payload size
%% Event messages have no topic or payload, so count the whole `Msg` term
-spec estimate_size(msg()) -> integer(). -spec estimate_size(msg()) -> integer().
estimate_size(#message{topic = Topic, payload = Payload}) -> estimate_size(#message{topic = Topic, payload = Payload}) ->
size(Topic) + size(Payload); size(Topic) + size(Payload);
estimate_size(#{topic := Topic, payload := Payload}) -> estimate_size(#{topic := Topic, payload := Payload}) ->
size(Topic) + size(Payload). size(Topic) + size(Payload);
estimate_size(Term) ->
erlang:external_size(Term).
set_headers(undefined, Msg) -> set_headers(undefined, Msg) ->
Msg; Msg;

View File

@ -197,4 +197,25 @@ its own from which a browser should permit loading resources."""
zh: "多语言支持" zh: "多语言支持"
} }
} }
bootstrap_user {
desc {
en: "Initialize users file."
zh: "初始化用户文件"
}
label {
en: """Is used to add an administrative user to Dashboard when emqx is first launched,
the format is:
```
username1:password1
username2:password2
```
"""
zh: """用于在首次启动 emqx 时,为 Dashboard 添加管理用户,其格式为:
```
username1:password1
username2:password2
```
"""
}
}
} }
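For reference, a minimal sketch of how this option could be set in the dashboard config (the file path is a hypothetical example, not a shipped default):
```
dashboard {
  # hypothetical path; the file uses the username:password per-line format described above
  bootstrap_user = "/etc/emqx/bootstrap_users"
}
```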

View File

@ -73,6 +73,7 @@ start_listeners(Listeners) ->
Dispatch = [ Dispatch = [
{"/", cowboy_static, {priv_file, emqx_dashboard, "www/index.html"}}, {"/", cowboy_static, {priv_file, emqx_dashboard, "www/index.html"}},
{"/static/[...]", cowboy_static, {priv_dir, emqx_dashboard, "www/static"}}, {"/static/[...]", cowboy_static, {priv_dir, emqx_dashboard, "www/static"}},
{emqx_mgmt_api_status:path(), emqx_mgmt_api_status, []},
{?BASE_PATH ++ "/[...]", emqx_dashboard_bad_api, []}, {?BASE_PATH ++ "/[...]", emqx_dashboard_bad_api, []},
{'_', cowboy_static, {priv_file, emqx_dashboard, "www/index.html"}} {'_', cowboy_static, {priv_file, emqx_dashboard, "www/index.html"}}
], ],

View File

@ -19,6 +19,7 @@
-module(emqx_dashboard_admin). -module(emqx_dashboard_admin).
-include("emqx_dashboard.hrl"). -include("emqx_dashboard.hrl").
-include_lib("emqx/include/logger.hrl").
-include_lib("stdlib/include/ms_transform.hrl"). -include_lib("stdlib/include/ms_transform.hrl").
-boot_mnesia({mnesia, [boot]}). -boot_mnesia({mnesia, [boot]}).
@ -50,10 +51,12 @@
-export([ -export([
add_default_user/0, add_default_user/0,
default_username/0 default_username/0,
add_bootstrap_user/0
]). ]).
-type emqx_admin() :: #?ADMIN{}. -type emqx_admin() :: #?ADMIN{}.
-define(BOOTSTRAP_USER_TAG, <<"bootstrap user">>).
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Mnesia bootstrap %% Mnesia bootstrap
@ -74,6 +77,29 @@ mnesia(boot) ->
]} ]}
]). ]).
%%--------------------------------------------------------------------
%% bootstrap API
%%--------------------------------------------------------------------
-spec add_default_user() -> {ok, map() | empty | default_user_exists} | {error, any()}.
add_default_user() ->
add_default_user(binenv(default_username), binenv(default_password)).
-spec add_bootstrap_user() -> ok | {error, _}.
add_bootstrap_user() ->
case emqx:get_config([dashboard, bootstrap_user], undefined) of
undefined ->
ok;
File ->
case mnesia:table_info(?ADMIN, size) of
0 ->
?SLOG(debug, #{msg => "Add dashboard bootstrap users", file => File}),
add_bootstrap_user(File);
_ ->
ok
end
end.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% API %% API
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -272,11 +298,6 @@ destroy_token_by_username(Username, Token) ->
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Internal functions %% Internal functions
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-spec add_default_user() -> {ok, map() | empty | default_user_exists} | {error, any()}.
add_default_user() ->
add_default_user(binenv(default_username), binenv(default_password)).
default_username() -> default_username() ->
binenv(default_username). binenv(default_username).
@ -290,3 +311,39 @@ add_default_user(Username, Password) ->
[] -> add_user(Username, Password, <<"administrator">>); [] -> add_user(Username, Password, <<"administrator">>);
_ -> {ok, default_user_exists} _ -> {ok, default_user_exists}
end. end.
add_bootstrap_user(File) ->
case file:open(File, [read]) of
{ok, Dev} ->
{ok, MP} = re:compile(<<"(\.+):(\.+$)">>, [ungreedy]),
try
load_bootstrap_user(Dev, MP)
catch
Type:Reason ->
{error, {Type, Reason}}
after
file:close(Dev)
end;
Error ->
Error
end.
load_bootstrap_user(Dev, MP) ->
case file:read_line(Dev) of
{ok, Line} ->
case re:run(Line, MP, [global, {capture, all_but_first, binary}]) of
{match, [[Username, Password]]} ->
case add_user(Username, Password, ?BOOTSTRAP_USER_TAG) of
{ok, _} ->
load_bootstrap_user(Dev, MP);
Error ->
Error
end;
_ ->
load_bootstrap_user(Dev, MP)
end;
eof ->
ok;
Error ->
Error
end.

View File

@ -31,8 +31,13 @@ start(_StartType, _StartArgs) ->
case emqx_dashboard:start_listeners() of case emqx_dashboard:start_listeners() of
ok -> ok ->
emqx_dashboard_cli:load(), emqx_dashboard_cli:load(),
case emqx_dashboard_admin:add_bootstrap_user() of
ok ->
{ok, _} = emqx_dashboard_admin:add_default_user(), {ok, _} = emqx_dashboard_admin:add_default_user(),
{ok, Sup}; {ok, Sup};
Error ->
Error
end;
{error, Reason} -> {error, Reason} ->
{error, Reason} {error, Reason}
end. end.

View File

@ -38,7 +38,12 @@
]). ]).
is_ready(Timeout) -> is_ready(Timeout) ->
ready =:= gen_server:call(?MODULE, is_ready, Timeout). try
ready =:= gen_server:call(?MODULE, is_ready, Timeout)
catch
exit:{timeout, _} ->
false
end.
start_link() -> start_link() ->
gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). gen_server:start_link({local, ?MODULE}, ?MODULE, [], []).

View File

@ -43,5 +43,6 @@ check_dispatch_ready(Env) ->
true; true;
true -> true ->
%% dashboard should always ready, if not, is_ready/1 will block until ready. %% dashboard should always ready, if not, is_ready/1 will block until ready.
emqx_dashboard_listener:is_ready(timer:seconds(15)) %% if not ready, dashboard will return 503.
emqx_dashboard_listener:is_ready(timer:seconds(20))
end. end.

View File

@ -54,7 +54,8 @@ fields("dashboard") ->
} }
)}, )},
{cors, fun cors/1}, {cors, fun cors/1},
{i18n_lang, fun i18n_lang/1} {i18n_lang, fun i18n_lang/1},
{bootstrap_user, ?HOCON(binary(), #{desc => ?DESC(bootstrap_user), required => false})}
]; ];
fields("listeners") -> fields("listeners") ->
[ [

View File

@ -784,6 +784,8 @@ to_bin(List) when is_list(List) ->
end; end;
to_bin(Boolean) when is_boolean(Boolean) -> Boolean; to_bin(Boolean) when is_boolean(Boolean) -> Boolean;
to_bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8); to_bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8);
to_bin({Type, Args}) ->
unicode:characters_to_binary(io_lib:format("~p(~p)", [Type, Args]));
to_bin(X) -> to_bin(X) ->
X. X.

View File

@ -1,7 +1,7 @@
%% -*- mode: erlang -*- %% -*- mode: erlang -*-
{application, emqx_gateway, [ {application, emqx_gateway, [
{description, "The Gateway management application"}, {description, "The Gateway management application"},
{vsn, "0.1.2"}, {vsn, "0.1.3"},
{registered, []}, {registered, []},
{mod, {emqx_gateway_app, []}}, {mod, {emqx_gateway_app, []}},
{applications, [kernel, stdlib, grpc, emqx, emqx_authn]}, {applications, [kernel, stdlib, grpc, emqx, emqx_authn]},

View File

@ -221,14 +221,16 @@ read_resp_to_mqtt({ok, SuccessCode}, CoapPayload, Format, Ref) ->
Result = content_to_mqtt(CoapPayload, Format, Ref), Result = content_to_mqtt(CoapPayload, Format, Ref),
make_response(SuccessCode, Ref, Format, Result) make_response(SuccessCode, Ref, Format, Result)
catch catch
error:not_implemented -> throw:{bad_request, Reason} ->
make_response(not_implemented, Ref); ?SLOG(error, #{msg => "bad_request", payload => CoapPayload, reason => Reason}),
_:Ex:_ST -> make_response(bad_request, Ref);
E:R:ST ->
?SLOG(error, #{ ?SLOG(error, #{
msg => "bad_payload_format", msg => "bad_request",
payload => CoapPayload, payload => CoapPayload,
reason => Ex, exception => E,
stacktrace => _ST reason => R,
stacktrace => ST
}), }),
make_response(bad_request, Ref) make_response(bad_request, Ref)
end. end.

View File

@ -29,7 +29,7 @@
tlv_to_json(BaseName, TlvData) -> tlv_to_json(BaseName, TlvData) ->
DecodedTlv = emqx_lwm2m_tlv:parse(TlvData), DecodedTlv = emqx_lwm2m_tlv:parse(TlvData),
ObjectId = object_id(BaseName), ObjectId = object_id(BaseName),
ObjDefinition = emqx_lwm2m_xml_object:get_obj_def(ObjectId, true), ObjDefinition = emqx_lwm2m_xml_object:get_obj_def_assertive(ObjectId, true),
case DecodedTlv of case DecodedTlv of
[#{tlv_resource_with_value := Id, value := Value}] -> [#{tlv_resource_with_value := Id, value := Value}] ->
TrueBaseName = basename(BaseName, undefined, undefined, Id, 3), TrueBaseName = basename(BaseName, undefined, undefined, Id, 3),
@ -318,7 +318,7 @@ path([H | T], Acc) ->
text_to_json(BaseName, Text) -> text_to_json(BaseName, Text) ->
{ObjectId, ResourceId} = object_resource_id(BaseName), {ObjectId, ResourceId} = object_resource_id(BaseName),
ObjDefinition = emqx_lwm2m_xml_object:get_obj_def(ObjectId, true), ObjDefinition = emqx_lwm2m_xml_object:get_obj_def_assertive(ObjectId, true),
Val = text_value(Text, ResourceId, ObjDefinition), Val = text_value(Text, ResourceId, ObjDefinition),
[#{path => BaseName, value => Val}]. [#{path => BaseName, value => Val}].

View File

@ -21,6 +21,7 @@
-export([ -export([
get_obj_def/2, get_obj_def/2,
get_obj_def_assertive/2,
get_object_id/1, get_object_id/1,
get_object_name/1, get_object_name/1,
get_object_and_resource_id/2, get_object_and_resource_id/2,
@ -29,7 +30,13 @@
get_resource_operations/2 get_resource_operations/2
]). ]).
% This module is for future use. Disabled now. get_obj_def_assertive(ObjectId, IsInt) ->
case get_obj_def(ObjectId, IsInt) of
{error, no_xml_definition} ->
erlang:throw({bad_request, {unknown_object_id, ObjectId}});
Xml ->
Xml
end.
get_obj_def(ObjectIdInt, true) -> get_obj_def(ObjectIdInt, true) ->
emqx_lwm2m_xml_object_db:find_objectid(ObjectIdInt); emqx_lwm2m_xml_object_db:find_objectid(ObjectIdInt);

View File

@ -76,12 +76,9 @@ find_name(Name) ->
end, end,
case ets:lookup(?LWM2M_OBJECT_NAME_TO_ID_TAB, NameBinary) of case ets:lookup(?LWM2M_OBJECT_NAME_TO_ID_TAB, NameBinary) of
[] -> [] ->
undefined; {error, no_xml_definition};
[{NameBinary, ObjectId}] -> [{NameBinary, ObjectId}] ->
case ets:lookup(?LWM2M_OBJECT_DEF_TAB, ObjectId) of find_objectid(ObjectId)
[] -> undefined;
[{ObjectId, Xml}] -> Xml
end
end. end.
stop() -> stop() ->

View File

@ -239,6 +239,7 @@ t_gateway_exproto_with_ssl(_) ->
t_authn(_) -> t_authn(_) ->
GwConf = #{name => <<"stomp">>}, GwConf = #{name => <<"stomp">>},
{201, _} = request(post, "/gateway", GwConf), {201, _} = request(post, "/gateway", GwConf),
ct:sleep(500),
{204, _} = request(get, "/gateway/stomp/authentication"), {204, _} = request(get, "/gateway/stomp/authentication"),
AuthConf = #{ AuthConf = #{
@ -263,6 +264,7 @@ t_authn(_) ->
t_authn_data_mgmt(_) -> t_authn_data_mgmt(_) ->
GwConf = #{name => <<"stomp">>}, GwConf = #{name => <<"stomp">>},
{201, _} = request(post, "/gateway", GwConf), {201, _} = request(post, "/gateway", GwConf),
ct:sleep(500),
{204, _} = request(get, "/gateway/stomp/authentication"), {204, _} = request(get, "/gateway/stomp/authentication"),
AuthConf = #{ AuthConf = #{
@ -271,6 +273,7 @@ t_authn_data_mgmt(_) ->
user_id_type => <<"clientid">> user_id_type => <<"clientid">>
}, },
{201, _} = request(post, "/gateway/stomp/authentication", AuthConf), {201, _} = request(post, "/gateway/stomp/authentication", AuthConf),
ct:sleep(500),
{200, ConfResp} = request(get, "/gateway/stomp/authentication"), {200, ConfResp} = request(get, "/gateway/stomp/authentication"),
assert_confs(AuthConf, ConfResp), assert_confs(AuthConf, ConfResp),
@ -374,6 +377,7 @@ t_listeners_authn(_) ->
] ]
}, },
{201, _} = request(post, "/gateway", GwConf), {201, _} = request(post, "/gateway", GwConf),
ct:sleep(500),
{200, ConfResp} = request(get, "/gateway/stomp"), {200, ConfResp} = request(get, "/gateway/stomp"),
assert_confs(GwConf, ConfResp), assert_confs(GwConf, ConfResp),

View File

@ -143,7 +143,9 @@ on_start_auth(authn_http) ->
Setup = fun(Gateway) -> Setup = fun(Gateway) ->
Path = io_lib:format("/gateway/~ts/authentication", [Gateway]), Path = io_lib:format("/gateway/~ts/authentication", [Gateway]),
{204, _} = request(delete, Path), {204, _} = request(delete, Path),
{201, _} = request(post, Path, http_authn_config()) timer:sleep(200),
{201, _} = request(post, Path, http_authn_config()),
timer:sleep(200)
end, end,
lists:foreach(Setup, ?GATEWAYS), lists:foreach(Setup, ?GATEWAYS),

View File

@ -47,6 +47,7 @@ end_per_suite(_Conf) ->
init_per_testcase(_CaseName, Conf) -> init_per_testcase(_CaseName, Conf) ->
_ = emqx_gateway_conf:unload_gateway(stomp), _ = emqx_gateway_conf:unload_gateway(stomp),
ct:sleep(500),
Conf. Conf.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -282,6 +283,7 @@ t_load_remove_authn(_) ->
{ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf), {ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])), assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])),
ct:sleep(500),
{ok, _} = emqx_gateway_conf:add_authn(<<"stomp">>, ?CONF_STOMP_AUTHN_1), {ok, _} = emqx_gateway_conf:add_authn(<<"stomp">>, ?CONF_STOMP_AUTHN_1),
assert_confs( assert_confs(
@ -314,6 +316,7 @@ t_load_remove_listeners(_) ->
{ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf), {ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])), assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])),
ct:sleep(500),
{ok, _} = emqx_gateway_conf:add_listener( {ok, _} = emqx_gateway_conf:add_listener(
<<"stomp">>, <<"stomp">>,
@ -371,6 +374,7 @@ t_load_remove_listener_authn(_) ->
{ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf), {ok, _} = emqx_gateway_conf:load_gateway(<<"stomp">>, StompConf),
assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])), assert_confs(StompConf, emqx:get_raw_config([gateway, stomp])),
ct:sleep(500),
{ok, _} = emqx_gateway_conf:add_authn( {ok, _} = emqx_gateway_conf:add_authn(
<<"stomp">>, {<<"tcp">>, <<"default">>}, ?CONF_STOMP_AUTHN_1 <<"stomp">>, {<<"tcp">>, <<"default">>}, ?CONF_STOMP_AUTHN_1

View File

@ -850,6 +850,75 @@ case10_read(Config) ->
), ),
?assertEqual(ReadResult, test_recv_mqtt_response(RespTopic)). ?assertEqual(ReadResult, test_recv_mqtt_response(RespTopic)).
case10_read_bad_request(Config) ->
UdpSock = ?config(sock, Config),
Epn = "urn:oma:lwm2m:oma:3",
MsgId1 = 15,
RespTopic = list_to_binary("lwm2m/" ++ Epn ++ "/up/resp"),
emqtt:subscribe(?config(emqx_c, Config), RespTopic, qos0),
timer:sleep(200),
% step 1, device register ...
test_send_coap_request(
UdpSock,
post,
sprintf("coap://127.0.0.1:~b/rd?ep=~s&lt=345&lwm2m=1", [?PORT, Epn]),
#coap_content{
content_format = <<"text/plain">>,
payload =
<<"</lwm2m>;rt=\"oma.lwm2m\";ct=11543,</lwm2m/1/0>,</lwm2m/2/0>,</lwm2m/3/0>">>
},
[],
MsgId1
),
#coap_message{method = Method1} = test_recv_coap_response(UdpSock),
?assertEqual({ok, created}, Method1),
test_recv_mqtt_response(RespTopic),
% step2, send a READ command to device
CmdId = 206,
CommandTopic = <<"lwm2m/", (list_to_binary(Epn))/binary, "/dn/dm">>,
Command = #{
<<"requestID">> => CmdId,
<<"cacheID">> => CmdId,
<<"msgType">> => <<"read">>,
<<"data">> => #{
<<"path">> => <<"/3333/0/0">>
}
},
CommandJson = emqx_json:encode(Command),
?LOGT("CommandJson=~p", [CommandJson]),
test_mqtt_broker:publish(CommandTopic, CommandJson, 0),
timer:sleep(50),
Request2 = test_recv_coap_request(UdpSock),
#coap_message{method = Method2, payload = Payload2} = Request2,
?LOGT("LwM2M client got ~p", [Request2]),
?assertEqual(get, Method2),
?assertEqual(<<>>, Payload2),
timer:sleep(50),
test_send_coap_response(
UdpSock,
"127.0.0.1",
?PORT,
{ok, content},
#coap_content{content_format = <<"text/plain">>, payload = <<"EMQ">>},
Request2,
true
),
timer:sleep(100),
ReadResult = emqx_json:encode(#{
<<"requestID">> => CmdId,
<<"cacheID">> => CmdId,
<<"msgType">> => <<"read">>,
<<"data">> => #{
<<"code">> => <<"4.00">>,
<<"codeMsg">> => <<"bad_request">>,
<<"reqPath">> => <<"/3333/0/0">>
}
}),
?assertEqual(ReadResult, test_recv_mqtt_response(RespTopic)).
case10_read_separate_ack(Config) -> case10_read_separate_ack(Config) ->
UdpSock = ?config(sock, Config), UdpSock = ?config(sock, Config),
Epn = "urn:oma:lwm2m:oma:3", Epn = "urn:oma:lwm2m:oma:3",

View File

@ -2,7 +2,7 @@
{application, emqx_management, [ {application, emqx_management, [
{description, "EMQX Management API and CLI"}, {description, "EMQX Management API and CLI"},
% strict semver, bump manually! % strict semver, bump manually!
{vsn, "5.0.2"}, {vsn, "5.0.3"},
{modules, []}, {modules, []},
{registered, [emqx_management_sup]}, {registered, [emqx_management_sup]},
{applications, [kernel, stdlib, emqx_plugins, minirest, emqx]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx]},

View File

@ -138,7 +138,7 @@ node_info() ->
max_fds, lists:usort(lists:flatten(erlang:system_info(check_io))) max_fds, lists:usort(lists:flatten(erlang:system_info(check_io)))
), ),
connections => ets:info(emqx_channel, size), connections => ets:info(emqx_channel, size),
node_status => 'Running', node_status => 'running',
uptime => proplists:get_value(uptime, BrokerInfo), uptime => proplists:get_value(uptime, BrokerInfo),
version => iolist_to_binary(proplists:get_value(version, BrokerInfo)), version => iolist_to_binary(proplists:get_value(version, BrokerInfo)),
role => mria_rlog:role() role => mria_rlog:role()
@ -156,7 +156,7 @@ node_info(Node) ->
wrap_rpc(emqx_management_proto_v2:node_info(Node)). wrap_rpc(emqx_management_proto_v2:node_info(Node)).
stopped_node_info(Node) -> stopped_node_info(Node) ->
#{name => Node, node_status => 'Stopped'}. #{name => Node, node_status => 'stopped'}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Brokers %% Brokers

View File

@ -189,8 +189,8 @@ fields(node_info) ->
)}, )},
{node_status, {node_status,
mk( mk(
enum(['Running', 'Stopped']), enum(['running', 'stopped']),
#{desc => <<"Node status">>, example => "Running"} #{desc => <<"Node status">>, example => "running"}
)}, )},
{otp_release, {otp_release,
mk( mk(
@ -288,19 +288,18 @@ get_stats(Node) ->
%% internal function %% internal function
format(_Node, Info = #{memory_total := Total, memory_used := Used}) -> format(_Node, Info = #{memory_total := Total, memory_used := Used}) ->
{ok, SysPathBinary} = file:get_cwd(), RootDir = list_to_binary(code:root_dir()),
SysPath = list_to_binary(SysPathBinary),
LogPath = LogPath =
case log_path() of case log_path() of
undefined -> undefined ->
<<"log.file_handler.default.enable is false,only log to console">>; <<"log.file_handler.default.enable is false,only log to console">>;
Path -> Path ->
filename:join(SysPath, Path) filename:join(RootDir, Path)
end, end,
Info#{ Info#{
memory_total := emqx_mgmt_util:kmg(Total), memory_total := emqx_mgmt_util:kmg(Total),
memory_used := emqx_mgmt_util:kmg(Used), memory_used := emqx_mgmt_util:kmg(Used),
sys_path => SysPath, sys_path => RootDir,
log_path => LogPath log_path => LogPath
}. }.

View File

@ -14,55 +14,25 @@
%% limitations under the License. %% limitations under the License.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-module(emqx_mgmt_api_status). -module(emqx_mgmt_api_status).
%% API
-behaviour(minirest_api).
-export([ -export([
api_spec/0, init/2,
paths/0, path/0
schema/1
]). ]).
-export([running_status/2]). path() ->
"/status".
api_spec() -> init(Req0, State) ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}). {Code, Headers, Body} = running_status(),
Req = cowboy_req:reply(Code, Headers, Body, Req0),
paths() -> {ok, Req, State}.
["/status"].
schema("/status") ->
#{
'operationId' => running_status,
get =>
#{
description => <<"Node running status">>,
tags => [<<"Status">>],
security => [],
responses =>
#{
200 =>
#{
description => <<"Node is running">>,
content =>
#{
'text/plain' =>
#{
schema => #{type => string},
example =>
<<"Node emqx@127.0.0.1 is started\nemqx is running">>
}
}
}
}
}
}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% API Handler funcs %% API Handler funcs
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
running_status(get, _Params) -> running_status() ->
BrokerStatus = BrokerStatus =
case emqx:is_running() of case emqx:is_running() of
true -> true ->

View File

@ -31,7 +31,7 @@ end_per_suite(_) ->
emqx_mgmt_api_test_util:end_suite(). emqx_mgmt_api_test_util:end_suite().
t_status(_Config) -> t_status(_Config) ->
Path = emqx_mgmt_api_test_util:api_path(["status"]), Path = emqx_mgmt_api_test_util:api_path_without_base_path(["/status"]),
Status = io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), started, running]), Status = io_lib:format("Node ~ts is ~ts~nemqx is ~ts", [node(), started, running]),
{ok, Status} = emqx_mgmt_api_test_util:request_api(get, Path), {ok, Status} = emqx_mgmt_api_test_util:request_api(get, Path),
ok. ok.

View File

@ -110,6 +110,9 @@ build_http_header(X) ->
api_path(Parts) -> api_path(Parts) ->
?SERVER ++ filename:join([?BASE_PATH | Parts]). ?SERVER ++ filename:join([?BASE_PATH | Parts]).
api_path_without_base_path(Parts) ->
?SERVER ++ filename:join([Parts]).
%% Usage: %% Usage:
%% upload_request(<<"site.com/api/upload">>, <<"path/to/file.png">>, %% upload_request(<<"site.com/api/upload">>, <<"path/to/file.png">>,
%% <<"upload">>, <<"image/png">>, [], <<"some-token">>) %% <<"upload">>, <<"image/png">>, [], <<"some-token">>)

View File

@ -2,9 +2,7 @@
{deps, [ {deps, [
{emqx, {path, "../emqx"}}, {emqx, {path, "../emqx"}},
%% FIXME: tag this as v3.1.3 {prometheus, {git, "https://github.com/deadtrickster/prometheus.erl", {tag, "v4.8.1"}}}
{prometheus, {git, "https://github.com/deadtrickster/prometheus.erl", {tag, "v4.8.1"}}},
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.29.0"}}}
]}. ]}.
{edoc_opts, [{preprocess, true}]}. {edoc_opts, [{preprocess, true}]}.

View File

@ -2,7 +2,7 @@
{application, emqx_retainer, [ {application, emqx_retainer, [
{description, "EMQX Retainer"}, {description, "EMQX Retainer"},
% strict semver, bump manually! % strict semver, bump manually!
{vsn, "5.0.2"}, {vsn, "5.0.3"},
{modules, []}, {modules, []},
{registered, [emqx_retainer_sup]}, {registered, [emqx_retainer_sup]},
{applications, [kernel, stdlib, emqx]}, {applications, [kernel, stdlib, emqx]},

View File

@ -348,12 +348,16 @@ enable_retainer(
#{context_id := ContextId} = State, #{context_id := ContextId} = State,
#{ #{
msg_clear_interval := ClearInterval, msg_clear_interval := ClearInterval,
backend := BackendCfg backend := BackendCfg,
flow_control := FlowControl
} }
) -> ) ->
NewContextId = ContextId + 1, NewContextId = ContextId + 1,
Context = create_resource(new_context(NewContextId), BackendCfg), Context = create_resource(new_context(NewContextId), BackendCfg),
load(Context), load(Context),
emqx_limiter_server:add_bucket(
?APP, internal, maps:get(batch_deliver_limiter, FlowControl, undefined)
),
State#{ State#{
enable := true, enable := true,
context_id := NewContextId, context_id := NewContextId,
@ -369,6 +373,7 @@ disable_retainer(
} = State } = State
) -> ) ->
unload(), unload(),
emqx_limiter_server:del_bucket(?APP, internal),
ok = close_resource(Context), ok = close_resource(Context),
State#{ State#{
enable := false, enable := false,

View File

@ -151,13 +151,8 @@ config(get, _) ->
{200, emqx:get_raw_config([retainer])}; {200, emqx:get_raw_config([retainer])};
config(put, #{body := Body}) -> config(put, #{body := Body}) ->
try try
check_bucket_exists( {ok, _} = emqx_retainer:update_config(Body),
Body,
fun(Conf) ->
{ok, _} = emqx_retainer:update_config(Conf),
{200, emqx:get_raw_config([retainer])} {200, emqx:get_raw_config([retainer])}
end
)
catch catch
_:Reason:_ -> _:Reason:_ ->
{400, #{ {400, #{
@ -237,30 +232,3 @@ check_backend(Type, Params, Cont) ->
_ -> _ ->
{400, 'BAD_REQUEST', <<"This API only support built in database">>} {400, 'BAD_REQUEST', <<"This API only support built in database">>}
end. end.
check_bucket_exists(
#{
<<"flow_control">> :=
#{<<"batch_deliver_limiter">> := Name} = Flow
} = Conf,
Cont
) ->
case erlang:binary_to_atom(Name) of
'' ->
%% workaround, empty string means set the value to undefined,
%% but now, we can't store `undefined` in the config file correct,
%% but, we can delete this field
Cont(Conf#{
<<"flow_control">> := maps:remove(<<"batch_deliver_limiter">>, Flow)
});
Bucket ->
Path = emqx_limiter_schema:get_bucket_cfg_path(batch, Bucket),
case emqx:get_config(Path, undefined) of
undefined ->
{400, 'BAD_REQUEST', <<"The limiter bucket not exists">>};
_ ->
Cont(Conf)
end
end;
check_bucket_exists(Conf, Cont) ->
Cont(Conf).

View File

@ -115,8 +115,8 @@ start_link(Pool, Id) ->
init([Pool, Id]) -> init([Pool, Id]) ->
erlang:process_flag(trap_exit, true), erlang:process_flag(trap_exit, true),
true = gproc_pool:connect_worker(Pool, {Pool, Id}), true = gproc_pool:connect_worker(Pool, {Pool, Id}),
BucketName = emqx:get_config([retainer, flow_control, batch_deliver_limiter], undefined), BucketCfg = emqx:get_config([retainer, flow_control, batch_deliver_limiter], undefined),
{ok, Limiter} = emqx_limiter_server:connect(batch, BucketName), {ok, Limiter} = emqx_limiter_server:connect(?APP, internal, BucketCfg),
{ok, #{pool => Pool, id => Id, limiter => Limiter}}. {ok, #{pool => Pool, id => Id, limiter => Limiter}}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
@ -155,8 +155,8 @@ handle_cast({dispatch, Context, Pid, Topic}, #{limiter := Limiter} = State) ->
{ok, Limiter2} = dispatch(Context, Pid, Topic, undefined, Limiter), {ok, Limiter2} = dispatch(Context, Pid, Topic, undefined, Limiter),
{noreply, State#{limiter := Limiter2}}; {noreply, State#{limiter := Limiter2}};
handle_cast({refresh_limiter, Conf}, State) -> handle_cast({refresh_limiter, Conf}, State) ->
BucketName = emqx_map_lib:deep_get([flow_control, batch_deliver_limiter], Conf, undefined), BucketCfg = emqx_map_lib:deep_get([flow_control, batch_deliver_limiter], Conf, undefined),
{ok, Limiter} = emqx_limiter_server:connect(batch, BucketName), {ok, Limiter} = emqx_limiter_server:connect(?APP, internal, BucketCfg),
{noreply, State#{limiter := Limiter}}; {noreply, State#{limiter := Limiter}};
handle_cast(Msg, State) -> handle_cast(Msg, State) ->
?SLOG(error, #{msg => "unexpected_cast", cast => Msg}), ?SLOG(error, #{msg => "unexpected_cast", cast => Msg}),

View File

@ -86,7 +86,7 @@ fields(flow_control) ->
)}, )},
{batch_deliver_limiter, {batch_deliver_limiter,
sc( sc(
emqx_limiter_schema:bucket_name(), ?R_REF(emqx_limiter_schema, internal),
batch_deliver_limiter, batch_deliver_limiter,
undefined undefined
)} )}

View File

@ -368,27 +368,16 @@ t_stop_publish_clear_msg(_) ->
ok = emqtt:disconnect(C1). ok = emqtt:disconnect(C1).
t_flow_control(_) -> t_flow_control(_) ->
#{per_client := PerClient} = RetainerCfg = emqx_config:get([limiter, batch, bucket, retainer]), Rate = emqx_ratelimiter_SUITE:to_rate("1/1s"),
RetainerCfg2 = RetainerCfg#{ LimiterCfg = make_limiter_cfg(Rate),
per_client := JsonCfg = make_limiter_json(<<"1/1s">>),
PerClient#{ emqx_limiter_server:add_bucket(emqx_retainer, internal, LimiterCfg),
rate := emqx_ratelimiter_SUITE:to_rate("1/1s"),
capacity := 1
}
},
emqx_config:put([limiter, batch, bucket, retainer], RetainerCfg2),
emqx_limiter_manager:restart_server(batch),
timer:sleep(500),
emqx_retainer_dispatcher:refresh_limiter(),
timer:sleep(500),
emqx_retainer:update_config(#{ emqx_retainer:update_config(#{
<<"flow_control">> => <<"flow_control">> =>
#{ #{
<<"batch_read_number">> => 1, <<"batch_read_number">> => 1,
<<"batch_deliver_number">> => 1, <<"batch_deliver_number">> => 1,
<<"batch_deliver_limiter">> => retainer <<"batch_deliver_limiter">> => JsonCfg
} }
}), }),
{ok, C1} = emqtt:start_link([{clean_start, true}, {proto_ver, v5}]), {ok, C1} = emqtt:start_link([{clean_start, true}, {proto_ver, v5}]),
@ -424,13 +413,14 @@ t_flow_control(_) ->
ok = emqtt:disconnect(C1), ok = emqtt:disconnect(C1),
%% recover the limiter emqx_limiter_server:del_bucket(emqx_retainer, internal),
emqx_config:put([limiter, batch, bucket, retainer], RetainerCfg), emqx_retainer:update_config(#{
emqx_limiter_manager:restart_server(batch), <<"flow_control">> =>
timer:sleep(500), #{
<<"batch_read_number">> => 1,
emqx_retainer_dispatcher:refresh_limiter(), <<"batch_deliver_number">> => 1
timer:sleep(500), }
}),
ok. ok.
t_clear_expired(_) -> t_clear_expired(_) ->
@ -684,3 +674,33 @@ with_conf(ConfMod, Case) ->
emqx_retainer:update_config(Conf), emqx_retainer:update_config(Conf),
erlang:raise(Type, Error, Strace) erlang:raise(Type, Error, Strace)
end. end.
make_limiter_cfg(Rate) ->
Infinity = emqx_limiter_schema:infinity_value(),
Client = #{
rate => Rate,
initial => 0,
capacity => Infinity,
low_watermark => 1,
divisible => false,
max_retry_time => timer:seconds(5),
failure_strategy => force
},
#{client => Client, rate => Infinity, initial => 0, capacity => Infinity}.
make_limiter_json(Rate) ->
Client = #{
<<"rate">> => Rate,
<<"initial">> => 0,
<<"capacity">> => <<"infinity">>,
<<"low_watermark">> => 0,
<<"divisible">> => <<"false">>,
<<"max_retry_time">> => <<"5s">>,
<<"failure_strategy">> => <<"force">>
},
#{
<<"client">> => Client,
<<"rate">> => <<"infinity">>,
<<"initial">> => 0,
<<"capacity">> => <<"infinity">>
}.

View File

@ -287,7 +287,8 @@ COMPATIBILITY_CHECK='
compatiblity_info() { compatiblity_info() {
# RELEASE_LIB is used by Elixir # RELEASE_LIB is used by Elixir
"$BINDIR/$PROGNAME" \ # set crash-dump bytes to zero to ensure no crash dump is generated when erl crashes
env ERL_CRASH_DUMP_BYTES=0 "$BINDIR/$PROGNAME" \
-noshell \ -noshell \
-boot_var RELEASE_LIB "$ERTS_LIB_DIR/lib" \ -boot_var RELEASE_LIB "$ERTS_LIB_DIR/lib" \
-boot "$REL_DIR/start_clean" \ -boot "$REL_DIR/start_clean" \

build
View File

@ -14,9 +14,18 @@ if [ "$DEBUG" -eq 1 ]; then
set -x set -x
fi fi
PROFILE="$1" PROFILE_ARG="$1"
ARTIFACT="$2" ARTIFACT="$2"
if [[ "${PROFILE:-${PROFILE_ARG}}" != "$PROFILE_ARG" ]]; then
echo "PROFILE env var is set to '$PROFILE', but '$0' arg1 is '$1'"
exit 1
fi
# make sure PROFILE is exported, it is needed by rebar.config.erl
PROFILE=$PROFILE_ARG
export PROFILE
# ensure dir # ensure dir
cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")"
@ -106,6 +115,7 @@ assert_no_compile_time_only_deps() {
} }
make_rel() { make_rel() {
./scripts/pre-compile.sh "$PROFILE"
# compile all beams # compile all beams
./rebar3 as "$PROFILE" compile ./rebar3 as "$PROFILE" compile
# generate docs (require beam compiled), generated to etc and priv dirs # generate docs (require beam compiled), generated to etc and priv dirs
@ -116,6 +126,7 @@ make_rel() {
} }
make_elixir_rel() { make_elixir_rel() {
./scripts/pre-compile.sh "$PROFILE"
export_release_vars "$PROFILE" export_release_vars "$PROFILE"
mix release --overwrite mix release --overwrite
assert_no_compile_time_only_deps assert_no_compile_time_only_deps

View File

@ -1,11 +1,14 @@
# Introduction # Introduction
This chart bootstraps an emqx deployment on a Kubernetes cluster using the Helm package manager. This chart bootstraps an emqx deployment on a Kubernetes cluster using the Helm package manager.
# Prerequisites # Prerequisites
+ Kubernetes 1.6+ + Kubernetes 1.6+
+ Helm + Helm
# Installing the Chart # Installing the Chart
To install the chart with the release name `my-emqx`: To install the chart with the release name `my-emqx`:
+ From github + From github
@ -23,12 +26,15 @@ To install the chart with the release name `my-emqx`:
> If you want to install an unstable version, you need to add `--devel` when you execute the `helm install` command. > If you want to install an unstable version, you need to add `--devel` when you execute the `helm install` command.
# Uninstalling the Chart # Uninstalling the Chart
To uninstall/delete the `my-emqx` deployment: To uninstall/delete the `my-emqx` deployment:
``` ```
$ helm del my-emqx $ helm del my-emqx
``` ```
# Configuration # Configuration
The following table lists the configurable parameters of the emqx chart and their default values. The following table lists the configurable parameters of the emqx chart and their default values.
| Parameter | Description | Default Value | | Parameter | Description | Default Value |
@ -83,10 +89,33 @@ The following table lists the configurable parameters of the emqx chart and thei
| `ingress.mgmt.annotations` | Ingress annotations for EMQX Mgmt API | {} | | `ingress.mgmt.annotations` | Ingress annotations for EMQX Mgmt API | {} |
| `metrics.enable` | If set to true, [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) needs to be installed, and emqx_prometheus needs to enable | false | | `metrics.enable` | If set to true, [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator) needs to be installed, and emqx_prometheus needs to enable | false |
| `metrics.type` | Now we only supported "prometheus" | "prometheus" | | `metrics.type` | Now we only supported "prometheus" | "prometheus" |
| `ssl.enabled` | Enable SSL support | false |
| `ssl.useExisting` | Use existing certificate or let cert-manager generate one | false |
| `ssl.existingName` | Name of existing certificate | emqx-tls |
| `ssl.dnsnames` | DNS name(s) for certificate to be generated | {} |
| `ssl.issuer.name` | Issuer name for certificate generation | letsencrypt-dns |
| `ssl.issuer.kind` | Issuer kind for certificate generation | ClusterIssuer |
## EMQX specific settings ## EMQX specific settings
The following table lists the configurable [EMQX](https://www.emqx.io/)-specific parameters of the chart and their default values.
The following table lists the configurable [EMQX](https://www.emqx.io/)-specific parameters of the chart and their
default values.
Parameter | Description | Default Value Parameter | Description | Default Value
--- | --- | --- --- | --- | ---
`emqxConfig` | Map of [configuration](https://www.emqx.io/docs/en/latest/configuration/configuration.html) items expressed as [environment variables](https://www.emqx.io/docs/en/v4.3/configuration/environment-variable.html) (prefix can be omitted) or using the configuration files [namespaced dotted notation](https://www.emqx.io/docs/en/latest/configuration/configuration.html) | `nil` `emqxConfig` | Map of [configuration](https://www.emqx.io/docs/en/latest/configuration/configuration.html) items expressed as [environment variables](https://www.emqx.io/docs/en/v4.3/configuration/environment-variable.html) (prefix can be omitted) or using the configuration files [namespaced dotted notation](https://www.emqx.io/docs/en/latest/configuration/configuration.html) | `nil`
`emqxLicenseSecretName` | Name of the secret that holds the license information | `nil` `emqxLicenseSecretName` | Name of the secret that holds the license information | `nil`
## SSL settings
`cert-manager` generates secrets with certificate data under the keys `tls.crt` and `tls.key`. The helm chart always mounts those keys as files to `/tmp/ssl/`,
which needs to be explicitly configured either by changing the emqx config file or by passing the following environment variables:
```
EMQX_LISTENERS__SSL__DEFAULT__SSL_OPTIONS__CERTFILE: /tmp/ssl/tls.crt
EMQX_LISTENERS__SSL__DEFAULT__SSL_OPTIONS__KEYFILE: /tmp/ssl/tls.key
```
If you choose to use an existing certificate, make sure you update the filenames accordingly.
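For reference, a minimal `values.yaml` sketch that enables the new `ssl` options together with the `emqxConfig` environment variables above (the DNS name is a placeholder, not a chart default; issuer name and kind shown are the chart defaults):
```
ssl:
  enabled: true
  useExisting: false
  dnsnames:
    - mqtt.example.com   # placeholder hostname
  issuer:
    name: letsencrypt-dns
    kind: ClusterIssuer

emqxConfig:
  EMQX_LISTENERS__SSL__DEFAULT__SSL_OPTIONS__CERTFILE: /tmp/ssl/tls.crt
  EMQX_LISTENERS__SSL__DEFAULT__SSL_OPTIONS__KEYFILE: /tmp/ssl/tls.key
```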

View File

@ -53,6 +53,11 @@ spec:
{{- end }} {{- end }}
spec: spec:
volumes: volumes:
{{- if .Values.ssl.enabled }}
- name: ssl-cert
secret:
secretName: {{ include "emqx.fullname" . }}-tls
{{- end }}
{{- if not .Values.persistence.enabled }} {{- if not .Values.persistence.enabled }}
- name: emqx-data - name: emqx-data
emptyDir: {} emptyDir: {}
@ -124,22 +129,27 @@ spec:
volumeMounts: volumeMounts:
- name: emqx-data - name: emqx-data
mountPath: "/opt/emqx/data" mountPath: "/opt/emqx/data"
{{- if .Values.ssl.enabled }}
- name: ssl-cert
mountPath: /tmp/ssl
readOnly: true
{{- end}}
{{ if .Values.emqxLicenseSecretName }} {{ if .Values.emqxLicenseSecretName }}
- name: emqx-license - name: emqx-license
mountPath: "/opt/emqx/etc/emqx.lic" mountPath: "/opt/emqx/etc/emqx.lic"
subPath: "emqx.lic" subPath: "emqx.lic"
readOnly: true readOnly: true
{{ end }} {{- end }}
readinessProbe: readinessProbe:
httpGet: httpGet:
path: /api/v5/status path: /status
port: {{ .Values.emqxConfig.EMQX_DASHBOARD__LISTENER__HTTP | default 18083 }} port: {{ .Values.emqxConfig.EMQX_DASHBOARD__LISTENER__HTTP | default 18083 }}
initialDelaySeconds: 10 initialDelaySeconds: 10
periodSeconds: 5 periodSeconds: 5
failureThreshold: 30 failureThreshold: 30
livenessProbe: livenessProbe:
httpGet: httpGet:
path: /api/v5/status path: /status
port: {{ .Values.emqxConfig.EMQX_DASHBOARD__LISTENER__HTTP | default 18083 }} port: {{ .Values.emqxConfig.EMQX_DASHBOARD__LISTENER__HTTP | default 18083 }}
initialDelaySeconds: 60 initialDelaySeconds: 60
periodSeconds: 30 periodSeconds: 30

View File

@ -0,0 +1,16 @@
{{- if and (.Values.ssl.enabled) (not .Values.ssl.useExisting) -}}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "emqx.fullname" . }}-tls
spec:
secretName: {{ include "emqx.fullname" . }}-tls
issuerRef:
name: {{ default "letsencrypt-staging" .Values.ssl.issuer.name }}
kind: {{ default "ClusterIssuer" .Values.ssl.issuer.kind }}
dnsNames:
{{- range .Values.ssl.dnsnames }}
- {{ . }}
{{- end }}
{{- end -}}

View File

@ -203,3 +203,12 @@ containerSecurityContext:
metrics: metrics:
enabled: false enabled: false
type: prometheus type: prometheus
ssl:
enabled: false
useExisting: false
existingName: emqx-tls
dnsnames: {}
issuer:
name: letsencrypt-dns
kind: ClusterIssuer

View File

@ -7,14 +7,15 @@ COPY . /emqx
ARG EMQX_NAME=emqx ARG EMQX_NAME=emqx
ENV EMQX_RELUP=false ENV EMQX_RELUP=false
RUN export PROFILE="$EMQX_NAME" \ RUN export PROFILE=${EMQX_NAME%%-elixir} \
&& export EMQX_NAME=${EMQX_NAME%%-elixir} \ && export EMQX_NAME1=$EMQX_NAME \
&& export EMQX_NAME=$PROFILE \
&& export EMQX_LIB_PATH="_build/$EMQX_NAME/lib" \ && export EMQX_LIB_PATH="_build/$EMQX_NAME/lib" \
&& export EMQX_REL_PATH="/emqx/_build/$EMQX_NAME/rel/emqx" \ && export EMQX_REL_PATH="/emqx/_build/$EMQX_NAME/rel/emqx" \
&& export EMQX_REL_FORM='docker' \ && export EMQX_REL_FORM='docker' \
&& cd /emqx \ && cd /emqx \
&& rm -rf $EMQX_LIB_PATH \ && rm -rf $EMQX_LIB_PATH \
&& make $PROFILE \ && make $EMQX_NAME1 \
&& mkdir -p /emqx-rel \ && mkdir -p /emqx-rel \
&& mv $EMQX_REL_PATH /emqx-rel && mv $EMQX_REL_PATH /emqx-rel

View File

@ -28,14 +28,15 @@ COPY . /emqx
ARG EMQX_NAME=emqx ARG EMQX_NAME=emqx
ENV EMQX_RELUP=false ENV EMQX_RELUP=false
RUN export PROFILE="$EMQX_NAME" \ RUN export PROFILE=${EMQX_NAME%%-elixir} \
&& export EMQX_NAME=${EMQX_NAME%%-elixir} \ && export EMQX_NAME1=$EMQX_NAME \
&& export EMQX_NAME=$PROFILE \
&& export EMQX_LIB_PATH="_build/$EMQX_NAME/lib" \ && export EMQX_LIB_PATH="_build/$EMQX_NAME/lib" \
&& export EMQX_REL_PATH="/emqx/_build/$EMQX_NAME/rel/emqx" \ && export EMQX_REL_PATH="/emqx/_build/$EMQX_NAME/rel/emqx" \
&& export EMQX_REL_FORM='docker' \ && export EMQX_REL_FORM='docker' \
&& cd /emqx \ && cd /emqx \
&& rm -rf $EMQX_LIB_PATH \ && rm -rf $EMQX_LIB_PATH \
&& make $PROFILE \ && make $EMQX_NAME1 \
&& mkdir -p /emqx-rel \ && mkdir -p /emqx-rel \
&& mv $EMQX_REL_PATH /emqx-rel && mv $EMQX_REL_PATH /emqx-rel

View File

@ -0,0 +1,34 @@
emqx_license_http_api {
desc_license_info_api {
desc {
en: "Get license info"
zh: "获取许可证信息"
}
label: {
en: "License info"
zh: "许可证信息"
}
}
desc_license_file_api {
desc {
en: "Upload a license file"
zh: "上传一个许可证文件"
}
label: {
en: "Update license"
zh: "更新许可证"
}
}
desc_license_key_api {
desc {
en: "Update a license key"
zh: "更新一个许可证密钥"
}
label: {
en: "Update license"
zh: "更新许可证"
}
}
}

View File

@ -22,6 +22,7 @@
read_license/0, read_license/0,
read_license/1, read_license/1,
update_file/1, update_file/1,
update_file_contents/1,
update_key/1, update_key/1,
license_dir/0, license_dir/0,
save_and_backup_license/1 save_and_backup_license/1
@ -70,15 +71,20 @@ relative_license_path() ->
update_file(Filename) when is_binary(Filename); is_list(Filename) -> update_file(Filename) when is_binary(Filename); is_list(Filename) ->
case file:read_file(Filename) of case file:read_file(Filename) of
{ok, Contents} -> {ok, Contents} ->
update_file_contents(Contents);
{error, Error} ->
{error, Error}
end.
-spec update_file_contents(binary() | string()) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
update_file_contents(Contents) when is_binary(Contents) ->
Result = emqx_conf:update( Result = emqx_conf:update(
?CONF_KEY_PATH, ?CONF_KEY_PATH,
{file, Contents}, {file, Contents},
#{rawconf_with_defaults => true, override_to => local} #{rawconf_with_defaults => true, override_to => local}
), ),
handle_config_update_result(Result); handle_config_update_result(Result).
{error, Error} ->
{error, Error}
end.
-spec update_key(binary() | string()) -> -spec update_key(binary() | string()) ->
{ok, emqx_config:update_result()} | {error, emqx_config:update_error()}. {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.

View File

@ -0,0 +1,166 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_license_http_api).
-behaviour(minirest_api).
-include_lib("hocon/include/hoconsc.hrl").
-include_lib("emqx/include/logger.hrl").
-export([
namespace/0,
api_spec/0,
paths/0,
schema/1
]).
-export([
'/license'/2,
'/license/key'/2,
'/license/file'/2
]).
-define(BAD_REQUEST, 'BAD_REQUEST').
namespace() -> "license_http_api".
api_spec() ->
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => false}).
paths() ->
[
"/license",
"/license/key",
"/license/file"
].
schema("/license") ->
#{
'operationId' => '/license',
get => #{
tags => [<<"license">>],
summary => <<"Get license info">>,
description => ?DESC("desc_license_info_api"),
responses => #{
200 => emqx_dashboard_swagger:schema_with_examples(
map(),
#{
sample_license_info => #{
value => sample_license_info_response()
}
}
)
}
}
};
schema("/license/file") ->
#{
'operationId' => '/license/file',
post => #{
tags => [<<"license">>],
summary => <<"Upload license file">>,
description => ?DESC("desc_license_file_api"),
'requestBody' => emqx_dashboard_swagger:file_schema(filename),
responses => #{
200 => emqx_dashboard_swagger:schema_with_examples(
map(),
#{
sample_license_info => #{
value => sample_license_info_response()
}
}
),
400 => emqx_dashboard_swagger:error_codes([?BAD_REQUEST], <<"Bad license file">>)
}
}
};
schema("/license/key") ->
#{
'operationId' => '/license/key',
post => #{
tags => [<<"license">>],
summary => <<"Update license key">>,
description => ?DESC("desc_license_key_api"),
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
emqx_license_schema:key_license(),
#{
license_key => #{
summary => <<"License key string">>,
value => #{
<<"key">> => <<"xxx">>,
<<"connection_low_watermark">> => "75%",
<<"connection_high_watermark">> => "80%"
}
}
}
),
responses => #{
200 => emqx_dashboard_swagger:schema_with_examples(
map(),
#{
sample_license_info => #{
value => sample_license_info_response()
}
}
),
400 => emqx_dashboard_swagger:error_codes([?BAD_REQUEST], <<"Bad license file">>)
}
}
}.
sample_license_info_response() ->
#{
customer => "Foo",
customer_type => 10,
deployment => "bar-deployment",
email => "contact@foo.com",
expiry => false,
expiry_at => "2295-10-27",
max_connections => 10,
start_at => "2022-01-11",
type => "trial"
}.
error_msg(Code, Msg) ->
#{code => Code, message => emqx_misc:readable_error_msg(Msg)}.
'/license'(get, _Params) ->
License = maps:from_list(emqx_license_checker:dump()),
{200, License}.
'/license/file'(post, #{body := #{<<"filename">> := #{type := _} = File}}) ->
[{_Filename, Contents}] = maps:to_list(maps:without([type], File)),
case emqx_license:update_file_contents(Contents) of
{error, Error} ->
?SLOG(error, #{
msg => "bad_license_file",
reason => Error
}),
{400, error_msg(?BAD_REQUEST, <<"Bad license file">>)};
{ok, _} ->
?SLOG(info, #{
msg => "updated_license_file"
}),
License = maps:from_list(emqx_license_checker:dump()),
{200, License}
end;
'/license/file'(post, _Params) ->
{400, error_msg(?BAD_REQUEST, <<"Invalid request params">>)}.
'/license/key'(post, #{body := #{<<"key">> := Key}}) ->
case emqx_license:update_key(Key) of
{error, Error} ->
?SLOG(error, #{
msg => "bad_license_key",
reason => Error
}),
{400, error_msg(?BAD_REQUEST, <<"Bad license key">>)};
{ok, _} ->
?SLOG(info, #{msg => "updated_license_key"}),
License = maps:from_list(emqx_license_checker:dump()),
{200, License}
end;
'/license/key'(post, _Params) ->
{400, error_msg(?BAD_REQUEST, <<"Invalid request params">>)}.
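Together the handlers above expose GET /license, POST /license/key and POST /license/file on the dashboard listener. A minimal sketch of exercising the first two from an Erlang shell with httpc; the host/port (localhost:18083), the /api/v5 prefix and the AuthHeader value are assumptions and not part of this change, and /license/file additionally expects a multipart/form-data upload, as covered by the test suite further down:

%% Assumed: inets is started and AuthHeader carries a valid dashboard credential.
ok = application:ensure_started(inets),
AuthHeader = {"Authorization", "Bearer <token>"},
%% Read the current license info (the same map returned by emqx_license_checker:dump/0).
{ok, {{_, 200, _}, _, InfoJson}} =
    httpc:request(get, {"http://localhost:18083/api/v5/license", [AuthHeader]}, [], []),
%% Update the license key; a bad key comes back as 400 / BAD_REQUEST.
Body = emqx_json:encode(#{<<"key">> => <<"xxx">>}),
{ok, {{_, 200, _}, _, _}} =
    httpc:request(
        post,
        {"http://localhost:18083/api/v5/license/key", [AuthHeader], "application/json", Body},
        [],
        []
    ).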

View File

@ -72,9 +72,16 @@
%% API %% API
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
-ifdef(TEST).
-spec parse(string() | binary()) -> {ok, license()} | {error, term()}.
parse(Content) ->
PubKey = persistent_term:get(emqx_license_test_pubkey, ?PUBKEY),
parse(Content, PubKey).
-else.
-spec parse(string() | binary()) -> {ok, license()} | {error, term()}. -spec parse(string() | binary()) -> {ok, license()} | {error, term()}.
parse(Content) -> parse(Content) ->
parse(Content, ?PUBKEY). parse(Content, ?PUBKEY).
-endif.
parse(Content, Pem) -> parse(Content, Pem) ->
[PemEntry] = public_key:pem_decode(Pem), [PemEntry] = public_key:pem_decode(Pem),
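The -ifdef(TEST) clause above makes parse/1 look up emqx_license_test_pubkey (falling back to the compiled-in ?PUBKEY), which is what lets the updated suites drop the meck-based override of emqx_license_parser. A minimal sketch of the pattern inside a test case, with the generated key and the cleanup step as illustrative assumptions:

%% TEST build only: point the parser at the test public key.
ok = persistent_term:put(
    emqx_license_test_pubkey,
    emqx_license_test_lib:public_key_pem()
),
TestKey = emqx_license_test_lib:make_license(#{max_connections => "100"}),
{ok, _License} = emqx_license_parser:parse(TestKey),
%% Restore the default behaviour for later suites.
persistent_term:erase(emqx_license_test_pubkey).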

View File

@ -15,7 +15,9 @@
-export([roots/0, fields/1, validations/0, desc/1]). -export([roots/0, fields/1, validations/0, desc/1]).
-export([ -export([
license_type/0 license_type/0,
key_license/0,
file_license/0
]). ]).
roots() -> roots() ->
@ -99,10 +101,16 @@ validations() ->
license_type() -> license_type() ->
hoconsc:union([ hoconsc:union([
hoconsc:ref(?MODULE, key_license), key_license(),
hoconsc:ref(?MODULE, file_license) file_license()
]). ]).
key_license() ->
hoconsc:ref(?MODULE, key_license).
file_license() ->
hoconsc:ref(?MODULE, file_license).
check_license_watermark(Conf) -> check_license_watermark(Conf) ->
case hocon_maps:get("license.connection_low_watermark", Conf) of case hocon_maps:get("license.connection_low_watermark", Conf) of
undefined -> undefined ->

View File

@ -141,17 +141,9 @@ setup_test(TestCase, Config) when
emqx_config:put([license], LicConfig), emqx_config:put([license], LicConfig),
RawConfig = #{<<"type">> => file, <<"file">> => LicensePath}, RawConfig = #{<<"type">> => file, <<"file">> => LicensePath},
emqx_config:put_raw([<<"license">>], RawConfig), emqx_config:put_raw([<<"license">>], RawConfig),
ok = meck:new(emqx_license, [non_strict, passthrough, no_history, no_link]), ok = persistent_term:put(
%% meck:expect(emqx_license, read_license, fun() -> {ok, License} end), emqx_license_test_pubkey,
meck:expect(
emqx_license_parser,
parse,
fun(X) ->
emqx_license_parser:parse(
X,
emqx_license_test_lib:public_key_pem() emqx_license_test_lib:public_key_pem()
)
end
), ),
ok; ok;
(_) -> (_) ->

View File

@ -0,0 +1,244 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%--------------------------------------------------------------------
-module(emqx_license_http_api_SUITE).
-compile(nowarn_export_all).
-compile(export_all).
-include_lib("emqx/include/emqx_mqtt.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
%%------------------------------------------------------------------------------
%% CT boilerplate
%%------------------------------------------------------------------------------
all() ->
emqx_common_test_helpers:all(?MODULE).
init_per_suite(Config) ->
_ = application:load(emqx_conf),
emqx_config:save_schema_mod_and_names(emqx_license_schema),
emqx_common_test_helpers:start_apps([emqx_license, emqx_dashboard], fun set_special_configs/1),
Config.
end_per_suite(_) ->
emqx_common_test_helpers:stop_apps([emqx_license, emqx_dashboard]),
Config = #{type => file, file => emqx_license_test_lib:default_license()},
emqx_config:put([license], Config),
RawConfig = #{<<"type">> => file, <<"file">> => emqx_license_test_lib:default_license()},
emqx_config:put_raw([<<"license">>], RawConfig),
persistent_term:erase(emqx_license_test_pubkey),
ok.
set_special_configs(emqx_dashboard) ->
emqx_dashboard_api_test_helpers:set_default_config(<<"license_admin">>);
set_special_configs(emqx_license) ->
LicenseKey = emqx_license_test_lib:make_license(#{max_connections => "100"}),
Config = #{type => key, key => LicenseKey},
emqx_config:put([license], Config),
RawConfig = #{<<"type">> => key, <<"key">> => LicenseKey},
emqx_config:put_raw([<<"license">>], RawConfig),
ok = persistent_term:put(
emqx_license_test_pubkey,
emqx_license_test_lib:public_key_pem()
),
ok;
set_special_configs(_) ->
ok.
init_per_testcase(_TestCase, Config) ->
{ok, _} = emqx_cluster_rpc:start_link(node(), emqx_cluster_rpc, 1000),
Config.
end_per_testcase(_TestCase, _Config) ->
{ok, _} = reset_license(),
ok.
%%------------------------------------------------------------------------------
%% Helper fns
%%------------------------------------------------------------------------------
request(Method, Uri, Body) ->
emqx_dashboard_api_test_helpers:request(<<"license_admin">>, Method, Uri, Body).
uri(Segments) ->
emqx_dashboard_api_test_helpers:uri(Segments).
get_license() ->
maps:from_list(emqx_license_checker:dump()).
default_license() ->
emqx_license_test_lib:make_license(#{max_connections => "100"}).
reset_license() ->
emqx_license:update_key(default_license()).
assert_untouched_license() ->
?assertMatch(
#{max_connections := 100},
get_license()
).
multipart_formdata_request(Uri, File) ->
emqx_dashboard_api_test_helpers:multipart_formdata_request(
Uri,
_Username = <<"license_admin">>,
_Fields = [],
[File]
).
%%------------------------------------------------------------------------------
%% Testcases
%%------------------------------------------------------------------------------
t_license_info(_Config) ->
Res = request(get, uri(["license"]), []),
?assertMatch({ok, 200, _}, Res),
{ok, 200, Payload} = Res,
?assertEqual(
#{
<<"customer">> => <<"Foo">>,
<<"customer_type">> => 10,
<<"deployment">> => <<"bar-deployment">>,
<<"email">> => <<"contact@foo.com">>,
<<"expiry">> => false,
<<"expiry_at">> => <<"2295-10-27">>,
<<"max_connections">> => 100,
<<"start_at">> => <<"2022-01-11">>,
<<"type">> => <<"trial">>
},
emqx_json:decode(Payload, [return_maps])
),
ok.
t_license_upload_file_success(_Config) ->
NewKey = emqx_license_test_lib:make_license(#{max_connections => "999"}),
Res = multipart_formdata_request(
uri(["license", "file"]),
{filename, "emqx.lic", NewKey}
),
?assertMatch({ok, 200, _}, Res),
{ok, 200, Payload} = Res,
?assertEqual(
#{
<<"customer">> => <<"Foo">>,
<<"customer_type">> => 10,
<<"deployment">> => <<"bar-deployment">>,
<<"email">> => <<"contact@foo.com">>,
<<"expiry">> => false,
<<"expiry_at">> => <<"2295-10-27">>,
<<"max_connections">> => 999,
<<"start_at">> => <<"2022-01-11">>,
<<"type">> => <<"trial">>
},
emqx_json:decode(Payload, [return_maps])
),
?assertMatch(
#{max_connections := 999},
get_license()
),
ok.
t_license_upload_file_bad_license(_Config) ->
Res = multipart_formdata_request(
uri(["license", "file"]),
{filename, "bad.lic", <<"bad key">>}
),
?assertMatch({ok, 400, _}, Res),
{ok, 400, Payload} = Res,
?assertEqual(
#{
<<"code">> => <<"BAD_REQUEST">>,
<<"message">> => <<"Bad license file">>
},
emqx_json:decode(Payload, [return_maps])
),
assert_untouched_license(),
ok.
t_license_upload_file_not_json(_Config) ->
Res = request(
post,
uri(["license", "file"]),
<<"">>
),
?assertMatch({ok, 400, _}, Res),
{ok, 400, Payload} = Res,
?assertEqual(
#{
<<"code">> => <<"BAD_REQUEST">>,
<<"message">> => <<"Invalid request params">>
},
emqx_json:decode(Payload, [return_maps])
),
assert_untouched_license(),
ok.
t_license_upload_key_success(_Config) ->
NewKey = emqx_license_test_lib:make_license(#{max_connections => "999"}),
Res = request(
post,
uri(["license", "key"]),
#{key => NewKey}
),
?assertMatch({ok, 200, _}, Res),
{ok, 200, Payload} = Res,
?assertEqual(
#{
<<"customer">> => <<"Foo">>,
<<"customer_type">> => 10,
<<"deployment">> => <<"bar-deployment">>,
<<"email">> => <<"contact@foo.com">>,
<<"expiry">> => false,
<<"expiry_at">> => <<"2295-10-27">>,
<<"max_connections">> => 999,
<<"start_at">> => <<"2022-01-11">>,
<<"type">> => <<"trial">>
},
emqx_json:decode(Payload, [return_maps])
),
?assertMatch(
#{max_connections := 999},
get_license()
),
ok.
t_license_upload_key_bad_key(_Config) ->
BadKey = <<"bad key">>,
Res = request(
post,
uri(["license", "key"]),
#{key => BadKey}
),
?assertMatch({ok, 400, _}, Res),
{ok, 400, Payload} = Res,
?assertEqual(
#{
<<"code">> => <<"BAD_REQUEST">>,
<<"message">> => <<"Bad license key">>
},
emqx_json:decode(Payload, [return_maps])
),
assert_untouched_license(),
ok.
t_license_upload_key_not_json(_Config) ->
Res = request(
post,
uri(["license", "key"]),
<<"">>
),
?assertMatch({ok, 400, _}, Res),
{ok, 400, Payload} = Res,
?assertEqual(
#{
<<"code">> => <<"BAD_REQUEST">>,
<<"message">> => <<"Invalid request params">>
},
emqx_json:decode(Payload, [return_maps])
),
assert_untouched_license(),
ok.

View File

@ -47,6 +47,32 @@ test_key(Filename, Format) ->
public_key:pem_entry_decode(PemEntry) public_key:pem_entry_decode(PemEntry)
end. end.
make_license(Values0 = #{}) ->
Defaults = #{
license_format => "220111",
license_type => "0",
customer_type => "10",
name => "Foo",
email => "contact@foo.com",
deployment => "bar-deployment",
start_date => "20220111",
days => "100000",
max_connections => "10"
},
Values1 = maps:merge(Defaults, Values0),
Keys = [
license_format,
license_type,
customer_type,
name,
email,
deployment,
start_date,
days,
max_connections
],
Values = lists:map(fun(K) -> maps:get(K, Values1) end, Keys),
make_license(Values);
make_license(Values) -> make_license(Values) ->
Key = private_key(), Key = private_key(),
Text = string:join(Values, "\n"), Text = string:join(Values, "\n"),

View File

@ -51,11 +51,11 @@ defmodule EMQXUmbrella.MixProject do
{:gproc, github: "uwiger/gproc", tag: "0.8.0", override: true}, {:gproc, github: "uwiger/gproc", tag: "0.8.0", override: true},
{:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true}, {:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true},
{:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true}, {:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true},
{:esockd, github: "emqx/esockd", tag: "5.9.3", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.4", override: true},
{:ekka, github: "emqx/ekka", tag: "0.13.3", override: true}, {:ekka, github: "emqx/ekka", tag: "0.13.3", override: true},
{:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true},
{:grpc, github: "emqx/grpc-erl", tag: "0.6.6", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.6", override: true},
{:minirest, github: "emqx/minirest", tag: "1.3.5", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.6", override: true},
{:ecpool, github: "emqx/ecpool", tag: "0.5.2", override: true}, {:ecpool, github: "emqx/ecpool", tag: "0.5.2", override: true},
{:replayq, "0.3.4", override: true}, {:replayq, "0.3.4", override: true},
{:pbkdf2, github: "emqx/erlang-pbkdf2", tag: "2.0.4", override: true}, {:pbkdf2, github: "emqx/erlang-pbkdf2", tag: "2.0.4", override: true},
@ -66,7 +66,7 @@ defmodule EMQXUmbrella.MixProject do
# in conflict by emqtt and hocon # in conflict by emqtt and hocon
{:getopt, "1.0.2", override: true}, {:getopt, "1.0.2", override: true},
{:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.0", override: true}, {:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.0", override: true},
{:hocon, github: "emqx/hocon", tag: "0.29.0", override: true}, {:hocon, github: "emqx/hocon", tag: "0.30.0", override: true},
{:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.1", override: true}, {:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.1", override: true},
{:esasl, github: "emqx/esasl", tag: "0.2.0"}, {:esasl, github: "emqx/esasl", tag: "0.2.0"},
{:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"}, {:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"},

View File

@ -53,11 +53,11 @@
, {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}} , {gproc, {git, "https://github.com/uwiger/gproc", {tag, "0.8.0"}}}
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}} , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.3"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}}
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.3"}}} , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.13.3"}}}
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}
, {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.6"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.6"}}}
, {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.5"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.6"}}}
, {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.2"}}} , {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.2"}}}
, {replayq, "0.3.4"} , {replayq, "0.3.4"}
, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}} , {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}
@ -67,7 +67,7 @@
, {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}} , {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}}
, {getopt, "1.0.2"} , {getopt, "1.0.2"}
, {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}} , {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}
, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.29.0"}}} , {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.30.0"}}}
, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.1"}}} , {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.1"}}}
, {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}} , {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}}
, {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}} , {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}}

View File

@ -4,6 +4,7 @@
do(Dir, CONFIG) -> do(Dir, CONFIG) ->
ok = assert_otp(), ok = assert_otp(),
ok = warn_profile_env(),
case iolist_to_binary(Dir) of case iolist_to_binary(Dir) of
<<".">> -> <<".">> ->
C1 = deps(CONFIG), C1 = deps(CONFIG),
@ -117,6 +118,9 @@ is_raspbian() ->
is_win32() -> is_win32() ->
win32 =:= element(1, os:type()). win32 =:= element(1, os:type()).
project_app_dirs() ->
project_app_dirs(get_edition_from_profille_env()).
project_app_dirs(Edition) -> project_app_dirs(Edition) ->
["apps/*"] ++ ["apps/*"] ++
case is_enterprise(Edition) of case is_enterprise(Edition) of
@ -126,7 +130,7 @@ project_app_dirs(Edition) ->
plugins() -> plugins() ->
[ [
{relup_helper, {git, "https://github.com/emqx/relup_helper", {tag, "2.0.0"}}}, {relup_helper, {git, "https://github.com/emqx/relup_helper", {tag, "2.1.0"}}},
%% emqx main project does not require port-compiler %% emqx main project does not require port-compiler
%% pin at root level for deterministic %% pin at root level for deterministic
{pc, "v1.14.0"} {pc, "v1.14.0"}
@ -149,6 +153,9 @@ test_deps() ->
{er_coap_client, {git, "https://github.com/emqx/er_coap_client", {tag, "v1.0.5"}}} {er_coap_client, {git, "https://github.com/emqx/er_coap_client", {tag, "v1.0.5"}}}
]. ].
common_compile_opts(Vsn) ->
common_compile_opts(get_edition_from_profille_env(), Vsn).
common_compile_opts(Edition, Vsn) -> common_compile_opts(Edition, Vsn) ->
% always include debug_info % always include debug_info
[ [
@ -159,6 +166,36 @@ common_compile_opts(Edition, Vsn) ->
[{d, 'EMQX_BENCHMARK'} || os:getenv("EMQX_BENCHMARK") =:= "1"] ++ [{d, 'EMQX_BENCHMARK'} || os:getenv("EMQX_BENCHMARK") =:= "1"] ++
[{d, 'BUILD_WITHOUT_QUIC'} || not is_quicer_supported()]. [{d, 'BUILD_WITHOUT_QUIC'} || not is_quicer_supported()].
warn_profile_env() ->
case os:getenv("PROFILE") of
false ->
io:format(
standard_error,
"WARN: environment variable PROFILE is not set, using 'emqx-enterprise'~n",
[]
);
_ ->
ok
end.
%% this function is only used for test/check profiles
get_edition_from_profille_env() ->
case os:getenv("PROFILE") of
"emqx" ->
ce;
"emqx-" ++ _ ->
ce;
"emqx-enterprise" ->
ee;
"emqx-enterprise-" ++ _ ->
ee;
false ->
ee;
V ->
io:format(standard_error, "ERROR: bad_PROFILE ~p~n", [V]),
exit(bad_PROFILE)
end.
prod_compile_opts(Edition, Vsn) -> prod_compile_opts(Edition, Vsn) ->
[ [
compressed, compressed,
@ -212,14 +249,14 @@ profiles_dev() ->
Vsn = get_vsn('emqx-enterprise'), Vsn = get_vsn('emqx-enterprise'),
[ [
{check, [ {check, [
{erl_opts, common_compile_opts(ee, Vsn)}, {erl_opts, common_compile_opts(Vsn)},
{project_app_dirs, project_app_dirs(ee)} {project_app_dirs, project_app_dirs()}
]}, ]},
{test, [ {test, [
{deps, test_deps()}, {deps, test_deps()},
{erl_opts, common_compile_opts(ee, Vsn) ++ erl_opts_i()}, {erl_opts, common_compile_opts(Vsn) ++ erl_opts_i()},
{extra_src_dirs, [{"test", [{recursive, true}]}]}, {extra_src_dirs, [{"test", [{recursive, true}]}]},
{project_app_dirs, project_app_dirs(ee)} {project_app_dirs, project_app_dirs()}
]} ]}
]. ].

View File

@ -5,8 +5,20 @@ set -euo pipefail
# ensure dir # ensure dir
cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")/.." cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")/.."
VERSION="${1}"
case "$VERSION" in
v*)
RELEASE_ASSET_FILE="emqx-dashboard.zip" RELEASE_ASSET_FILE="emqx-dashboard.zip"
VERSION="${EMQX_DASHBOARD_VERSION}" ;;
e*)
RELEASE_ASSET_FILE="emqx-enterprise-dashboard.zip"
;;
*)
echo "Unknown version $VERSION"
exit 1
;;
esac
DASHBOARD_PATH='apps/emqx_dashboard/priv' DASHBOARD_PATH='apps/emqx_dashboard/priv'
DASHBOARD_REPO='emqx-dashboard-web-new' DASHBOARD_REPO='emqx-dashboard-web-new'
DIRECT_DOWNLOAD_URL="https://github.com/emqx/${DASHBOARD_REPO}/releases/download/${VERSION}/${RELEASE_ASSET_FILE}" DIRECT_DOWNLOAD_URL="https://github.com/emqx/${DASHBOARD_REPO}/releases/download/${VERSION}/${RELEASE_ASSET_FILE}"

Some files were not shown because too many files have changed in this diff.