Merge branch 'master' into changes-enable_pipelining-type
This commit is contained in:
commit
efc0ca2b62
2
Makefile
2
Makefile
|
@ -7,7 +7,7 @@ export EMQX_DEFAULT_BUILDER = ghcr.io/emqx/emqx-builder/5.0-15:1.13.3-24.2.1-1-a
|
||||||
export EMQX_DEFAULT_RUNNER = alpine:3.15.1
|
export EMQX_DEFAULT_RUNNER = alpine:3.15.1
|
||||||
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
|
export OTP_VSN ?= $(shell $(CURDIR)/scripts/get-otp-vsn.sh)
|
||||||
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
|
export ELIXIR_VSN ?= $(shell $(CURDIR)/scripts/get-elixir-vsn.sh)
|
||||||
export EMQX_DASHBOARD_VERSION ?= v0.34.0
|
export EMQX_DASHBOARD_VERSION ?= v0.35.0
|
||||||
export DOCKERFILE := deploy/docker/Dockerfile
|
export DOCKERFILE := deploy/docker/Dockerfile
|
||||||
export EMQX_REL_FORM ?= tgz
|
export EMQX_REL_FORM ?= tgz
|
||||||
ifeq ($(OS),Windows_NT)
|
ifeq ($(OS),Windows_NT)
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
|
@ -27,9 +27,9 @@
|
||||||
{jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
|
{jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}},
|
||||||
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
|
{cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}},
|
||||||
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.2"}}},
|
{esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.2"}}},
|
||||||
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.6"}}},
|
{ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.8"}}},
|
||||||
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
|
{gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
|
||||||
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.5"}}},
|
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.28.0"}}},
|
||||||
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
|
{pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
|
||||||
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
|
{recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
|
||||||
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}
|
{snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}
|
||||||
|
|
|
@ -17,10 +17,12 @@
|
||||||
|
|
||||||
-compile({no_auto_import, [get/0, get/1, put/2, erase/1]}).
|
-compile({no_auto_import, [get/0, get/1, put/2, erase/1]}).
|
||||||
-elvis([{elvis_style, god_modules, disable}]).
|
-elvis([{elvis_style, god_modules, disable}]).
|
||||||
|
-include("logger.hrl").
|
||||||
|
|
||||||
-export([
|
-export([
|
||||||
init_load/1,
|
init_load/1,
|
||||||
init_load/2,
|
init_load/2,
|
||||||
|
init_load/3,
|
||||||
read_override_conf/1,
|
read_override_conf/1,
|
||||||
delete_override_conf_files/0,
|
delete_override_conf_files/0,
|
||||||
check_config/2,
|
check_config/2,
|
||||||
|
@ -85,6 +87,7 @@
|
||||||
]).
|
]).
|
||||||
|
|
||||||
-include("logger.hrl").
|
-include("logger.hrl").
|
||||||
|
-include_lib("hocon/include/hoconsc.hrl").
|
||||||
|
|
||||||
-define(CONF, conf).
|
-define(CONF, conf).
|
||||||
-define(RAW_CONF, raw_conf).
|
-define(RAW_CONF, raw_conf).
|
||||||
|
@ -304,15 +307,21 @@ put_raw(KeyPath, Config) ->
|
||||||
%%============================================================================
|
%%============================================================================
|
||||||
init_load(SchemaMod) ->
|
init_load(SchemaMod) ->
|
||||||
ConfFiles = application:get_env(emqx, config_files, []),
|
ConfFiles = application:get_env(emqx, config_files, []),
|
||||||
init_load(SchemaMod, ConfFiles).
|
init_load(SchemaMod, ConfFiles, #{raw_with_default => true}).
|
||||||
|
|
||||||
|
init_load(SchemaMod, Opts) when is_map(Opts) ->
|
||||||
|
ConfFiles = application:get_env(emqx, config_files, []),
|
||||||
|
init_load(SchemaMod, ConfFiles, Opts);
|
||||||
|
init_load(SchemaMod, ConfFiles) ->
|
||||||
|
init_load(SchemaMod, ConfFiles, #{raw_with_default => false}).
|
||||||
|
|
||||||
%% @doc Initial load of the given config files.
|
%% @doc Initial load of the given config files.
|
||||||
%% NOTE: The order of the files is significant, configs from files ordered
|
%% NOTE: The order of the files is significant, configs from files ordered
|
||||||
%% in the rear of the list overrides prior values.
|
%% in the rear of the list overrides prior values.
|
||||||
-spec init_load(module(), [string()] | binary() | hocon:config()) -> ok.
|
-spec init_load(module(), [string()] | binary() | hocon:config()) -> ok.
|
||||||
init_load(SchemaMod, Conf) when is_list(Conf) orelse is_binary(Conf) ->
|
init_load(SchemaMod, Conf, Opts) when is_list(Conf) orelse is_binary(Conf) ->
|
||||||
init_load(SchemaMod, parse_hocon(Conf));
|
init_load(SchemaMod, parse_hocon(Conf), Opts);
|
||||||
init_load(SchemaMod, RawConf) when is_map(RawConf) ->
|
init_load(SchemaMod, RawConf, Opts) when is_map(RawConf) ->
|
||||||
ok = save_schema_mod_and_names(SchemaMod),
|
ok = save_schema_mod_and_names(SchemaMod),
|
||||||
%% Merge environment variable overrides on top
|
%% Merge environment variable overrides on top
|
||||||
RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
|
RawConfWithEnvs = merge_envs(SchemaMod, RawConf),
|
||||||
|
@ -320,14 +329,46 @@ init_load(SchemaMod, RawConf) when is_map(RawConf) ->
|
||||||
LocalOverrides = read_override_conf(#{override_to => local}),
|
LocalOverrides = read_override_conf(#{override_to => local}),
|
||||||
Overrides = hocon:deep_merge(ClusterOverrides, LocalOverrides),
|
Overrides = hocon:deep_merge(ClusterOverrides, LocalOverrides),
|
||||||
RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides),
|
RawConfWithOverrides = hocon:deep_merge(RawConfWithEnvs, Overrides),
|
||||||
%% check configs against the schema
|
|
||||||
{_AppEnvs, CheckedConf} =
|
|
||||||
check_config(SchemaMod, RawConfWithOverrides, #{}),
|
|
||||||
RootNames = get_root_names(),
|
RootNames = get_root_names(),
|
||||||
ok = save_to_config_map(
|
RawConfAll = raw_conf_with_default(SchemaMod, RootNames, RawConfWithOverrides, Opts),
|
||||||
maps:with(get_atom_root_names(), CheckedConf),
|
%% check configs against the schema
|
||||||
maps:with(RootNames, RawConfWithOverrides)
|
{_AppEnvs, CheckedConf} = check_config(SchemaMod, RawConfAll, #{}),
|
||||||
).
|
ok = save_to_config_map(CheckedConf, RawConfAll).
|
||||||
|
|
||||||
|
%% keep the raw and non-raw conf has the same keys to make update raw conf easier.
|
||||||
|
raw_conf_with_default(SchemaMod, RootNames, RawConf, #{raw_with_default := true}) ->
|
||||||
|
Fun = fun(Name, Acc) ->
|
||||||
|
case maps:is_key(Name, RawConf) of
|
||||||
|
true ->
|
||||||
|
Acc;
|
||||||
|
false ->
|
||||||
|
case lists:keyfind(Name, 1, hocon_schema:roots(SchemaMod)) of
|
||||||
|
false ->
|
||||||
|
Acc;
|
||||||
|
{_, {_, Schema}} ->
|
||||||
|
Acc#{Name => schema_default(Schema)}
|
||||||
|
end
|
||||||
|
end
|
||||||
|
end,
|
||||||
|
RawDefault = lists:foldl(Fun, #{}, RootNames),
|
||||||
|
maps:merge(RawConf, fill_defaults(SchemaMod, RawDefault, #{}));
|
||||||
|
raw_conf_with_default(_SchemaMod, _RootNames, RawConf, _Opts) ->
|
||||||
|
RawConf.
|
||||||
|
|
||||||
|
schema_default(Schema) ->
|
||||||
|
case hocon_schema:field_schema(Schema, type) of
|
||||||
|
?ARRAY(_) ->
|
||||||
|
[];
|
||||||
|
?LAZY(?ARRAY(_)) ->
|
||||||
|
[];
|
||||||
|
?LAZY(?UNION(Unions)) ->
|
||||||
|
case [A || ?ARRAY(A) <- Unions] of
|
||||||
|
[_ | _] -> [];
|
||||||
|
_ -> #{}
|
||||||
|
end;
|
||||||
|
_ ->
|
||||||
|
#{}
|
||||||
|
end.
|
||||||
|
|
||||||
parse_hocon(Conf) ->
|
parse_hocon(Conf) ->
|
||||||
IncDirs = include_dirs(),
|
IncDirs = include_dirs(),
|
||||||
|
@ -466,9 +507,6 @@ get_schema_mod(RootName) ->
|
||||||
get_root_names() ->
|
get_root_names() ->
|
||||||
maps:get(names, persistent_term:get(?PERSIS_SCHEMA_MODS, #{names => []})).
|
maps:get(names, persistent_term:get(?PERSIS_SCHEMA_MODS, #{names => []})).
|
||||||
|
|
||||||
get_atom_root_names() ->
|
|
||||||
[atom(N) || N <- get_root_names()].
|
|
||||||
|
|
||||||
-spec save_configs(app_envs(), config(), raw_config(), raw_config(), update_opts()) ->
|
-spec save_configs(app_envs(), config(), raw_config(), raw_config(), update_opts()) ->
|
||||||
ok | {error, term()}.
|
ok | {error, term()}.
|
||||||
save_configs(_AppEnvs, Conf, RawConf, OverrideConf, Opts) ->
|
save_configs(_AppEnvs, Conf, RawConf, OverrideConf, Opts) ->
|
||||||
|
|
|
@ -1,5 +0,0 @@
|
||||||
##--------------------------------------------------------------------
|
|
||||||
## EMQX Rate Limiter
|
|
||||||
##--------------------------------------------------------------------
|
|
||||||
|
|
||||||
limiter {}
|
|
|
@ -105,7 +105,18 @@ fields(limiter_opts) ->
|
||||||
{bucket,
|
{bucket,
|
||||||
sc(
|
sc(
|
||||||
map("bucket_name", ref(bucket_opts)),
|
map("bucket_name", ref(bucket_opts)),
|
||||||
#{desc => ?DESC(bucket_cfg), default => #{<<"default">> => #{}}}
|
#{
|
||||||
|
desc => ?DESC(bucket_cfg),
|
||||||
|
default => #{<<"default">> => #{}},
|
||||||
|
examples => #{
|
||||||
|
<<"mybucket-name">> => #{
|
||||||
|
<<"rate">> => <<"infinity">>,
|
||||||
|
<<"capcity">> => <<"infinity">>,
|
||||||
|
<<"initial">> => <<"100">>,
|
||||||
|
<<"per_client">> => #{<<"rate">> => <<"infinity">>}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
)}
|
)}
|
||||||
];
|
];
|
||||||
fields(bucket_opts) ->
|
fields(bucket_opts) ->
|
||||||
|
|
|
@ -337,12 +337,18 @@ all() ->
|
||||||
%% @doc Get metric value
|
%% @doc Get metric value
|
||||||
-spec val(metric_name()) -> non_neg_integer().
|
-spec val(metric_name()) -> non_neg_integer().
|
||||||
val(Name) ->
|
val(Name) ->
|
||||||
|
try
|
||||||
case ets:lookup(?TAB, Name) of
|
case ets:lookup(?TAB, Name) of
|
||||||
[#metric{idx = Idx}] ->
|
[#metric{idx = Idx}] ->
|
||||||
CRef = persistent_term:get(?MODULE),
|
CRef = persistent_term:get(?MODULE),
|
||||||
counters:get(CRef, Idx);
|
counters:get(CRef, Idx);
|
||||||
[] ->
|
[] ->
|
||||||
0
|
0
|
||||||
|
end
|
||||||
|
%% application will restart when join cluster, then ets not exist.
|
||||||
|
catch
|
||||||
|
error:badarg ->
|
||||||
|
0
|
||||||
end.
|
end.
|
||||||
|
|
||||||
%% @doc Increase counter
|
%% @doc Increase counter
|
||||||
|
|
|
@ -168,17 +168,12 @@ start_cpu_check_timer() ->
|
||||||
_ -> start_timer(Interval, cpu_check)
|
_ -> start_timer(Interval, cpu_check)
|
||||||
end.
|
end.
|
||||||
|
|
||||||
|
is_sysmem_check_supported() ->
|
||||||
|
{unix, linux} =:= os:type().
|
||||||
|
|
||||||
start_mem_check_timer() ->
|
start_mem_check_timer() ->
|
||||||
Interval = emqx:get_config([sysmon, os, mem_check_interval]),
|
Interval = emqx:get_config([sysmon, os, mem_check_interval]),
|
||||||
IsSupported =
|
case is_integer(Interval) andalso is_sysmem_check_supported() of
|
||||||
case os:type() of
|
|
||||||
{unix, linux} ->
|
|
||||||
true;
|
|
||||||
_ ->
|
|
||||||
%% sorry Mac and windows, for now
|
|
||||||
false
|
|
||||||
end,
|
|
||||||
case is_integer(Interval) andalso IsSupported of
|
|
||||||
true ->
|
true ->
|
||||||
start_timer(Interval, mem_check);
|
start_timer(Interval, mem_check);
|
||||||
false ->
|
false ->
|
||||||
|
@ -196,7 +191,12 @@ update_mem_alarm_status(HWM) when HWM > 1.0 orelse HWM < 0.0 ->
|
||||||
#{},
|
#{},
|
||||||
<<"Deactivated mem usage alarm due to out of range threshold">>
|
<<"Deactivated mem usage alarm due to out of range threshold">>
|
||||||
);
|
);
|
||||||
update_mem_alarm_status(HWM0) ->
|
update_mem_alarm_status(HWM) ->
|
||||||
|
is_sysmem_check_supported() andalso
|
||||||
|
do_update_mem_alarm_status(HWM),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
do_update_mem_alarm_status(HWM0) ->
|
||||||
HWM = HWM0 * 100,
|
HWM = HWM0 * 100,
|
||||||
Usage = current_sysmem_percent(),
|
Usage = current_sysmem_percent(),
|
||||||
case Usage > HWM of
|
case Usage > HWM of
|
||||||
|
|
|
@ -703,7 +703,7 @@ fields("conn_congestion") ->
|
||||||
sc(
|
sc(
|
||||||
boolean(),
|
boolean(),
|
||||||
#{
|
#{
|
||||||
default => false,
|
default => true,
|
||||||
desc => ?DESC(conn_congestion_enable_alarm)
|
desc => ?DESC(conn_congestion_enable_alarm)
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
|
@ -1582,7 +1582,7 @@ base_listener() ->
|
||||||
)},
|
)},
|
||||||
{"limiter",
|
{"limiter",
|
||||||
sc(
|
sc(
|
||||||
map("ratelimit's type", emqx_limiter_schema:bucket_name()),
|
map("ratelimit_name", emqx_limiter_schema:bucket_name()),
|
||||||
#{
|
#{
|
||||||
desc => ?DESC(base_listener_limiter),
|
desc => ?DESC(base_listener_limiter),
|
||||||
default => #{}
|
default => #{}
|
||||||
|
@ -2183,7 +2183,7 @@ authentication(Type) ->
|
||||||
%% authentication schema is lazy to make it more 'plugable'
|
%% authentication schema is lazy to make it more 'plugable'
|
||||||
%% the type checks are done in emqx_auth application when it boots.
|
%% the type checks are done in emqx_auth application when it boots.
|
||||||
%% and in emqx_authentication_config module for runtime changes.
|
%% and in emqx_authentication_config module for runtime changes.
|
||||||
Default = hoconsc:lazy(hoconsc:union([typerefl:map(), hoconsc:array(typerefl:map())])),
|
Default = hoconsc:lazy(hoconsc:union([hoconsc:array(typerefl:map())])),
|
||||||
%% as the type is lazy, the runtime module injection
|
%% as the type is lazy, the runtime module injection
|
||||||
%% from EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY
|
%% from EMQX_AUTHENTICATION_SCHEMA_MODULE_PT_KEY
|
||||||
%% is for now only affecting document generation.
|
%% is for now only affecting document generation.
|
||||||
|
|
|
@ -418,6 +418,7 @@ t_connected_client_count_persistent(Config) when is_list(Config) ->
|
||||||
{clientid, ClientID}
|
{clientid, ClientID}
|
||||||
| Config
|
| Config
|
||||||
]),
|
]),
|
||||||
|
|
||||||
{{ok, _}, {ok, [_, _]}} = wait_for_events(
|
{{ok, _}, {ok, [_, _]}} = wait_for_events(
|
||||||
fun() -> emqtt:ConnFun(ConnPid2) end,
|
fun() -> emqtt:ConnFun(ConnPid2) end,
|
||||||
[
|
[
|
||||||
|
|
|
@ -51,6 +51,7 @@
|
||||||
render_config_file/2,
|
render_config_file/2,
|
||||||
read_schema_configs/2,
|
read_schema_configs/2,
|
||||||
load_config/2,
|
load_config/2,
|
||||||
|
load_config/3,
|
||||||
is_tcp_server_available/2,
|
is_tcp_server_available/2,
|
||||||
is_tcp_server_available/3
|
is_tcp_server_available/3
|
||||||
]).
|
]).
|
||||||
|
@ -465,9 +466,18 @@ copy_certs(emqx_conf, Dest0) ->
|
||||||
copy_certs(_, _) ->
|
copy_certs(_, _) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
load_config(SchemaModule, Config) ->
|
load_config(SchemaModule, Config, Opts) ->
|
||||||
|
ConfigBin =
|
||||||
|
case is_map(Config) of
|
||||||
|
true -> jsx:encode(Config);
|
||||||
|
false -> Config
|
||||||
|
end,
|
||||||
ok = emqx_config:delete_override_conf_files(),
|
ok = emqx_config:delete_override_conf_files(),
|
||||||
ok = emqx_config:init_load(SchemaModule, Config).
|
ok = emqx_config:init_load(SchemaModule, ConfigBin, Opts),
|
||||||
|
ok.
|
||||||
|
|
||||||
|
load_config(SchemaModule, Config) ->
|
||||||
|
load_config(SchemaModule, Config, #{raw_with_default => false}).
|
||||||
|
|
||||||
-spec is_tcp_server_available(
|
-spec is_tcp_server_available(
|
||||||
Host :: inet:socket_address() | inet:hostname(),
|
Host :: inet:socket_address() | inet:hostname(),
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
authentication: []
|
|
||||||
|
|
|
@ -100,16 +100,19 @@ filter(_) ->
|
||||||
password_hash_field(type) -> binary();
|
password_hash_field(type) -> binary();
|
||||||
password_hash_field(desc) -> ?DESC(?FUNCTION_NAME);
|
password_hash_field(desc) -> ?DESC(?FUNCTION_NAME);
|
||||||
password_hash_field(required) -> false;
|
password_hash_field(required) -> false;
|
||||||
|
password_hash_field(default) -> <<"password_hash">>;
|
||||||
password_hash_field(_) -> undefined.
|
password_hash_field(_) -> undefined.
|
||||||
|
|
||||||
salt_field(type) -> binary();
|
salt_field(type) -> binary();
|
||||||
salt_field(desc) -> ?DESC(?FUNCTION_NAME);
|
salt_field(desc) -> ?DESC(?FUNCTION_NAME);
|
||||||
salt_field(required) -> false;
|
salt_field(required) -> false;
|
||||||
|
salt_field(default) -> <<"salt">>;
|
||||||
salt_field(_) -> undefined.
|
salt_field(_) -> undefined.
|
||||||
|
|
||||||
is_superuser_field(type) -> binary();
|
is_superuser_field(type) -> binary();
|
||||||
is_superuser_field(desc) -> ?DESC(?FUNCTION_NAME);
|
is_superuser_field(desc) -> ?DESC(?FUNCTION_NAME);
|
||||||
is_superuser_field(required) -> false;
|
is_superuser_field(required) -> false;
|
||||||
|
is_superuser_field(default) -> <<"is_superuser">>;
|
||||||
is_superuser_field(_) -> undefined.
|
is_superuser_field(_) -> undefined.
|
||||||
|
|
||||||
%%------------------------------------------------------------------------------
|
%%------------------------------------------------------------------------------
|
||||||
|
|
|
@ -1,66 +1,9 @@
|
||||||
authorization {
|
authorization {
|
||||||
sources = [
|
deny_action: ignore
|
||||||
# {
|
no_match: allow
|
||||||
# type: http
|
sources: [
|
||||||
# url: "https://emqx.com"
|
|
||||||
# headers: {
|
|
||||||
# Accept: "application/json"
|
|
||||||
# Content-Type: "application/json"
|
|
||||||
# }
|
|
||||||
# },
|
|
||||||
# {
|
|
||||||
# type: mysql
|
|
||||||
# server: "127.0.0.1:3306"
|
|
||||||
# database: mqtt
|
|
||||||
# pool_size: 1
|
|
||||||
# username: root
|
|
||||||
# password: public
|
|
||||||
# auto_reconnect: true
|
|
||||||
# ssl: {
|
|
||||||
# enable: true
|
|
||||||
# cacertfile: "{{ platform_etc_dir }}/certs/cacert.pem"
|
|
||||||
# certfile: "{{ platform_etc_dir }}/certs/client-cert.pem"
|
|
||||||
# keyfile: "{{ platform_etc_dir }}/certs/client-key.pem"
|
|
||||||
# }
|
|
||||||
# query: "select ipaddress, username, clientid, action, permission, topic from mqtt_authz where ipaddr = ${peerhost} or username = ${username} or clientid = ${clientid}"
|
|
||||||
# },
|
|
||||||
# {
|
|
||||||
# type: postgresql
|
|
||||||
# server: "127.0.0.1:5432"
|
|
||||||
# database: mqtt
|
|
||||||
# pool_size: 1
|
|
||||||
# username: root
|
|
||||||
# password: public
|
|
||||||
# auto_reconnect: true
|
|
||||||
# ssl: {enable: false}
|
|
||||||
# query: "select ipaddress, username, clientid, action, permission, topic from mqtt_authz where ipaddr = ${peerhost} or username = ${username} or username = '$all' or clientid = ${clientid}"
|
|
||||||
# },
|
|
||||||
# {
|
|
||||||
# type: redis
|
|
||||||
# server: "127.0.0.1:6379"
|
|
||||||
# database: 0
|
|
||||||
# pool_size: 1
|
|
||||||
# password: public
|
|
||||||
# auto_reconnect: true
|
|
||||||
# ssl: {enable: false}
|
|
||||||
# cmd: "HGETALL mqtt_authz:${username}"
|
|
||||||
# },
|
|
||||||
# {
|
|
||||||
# type: mongodb
|
|
||||||
# mongo_type: single
|
|
||||||
# server: "127.0.0.1:27017"
|
|
||||||
# pool_size: 1
|
|
||||||
# database: mqtt
|
|
||||||
# ssl: {enable: false}
|
|
||||||
# collection: mqtt_authz
|
|
||||||
# filter: { "$or": [ { "username": "${username}" }, { "clientid": "${clientid}" } ] }
|
|
||||||
# },
|
|
||||||
{
|
|
||||||
type: built_in_database
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
type: file
|
type: file
|
||||||
# file is loaded into cache
|
|
||||||
path: "{{ platform_etc_dir }}/acl.conf"
|
path: "{{ platform_etc_dir }}/acl.conf"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
|
|
|
@ -1,45 +0,0 @@
|
||||||
|
|
||||||
## When the device logs in successfully,
|
|
||||||
## the auto-subscribe will complete the subscription for the device
|
|
||||||
## according to the preset topics list.
|
|
||||||
## Placeholders are supported.
|
|
||||||
##
|
|
||||||
## - ${clientid}
|
|
||||||
## - ${username}
|
|
||||||
## - ${host}
|
|
||||||
## - ${port}
|
|
||||||
##
|
|
||||||
## Subscription options can be set at the same time.
|
|
||||||
## Please refer to the official MQTT definition for these configuration items.
|
|
||||||
## - qos
|
|
||||||
## - rh
|
|
||||||
## - rap
|
|
||||||
## - nl
|
|
||||||
##
|
|
||||||
auto_subscribe {
|
|
||||||
topics = [
|
|
||||||
## {
|
|
||||||
## topic = "/c/${clientid}"
|
|
||||||
## qos = 0
|
|
||||||
## rh = 0
|
|
||||||
## rap = 0
|
|
||||||
## nl = 0
|
|
||||||
## },
|
|
||||||
## {
|
|
||||||
## topic = "/u/${username}"
|
|
||||||
## },
|
|
||||||
## {
|
|
||||||
## topic = "/h/${host}"
|
|
||||||
## qos = 2
|
|
||||||
## },
|
|
||||||
## {
|
|
||||||
## topic = "/p/${port}"
|
|
||||||
## },
|
|
||||||
## {
|
|
||||||
## topic = "/topic/abc"
|
|
||||||
## },
|
|
||||||
## {
|
|
||||||
## topic = "/client/${clientid}/username/${username}/host/${host}/port/${port}"
|
|
||||||
## }
|
|
||||||
]
|
|
||||||
}
|
|
|
@ -21,8 +21,8 @@
|
||||||
-export([start/2, stop/1]).
|
-export([start/2, stop/1]).
|
||||||
|
|
||||||
start(_StartType, _StartArgs) ->
|
start(_StartType, _StartArgs) ->
|
||||||
{ok, Sup} = emqx_auto_subscribe_sup:start_link(),
|
|
||||||
ok = emqx_auto_subscribe:load(),
|
ok = emqx_auto_subscribe:load(),
|
||||||
|
{ok, Sup} = emqx_auto_subscribe_sup:start_link(),
|
||||||
{ok, Sup}.
|
{ok, Sup}.
|
||||||
|
|
||||||
stop(_State) ->
|
stop(_State) ->
|
||||||
|
|
|
@ -38,7 +38,7 @@ fields("auto_subscribe") ->
|
||||||
{topics,
|
{topics,
|
||||||
hoconsc:mk(
|
hoconsc:mk(
|
||||||
hoconsc:array(hoconsc:ref(?MODULE, "topic")),
|
hoconsc:array(hoconsc:ref(?MODULE, "topic")),
|
||||||
#{desc => ?DESC(auto_subscribe)}
|
#{desc => ?DESC(auto_subscribe), default => []}
|
||||||
)}
|
)}
|
||||||
];
|
];
|
||||||
fields("topic") ->
|
fields("topic") ->
|
||||||
|
|
|
@ -1,59 +0,0 @@
|
||||||
##--------------------------------------------------------------------
|
|
||||||
## EMQX Bridge
|
|
||||||
##--------------------------------------------------------------------
|
|
||||||
|
|
||||||
## MQTT bridges to/from another MQTT broker
|
|
||||||
#bridges.mqtt.my_ingress_mqtt_bridge {
|
|
||||||
# enable = true
|
|
||||||
# connector = "mqtt:my_mqtt_connector"
|
|
||||||
# direction = ingress
|
|
||||||
# ## topic mappings for this bridge
|
|
||||||
# remote_topic = "aws/#"
|
|
||||||
# remote_qos = 1
|
|
||||||
# local_topic = "from_aws/${topic}"
|
|
||||||
# local_qos = "${qos}"
|
|
||||||
# payload = "${payload}"
|
|
||||||
# retain = "${retain}"
|
|
||||||
#}
|
|
||||||
#
|
|
||||||
#bridges.mqtt.my_egress_mqtt_bridge {
|
|
||||||
# enable = true
|
|
||||||
# connector = "mqtt:my_mqtt_connector"
|
|
||||||
# direction = egress
|
|
||||||
# ## topic mappings for this bridge
|
|
||||||
# local_topic = "emqx/#"
|
|
||||||
# remote_topic = "from_emqx/${topic}"
|
|
||||||
# remote_qos = "${qos}"
|
|
||||||
# payload = "${payload}"
|
|
||||||
# retain = false
|
|
||||||
#}
|
|
||||||
#
|
|
||||||
## WebHook to an HTTP server
|
|
||||||
#bridges.webhook.my_webhook {
|
|
||||||
# enable = true
|
|
||||||
# direction = egress
|
|
||||||
# ## NOTE: we cannot use placehodler variables in the `scheme://host:port` part of the url
|
|
||||||
# url = "http://localhost:9901/messages/${topic}"
|
|
||||||
# request_timeout = "15s"
|
|
||||||
# connect_timeout = "15s"
|
|
||||||
# max_retries = 3
|
|
||||||
# retry_interval = "10s"
|
|
||||||
# pool_type = "random"
|
|
||||||
# pool_size = 4
|
|
||||||
# enable_pipelining = 100
|
|
||||||
# ssl {
|
|
||||||
# enable = false
|
|
||||||
# keyfile = "{{ platform_etc_dir }}/certs/client-key.pem"
|
|
||||||
# certfile = "{{ platform_etc_dir }}/certs/client-cert.pem"
|
|
||||||
# cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
|
|
||||||
# }
|
|
||||||
#
|
|
||||||
# local_topic = "emqx_http/#"
|
|
||||||
# ## the following config entries can use placehodler variables:
|
|
||||||
# ## url, method, body, headers
|
|
||||||
# method = post
|
|
||||||
# body = "${payload}"
|
|
||||||
# headers {
|
|
||||||
# "content-type": "application/json"
|
|
||||||
# }
|
|
||||||
#}
|
|
|
@ -142,8 +142,9 @@ setup_fake_telemetry_data() ->
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_connector_schema, ConnectorConf),
|
Opts = #{raw_with_default => true},
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf),
|
ok = emqx_common_test_helpers:load_config(emqx_connector_schema, ConnectorConf, Opts),
|
||||||
|
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf, Opts),
|
||||||
|
|
||||||
ok = snabbkaffe:start_trace(),
|
ok = snabbkaffe:start_trace(),
|
||||||
Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_monitor_loaded_bridge end,
|
Predicate = fun(#{?snk_kind := K}) -> K =:= emqx_bridge_monitor_loaded_bridge end,
|
||||||
|
|
|
@ -6,845 +6,24 @@
|
||||||
##
|
##
|
||||||
## The *-override.conf files are overwritten at runtime when changes
|
## The *-override.conf files are overwritten at runtime when changes
|
||||||
## are made from EMQX dashboard UI, management HTTP API, or CLI.
|
## are made from EMQX dashboard UI, management HTTP API, or CLI.
|
||||||
|
## All configuration details can be found in emqx.conf.example
|
||||||
|
|
||||||
##==================================================================
|
|
||||||
## Node
|
|
||||||
##==================================================================
|
|
||||||
node {
|
node {
|
||||||
## Node name.
|
name: "emqx@127.0.0.1"
|
||||||
## See: http://erlang.org/doc/reference_manual/distributed.html
|
cookie: emqxsecretcookie
|
||||||
##
|
data_dir: "{{ platform_data_dir }}"
|
||||||
## @doc node.name
|
etc_dir: "{{ platform_etc_dir }}"
|
||||||
## ValueType: NodeName
|
applications: "{{ emqx_machine_boot_apps }}"
|
||||||
## Default: emqx@127.0.0.1
|
|
||||||
name = "emqx@127.0.0.1"
|
|
||||||
|
|
||||||
## Cookie for distributed node communication.
|
|
||||||
##
|
|
||||||
## @doc node.cookie
|
|
||||||
## ValueType: String
|
|
||||||
## Default: emqxsecretcookie
|
|
||||||
cookie = emqxsecretcookie
|
|
||||||
|
|
||||||
## Data dir for the node
|
|
||||||
##
|
|
||||||
## @doc node.data_dir
|
|
||||||
## ValueType: Folder
|
|
||||||
## Default: "{{ platform_data_dir }}"
|
|
||||||
data_dir = "{{ platform_data_dir }}"
|
|
||||||
|
|
||||||
## Location of crash dump file.
|
|
||||||
##
|
|
||||||
## @doc node.crash_dump_file
|
|
||||||
## ValueType: File
|
|
||||||
## Default: "{{ platform_log_dir }}/erl_crash.dump"
|
|
||||||
crash_dump_file = "{{ platform_log_dir }}/erl_crash.dump"
|
|
||||||
|
|
||||||
## The number of seconds that the broker is allowed to spend writing
|
|
||||||
## a crash dump
|
|
||||||
##
|
|
||||||
## @doc node.crash_dump_seconds
|
|
||||||
## ValueType: seconds
|
|
||||||
## Default: 30s
|
|
||||||
crash_dump_seconds = 30s
|
|
||||||
|
|
||||||
## The maximum size of a crash dump file in bytes.
|
|
||||||
##
|
|
||||||
## @doc node.crash_dump_bytes
|
|
||||||
## ValueType: bytes
|
|
||||||
## Default: 100MB
|
|
||||||
crash_dump_bytes = 100MB
|
|
||||||
|
|
||||||
## Global GC Interval.
|
|
||||||
##
|
|
||||||
## @doc node.global_gc_interval
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 15m
|
|
||||||
global_gc_interval = 15m
|
|
||||||
|
|
||||||
## Sets the etc directory
|
|
||||||
etc_dir = "{{ platform_etc_dir }}"
|
|
||||||
|
|
||||||
## Sets the net_kernel tick time in seconds.
|
|
||||||
## Notice that all communicating nodes are to have the same
|
|
||||||
## TickTime value specified.
|
|
||||||
##
|
|
||||||
## See: http://www.erlang.org/doc/man/kernel_app.html#net_ticktime
|
|
||||||
##
|
|
||||||
## @doc node.dist_net_ticktime
|
|
||||||
## ValueType: Number
|
|
||||||
## Default: 2m
|
|
||||||
dist_net_ticktime = 2m
|
|
||||||
|
|
||||||
## Sets the maximum depth of call stack back-traces in the exit
|
|
||||||
## reason element of 'EXIT' tuples.
|
|
||||||
## The flag also limits the stacktrace depth returned by
|
|
||||||
## process_info item current_stacktrace.
|
|
||||||
##
|
|
||||||
## @doc node.backtrace_depth
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [0,1024]
|
|
||||||
## Default: 23
|
|
||||||
backtrace_depth = 23
|
|
||||||
|
|
||||||
## Comma-separated list of applications to start with emqx_machine.
|
|
||||||
## These applications may restart on cluster leave/join.
|
|
||||||
##
|
|
||||||
## @doc node.applications
|
|
||||||
## ValueType: String
|
|
||||||
## Default: "gproc, esockd, ranch, cowboy, emqx"
|
|
||||||
applications = "{{ emqx_machine_boot_apps }}"
|
|
||||||
|
|
||||||
cluster_call {
|
|
||||||
retry_interval = 1s
|
|
||||||
max_history = 100
|
|
||||||
cleanup_interval = 5m
|
|
||||||
}
|
}
|
||||||
|
|
||||||
## Database backend
|
|
||||||
##
|
|
||||||
## @doc node.db_backend
|
|
||||||
## ValueType: mnesia | rlog
|
|
||||||
## Default: rlog
|
|
||||||
db_backend = rlog
|
|
||||||
|
|
||||||
## RLOG role
|
|
||||||
##
|
|
||||||
## @doc node.db_role
|
|
||||||
## ValueType: core | replicant
|
|
||||||
## Default: core
|
|
||||||
db_role = core
|
|
||||||
}
|
|
||||||
|
|
||||||
##==================================================================
|
|
||||||
## Cluster
|
|
||||||
##==================================================================
|
|
||||||
cluster {
|
|
||||||
## Cluster name.
|
|
||||||
##
|
|
||||||
## @doc cluster.name
|
|
||||||
## ValueType: String
|
|
||||||
## Default: emqxcl
|
|
||||||
name = emqxcl
|
|
||||||
|
|
||||||
## Enable cluster autoheal from network partition.
|
|
||||||
##
|
|
||||||
## @doc cluster.autoheal
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: true
|
|
||||||
autoheal = true
|
|
||||||
|
|
||||||
## Autoclean down node. A down node will be removed from the cluster
|
|
||||||
## if this value > 0.
|
|
||||||
##
|
|
||||||
## @doc cluster.autoclean
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 5m
|
|
||||||
autoclean = 5m
|
|
||||||
|
|
||||||
## Node discovery strategy to join the cluster.
|
|
||||||
##
|
|
||||||
## @doc cluster.discovery_strategy
|
|
||||||
## ValueType: manual | static | mcast | dns | etcd | k8s
|
|
||||||
## - manual: Manual join command
|
|
||||||
## - static: Static node list
|
|
||||||
## - mcast: IP Multicast
|
|
||||||
## - dns: DNS A Record
|
|
||||||
## - etcd: etcd
|
|
||||||
## - k8s: Kubernetes
|
|
||||||
##
|
|
||||||
## Default: manual
|
|
||||||
discovery_strategy = manual
|
|
||||||
|
|
||||||
## Replicant core nodes
|
|
||||||
##
|
|
||||||
## @doc cluster.core_nodes
|
|
||||||
## ValueType: comma-separated node list
|
|
||||||
## Default: ""
|
|
||||||
core_nodes = ""
|
|
||||||
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
## Cluster using static node list
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
static {
|
|
||||||
## Node list of the cluster
|
|
||||||
##
|
|
||||||
## @doc cluster.static.seeds
|
|
||||||
## ValueType: Array<NodeName>
|
|
||||||
## Default: []
|
|
||||||
seeds = ["emqx1@127.0.0.1", "emqx2@127.0.0.1"]
|
|
||||||
}
|
|
||||||
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
## Cluster using IP Multicast
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
mcast {
|
|
||||||
## IP Multicast Address.
|
|
||||||
##
|
|
||||||
## @doc cluster.mcast.addr
|
|
||||||
## ValueType: IPAddress
|
|
||||||
## Default: "239.192.0.1"
|
|
||||||
addr = "239.192.0.1"
|
|
||||||
|
|
||||||
## Multicast Ports.
|
|
||||||
##
|
|
||||||
## @doc cluster.mcast.ports
|
|
||||||
## ValueType: Array<Port>
|
|
||||||
## Default: [4369, 4370]
|
|
||||||
ports = [4369, 4370]
|
|
||||||
|
|
||||||
## Multicast Iface.
|
|
||||||
##
|
|
||||||
## @doc cluster.mcast.iface
|
|
||||||
## ValueType: IPAddress
|
|
||||||
## Default: "0.0.0.0"
|
|
||||||
iface = "0.0.0.0"
|
|
||||||
|
|
||||||
## Multicast Ttl.
|
|
||||||
##
|
|
||||||
## @doc cluster.mcast.ttl
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [0,255]
|
|
||||||
## Default: 255
|
|
||||||
ttl = 255
|
|
||||||
|
|
||||||
## Multicast loop.
|
|
||||||
##
|
|
||||||
## @doc cluster.mcast.loop
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: true
|
|
||||||
loop = true
|
|
||||||
}
|
|
||||||
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
## Cluster using DNS A records
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
dns {
|
|
||||||
## DNS name.
|
|
||||||
##
|
|
||||||
## @doc cluster.dns.name
|
|
||||||
## ValueType: String
|
|
||||||
## Default: localhost
|
|
||||||
name = localhost
|
|
||||||
|
|
||||||
## The App name is used to build 'node.name' with IP address.
|
|
||||||
##
|
|
||||||
## @doc cluster.dns.app
|
|
||||||
## ValueType: String
|
|
||||||
## Default: emqx
|
|
||||||
app = emqx
|
|
||||||
}
|
|
||||||
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
## Cluster using etcd
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
etcd {
|
|
||||||
## Etcd server list, separated by ','.
|
|
||||||
##
|
|
||||||
## @doc cluster.etcd.server
|
|
||||||
## ValueType: URL
|
|
||||||
## Required: true
|
|
||||||
server = "http://127.0.0.1:2379"
|
|
||||||
|
|
||||||
## The prefix helps build nodes path in etcd. Each node in the cluster
|
|
||||||
## will create a path in etcd: v2/keys/<prefix>/<name>/<node.name>
|
|
||||||
##
|
|
||||||
## @doc cluster.etcd.prefix
|
|
||||||
## ValueType: String
|
|
||||||
## Default: emqxcl
|
|
||||||
prefix = emqxcl
|
|
||||||
|
|
||||||
## The TTL for node's path in etcd.
|
|
||||||
##
|
|
||||||
## @doc cluster.etcd.node_ttl
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 1m
|
|
||||||
node_ttl = 1m
|
|
||||||
|
|
||||||
## Path to the file containing the user's private PEM-encoded key.
|
|
||||||
##
|
|
||||||
## @doc cluster.etcd.ssl.keyfile
|
|
||||||
## ValueType: File
|
|
||||||
## Default: "{{ platform_etc_dir }}/certs/key.pem"
|
|
||||||
ssl.keyfile = "{{ platform_etc_dir }}/certs/key.pem"
|
|
||||||
|
|
||||||
## Path to a file containing the user certificate.
|
|
||||||
##
|
|
||||||
## @doc cluster.etcd.ssl.certfile
|
|
||||||
## ValueType: File
|
|
||||||
## Default: "{{ platform_etc_dir }}/certs/cert.pem"
|
|
||||||
ssl.certfile = "{{ platform_etc_dir }}/certs/cert.pem"
|
|
||||||
|
|
||||||
## Path to the file containing PEM-encoded CA certificates. The CA certificates
|
|
||||||
## are used during server authentication and when building the client certificate chain.
|
|
||||||
##
|
|
||||||
## @doc cluster.etcd.ssl.cacertfile
|
|
||||||
## ValueType: File
|
|
||||||
## Default: "{{ platform_etc_dir }}/certs/cacert.pem"
|
|
||||||
ssl.cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
|
|
||||||
}
|
|
||||||
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
## Cluster using Kubernetes
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
k8s {
|
|
||||||
## Kubernetes API server list, separated by ','.
|
|
||||||
##
|
|
||||||
## @doc cluster.k8s.apiserver
|
|
||||||
## ValueType: URL
|
|
||||||
## Required: true
|
|
||||||
apiserver = "http://10.110.111.204:8080"
|
|
||||||
|
|
||||||
## The service name helps lookup EMQ nodes in the cluster.
|
|
||||||
##
|
|
||||||
## @doc cluster.k8s.service_name
|
|
||||||
## ValueType: String
|
|
||||||
## Default: emqx
|
|
||||||
service_name = emqx
|
|
||||||
|
|
||||||
## The address type is used to extract host from k8s service.
|
|
||||||
##
|
|
||||||
## @doc cluster.k8s.address_type
|
|
||||||
## ValueType: ip | dns | hostname
|
|
||||||
## Default: ip
|
|
||||||
address_type = ip
|
|
||||||
|
|
||||||
## The app name helps build 'node.name'.
|
|
||||||
##
|
|
||||||
## @doc cluster.k8s.app_name
|
|
||||||
## ValueType: String
|
|
||||||
## Default: emqx
|
|
||||||
app_name = emqx
|
|
||||||
|
|
||||||
## The suffix added to dns and hostname get from k8s service
|
|
||||||
##
|
|
||||||
## @doc cluster.k8s.suffix
|
|
||||||
## ValueType: String
|
|
||||||
## Default: "pod.local"
|
|
||||||
suffix = "pod.local"
|
|
||||||
|
|
||||||
## Kubernetes Namespace
|
|
||||||
##
|
|
||||||
## @doc cluster.k8s.namespace
|
|
||||||
## ValueType: String
|
|
||||||
## Default: default
|
|
||||||
namespace = default
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
##==================================================================
|
|
||||||
## Log
|
|
||||||
##==================================================================
|
|
||||||
log {
|
log {
|
||||||
##----------------------------------------------------------------
|
|
||||||
## The console log handler sends log messages to the emqx console
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
console_handler {
|
|
||||||
## Enable the console log handler
|
|
||||||
## @doc log.console_handler.<name>.enable
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: false
|
|
||||||
enable = false
|
|
||||||
|
|
||||||
## The log level of this handler
|
|
||||||
## All the log messages with levels lower than this level will
|
|
||||||
## be dropped.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.level
|
|
||||||
## ValueType: debug | info | notice | warning | error | critical | alert | emergency
|
|
||||||
## Default: warning
|
|
||||||
level = warning
|
|
||||||
|
|
||||||
## Timezone offset to display in logs
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.time_offset
|
|
||||||
## ValueType: system | utc | String
|
|
||||||
## - "system" use system zone
|
|
||||||
## - "utc" for Universal Coordinated Time (UTC)
|
|
||||||
## - "+hh:mm" or "-hh:mm" for a specified offset
|
|
||||||
## Default: system
|
|
||||||
time_offset = system
|
|
||||||
|
|
||||||
## Limits the total number of characters printed for each log event.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.chars_limit
|
|
||||||
## ValueType: unlimited | Integer
|
|
||||||
## Range: [0, +Inf)
|
|
||||||
## Default: unlimited
|
|
||||||
chars_limit = unlimited
|
|
||||||
|
|
||||||
## Maximum depth for Erlang term log formatting
|
|
||||||
## and Erlang process message queue inspection.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.max_depth
|
|
||||||
## ValueType: unlimited | Integer
|
|
||||||
## Default: 100
|
|
||||||
max_depth = 100
|
|
||||||
|
|
||||||
## Log formatter
|
|
||||||
## @doc log.console_handler.<name>.formatter
|
|
||||||
## ValueType: text | json
|
|
||||||
## Default: text
|
|
||||||
formatter = text
|
|
||||||
|
|
||||||
## Log to single line
|
|
||||||
## @doc log.console_handler.<name>.single_line
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: true
|
|
||||||
single_line = true
|
|
||||||
|
|
||||||
## The max allowed queue length before switching to sync mode.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. If the message queue grows
|
|
||||||
## larger than this value the handler switches from async to sync mode.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.sync_mode_qlen
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [0, ${log.console_handler.<name>.drop_mode_qlen}]
|
|
||||||
## Default: 100
|
|
||||||
sync_mode_qlen = 100
|
|
||||||
|
|
||||||
## The max allowed queue length before switching to drop mode.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. When the message queue grows
|
|
||||||
## larger than this threshold, the handler switches to a mode in which
|
|
||||||
## it drops all new events that senders want to log.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.drop_mode_qlen
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [${log.console_handler.<name>.sync_mode_qlen}, ${log.console_handler.<name>.flush_qlen}]
|
|
||||||
## Default: 3000
|
|
||||||
drop_mode_qlen = 3000
|
|
||||||
|
|
||||||
## The max allowed queue length before switching to flush mode.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. If the length of the message queue
|
|
||||||
## grows larger than this threshold, a flush (delete) operation takes place.
|
|
||||||
## To flush events, the handler discards the messages in the message queue
|
|
||||||
## by receiving them in a loop without logging.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.flush_qlen
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [${log.console_handler.<name>.drop_mode_qlen}, infinity)
|
|
||||||
## Default: 8000
|
|
||||||
flush_qlen = 8000
|
|
||||||
|
|
||||||
## Kill the log handler when it gets overloaded.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. It is possible that a handler,
|
|
||||||
## even if it can successfully manage peaks of high load without crashing,
|
|
||||||
## can build up a large message queue, or use a large amount of memory.
|
|
||||||
## We could kill the log handler in these cases and restart it after a
|
|
||||||
## few seconds.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.overload_kill.enable
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: true
|
|
||||||
overload_kill.enable = true
|
|
||||||
|
|
||||||
## The max allowed queue length before killing the log handler.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. This is the maximum allowed queue
|
|
||||||
## length. If the message queue grows larger than this, the handler
|
|
||||||
## process is terminated.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.overload_kill.qlen
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [0, 1048576]
|
|
||||||
## Default: 20000
|
|
||||||
overload_kill.qlen = 20000
|
|
||||||
|
|
||||||
## The max allowed memory size before killing the log handler.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. This is the maximum memory size
|
|
||||||
## that the handler process is allowed to use. If the handler grows
|
|
||||||
## larger than this, the process is terminated.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.overload_kill.mem_size
|
|
||||||
## ValueType: Size
|
|
||||||
## Default: 30MB
|
|
||||||
overload_kill.mem_size = 30MB
|
|
||||||
|
|
||||||
## Restart the log handler after some seconds.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. If the handler is terminated,
|
|
||||||
## it restarts automatically after a delay specified in seconds.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.overload_kill.restart_after
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 5s
|
|
||||||
overload_kill.restart_after = 5s
|
|
||||||
|
|
||||||
## Controlling Bursts of Log Requests.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. Large bursts of log events - many
|
|
||||||
## events received by the handler under a short period of time - can
|
|
||||||
## potentially cause problems. By specifying the maximum number of events
|
|
||||||
## to be handled within a certain time frame, the handler can avoid
|
|
||||||
## choking the log with massive amounts of printouts.
|
|
||||||
##
|
|
||||||
## Note that there would be no warning if any messages were
|
|
||||||
## dropped because of burst control.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.burst_limit.enable
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: false
|
|
||||||
burst_limit.enable = false
|
|
||||||
|
|
||||||
## This config controls the maximum number of events to handle within
|
|
||||||
## a time frame. After the limit is reached, successive events are
|
|
||||||
## dropped until the end of the time frame defined by `window_time`.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.burst_limit.max_count
|
|
||||||
## ValueType: Integer
|
|
||||||
## Default: 10000
|
|
||||||
burst_limit.max_count = 10000
|
|
||||||
|
|
||||||
## See the previous description of burst_limit_max_count.
|
|
||||||
##
|
|
||||||
## @doc log.console_handler.<name>.burst_limit.window_time
|
|
||||||
## ValueType: duration
|
|
||||||
## Default: 1s
|
|
||||||
burst_limit.window_time = 1s
|
|
||||||
}
|
|
||||||
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
## The file log handlers send log messages to files
|
|
||||||
##----------------------------------------------------------------
|
|
||||||
## file_handlers.<name>
|
|
||||||
file_handlers.default {
|
file_handlers.default {
|
||||||
enable = true
|
level: warning
|
||||||
## The log level filter of this handler
|
file: "{{ platform_log_dir }}/emqx.log"
|
||||||
## All the log messages with levels lower than this level will
|
|
||||||
## be dropped.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.level
|
|
||||||
## ValueType: debug | info | notice | warning | error | critical | alert | emergency
|
|
||||||
## Default: warning
|
|
||||||
level = warning
|
|
||||||
|
|
||||||
## The log file for specified level.
|
|
||||||
##
|
|
||||||
## If `rotation` is disabled, this is the file of the log files.
|
|
||||||
##
|
|
||||||
## If `rotation` is enabled, this is the base name of the files.
|
|
||||||
## Each file in a rotated log is named <base_name>.N, where N is an integer.
|
|
||||||
##
|
|
||||||
## Note: Log files for a specific log level will only contain all the logs
|
|
||||||
## that are higher than or equal to that level
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.file
|
|
||||||
## ValueType: File
|
|
||||||
## Required: true
|
|
||||||
file = "{{ platform_log_dir }}/emqx.log"
|
|
||||||
|
|
||||||
## Enables the log rotation.
|
|
||||||
## With this enabled, new log files will be created when the current
|
|
||||||
## log file is full, max to `rotation_count` files will be created.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.rotation.enable
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: true
|
|
||||||
rotation.enable = true
|
|
||||||
|
|
||||||
## Maximum rotation count of log files.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.rotation.count
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [1, 2048]
|
|
||||||
## Default: 10
|
|
||||||
rotation.count = 10
|
|
||||||
|
|
||||||
## Maximum size of each log file.
|
|
||||||
##
|
|
||||||
## If the max_size reached and `rotation` is disabled, the handler
|
|
||||||
## will stop sending log messages, if the `rotation` is enabled,
|
|
||||||
## the file rotates.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.max_size
|
|
||||||
## ValueType: Size | infinity
|
|
||||||
## Default: 10MB
|
|
||||||
max_size = 10MB
|
|
||||||
|
|
||||||
## Timezone offset to display in logs
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.time_offset
|
|
||||||
## ValueType: system | utc | String
|
|
||||||
## - "system" use system zone
|
|
||||||
## - "utc" for Universal Coordinated Time (UTC)
|
|
||||||
## - "+hh:mm" or "-hh:mm" for a specified offset
|
|
||||||
## Default: system
|
|
||||||
time_offset = system
|
|
||||||
|
|
||||||
## Limits the total number of characters printed for each log event.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.chars_limit
|
|
||||||
## ValueType: unlimited | Integer
|
|
||||||
## Range: [0, +Inf)
|
|
||||||
## Default: unlimited
|
|
||||||
chars_limit = unlimited
|
|
||||||
|
|
||||||
## Maximum depth for Erlang term log formatting
|
|
||||||
## and Erlang process message queue inspection.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.max_depth
|
|
||||||
## ValueType: unlimited | Integer
|
|
||||||
## Default: 100
|
|
||||||
max_depth = 100
|
|
||||||
|
|
||||||
## Log formatter
|
|
||||||
## @doc log.file_handlers.<name>.formatter
|
|
||||||
## ValueType: text | json
|
|
||||||
## Default: text
|
|
||||||
formatter = text
|
|
||||||
|
|
||||||
## Log to single line
|
|
||||||
## @doc log.file_handlers.<name>.single_line
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: true
|
|
||||||
single_line = true
|
|
||||||
|
|
||||||
## The max allowed queue length before switching to sync mode.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. If the message queue grows
|
|
||||||
## larger than this value the handler switches from async to sync mode.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.sync_mode_qlen
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [0, ${log.file_handlers.<name>.drop_mode_qlen}]
|
|
||||||
## Default: 100
|
|
||||||
sync_mode_qlen = 100
|
|
||||||
|
|
||||||
## The max allowed queue length before switching to drop mode.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. When the message queue grows
|
|
||||||
## larger than this threshold, the handler switches to a mode in which
|
|
||||||
## it drops all new events that senders want to log.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.drop_mode_qlen
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [${log.file_handlers.<name>.sync_mode_qlen}, ${log.file_handlers.<name>.flush_qlen}]
|
|
||||||
## Default: 3000
|
|
||||||
drop_mode_qlen = 3000
|
|
||||||
|
|
||||||
## The max allowed queue length before switching to flush mode.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. If the length of the message queue
|
|
||||||
## grows larger than this threshold, a flush (delete) operation takes place.
|
|
||||||
## To flush events, the handler discards the messages in the message queue
|
|
||||||
## by receiving them in a loop without logging.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.flush_qlen
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [${log.file_handlers.<name>.drop_mode_qlen}, infinity)
|
|
||||||
## Default: 8000
|
|
||||||
flush_qlen = 8000
|
|
||||||
|
|
||||||
## Kill the log handler when it gets overloaded.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. It is possible that a handler,
|
|
||||||
## even if it can successfully manage peaks of high load without crashing,
|
|
||||||
## can build up a large message queue, or use a large amount of memory.
|
|
||||||
## We could kill the log handler in these cases and restart it after a
|
|
||||||
## few seconds.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.overload_kill.enable
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: true
|
|
||||||
overload_kill.enable = true
|
|
||||||
|
|
||||||
## The max allowed queue length before killing the log handler.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. This is the maximum allowed queue
|
|
||||||
## length. If the message queue grows larger than this, the handler
|
|
||||||
## process is terminated.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.overload_kill.qlen
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [0, 1048576]
|
|
||||||
## Default: 20000
|
|
||||||
overload_kill.qlen = 20000
|
|
||||||
|
|
||||||
## The max allowed memory size before killing the log handler.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. This is the maximum memory size
|
|
||||||
## that the handler process is allowed to use. If the handler grows
|
|
||||||
## larger than this, the process is terminated.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.overload_kill.mem_size
|
|
||||||
## ValueType: Size
|
|
||||||
## Default: 30MB
|
|
||||||
overload_kill.mem_size = 30MB
|
|
||||||
|
|
||||||
## Restart the log handler after some seconds.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. If the handler is terminated,
|
|
||||||
## it restarts automatically after a delay specified in seconds.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.overload_kill.restart_after
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 5s
|
|
||||||
overload_kill.restart_after = 5s
|
|
||||||
|
|
||||||
## Controlling Bursts of Log Requests.
|
|
||||||
##
|
|
||||||
## Log overload protection parameter. Large bursts of log events - many
|
|
||||||
## events received by the handler under a short period of time - can
|
|
||||||
## potentially cause problems. By specifying the maximum number of events
|
|
||||||
## to be handled within a certain time frame, the handler can avoid
|
|
||||||
## choking the log with massive amounts of printouts.
|
|
||||||
##
|
|
||||||
## Note that there would be no warning if any messages were
|
|
||||||
## dropped because of burst control.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.burst_limit.enable
|
|
||||||
## ValueType: Boolean
|
|
||||||
## Default: false
|
|
||||||
burst_limit.enable = false
|
|
||||||
|
|
||||||
## This config controls the maximum number of events to handle within
|
|
||||||
## a time frame. After the limit is reached, successive events are
|
|
||||||
## dropped until the end of the time frame defined by `window_time`.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.burst_limit.max_count
|
|
||||||
## ValueType: Integer
|
|
||||||
## Default: 10000
|
|
||||||
burst_limit.max_count = 10000
|
|
||||||
|
|
||||||
## See the previous description of burst_limit_max_count.
|
|
||||||
##
|
|
||||||
## @doc log.file_handlers.<name>.burst_limit.window_time
|
|
||||||
## ValueType: duration
|
|
||||||
## Default: 1s
|
|
||||||
burst_limit.window_time = 1s
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
##==================================================================
|
cluster {
|
||||||
## RPC
|
name: emqxcl
|
||||||
##==================================================================
|
discovery_strategy: manual
|
||||||
rpc {
|
|
||||||
## RPC Mode.
|
|
||||||
##
|
|
||||||
## @doc rpc.mode
|
|
||||||
## ValueType: sync | async
|
|
||||||
## Default: async
|
|
||||||
mode = async
|
|
||||||
|
|
||||||
## Max batch size of async RPC requests.
|
|
||||||
##
|
|
||||||
## NOTE: RPC batch won't work when rpc.mode = sync
|
|
||||||
## Zero value disables rpc batching.
|
|
||||||
##
|
|
||||||
## @doc rpc.async_batch_size
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [0, 1048576]
|
|
||||||
## Default: 0
|
|
||||||
async_batch_size = 256
|
|
||||||
|
|
||||||
## RPC port discovery
|
|
||||||
##
|
|
||||||
## The strategy for discovering the RPC listening port of
|
|
||||||
## other nodes.
|
|
||||||
##
|
|
||||||
## @doc cluster.discovery_strategy
|
|
||||||
## ValueType: manual | stateless
|
|
||||||
## - manual: discover ports by `tcp_server_port`.
|
|
||||||
## - stateless: discover ports in a stateless manner.
|
|
||||||
## If node name is `emqx<N>@127.0.0.1`, where the `<N>` is
|
|
||||||
## an integer, then the listening port will be `5370 + <N>`
|
|
||||||
##
|
|
||||||
## Default: `stateless`.
|
|
||||||
port_discovery = stateless
|
|
||||||
|
|
||||||
## TCP server port for RPC.
|
|
||||||
##
|
|
||||||
## Only takes effect when `rpc.port_discovery` = `manual`.
|
|
||||||
##
|
|
||||||
## @doc rpc.tcp_server_port
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [1024-65535]
|
|
||||||
## Default: 5369
|
|
||||||
tcp_server_port = 5369
|
|
||||||
|
|
||||||
## Number of outgoing RPC connections.
|
|
||||||
##
|
|
||||||
## Set this to 1 to keep the message order sent from the same
|
|
||||||
## client.
|
|
||||||
##
|
|
||||||
## @doc rpc.tcp_client_num
|
|
||||||
## ValueType: Integer
|
|
||||||
## Range: [1, 256]
|
|
||||||
## Default: 10
|
|
||||||
tcp_client_num = 10
|
|
||||||
|
|
||||||
## RPC Client connect timeout.
|
|
||||||
##
|
|
||||||
## @doc rpc.connect_timeout
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 5s
|
|
||||||
connect_timeout = 5s
|
|
||||||
|
|
||||||
## TCP send timeout of RPC client and server.
|
|
||||||
##
|
|
||||||
## @doc rpc.send_timeout
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 5s
|
|
||||||
send_timeout = 5s
|
|
||||||
|
|
||||||
## Authentication timeout
|
|
||||||
##
|
|
||||||
## @doc rpc.authentication_timeout
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 5s
|
|
||||||
authentication_timeout = 5s
|
|
||||||
|
|
||||||
## Default receive timeout for call() functions
|
|
||||||
##
|
|
||||||
## @doc rpc.call_receive_timeout
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 15s
|
|
||||||
call_receive_timeout = 15s
|
|
||||||
|
|
||||||
## Socket idle keepalive.
|
|
||||||
##
|
|
||||||
## @doc rpc.socket_keepalive_idle
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 900s
|
|
||||||
socket_keepalive_idle = 900s
|
|
||||||
|
|
||||||
## TCP Keepalive probes interval.
|
|
||||||
##
|
|
||||||
## @doc rpc.socket_keepalive_interval
|
|
||||||
## ValueType: Duration
|
|
||||||
## Default: 75s
|
|
||||||
socket_keepalive_interval = 75s
|
|
||||||
|
|
||||||
## Probes lost to close the connection
|
|
||||||
##
|
|
||||||
## @doc rpc.socket_keepalive_count
|
|
||||||
## ValueType: Integer
|
|
||||||
## Default: 9
|
|
||||||
socket_keepalive_count = 9
|
|
||||||
|
|
||||||
## Size of TCP send buffer.
|
|
||||||
##
|
|
||||||
## @doc rpc.socket_sndbuf
|
|
||||||
## ValueType: Size
|
|
||||||
## Default: 1MB
|
|
||||||
socket_sndbuf = 1MB
|
|
||||||
|
|
||||||
## Size of TCP receive buffer.
|
|
||||||
##
|
|
||||||
## @doc rpc.socket_recbuf
|
|
||||||
## ValueType: Size
|
|
||||||
## Default: 1MB
|
|
||||||
socket_recbuf = 1MB
|
|
||||||
|
|
||||||
## Size of user-level software socket buffer.
|
|
||||||
##
|
|
||||||
## @doc rpc.socket_buffer
|
|
||||||
## ValueType: Size
|
|
||||||
## Default: 1MB
|
|
||||||
socket_buffer = 1MB
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -20,13 +20,14 @@
|
||||||
-include_lib("hocon/include/hoconsc.hrl").
|
-include_lib("hocon/include/hoconsc.hrl").
|
||||||
|
|
||||||
-export([add_handler/2, remove_handler/1]).
|
-export([add_handler/2, remove_handler/1]).
|
||||||
-export([get/1, get/2, get_raw/2, get_all/1]).
|
-export([get/1, get/2, get_raw/1, get_raw/2, get_all/1]).
|
||||||
-export([get_by_node/2, get_by_node/3]).
|
-export([get_by_node/2, get_by_node/3]).
|
||||||
-export([update/3, update/4]).
|
-export([update/3, update/4]).
|
||||||
-export([remove/2, remove/3]).
|
-export([remove/2, remove/3]).
|
||||||
-export([reset/2, reset/3]).
|
-export([reset/2, reset/3]).
|
||||||
-export([dump_schema/1, dump_schema/3]).
|
-export([dump_schema/1, dump_schema/3]).
|
||||||
-export([schema_module/0]).
|
-export([schema_module/0]).
|
||||||
|
-export([gen_example_conf/4]).
|
||||||
|
|
||||||
%% for rpc
|
%% for rpc
|
||||||
-export([get_node_and_config/1]).
|
-export([get_node_and_config/1]).
|
||||||
|
@ -54,6 +55,10 @@ get(KeyPath, Default) ->
|
||||||
get_raw(KeyPath, Default) ->
|
get_raw(KeyPath, Default) ->
|
||||||
emqx_config:get_raw(KeyPath, Default).
|
emqx_config:get_raw(KeyPath, Default).
|
||||||
|
|
||||||
|
-spec get_raw(emqx_map_lib:config_key_path()) -> term().
|
||||||
|
get_raw(KeyPath) ->
|
||||||
|
emqx_config:get_raw(KeyPath).
|
||||||
|
|
||||||
%% @doc Returns all values in the cluster.
|
%% @doc Returns all values in the cluster.
|
||||||
-spec get_all(emqx_map_lib:config_key_path()) -> #{node() => term()}.
|
-spec get_all(emqx_map_lib:config_key_path()) -> #{node() => term()}.
|
||||||
get_all(KeyPath) ->
|
get_all(KeyPath) ->
|
||||||
|
@ -140,7 +145,8 @@ dump_schema(Dir, SchemaModule, I18nFile) ->
|
||||||
lists:foreach(
|
lists:foreach(
|
||||||
fun(Lang) ->
|
fun(Lang) ->
|
||||||
gen_config_md(Dir, I18nFile, SchemaModule, Lang),
|
gen_config_md(Dir, I18nFile, SchemaModule, Lang),
|
||||||
gen_hot_conf_schema_json(Dir, I18nFile, Lang)
|
gen_hot_conf_schema_json(Dir, I18nFile, Lang),
|
||||||
|
gen_example_conf(Dir, I18nFile, SchemaModule, Lang)
|
||||||
end,
|
end,
|
||||||
[en, zh]
|
[en, zh]
|
||||||
),
|
),
|
||||||
|
@ -169,6 +175,12 @@ gen_config_md(Dir, I18nFile, SchemaModule, Lang0) ->
|
||||||
io:format(user, "===< Generating: ~s~n", [SchemaMdFile]),
|
io:format(user, "===< Generating: ~s~n", [SchemaMdFile]),
|
||||||
ok = gen_doc(SchemaMdFile, SchemaModule, I18nFile, Lang).
|
ok = gen_doc(SchemaMdFile, SchemaModule, I18nFile, Lang).
|
||||||
|
|
||||||
|
gen_example_conf(Dir, I18nFile, SchemaModule, Lang0) ->
|
||||||
|
Lang = atom_to_list(Lang0),
|
||||||
|
SchemaMdFile = filename:join([Dir, "emqx-" ++ Lang ++ ".conf.example"]),
|
||||||
|
io:format(user, "===< Generating: ~s~n", [SchemaMdFile]),
|
||||||
|
ok = gen_example(SchemaMdFile, SchemaModule, I18nFile, Lang).
|
||||||
|
|
||||||
%% @doc return the root schema module.
|
%% @doc return the root schema module.
|
||||||
-spec schema_module() -> module().
|
-spec schema_module() -> module().
|
||||||
schema_module() ->
|
schema_module() ->
|
||||||
|
@ -191,6 +203,11 @@ gen_doc(File, SchemaModule, I18nFile, Lang) ->
|
||||||
Doc = hocon_schema_md:gen(SchemaModule, Opts),
|
Doc = hocon_schema_md:gen(SchemaModule, Opts),
|
||||||
file:write_file(File, Doc).
|
file:write_file(File, Doc).
|
||||||
|
|
||||||
|
gen_example(File, SchemaModule, I18nFile, Lang) ->
|
||||||
|
Opts = #{title => <<"Title">>, body => <<"Body">>, desc_file => I18nFile, lang => Lang},
|
||||||
|
Example = hocon_schema_example:gen(SchemaModule, Opts),
|
||||||
|
file:write_file(File, Example).
|
||||||
|
|
||||||
check_cluster_rpc_result(Result) ->
|
check_cluster_rpc_result(Result) ->
|
||||||
case Result of
|
case Result of
|
||||||
{ok, _TnxId, Res} ->
|
{ok, _TnxId, Res} ->
|
||||||
|
|
|
@ -60,10 +60,20 @@ get_override_config_file() ->
|
||||||
%% Internal functions
|
%% Internal functions
|
||||||
%% ------------------------------------------------------------------------------
|
%% ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
-ifdef(TEST).
|
||||||
|
init_load() ->
|
||||||
|
emqx_config:init_load(emqx_conf:schema_module(), #{raw_with_default => false}).
|
||||||
|
|
||||||
|
-else.
|
||||||
|
|
||||||
|
init_load() ->
|
||||||
|
emqx_config:init_load(emqx_conf:schema_module(), #{raw_with_default => true}).
|
||||||
|
-endif.
|
||||||
|
|
||||||
init_conf() ->
|
init_conf() ->
|
||||||
{ok, TnxId} = copy_override_conf_from_core_node(),
|
{ok, TnxId} = copy_override_conf_from_core_node(),
|
||||||
emqx_app:set_init_tnx_id(TnxId),
|
emqx_app:set_init_tnx_id(TnxId),
|
||||||
emqx_config:init_load(emqx_conf:schema_module()),
|
init_load(),
|
||||||
emqx_app:set_init_config_load_done().
|
emqx_app:set_init_config_load_done().
|
||||||
|
|
||||||
cluster_nodes() ->
|
cluster_nodes() ->
|
||||||
|
|
|
@ -76,22 +76,22 @@ roots() ->
|
||||||
[
|
[
|
||||||
{"node",
|
{"node",
|
||||||
sc(
|
sc(
|
||||||
ref("node"),
|
?R_REF("node"),
|
||||||
#{translate_to => ["emqx"]}
|
#{translate_to => ["emqx"]}
|
||||||
)},
|
)},
|
||||||
{"cluster",
|
{"cluster",
|
||||||
sc(
|
sc(
|
||||||
ref("cluster"),
|
?R_REF("cluster"),
|
||||||
#{translate_to => ["ekka"]}
|
#{translate_to => ["ekka"]}
|
||||||
)},
|
)},
|
||||||
{"log",
|
{"log",
|
||||||
sc(
|
sc(
|
||||||
ref("log"),
|
?R_REF("log"),
|
||||||
#{translate_to => ["kernel"]}
|
#{translate_to => ["kernel"]}
|
||||||
)},
|
)},
|
||||||
{"rpc",
|
{"rpc",
|
||||||
sc(
|
sc(
|
||||||
ref("rpc"),
|
?R_REF("rpc"),
|
||||||
#{translate_to => ["gen_rpc"]}
|
#{translate_to => ["gen_rpc"]}
|
||||||
)}
|
)}
|
||||||
] ++
|
] ++
|
||||||
|
@ -166,27 +166,27 @@ fields("cluster") ->
|
||||||
)},
|
)},
|
||||||
{"static",
|
{"static",
|
||||||
sc(
|
sc(
|
||||||
ref(cluster_static),
|
?R_REF(cluster_static),
|
||||||
#{}
|
#{}
|
||||||
)},
|
)},
|
||||||
{"mcast",
|
{"mcast",
|
||||||
sc(
|
sc(
|
||||||
ref(cluster_mcast),
|
?R_REF(cluster_mcast),
|
||||||
#{}
|
#{}
|
||||||
)},
|
)},
|
||||||
{"dns",
|
{"dns",
|
||||||
sc(
|
sc(
|
||||||
ref(cluster_dns),
|
?R_REF(cluster_dns),
|
||||||
#{}
|
#{}
|
||||||
)},
|
)},
|
||||||
{"etcd",
|
{"etcd",
|
||||||
sc(
|
sc(
|
||||||
ref(cluster_etcd),
|
?R_REF(cluster_etcd),
|
||||||
#{}
|
#{}
|
||||||
)},
|
)},
|
||||||
{"k8s",
|
{"k8s",
|
||||||
sc(
|
sc(
|
||||||
ref(cluster_k8s),
|
?R_REF(cluster_k8s),
|
||||||
#{}
|
#{}
|
||||||
)}
|
)}
|
||||||
];
|
];
|
||||||
|
@ -328,7 +328,7 @@ fields(cluster_etcd) ->
|
||||||
)},
|
)},
|
||||||
{"ssl",
|
{"ssl",
|
||||||
sc(
|
sc(
|
||||||
hoconsc:ref(emqx_schema, "ssl_client_opts"),
|
?R_REF(emqx_schema, "ssl_client_opts"),
|
||||||
#{
|
#{
|
||||||
desc => ?DESC(cluster_etcd_ssl),
|
desc => ?DESC(cluster_etcd_ssl),
|
||||||
'readOnly' => true
|
'readOnly' => true
|
||||||
|
@ -512,7 +512,7 @@ fields("node") ->
|
||||||
)},
|
)},
|
||||||
{"cluster_call",
|
{"cluster_call",
|
||||||
sc(
|
sc(
|
||||||
ref("cluster_call"),
|
?R_REF("cluster_call"),
|
||||||
#{'readOnly' => true}
|
#{'readOnly' => true}
|
||||||
)},
|
)},
|
||||||
{"db_backend",
|
{"db_backend",
|
||||||
|
@ -783,10 +783,10 @@ fields("rpc") ->
|
||||||
];
|
];
|
||||||
fields("log") ->
|
fields("log") ->
|
||||||
[
|
[
|
||||||
{"console_handler", ref("console_handler")},
|
{"console_handler", ?R_REF("console_handler")},
|
||||||
{"file_handlers",
|
{"file_handlers",
|
||||||
sc(
|
sc(
|
||||||
map(name, ref("log_file_handler")),
|
map(name, ?R_REF("log_file_handler")),
|
||||||
#{desc => ?DESC("log_file_handlers")}
|
#{desc => ?DESC("log_file_handlers")}
|
||||||
)},
|
)},
|
||||||
{"error_logger",
|
{"error_logger",
|
||||||
|
@ -801,7 +801,7 @@ fields("log") ->
|
||||||
)}
|
)}
|
||||||
];
|
];
|
||||||
fields("console_handler") ->
|
fields("console_handler") ->
|
||||||
log_handler_common_confs();
|
log_handler_common_confs(false);
|
||||||
fields("log_file_handler") ->
|
fields("log_file_handler") ->
|
||||||
[
|
[
|
||||||
{"file",
|
{"file",
|
||||||
|
@ -814,7 +814,7 @@ fields("log_file_handler") ->
|
||||||
)},
|
)},
|
||||||
{"rotation",
|
{"rotation",
|
||||||
sc(
|
sc(
|
||||||
ref("log_rotation"),
|
?R_REF("log_rotation"),
|
||||||
#{}
|
#{}
|
||||||
)},
|
)},
|
||||||
{"max_size",
|
{"max_size",
|
||||||
|
@ -825,7 +825,7 @@ fields("log_file_handler") ->
|
||||||
desc => ?DESC("log_file_handler_max_size")
|
desc => ?DESC("log_file_handler_max_size")
|
||||||
}
|
}
|
||||||
)}
|
)}
|
||||||
] ++ log_handler_common_confs();
|
] ++ log_handler_common_confs(true);
|
||||||
fields("log_rotation") ->
|
fields("log_rotation") ->
|
||||||
[
|
[
|
||||||
{"enable",
|
{"enable",
|
||||||
|
@ -1063,13 +1063,13 @@ tr_logger(Conf) ->
|
||||||
],
|
],
|
||||||
[{handler, default, undefined}] ++ ConsoleHandler ++ FileHandlers.
|
[{handler, default, undefined}] ++ ConsoleHandler ++ FileHandlers.
|
||||||
|
|
||||||
log_handler_common_confs() ->
|
log_handler_common_confs(Enable) ->
|
||||||
[
|
[
|
||||||
{"enable",
|
{"enable",
|
||||||
sc(
|
sc(
|
||||||
boolean(),
|
boolean(),
|
||||||
#{
|
#{
|
||||||
default => false,
|
default => Enable,
|
||||||
desc => ?DESC("common_handler_enable")
|
desc => ?DESC("common_handler_enable")
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
|
@ -1137,8 +1137,8 @@ log_handler_common_confs() ->
|
||||||
desc => ?DESC("common_handler_flush_qlen")
|
desc => ?DESC("common_handler_flush_qlen")
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
{"overload_kill", sc(ref("log_overload_kill"), #{})},
|
{"overload_kill", sc(?R_REF("log_overload_kill"), #{})},
|
||||||
{"burst_limit", sc(ref("log_burst_limit"), #{})},
|
{"burst_limit", sc(?R_REF("log_burst_limit"), #{})},
|
||||||
{"supervisor_reports",
|
{"supervisor_reports",
|
||||||
sc(
|
sc(
|
||||||
hoconsc:enum([error, progress]),
|
hoconsc:enum([error, progress]),
|
||||||
|
@ -1251,8 +1251,6 @@ sc(Type, Meta) -> hoconsc:mk(Type, Meta).
|
||||||
|
|
||||||
map(Name, Type) -> hoconsc:map(Name, Type).
|
map(Name, Type) -> hoconsc:map(Name, Type).
|
||||||
|
|
||||||
ref(Field) -> hoconsc:ref(?MODULE, Field).
|
|
||||||
|
|
||||||
options(static, Conf) ->
|
options(static, Conf) ->
|
||||||
[{seeds, conf_get("cluster.static.seeds", Conf, [])}];
|
[{seeds, conf_get("cluster.static.seeds", Conf, [])}];
|
||||||
options(mcast, Conf) ->
|
options(mcast, Conf) ->
|
||||||
|
@ -1321,7 +1319,7 @@ emqx_schema_high_prio_roots() ->
|
||||||
Authz =
|
Authz =
|
||||||
{"authorization",
|
{"authorization",
|
||||||
sc(
|
sc(
|
||||||
hoconsc:ref(?MODULE, "authorization"),
|
?R_REF("authorization"),
|
||||||
#{desc => ?DESC(authorization)}
|
#{desc => ?DESC(authorization)}
|
||||||
)},
|
)},
|
||||||
lists:keyreplace("authorization", 1, Roots, Authz).
|
lists:keyreplace("authorization", 1, Roots, Authz).
|
||||||
|
|
|
@ -1,23 +0,0 @@
|
||||||
#connectors.mqtt.my_mqtt_connector {
|
|
||||||
# mode = cluster_shareload
|
|
||||||
# server = "127.0.0.1:1883"
|
|
||||||
# proto_ver = "v4"
|
|
||||||
# username = "username1"
|
|
||||||
# password = ""
|
|
||||||
# clean_start = true
|
|
||||||
# keepalive = 300
|
|
||||||
# retry_interval = "30s"
|
|
||||||
# max_inflight = 32
|
|
||||||
# reconnect_interval = "30s"
|
|
||||||
# replayq {
|
|
||||||
# dir = "{{ platform_data_dir }}/replayq/bridge_mqtt/"
|
|
||||||
# seg_bytes = "100MB"
|
|
||||||
# offload = false
|
|
||||||
# }
|
|
||||||
# ssl {
|
|
||||||
# enable = false
|
|
||||||
# keyfile = "{{ platform_etc_dir }}/certs/client-key.pem"
|
|
||||||
# certfile = "{{ platform_etc_dir }}/certs/client-cert.pem"
|
|
||||||
# cacertfile = "{{ platform_etc_dir }}/certs/cacert.pem"
|
|
||||||
# }
|
|
||||||
#}
|
|
|
@ -64,7 +64,7 @@ fields("connectors") ->
|
||||||
mk(
|
mk(
|
||||||
hoconsc:map(
|
hoconsc:map(
|
||||||
name,
|
name,
|
||||||
hoconsc:union([ref(emqx_connector_mqtt_schema, "connector")])
|
ref(emqx_connector_mqtt_schema, "connector")
|
||||||
),
|
),
|
||||||
#{desc => ?DESC("mqtt")}
|
#{desc => ?DESC("mqtt")}
|
||||||
)}
|
)}
|
||||||
|
|
|
@ -1,39 +1,7 @@
|
||||||
##--------------------------------------------------------------------
|
|
||||||
## EMQX Dashboard
|
|
||||||
##--------------------------------------------------------------------
|
|
||||||
|
|
||||||
dashboard {
|
dashboard {
|
||||||
default_username = "admin"
|
|
||||||
default_password = "public"
|
|
||||||
## Note: sample_interval should be a divisor of 60.
|
|
||||||
## like 1s, 2s, 3s, 5s, 10s, 12s, 15s, 20s, 30s, 60s
|
|
||||||
sample_interval = 10s
|
|
||||||
## JWT token expiration time.
|
|
||||||
token_expired_time = 60m
|
|
||||||
listeners.http {
|
listeners.http {
|
||||||
num_acceptors = 4
|
bind: 18083
|
||||||
max_connections = 512
|
|
||||||
bind = 18083
|
|
||||||
backlog = 512
|
|
||||||
send_timeout = 5s
|
|
||||||
inet6 = false
|
|
||||||
ipv6_v6only = false
|
|
||||||
}
|
}
|
||||||
#listeners.https {
|
default_username: "admin"
|
||||||
# bind = "127.0.0.1:18084"
|
default_password: "public"
|
||||||
# num_acceptors = 4
|
|
||||||
# backlog = 512
|
|
||||||
# send_timeout = 5s
|
|
||||||
# inet6 = false
|
|
||||||
# ipv6_v6only = false
|
|
||||||
# certfile = "etc/certs/cert.pem"
|
|
||||||
# keyfile = "etc/certs/key.pem"
|
|
||||||
# cacertfile = "etc/certs/cacert.pem"
|
|
||||||
# verify = verify_peer
|
|
||||||
# versions = ["tlsv1.3","tlsv1.2","tlsv1.1","tlsv1"]
|
|
||||||
# ciphers = ["TLS_AES_256_GCM_SHA384","TLS_AES_128_GCM_SHA256","TLS_CHACHA20_POLY1305_SHA256","TLS_AES_128_CCM_SHA256","TLS_AES_128_CCM_8_SHA256","ECDHE-ECDSA-AES256-GCM-SHA384","ECDHE-RSA-AES256-GCM-SHA384","ECDHE-ECDSA-AES256-SHA384","ECDHE-RSA-AES256-SHA384","ECDHE-ECDSA-DES-CBC3-SHA","ECDH-ECDSA-AES256-GCM-SHA384","ECDH-RSA-AES256-GCM-SHA384","ECDH-ECDSA-AES256-SHA384","ECDH-RSA-AES256-SHA384","DHE-DSS-AES256-GCM-SHA384","DHE-DSS-AES256-SHA256","AES256-GCM-SHA384","AES256-SHA256","ECDHE-ECDSA-AES128-GCM-SHA256","ECDHE-RSA-AES128-GCM-SHA256","ECDHE-ECDSA-AES128-SHA256","ECDHE-RSA-AES128-SHA256","ECDH-ECDSA-AES128-GCM-SHA256","ECDH-RSA-AES128-GCM-SHA256","ECDH-ECDSA-AES128-SHA256","ECDH-RSA-AES128-SHA256","DHE-DSS-AES128-GCM-SHA256","DHE-DSS-AES128-SHA256","AES128-GCM-SHA256","AES128-SHA256","ECDHE-ECDSA-AES256-SHA","ECDHE-RSA-AES256-SHA","DHE-DSS-AES256-SHA","ECDH-ECDSA-AES256-SHA","ECDH-RSA-AES256-SHA","AES256-SHA","ECDHE-ECDSA-AES128-SHA","ECDHE-RSA-AES128-SHA","DHE-DSS-AES128-SHA","ECDH-ECDSA-AES128-SHA","ECDH-RSA-AES128-SHA","AES128-SHA"]
|
|
||||||
#}
|
|
||||||
|
|
||||||
## CORS Support. don't set cors true if you don't know what it means.
|
|
||||||
# cors = false
|
|
||||||
}
|
}
|
||||||
|
|
|
@ -49,7 +49,7 @@ fields("dashboard") ->
|
||||||
sc(
|
sc(
|
||||||
emqx_schema:duration(),
|
emqx_schema:duration(),
|
||||||
#{
|
#{
|
||||||
default => "30m",
|
default => "60m",
|
||||||
desc => ?DESC(token_expired_time)
|
desc => ?DESC(token_expired_time)
|
||||||
}
|
}
|
||||||
)},
|
)},
|
||||||
|
@ -171,6 +171,7 @@ bind(Port) ->
|
||||||
#{
|
#{
|
||||||
default => Port,
|
default => Port,
|
||||||
required => true,
|
required => true,
|
||||||
|
extra => #{example => [Port, "0.0.0.0:" ++ integer_to_list(Port)]},
|
||||||
desc => ?DESC(bind)
|
desc => ?DESC(bind)
|
||||||
}
|
}
|
||||||
)}.
|
)}.
|
||||||
|
|
|
@ -1,46 +0,0 @@
|
||||||
##====================================================================
|
|
||||||
## EMQX Hooks
|
|
||||||
##====================================================================
|
|
||||||
|
|
||||||
exhook {
|
|
||||||
|
|
||||||
servers = [
|
|
||||||
##{
|
|
||||||
## name = default
|
|
||||||
##
|
|
||||||
## Whether to automatically reconnect (initialize) the gRPC server
|
|
||||||
## When gRPC is not available, exhook tries to request the gRPC service at
|
|
||||||
## that interval and reinitialize the list of mounted hooks.
|
|
||||||
##
|
|
||||||
## Default: false
|
|
||||||
## Value: false | Duration
|
|
||||||
## auto_reconnect = 60s
|
|
||||||
|
|
||||||
## The default value or action will be returned, while the request to
|
|
||||||
## the gRPC server failed or no available grpc server running.
|
|
||||||
##
|
|
||||||
## Default: deny
|
|
||||||
## Value: ignore | deny
|
|
||||||
## failed_action = deny
|
|
||||||
|
|
||||||
## The timeout to request grpc server
|
|
||||||
##
|
|
||||||
## Default: 5s
|
|
||||||
## Value: Duration
|
|
||||||
## request_timeout = 5s
|
|
||||||
|
|
||||||
## url = "http://127.0.0.1:9000"
|
|
||||||
## ssl {
|
|
||||||
## cacertfile: "{{ platform_etc_dir }}/certs/cacert.pem"
|
|
||||||
## certfile: "{{ platform_etc_dir }}/certs/cert.pem"
|
|
||||||
## keyfile: "{{ platform_etc_dir }}/certs/key.pem"
|
|
||||||
## }
|
|
||||||
##
|
|
||||||
## The process pool size for gRPC client
|
|
||||||
##
|
|
||||||
## Default: Equals cpu cores
|
|
||||||
## Value: Integer
|
|
||||||
## pool_size = 16
|
|
||||||
##}
|
|
||||||
]
|
|
||||||
}
|
|
|
@ -1,7 +1 @@
|
||||||
##--------------------------------------------------------------------
|
|
||||||
## EMQX Gateway configurations
|
|
||||||
##--------------------------------------------------------------------
|
|
||||||
|
|
||||||
## No gateway by default.
|
|
||||||
##
|
|
||||||
## If you want to get how to config it, please see emqx_gateway.conf.example.
|
|
||||||
|
|
|
@ -567,7 +567,8 @@ authentication_schema() ->
|
||||||
emqx_authn_schema:authenticator_type(),
|
emqx_authn_schema:authenticator_type(),
|
||||||
#{
|
#{
|
||||||
required => {false, recursively},
|
required => {false, recursively},
|
||||||
desc => ?DESC(gateway_common_authentication)
|
desc => ?DESC(gateway_common_authentication),
|
||||||
|
examples => emqx_authn_api:authenticator_examples()
|
||||||
}
|
}
|
||||||
).
|
).
|
||||||
|
|
||||||
|
@ -606,7 +607,7 @@ gateway_common_options() ->
|
||||||
].
|
].
|
||||||
|
|
||||||
mountpoint() ->
|
mountpoint() ->
|
||||||
mountpoint(<<>>).
|
mountpoint(<<"">>).
|
||||||
mountpoint(Default) ->
|
mountpoint(Default) ->
|
||||||
sc(
|
sc(
|
||||||
binary(),
|
binary(),
|
||||||
|
|
|
@ -178,6 +178,7 @@ t_dashboard(_Config) ->
|
||||||
|
|
||||||
{ok, Dashboard1} = get_config("dashboard"),
|
{ok, Dashboard1} = get_config("dashboard"),
|
||||||
?assertNotEqual(Dashboard, Dashboard1),
|
?assertNotEqual(Dashboard, Dashboard1),
|
||||||
|
timer:sleep(1000),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
get_config(Name) ->
|
get_config(Name) ->
|
||||||
|
|
|
@ -1,40 +1 @@
|
||||||
|
|
||||||
delayed {
|
|
||||||
enable = true
|
|
||||||
## 0 is no limit
|
|
||||||
max_delayed_messages = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
observer_cli {
|
|
||||||
enable = true
|
|
||||||
}
|
|
||||||
|
|
||||||
telemetry {
|
|
||||||
enable = true
|
|
||||||
}
|
|
||||||
|
|
||||||
topic_metrics: [
|
|
||||||
#{topic: "test/1"}
|
|
||||||
]
|
|
||||||
|
|
||||||
rewrite: [
|
|
||||||
# {
|
|
||||||
# action = publish
|
|
||||||
# source_topic = "x/#"
|
|
||||||
# re = "^x/y/(.+)$"
|
|
||||||
# dest_topic = "z/y/$1"
|
|
||||||
# },
|
|
||||||
# {
|
|
||||||
# action = subscribe
|
|
||||||
# source_topic = "x1/#"
|
|
||||||
# re = "^x1/y/(.+)$"
|
|
||||||
# dest_topic = "z1/y/$1"
|
|
||||||
# },
|
|
||||||
# {
|
|
||||||
# action = all
|
|
||||||
# source_topic = "x2/#"
|
|
||||||
# re = "^x2/y/(.+)$"
|
|
||||||
# dest_topic = "z2/y/$1"
|
|
||||||
# }
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
|
@ -382,7 +382,7 @@ do_publish(Key = {Ts, _Id}, Now, Acc) when Ts =< Now ->
|
||||||
delayed_count() -> mnesia:table_info(?TAB, size).
|
delayed_count() -> mnesia:table_info(?TAB, size).
|
||||||
|
|
||||||
enable(Enable) ->
|
enable(Enable) ->
|
||||||
case emqx:get_raw_config([delayed]) of
|
case emqx_conf:get_raw([delayed]) of
|
||||||
#{<<"enable">> := Enable} ->
|
#{<<"enable">> := Enable} ->
|
||||||
ok;
|
ok;
|
||||||
Cfg ->
|
Cfg ->
|
||||||
|
|
|
@ -35,7 +35,7 @@ stop(_State) ->
|
||||||
maybe_enable_modules() ->
|
maybe_enable_modules() ->
|
||||||
emqx_conf:get([delayed, enable], true) andalso emqx_delayed:enable(),
|
emqx_conf:get([delayed, enable], true) andalso emqx_delayed:enable(),
|
||||||
emqx_modules_conf:is_telemetry_enabled() andalso emqx_telemetry:enable(),
|
emqx_modules_conf:is_telemetry_enabled() andalso emqx_telemetry:enable(),
|
||||||
emqx_conf:get([observer_cli, enable], true) andalso emqx_observer_cli:enable(),
|
emqx_observer_cli:enable(),
|
||||||
emqx_conf_cli:load(),
|
emqx_conf_cli:load(),
|
||||||
ok = emqx_rewrite:enable(),
|
ok = emqx_rewrite:enable(),
|
||||||
emqx_topic_metrics:enable(),
|
emqx_topic_metrics:enable(),
|
||||||
|
|
|
@ -85,7 +85,7 @@ remove_topic_metrics(Topic) ->
|
||||||
-spec is_telemetry_enabled() -> boolean().
|
-spec is_telemetry_enabled() -> boolean().
|
||||||
is_telemetry_enabled() ->
|
is_telemetry_enabled() ->
|
||||||
IsOfficial = emqx_telemetry:official_version(emqx_release:version()),
|
IsOfficial = emqx_telemetry:official_version(emqx_release:version()),
|
||||||
emqx:get_config([telemetry, enable], IsOfficial).
|
emqx_conf:get([telemetry, enable], IsOfficial).
|
||||||
|
|
||||||
-spec set_telemetry_status(boolean()) -> ok | {error, term()}.
|
-spec set_telemetry_status(boolean()) -> ok | {error, term()}.
|
||||||
set_telemetry_status(Status) ->
|
set_telemetry_status(Status) ->
|
||||||
|
|
|
@ -39,11 +39,11 @@ roots() ->
|
||||||
].
|
].
|
||||||
|
|
||||||
fields("telemetry") ->
|
fields("telemetry") ->
|
||||||
[{enable, hoconsc:mk(boolean(), #{default => false, desc => "Enable telemetry."})}];
|
[{enable, hoconsc:mk(boolean(), #{default => true, desc => "Enable telemetry."})}];
|
||||||
fields("delayed") ->
|
fields("delayed") ->
|
||||||
[
|
[
|
||||||
{enable, hoconsc:mk(boolean(), #{default => false, desc => ?DESC(enable)})},
|
{enable, hoconsc:mk(boolean(), #{default => true, desc => ?DESC(enable)})},
|
||||||
{max_delayed_messages, sc(integer(), #{desc => ?DESC(max_delayed_messages)})}
|
{max_delayed_messages, sc(integer(), #{desc => ?DESC(max_delayed_messages), default => 0})}
|
||||||
];
|
];
|
||||||
fields("rewrite") ->
|
fields("rewrite") ->
|
||||||
[
|
[
|
||||||
|
|
|
@ -32,7 +32,9 @@ all() ->
|
||||||
emqx_common_test_helpers:all(?MODULE).
|
emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?BASE_CONF), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
|
|
||||||
ok = emqx_common_test_helpers:start_apps(
|
ok = emqx_common_test_helpers:start_apps(
|
||||||
[emqx_conf, emqx_modules, emqx_dashboard],
|
[emqx_conf, emqx_modules, emqx_dashboard],
|
||||||
|
|
|
@ -29,7 +29,9 @@ all() ->
|
||||||
emqx_common_test_helpers:all(?MODULE).
|
emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Conf) ->
|
init_per_suite(Conf) ->
|
||||||
emqx_common_test_helpers:load_config(emqx_modules_schema, <<"gateway {}">>),
|
emqx_common_test_helpers:load_config(emqx_modules_schema, <<"gateway {}">>, #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]),
|
emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]),
|
||||||
Conf.
|
Conf.
|
||||||
|
|
||||||
|
|
|
@ -157,13 +157,17 @@ t_rewrite_re_error(_Config) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_list(_Config) ->
|
t_list(_Config) ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?REWRITE), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
Expect = maps:get(<<"rewrite">>, ?REWRITE),
|
Expect = maps:get(<<"rewrite">>, ?REWRITE),
|
||||||
?assertEqual(Expect, emqx_rewrite:list()),
|
?assertEqual(Expect, emqx_rewrite:list()),
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_update(_Config) ->
|
t_update(_Config) ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?REWRITE), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
Init = emqx_rewrite:list(),
|
Init = emqx_rewrite:list(),
|
||||||
Rules = [
|
Rules = [
|
||||||
#{
|
#{
|
||||||
|
@ -179,7 +183,9 @@ t_update(_Config) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_update_disable(_Config) ->
|
t_update_disable(_Config) ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?REWRITE), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
?assertEqual(ok, emqx_rewrite:update([])),
|
?assertEqual(ok, emqx_rewrite:update([])),
|
||||||
timer:sleep(150),
|
timer:sleep(150),
|
||||||
|
|
||||||
|
@ -194,7 +200,9 @@ t_update_disable(_Config) ->
|
||||||
ok.
|
ok.
|
||||||
|
|
||||||
t_update_re_failed(_Config) ->
|
t_update_re_failed(_Config) ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?REWRITE), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
Re = <<"*^test/*">>,
|
Re = <<"*^test/*">>,
|
||||||
Rules = [
|
Rules = [
|
||||||
#{
|
#{
|
||||||
|
@ -249,7 +257,9 @@ receive_publish(Timeout) ->
|
||||||
end.
|
end.
|
||||||
|
|
||||||
init() ->
|
init() ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?REWRITE),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?REWRITE), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
ok = emqx_rewrite:enable(),
|
ok = emqx_rewrite:enable(),
|
||||||
{ok, C} = emqtt:start_link([{clientid, <<"c1">>}, {username, <<"u1">>}]),
|
{ok, C} = emqtt:start_link([{clientid, <<"c1">>}, {username, <<"u1">>}]),
|
||||||
{ok, _} = emqtt:connect(C),
|
{ok, _} = emqtt:connect(C),
|
||||||
|
|
|
@ -33,7 +33,9 @@ init_per_testcase(_, Config) ->
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?BASE_CONF), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
|
|
||||||
ok = emqx_common_test_helpers:start_apps(
|
ok = emqx_common_test_helpers:start_apps(
|
||||||
[emqx_conf, emqx_modules, emqx_dashboard],
|
[emqx_conf, emqx_modules, emqx_dashboard],
|
||||||
|
|
|
@ -25,6 +25,11 @@
|
||||||
|
|
||||||
-import(proplists, [get_value/2]).
|
-import(proplists, [get_value/2]).
|
||||||
|
|
||||||
|
-define(BASE_CONF, #{
|
||||||
|
<<"dealyed">> => <<"true">>,
|
||||||
|
<<"max_delayed_messages">> => <<"0">>
|
||||||
|
}).
|
||||||
|
|
||||||
all() -> emqx_common_test_helpers:all(?MODULE).
|
all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
|
@ -36,6 +41,9 @@ init_per_suite(Config) ->
|
||||||
emqx_common_test_helpers:deps_path(emqx_authz, "etc/acl.conf")
|
emqx_common_test_helpers:deps_path(emqx_authz, "etc/acl.conf")
|
||||||
end
|
end
|
||||||
),
|
),
|
||||||
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?BASE_CONF), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
emqx_common_test_helpers:start_apps(
|
emqx_common_test_helpers:start_apps(
|
||||||
[emqx_conf, emqx_authn, emqx_authz, emqx_modules],
|
[emqx_conf, emqx_authn, emqx_authz, emqx_modules],
|
||||||
fun set_special_configs/1
|
fun set_special_configs/1
|
||||||
|
@ -144,7 +152,9 @@ init_per_testcase(t_exhook_info, Config) ->
|
||||||
{ok, _} = emqx_exhook_demo_svr:start(),
|
{ok, _} = emqx_exhook_demo_svr:start(),
|
||||||
{ok, Sock} = gen_tcp:connect("localhost", 9000, [], 3000),
|
{ok, Sock} = gen_tcp:connect("localhost", 9000, [], 3000),
|
||||||
_ = gen_tcp:close(Sock),
|
_ = gen_tcp:close(Sock),
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_exhook_schema, ExhookConf),
|
ok = emqx_common_test_helpers:load_config(emqx_exhook_schema, ExhookConf, #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
{ok, _} = application:ensure_all_started(emqx_exhook),
|
{ok, _} = application:ensure_all_started(emqx_exhook),
|
||||||
Config;
|
Config;
|
||||||
init_per_testcase(t_cluster_uuid, Config) ->
|
init_per_testcase(t_cluster_uuid, Config) ->
|
||||||
|
@ -166,6 +176,9 @@ init_per_testcase(t_uuid_restored_from_file, Config) ->
|
||||||
%% clear the UUIDs in the DB
|
%% clear the UUIDs in the DB
|
||||||
{atomic, ok} = mria:clear_table(emqx_telemetry),
|
{atomic, ok} = mria:clear_table(emqx_telemetry),
|
||||||
emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]),
|
emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]),
|
||||||
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?BASE_CONF), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
emqx_common_test_helpers:start_apps(
|
emqx_common_test_helpers:start_apps(
|
||||||
[emqx_conf, emqx_authn, emqx_authz, emqx_modules],
|
[emqx_conf, emqx_authn, emqx_authz, emqx_modules],
|
||||||
fun set_special_configs/1
|
fun set_special_configs/1
|
||||||
|
@ -319,6 +332,9 @@ t_uuid_saved_to_file(_Config) ->
|
||||||
%% clear the UUIDs in the DB
|
%% clear the UUIDs in the DB
|
||||||
{atomic, ok} = mria:clear_table(emqx_telemetry),
|
{atomic, ok} = mria:clear_table(emqx_telemetry),
|
||||||
emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]),
|
emqx_common_test_helpers:stop_apps([emqx_conf, emqx_authn, emqx_authz, emqx_modules]),
|
||||||
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?BASE_CONF), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
emqx_common_test_helpers:start_apps(
|
emqx_common_test_helpers:start_apps(
|
||||||
[emqx_conf, emqx_authn, emqx_authz, emqx_modules],
|
[emqx_conf, emqx_authn, emqx_authz, emqx_modules],
|
||||||
fun set_special_configs/1
|
fun set_special_configs/1
|
||||||
|
@ -841,6 +857,12 @@ setup_slave(Node) ->
|
||||||
(_) ->
|
(_) ->
|
||||||
ok
|
ok
|
||||||
end,
|
end,
|
||||||
|
ok = rpc:call(
|
||||||
|
Node,
|
||||||
|
emqx_common_test_helpers,
|
||||||
|
load_config,
|
||||||
|
[emqx_modules_schema, jsx:encode(?BASE_CONF), #{raw_with_default => true}]
|
||||||
|
),
|
||||||
ok = rpc:call(
|
ok = rpc:call(
|
||||||
Node,
|
Node,
|
||||||
emqx_common_test_helpers,
|
emqx_common_test_helpers,
|
||||||
|
|
|
@ -29,7 +29,9 @@ all() ->
|
||||||
emqx_common_test_helpers:all(?MODULE).
|
emqx_common_test_helpers:all(?MODULE).
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?BASE_CONF), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
|
|
||||||
ok = emqx_common_test_helpers:start_apps(
|
ok = emqx_common_test_helpers:start_apps(
|
||||||
[emqx_conf, emqx_authn, emqx_authz, emqx_modules, emqx_dashboard],
|
[emqx_conf, emqx_authn, emqx_authz, emqx_modules, emqx_dashboard],
|
||||||
|
|
|
@ -29,7 +29,9 @@ all() -> emqx_common_test_helpers:all(?MODULE).
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
emqx_common_test_helpers:boot_modules(all),
|
emqx_common_test_helpers:boot_modules(all),
|
||||||
emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]),
|
emqx_common_test_helpers:start_apps([emqx_conf, emqx_modules]),
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?TOPIC),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?TOPIC), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
end_per_suite(_Config) ->
|
end_per_suite(_Config) ->
|
||||||
|
|
|
@ -40,7 +40,9 @@ init_per_testcase(_, Config) ->
|
||||||
Config.
|
Config.
|
||||||
|
|
||||||
init_per_suite(Config) ->
|
init_per_suite(Config) ->
|
||||||
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, ?BASE_CONF),
|
ok = emqx_common_test_helpers:load_config(emqx_modules_schema, jsx:encode(?BASE_CONF), #{
|
||||||
|
raw_with_default => true
|
||||||
|
}),
|
||||||
|
|
||||||
ok = emqx_common_test_helpers:start_apps(
|
ok = emqx_common_test_helpers:start_apps(
|
||||||
[emqx_conf, emqx_modules, emqx_dashboard],
|
[emqx_conf, emqx_modules, emqx_dashboard],
|
||||||
|
|
|
@ -1,7 +0,0 @@
|
||||||
plugins {
|
|
||||||
prebuilt {
|
|
||||||
}
|
|
||||||
external {
|
|
||||||
}
|
|
||||||
install_dir = "{{ platform_plugins_dir }}"
|
|
||||||
}
|
|
|
@ -1,8 +0,0 @@
|
||||||
##--------------------------------------------------------------------
|
|
||||||
## emqx_prometheus for EMQX
|
|
||||||
##--------------------------------------------------------------------
|
|
||||||
prometheus {
|
|
||||||
push_gateway_server = "http://127.0.0.1:9091"
|
|
||||||
interval = "15s"
|
|
||||||
enable = false
|
|
||||||
}
|
|
|
@ -4,7 +4,7 @@
|
||||||
{emqx, {path, "../emqx"}},
|
{emqx, {path, "../emqx"}},
|
||||||
%% FIXME: tag this as v3.1.3
|
%% FIXME: tag this as v3.1.3
|
||||||
{prometheus, {git, "https://github.com/deadtrickster/prometheus.erl", {tag, "v4.8.1"}}},
|
{prometheus, {git, "https://github.com/deadtrickster/prometheus.erl", {tag, "v4.8.1"}}},
|
||||||
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.5"}}}
|
{hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.28.0"}}}
|
||||||
]}.
|
]}.
|
||||||
|
|
||||||
{edoc_opts, [{preprocess, true}]}.
|
{edoc_opts, [{preprocess, true}]}.
|
||||||
|
|
|
@ -1,22 +1 @@
|
||||||
##--------------------------------------------------------------------
|
|
||||||
## EMQX PSK
|
|
||||||
##--------------------------------------------------------------------
|
|
||||||
|
|
||||||
psk_authentication {
|
|
||||||
## Whether to enable the PSK feature.
|
|
||||||
enable = false
|
|
||||||
|
|
||||||
## If init file is specified, emqx will import PSKs from the file
|
|
||||||
## into the built-in database at startup for use by the runtime.
|
|
||||||
##
|
|
||||||
## The file has to be structured line-by-line, each line must be in
|
|
||||||
## the format: <PSKIdentity>:<SharedSecret>
|
|
||||||
## init_file = "{{ platform_data_dir }}/init.psk"
|
|
||||||
|
|
||||||
## Specifies the separator for PSKIdentity and SharedSecret in the init file.
|
|
||||||
## The default is colon (:)
|
|
||||||
## separator = ":"
|
|
||||||
|
|
||||||
## The size of each chunk used to import to the built-in database from psk file
|
|
||||||
## chunk_size = 50
|
|
||||||
}
|
|
||||||
|
|
|
@ -44,6 +44,7 @@ fields() ->
|
||||||
{enable,
|
{enable,
|
||||||
sc(boolean(), #{
|
sc(boolean(), #{
|
||||||
default => false,
|
default => false,
|
||||||
|
require => true,
|
||||||
desc => ?DESC(enable)
|
desc => ?DESC(enable)
|
||||||
})},
|
})},
|
||||||
{init_file,
|
{init_file,
|
||||||
|
|
|
@ -1,82 +0,0 @@
|
||||||
##--------------------------------------------------------------------
|
|
||||||
## EMQX Retainer
|
|
||||||
##--------------------------------------------------------------------
|
|
||||||
|
|
||||||
## Where to store the retained messages.
|
|
||||||
##
|
|
||||||
## Notice that all nodes in the same cluster have to be configured to
|
|
||||||
retainer {
|
|
||||||
## enable/disable emqx_retainer
|
|
||||||
enable = true
|
|
||||||
|
|
||||||
## Periodic interval for cleaning up expired messages. Never clear if the value is 0.
|
|
||||||
##
|
|
||||||
## Value: Duration
|
|
||||||
## - h: hour
|
|
||||||
## - m: minute
|
|
||||||
## - s: second
|
|
||||||
##
|
|
||||||
## Examples:
|
|
||||||
## - 2h: 2 hours
|
|
||||||
## - 30m: 30 minutes
|
|
||||||
## - 20s: 20 seconds
|
|
||||||
##
|
|
||||||
## Default: 0s
|
|
||||||
msg_clear_interval = 0s
|
|
||||||
|
|
||||||
## Message retention time. 0 means message will never be expired.
|
|
||||||
##
|
|
||||||
## Default: 0s
|
|
||||||
msg_expiry_interval = 0s
|
|
||||||
|
|
||||||
## When the retained flag of the PUBLISH message is set and Payload is empty,
|
|
||||||
## whether to continue to publish the message.
|
|
||||||
## see: http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Toc398718038
|
|
||||||
##
|
|
||||||
## Default: false
|
|
||||||
#stop_publish_clear_msg = false
|
|
||||||
|
|
||||||
## The message read and deliver flow rate control
|
|
||||||
## When a client subscribe to a wildcard topic, may many retained messages will be loaded.
|
|
||||||
## If you don't want these data loaded to the memory all at once, you can use this to control.
|
|
||||||
## The processing flow:
|
|
||||||
## load batch_read_number retained message from storage ->
|
|
||||||
## deliver ->
|
|
||||||
## repeat this, until all retianed messages are delivered
|
|
||||||
##
|
|
||||||
flow_control {
|
|
||||||
## The messages batch number per read from storage. 0 means no limit
|
|
||||||
##
|
|
||||||
## Default: 0
|
|
||||||
batch_read_number = 0
|
|
||||||
|
|
||||||
## The number of retained message can be delivered per batch
|
|
||||||
## Range: [0, 1000]
|
|
||||||
## Note: If this value is too large, it may cause difficulty in applying for the token of deliver
|
|
||||||
##
|
|
||||||
## Default: 0
|
|
||||||
batch_deliver_number = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
## Maximum retained message size.
|
|
||||||
##
|
|
||||||
## Value: Bytes
|
|
||||||
max_payload_size = 1MB
|
|
||||||
|
|
||||||
## Storage backend parameters
|
|
||||||
##
|
|
||||||
## Value: built_in_database
|
|
||||||
##
|
|
||||||
backend {
|
|
||||||
|
|
||||||
type = built_in_database
|
|
||||||
|
|
||||||
## storage_type: ram | disc
|
|
||||||
storage_type = ram
|
|
||||||
|
|
||||||
## Maximum number of retained messages. 0 means no limit.
|
|
||||||
##
|
|
||||||
## Value: Number >= 0
|
|
||||||
max_retained_messages = 0
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -111,7 +111,7 @@ start_link(Pool, Id) ->
|
||||||
init([Pool, Id]) ->
|
init([Pool, Id]) ->
|
||||||
erlang:process_flag(trap_exit, true),
|
erlang:process_flag(trap_exit, true),
|
||||||
true = gproc_pool:connect_worker(Pool, {Pool, Id}),
|
true = gproc_pool:connect_worker(Pool, {Pool, Id}),
|
||||||
BucketName = emqx:get_config([retainer, flow_control, batch_deliver_limiter], undefined),
|
BucketName = emqx_conf:get([retainer, flow_control, batch_deliver_limiter], undefined),
|
||||||
{ok, Limiter} = emqx_limiter_server:connect(batch, BucketName),
|
{ok, Limiter} = emqx_limiter_server:connect(batch, BucketName),
|
||||||
{ok, #{pool => Pool, id => Id, limiter => Limiter}}.
|
{ok, #{pool => Pool, id => Id, limiter => Limiter}}.
|
||||||
|
|
||||||
|
@ -151,7 +151,7 @@ handle_cast({dispatch, Context, Pid, Topic}, #{limiter := Limiter} = State) ->
|
||||||
{ok, Limiter2} = dispatch(Context, Pid, Topic, undefined, Limiter),
|
{ok, Limiter2} = dispatch(Context, Pid, Topic, undefined, Limiter),
|
||||||
{noreply, State#{limiter := Limiter2}};
|
{noreply, State#{limiter := Limiter2}};
|
||||||
handle_cast(refresh_limiter, State) ->
|
handle_cast(refresh_limiter, State) ->
|
||||||
BucketName = emqx:get_config([retainer, flow_control, batch_deliver_limiter]),
|
BucketName = emqx_conf:get([retainer, flow_control, batch_deliver_limiter]),
|
||||||
{ok, Limiter} = emqx_limiter_server:connect(batch, BucketName),
|
{ok, Limiter} = emqx_limiter_server:connect(batch, BucketName),
|
||||||
{noreply, State#{limiter := Limiter}};
|
{noreply, State#{limiter := Limiter}};
|
||||||
handle_cast(Msg, State) ->
|
handle_cast(Msg, State) ->
|
||||||
|
@ -249,7 +249,7 @@ deliver(Result, Context, Pid, Topic, Cursor, Limiter) ->
|
||||||
false ->
|
false ->
|
||||||
{ok, Limiter};
|
{ok, Limiter};
|
||||||
_ ->
|
_ ->
|
||||||
DeliverNum = emqx:get_config([retainer, flow_control, batch_deliver_number]),
|
DeliverNum = emqx_conf:get([retainer, flow_control, batch_deliver_number], undefined),
|
||||||
case DeliverNum of
|
case DeliverNum of
|
||||||
0 ->
|
0 ->
|
||||||
do_deliver(Result, Pid, Topic),
|
do_deliver(Result, Pid, Topic),
|
||||||
|
|
|
@ -18,7 +18,7 @@ roots() -> ["retainer"].
|
||||||
|
|
||||||
fields("retainer") ->
|
fields("retainer") ->
|
||||||
[
|
[
|
||||||
{enable, sc(boolean(), enable, false)},
|
{enable, sc(boolean(), enable, true)},
|
||||||
{msg_expiry_interval,
|
{msg_expiry_interval,
|
||||||
sc(
|
sc(
|
||||||
emqx_schema:duration_ms(),
|
emqx_schema:duration_ms(),
|
||||||
|
@ -31,7 +31,11 @@ fields("retainer") ->
|
||||||
msg_clear_interval,
|
msg_clear_interval,
|
||||||
"0s"
|
"0s"
|
||||||
)},
|
)},
|
||||||
{flow_control, sc(hoconsc:ref(?MODULE, flow_control), flow_control)},
|
{flow_control,
|
||||||
|
sc(
|
||||||
|
?R_REF(flow_control),
|
||||||
|
flow_control
|
||||||
|
)},
|
||||||
{max_payload_size,
|
{max_payload_size,
|
||||||
sc(
|
sc(
|
||||||
emqx_schema:bytesize(),
|
emqx_schema:bytesize(),
|
||||||
|
@ -48,7 +52,7 @@ fields("retainer") ->
|
||||||
];
|
];
|
||||||
fields(mnesia_config) ->
|
fields(mnesia_config) ->
|
||||||
[
|
[
|
||||||
{type, sc(hoconsc:union([built_in_database]), mnesia_config_type, built_in_database)},
|
{type, sc(hoconsc:enum([built_in_database]), mnesia_config_type, built_in_database)},
|
||||||
{storage_type,
|
{storage_type,
|
||||||
sc(
|
sc(
|
||||||
hoconsc:union([ram, disc]),
|
hoconsc:union([ram, disc]),
|
||||||
|
@ -57,10 +61,9 @@ fields(mnesia_config) ->
|
||||||
)},
|
)},
|
||||||
{max_retained_messages,
|
{max_retained_messages,
|
||||||
sc(
|
sc(
|
||||||
integer(),
|
non_neg_integer(),
|
||||||
max_retained_messages,
|
max_retained_messages,
|
||||||
0,
|
0
|
||||||
fun is_pos_integer/1
|
|
||||||
)},
|
)},
|
||||||
{index_specs, fun retainer_indices/1}
|
{index_specs, fun retainer_indices/1}
|
||||||
];
|
];
|
||||||
|
@ -68,10 +71,9 @@ fields(flow_control) ->
|
||||||
[
|
[
|
||||||
{batch_read_number,
|
{batch_read_number,
|
||||||
sc(
|
sc(
|
||||||
integer(),
|
non_neg_integer(),
|
||||||
batch_read_number,
|
batch_read_number,
|
||||||
0,
|
0
|
||||||
fun is_pos_integer/1
|
|
||||||
)},
|
)},
|
||||||
{batch_deliver_number,
|
{batch_deliver_number,
|
||||||
sc(
|
sc(
|
||||||
|
@ -99,28 +101,15 @@ desc(_) ->
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
%% Internal functions
|
%% Internal functions
|
||||||
%%--------------------------------------------------------------------
|
%%--------------------------------------------------------------------
|
||||||
|
|
||||||
sc(Type, DescId) ->
|
sc(Type, DescId) ->
|
||||||
hoconsc:mk(Type, #{required => true, desc => ?DESC(DescId)}).
|
hoconsc:mk(Type, #{desc => ?DESC(DescId)}).
|
||||||
|
|
||||||
sc(Type, DescId, Default) ->
|
sc(Type, DescId, Default) ->
|
||||||
hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId)}).
|
hoconsc:mk(Type, #{default => Default, desc => ?DESC(DescId)}).
|
||||||
|
|
||||||
sc(Type, DescId, Default, Validator) ->
|
|
||||||
hoconsc:mk(Type, #{
|
|
||||||
default => Default,
|
|
||||||
desc => ?DESC(DescId),
|
|
||||||
validator => Validator
|
|
||||||
}).
|
|
||||||
|
|
||||||
is_pos_integer(V) ->
|
|
||||||
V >= 0.
|
|
||||||
|
|
||||||
backend_config() ->
|
backend_config() ->
|
||||||
sc(
|
hoconsc:mk(hoconsc:ref(?MODULE, mnesia_config), #{desc => ?DESC(backend)}).
|
||||||
hoconsc:union([hoconsc:ref(?MODULE, mnesia_config)]),
|
|
||||||
backend,
|
|
||||||
mnesia_config
|
|
||||||
).
|
|
||||||
|
|
||||||
retainer_indices(type) ->
|
retainer_indices(type) ->
|
||||||
list(list(integer()));
|
list(list(integer()));
|
||||||
|
|
|
@ -1,22 +0,0 @@
|
||||||
##====================================================================
|
|
||||||
## Rule Engine for EMQX R5.0
|
|
||||||
##====================================================================
|
|
||||||
rule_engine {
|
|
||||||
ignore_sys_message = true
|
|
||||||
jq_function_default_timeout = 10s
|
|
||||||
#rules.my_republish_rule {
|
|
||||||
# description = "A simple rule that republishs MQTT messages from topic 't/1' to 't/2'"
|
|
||||||
# enable = true
|
|
||||||
# sql = "SELECT * FROM \"t/1\""
|
|
||||||
# actions = [
|
|
||||||
# {
|
|
||||||
# function = republish
|
|
||||||
# args = {
|
|
||||||
# topic = "t/2"
|
|
||||||
# qos = "${qos}"
|
|
||||||
# payload = "${payload}"
|
|
||||||
# }
|
|
||||||
# }
|
|
||||||
# ]
|
|
||||||
#}
|
|
||||||
}
|
|
|
@ -186,7 +186,7 @@ rule_name() ->
|
||||||
binary(),
|
binary(),
|
||||||
#{
|
#{
|
||||||
desc => ?DESC("rules_name"),
|
desc => ?DESC("rules_name"),
|
||||||
default => "",
|
default => <<"">>,
|
||||||
required => false,
|
required => false,
|
||||||
example => "foo"
|
example => "foo"
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,35 +0,0 @@
|
||||||
##--------------------------------------------------------------------
|
|
||||||
## EMQX Slow Subscribers Statistics
|
|
||||||
##--------------------------------------------------------------------
|
|
||||||
|
|
||||||
slow_subs {
|
|
||||||
enable = false
|
|
||||||
|
|
||||||
threshold = 500ms
|
|
||||||
## The latency threshold for statistics, the minimum value is 100ms
|
|
||||||
##
|
|
||||||
## Default: 500ms
|
|
||||||
|
|
||||||
## The eviction time of the record, which in the statistics record table
|
|
||||||
##
|
|
||||||
## Default: 300s
|
|
||||||
expire_interval = 300s
|
|
||||||
|
|
||||||
## The maximum number of records in the slow subscription statistics record table
|
|
||||||
##
|
|
||||||
## Value: 10
|
|
||||||
top_k_num = 10
|
|
||||||
|
|
||||||
## The ways to calculate the latency are as follows:
|
|
||||||
##
|
|
||||||
## 1. whole
|
|
||||||
## From the time the message arrives at EMQX until the message completes transmission
|
|
||||||
##
|
|
||||||
## 2.internal
|
|
||||||
## From when the message arrives at EMQX until when EMQX starts delivering the message
|
|
||||||
##
|
|
||||||
## 3.response
|
|
||||||
## From the time EMQX starts delivering the message, until the message completes transmission
|
|
||||||
## Default: whole
|
|
||||||
stats_type = whole
|
|
||||||
}
|
|
|
@ -32,7 +32,7 @@ fields("slow_subs") ->
|
||||||
)},
|
)},
|
||||||
{stats_type,
|
{stats_type,
|
||||||
sc(
|
sc(
|
||||||
hoconsc:union([whole, internal, response]),
|
hoconsc:enum([whole, internal, response]),
|
||||||
whole,
|
whole,
|
||||||
stats_type
|
stats_type
|
||||||
)}
|
)}
|
||||||
|
|
|
@ -1,10 +0,0 @@
|
||||||
##--------------------------------------------------------------------
|
|
||||||
## Statsd for EMQX
|
|
||||||
##--------------------------------------------------------------------
|
|
||||||
|
|
||||||
statsd {
|
|
||||||
enable = false
|
|
||||||
server = "127.0.0.1:8125"
|
|
||||||
sample_time_interval = "10s"
|
|
||||||
flush_time_interval = "10s"
|
|
||||||
}
|
|
6
mix.exs
6
mix.exs
|
@ -52,8 +52,8 @@ defmodule EMQXUmbrella.MixProject do
|
||||||
{:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true},
|
{:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true},
|
||||||
{:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true},
|
{:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true},
|
||||||
{:esockd, github: "emqx/esockd", tag: "5.9.2", override: true},
|
{:esockd, github: "emqx/esockd", tag: "5.9.2", override: true},
|
||||||
{:mria, github: "emqx/mria", tag: "0.2.5", override: true},
|
{:mria, github: "emqx/mria", tag: "0.2.7", override: true},
|
||||||
{:ekka, github: "emqx/ekka", tag: "0.12.6", override: true},
|
{:ekka, github: "emqx/ekka", tag: "0.12.8", override: true},
|
||||||
{:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true},
|
{:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true},
|
||||||
{:minirest, github: "emqx/minirest", tag: "1.3.3", override: true},
|
{:minirest, github: "emqx/minirest", tag: "1.3.3", override: true},
|
||||||
{:ecpool, github: "emqx/ecpool", tag: "0.5.2"},
|
{:ecpool, github: "emqx/ecpool", tag: "0.5.2"},
|
||||||
|
@ -66,7 +66,7 @@ defmodule EMQXUmbrella.MixProject do
|
||||||
# in conflict by emqtt and hocon
|
# in conflict by emqtt and hocon
|
||||||
{:getopt, "1.0.2", override: true},
|
{:getopt, "1.0.2", override: true},
|
||||||
{:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.0", override: true},
|
{:snabbkaffe, github: "kafka4beam/snabbkaffe", tag: "1.0.0", override: true},
|
||||||
{:hocon, github: "emqx/hocon", tag: "0.27.5", override: true},
|
{:hocon, github: "emqx/hocon", tag: "0.28.0", override: true},
|
||||||
{:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.1", override: true},
|
{:emqx_http_lib, github: "emqx/emqx_http_lib", tag: "0.5.1", override: true},
|
||||||
{:esasl, github: "emqx/esasl", tag: "0.2.0"},
|
{:esasl, github: "emqx/esasl", tag: "0.2.0"},
|
||||||
{:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"},
|
{:jose, github: "potatosalad/erlang-jose", tag: "1.11.2"},
|
||||||
|
|
|
@ -54,8 +54,8 @@
|
||||||
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
|
, {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}
|
||||||
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}
|
, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}
|
||||||
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.2"}}}
|
, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.2"}}}
|
||||||
, {mria, {git, "https://github.com/emqx/mria", {tag, "0.2.5"}}}
|
, {mria, {git, "https://github.com/emqx/mria", {tag, "0.2.7"}}}
|
||||||
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.6"}}}
|
, {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.8"}}}
|
||||||
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}
|
, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}
|
||||||
, {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.3"}}}
|
, {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.3"}}}
|
||||||
, {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.2"}}}
|
, {ecpool, {git, "https://github.com/emqx/ecpool", {tag, "0.5.2"}}}
|
||||||
|
@ -67,7 +67,7 @@
|
||||||
, {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}}
|
, {system_monitor, {git, "https://github.com/ieQu1/system_monitor", {tag, "3.0.3"}}}
|
||||||
, {getopt, "1.0.2"}
|
, {getopt, "1.0.2"}
|
||||||
, {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}
|
, {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}
|
||||||
, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.5"}}}
|
, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.28.0"}}}
|
||||||
, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.1"}}}
|
, {emqx_http_lib, {git, "https://github.com/emqx/emqx_http_lib.git", {tag, "0.5.1"}}}
|
||||||
, {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}}
|
, {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}}
|
||||||
, {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}}
|
, {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}}
|
||||||
|
|
|
@ -9,15 +9,19 @@
|
||||||
%% edition due to backward-compatibility reasons.
|
%% edition due to backward-compatibility reasons.
|
||||||
|
|
||||||
-mode(compile).
|
-mode(compile).
|
||||||
|
-define(APPS, ["emqx", "emqx_dashboard", "emqx_authz"]).
|
||||||
|
|
||||||
main(_) ->
|
main(_) ->
|
||||||
{ok, BaseConf} = file:read_file("apps/emqx_conf/etc/emqx_conf.conf"),
|
{ok, BaseConf} = file:read_file("apps/emqx_conf/etc/emqx_conf.conf"),
|
||||||
|
|
||||||
Cfgs = get_all_cfgs("apps/"),
|
Cfgs = get_all_cfgs("apps/"),
|
||||||
Conf = [merge(BaseConf, Cfgs),
|
Conf = [
|
||||||
|
merge(BaseConf, Cfgs),
|
||||||
|
io_lib:nl(),
|
||||||
io_lib:nl(),
|
io_lib:nl(),
|
||||||
"include emqx_enterprise.conf",
|
"include emqx_enterprise.conf",
|
||||||
io_lib:nl()],
|
io_lib:nl()
|
||||||
|
],
|
||||||
ok = file:write_file("apps/emqx_conf/etc/emqx.conf.all", Conf),
|
ok = file:write_file("apps/emqx_conf/etc/emqx.conf.all", Conf),
|
||||||
|
|
||||||
EnterpriseCfgs = get_all_cfgs("lib-ee/"),
|
EnterpriseCfgs = get_all_cfgs("lib-ee/"),
|
||||||
|
@ -31,14 +35,22 @@ merge(BaseConf, Cfgs) ->
|
||||||
case filelib:is_regular(CfgFile) of
|
case filelib:is_regular(CfgFile) of
|
||||||
true ->
|
true ->
|
||||||
{ok, Bin1} = file:read_file(CfgFile),
|
{ok, Bin1} = file:read_file(CfgFile),
|
||||||
[Acc, io_lib:nl(), Bin1];
|
case string:trim(Bin1, both) of
|
||||||
false -> Acc
|
<<>> -> Acc;
|
||||||
|
Bin2 -> [Acc, io_lib:nl(), io_lib:nl(), Bin2]
|
||||||
|
end;
|
||||||
|
false ->
|
||||||
|
Acc
|
||||||
end
|
end
|
||||||
end, BaseConf, Cfgs).
|
end,
|
||||||
|
BaseConf,
|
||||||
|
Cfgs
|
||||||
|
).
|
||||||
|
|
||||||
get_all_cfgs(Root) ->
|
get_all_cfgs(Root) ->
|
||||||
Apps = filelib:wildcard("*", Root) -- ["emqx_machine", "emqx_conf"],
|
Apps0 = filelib:wildcard("*", Root) -- ["emqx_machine", "emqx_conf"],
|
||||||
Dirs = [filename:join([Root, App]) || App <- Apps],
|
Apps1 = (Apps0 -- ?APPS) ++ lists:reverse(?APPS),
|
||||||
|
Dirs = [filename:join([Root, App]) || App <- Apps1],
|
||||||
lists:foldl(fun get_cfgs/2, [], Dirs).
|
lists:foldl(fun get_cfgs/2, [], Dirs).
|
||||||
|
|
||||||
get_all_cfgs(Dir, Cfgs) ->
|
get_all_cfgs(Dir, Cfgs) ->
|
||||||
|
|
|
@ -91,9 +91,9 @@ emqx_test(){
|
||||||
export EMQX_MQTT__MAX_TOPIC_ALIAS=10
|
export EMQX_MQTT__MAX_TOPIC_ALIAS=10
|
||||||
export EMQX_LOG__CONSOLE_HANDLER__LEVEL=debug
|
export EMQX_LOG__CONSOLE_HANDLER__LEVEL=debug
|
||||||
export EMQX_LOG__FILE_HANDLERS__DEFAULT__LEVEL=debug
|
export EMQX_LOG__FILE_HANDLERS__DEFAULT__LEVEL=debug
|
||||||
if [[ $(arch) == *arm* || $(arch) == aarch64 ]]; then
|
# if [[ $(arch) == *arm* || $(arch) == aarch64 ]]; then
|
||||||
export EMQX_LISTENERS__QUIC__DEFAULT__ENABLED=false
|
# export EMQX_LISTENERS__QUIC__DEFAULT__ENABLED=false
|
||||||
fi
|
# fi
|
||||||
# sed -i '/emqx_telemetry/d' "${PACKAGE_PATH}"/emqx/data/loaded_plugins
|
# sed -i '/emqx_telemetry/d' "${PACKAGE_PATH}"/emqx/data/loaded_plugins
|
||||||
|
|
||||||
echo "running ${packagename} start"
|
echo "running ${packagename} start"
|
||||||
|
@ -184,9 +184,9 @@ export EMQX_LOG__CONSOLE_HANDLER__LEVEL=debug
|
||||||
export EMQX_LOG__FILE_HANDLERS__DEFAULT__LEVEL=debug
|
export EMQX_LOG__FILE_HANDLERS__DEFAULT__LEVEL=debug
|
||||||
EOF
|
EOF
|
||||||
## for ARM, due to CI env issue, skip start of quic listener for the moment
|
## for ARM, due to CI env issue, skip start of quic listener for the moment
|
||||||
[[ $(arch) == *arm* || $(arch) == aarch64 ]] && tee -a "$emqx_env_vars" <<EOF
|
# [[ $(arch) == *arm* || $(arch) == aarch64 ]] && tee -a "$emqx_env_vars" <<EOF
|
||||||
export EMQX_LISTENERS__QUIC__DEFAULT__ENABLED=false
|
# export EMQX_LISTENERS__QUIC__DEFAULT__ENABLED=false
|
||||||
EOF
|
# EOF
|
||||||
else
|
else
|
||||||
echo "Error: cannot locate emqx_vars"
|
echo "Error: cannot locate emqx_vars"
|
||||||
exit 1
|
exit 1
|
||||||
|
|
|
@ -7,7 +7,7 @@ else
|
||||||
SCHEMA="$1"
|
SCHEMA="$1"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
docker run -d --name langtool "ghcr.io/iequ1/emqx-schema-validate:0.3.1"
|
docker run -d --name langtool "ghcr.io/iequ1/emqx-schema-validate:0.3.2"
|
||||||
|
|
||||||
docker exec -i langtool ./emqx_schema_validate - < "${SCHEMA}"
|
docker exec -i langtool ./emqx_schema_validate - < "${SCHEMA}"
|
||||||
success="$?"
|
success="$?"
|
||||||
|
|
Loading…
Reference in New Issue