docs: fix typos

Kian-Meng Ang 2022-01-27 08:41:44 +08:00 committed by Zaiming (Stone) Shi
parent d6a9a73431
commit fc92e4c8bf
97 changed files with 158 additions and 158 deletions

View File

@@ -42,7 +42,7 @@ closed --> [*]
 accepted -down--> assigned: priority review
-accepted --> needs_information: need more information\n to proceeed
+accepted --> needs_information: need more information\n to proceed
 needs_information --> accepted: updates
 assigned --> InProgress: In sprint run\n or\n start to work on
 InProgress --> closed: issue is solved

View File

@@ -6,7 +6,7 @@ NOTE: The instructions and examples are based on Windows 10.
 ### Visual studio for C/C++ compile and link
-EMQ X includes Erlang NIF (Native Implmented Function) components, implemented
+EMQ X includes Erlang NIF (Native Implemented Function) components, implemented
 in C/C++. To compile and link C/C++ libraries, the easiest way is perhaps to
 install Visual Studio.
@@ -58,7 +58,7 @@ environment in Windows, there are quite a few options.
 Cygwin is what we tested with.
 * Add `cygwin\bin` dir to `Path` environment variable
-To do so, search for Edit environment variable in control pannel and
+To do so, search for Edit environment variable in control panel and
 add `C:\tools\cygwin\bin` (depending on the location where it was installed)
 to `Path` list.
@@ -107,8 +107,8 @@ scoop install git curl make cmake jq zip unzip
 variables are not set. Make sure `vcvarsall.bat x86_amd64` is executed prior to the `make` command
 * `link: extra operand 'some.obj'`
-This is likely due ot the usage of GNU `lnik.exe` but not the one from Visual Studio.
-Exeucte `link.exe --version` to inspect which one is in use. The one installed from
+This is likely due to the usage of GNU `lnik.exe` but not the one from Visual Studio.
+Execute `link.exe --version` to inspect which one is in use. The one installed from
 Visual Studio should print out `Microsoft (R) Incremental Linker`.
 To fix it, Visual Studio's bin paths should be ordered prior to Cygwin's (or similar installation's)
 bin paths in `Path` environment variable.

View File

@@ -896,7 +896,7 @@ conn_congestion {
 ##
 ## Sometimes the mqtt connection (usually an MQTT subscriber) may
 ## get "congested" because there're too many packets to sent.
-## The socket trys to buffer the packets until the buffer is
+## The socket tries to buffer the packets until the buffer is
 ## full. If more packets comes after that, the packets will be
 ## "pending" in a queue and we consider the connection is
 ## "congested".

View File

@@ -120,7 +120,7 @@
 atom() => term()}.
 %% @doc check_config takes raw config from config file,
-%% parse and validate it, and reutrn parsed result.
+%% parse and validate it, and return parsed result.
 -callback check_config(config()) -> config().
 -callback create(AuthenticatorID, Config)

View File

@@ -112,9 +112,9 @@
 post_config_update => #{module() => any()}
 }.
-%% raw_config() is the config that is NOT parsed and tranlated by hocon schema
+%% raw_config() is the config that is NOT parsed and translated by hocon schema
 -type raw_config() :: #{binary() => term()} | list() | undefined.
-%% config() is the config that is parsed and tranlated by hocon schema
+%% config() is the config that is parsed and translated by hocon schema
 -type config() :: #{atom() => term()} | list() | undefined.
 -type app_envs() :: [proplists:property()].

View File

@@ -70,7 +70,7 @@ stop() ->
 -spec update_config(module(), emqx_config:config_key_path(), emqx_config:update_args()) ->
 {ok, emqx_config:update_result()} | {error, emqx_config:update_error()}.
 update_config(SchemaModule, ConfKeyPath, UpdateArgs) ->
-%% force covert the path to a list of atoms, as there maybe some wildcard names/ids in the path
+%% force convert the path to a list of atoms, as there maybe some wildcard names/ids in the path
 AtomKeyPath = [atom(Key) || Key <- ConfKeyPath],
 gen_server:call(?MODULE, {change_config, SchemaModule, AtomKeyPath, UpdateArgs}, infinity).

View File

@@ -880,7 +880,7 @@ handle_cast(Req, State) ->
 -type check_succ_handler() ::
 fun((any(), list(any()), state()) -> _).
-%% check limiters, if successed call WhenOk with Data and Msgs
+%% check limiters, if succeeded call WhenOk with Data and Msgs
 %% Data is the data to be processed
 %% Msgs include the next msg which after Data processed
 -spec check_limiter(list({pos_integer(), limiter_type()}),

View File

@@ -1,6 +1,6 @@
 %% -*- mode: erlang -*-
 {application, emqx_limiter,
-[{description, "EMQ X Hierachical Limiter"},
+[{description, "EMQ X Hierarchical Limiter"},
 {vsn, "1.0.0"}, % strict semver, bump manually!
 {modules, []},
 {registered, [emqx_limiter_sup]},

View File

@@ -338,11 +338,11 @@ longitudinal(#{id := Id,
 end,
 case lists:min([ShouldAlloc, Flow, Capacity]) of
-Avaiable when Avaiable > 0 ->
+Available when Available > 0 ->
 %% XXX if capacity is infinity, and flow always > 0, the value in
 %% counter will be overflow at some point in the future, do we need
 %% to deal with this situation???
-{Inc, Node2} = emqx_limiter_correction:add(Avaiable, Node),
+{Inc, Node2} = emqx_limiter_correction:add(Available, Node),
 counters:add(Counter, Index, Inc),
 {Inc,

View File

@@ -53,7 +53,7 @@ is_overloaded() ->
 %% @doc Backoff with a delay if the system is overloaded, for tasks that could be deferred.
 %% returns `false' if backoff didn't happen, the system is cool.
 %% returns `ok' if backoff is triggered and get unblocked when the system is cool.
-%% returns `timeout' if backoff is trigged but get unblocked due to timeout as configured.
+%% returns `timeout' if backoff is triggered but get unblocked due to timeout as configured.
 -spec backoff(Zone :: atom()) -> ok | false | timeout.
 backoff(Zone) ->
 case emqx_config:get_zone_conf(Zone, [?overload_protection]) of
@@ -96,12 +96,12 @@ backoff_new_conn(Zone) ->
 status() ->
 is_overloaded().
-%% @doc turn off backgroud runq check.
+%% @doc turn off background runq check.
 -spec disable() -> ok | {error, timeout}.
 disable() ->
 load_ctl:stop_runq_flagman(5000).
-%% @doc turn on backgroud runq check.
+%% @doc turn on background runq check.
 -spec enable() -> {ok, pid()} | {error, running | restarting | disabled}.
 enable() ->
 case load_ctl:restart_runq_flagman() of

View File

@@ -33,7 +33,7 @@
 %% When the queue contains items with non-zero priorities, it is
 %% represented as a sorted kv list with the inverted Priority as the
 %% key and an ordinary queue as the value. Here again we use our own
-%% ordinary queue implemention for efficiency, often making recursive
+%% ordinary queue implementation for efficiency, often making recursive
 %% calls into the same function knowing that ordinary queues represent
 %% a base case.

View File

@@ -70,7 +70,7 @@ edition(Desc) ->
 %% @doc Return the release version.
 version() ->
 case lists:keyfind(emqx_vsn, 1, ?MODULE:module_info(compile)) of
-false -> %% For TEST build or depedency build.
+false -> %% For TEST build or dependency build.
 build_vsn();
 {_, Vsn} -> %% For emqx release build
 VsnStr = build_vsn(),

View File

@@ -107,7 +107,7 @@ and can not be deleted."""
 sc(ref("mqtt"),
 #{ desc =>
 """Global MQTT configuration.<br>
-The configs here work as default values which can be overriden
+The configs here work as default values which can be overridden
 in <code>zone</code> configs"""
 })}
 , {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME,
@@ -1045,7 +1045,7 @@ when deactivated, but after the retention time.
 fields("latency_stats") ->
 [ {"samples", sc(integer(), #{default => 10,
-desc => "the number of smaples for calculate the average latency of delivery"})}
+desc => "the number of samples for calculate the average latency of delivery"})}
 ];
 fields("trace") ->
 [ {"payload_encode", sc(hoconsc:enum([hex, text, hidden]), #{

View File

@@ -119,14 +119,14 @@
 mqueue :: emqx_mqueue:mqueue(),
 %% Next packet id of the session
 next_pkt_id = 1 :: emqx_types:packet_id(),
-%% Retry interval for redelivering QoS1/2 messages (Unit: millsecond)
+%% Retry interval for redelivering QoS1/2 messages (Unit: millisecond)
 retry_interval :: timeout(),
 %% Client -> Broker: QoS2 messages received from the client, but
 %% have not been completely acknowledged
 awaiting_rel :: map(),
 %% Maximum number of awaiting QoS2 messages allowed
 max_awaiting_rel :: non_neg_integer() | infinity,
-%% Awaiting PUBREL Timeout (Unit: millsecond)
+%% Awaiting PUBREL Timeout (Unit: millisecond)
 await_rel_timeout :: timeout(),
 %% Created at
 created_at :: pos_integer(),

View File

@@ -310,7 +310,7 @@ is_valid_string(Binary) when is_binary(Binary) ->
 _Otherwise -> false
 end.
-%% Check if it is a valid PEM formated key.
+%% Check if it is a valid PEM formatted key.
 is_pem(MaybePem) ->
 try public_key:pem_decode(MaybePem) =/= []
 catch _ : _ -> false

View File

@@ -136,7 +136,7 @@ delete(Topic, Trie) when is_binary(Topic) ->
 false -> ok
 end.
-%% @doc Find trie nodes that matchs the topic name.
+%% @doc Find trie nodes that matches the topic name.
 -spec(match(emqx_types:topic()) -> list(emqx_types:topic())).
 match(Topic) when is_binary(Topic) ->
 match(Topic, ?TRIE).

View File

@@ -114,7 +114,7 @@ t_start_stop_log_handler2(_) ->
 %% stop a no exists handler returns {not_started, Id}
 ?assertMatch({error, {not_started, invalid_handler_id}},
 ?LOGGER:stop_log_handler(invalid_handler_id)),
-%% stop a handler that is already stopped retuns {not_started, Id}
+%% stop a handler that is already stopped returns {not_started, Id}
 ok = ?LOGGER:stop_log_handler(default),
 ?assertMatch({error, {not_started, default}},
 ?LOGGER:stop_log_handler(default)).

View File

@@ -563,7 +563,7 @@ client_try_check(Need, #client{counter = Counter,
 %% XXX not a god test, because client's rate maybe bigger than global rate
 %% so if client' rate = infinity
-%% client's divisible should be true or capacity must be bigger than number of each comsume
+%% client's divisible should be true or capacity must be bigger than number of each consume
 client_random_val(infinity) ->
 1000;

View File

@@ -49,9 +49,9 @@ request_response_per_qos(QoS) ->
 I = b2i(ReqBin),
 i2b(I * I)
 end,
-{ok, Responser} = emqx_request_handler:start_link(ReqTopic, QoS, Square,
+{ok, Responder} = emqx_request_handler:start_link(ReqTopic, QoS, Square,
 [{proto_ver, v5},
-{clientid, <<"responser">>}
+{clientid, <<"responder">>}
 ]),
 ok = emqx_request_sender:send(Requester, ReqTopic, RspTopic, <<"corr-1">>, <<"2">>, QoS),
 receive
@@ -64,7 +64,7 @@ request_response_per_qos(QoS) ->
 erlang:error(timeout)
 end,
 ok = emqx_request_sender:stop(Requester),
-ok = emqx_request_handler:stop(Responser).
+ok = emqx_request_handler:stop(Responder).
 b2i(B) -> binary_to_integer(B).
 i2b(I) -> integer_to_binary(I).

View File

@@ -27,7 +27,7 @@
 -define(CNT, 100).
 %%--------------------------------------------------------------------
-%% Inital funcs
+%% Initial funcs
 all() -> emqx_common_test_helpers:all(?MODULE).

View File

@@ -68,7 +68,7 @@ prop_object_map_to_map() ->
 true
 end).
-%% The duplicated key will be overriden
+%% The duplicated key will be overridden
 prop_object_proplist_to_map() ->
 ?FORALL(T0, json_object(),
 begin

View File

@@ -974,7 +974,7 @@ authenticator_examples() ->
 }
 },
 'password-based:http' => #{
-summary => <<"Password-based authentication througth external HTTP API">>,
+summary => <<"Password-based authentication through external HTTP API">>,
 value => #{
 mechanism => <<"password-based">>,
 backend => <<"http">>,

View File

@@ -46,9 +46,9 @@ authenticator_type() ->
 config_refs(Modules) ->
 lists:append([Module:refs() || Module <- Modules]).
-%% authn is a core functionality however implemented outside fo emqx app
+%% authn is a core functionality however implemented outside of emqx app
 %% in emqx_schema, 'authentication' is a map() type which is to allow
-%% EMQ X more plugable.
+%% EMQ X more pluggable.
 root_type() ->
 hoconsc:array(authenticator_type()).

View File

@@ -63,7 +63,7 @@ end_per_suite(_Config) ->
 %% Tests
 %%------------------------------------------------------------------------------
-%% emqx_connector_mongo connects asyncronously,
+%% emqx_connector_mongo connects asynchronously,
 %% so we check failure/success indirectly (through snabbkaffe).
 %% openssl s_client -tls1_2 -cipher ECDHE-RSA-AES256-GCM-SHA384 \

View File

@@ -362,7 +362,7 @@ type(postgresql) -> postgresql;
 type(<<"postgresql">>) -> postgresql;
 type('built-in-database') -> 'built-in-database';
 type(<<"built-in-database">>) -> 'built-in-database';
-%% should never happend if the input is type-checked by hocon schema
+%% should never happen if the input is type-checked by hocon schema
 type(Unknown) -> error({unknown_authz_source_type, Unknown}).
 %% @doc where the acl.conf file is stored.

View File

@@ -76,7 +76,7 @@ ordered to form a chain of access controls.<br>
 When authorizing a publish or subscribe action, the configured
 sources are checked in order. When checking an ACL source,
 in case the client (identified by username or client ID) is not found,
-it moves on to the next source. And it stops immediatly
+it moves on to the next source. And it stops immediately
 once an 'allow' or 'deny' decision is returned.<br>
 If the client is not found in any of the sources,

View File

@@ -230,7 +230,7 @@ cluster {
 ## Cluster using etcd
 ##----------------------------------------------------------------
 etcd {
-## Etcd server list, seperated by ','.
+## Etcd server list, separated by ','.
 ##
 ## @doc cluster.etcd.server
 ## ValueType: URL
@@ -279,7 +279,7 @@ cluster {
 ## Cluster using Kubernetes
 ##----------------------------------------------------------------
 k8s {
-## Kubernetes API server list, seperated by ','.
+## Kubernetes API server list, separated by ','.
 ##
 ## @doc cluster.k8s.apiserver
 ## ValueType: URL
@@ -459,7 +459,7 @@ log {
 ## Default: true
 overload_kill.enable = true
-## The max allowed queue length before killing the log hanlder.
+## The max allowed queue length before killing the log handler.
 ##
 ## Log overload protection parameter. This is the maximum allowed queue
 ## length. If the message queue grows larger than this, the handler
@@ -471,7 +471,7 @@ log {
 ## Default: 20000
 overload_kill.qlen = 20000
-## The max allowed memory size before killing the log hanlder.
+## The max allowed memory size before killing the log handler.
 ##
 ## Log overload protection parameter. This is the maximum memory size
 ## that the handler process is allowed to use. If the handler grows
@@ -482,7 +482,7 @@ log {
 ## Default: 30MB
 overload_kill.mem_size = 30MB
-## Restart the log hanlder after some seconds.
+## Restart the log handler after some seconds.
 ##
 ## Log overload protection parameter. If the handler is terminated,
 ## it restarts automatically after a delay specified in seconds.
@@ -669,7 +669,7 @@ log {
 ## Default: true
 overload_kill.enable = true
-## The max allowed queue length before killing the log hanlder.
+## The max allowed queue length before killing the log handler.
 ##
 ## Log overload protection parameter. This is the maximum allowed queue
 ## length. If the message queue grows larger than this, the handler
@@ -681,7 +681,7 @@ log {
 ## Default: 20000
 overload_kill.qlen = 20000
-## The max allowed memory size before killing the log hanlder.
+## The max allowed memory size before killing the log handler.
 ##
 ## Log overload protection parameter. This is the maximum memory size
 ## that the handler process is allowed to use. If the handler grows
@@ -692,7 +692,7 @@ log {
 ## Default: 30MB
 overload_kill.mem_size = 30MB
-## Restart the log hanlder after some seconds.
+## Restart the log handler after some seconds.
 ##
 ## Log overload protection parameter. If the handler is terminated,
 ## it restarts automatically after a delay specified in seconds.

View File

@@ -58,7 +58,7 @@ There are 4 complex data types in EMQ X's HOCON config:
 1. Struct: Named using an unquoted string, followed by a pre-defined list of fields,
 fields can not start with a number, and are only allowed to use
-lowercase letters and underscores as word separater.
+lowercase letters and underscores as word separator.
 1. Map: Map is like Struct, however the fields are not pre-defined.
 1-based index number can also be used as map keys for an alternative
 representation of an Array.
@@ -67,7 +67,7 @@ There are 4 complex data types in EMQ X's HOCON config:
 ### Primitive Data Types
-Complex types define data 'boxes' wich may contain other complex data
+Complex types define data 'boxes' which may contain other complex data
 or primitive values.
 There are quite some different primitive types, to name a fiew:
@@ -82,7 +82,7 @@ There are quite some different primitive types, to name a fiew:
 * ...
 The primitive types are mostly self-describing, some are built-in, such
-as `atom()`, some are defiend in EMQ X modules, such as `emqx_schema:duration()`.
+as `atom()`, some are defined in EMQ X modules, such as `emqx_schema:duration()`.
 ### Config Paths
@@ -133,14 +133,14 @@ because the field name is `enable`, not `enabled`.
 ### Config overlay rules
-HOCON objects are overlayed, in general:
+HOCON objects are overlaid, in general:
 - Within one file, objects defined 'later' recursively override objects defined 'earlier'
 - When layered, 'later' (hihger lalyer) objects override objects defined 'earlier' (lower layer)
 Below are more detailed rules.
-#### Struct Fileds
+#### Struct Fields
 Later config values overwrites earlier values.
 For example, in below config, the last line `debug` overwrites `errro` for
@@ -171,7 +171,7 @@ zone {
 }
 ## The maximum packet size can be defined as above,
-## then overriden as below
+## then overridden as below
 zone.zone1.mqtt.max_packet_size = 10M
 ```

View File

@@ -328,7 +328,7 @@ Select the backend for the embedded database.<br/>
 <strong>Important!</strong> This setting should be the same on all nodes in the cluster.<br/>
 <strong>Important!</strong> Changing this setting in the runtime is not allowed.<br/>
 <code>mnesia</code> is the default backend, that offers decent performance in small clusters.<br/>
-<code>rlog</code> is a new experimantal backend that is suitable for very large clusters.
+<code>rlog</code> is a new experimental backend that is suitable for very large clusters.
 """
 })}
 , {"role",
@@ -836,7 +836,7 @@ emqx_schema_high_prio_roots() ->
 sc(hoconsc:ref("authorization"),
 #{ desc => """
 Authorization a.k.a ACL.<br>
-In EMQ X, MQTT client access control is extremly flexible.<br>
+In EMQ X, MQTT client access control is extremely flexible.<br>
 An out of the box set of authorization data sources are supported.
 For example,<br>
 'file' source is to support concise and yet generic ACL rules in a file;<br>

View File

@@ -91,7 +91,7 @@ For example: http://localhost:9901/
 , {pool_type,
 sc(pool_type(),
 #{ default => random
-, desc => "The type of the pool. Canbe one of random, hash"
+, desc => "The type of the pool. Can be one of random, hash"
 })}
 , {pool_size,
 sc(non_neg_integer(),
@@ -216,12 +216,12 @@ on_query(InstId, {KeyOrNum, Method, Request, Timeout}, AfterQuery,
 {ok, StatusCode, _, _} when StatusCode >= 200 andalso StatusCode < 300 ->
 emqx_resource:query_success(AfterQuery);
 {ok, StatusCode, _} ->
-?SLOG(error, #{msg => "http connector do reqeust, received error response",
+?SLOG(error, #{msg => "http connector do request, received error response",
 request => NRequest, connector => InstId,
 status_code => StatusCode}),
 emqx_resource:query_failed(AfterQuery);
 {ok, StatusCode, _, _} ->
-?SLOG(error, #{msg => "http connector do reqeust, received error response",
+?SLOG(error, #{msg => "http connector do request, received error response",
 request => NRequest, connector => InstId,
 status_code => StatusCode}),
 emqx_resource:query_failed(AfterQuery)

View File

@@ -148,7 +148,7 @@ send(#{client_pid := ClientPid} = Conn, [Msg | Rest], PktIds) ->
 {ok, PktId} ->
 send(Conn, Rest, [PktId | PktIds]);
 {error, Reason} ->
-%% NOTE: There is no partial sucess of a batch and recover from the middle
+%% NOTE: There is no partial success of a batch and recover from the middle
 %% only to retry all messages in one batch
 {error, Reason}
 end.

View File

@@ -48,7 +48,7 @@ The mode of the MQTT Bridge. Can be one of 'cluster_singleton' or 'cluster_share
 In 'cluster_singleton' node, all messages toward the remote broker go through the same
 MQTT connection.<br>
 - cluster_shareload: create an MQTT connection on each node in the emqx cluster.<br>
-In 'cluster_shareload' mode, the incomming load from the remote broker is shared by
+In 'cluster_shareload' mode, the incoming load from the remote broker is shared by
 using shared subscription.<br>
 Note that the 'clientid' is suffixed by the node name, this is to avoid
 clientid conflicts between different nodes. And we can only use shared subscription

View File

@@ -45,7 +45,7 @@
 %%
 %% (0): auto or manual start
 %% (1): retry timeout
-%% (2): successfuly connected to remote node/cluster
+%% (2): successfully connected to remote node/cluster
 %% (3): received {disconnected, Reason} OR
 %% failed to send to remote node/cluster.
 %%
@@ -53,7 +53,7 @@
 %% local topics, and the underlying `emqx_bridge_connect' may subscribe to
 %% multiple remote topics, however, worker/connections are not designed
 %% to support automatic load-balancing, i.e. in case it can not keep up
-%% with the amount of messages comming in, administrator should split and
+%% with the amount of messages coming in, administrator should split and
 %% balance topics between worker/connections manually.
 %%
 %% NOTES:

View File

@@ -128,7 +128,7 @@ manual_start_stop_test() ->
 Config0 = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}),
 Config = Config0#{start_type := manual},
 {ok, Pid} = emqx_connector_mqtt_worker:start_link(Config#{name => BridgeName}),
-%% call ensure_started again should yeld the same result
+%% call ensure_started again should yield the same result
 ok = emqx_connector_mqtt_worker:ensure_started(BridgeName),
 emqx_connector_mqtt_worker:ensure_stopped(BridgeName),
 emqx_metrics:stop(),

View File

@@ -54,7 +54,7 @@ Build
 make && make ct
-Configurtion
+Configuration
 ------------
 ```

View File

@@ -61,7 +61,7 @@ default_password(nullable) -> false;
 default_password(sensitive) -> true;
 default_password(desc) -> """
 The initial default password for dashboard 'admin' user.
-For safty, it should be changed as soon as possible.""";
+For safety, it should be changed as soon as possible.""";
 default_password(_) -> undefined.
 cors(type) -> boolean();

View File

@@ -481,7 +481,7 @@ typename_to_spec(Name, Mod) ->
 Spec2 = typerefl_array(Spec1, Name, Mod),
 Spec3 = integer(Spec2, Name),
 Spec3 =:= nomatch andalso
-throw({error, #{msg => <<"Unsupport Type">>, type => Name, module => Mod}}),
+throw({error, #{msg => <<"Unsupported Type">>, type => Name, module => Mod}}),
 Spec3.
 range(Name) ->

View File

@@ -1,6 +1,6 @@
 # emqx_exhook
-The `emqx_exhook` extremly enhance the extensibility for EMQ X. It allow using an others programming language to mount the hooks intead of erlang.
+The `emqx_exhook` extremely enhance the extensibility for EMQ X. It allow using an others programming language to mount the hooks instead of erlang.
 ## Feature

View File

@@ -236,10 +236,10 @@ message EmptySuccess { }
 message ValuedResponse {
-// The responsed value type
-// - contiune: Use the responsed value and execute the next hook
-// - ignore: Ignore the responsed value
-// - stop_and_return: Use the responsed value and stop the chain executing
+// The responded value type
+// - contiune: Use the responded value and execute the next hook
+// - ignore: Ignore the responded value
+// - stop_and_return: Use the responded value and stop the chain executing
 enum ResponsedType {
 CONTINUE = 0;

View File

@@ -94,7 +94,7 @@ load(Name, #{request_timeout := Timeout, failed_action := FailedAction} = Opts)
 {ok, _ChannPoolPid} ->
 case do_init(Name, ReqOpts) of
 {ok, HookSpecs} ->
-%% Reigster metrics
+%% Register metrics
 Prefix = lists:flatten(io_lib:format("exhook.~ts.", [Name])),
 ensure_metrics(Prefix, HookSpecs),
 %% Ensure hooks

View File

@@ -4,7 +4,7 @@
 ## Concept
-EMQ X Gateway Managment
+EMQ X Gateway Management
 - Gateway-Registry (or Gateway Type)
 - *Load
 - *UnLoad
@@ -317,7 +317,7 @@ emqx_ctl gateway-banned
 emqx_ctl gateway-metrics [<GatewayId>]
 ```
-#### Mangement by HTTP-API/Dashboard/
+#### Management by HTTP-API/Dashboard/
 #### How to integrate a protocol to your platform

View File

@@ -9,7 +9,7 @@ gateway.stomp {
 idle_timeout = 30s
 ## To control whether write statistics data into ETS table
-## for dashbord to read.
+## for dashboard to read.
 enable_stats = true
 ## When publishing or subscribing, prefix all topics with a mountpoint string.
@@ -93,7 +93,7 @@ gateway.coap {
 idle_timeout = 30s
 ## To control whether write statistics data into ETS table
-## for dashbord to read.
+## for dashboard to read.
 enable_stats = true
 ## When publishing or subscribing, prefix all topics with a mountpoint string.
@@ -112,7 +112,7 @@ gateway.coap {
 ## The type of delivered coap message can be set to:
 ## - non: Non-confirmable
 ## - con: Confirmable
-## - qos: Mapping from QoS type of the recevied message.
+## - qos: Mapping from QoS type of the received message.
 ## QoS0 -> non, QoS1,2 -> con.
 ##
 ## Enum: non | con | qos
@@ -184,7 +184,7 @@ gateway.mqttsn {
 idle_timeout = 30s
 ## To control whether write statistics data into ETS table
-## for dashbord to read.
+## for dashboard to read.
 enable_stats = true
 ## When publishing or subscribing, prefix all topics with a mountpoint string.
@@ -252,7 +252,7 @@ gateway.lwm2m {
 idle_timeout = 30s
 ## To control whether write statistics data into ETS table
-## for dashbord to read.
+## for dashboard to read.
 enable_stats = true
 ## When publishing or subscribing, prefix all topics with a mountpoint string.
@@ -313,7 +313,7 @@ gateway.exproto {
 idle_timeout = 30s
 ## To control whether write statistics data into ETS table
-## for dashbord to read.
+## for dashboard to read.
 enable_stats = true
 ## When publishing or subscribing, prefix all topics with a mountpoint string.

View File

@@ -19,7 +19,7 @@
 -type gateway_name() :: atom().
-%% @doc The Gateway defination
+%% @doc The Gateway definition
 -type gateway() ::
 #{ name := gateway_name()
 %% Description

View File

@@ -25,7 +25,7 @@
 %%--------------------------------------------------------------------
 %% Info & Stats
-%% @doc Get the channel detailed infomation.
+%% @doc Get the channel detailed information.
 -callback info(channel()) -> emqx_types:infos().
 -callback info(Key :: atom() | [atom()], channel()) -> any().

View File

@@ -14,7 +14,7 @@
 %% limitations under the License.
 %%--------------------------------------------------------------------
-%% @doc The behavior abstrat for TCP based gateway conn
+%% @doc The behavior abstract for TCP based gateway conn
 -module(emqx_gateway_conn).
 -include_lib("emqx/include/types.hrl").
@@ -837,7 +837,7 @@ activate_socket(State = #state{sockstate = blocked}) ->
 activate_socket(State = #state{socket = Socket,
 active_n = N}) ->
 %% FIXME: Works on dtls/udp ???
-%% How to hanlde buffer?
+%% How to handle buffer?
 case esockd_setopts(Socket, [{active, N}]) of
 ok -> {ok, State#state{sockstate = running}};
 Error -> Error

View File

@@ -133,7 +133,7 @@ The server manages the client through the ClientId. If the ClientId is wrong, EM
 - clientid := client uid
 - token
-Resonse:
+Response:
 - 2.01 "Deleted" when success
 - 4.00 "Bad Request" when error
@@ -303,7 +303,7 @@ CoAP gateway uses some options in query string to conversion between MQTT CoAP.
 2. Session
-Manager the "Transport Manager" "Observe Resouces Manger" and next message id
+Manager the "Transport Manager" "Observe Resources Manager" and next message id
 3. Transport Mnager

View File

@@ -244,7 +244,7 @@ handle_call({subscribe, Topic, SubOpts}, _From,
 _ = run_hooks(Ctx, 'session.subscribed',
 [ClientInfo, MountedTopic, NSubOpts]),
-%% modifty session state
+%% modify session state
 SubReq = {Topic, Token},
 TempMsg = #coap_message{type = non},
 %% FIXME: The subopts is not used for emqx_coap_session
@@ -264,7 +264,7 @@ handle_call({unsubscribe, Topic}, _From,
 _ = run_hooks(Ctx, 'session.unsubscribe',
 [ClientInfo, MountedTopic, #{}]),
-%% modifty session state
+%% modify session state
 UnSubReq = Topic,
 TempMsg = #coap_message{type = non},
 Result = emqx_coap_session:process_subscribe(

View File

@@ -66,7 +66,7 @@
 outgoing => coap_message(),
 timeouts => list(ttimeout()),
 has_sub => undefined | sub_register(),
-transport => emqx_coap_transport:transprot()}.
+transport => emqx_coap_transport:transport()}.
 -define(TOKEN_ID(T), {token, T}).

View File

@@ -423,7 +423,7 @@ schema("/gateway/:name/clients") ->
 schema("/gateway/:name/clients/:clientid") ->
 #{ 'operationId' => clients_insta
 , get =>
-#{ description => <<"Get the gateway client infomation">>
+#{ description => <<"Get the gateway client information">>
 , parameters => params_client_insta()
 , responses =>
 ?STANDARD_RESP(#{200 => schema_client()})

View File

@@ -111,7 +111,7 @@ gateway(_) ->
 [ {"gateway list",
 "List all gateway"}
 , {"gateway lookup <Name>",
-"Lookup a gateway detailed informations"}
+"Lookup a gateway detailed information"}
 , {"gateway load <Name> <JsonConf>",
 "Load a gateway with config"}
 , {"gateway unload <Name>",

View File

@@ -111,7 +111,7 @@ procname(GwName) ->
 ChannInfoTab :: atom()}.
 cmtabs(GwName) ->
 { tabname(chan, GwName) %% Record: {ClientId, Pid}
-, tabname(conn, GwName) %% Recrod: {{ClientId, Pid}, ConnMod}
+, tabname(conn, GwName) %% Record: {{ClientId, Pid}, ConnMod}
 , tabname(info, GwName) %% Record: {{ClientId, Pid}, Info, Stats}
 }.

View File

@@ -45,7 +45,7 @@
 -record(channel, {chid, pid}).
-%% @doc Start the global channel registry for the gived gateway name.
+%% @doc Start the global channel registry for the given gateway name.
 -spec(start_link(gateway_name()) -> gen_server:startlink_ret()).
 start_link(Name) ->
 gen_server:start_link(?MODULE, [Name], []).

View File

@@ -223,7 +223,7 @@ detailed_gateway_info(State) ->
 %% There are two layer authentication configs
 %% stomp.authn
 %% / \
-%% listeners.tcp.defautl.authn *.ssl.default.authn
+%% listeners.tcp.default.authn *.ssl.default.authn
 %%
 init_authn(GwName, Config) ->

View File

@@ -75,7 +75,7 @@ reg(Name, RgOpts) ->
 -spec unreg(gateway_name()) -> ok | {error, any()}.
 unreg(Name) ->
-%% TODO: Checking ALL INSTACE HAS STOPPED
+%% TODO: Checking ALL INSTANCE HAS STOPPED
 call({unreg, Name}).
 %% TODO:

View File

@@ -186,7 +186,7 @@ received on an observed topic.
 The type of delivered coap message can be set to:<br>
 1. non: Non-confirmable;<br>
 2. con: Confirmable;<br>
-3. qos: Mapping from QoS type of recevied message, QoS0 -> non, QoS1,2 -> con"
+3. qos: Mapping from QoS type of received message, QoS0 -> non, QoS1,2 -> con"
 })}
 , {subscribe_qos,
 sc(hoconsc:union([qos0, qos1, qos2, coap]),
@@ -222,7 +222,7 @@ fields(lwm2m) ->
 sc(binary(),
 #{ default =>"etc/lwm2m_xml"
 , nullable => false
-, desc => "The Directory for LwM2M Resource defination"
+, desc => "The Directory for LwM2M Resource definition"
 })}
 , {lifetime_min,
 sc(duration(),

View File

@@ -1,6 +1,6 @@
 # emqx-exproto
-The `emqx_exproto` extremly enhance the extensibility for EMQ X. It allow using an others programming language to **replace the protocol handling layer in EMQ X Broker**.
+The `emqx_exproto` extremely enhance the extensibility for EMQ X. It allow using an others programming language to **replace the protocol handling layer in EMQ X Broker**.
 ## Feature

View File

@@ -169,7 +169,7 @@ call(ConnStr, Req) ->
 , reason => {Class, Reason}
 , stacktrace => Stk
 }),
-{error, ?RESP_UNKNOWN, <<"Unkwown crashs">>}
+{error, ?RESP_UNKNOWN, <<"Unknown crashes">>}
 end.
 %%--------------------------------------------------------------------

View File

@@ -137,7 +137,7 @@ start_grpc_server(GwName, Options = #{bind := ListenOn}) ->
 console_print("Start ~ts gRPC server on ~p successfully.~n",
 [GwName, ListenOn]);
 {error, Reason} ->
-?ELOG("Falied to start ~ts gRPC server on ~p, reason: ~p",
+?ELOG("Failed to start ~ts gRPC server on ~p, reason: ~p",
 [GwName, ListenOn, Reason])
 end.

View File

@@ -18,8 +18,8 @@ syntax = "proto3";
 package emqx.exproto.v1;
-// The Broker side serivce. It provides a set of APIs to
-// handle a protcol access
+// The Broker side service. It provides a set of APIs to
+// handle a protocol access
 service ConnectionAdapter {
 // -- socket layer

View File

@@ -51,12 +51,12 @@
 clientinfo :: emqx_types:clientinfo(),
 %% Session
 session :: emqx_lwm2m_session:session() | undefined,
-%% Channl State
+%% Channel State
 %% TODO: is there need
 conn_state :: conn_state(),
 %% Timer
 timers :: #{atom() => disable | undefined | reference()},
-%% FIXME: don't store anonymouse func
+%% FIXME: don't store anonymous func
 with_context :: function()
 }).
@@ -215,7 +215,7 @@ handle_call({subscribe, Topic, SubOpts}, _From,
 _ = run_hooks(Ctx, 'session.subscribed',
 [ClientInfo, MountedTopic, NSubOpts]),
-%% modifty session state
+%% modify session state
 Subs = emqx_lwm2m_session:info(subscriptions, Session),
 NSubs = maps:put(MountedTopic, NSubOpts, Subs),
 NSession = emqx_lwm2m_session:set_subscriptions(NSubs, Session),
@@ -231,7 +231,7 @@ handle_call({unsubscribe, Topic}, _From,
 ok = emqx_broker:unsubscribe(MountedTopic),
 _ = run_hooks(Ctx, 'session.unsubscribe',
 [ClientInfo, MountedTopic, #{}]),
-%% modifty session state
+%% modify session state
 Subs = emqx_lwm2m_session:info(subscriptions, Session),
 NSubs = maps:remove(MountedTopic, Subs),
 NSession = emqx_lwm2m_session:set_subscriptions(NSubs, Session),

View File

@@ -150,7 +150,7 @@ encode_value(Value) when is_integer(Value) ->
 encode_value(Value) when is_float(Value) ->
 <<Value:32/float>>;
 encode_value(Value) ->
-error(io_lib:format("unsupport format ~p", [Value])).
+error(io_lib:format("unsupported format ~p", [Value])).

View File

@@ -25,7 +25,7 @@ mqtt.sn.advertise_duration = 900
 ## Value: Number
 mqtt.sn.gateway_id = 1
-## To control whether write statistics data into ETS table for dashbord to read.
+## To control whether write statistics data into ETS table for dashboard to read.
 ##
 ## Value: on | off
 mqtt.sn.enable_stats = off
@@ -61,7 +61,7 @@ mqtt.sn.password = abc
 - mqtt.sn.gateway_id
 * Gateway id in ADVERTISE message.
 - mqtt.sn.enable_stats
-* To control whether write statistics data into ETS table for dashbord to read.
+* To control whether write statistics data into ETS table for dashboard to read.
 - mqtt.sn.enable_qos3
 * To control whether accept and process the received publish message with qos=-1.
 - mqtt.sn.predefined.topic.N

View File

@@ -261,7 +261,7 @@ maybe_assign_clientid(_Packet, ClientInfo) ->
 fix_mountpoint(_Packet, #{mountpoint := undefined}) -> ok;
 fix_mountpoint(_Packet, ClientInfo = #{mountpoint := Mountpoint}) ->
-%% TODO: Enrich the varibale replacement????
+%% TODO: Enrich the variable replacement????
 %% i.e: ${ClientInfo.auth_result.productKey}
 Mountpoint1 = emqx_mountpoint:replvar(Mountpoint, ClientInfo),
 {ok, ClientInfo#{mountpoint := Mountpoint1}}.
@@ -357,7 +357,7 @@ handle_in(?SN_SEARCHGW_MSG(_Radius),
 {ok, {outgoing, ?SN_GWINFO_MSG(GwId, <<>>)}, Channel};
 handle_in(?SN_ADVERTISE_MSG(_GwId, _Radius), Channel) ->
-% ingore
+% ignore
 shutdown(normal, Channel);
 handle_in(?SN_PUBLISH_MSG(#mqtt_sn_flags{qos = ?QOS_NEG1,

View File

@@ -89,7 +89,7 @@ on_gateway_load(_Gateway = #{ name := GwName,
 {ok, ListenerPids, _GwState = #{ctx => Ctx}};
 {error, {Reason, Listener}} ->
 throw({badconf, #{ key => listeners
-, vallue => Listener
+, value => Listener
 , reason => Reason
 }})
 end.

View File

@@ -53,7 +53,7 @@
 -define(SN_RC_CONGESTION, 16#01).
 -define(SN_RC_INVALID_TOPIC_ID, 16#02).
 -define(SN_RC_NOT_SUPPORTED, 16#03).
-%% Custome Reason code by emqx
+%% Custom Reason code by emqx
 -define(SN_RC_NOT_AUTHORIZE, 16#04).
 -define(SN_RC_FAILED_SESSION, 16#05).
 -define(SN_EXCEED_LIMITATION, 16#06).

View File

@@ -263,7 +263,7 @@ parse_heartbeat(#stomp_frame{headers = Headers}, ClientInfo) ->
 fix_mountpoint(_Packet, #{mountpoint := undefined}) -> ok;
 fix_mountpoint(_Packet, ClientInfo = #{mountpoint := Mountpoint}) ->
-%% TODO: Enrich the varibale replacement????
+%% TODO: Enrich the variable replacement????
 %% i.e: ${ClientInfo.auth_result.productKey}
 Mountpoint1 = emqx_mountpoint:replvar(Mountpoint, ClientInfo),
 {ok, ClientInfo#{mountpoint := Mountpoint1}}.
@@ -492,7 +492,7 @@ handle_in(?PACKET(?CMD_COMMIT, Headers), Channel) ->
 maybe_outgoing_receipt(receipt_id(Headers), Outgoings, Chann1);
 {error, Reason, Chann1} ->
 %% FIXME: atomic for transaction ??
-ErrMsg = io_lib:format("Execute transaction ~ts falied: ~0p",
+ErrMsg = io_lib:format("Execute transaction ~ts failed: ~0p",
 [TxId, Reason]
 ),
 handle_out(error, {receipt_id(Headers), ErrMsg}, Chann1)

View File

@@ -108,7 +108,7 @@ t_gateway_registry_list(_) ->
 t_gateway_usage(_) ->
 ?assertEqual(
 ["gateway list # List all gateway\n",
-"gateway lookup <Name> # Lookup a gateway detailed informations\n",
+"gateway lookup <Name> # Lookup a gateway detailed information\n",
 "gateway load <Name> <JsonConf> # Load a gateway with config\n",
 "gateway unload <Name> # Unload the gateway\n",
 "gateway stop <Name> # Stop the gateway\n",
@@ -181,13 +181,13 @@ t_gateway_start_stop(_) ->
 emqx_gateway_cli:gateway(["stop", "mqttsn"]),
 ?assertEqual("ok\n", acc_print()),
-%% dupliacted stop gateway, return ok
+%% duplicated stop gateway, return ok
 emqx_gateway_cli:gateway(["stop", "mqttsn"]),
 ?assertEqual("ok\n", acc_print()),
 emqx_gateway_cli:gateway(["start", "mqttsn"]),
 ?assertEqual("ok\n", acc_print()),
-%% dupliacted start gateway, return ok
+%% duplicated start gateway, return ok
 emqx_gateway_cli:gateway(["start", "mqttsn"]),
 ?assertEqual("ok\n", acc_print()),

View File

@@ -160,7 +160,7 @@ t_handle_process_down(Conf) ->
 _ = Pid ! {'DOWN', mref, process, self(), normal},
-timer:sleep(200), %% wait the asycn clear task
+timer:sleep(200), %% wait the async clear task
 ?assertEqual(
 [],
 ets:tab2list(emqx_gateway_cm:tabname(chan, ?GWNAME))),

View File

@@ -1986,7 +1986,7 @@ verify_read_response_1(CmdId, UdpSock) ->
 Request = test_recv_coap_request(UdpSock),
 ?LOGT("LwM2M client got ~p", [Request]),
-%% device replies the commond
+%% device replies the command
 test_send_coap_response(
 UdpSock,
 "127.0.0.1",


@ -233,7 +233,7 @@ t_subscribe_case03(_) ->
?assertEqual(<<2, ?SN_DISCONNECT>>, receive_response(Socket)), ?assertEqual(<<2, ?SN_DISCONNECT>>, receive_response(Socket)),
gen_udp:close(Socket). gen_udp:close(Socket).
%% In this case We use predefined topic name to register and subcribe, %% In this case We use predefined topic name to register and subscribe,
%% and expect to receive the corresponding predefined topic id but not a new %% and expect to receive the corresponding predefined topic id but not a new
%% generated topic id from broker. We design this case to illustrate %% generated topic id from broker. We design this case to illustrate
%% emqx_sn_gateway's compatibility of dealing with predefined and normal %% emqx_sn_gateway's compatibility of dealing with predefined and normal


@ -451,7 +451,7 @@ t_rest_clienit_info(_) ->
%% TODO: Mountpoint, AuthChain, Authorization + Mountpoint, ClientInfoOverride, %% TODO: Mountpoint, AuthChain, Authorization + Mountpoint, ClientInfoOverride,
%% Listeners, Metrics, Stats, ClientInfo %% Listeners, Metrics, Stats, ClientInfo
%% %%
%% TODO: Start/Stop, List Instace %% TODO: Start/Stop, List Instance
%% %%
%% TODO: RateLimit, OOM, %% TODO: RateLimit, OOM,


@ -79,7 +79,7 @@ code_change(_OldVsn, State, _Extra) ->
{ok, State}. {ok, State}.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% Internel function %% Internal function
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
ensure_timer(State) -> ensure_timer(State) ->


@ -14,7 +14,7 @@
%% limitations under the License. %% limitations under the License.
%%-------------------------------------------------------------------- %%--------------------------------------------------------------------
%% @doc The emqx-modules configration interoperable interfaces %% @doc The emqx-modules configuration interoperable interfaces
-module(emqx_modules_conf). -module(emqx_modules_conf).
-behaviour(emqx_config_handler). -behaviour(emqx_config_handler).


@ -111,7 +111,7 @@ schema("/mqtt/topic_metrics/:topic") ->
fields(reset) -> fields(reset) ->
[ {topic [ {topic
, mk( binary() , mk( binary()
, #{ desc => <<"Topic Name. If this paramter is not present, all created topic metrics will be reseted">> , #{ desc => <<"Topic Name. If this parameter is not present, all created topic metrics will be reset">>
, example => <<"testtopic/1">> , example => <<"testtopic/1">>
, nullable => true})} , nullable => true})}
, {action , {action
@ -135,7 +135,7 @@ fields(topic_metrics) ->
, example => <<"2022-01-14T21:48:47+08:00">>})}, , example => <<"2022-01-14T21:48:47+08:00">>})},
{ reset_time { reset_time
, mk( emqx_schema:rfc3339_system_time() , mk( emqx_schema:rfc3339_system_time()
, #{ desc => <<"Topic Metrics reset date time, in rfc3339. Nullable if never reseted">> , #{ desc => <<"Topic Metrics reset date time, in rfc3339. Nullable if never reset">>
, nullable => true , nullable => true
, example => <<"2022-01-14T21:48:47+08:00">>})}, , example => <<"2022-01-14T21:48:47+08:00">>})},
{ metrics { metrics


@ -418,7 +418,7 @@ load_plugin_app(AppName, AppVsn, Ebin, RunningApps) ->
{_, Vsn} -> {_, Vsn} ->
case bin(Vsn) =:= bin(AppVsn) of case bin(Vsn) =:= bin(AppVsn) of
true -> true ->
%% already started on the exact versio %% already started on the exact version
ok; ok;
false -> false ->
%% running but a different version %% running but a different version


@ -40,7 +40,7 @@ describe(NameVsn, LogFun) ->
{ok, Plugin} -> {ok, Plugin} ->
LogFun("~ts~n", [to_json(Plugin)]); LogFun("~ts~n", [to_json(Plugin)]);
{error, Reason} -> {error, Reason} ->
%% this should not happend unless the package is manually installed %% this should not happen unless the package is manually installed
%% corrupted packages installed from emqx_plugins:ensure_installed %% corrupted packages installed from emqx_plugins:ensure_installed
%% should not leave behind corrupted files %% should not leave behind corrupted files
?SLOG(error, #{msg => "failed_to_describe_plugin", ?SLOG(error, #{msg => "failed_to_describe_plugin",


@ -118,7 +118,7 @@ t_demo_install_start_stop_uninstall(Config) ->
?assertEqual([], emqx_plugins:list()), ?assertEqual([], emqx_plugins:list()),
ok. ok.
%% help funtion to create a info file. %% help function to create a info file.
%% The file is in JSON format when built %% The file is in JSON format when built
%% but since we are using hocon:load to load it %% but since we are using hocon:load to load it
%% ad-hoc test files can be in hocon format %% ad-hoc test files can be in hocon format


@ -96,7 +96,7 @@ purge_test() ->
?assertEqual(ok, emqx_plugins:purge("a-1")), ?assertEqual(ok, emqx_plugins:purge("a-1")),
%% assert the dir is gone %% assert the dir is gone
?assertMatch({error, enoent}, file:read_file_info(Dir)), ?assertMatch({error, enoent}, file:read_file_info(Dir)),
%% wite a file for the dir path %% write a file for the dir path
ok = file:write_file(Dir, "a"), ok = file:write_file(Dir, "a"),
?assertEqual(ok, emqx_plugins:purge("a-1")) ?assertEqual(ok, emqx_plugins:purge("a-1"))
end). end).


@ -120,7 +120,7 @@ handle_call({import, SrcFile}, _From, State) ->
handle_call(Req, _From, State) -> handle_call(Req, _From, State) ->
?SLOG(info, #{msg => "unexpected_call_discarded", req => Req}), ?SLOG(info, #{msg => "unexpected_call_discarded", req => Req}),
{reply, {error, unexecpted}, State}. {reply, {error, unexpected}, State}.
handle_cast(Req, State) -> handle_cast(Req, State) ->
?SLOG(info, #{msg => "unexpected_cast_discarded", req => Req}), ?SLOG(info, #{msg => "unexpected_cast_discarded", req => Req}),


@ -60,7 +60,7 @@
-export([ restart/1 %% restart the instance. -export([ restart/1 %% restart the instance.
, restart/2 , restart/2
, health_check/1 %% verify if the resource is working normally , health_check/1 %% verify if the resource is working normally
, set_resource_status_stoped/1 %% set resource status to stoped , set_resource_status_stoped/1 %% set resource status to stopped
, stop/1 %% stop the instance , stop/1 %% stop the instance
, query/2 %% query the instance , query/2 %% query the instance
, query/3 %% query the instance with after_query() , query/3 %% query the instance with after_query()


@ -82,7 +82,7 @@ health_check_timeout_checker(Pid, Name, SleepTime, Timeout) ->
health_check_finish -> timer:sleep(SleepTime) health_check_finish -> timer:sleep(SleepTime)
after Timeout -> after Timeout ->
emqx_alarm:activate(Name, #{name => Name}, emqx_alarm:activate(Name, #{name => Name},
<<Name/binary, " health check timout">>), <<Name/binary, " health check timeout">>),
emqx_resource:set_resource_status_stoped(Name), emqx_resource:set_resource_status_stoped(Name),
receive receive
health_check_finish -> timer:sleep(SleepTime) health_check_finish -> timer:sleep(SleepTime)


@ -42,7 +42,7 @@ retainer.max_payload_size = 64KB
## - 30m: 30 minutes ## - 30m: 30 minutes
## - 20s: 20 seconds ## - 20s: 20 seconds
## ##
## Defaut: 0 ## Default: 0
retainer.expiry_interval = 0 retainer.expiry_interval = 0
``` ```


@ -92,7 +92,7 @@ with_topic_api() ->
description => <<"delete matching messages">>, description => <<"delete matching messages">>,
parameters => parameters(), parameters => parameters(),
responses => #{ responses => #{
<<"204">> => schema(<<"Successed">>), <<"204">> => schema(<<"Succeeded">>),
<<"405">> => schema(<<"NotAllowed">>) <<"405">> => schema(<<"NotAllowed">>)
} }
} }


@ -100,7 +100,7 @@ fields("metrics") ->
})} })}
, {"outputs.total", sc(integer(), #{ , {"outputs.total", sc(integer(), #{
desc => "How much times the outputs are called by the rule. " desc => "How much times the outputs are called by the rule. "
"This value may serveral times of 'sql.matched', depending on the " "This value may several times of 'sql.matched', depending on the "
"number of the outputs of the rule." "number of the outputs of the rule."
})} })}
, {"outputs.success", sc(integer(), #{ , {"outputs.success", sc(integer(), #{


@ -87,7 +87,7 @@ fields("builtin_output_republish") ->
The arguments of the built-in 'republish' output.<br> The arguments of the built-in 'republish' output.<br>
We can use variables in the args.<br> We can use variables in the args.<br>
The variables are selected by the rule. For exmaple, if the rule SQL is defined as following: The variables are selected by the rule. For example, if the rule SQL is defined as following:
<code> <code>
SELECT clientid, qos, payload FROM \"t/1\" SELECT clientid, qos, payload FROM \"t/1\"
</code> </code>


@ -89,7 +89,7 @@
, subbits/6 , subbits/6
]). ]).
%% Data Type Convertion %% Data Type Conversion
-export([ str/1 -export([ str/1
, str_utf8/1 , str_utf8/1
, bool/1 , bool/1
@ -150,7 +150,7 @@
, map_put/3 , map_put/3
]). ]).
%% For backword compatibility %% For backward compatibility
-export([ mget/2 -export([ mget/2
, mget/3 , mget/3
, mput/3 , mput/3
@ -503,7 +503,7 @@ do_get_subbits(Bits, Sz, Len, <<"bits">>, <<"signed">>, <<"little">>) ->
<<SubBits:Sz/bits-signed-little-unit:1>>). <<SubBits:Sz/bits-signed-little-unit:1>>).
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
%% Data Type Convertion Funcs %% Data Type Conversion Funcs
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
str(Data) -> str(Data) ->


@ -81,7 +81,7 @@ t_payload(_) ->
?assertEqual(c, apply_func(payload, [<<"a.b.c">>], Input#{payload => NestedMap})). ?assertEqual(c, apply_func(payload, [<<"a.b.c">>], Input#{payload => NestedMap})).
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
%% Data Type Convertion Funcs %% Data Type Conversion Funcs
%%------------------------------------------------------------------------------ %%------------------------------------------------------------------------------
t_str(_) -> t_str(_) ->
?assertEqual(<<"abc">>, emqx_rule_funcs:str("abc")), ?assertEqual(<<"abc">>, emqx_rule_funcs:str("abc")),


@ -111,7 +111,7 @@ on_stats_update(#{clientid := ClientId,
[_] -> [_] ->
%% if Latency > minimum value, we should update it %% if Latency > minimum value, we should update it
%% if Latency < minimum value, maybe it can replace the minimum value %% if Latency < minimum value, maybe it can replace the minimum value
%% so alwyas update at here %% so always update at here
%% do we need check if Latency == minimum ??? %% do we need check if Latency == minimum ???
ets:insert(?TOPK_TAB, ets:insert(?TOPK_TAB,
#top_k{index = Index, type = Type, last_update_time = Ts}), #top_k{index = Index, type = Type, last_update_time = Ts}),


@ -258,7 +258,7 @@ check_user() {
for ARG in "$@"; do for ARG in "$@"; do
CMD="${CMD} \"$ARG\"" CMD="${CMD} \"$ARG\""
done done
# This will drop priviledges into the runner user # This will drop privileges into the runner user
# It exec's in a new shell and the current shell will exit # It exec's in a new shell and the current shell will exit
exec su - "$RUNNER_USER" -c "$CMD" exec su - "$RUNNER_USER" -c "$CMD"
fi fi
@ -367,7 +367,7 @@ relx_start_command() {
generate_config() { generate_config() {
local name_type="$1" local name_type="$1"
local node_name="$2" local node_name="$2"
## Delete the *.siz files first or it cann't start after ## Delete the *.siz files first or it can't start after
## changing the config 'log.rotation.size' ## changing the config 'log.rotation.size'
rm -rf "${RUNNER_LOG_DIR}"/*.siz rm -rf "${RUNNER_LOG_DIR}"/*.siz
@ -380,7 +380,7 @@ generate_config() {
local NOW_TIME local NOW_TIME
NOW_TIME="$(call_hocon now_time)" NOW_TIME="$(call_hocon now_time)"
## ths command populates two files: app.<time>.config and vm.<time>.args ## this command populates two files: app.<time>.config and vm.<time>.args
## NOTE: the generate command merges environment variables to the base config (emqx.conf), ## NOTE: the generate command merges environment variables to the base config (emqx.conf),
## but does not include the cluster-override.conf and local-override.conf ## but does not include the cluster-override.conf and local-override.conf
## meaning, certain overrides will not be mapped to app.<time>.config file ## meaning, certain overrides will not be mapped to app.<time>.config file
@ -413,7 +413,7 @@ generate_config() {
ARG_VALUE=$(echo "$ARG_LINE" | awk '{print $NF}') ARG_VALUE=$(echo "$ARG_LINE" | awk '{print $NF}')
## use the key to look up in vm.args file for the value ## use the key to look up in vm.args file for the value
TMP_ARG_VALUE=$(grep "^$ARG_KEY" "$TMP_ARG_FILE" || true | awk '{print $NF}') TMP_ARG_VALUE=$(grep "^$ARG_KEY" "$TMP_ARG_FILE" || true | awk '{print $NF}')
## compare generated (to override) value to original (to be overriden) value ## compare generated (to override) value to original (to be overridden) value
if [ "$ARG_VALUE" != "$TMP_ARG_VALUE" ] ; then if [ "$ARG_VALUE" != "$TMP_ARG_VALUE" ] ; then
## if they are different ## if they are different
if [ -n "$TMP_ARG_VALUE" ]; then if [ -n "$TMP_ARG_VALUE" ]; then
@ -499,7 +499,7 @@ latest_vm_args() {
if [ -f "$vm_args_file" ]; then if [ -f "$vm_args_file" ]; then
echo "$vm_args_file" echo "$vm_args_file"
else else
echoerr "ERRRO: node not initialized?" echoerr "ERROR: node not initialized?"
echoerr "Generated config file vm.*.args is not found for command '$COMMAND'" echoerr "Generated config file vm.*.args is not found for command '$COMMAND'"
echoerr "in config dir: $CONFIGS_DIR" echoerr "in config dir: $CONFIGS_DIR"
echoerr "In case the file has been deleted while the node is running," echoerr "In case the file has been deleted while the node is running,"


@ -68,7 +68,7 @@ do(Args) ->
["getpid"] -> ["getpid"] ->
io:format("~p\n", [list_to_integer(rpc:call(TargetNode, os, getpid, []))]); io:format("~p\n", [list_to_integer(rpc:call(TargetNode, os, getpid, []))]);
["ping"] -> ["ping"] ->
%% If we got this far, the node already responsed to a ping, so just dump %% If we got this far, the node already responded to a ping, so just dump
%% a "pong" %% a "pong"
io:format("pong\n"); io:format("pong\n");
["stop"] -> ["stop"] ->


@ -188,11 +188,11 @@ docker run -d --name emqx -p 18083:18083 -p 1883:1883 -p 4369:4369 \
-e EMQX_BACKEND_REDIS_POOL1__SERVER=127.0.0.1:6379 -e EMQX_BACKEND_REDIS_POOL1__SERVER=127.0.0.1:6379
[...] [...]
-e EMQX_BACKEND__REDIS__POOL5__SERVER=127.0.0.5:6379 -e EMQX_BACKEND__REDIS__POOL5__SERVER=127.0.0.5:6379
-e EMQX_BACKEND__REDIS__HOOK_MESSAGE__PUBLISH__1='{"topic": "persistant/topic1", "action": {"function": "on_message_publish"}, "pool": "pool1"}' -e EMQX_BACKEND__REDIS__HOOK_MESSAGE__PUBLISH__1='{"topic": "persistent/topic1", "action": {"function": "on_message_publish"}, "pool": "pool1"}'
-e EMQX_BACKEND__REDIS__HOOK_MESSAGE__PUBLISH__2='{"topic": "persistant/topic2", "action": {"function": "on_message_publish"}, "pool": "pool1"}' -e EMQX_BACKEND__REDIS__HOOK_MESSAGE__PUBLISH__2='{"topic": "persistent/topic2", "action": {"function": "on_message_publish"}, "pool": "pool1"}'
-e EMQX_BACKEND__REDIS__HOOK_MESSAGE__PUBLISH__3='{"topic": "persistant/topic3", "action": {"function": "on_message_publish"}, "pool": "pool1"}' -e EMQX_BACKEND__REDIS__HOOK_MESSAGE__PUBLISH__3='{"topic": "persistent/topic3", "action": {"function": "on_message_publish"}, "pool": "pool1"}'
[...] [...]
-e EMQX_BACKEND__REDIS__HOOK_MESSAGE__PUBLISH__13='{"topic": "persistant/topic13", "action": {"function": "on_message_publish"}, "pool": "pool1"}' -e EMQX_BACKEND__REDIS__HOOK_MESSAGE__PUBLISH__13='{"topic": "persistent/topic13", "action": {"function": "on_message_publish"}, "pool": "pool1"}'
emqx/emqx:latest emqx/emqx:latest
``` ```


@ -5,7 +5,7 @@ BUILT := $(SRCDIR)/BUILT
dash := - dash := -
none := none :=
space := $(none) $(none) space := $(none) $(none)
## RPM does not allow '-' in version nubmer and release string, replace with '_' ## RPM does not allow '-' in version number and release string, replace with '_'
RPM_VSN := $(subst -,_,$(PKG_VSN)) RPM_VSN := $(subst -,_,$(PKG_VSN))
RPM_REL := otp$(subst -,_,$(OTP_VSN)) RPM_REL := otp$(subst -,_,$(OTP_VSN))


@ -99,7 +99,7 @@ test_deps() ->
]. ].
common_compile_opts(Vsn) -> common_compile_opts(Vsn) ->
[ debug_info % alwyas include debug_info [ debug_info % always include debug_info
, {compile_info, [{emqx_vsn, Vsn}]} , {compile_info, [{emqx_vsn, Vsn}]}
] ++ ] ++
[{d, 'EMQX_BENCHMARK'} || os:getenv("EMQX_BENCHMARK") =:= "1" ]. [{d, 'EMQX_BENCHMARK'} || os:getenv("EMQX_BENCHMARK") =:= "1" ].


@ -36,7 +36,7 @@ check_apps() {
exit 1 exit 1
fi fi
if [ -z "${old_app_version:-}" ]; then if [ -z "${old_app_version:-}" ]; then
echo "skiped checking new app ${app}" echo "skipped checking new app ${app}"
elif [ "$old_app_version" = "$now_app_version" ]; then elif [ "$old_app_version" = "$now_app_version" ]; then
lines="$(git diff --name-only "$latest_release"...HEAD \ lines="$(git diff --name-only "$latest_release"...HEAD \
-- "$app_path/src" \ -- "$app_path/src" \
@ -61,7 +61,7 @@ _main() {
if echo "${latest_release}" |grep -oE '[0-9]+.[0-9]+.[0-9]+' > /dev/null 2>&1; then if echo "${latest_release}" |grep -oE '[0-9]+.[0-9]+.[0-9]+' > /dev/null 2>&1; then
check_apps check_apps
else else
echo "skiped unstable tag: ${latest_release}" echo "skipped unstable tag: ${latest_release}"
fi fi
} }


@ -2,7 +2,7 @@
set -euo pipefail set -euo pipefail
## This script takes the first argument as docker iamge name, ## This script takes the first argument as docker image name,
## starts two containers running with the built code mount ## starts two containers running with the built code mount
## into docker containers. ## into docker containers.
## ##


@ -1,6 +1,6 @@
#!/usr/bin/env bash #!/usr/bin/env bash
## run this script onece a year ## run this script once a year
set -euo pipefail set -euo pipefail


@ -26,7 +26,7 @@ Usage:
Options: Options:
--check Don't update the appfile, just check that they are complete --check Don't update the appfile, just check that they are complete
--repo Upsteam git repo URL --repo Upstream git repo URL
--remote Get upstream repo URL from the specified git remote --remote Get upstream repo URL from the specified git remote
--skip-build Don't rebuild the releases. May produce wrong results --skip-build Don't rebuild the releases. May produce wrong results
--make-command A command used to assemble the release --make-command A command used to assemble the release