Merge pull request #7461 from ieQu1/doc-schema-6

docs(schema): Document fields and records
This commit is contained in:
Dmitrii 2022-03-30 13:57:29 +02:00 committed by GitHub
commit 13e4feef13
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
16 changed files with 266 additions and 137 deletions

View File

@ -894,7 +894,7 @@ conn_congestion {
## Whether to alarm the congested connections.
##
## Sometimes the mqtt connection (usually an MQTT subscriber) may
## get "congested", because there're too many packets to be sent.
## get "congested", because there are too many packets to be sent.
## The socket tries to buffer the packets until the buffer is
## full. If more packets come after that, the packets will be
## "pending" in a queue and we consider the connection is
@ -914,7 +914,7 @@ conn_congestion {
enable_alarm = true
## How long to wait before the congested alarm can be cleared.
## The alarm is cleared only when there're no pending bytes in
## The alarm is cleared only when there are no pending bytes in
## the queue, and also it has been `min_alarm_sustain_duration`
## time since the last time we considered the connection is "congested".
##

View File

@ -20,7 +20,7 @@
-export([ roots/0, fields/1, to_rate/1, to_capacity/1
, minimum_period/0, to_burst_rate/1, to_initial/1
, namespace/0, get_bucket_cfg_path/2
, namespace/0, get_bucket_cfg_path/2, desc/1
]).
-define(KILOBYTE, 1024).
@ -149,6 +149,17 @@ the check/consume will succeed, but it will be forced to wait for a short period
, default => force})}
].
desc(limiter) ->
"Settings for the rate limiter.";
desc(limiter_opts) ->
"Settings for the limiter.";
desc(bucket_opts) ->
"Settings for the bucket.";
desc(client_bucket) ->
"Settings for the client bucket.";
desc(_) ->
undefined.
%% minimum period is 100ms
minimum_period() ->
100.

View File

@ -304,7 +304,7 @@ fields("stats") ->
boolean(),
#{
default => true,
desc => "Enable/disable statistic data collection"
desc => "Enable/disable statistic data collection."
}
)}
];
@ -339,17 +339,26 @@ fields("cache") ->
{"enable",
sc(
boolean(),
#{default => true}
#{
default => true,
desc => "Enable or disable the authorization cache."
}
)},
{"max_size",
sc(
range(1, 1048576),
#{default => 32}
#{
default => 32,
desc => "Maximum number of cached items."
}
)},
{"ttl",
sc(
duration(),
#{default => "1m"}
#{
default => "1m",
desc => "Time to live for the cached data."
}
)}
];
fields("mqtt") ->
@ -762,12 +771,23 @@ fields("conn_congestion") ->
{"enable_alarm",
sc(
boolean(),
#{default => false}
#{
default => false,
desc => "Enable or disable connection congestion alarm."
}
)},
{"min_alarm_sustain_duration",
sc(
duration(),
#{default => "1m"}
#{
default => "1m",
desc =>
"Minimal time before clearing the alarm.\n\n"
"The alarm is cleared only when there's no pending data in\n"
"the queue, and at least `min_alarm_sustain_duration`\n"
"milliseconds passed since the last time we considered the connection \"congested\".\n\n"
"This is to avoid clearing and raising the alarm again too often."
}
)}
];
fields("force_gc") ->
@ -1357,7 +1377,7 @@ fields("sysmon_vm") ->
desc =>
"The threshold, as percentage of processes, for how many\n"
" processes can simultaneously exist at the local node before the corresponding\n"
" alarm is set."
" alarm is raised."
}
)},
{"process_low_watermark",
@ -1431,7 +1451,7 @@ fields("sysmon_os") ->
default => "80%",
desc =>
"The threshold, as percentage of system CPU load,\n"
" for how much system cpu can be used before the corresponding alarm is set."
" for how much system cpu can be used before the corresponding alarm is raised."
}
)},
{"cpu_low_watermark",
@ -1459,7 +1479,7 @@ fields("sysmon_os") ->
default => "70%",
desc =>
"The threshold, as percentage of system memory,\n"
" for how much system memory can be allocated before the corresponding alarm is set."
" for how much system memory can be allocated before the corresponding alarm is raised."
}
)},
{"procmem_high_watermark",
@ -1470,7 +1490,7 @@ fields("sysmon_os") ->
desc =>
"The threshold, as percentage of system memory,\n"
" for how much system memory can be allocated by one Erlang process before\n"
" the corresponding alarm is set."
" the corresponding alarm is raised."
}
)}
];
@ -1704,7 +1724,13 @@ base_listener() ->
}
)},
{"limiter",
sc(map("ratelimit's type", emqx_limiter_schema:bucket_name()), #{default => #{}})}
sc(
map("ratelimit's type", emqx_limiter_schema:bucket_name()),
#{
default => #{},
desc => "Type of the rate limit."
}
)}
].
desc("persistent_session_store") ->

View File

@ -73,14 +73,17 @@ fields(other_algorithms) ->
{salt_position, fun salt_position/1}].
salt_position(type) -> {enum, [prefix, suffix]};
salt_position(desc) -> "Specifies whether the password salt is stored as a prefix or the suffix.";
salt_position(default) -> prefix;
salt_position(_) -> undefined.
salt_rounds(type) -> integer();
salt_rounds(desc) -> "Cost factor for the bcrypt hash.";
salt_rounds(default) -> 10;
salt_rounds(_) -> undefined.
dk_length(type) -> integer();
dk_length(desc) -> "Length of the derived key.";
dk_length(required) -> false;
dk_length(_) -> undefined.

View File

@ -55,11 +55,15 @@ root_type() ->
mechanism(Name) ->
hoconsc:mk(hoconsc:enum([Name]),
#{required => true}).
#{ required => true
, desc => "Authentication mechanism."
}).
backend(Name) ->
hoconsc:mk(hoconsc:enum([Name]),
#{required => true}).
#{ required => true
, desc => "Backend type."
}).
fields("metrics_status_fields") ->
[ {"metrics", mk(ref(?MODULE, "metrics"), #{desc => "The metrics of the resource"})}
@ -89,7 +93,7 @@ fields("node_metrics") ->
fields("node_status") ->
[ node_name()
, {"status", mk(status(), #{})}
, {"status", mk(status(), #{desc => "Status of the node."})}
].
status() ->

View File

@ -26,6 +26,7 @@
-export([ namespace/0
, roots/0
, fields/1
, desc/1
]).
-export([ refs/0
@ -55,6 +56,15 @@ fields('replica-set') ->
fields('sharded-cluster') ->
common_fields() ++ emqx_connector_mongo:fields(sharded).
desc(standalone) ->
"Configuration for a standalone MongoDB instance.";
desc('replica-set') ->
"Configuration for a replica set.";
desc('sharded-cluster') ->
"Configuration for a sharded cluster.";
desc(_) ->
undefined.
common_fields() ->
[ {mechanism, emqx_authn_schema:mechanism('password_based')}
, {backend, emqx_authn_schema:backend(mongodb)}
@ -67,19 +77,27 @@ common_fields() ->
] ++ emqx_authn_schema:common_fields().
collection(type) -> binary();
collection(desc) -> "Collection used to store authentication data.";
collection(_) -> undefined.
selector(type) -> map();
selector(desc) -> "Statement that is executed during the authentication process. "
                   "Commands can support the following wildcards:\n"
" - `${username}`: substituted with client's username\n"
" - `${clientid}`: substituted with the clientid";
selector(_) -> undefined.
password_hash_field(type) -> binary();
password_hash_field(desc) -> "Document field that contains password hash.";
password_hash_field(_) -> undefined.
salt_field(type) -> binary();
salt_field(desc) -> "Document field that contains the password salt.";
salt_field(required) -> false;
salt_field(_) -> undefined.
is_superuser_field(type) -> binary();
is_superuser_field(desc) -> "Document field that defines if the user has superuser privileges.";
is_superuser_field(required) -> false;
is_superuser_field(_) -> undefined.

View File

@ -66,7 +66,7 @@ fields("authorization") ->
]),
default => [],
desc =>
"""
"
Authorization data sources.<br>
An array of authorization (ACL) data providers.
It is designed as an array, not a hash-map, so the sources can be
@ -84,7 +84,7 @@ the default action configured in 'authorization.no_match' is applied.<br>
NOTE:
The source elements are identified by their 'type'.
It is NOT allowed to configure two or more sources of the same type.
"""
"
}
}
];
@ -94,7 +94,7 @@ fields(file) ->
default => true}}
, {path, #{type => string(),
required => true,
desc => """
desc => "
Path to the file which contains the ACL rules.<br>
If the file provisioned before starting EMQX node,
it can be placed anywhere as long as EMQX has read access to it.
@ -102,7 +102,7 @@ it can be placed anywhere as long as EMQX has read access to it.
In case the rule-set is created from EMQX dashboard or management API,
the file will be placed in `authz` subdirectory inside EMQX's `data_dir`,
and the new rules will override all rules from the old config file.
"""
"
}}
];
fields(http_get) ->
@ -145,18 +145,19 @@ fields(redis_cluster) ->
http_common_fields() ->
[ {url, fun url/1}
, {request_timeout, mk_duration("Request timeout", #{default => "30s"})}
, {body, #{type => map(), required => false}}
, {request_timeout, mk_duration("Request timeout", #{default => "30s", desc => "Request timeout."})}
, {body, #{type => map(), required => false, desc => "HTTP request body."}}
] ++ maps:to_list(maps:without([ base_url
, pool_type],
maps:from_list(connector_fields(http)))).
mongo_common_fields() ->
[ {collection, #{type => atom()}}
, {selector, #{type => map()}}
, {type, #{type => mongodb}}
[ {collection, #{type => atom(), desc => "`MongoDB` collection containing the authorization data."}}
, {selector, #{type => map(), desc => "MQL query used to select the authorization record."}}
, {type, #{type => mongodb, desc => "Database backend."}}
, {enable, #{type => boolean(),
default => true}}
default => true,
desc => "Enable or disable the backend."}}
].
validations() ->
@ -165,6 +166,7 @@ validations() ->
].
headers(type) -> list({binary(), binary()});
headers(desc) -> "List of HTTP headers.";
headers(converter) ->
fun(Headers) ->
maps:to_list(maps:merge(default_headers(), transform_header_name(Headers)))
@ -173,6 +175,7 @@ headers(default) -> default_headers();
headers(_) -> undefined.
headers_no_content_type(type) -> list({binary(), binary()});
headers_no_content_type(desc) -> "List of HTTP headers.";
headers_no_content_type(converter) ->
fun(Headers) ->
maps:to_list(maps:merge(default_headers_no_content_type(), transform_header_name(Headers)))
@ -181,6 +184,7 @@ headers_no_content_type(default) -> default_headers_no_content_type();
headers_no_content_type(_) -> undefined.
url(type) -> binary();
url(desc) -> "URL of the auth server.";
url(validator) -> [?NOT_EMPTY("the value of the field 'url' cannot be empty")];
url(required) -> true;
url(_) -> undefined.
@ -244,6 +248,7 @@ union_array(Item) when is_list(Item) ->
query() ->
#{type => binary(),
desc => "",
validator => fun(S) ->
case size(S) > 0 of
true -> ok;
@ -264,9 +269,10 @@ connector_fields(DB, Fields) ->
error:Reason ->
erlang:error(Reason)
end,
[ {type, #{type => DB}}
[ {type, #{type => DB, desc => "Database backend."}}
, {enable, #{type => boolean(),
default => true}}
default => true,
desc => "Enable or disable the backend."}}
] ++ erlang:apply(Mod, fields, [Fields]).
to_list(A) when is_atom(A) ->

View File

@ -421,7 +421,7 @@ fields("db") ->
, 'readOnly' => true
, desc => """
Select the backend for the embedded database.<br/>
<code>rlog</code> is the default backend, a new experimental backend
<code>rlog</code> is the default backend,
which is suitable for very large clusters.<br/>
<code>mnesia</code> is a backend that offers decent performance in small clusters.
"""

View File

@ -24,12 +24,13 @@
-define(REDIS_DEFAULT_PORT, 6379).
-define(PGSQL_DEFAULT_PORT, 5432).
-define(SERVERS_DESC, "A Node list for Cluster to connect to. The nodes should be split with ',', such as: 'Node[,Node]'<br>\nFor each Node should be:<br>").
-define(SERVERS_DESC, "A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node]`.<br/>
Each Node should have the form: ").
-define(SERVER_DESC(TYPE, DEFAULT_PORT), """
The IPv4 or IPv6 address or host name to connect to.<br>
A host entry has the following form: 'Host[:Port]'<br>
The """ ++ TYPE ++ " default port " ++ DEFAULT_PORT ++ " is used if '[:Port]' isn't present"
-define(SERVER_DESC(TYPE, DEFAULT_PORT), "
The IPv4 or IPv6 address or the hostname to connect to.<br/>
A host entry has the following form: `Host[:Port]`.<br/>
The " ++ TYPE ++ " default port " ++ DEFAULT_PORT ++ " is used if `[:Port]` is not specified."
).
-define(THROW_ERROR(Str), erlang:throw({error, Str})).

View File

@ -54,13 +54,15 @@ roots() ->
fields(single) ->
[ {mongo_type, #{type => single,
default => single}}
default => single,
desc => "Standalone instance."}}
, {server, fun server/1}
, {w_mode, fun w_mode/1}
] ++ mongo_fields();
fields(rs) ->
[ {mongo_type, #{type => rs,
default => rs}}
default => rs,
desc => "Replica set."}}
, {servers, fun servers/1}
, {w_mode, fun w_mode/1}
, {r_mode, fun r_mode/1}
@ -68,7 +70,8 @@ fields(rs) ->
] ++ mongo_fields();
fields(sharded) ->
[ {mongo_type, #{type => sharded,
default => sharded}}
default => sharded,
desc => "Sharded cluster."}}
, {servers, fun servers/1}
, {w_mode, fun w_mode/1}
] ++ mongo_fields();
@ -306,22 +309,27 @@ servers(desc) -> ?SERVERS_DESC ++ server(desc);
servers(_) -> undefined.
w_mode(type) -> hoconsc:enum([unsafe, safe]);
w_mode(desc) -> "Write mode.";
w_mode(default) -> unsafe;
w_mode(_) -> undefined.
r_mode(type) -> hoconsc:enum([master, slave_ok]);
r_mode(desc) -> "Read mode.";
r_mode(default) -> master;
r_mode(_) -> undefined.
duration(type) -> emqx_schema:duration_ms();
duration(desc) -> "Time interval, such as timeout or TTL.";
duration(required) -> false;
duration(_) -> undefined.
replica_set_name(type) -> binary();
replica_set_name(desc) -> "Name of the replica set.";
replica_set_name(required) -> false;
replica_set_name(_) -> undefined.
srv_record(type) -> boolean();
srv_record(desc) -> "Use DNS SRV record.";
srv_record(default) -> false;
srv_record(_) -> undefined.

View File

@ -52,7 +52,8 @@ fields(_) -> [].
ssl_fields() ->
[ {ssl, #{type => hoconsc:ref(emqx_schema, ssl_client_opts),
default => #{<<"enable">> => false}
default => #{<<"enable">> => false},
desc => "SSL connection settings."
}
}
].
@ -66,24 +67,29 @@ relational_db_fields() ->
].
database(type) -> binary();
database(desc) -> "Database name.";
database(required) -> true;
database(validator) -> [?NOT_EMPTY("the value of the field 'database' cannot be empty")];
database(_) -> undefined.
pool_size(type) -> integer();
pool_size(desc) -> "Size of the connection pool.";
pool_size(default) -> 8;
pool_size(validator) -> [?MIN(1)];
pool_size(_) -> undefined.
username(type) -> binary();
username(desc) -> "EMQX's username in the external database.";
username(required) -> false;
username(_) -> undefined.
password(type) -> binary();
password(desc) -> "EMQX's password in the external database.";
password(required) -> false;
password(_) -> undefined.
auto_reconnect(type) -> boolean();
auto_reconnect(desc) -> "Enable automatic reconnect to the database.";
auto_reconnect(default) -> true;
auto_reconnect(_) -> undefined.

View File

@ -22,6 +22,7 @@
-export([ roots/0
, fields/1
, desc/1
]).
-export([ ingress_desc/0
@ -43,19 +44,19 @@ fields("connector") ->
[ {mode,
sc(hoconsc:enum([cluster_shareload]),
#{ default => cluster_shareload
, desc => """
The mode of the MQTT Bridge. Can be one of 'cluster_singleton' or 'cluster_shareload'<br>
, desc => "
The mode of the MQTT Bridge. Can be one of 'cluster_singleton' or 'cluster_shareload'<br/>
- cluster_singleton: create a unique MQTT connection within the emqx cluster.<br>
- cluster_singleton: create a unique MQTT connection within the emqx cluster.<br/>
In 'cluster_singleton' node, all messages toward the remote broker go through the same
MQTT connection.<br>
- cluster_shareload: create an MQTT connection on each node in the emqx cluster.<br>
MQTT connection.<br/>
- cluster_shareload: create an MQTT connection on each node in the emqx cluster.<br/>
In 'cluster_shareload' mode, the incoming load from the remote broker is shared by
using shared subscription.<br>
using shared subscription.<br/>
Note that the 'clientid' is suffixed by the node name, this is to avoid
clientid conflicts between different nodes. And we can only use shared subscription
topic filters for 'remote_topic' of ingress connections.
"""
"
})}
, {server,
sc(emqx_schema:ip_port(),
@ -97,11 +98,7 @@ topic filters for 'remote_topic' of ingress connections.
, desc => "Max inflight (sent, but un-acked) messages of the MQTT protocol"
})}
, {replayq,
sc(ref("replayq"),
#{ desc => """
Queue messages in disk files.
"""
})}
sc(ref("replayq"), #{})}
] ++ emqx_connector_schema_lib:ssl_fields();
fields("ingress") ->
@ -120,23 +117,23 @@ fields("ingress") ->
, {local_topic,
sc(binary(),
#{ validator => fun ?MODULE:non_empty_string/1
, desc => """
Send messages to which topic of the local broker.<br>
, desc => "
Send messages to which topic of the local broker.<br/>
Template with variables is allowed.
"""
"
})}
, {local_qos,
sc(qos(),
#{ default => <<"${qos}">>
, desc => """
The QoS of the MQTT message to be sent.<br>
Template with variables is allowed."""
, desc => "
The QoS of the MQTT message to be sent.<br/>
Template with variables is allowed."
})}
, {hookpoint,
sc(binary(),
#{ desc => """
#{ desc => "
The hook point will be triggered when there's any message received from the remote broker.
"""
"
})}
] ++ common_inout_confs();
@ -151,94 +148,101 @@ fields("egress") ->
sc(binary(),
#{ default => <<"${topic}">>
, validator => fun ?MODULE:non_empty_string/1
, desc => """
Forward to which topic of the remote broker.<br>
, desc => "
Forward to which topic of the remote broker.<br/>
Template with variables is allowed.
"""
"
})}
, {remote_qos,
sc(qos(),
#{ default => <<"${qos}">>
, desc => """
The QoS of the MQTT message to be sent.<br>
Template with variables is allowed."""
, desc => "
The QoS of the MQTT message to be sent.<br/>
Template with variables is allowed."
})}
] ++ common_inout_confs();
fields("replayq") ->
[ {dir,
sc(hoconsc:union([boolean(), string()]),
#{ desc => """
The dir where the replayq file saved.<br>
#{ desc => "
The directory where the replayq files are saved.<br/>
Set to 'false' disables the replayq feature.
"""
"
})}
, {seg_bytes,
sc(emqx_schema:bytesize(),
#{ default => "100MB"
, desc => """
The size in bytes of a single segment.<br>
, desc => "
The size in bytes of a single segment.<br/>
A segment is mapping to a file in the replayq dir. If the current segment is full, a new segment
(file) will be opened to write.
"""
"
})}
, {offload,
sc(boolean(),
#{ default => false
, desc => """
In offload mode, the disk queue is only used to offload queue tail segments.<br>
, desc => "
In offload mode, the disk queue is only used to offload queue tail segments.<br/>
The messages are cached in the memory first, then it writes to the replayq files after the size of
the memory cache reaches 'seg_bytes'.
"""
"
})}
].
desc("ingress") ->
ingress_desc();
desc("egress") ->
egress_desc();
desc("replayq") ->
"Queue messages in disk files.";
desc(_) ->
undefined.
topic_mappings() ->
[ {ingress,
sc(ref("ingress"),
#{ default => #{}
, desc => ingress_desc()
})}
, {egress,
sc(ref("egress"),
#{ default => #{}
, desc => egress_desc()
})}
].
ingress_desc() -> """
ingress_desc() -> "
The ingress config defines how this bridge receives messages from the remote MQTT broker, and then
send them to the local broker.<br>
send them to the local broker.<br/>
Template with variables is allowed in 'local_topic', 'remote_qos', 'qos', 'retain',
'payload'.<br>
'payload'.<br/>
NOTE: if this bridge is used as the input of a rule (emqx rule engine), and also local_topic is
configured, then messages got from the remote broker will be sent to both the 'local_topic' and
the rule.
""".
".
egress_desc() -> """
egress_desc() -> "
The egress config defines how this bridge forwards messages from the local broker to the remote
broker.<br>
Template with variables is allowed in 'remote_topic', 'qos', 'retain', 'payload'.<br>
broker.<br/>
Template with variables is allowed in 'remote_topic', 'qos', 'retain', 'payload'.<br/>
NOTE: if this bridge is used as the output of a rule (emqx rule engine), and also local_topic
is configured, then both the data got from the rule and the MQTT messages that matches
local_topic will be forwarded.
""".
".
common_inout_confs() ->
[ {retain,
sc(hoconsc:union([boolean(), binary()]),
#{ default => <<"${retain}">>
, desc => """
The 'retain' flag of the MQTT message to be sent.<br>
Template with variables is allowed."""
, desc => "
The 'retain' flag of the MQTT message to be sent.<br/>
Template with variables is allowed."
})}
, {payload,
sc(binary(),
#{ default => <<"${payload}">>
, desc => """
The payload of the MQTT message to be sent.<br>
Template with variables is allowed."""
, desc => "
The payload of the MQTT message to be sent.<br/>
Template with variables is allowed."
})}
].

View File

@ -32,7 +32,7 @@
-reflect_type([duration/0]).
-export([namespace/0, roots/0, fields/1, server_config/0]).
-export([namespace/0, roots/0, fields/1, desc/1, server_config/0]).
namespace() -> exhook.
@ -41,7 +41,9 @@ roots() -> [exhook].
fields(exhook) ->
[{servers,
sc(hoconsc:array(ref(server)),
#{default => []})}
#{ default => []
, desc => "List of exhook servers."
})}
];
fields(server) ->
@ -61,7 +63,7 @@ fields(server) ->
})}
, {failed_action, failed_action()}
, {ssl,
sc(ref(ssl_conf), #{desc => "SSL client config"})}
sc(ref(ssl_conf), #{})}
, {auto_reconnect,
sc(hoconsc:union([false, duration()]),
#{ default => "60s"
@ -81,6 +83,15 @@ fields(ssl_conf) ->
Schema = emqx_schema:client_ssl_opts_schema(#{}),
lists:keydelete(user_lookup_fun, 1, Schema).
desc(exhook) ->
"External hook (exhook) configuration.";
desc(server) ->
"gRPC server configuration.";
desc(ssl_conf) ->
"SSL client configuration.";
desc(_) ->
undefined.
%% types
sc(Type, Meta) -> Meta#{type => Type}.

View File

@ -294,18 +294,22 @@ fields(exproto) ->
fields(exproto_grpc_server) ->
[ {bind,
sc(hoconsc:union([ip_port(), integer()]),
#{required => true})}
#{ required => true
, desc => "Listening address and port for the gRPC server."
})}
, {ssl,
sc(ref(ssl_server_opts),
#{ required => {false, recursively}
, desc => "SSL configuration for the gRPC server."
})}
];
fields(exproto_grpc_handler) ->
[ {address, sc(binary(), #{required => true})}
[ {address, sc(binary(), #{required => true, desc => "gRPC server address."})}
, {ssl,
sc(ref(emqx_schema, ssl_client_opts),
#{ required => {false, recursively}
, desc => "SSL configuration for the gRPC client."
})}
];
@ -318,9 +322,9 @@ fields(ssl_server_opts) ->
}, true);
fields(clientinfo_override) ->
[ {username, sc(binary())}
, {password, sc(binary())}
, {clientid, sc(binary())}
[ {username, sc(binary(), #{desc => "Template for overriding username."})}
, {password, sc(binary(), #{desc => "Template for overriding password."})}
, {clientid, sc(binary(), #{desc => "Template for overriding clientid."})}
];
fields(lwm2m_translators) ->
@ -362,25 +366,31 @@ the LwM2M client"
];
fields(translator) ->
[ {topic, sc(binary(), #{required => true})}
, {qos, sc(emqx_schema:qos(), #{default => 0})}
[ {topic, sc(binary(),
#{ required => true
, desc => "Which topic the device's upstream message is published to."
})}
, {qos, sc(emqx_schema:qos(),
#{ default => 0
, desc => "QoS of the published messages."
})}
];
fields(udp_listeners) ->
[ {udp, sc(map(name, ref(udp_listener)))}
, {dtls, sc(map(name, ref(dtls_listener)))}
[ {udp, sc(map(name, ref(udp_listener)), #{desc => "UDP configuration."})}
, {dtls, sc(map(name, ref(dtls_listener)), #{desc => "DTLS configuration."})}
];
fields(tcp_listeners) ->
[ {tcp, sc(map(name, ref(tcp_listener)))}
, {ssl, sc(map(name, ref(ssl_listener)))}
[ {tcp, sc(map(name, ref(tcp_listener)), #{desc => "TCP configuration."})}
, {ssl, sc(map(name, ref(ssl_listener)), #{desc => "SSL configuration."})}
];
fields(udp_tcp_listeners) ->
[ {udp, sc(map(name, ref(udp_listener)))}
, {dtls, sc(map(name, ref(dtls_listener)))}
, {tcp, sc(map(name, ref(tcp_listener)))}
, {ssl, sc(map(name, ref(ssl_listener)))}
[ {udp, sc(map(name, ref(udp_listener)), #{desc => "UDP configuration."})}
, {dtls, sc(map(name, ref(dtls_listener)), #{desc => "DTLS configuration."})}
, {tcp, sc(map(name, ref(tcp_listener)), #{desc => "TCP configuration."})}
, {ssl, sc(map(name, ref(ssl_listener)), #{desc => "SSL configuration."})}
];
fields(tcp_listener) ->
@ -524,9 +534,7 @@ It has two purposes:
, desc => ""
})}
, {clientinfo_override,
sc(ref(clientinfo_override),
#{ desc => "ClientInfo override"
})}
sc(ref(clientinfo_override), #{})}
, {?EMQX_AUTHENTICATION_CONFIG_ROOT_NAME_ATOM, authentication_schema()}
].

View File

@ -23,7 +23,8 @@
-export([
namespace/0,
roots/0,
fields/1
fields/1,
desc/1
]).
namespace() -> modules.
@ -65,6 +66,17 @@ fields("rewrite") ->
fields("topic_metrics") ->
[{topic, sc(binary(), #{})}].
desc("telemetry") ->
"Settings for the telemetry module.";
desc("delayed") ->
"Settings for the delayed module.";
desc("rewrite") ->
"Settings for the rewrite module.";
desc("topic_metrics") ->
"Settings for the topic metrics module.";
desc(_) ->
undefined.
regular_expression(type) -> binary();
regular_expression(desc) -> "Regular expressions";
regular_expression(example) -> "^x/y/(.+)$";

View File

@ -22,7 +22,9 @@
-export([ namespace/0
, roots/0
, fields/1]).
, fields/1
, desc/1
]).
-export([ validate_sql/1
]).
@ -82,30 +84,7 @@ counter of the function output or the bridge channel will increase.
fields("builtin_output_republish") ->
[ {function, sc(republish, #{desc => "Republish the message as a new MQTT message"})}
, {args, sc(ref("republish_args"),
#{ desc => """
The arguments of the built-in 'republish' output.<br>
We can use variables in the args.<br>
The variables are selected by the rule. For example, if the rule SQL is defined as following:
<code>
SELECT clientid, qos, payload FROM \"t/1\"
</code>
Then there are 3 variables available: <code>clientid</code>, <code>qos</code> and
<code>payload</code>. And if we've set the args to:
<code>
{
topic = \"t/${clientid}\"
qos = \"${qos}\"
payload = \"msg: ${payload}\"
}
</code>
When the rule is triggered by an MQTT message with payload = \"hello\", qos = 1,
clientid = \"Steve\", the rule will republish a new MQTT message to topic \"t/Steve\",
payload = \"msg: hello\", and qos = 1.
"""
, default => #{}
})}
, {args, sc(ref("republish_args"), #{default => #{}})}
];
fields("builtin_output_console") ->
@ -178,6 +157,38 @@ of the rule, then the string \"undefined\" is used.
})}
].
desc("rule_engine") ->
"Configuration for the EMQX Rule Engine.";
desc("rules") ->
"Configuration for a rule.";
desc("builtin_output_republish") ->
"Configuration for a built-in output.";
desc("builtin_output_console") ->
"Configuration for a built-in output.";
desc("user_provided_function") ->
"Configuration for a built-in output.";
desc("republish_args") ->
    "The arguments of the built-in 'republish' output.<br/>"
"One can use variables in the args.<br>\n"
"The variables are selected by the rule. For example, if the rule SQL is defined as following:\n"
"<code>\n"
" SELECT clientid, qos, payload FROM \"t/1\"\n"
"</code>\n"
"Then there are 3 variables available: <code>clientid</code>, <code>qos</code> and\n"
"<code>payload</code>. And if we've set the args to:\n"
"<code>\n"
" {\n"
" topic = \"t/${clientid}\"\n"
" qos = \"${qos}\"\n"
" payload = \"msg: ${payload}\"\n"
" }\n"
"</code>\n"
"When the rule is triggered by an MQTT message with payload = `hello`, qos = 1,\n"
"clientid = `Steve`, the rule will republish a new MQTT message to topic `t/Steve`,\n"
"payload = `msg: hello`, and `qos = 1`.";
desc(_) ->
undefined.
rule_name() ->
{"name", sc(binary(),
#{ desc => "The name of the rule"