Merge branch 'master' into chore/authn-fields
commit bd0d0d9797
@@ -7,3 +7,4 @@ scripts/* text eol=lf
 *.jpg -text
 *.png -text
 *.pdf -text
+scripts/erlfmt -text
@@ -23,7 +23,7 @@ on:
 jobs:
   linux:
-    runs-on: ubuntu-20.04
+    runs-on: aws-amd64

     strategy:
       fail-fast: false
@@ -43,6 +43,9 @@ jobs:
     container: "ghcr.io/emqx/emqx-builder/5.0-10:${{ matrix.elixir }}-${{ matrix.otp }}-${{ matrix.os }}"

     steps:
+      - name: cleanup
+        run: |
+          rm -rf "${GITHUB_WORKSPACE}/"
       - uses: actions/checkout@v1
       - name: prepare
         run: |
@@ -604,8 +604,12 @@ mqtt 下所有的配置作为全局的默认值存在,它可以被 <code>zone<

   mqtt_idle_timeout {
     desc {
-      en: """Close TCP connections from the clients that have not sent MQTT CONNECT message within this interval."""
-      zh: """关闭在此时间间隔内未发送 MQTT CONNECT 消息的客户端的 TCP 连接。"""
+      en: """After the TCP connection is established, if the MQTT CONNECT packet from the client is not received within the time specified by <code>idle_timeout</code>, the connection will be disconnected."""
+      zh: """TCP 连接建立后,如果在 <code>idle_timeout</code> 指定的时间内未收到客户端的 MQTT CONNECT 报文,则连接将被断开。"""
     }
+    label: {
+      en: """Idle Timeout"""
+      zh: """空闲超时"""
+    }
   }
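Usage sketch (not part of this commit): with the behavior described above, the option could be set in emqx.conf as below; the 15s value is a hypothetical example.

    mqtt {
      idle_timeout = 15s
    }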
@@ -614,19 +618,31 @@ mqtt 下所有的配置作为全局的默认值存在,它可以被 <code>zone<
       en: """Maximum MQTT packet size allowed."""
       zh: """允许的最大 MQTT 报文大小。"""
     }
+    label: {
+      en: """Max Packet Size"""
+      zh: """最大报文大小"""
+    }
   }

   mqtt_max_clientid_len {
     desc {
-      en: """Maximum allowed length of MQTT clientId."""
-      zh: """允许的最大 MQTT Client ID 长度"""
+      en: """Maximum allowed length of MQTT Client ID."""
+      zh: """允许的最大 MQTT Client ID 长度。"""
     }
+    label: {
+      en: """Max Client ID Length"""
+      zh: """最大 Client ID 长度"""
+    }
   }

   mqtt_max_topic_levels {
     desc {
       en: """Maximum topic levels allowed."""
-      zh: """允许的 Topic 最大层级数"""
+      zh: """允许的最大主题层级。"""
     }
+    label: {
+      en: """Max Topic Levels"""
+      zh: """最大主题层级"""
+    }
   }
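A combined sketch for the three limits above (values are hypothetical, not defaults from this commit):

    mqtt {
      max_packet_size = 1MB
      max_clientid_len = 65535
      max_topic_levels = 128
    }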
@@ -635,40 +651,64 @@ mqtt 下所有的配置作为全局的默认值存在,它可以被 <code>zone<
       en: """Maximum QoS allowed."""
       zh: """允许的最大 QoS 等级。"""
     }
+    label: {
+      en: """Max QoS"""
+      zh: """最大 QoS"""
+    }
   }

   mqtt_max_topic_alias {
     desc {
-      en: """Maximum Topic Alias, 0 means no topic alias supported."""
+      en: """Maximum topic alias, 0 means no topic alias supported."""
       zh: """允许的最大主题别名数,0 表示不支持主题别名。"""
     }
+    label: {
+      en: """Max Topic Alias"""
+      zh: """最大主题别名"""
+    }
   }

   mqtt_retain_available {
     desc {
-      en: """Support MQTT retained messages."""
-      zh: """是否支持 retained 消息。"""
+      en: """Whether to enable support for MQTT retained message."""
+      zh: """是否启用对 MQTT 保留消息的支持。"""
     }
+    label: {
+      en: """Retain Available"""
+      zh: """保留消息可用"""
+    }
   }

   mqtt_wildcard_subscription {
     desc {
-      en: """Support MQTT Wildcard Subscriptions."""
-      zh: """是否支持主题的通配符订阅。"""
+      en: """Whether to enable support for MQTT wildcard subscription."""
+      zh: """是否启用对 MQTT 通配符订阅的支持。"""
     }
+    label: {
+      en: """Wildcard Subscription Available"""
+      zh: """通配符订阅可用"""
+    }
   }

   mqtt_shared_subscription {
     desc {
-      en: """Support MQTT Shared Subscriptions."""
-      zh: """是否支持 MQTT 共享订阅"""
+      en: """Whether to enable support for MQTT shared subscription."""
+      zh: """是否启用对 MQTT 共享订阅的支持。"""
     }
+    label: {
+      en: """Shared Subscription Available"""
+      zh: """共享订阅可用"""
+    }
   }

   mqtt_ignore_loop_deliver {
     desc {
-      en: """Ignore loop delivery of messages for MQTT v3.1.1/v3.1.0."""
-      zh: """是否为 MQTT v3.1.1/v3.1.0 客户端忽略接收自己发布出消息"""
+      en: """Ignore loop delivery of messages for MQTT v3.1.1/v3.1.0, similar to <code>No Local</code> subscription option in MQTT 5.0"""
+      zh: """是否为 MQTT v3.1.1/v3.1.0 客户端忽略投递自己发布的消息,类似于 MQTT 5.0 中的 <code>No Local</code> 订阅选项"""
     }
+    label: {
+      en: """Ignore Loop Deliver"""
+      zh: """忽略循环投递"""
+    }
   }
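A combined sketch of the protocol-feature switches documented above (hypothetical values):

    mqtt {
      max_topic_alias = 65535
      retain_available = true
      wildcard_subscription = true
      shared_subscription = true
      ignore_loop_deliver = false   # MQTT 5.0 clients can use the No Local subscription option instead
    }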
@@ -679,35 +719,53 @@ When set to true, invalid utf8 strings in for example client ID, topic name, etc
       zh: """是否以严格模式解析 MQTT 消息。
当设置为 true 时,例如客户端 ID、主题名称等中的无效 utf8 字符串将导致客户端断开连接。"""
     }
+    label: {
+      en: """Strict Mode"""
+      zh: """严格模式"""
+    }
   }

   mqtt_response_information {
     desc {
-      en: """Specify the response information returned to the client. This feature is disabled if is set to \"\"."""
-      zh: """指定返回给客户端的响应信息。如果设置为 \"\",则禁用此功能。"""
+      en: """Specify the response information returned to the client. This feature is disabled if it is set to \"\". Applies only to clients using MQTT 5.0."""
+      zh: """指定返回给客户端的响应信息。如果设置为 \"\",则禁用此功能。仅适用于使用 MQTT 5.0 协议的客户端。"""
     }
+    label: {
+      en: """Response Information"""
+      zh: """响应信息"""
+    }
   }
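Sketch of the option above; the string value is a hypothetical example, and per the description an empty string disables the feature:

    mqtt {
      response_information = "example"   # MQTT 5.0 clients only
    }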
   mqtt_server_keepalive {
     desc {
-      en: """'Server Keep Alive' of MQTT 5.0.
-If the server returns a 'Server Keep Alive' in the CONNACK packet, the client MUST use that value instead of the value it sent as the 'Keep Alive'."""
-      zh: """MQTT 5.0 的 'Server Keep Alive' 属性。
-如果服务器在 CONNACK 数据包中返回'Server Keep Alive',则客户端必须使用该值作为实际的 'Keep Alive' 值。"""
+      en: """The keep alive that EMQX requires the client to use. If configured as <code>disabled</code>, it means that the keep alive specified by the client will be used. Requires <code>Server Keep Alive</code> in MQTT 5.0, so it is only applicable to clients using the MQTT 5.0 protocol."""
+      zh: """EMQX 要求客户端使用的保活时间,配置为 <code>disabled</code> 表示将使用客户端指定的保活时间。需要用到 MQTT 5.0 中的 <code>Server Keep Alive</code>,因此仅适用于使用 MQTT 5.0 协议的客户端。"""
     }
+    label: {
+      en: """Server Keep Alive"""
+      zh: """服务端保持连接"""
+    }
   }
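Sketch of the documented disabled form (per the new description, the client-specified keep alive is then used):

    mqtt {
      server_keepalive = disabled
    }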
   mqtt_keepalive_backoff {
     desc {
-      en: """The backoff for MQTT keepalive timeout. The broker will close the connection after idling for 'Keepalive * backoff * 2'."""
-      zh: """Broker 判定客户端 Keep Alive 超时的退避乘数。EMQX 将在'Keepalive * backoff * 2' 空闲后关闭连接。"""
+      en: """The backoff multiplier used by the broker to determine the client keep alive timeout. If EMQX doesn't receive any packet in <code>Keep Alive * Backoff * 2</code> seconds, EMQX will close the current connection."""
+      zh: """Broker 判定客户端保活超时使用的退避乘数。如果 EMQX 在 <code>Keep Alive * Backoff * 2</code> 秒内未收到任何报文,EMQX 将关闭当前连接。"""
     }
+    label: {
+      en: """Keep Alive Backoff"""
+      zh: """保持连接退避乘数"""
+    }
   }
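Worked example of the timeout formula in the description: with a client Keep Alive of 60 seconds and a hypothetical backoff of 0.75, EMQX closes the connection after 60 * 0.75 * 2 = 90 seconds without receiving any packet:

    mqtt {
      keepalive_backoff = 0.75
    }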
   mqtt_max_subscriptions {
     desc {
-      en: """Maximum number of subscriptions allowed."""
-      zh: """允许的每个客户端最大订阅数"""
+      en: """Maximum number of subscriptions allowed per client."""
+      zh: """允许每个客户端建立的最大订阅数量。"""
     }
+    label: {
+      en: """Max Subscriptions"""
+      zh: """最大订阅数量"""
+    }
   }
@@ -716,40 +774,65 @@ If the server returns a 'Server Keep Alive' in the CONNACK packet, the client MU
       en: """Force upgrade of QoS level according to subscription."""
       zh: """投递消息时,是否根据订阅主题时的 QoS 等级来强制提升派发的消息的 QoS 等级。"""
     }
+    label: {
+      en: """Upgrade QoS"""
+      zh: """升级 QoS"""
+    }
   }

   mqtt_max_inflight {
     desc {
-      en: """Maximum size of the Inflight Window storing QoS1/2 messages delivered but un-acked."""
-      zh: """飞行窗口的最大值。"""
+      en: """Maximum number of QoS 1 and QoS 2 messages that are allowed to be delivered simultaneously before completing the acknowledgment."""
+      zh: """允许在完成应答前同时投递的 QoS 1 和 QoS 2 消息的最大数量。"""
     }
+    label: {
+      en: """Max Inflight"""
+      zh: """最大飞行窗口"""
+    }
   }

   mqtt_retry_interval {
     desc {
-      en: """Retry interval for QoS1/2 message delivering."""
-      zh: """QoS1/2 消息的重新投递间隔。"""
+      en: """Retry interval for QoS 1/2 message delivering."""
+      zh: """QoS 1/2 消息的重新投递间隔。"""
     }
+    label: {
+      en: """Retry Interval"""
+      zh: """重试间隔"""
+    }
   }

   mqtt_max_awaiting_rel {
     desc {
-      en: """Maximum QoS2 packets (Client -> Broker) awaiting PUBREL."""
+      en: """Maximum QoS 2 packets (Client -> Broker) awaiting PUBREL."""
       zh: """PUBREL (Client -> Broker) 最大等待队列长度。"""
     }
+    label: {
+      en: """Max Awaiting PUBREL"""
+      zh: """Max Awaiting PUBREL"""
+    }
   }

   mqtt_await_rel_timeout {
     desc {
-      en: """The QoS2 messages (Client -> Broker) will be dropped if awaiting PUBREL timeout."""
+      en: """The QoS 2 messages (Client -> Broker) will be dropped if awaiting PUBREL timeout."""
       zh: """PUBREL (Client -> Broker) 最大等待时间,超时则会被丢弃。"""
     }
+    label: {
+      en: """Max Awaiting PUBREL TIMEOUT"""
+      zh: """Max Awaiting PUBREL TIMEOUT"""
+    }
   }

   mqtt_session_expiry_interval {
     desc {
-      en: """Default session expiry interval for MQTT V3.1.1 connections."""
-      zh: """Session 默认超时时间。"""
+      en: """Specifies how long after the connection is disconnected the session expires, only for non-MQTT 5.0 connections."""
+      zh: """指定会话将在连接断开后多久过期,仅适用于非 MQTT 5.0 的连接。"""
     }
+    label: {
+      en: """Session Expiry Interval"""
+      zh: """会话过期间隔"""
+    }
   }
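Delivery and session tuning sketch covering the options above (all values hypothetical):

    mqtt {
      max_inflight = 32
      retry_interval = 30s
      max_awaiting_rel = 100
      await_rel_timeout = 300s
      session_expiry_interval = 2h   # non-MQTT 5.0 connections only
    }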
@@ -758,6 +841,10 @@ If the server returns a 'Server Keep Alive' in the CONNACK packet, the client MU
       en: """Maximum queue length. Enqueued messages when persistent client disconnected, or inflight window is full."""
       zh: """消息队列最大长度。持久客户端断开连接或飞行窗口已满时排队的消息长度。"""
     }
+    label: {
+      en: """Max Message Queue Length"""
+      zh: """最大消息队列长度"""
+    }
   }

   mqtt_mqueue_priorities {
@@ -783,42 +870,96 @@ To configure <code>\"topic/1\" > \"topic/2\"</code>:
 <code>mqueue_priorities: {\"topic/1\": 10, \"topic/2\": 8}</code>
 """
     }
+    label: {
+      en: """Topic Priorities"""
+      zh: """主题优先级"""
+    }
   }

   mqtt_mqueue_default_priority {
     desc {
-      en: """Default to the highest priority for topics not matching priority table."""
-      zh: """主题默认的优先级,不在 <code>mqtt.mqueue_priorities</code> 中的主题将会使用该优先级。"""
+      en: """Default topic priority, which will be used by topics not in <code>Topic Priorities</code> (<code>mqueue_priorities</code>)."""
+      zh: """默认的主题优先级,不在 <code>主题优先级</code>(<code>mqueue_priorities</code>) 中的主题将会使用该优先级。"""
     }
+    label: {
+      en: """Default Topic Priorities"""
+      zh: """默认主题优先级"""
+    }
   }

   mqtt_mqueue_store_qos0 {
     desc {
-      en: """Support enqueue QoS0 messages."""
-      zh: """消息队列是否存储 QoS0 消息。"""
+      en: """Specifies whether to store QoS 0 messages in the message queue while the connection is down but the session remains."""
+      zh: """指定在连接断开但会话保持期间,是否需要在消息队列中存储 QoS 0 消息。"""
    }
+    label: {
+      en: """Store QoS 0 Message"""
+      zh: """存储 QoS 0 消息"""
+    }
   }
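Message-queue sketch tying the options above together; values are hypothetical and the priority map reuses the example from the description:

    mqtt {
      mqueue_priorities = {"topic/1": 10, "topic/2": 8}
      mqueue_default_priority = highest
      mqueue_store_qos0 = true
    }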
   mqtt_use_username_as_clientid {
     desc {
-      en: """Replace client ID with the username."""
-      zh: """是否使用 Username 替换 Client ID。"""
+      en: """Whether to use Username as Client ID.
+This setting is applied after <code>Use Peer Certificate as Username</code> (<code>peer_cert_as_username</code>) and <code>Use peer certificate as Client ID</code> (<code>peer_cert_as_clientid</code>).
+"""
+      zh: """是否使用用户名作为客户端 ID。
+此设置的作用时间晚于 <code>使用对端证书作为用户名</code>(<code>peer_cert_as_username</code>) 和 <code>使用对端证书作为客户端 ID</code>(<code>peer_cert_as_clientid</code>)。
+"""
     }
+    label: {
+      en: """Use Username as Client ID"""
+      zh: """使用用户名作为客户端 ID"""
+    }
   }
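Sketch of the ordering described above: with both options set, the CN-derived username ends up as the Client ID, because use_username_as_clientid is applied after peer_cert_as_username (values hypothetical):

    mqtt {
      peer_cert_as_username = cn
      use_username_as_clientid = true
    }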
   mqtt_peer_cert_as_username {
     desc {
-      en: """Use the CN, DN or CRT field from the client certificate as a username.
-Only works for the TLS connection."""
-      zh: """使用客户端证书中的 CN, DN 字段或整个证书来作为客户端用户名。"""
+      en: """Use the CN, DN field in the peer certificate or the entire certificate content as Username. Only works for the TLS connection.
+Supported configurations are the following:
+- <code>cn</code>: Take the CN field of the certificate as Username
+- <code>dn</code>: Take the DN field of the certificate as Username
+- <code>crt</code>: Take the content of the <code>DER</code> or <code>PEM</code> certificate as Username
+- <code>pem</code>: Convert <code>DER</code> certificate content to <code>PEM</code> format as Username
+- <code>md5</code>: Take the MD5 value of the content of the <code>DER</code> or <code>PEM</code> certificate as Username
+"""
+      zh: """使用对端证书中的 CN, DN 字段或整个证书内容来作为用户名。仅适用于 TLS 连接。
+目前支持配置为以下内容:
+- <code>cn</code>: 取证书的 CN 字段作为 Username
+- <code>dn</code>: 取证书的 DN 字段作为 Username
+- <code>crt</code>: 取 <code>DER</code> 或 <code>PEM</code> 证书的内容作为 Username
+- <code>pem</code>: 将 <code>DER</code> 证书内容转换为 <code>PEM</code> 格式后作为 Username
+- <code>md5</code>: 取 <code>DER</code> 或 <code>PEM</code> 证书的内容的 MD5 值作为 Username
+"""
     }
+    label: {
+      en: """Use Peer Certificate as Username"""
+      zh: """使用对端证书作为用户名"""
+    }
   }

   mqtt_peer_cert_as_clientid {
     desc {
-      en: """Use the CN, DN or CRT field from the client certificate as a clientid.
-Only works for the TLS connection."""
-      zh: """使用客户端证书中的 CN, DN 字段或整个证书来作为客户端 ID。"""
+      en: """Use the CN, DN field in the peer certificate or the entire certificate content as Client ID. Only works for the TLS connection.
+Supported configurations are the following:
+- <code>cn</code>: Take the CN field of the certificate as Client ID
+- <code>dn</code>: Take the DN field of the certificate as Client ID
+- <code>crt</code>: Take the content of the <code>DER</code> or <code>PEM</code> certificate as Client ID
+- <code>pem</code>: Convert <code>DER</code> certificate content to <code>PEM</code> format as Client ID
+- <code>md5</code>: Take the MD5 value of the content of the <code>DER</code> or <code>PEM</code> certificate as Client ID
+"""
+      zh: """使用对端证书中的 CN, DN 字段或整个证书内容来作为客户端 ID。仅适用于 TLS 连接。
+目前支持配置为以下内容:
+- <code>cn</code>: 取证书的 CN 字段作为 Client ID
+- <code>dn</code>: 取证书的 DN 字段作为 Client ID
+- <code>crt</code>: 取 <code>DER</code> 或 <code>PEM</code> 证书的内容作为 Client ID
+- <code>pem</code>: 将 <code>DER</code> 证书内容转换为 <code>PEM</code> 格式后作为 Client ID
+- <code>md5</code>: 取 <code>DER</code> 或 <code>PEM</code> 证书的内容的 MD5 值作为 Client ID
+"""
     }
+    label: {
+      en: """Use Peer Certificate as Client ID"""
+      zh: """使用对端证书作为客户端 ID"""
+    }
   }
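TLS identity-mapping sketch choosing two of the supported values listed above (hypothetical combination):

    mqtt {
      peer_cert_as_username = cn    # Username := certificate CN field
      peer_cert_as_clientid = md5   # Client ID := MD5 of the DER/PEM certificate
    }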
@@ -874,11 +1015,11 @@ Only works for the TLS connection."""

   broker_shared_dispatch_ack_enabled {
     desc {
-      en: """Enable/disable shared dispatch acknowledgement for QoS1 and QoS2 messages.
+      en: """Enable/disable shared dispatch acknowledgement for QoS 1 and QoS 2 messages.
 This should allow messages to be dispatched to a different subscriber in the group in case the picked (based on `shared_subscription_strategy`) subscriber is offline.
 """
-      zh: """启用/禁用 QoS1 和 QoS2 消息的共享派发确认。
+      zh: """启用/禁用 QoS 1 和 QoS 2 消息的共享派发确认。
 开启后,允许将消息从未及时回复 ACK 的订阅者 (例如,客户端离线)重新派发给另外一个订阅者。
 """
     }
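Sketch of enabling the shared-dispatch acknowledgement described above (key path assumed from the entry name):

    broker {
      shared_dispatch_ack_enabled = true
    }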
@@ -29,7 +29,7 @@
     {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.1"}}},
     {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.12.4"}}},
     {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}},
-    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.3"}}},
+    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.4"}}},
     {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}},
     {recon, {git, "https://github.com/ferd/recon", {tag, "2.5.1"}}},
     {snabbkaffe, {git, "https://github.com/kafka4beam/snabbkaffe.git", {tag, "1.0.0"}}}
@@ -57,6 +57,7 @@
     validate_heap_size/1,
     parse_user_lookup_fun/1,
     validate_alarm_actions/1,
+    non_empty_string/1,
     validations/0
 ]).
@@ -1515,7 +1516,7 @@ base_listener() ->
         )},
         {"acceptors",
             sc(
-                integer(),
+                pos_integer(),
                 #{
                     default => 16,
                     desc => ?DESC(base_listener_acceptors)
@@ -1523,7 +1524,7 @@ base_listener() ->
         )},
         {"max_connections",
             sc(
-                hoconsc:union([infinity, integer()]),
+                hoconsc:union([infinity, pos_integer()]),
                 #{
                     default => infinity,
                     desc => ?DESC(base_listener_max_connections)
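Listener sketch for the field above; after this change only infinity or a positive integer passes schema validation (listener name and value hypothetical):

    listeners.tcp.default {
      max_connections = 1024000
    }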
@@ -1823,13 +1824,7 @@ common_ssl_opts_schema(Defaults) ->
 %% @doc Make schema for SSL listener options.
 %% When it's for ranch listener, an extra field `handshake_timeout' is added.
 -spec server_ssl_opts_schema(map(), boolean()) -> hocon_schema:field_schema().
-server_ssl_opts_schema(Defaults1, IsRanchListener) ->
-    Defaults0 = #{
-        cacertfile => emqx:cert_file("cacert.pem"),
-        certfile => emqx:cert_file("cert.pem"),
-        keyfile => emqx:cert_file("key.pem")
-    },
-    Defaults = maps:merge(Defaults0, Defaults1),
+server_ssl_opts_schema(Defaults, IsRanchListener) ->
     D = fun(Field) -> maps:get(to_atom(Field), Defaults, undefined) end,
     Df = fun(Field, Default) -> maps:get(to_atom(Field), Defaults, Default) end,
     common_ssl_opts_schema(Defaults) ++
@@ -1882,15 +1877,7 @@ server_ssl_opts_schema(Defaults1, IsRanchListener) ->

 %% @doc Make schema for SSL client.
 -spec client_ssl_opts_schema(map()) -> hocon_schema:field_schema().
-client_ssl_opts_schema(Defaults1) ->
-    %% assert
-    true = lists:all(fun(K) -> is_atom(K) end, maps:keys(Defaults1)),
-    Defaults0 = #{
-        cacertfile => emqx:cert_file("cacert.pem"),
-        certfile => emqx:cert_file("client-cert.pem"),
-        keyfile => emqx:cert_file("client-key.pem")
-    },
-    Defaults = maps:merge(Defaults0, Defaults1),
+client_ssl_opts_schema(Defaults) ->
     common_ssl_opts_schema(Defaults) ++
         [
             {"server_name_indication",
@@ -1898,6 +1885,7 @@ client_ssl_opts_schema(Defaults1) ->
                     hoconsc:union([disable, string()]),
                     #{
                         required => false,
+                        validator => fun emqx_schema:non_empty_string/1,
                         desc => ?DESC(client_ssl_opts_schema_server_name_indication)
                     }
                 )}
@@ -2177,3 +2165,8 @@ authentication(Type) ->
 -spec qos() -> typerefl:type().
 qos() ->
     typerefl:alias("qos", typerefl:union([0, 1, 2])).
+
+non_empty_string(<<>>) -> {error, empty_string_not_allowed};
+non_empty_string("") -> {error, empty_string_not_allowed};
+non_empty_string(S) when is_binary(S); is_list(S) -> ok;
+non_empty_string(_) -> {error, invalid_string}.
@@ -476,7 +476,7 @@ to_client_opts(Opts) ->
     CertFile = ensure_str(Get(certfile)),
     CAFile = ensure_str(Get(cacertfile)),
     Verify = GetD(verify, verify_none),
-    SNI = ensure_str(Get(server_name_indication)),
+    SNI = ensure_sni(Get(server_name_indication)),
     Versions = integral_versions(Get(versions)),
     Ciphers = integral_ciphers(Versions, Get(ciphers)),
     filter([
@@ -505,6 +505,11 @@ fuzzy_map_get(Key, Options, Default) ->
             Default
     end.

+ensure_sni(disable) -> disable;
+ensure_sni(undefined) -> undefined;
+ensure_sni(L) when is_list(L) -> L;
+ensure_sni(B) when is_binary(B) -> unicode:characters_to_list(B, utf8).
+
 ensure_str(undefined) -> undefined;
 ensure_str(L) when is_list(L) -> L;
 ensure_str(B) when is_binary(B) -> unicode:characters_to_list(B, utf8).
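Client-TLS sketch showing the two server_name_indication shapes that ensure_sni/1 normalizes, the atom disable or a hostname string (placement of the ssl block is hypothetical):

    ssl {
      server_name_indication = disable
      # or: server_name_indication = "broker.example.com"
    }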
@@ -272,78 +272,65 @@ check_origin_header(Req, #{listener := {Type, Listener}} = Opts) ->
         false -> ok
     end.

-websocket_init([
-    Req,
-    #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts
-]) ->
-    {Peername, Peercert} =
-        case
-            emqx_config:get_listener_conf(Type, Listener, [proxy_protocol]) andalso
-                maps:get(proxy_header, Req)
-        of
-            #{src_address := SrcAddr, src_port := SrcPort, ssl := SSL} ->
-                SourceName = {SrcAddr, SrcPort},
-                %% Notice: Only CN is available in Proxy Protocol V2 additional info
-                SourceSSL =
-                    case maps:get(cn, SSL, undefined) of
-                        undeined -> nossl;
-                        CN -> [{pp2_ssl_cn, CN}]
-                    end,
-                {SourceName, SourceSSL};
-            #{src_address := SrcAddr, src_port := SrcPort} ->
-                SourceName = {SrcAddr, SrcPort},
-                {SourceName, nossl};
-            _ ->
-                {get_peer(Req, Opts), cowboy_req:cert(Req)}
-        end,
-    Sockname = cowboy_req:sock(Req),
-    WsCookie =
-        try
-            cowboy_req:parse_cookies(Req)
-        catch
-            error:badarg ->
-                ?SLOG(error, #{msg => "bad_cookie"}),
-                undefined;
-            Error:Reason ->
-                ?SLOG(error, #{
-                    msg => "failed_to_parse_cookie",
-                    exception => Error,
-                    reason => Reason
-                }),
-                undefined
-        end,
-    ConnInfo = #{
-        socktype => ws,
-        peername => Peername,
-        sockname => Sockname,
-        peercert => Peercert,
-        ws_cookie => WsCookie,
-        conn_mod => ?MODULE
-    },
-    Limiter = emqx_limiter_container:get_limiter_by_names(
-        [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg
-    ),
-    MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback),
-    FrameOpts = #{
-        strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]),
-        max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size])
-    },
-    ParseState = emqx_frame:initial_parse_state(FrameOpts),
-    Serialize = emqx_frame:serialize_opts(),
-    Channel = emqx_channel:init(ConnInfo, Opts),
-    GcState =
-        case emqx_config:get_zone_conf(Zone, [force_gc]) of
-            #{enable := false} -> undefined;
-            GcPolicy -> emqx_gc:init(GcPolicy)
-        end,
-    StatsTimer =
-        case emqx_config:get_zone_conf(Zone, [stats, enable]) of
-            true -> undefined;
-            false -> disabled
-        end,
-    %% MQTT Idle Timeout
-    IdleTimeout = emqx_channel:get_mqtt_conf(Zone, idle_timeout),
-    IdleTimer = start_timer(IdleTimeout, idle_timeout),
+websocket_init([Req, Opts]) ->
+    #{zone := Zone, limiter := LimiterCfg, listener := {Type, Listener}} = Opts,
+    case check_max_connection(Type, Listener) of
+        allow ->
+            {Peername, PeerCert} = get_peer_info(Type, Listener, Req, Opts),
+            Sockname = cowboy_req:sock(Req),
+            WsCookie = get_ws_cookie(Req),
+            ConnInfo = #{
+                socktype => ws,
+                peername => Peername,
+                sockname => Sockname,
+                peercert => PeerCert,
+                ws_cookie => WsCookie,
+                conn_mod => ?MODULE
+            },
+            Limiter = emqx_limiter_container:get_limiter_by_names(
+                [?LIMITER_BYTES_IN, ?LIMITER_MESSAGE_IN], LimiterCfg
+            ),
+            MQTTPiggyback = get_ws_opts(Type, Listener, mqtt_piggyback),
+            FrameOpts = #{
+                strict_mode => emqx_config:get_zone_conf(Zone, [mqtt, strict_mode]),
+                max_size => emqx_config:get_zone_conf(Zone, [mqtt, max_packet_size])
+            },
+            ParseState = emqx_frame:initial_parse_state(FrameOpts),
+            Serialize = emqx_frame:serialize_opts(),
+            Channel = emqx_channel:init(ConnInfo, Opts),
+            GcState = get_force_gc(Zone),
+            StatsTimer = get_stats_enable(Zone),
+            %% MQTT Idle Timeout
+            IdleTimeout = emqx_channel:get_mqtt_conf(Zone, idle_timeout),
+            IdleTimer = start_timer(IdleTimeout, idle_timeout),
+            tune_heap_size(Channel),
+            emqx_logger:set_metadata_peername(esockd:format(Peername)),
+            {ok,
+                #state{
+                    peername = Peername,
+                    sockname = Sockname,
+                    sockstate = running,
+                    mqtt_piggyback = MQTTPiggyback,
+                    limiter = Limiter,
+                    parse_state = ParseState,
+                    serialize = Serialize,
+                    channel = Channel,
+                    gc_state = GcState,
+                    postponed = [],
+                    stats_timer = StatsTimer,
+                    idle_timeout = IdleTimeout,
+                    idle_timer = IdleTimer,
+                    zone = Zone,
+                    listener = {Type, Listener},
+                    limiter_timer = undefined,
+                    limiter_cache = queue:new()
+                },
+                hibernate};
+        {deny, Reason} ->
+            {stop, Reason}
+    end.

 tune_heap_size(Channel) ->
     case
         emqx_config:get_zone_conf(
             emqx_channel:info(zone, Channel),
@@ -352,29 +339,56 @@ websocket_init([
         of
             #{enable := false} -> ok;
             ShutdownPolicy -> emqx_misc:tune_heap_size(ShutdownPolicy)
-    end,
-    emqx_logger:set_metadata_peername(esockd:format(Peername)),
-    {ok,
-        #state{
-            peername = Peername,
-            sockname = Sockname,
-            sockstate = running,
-            mqtt_piggyback = MQTTPiggyback,
-            limiter = Limiter,
-            parse_state = ParseState,
-            serialize = Serialize,
-            channel = Channel,
-            gc_state = GcState,
-            postponed = [],
-            stats_timer = StatsTimer,
-            idle_timeout = IdleTimeout,
-            idle_timer = IdleTimer,
-            zone = Zone,
-            listener = {Type, Listener},
-            limiter_timer = undefined,
-            limiter_cache = queue:new()
-        },
-        hibernate}.
+    end.
+
+get_stats_enable(Zone) ->
+    case emqx_config:get_zone_conf(Zone, [stats, enable]) of
+        true -> undefined;
+        false -> disabled
+    end.
+
+get_force_gc(Zone) ->
+    case emqx_config:get_zone_conf(Zone, [force_gc]) of
+        #{enable := false} -> undefined;
+        GcPolicy -> emqx_gc:init(GcPolicy)
+    end.
+
+get_ws_cookie(Req) ->
+    try
+        cowboy_req:parse_cookies(Req)
+    catch
+        error:badarg ->
+            ?SLOG(error, #{msg => "bad_cookie"}),
+            undefined;
+        Error:Reason ->
+            ?SLOG(error, #{
+                msg => "failed_to_parse_cookie",
+                exception => Error,
+                reason => Reason
+            }),
+            undefined
+    end.
+
+get_peer_info(Type, Listener, Req, Opts) ->
+    case
+        emqx_config:get_listener_conf(Type, Listener, [proxy_protocol]) andalso
+            maps:get(proxy_header, Req)
+    of
+        #{src_address := SrcAddr, src_port := SrcPort, ssl := SSL} ->
+            SourceName = {SrcAddr, SrcPort},
+            %% Notice: Only CN is available in Proxy Protocol V2 additional info
+            SourceSSL =
+                case maps:get(cn, SSL, undefined) of
+                    undefined -> nossl;
+                    CN -> [{pp2_ssl_cn, CN}]
+                end,
+            {SourceName, SourceSSL};
+        #{src_address := SrcAddr, src_port := SrcPort} ->
+            SourceName = {SrcAddr, SrcPort},
+            {SourceName, nossl};
+        _ ->
+            {get_peer(Req, Opts), cowboy_req:cert(Req)}
+    end.

 websocket_handle({binary, Data}, State) when is_list(Data) ->
     websocket_handle({binary, iolist_to_binary(Data)}, State);
@@ -1000,6 +1014,26 @@ get_peer(Req, #{listener := {Type, Listener}}) ->
         _:_ -> {Addr, PeerPort}
     end.

+check_max_connection(Type, Listener) ->
+    case emqx_config:get_listener_conf(Type, Listener, [max_connections]) of
+        infinity ->
+            allow;
+        Max ->
+            MatchSpec = [{{'_', emqx_ws_connection}, [], [true]}],
+            Curr = ets:select_count(emqx_channel_conn, MatchSpec),
+            case Curr >= Max of
+                false ->
+                    allow;
+                true ->
+                    Reason = #{
+                        max => Max,
+                        current => Curr,
+                        msg => "websocket_max_connections_limited"
+                    },
+                    ?SLOG(warning, Reason),
+                    {deny, Reason}
+            end
+    end.
+
 %%--------------------------------------------------------------------
 %% For CT tests
 %%--------------------------------------------------------------------
@@ -351,7 +351,7 @@ Filter supports the following placeholders:
 jwt {
     desc {
         en: """Authorization using ACL rules from authentication JWT."""
-        zh: """Authorization using ACL rules from authentication JWT."""
+        zh: """使用 JWT 登录认证中携带的 ACL 规则来进行发布和订阅的授权。"""
     }
     label {
         en: """jwt"""
@@ -180,7 +180,6 @@ do_post_config_update({{?CMD_REPLACE, Type}, RawNewSource}, Sources) ->
     {OldSource, Front, Rear} = take(Type, OldSources),
     NewSource = get_source_by_type(type(RawNewSource), Sources),
-    ok = ensure_resource_deleted(OldSource),
     clear_certs(OldSource),
     InitedSources = init_source(NewSource),
     Front ++ [InitedSources] ++ Rear;
 do_post_config_update({{?CMD_DELETE, Type}, _RawNewSource}, _Sources) ->
|||
|
|
@ -1,8 +1,9 @@
|
|||
{erl_opts, [debug_info]}.
|
||||
{deps, [ {emqx, {path, "../emqx"}}
|
||||
]}.
|
||||
{deps, [{emqx, {path, "../emqx"}}]}.
|
||||
|
||||
{shell, [
|
||||
% {config, "config/sys.config"},
|
||||
% {config, "config/sys.config"},
|
||||
{apps, [emqx_bridge]}
|
||||
]}.
|
||||
|
||||
{project_plugins, [erlfmt]}.
|
||||
|
|
|
|||
|
|
@@ -1,18 +1,18 @@
 %% -*- mode: erlang -*-
-{application, emqx_bridge,
- [{description, "An OTP application"},
-  {vsn, "0.1.0"},
-  {registered, []},
-  {mod, {emqx_bridge_app, []}},
-  {applications,
-   [kernel,
-    stdlib,
-    emqx,
-    emqx_connector
-   ]},
-  {env,[]},
-  {modules, []},
+{application, emqx_bridge, [
+    {description, "An OTP application"},
+    {vsn, "0.1.0"},
+    {registered, []},
+    {mod, {emqx_bridge_app, []}},
+    {applications, [
+        kernel,
+        stdlib,
+        emqx,
+        emqx_connector
+    ]},
+    {env, []},
+    {modules, []},

-  {licenses, ["Apache 2.0"]},
-  {links, []}
- ]}.
+    {licenses, ["Apache 2.0"]},
+    {links, []}
+]}.
|||
|
|
@ -18,48 +18,48 @@
|
|||
-include_lib("emqx/include/emqx.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
|
||||
-export([ post_config_update/5
|
||||
]).
|
||||
-export([post_config_update/5]).
|
||||
|
||||
-export([ load_hook/0
|
||||
, unload_hook/0
|
||||
]).
|
||||
-export([
|
||||
load_hook/0,
|
||||
unload_hook/0
|
||||
]).
|
||||
|
||||
-export([on_message_publish/1]).
|
||||
|
||||
-export([ resource_type/1
|
||||
, bridge_type/1
|
||||
, resource_id/1
|
||||
, resource_id/2
|
||||
, bridge_id/2
|
||||
, parse_bridge_id/1
|
||||
]).
|
||||
-export([
|
||||
resource_type/1,
|
||||
bridge_type/1,
|
||||
resource_id/1,
|
||||
resource_id/2,
|
||||
bridge_id/2,
|
||||
parse_bridge_id/1
|
||||
]).
|
||||
|
||||
-export([ load/0
|
||||
, lookup/1
|
||||
, lookup/2
|
||||
, lookup/3
|
||||
, list/0
|
||||
, list_bridges_by_connector/1
|
||||
, create/2
|
||||
, create/3
|
||||
, recreate/2
|
||||
, recreate/3
|
||||
, create_dry_run/2
|
||||
, remove/1
|
||||
, remove/2
|
||||
, update/2
|
||||
, update/3
|
||||
, stop/2
|
||||
, restart/2
|
||||
, reset_metrics/1
|
||||
]).
|
||||
-export([
|
||||
load/0,
|
||||
lookup/1,
|
||||
lookup/2,
|
||||
lookup/3,
|
||||
list/0,
|
||||
list_bridges_by_connector/1,
|
||||
create/2,
|
||||
create/3,
|
||||
recreate/2,
|
||||
recreate/3,
|
||||
create_dry_run/2,
|
||||
remove/1,
|
||||
remove/2,
|
||||
update/2,
|
||||
update/3,
|
||||
stop/2,
|
||||
restart/2,
|
||||
reset_metrics/1
|
||||
]).
|
||||
|
||||
-export([ send_message/2
|
||||
]).
|
||||
-export([send_message/2]).
|
||||
|
||||
-export([ config_key_path/0
|
||||
]).
|
||||
-export([config_key_path/0]).
|
||||
|
||||
%% exported for `emqx_telemetry'
|
||||
-export([get_basic_usage_info/0]).
|
||||
|
|
@ -69,18 +69,25 @@ load_hook() ->
|
|||
load_hook(Bridges).
|
||||
|
||||
load_hook(Bridges) ->
|
||||
lists:foreach(fun({_Type, Bridge}) ->
|
||||
lists:foreach(fun({_Name, BridgeConf}) ->
|
||||
lists:foreach(
|
||||
fun({_Type, Bridge}) ->
|
||||
lists:foreach(
|
||||
fun({_Name, BridgeConf}) ->
|
||||
do_load_hook(BridgeConf)
|
||||
end, maps:to_list(Bridge))
|
||||
end, maps:to_list(Bridges)).
|
||||
end,
|
||||
maps:to_list(Bridge)
|
||||
)
|
||||
end,
|
||||
maps:to_list(Bridges)
|
||||
).
|
||||
|
||||
do_load_hook(#{local_topic := _} = Conf) ->
|
||||
case maps:get(direction, Conf, egress) of
|
||||
egress -> emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []});
|
||||
ingress -> ok
|
||||
end;
|
||||
do_load_hook(_Conf) -> ok.
|
||||
do_load_hook(_Conf) ->
|
||||
ok.
|
||||
|
||||
unload_hook() ->
|
||||
ok = emqx_hooks:del('message.publish', {?MODULE, on_message_publish}).
|
||||
|
|
@@ -90,23 +97,36 @@ on_message_publish(Message = #message{topic = Topic, flags = Flags}) ->
         false ->
             Msg = emqx_rule_events:eventmsg_publish(Message),
             send_to_matched_egress_bridges(Topic, Msg);
-        true -> ok
+        true ->
+            ok
     end,
     {ok, Message}.

 send_to_matched_egress_bridges(Topic, Msg) ->
-    lists:foreach(fun (Id) ->
-            try send_message(Id, Msg) of
-                {error, Reason} ->
-                    ?SLOG(error, #{msg => "send_message_to_bridge_failed",
-                        bridge => Id, error => Reason});
-                _ -> ok
-            catch Err:Reason:ST ->
-                ?SLOG(error, #{msg => "send_message_to_bridge_exception",
-                    bridge => Id, error => Err, reason => Reason,
-                    stacktrace => ST})
-            end
-        end, get_matched_bridges(Topic)).
+    lists:foreach(
+        fun(Id) ->
+            try send_message(Id, Msg) of
+                {error, Reason} ->
+                    ?SLOG(error, #{
+                        msg => "send_message_to_bridge_failed",
+                        bridge => Id,
+                        error => Reason
+                    });
+                _ ->
+                    ok
+            catch
+                Err:Reason:ST ->
+                    ?SLOG(error, #{
+                        msg => "send_message_to_bridge_exception",
+                        bridge => Id,
+                        error => Err,
+                        reason => Reason,
+                        stacktrace => ST
+                    })
+            end
+        end,
+        get_matched_bridges(Topic)
+    ).

 send_message(BridgeId, Message) ->
     {BridgeType, BridgeName} = parse_bridge_id(BridgeId),
@@ -132,8 +152,8 @@ bridge_type(emqx_connector_mqtt) -> mqtt;
 bridge_type(emqx_connector_http) -> http.

 post_config_update(_, _Req, NewConf, OldConf, _AppEnv) ->
-    #{added := Added, removed := Removed, changed := Updated}
-        = diff_confs(NewConf, OldConf),
+    #{added := Added, removed := Removed, changed := Updated} =
+        diff_confs(NewConf, OldConf),
     %% The config update will be failed if any task in `perform_bridge_changes` failed.
     Result = perform_bridge_changes([
         {fun remove/3, Removed},
@@ -150,15 +170,19 @@ perform_bridge_changes(Tasks) ->
 perform_bridge_changes([], Result) ->
     Result;
 perform_bridge_changes([{Action, MapConfs} | Tasks], Result0) ->
-    Result = maps:fold(fun
-            ({_Type, _Name}, _Conf, {error, Reason}) ->
-                {error, Reason};
-            ({Type, Name}, Conf, _) ->
-                case Action(Type, Name, Conf) of
-                    {error, Reason} -> {error, Reason};
-                    Return -> Return
-                end
-        end, Result0, MapConfs),
+    Result = maps:fold(
+        fun
+            ({_Type, _Name}, _Conf, {error, Reason}) ->
+                {error, Reason};
+            ({Type, Name}, Conf, _) ->
+                case Action(Type, Name, Conf) of
+                    {error, Reason} -> {error, Reason};
+                    Return -> Return
+                end
+        end,
+        Result0,
+        MapConfs
+    ),
     perform_bridge_changes(Tasks, Result).

 load() ->
@@ -184,18 +208,29 @@ parse_bridge_id(BridgeId) ->
     end.

 list() ->
-    lists:foldl(fun({Type, NameAndConf}, Bridges) ->
-            lists:foldl(fun({Name, RawConf}, Acc) ->
+    lists:foldl(
+        fun({Type, NameAndConf}, Bridges) ->
+            lists:foldl(
+                fun({Name, RawConf}, Acc) ->
                     case lookup(Type, Name, RawConf) of
                         {error, not_found} -> Acc;
                         {ok, Res} -> [Res | Acc]
                     end
-                end, Bridges, maps:to_list(NameAndConf))
-        end, [], maps:to_list(emqx:get_raw_config([bridges], #{}))).
+                end,
+                Bridges,
+                maps:to_list(NameAndConf)
+            )
+        end,
+        [],
+        maps:to_list(emqx:get_raw_config([bridges], #{}))
+    ).

 list_bridges_by_connector(ConnectorId) ->
-    [B || B = #{raw_config := #{<<"connector">> := Id}} <- list(),
-        ConnectorId =:= Id].
+    [
+        B
+     || B = #{raw_config := #{<<"connector">> := Id}} <- list(),
+        ConnectorId =:= Id
+    ].

 lookup(Id) ->
     {Type, Name} = parse_bridge_id(Id),
@@ -206,10 +241,15 @@ lookup(Type, Name) ->
     lookup(Type, Name, RawConf).
 lookup(Type, Name, RawConf) ->
     case emqx_resource:get_instance(resource_id(Type, Name)) of
-        {error, not_found} -> {error, not_found};
+        {error, not_found} ->
+            {error, not_found};
         {ok, _, Data} ->
-            {ok, #{type => Type, name => Name, resource_data => Data,
-                raw_config => RawConf}}
+            {ok, #{
+                type => Type,
+                name => Name,
+                resource_data => Data,
+                raw_config => RawConf
+            }}
     end.

 reset_metrics(ResourceId) ->
@@ -227,13 +267,21 @@ create(BridgeId, Conf) ->
     create(BridgeType, BridgeName, Conf).

 create(Type, Name, Conf) ->
-    ?SLOG(info, #{msg => "create bridge", type => Type, name => Name,
-        config => Conf}),
-    case emqx_resource:create_local(resource_id(Type, Name),
-            <<"emqx_bridge">>,
-            emqx_bridge:resource_type(Type),
-            parse_confs(Type, Name, Conf),
-            #{}) of
+    ?SLOG(info, #{
+        msg => "create bridge",
+        type => Type,
+        name => Name,
+        config => Conf
+    }),
+    case
+        emqx_resource:create_local(
+            resource_id(Type, Name),
+            <<"emqx_bridge">>,
+            emqx_bridge:resource_type(Type),
+            parse_confs(Type, Name, Conf),
+            #{}
+        )
+    of
         {ok, already_created} -> maybe_disable_bridge(Type, Name, Conf);
         {ok, _} -> maybe_disable_bridge(Type, Name, Conf);
         {error, Reason} -> {error, Reason}
@@ -254,15 +302,25 @@ update(Type, Name, {OldConf, Conf}) ->
     %%
     case if_only_to_toggle_enable(OldConf, Conf) of
         false ->
-            ?SLOG(info, #{msg => "update bridge", type => Type, name => Name,
-                config => Conf}),
+            ?SLOG(info, #{
+                msg => "update bridge",
+                type => Type,
+                name => Name,
+                config => Conf
+            }),
             case recreate(Type, Name, Conf) of
-                {ok, _} -> maybe_disable_bridge(Type, Name, Conf);
+                {ok, _} ->
+                    maybe_disable_bridge(Type, Name, Conf);
                 {error, not_found} ->
-                    ?SLOG(warning, #{ msg => "updating_a_non-exist_bridge_need_create_a_new_one"
-                        , type => Type, name => Name, config => Conf}),
+                    ?SLOG(warning, #{
+                        msg => "updating_a_non-exist_bridge_need_create_a_new_one",
+                        type => Type,
+                        name => Name,
+                        config => Conf
+                    }),
                     create(Type, Name, Conf);
-                {error, Reason} -> {error, {update_bridge_failed, Reason}}
+                {error, Reason} ->
+                    {error, {update_bridge_failed, Reason}}
             end;
         true ->
             %% we don't need to recreate the bridge if this config change is only to
@@ -277,22 +335,25 @@ recreate(Type, Name) ->
     recreate(Type, Name, emqx:get_config([bridges, Type, Name])).

 recreate(Type, Name, Conf) ->
-    emqx_resource:recreate_local(resource_id(Type, Name),
+    emqx_resource:recreate_local(
+        resource_id(Type, Name),
         emqx_bridge:resource_type(Type),
         parse_confs(Type, Name, Conf),
-        #{}).
+        #{}
+    ).

 create_dry_run(Type, Conf) ->
-    Conf0 = Conf#{<<"egress">> =>
-        #{ <<"remote_topic">> => <<"t">>
-         , <<"remote_qos">> => 0
-         , <<"retain">> => true
-         , <<"payload">> => <<"val">>
-         },
-        <<"ingress">> =>
-        #{ <<"remote_topic">> => <<"t">>
-         }},
+    Conf0 = Conf#{
+        <<"egress">> =>
+            #{
+                <<"remote_topic">> => <<"t">>,
+                <<"remote_qos">> => 0,
+                <<"retain">> => true,
+                <<"payload">> => <<"val">>
+            },
+        <<"ingress">> =>
+            #{<<"remote_topic">> => <<"t">>}
+    },
     case emqx_resource:check_config(emqx_bridge:resource_type(Type), Conf0) of
         {ok, Conf1} ->
             emqx_resource:create_dry_run_local(emqx_bridge:resource_type(Type), Conf1);
@@ -313,35 +374,48 @@ remove(Type, Name, _Conf) ->
     case emqx_resource:remove_local(resource_id(Type, Name)) of
         ok -> ok;
         {error, not_found} -> ok;
-        {error, Reason} ->
-            {error, Reason}
+        {error, Reason} -> {error, Reason}
     end.

 diff_confs(NewConfs, OldConfs) ->
-    emqx_map_lib:diff_maps(flatten_confs(NewConfs),
-        flatten_confs(OldConfs)).
+    emqx_map_lib:diff_maps(
+        flatten_confs(NewConfs),
+        flatten_confs(OldConfs)
+    ).

 flatten_confs(Conf0) ->
     maps:from_list(
-        lists:flatmap(fun({Type, Conf}) ->
+        lists:flatmap(
+            fun({Type, Conf}) ->
                 do_flatten_confs(Type, Conf)
-            end, maps:to_list(Conf0))).
+            end,
+            maps:to_list(Conf0)
+        )
+    ).

 do_flatten_confs(Type, Conf0) ->
     [{{Type, Name}, Conf} || {Name, Conf} <- maps:to_list(Conf0)].

 get_matched_bridges(Topic) ->
     Bridges = emqx:get_config([bridges], #{}),
-    maps:fold(fun (BType, Conf, Acc0) ->
-            maps:fold(fun
-                %% Confs for MQTT, Kafka bridges have the `direction` flag
-                (_BName, #{direction := ingress}, Acc1) ->
-                    Acc1;
-                (BName, #{direction := egress} = Egress, Acc1) ->
-                    %% HTTP, MySQL bridges only have egress direction
-                    get_matched_bridge_id(Egress, Topic, BType, BName, Acc1)
-                end, Acc0, Conf)
-        end, [], Bridges).
+    maps:fold(
+        fun(BType, Conf, Acc0) ->
+            maps:fold(
+                fun
+                    %% Confs for MQTT, Kafka bridges have the `direction` flag
+                    (_BName, #{direction := ingress}, Acc1) ->
+                        Acc1;
+                    (BName, #{direction := egress} = Egress, Acc1) ->
+                        %% HTTP, MySQL bridges only have egress direction
+                        get_matched_bridge_id(Egress, Topic, BType, BName, Acc1)
+                end,
+                Acc0,
+                Conf
+            )
+        end,
+        [],
+        Bridges
+    ).

 get_matched_bridge_id(#{enable := false}, _Topic, _BType, _BName, Acc) ->
     Acc;
@@ -351,38 +425,56 @@ get_matched_bridge_id(#{local_topic := Filter}, Topic, BType, BName, Acc) ->
         false -> Acc
     end.

-parse_confs(http, _Name,
-    #{ url := Url
-     , method := Method
-     , body := Body
-     , headers := Headers
-     , request_timeout := ReqTimeout
-     } = Conf) ->
+parse_confs(
+    http,
+    _Name,
+    #{
+        url := Url,
+        method := Method,
+        body := Body,
+        headers := Headers,
+        request_timeout := ReqTimeout
+    } = Conf
+) ->
     {BaseUrl, Path} = parse_url(Url),
     {ok, BaseUrl2} = emqx_http_lib:uri_parse(BaseUrl),
-    Conf#{ base_url => BaseUrl2
-         , request =>
-            #{ path => Path
-             , method => Method
-             , body => Body
-             , headers => Headers
-             , request_timeout => ReqTimeout
-             }
-         };
-parse_confs(Type, Name, #{connector := ConnId, direction := Direction} = Conf)
-        when is_binary(ConnId) ->
+    Conf#{
+        base_url => BaseUrl2,
+        request =>
+            #{
+                path => Path,
+                method => Method,
+                body => Body,
+                headers => Headers,
+                request_timeout => ReqTimeout
+            }
+    };
+parse_confs(Type, Name, #{connector := ConnId, direction := Direction} = Conf) when
+    is_binary(ConnId)
+->
     case emqx_connector:parse_connector_id(ConnId) of
         {Type, ConnName} ->
             ConnectorConfs = emqx:get_config([connectors, Type, ConnName]),
-            make_resource_confs(Direction, ConnectorConfs,
-                maps:without([connector, direction], Conf), Type, Name);
+            make_resource_confs(
+                Direction,
+                ConnectorConfs,
+                maps:without([connector, direction], Conf),
+                Type,
+                Name
+            );
         {_ConnType, _ConnName} ->
             error({cannot_use_connector_with_different_type, ConnId})
     end;
-parse_confs(Type, Name, #{connector := ConnectorConfs, direction := Direction} = Conf)
-        when is_map(ConnectorConfs) ->
-    make_resource_confs(Direction, ConnectorConfs,
-        maps:without([connector, direction], Conf), Type, Name).
+parse_confs(Type, Name, #{connector := ConnectorConfs, direction := Direction} = Conf) when
+    is_map(ConnectorConfs)
+->
+    make_resource_confs(
+        Direction,
+        ConnectorConfs,
+        maps:without([connector, direction], Conf),
+        Type,
+        Name
+    ).

 make_resource_confs(ingress, ConnectorConfs, BridgeConf, Type, Name) ->
     BName = bridge_id(Type, Name),
@@ -417,39 +509,48 @@ if_only_to_toggle_enable(OldConf, Conf) ->
     #{added := Added, removed := Removed, changed := Updated} =
         emqx_map_lib:diff_maps(OldConf, Conf),
     case {Added, Removed, Updated} of
-        {Added, Removed, #{enable := _}= Updated}
-            when map_size(Added) =:= 0,
-                map_size(Removed) =:= 0,
-                map_size(Updated) =:= 1 -> true;
-        {_, _, _} -> false
+        {Added, Removed, #{enable := _} = Updated} when
+            map_size(Added) =:= 0,
+            map_size(Removed) =:= 0,
+            map_size(Updated) =:= 1
+        ->
+            true;
+        {_, _, _} ->
+            false
     end.

 -spec get_basic_usage_info() ->
-    #{ num_bridges => non_neg_integer()
-     , count_by_type =>
-        #{ BridgeType => non_neg_integer()
-         }
-     } when BridgeType :: atom().
+    #{
+        num_bridges => non_neg_integer(),
+        count_by_type =>
+            #{BridgeType => non_neg_integer()}
+    }
+when
+    BridgeType :: atom().
 get_basic_usage_info() ->
     InitialAcc = #{num_bridges => 0, count_by_type => #{}},
     try
         lists:foldl(
-            fun(#{resource_data := #{config := #{enable := false}}}, Acc) ->
-                Acc;
-            (#{type := BridgeType}, Acc) ->
-                NumBridges = maps:get(num_bridges, Acc),
-                CountByType0 = maps:get(count_by_type, Acc),
-                CountByType = maps:update_with(
-                    binary_to_atom(BridgeType, utf8),
-                    fun(X) -> X + 1 end,
-                    1,
-                    CountByType0),
-                Acc#{ num_bridges => NumBridges + 1
-                    , count_by_type => CountByType
-                    }
-            end,
-            InitialAcc,
-            list())
+            fun
+                (#{resource_data := #{config := #{enable := false}}}, Acc) ->
+                    Acc;
+                (#{type := BridgeType}, Acc) ->
+                    NumBridges = maps:get(num_bridges, Acc),
+                    CountByType0 = maps:get(count_by_type, Acc),
+                    CountByType = maps:update_with(
+                        binary_to_atom(BridgeType, utf8),
+                        fun(X) -> X + 1 end,
+                        1,
+                        CountByType0
+                    ),
+                    Acc#{
+                        num_bridges => NumBridges + 1,
+                        count_by_type => CountByType
+                    }
+            end,
+            InitialAcc,
+            list()
+        )
     catch
         %% for instance, when the bridge app is not ready yet.
         _:_ ->
@@ -24,22 +24,23 @@
 -import(hoconsc, [mk/2, array/1, enum/1]).

 %% Swagger specs from hocon schema
--export([ api_spec/0
-        , paths/0
-        , schema/1
-        , namespace/0
-        ]).
+-export([
+    api_spec/0,
+    paths/0,
+    schema/1,
+    namespace/0
+]).

 %% API callbacks
--export([ '/bridges'/2
-        , '/bridges/:id'/2
-        , '/bridges/:id/operation/:operation'/2
-        , '/nodes/:node/bridges/:id/operation/:operation'/2
-        , '/bridges/:id/reset_metrics'/2
-        ]).
+-export([
+    '/bridges'/2,
+    '/bridges/:id'/2,
+    '/bridges/:id/operation/:operation'/2,
+    '/nodes/:node/bridges/:id/operation/:operation'/2,
+    '/bridges/:id/reset_metrics'/2
+]).

--export([ lookup_from_local_node/2
-        ]).
+-export([lookup_from_local_node/2]).

 -define(TYPES, [mqtt, http]).
@@ -51,35 +52,45 @@
         EXPR
     catch
         error:{invalid_bridge_id, Id0} ->
-            {400, error_msg('INVALID_ID', <<"invalid_bridge_id: ", Id0/binary,
-                ". Bridge Ids must be of format {type}:{name}">>)}
-    end).
+            {400,
+                error_msg(
+                    'INVALID_ID',
+                    <<"invalid_bridge_id: ", Id0/binary,
+                        ". Bridge Ids must be of format {type}:{name}">>
+                )}
+    end
+).

--define(METRICS(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX),
-    #{ matched => MATCH,
-       success => SUCC,
-       failed => FAILED,
-       rate => RATE,
-       rate_last5m => RATE_5,
-       rate_max => RATE_MAX
-    }).
--define(metrics(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX),
-    #{ matched := MATCH,
-       success := SUCC,
-       failed := FAILED,
-       rate := RATE,
-       rate_last5m := RATE_5,
-       rate_max := RATE_MAX
-    }).
+-define(METRICS(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), #{
+    matched => MATCH,
+    success => SUCC,
+    failed => FAILED,
+    rate => RATE,
+    rate_last5m => RATE_5,
+    rate_max => RATE_MAX
+}).
+-define(metrics(MATCH, SUCC, FAILED, RATE, RATE_5, RATE_MAX), #{
+    matched := MATCH,
+    success := SUCC,
+    failed := FAILED,
+    rate := RATE,
+    rate_last5m := RATE_5,
+    rate_max := RATE_MAX
+}).

 namespace() -> "bridge".

 api_spec() ->
     emqx_dashboard_swagger:spec(?MODULE, #{check_schema => false}).

-paths() -> ["/bridges", "/bridges/:id", "/bridges/:id/operation/:operation",
-    "/nodes/:node/bridges/:id/operation/:operation",
-    "/bridges/:id/reset_metrics"].
+paths() ->
+    [
+        "/bridges",
+        "/bridges/:id",
+        "/bridges/:id/operation/:operation",
+        "/nodes/:node/bridges/:id/operation/:operation",
+        "/bridges/:id/reset_metrics"
+    ].

 error_schema(Code, Message) when is_atom(Code) ->
     error_schema([Code], Message);
|
@ -89,40 +100,58 @@ error_schema(Codes, Message) when is_list(Codes) andalso is_binary(Message) ->
|
|||
emqx_dashboard_swagger:error_codes(Codes, Message).
|
||||
|
||||
get_response_body_schema() ->
|
||||
emqx_dashboard_swagger:schema_with_examples(emqx_bridge_schema:get_response(),
|
||||
bridge_info_examples(get)).
|
||||
emqx_dashboard_swagger:schema_with_examples(
|
||||
emqx_bridge_schema:get_response(),
|
||||
bridge_info_examples(get)
|
||||
).
|
||||
|
||||
param_path_operation_cluster() ->
|
||||
{operation, mk(enum([enable, disable, stop, restart]),
|
||||
#{ in => path
|
||||
, required => true
|
||||
, example => <<"start">>
|
||||
, desc => ?DESC("desc_param_path_operation_cluster")
|
||||
})}.
|
||||
{operation,
|
||||
mk(
|
||||
enum([enable, disable, stop, restart]),
|
||||
#{
|
||||
in => path,
|
||||
required => true,
|
||||
example => <<"start">>,
|
||||
desc => ?DESC("desc_param_path_operation_cluster")
|
||||
}
|
||||
)}.
|
||||
|
||||
param_path_operation_on_node() ->
|
||||
{operation, mk(enum([stop, restart]),
|
||||
#{ in => path
|
||||
, required => true
|
||||
, example => <<"start">>
|
||||
, desc => ?DESC("desc_param_path_operation_on_node")
|
||||
})}.
|
||||
{operation,
|
||||
mk(
|
||||
enum([stop, restart]),
|
||||
#{
|
||||
in => path,
|
||||
required => true,
|
||||
example => <<"start">>,
|
||||
desc => ?DESC("desc_param_path_operation_on_node")
|
||||
}
|
||||
)}.
|
||||
|
||||
param_path_node() ->
|
||||
{node, mk(binary(),
|
||||
#{ in => path
|
||||
, required => true
|
||||
, example => <<"emqx@127.0.0.1">>
|
||||
, desc => ?DESC("desc_param_path_node")
|
||||
})}.
|
||||
{node,
|
||||
mk(
|
||||
binary(),
|
||||
#{
|
||||
in => path,
|
||||
required => true,
|
||||
example => <<"emqx@127.0.0.1">>,
|
||||
desc => ?DESC("desc_param_path_node")
|
||||
}
|
||||
)}.
|
||||
|
||||
param_path_id() ->
|
||||
{id, mk(binary(),
|
||||
#{ in => path
|
||||
, required => true
|
||||
, example => <<"http:my_http_bridge">>
|
||||
, desc => ?DESC("desc_param_path_id")
|
||||
})}.
|
||||
{id,
|
||||
mk(
|
||||
binary(),
|
||||
#{
|
||||
in => path,
|
||||
required => true,
|
||||
example => <<"http:my_http_bridge">>,
|
||||
desc => ?DESC("desc_param_path_id")
|
||||
}
|
||||
)}.
|
||||
|
||||
bridge_info_array_example(Method) ->
|
||||
[Config || #{value := Config} <- maps:values(bridge_info_examples(Method))].
|
||||
|
|
@@ -136,7 +165,8 @@ bridge_info_examples(Method) ->
     }).

 conn_bridge_examples(Method) ->
-    lists:foldl(fun(Type, Acc) ->
+    lists:foldl(
+        fun(Type, Acc) ->
             SType = atom_to_list(Type),
             KeyIngress = bin(SType ++ "_ingress"),
             KeyEgress = bin(SType ++ "_egress"),
|
@ -150,19 +180,25 @@ conn_bridge_examples(Method) ->
|
|||
value => info_example(Type, egress, Method)
|
||||
}
|
||||
})
|
||||
end, #{}, ?CONN_TYPES).
|
||||
end,
|
||||
#{},
|
||||
?CONN_TYPES
|
||||
).
|
||||
|
||||
info_example(Type, Direction, Method) ->
|
||||
maps:merge(info_example_basic(Type, Direction),
|
||||
method_example(Type, Direction, Method)).
|
||||
maps:merge(
|
||||
info_example_basic(Type, Direction),
|
||||
method_example(Type, Direction, Method)
|
||||
).
|
||||
|
||||
method_example(Type, Direction, Method) when Method == get; Method == post ->
|
||||
SType = atom_to_list(Type),
|
||||
SDir = atom_to_list(Direction),
|
||||
SName = case Type of
|
||||
http -> "my_" ++ SType ++ "_bridge";
|
||||
_ -> "my_" ++ SDir ++ "_" ++ SType ++ "_bridge"
|
||||
end,
|
||||
SName =
|
||||
case Type of
|
||||
http -> "my_" ++ SType ++ "_bridge";
|
||||
_ -> "my_" ++ SDir ++ "_" ++ SType ++ "_bridge"
|
||||
end,
|
||||
TypeNameExamp = #{
|
||||
type => bin(SType),
|
||||
name => bin(SName)
|
||||
|
|
@@ -175,8 +211,10 @@ maybe_with_metrics_example(TypeNameExamp, get) ->
     TypeNameExamp#{
         metrics => ?METRICS(0, 0, 0, 0, 0, 0),
         node_metrics => [
-            #{node => node(),
-                metrics => ?METRICS(0, 0, 0, 0, 0, 0)}
+            #{
+                node => node(),
+                metrics => ?METRICS(0, 0, 0, 0, 0, 0)
+            }
         ]
     };
 maybe_with_metrics_example(TypeNameExamp, _) ->
@@ -231,8 +269,9 @@ schema("/bridges") ->
             description => ?DESC("desc_api1"),
             responses => #{
                 200 => emqx_dashboard_swagger:schema_with_example(
-                    array(emqx_bridge_schema:get_response()),
-                    bridge_info_array_example(get))
+                    array(emqx_bridge_schema:get_response()),
+                    bridge_info_array_example(get)
+                )
             }
         },
         post => #{
|
@ -240,15 +279,15 @@ schema("/bridges") ->
|
|||
summary => <<"Create Bridge">>,
|
||||
description => ?DESC("desc_api2"),
|
||||
'requestBody' => emqx_dashboard_swagger:schema_with_examples(
|
||||
emqx_bridge_schema:post_request(),
|
||||
bridge_info_examples(post)),
|
||||
emqx_bridge_schema:post_request(),
|
||||
bridge_info_examples(post)
|
||||
),
|
||||
responses => #{
|
||||
201 => get_response_body_schema(),
|
||||
400 => error_schema('ALREADY_EXISTS', "Bridge already exists")
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
schema("/bridges/:id") ->
|
||||
#{
|
||||
'operationId' => '/bridges/:id',
|
||||
|
|
@@ -268,8 +307,9 @@ schema("/bridges/:id") ->
             description => ?DESC("desc_api4"),
             parameters => [param_path_id()],
             'requestBody' => emqx_dashboard_swagger:schema_with_examples(
-                emqx_bridge_schema:put_request(),
-                bridge_info_examples(put)),
+                emqx_bridge_schema:put_request(),
+                bridge_info_examples(put)
+            ),
             responses => #{
                 200 => get_response_body_schema(),
                 404 => error_schema('NOT_FOUND', "Bridge not found"),
@@ -287,7 +327,6 @@ schema("/bridges/:id") ->
             }
         }
     };
-
 schema("/bridges/:id/reset_metrics") ->
     #{
         'operationId' => '/bridges/:id/reset_metrics',
@@ -319,7 +358,6 @@ schema("/bridges/:id/operation/:operation") ->
             }
         }
     };
-
 schema("/nodes/:node/bridges/:id/operation/:operation") ->
     #{
         'operationId' => '/nodes/:node/bridges/:id/operation/:operation',
@ -336,7 +374,6 @@ schema("/nodes/:node/bridges/:id/operation/:operation") ->
|
|||
200 => <<"Operation success">>,
|
||||
400 => error_schema('INVALID_ID', "Bad bridge ID"),
|
||||
403 => error_schema('FORBIDDEN_REQUEST', "forbidden operation")
|
||||
|
||||
}
|
||||
}
|
||||
}.

@@ -353,15 +390,18 @@ schema("/nodes/:node/bridges/:id/operation/:operation") ->
        end
    end;
'/bridges'(get, _Params) ->
    {200, zip_bridges([[format_resp(Data) || Data <- emqx_bridge_proto_v1:list_bridges(Node)]
                       || Node <- mria_mnesia:running_nodes()])}.
    {200,
        zip_bridges([
            [format_resp(Data) || Data <- emqx_bridge_proto_v1:list_bridges(Node)]
         || Node <- mria_mnesia:running_nodes()
        ])}.

'/bridges/:id'(get, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(Id, lookup_from_all_nodes(BridgeType, BridgeName, 200));

'/bridges/:id'(put, #{bindings := #{id := Id}, body := Conf0}) ->
    Conf = filter_out_request_body(Conf0),
    ?TRY_PARSE_ID(Id,
    ?TRY_PARSE_ID(
        Id,
        case emqx_bridge:lookup(BridgeType, BridgeName) of
            {ok, _} ->
                case ensure_bridge_created(BridgeType, BridgeName, Conf) of

@@ -371,24 +411,31 @@ schema("/nodes/:node/bridges/:id/operation/:operation") ->
                        {400, Error}
                end;
            {error, not_found} ->
                {404, error_msg('NOT_FOUND',<<"bridge not found">>)}
        end);

                {404, error_msg('NOT_FOUND', <<"bridge not found">>)}
        end
    );
'/bridges/:id'(delete, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(Id,
        case emqx_conf:remove(emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
                #{override_to => cluster}) of
    ?TRY_PARSE_ID(
        Id,
        case
            emqx_conf:remove(
                emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
                #{override_to => cluster}
            )
        of
            {ok, _} -> {204};
            {error, Reason} ->
                {500, error_msg('INTERNAL_ERROR', Reason)}
        end).
            {error, Reason} -> {500, error_msg('INTERNAL_ERROR', Reason)}
        end
    ).

'/bridges/:id/reset_metrics'(put, #{bindings := #{id := Id}}) ->
    ?TRY_PARSE_ID(Id,
    ?TRY_PARSE_ID(
        Id,
        case emqx_bridge:reset_metrics(emqx_bridge:resource_id(BridgeType, BridgeName)) of
            ok -> {200, <<"Reset success">>};
            Reason -> {400, error_msg('BAD_REQUEST', Reason)}
        end).
        end
    ).

lookup_from_all_nodes(BridgeType, BridgeName, SuccCode) ->
    Nodes = mria_mnesia:running_nodes(),

@@ -407,40 +454,58 @@ lookup_from_local_node(BridgeType, BridgeName) ->
        Error -> Error
    end.

'/bridges/:id/operation/:operation'(post, #{bindings :=
        #{id := Id, operation := Op}}) ->
    ?TRY_PARSE_ID(Id, case operation_func(Op) of
        invalid -> {400, error_msg('BAD_REQUEST', <<"invalid operation">>)};
        OperFunc when OperFunc == enable; OperFunc == disable ->
            case emqx_conf:update(emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
                    {OperFunc, BridgeType, BridgeName}, #{override_to => cluster}) of
                {ok, _} -> {200};
                {error, {pre_config_update, _, bridge_not_found}} ->
                    {404, error_msg('NOT_FOUND', <<"bridge not found">>)};
                {error, Reason} ->
                    {500, error_msg('INTERNAL_ERROR', Reason)}
            end;
        OperFunc ->
            Nodes = mria_mnesia:running_nodes(),
            operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName)
    end).
'/bridges/:id/operation/:operation'(post, #{
    bindings :=
        #{id := Id, operation := Op}
}) ->
    ?TRY_PARSE_ID(
        Id,
        case operation_func(Op) of
            invalid ->
                {400, error_msg('BAD_REQUEST', <<"invalid operation">>)};
            OperFunc when OperFunc == enable; OperFunc == disable ->
                case
                    emqx_conf:update(
                        emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
                        {OperFunc, BridgeType, BridgeName},
                        #{override_to => cluster}
                    )
                of
                    {ok, _} ->
                        {200};
                    {error, {pre_config_update, _, bridge_not_found}} ->
                        {404, error_msg('NOT_FOUND', <<"bridge not found">>)};
                    {error, Reason} ->
                        {500, error_msg('INTERNAL_ERROR', Reason)}
                end;
            OperFunc ->
                Nodes = mria_mnesia:running_nodes(),
                operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName)
        end
    ).

'/nodes/:node/bridges/:id/operation/:operation'(post, #{bindings :=
        #{id := Id, operation := Op}}) ->
    ?TRY_PARSE_ID(Id, case operation_func(Op) of
        invalid -> {400, error_msg('BAD_REQUEST', <<"invalid operation">>)};
        OperFunc when OperFunc == restart; OperFunc == stop ->
            ConfMap = emqx:get_config([bridges, BridgeType, BridgeName]),
            case maps:get(enable, ConfMap, false) of
                false -> {403, error_msg('FORBIDDEN_REQUEST', <<"forbidden operation">>)};
                true ->
                    case emqx_bridge:OperFunc(BridgeType, BridgeName) of
                        ok -> {200};
                        {error, Reason} ->
                            {500, error_msg('INTERNAL_ERROR', Reason)}
                    end
            end
    end).
'/nodes/:node/bridges/:id/operation/:operation'(post, #{
    bindings :=
        #{id := Id, operation := Op}
}) ->
    ?TRY_PARSE_ID(
        Id,
        case operation_func(Op) of
            invalid ->
                {400, error_msg('BAD_REQUEST', <<"invalid operation">>)};
            OperFunc when OperFunc == restart; OperFunc == stop ->
                ConfMap = emqx:get_config([bridges, BridgeType, BridgeName]),
                case maps:get(enable, ConfMap, false) of
                    false ->
                        {403, error_msg('FORBIDDEN_REQUEST', <<"forbidden operation">>)};
                    true ->
                        case emqx_bridge:OperFunc(BridgeType, BridgeName) of
                            ok -> {200};
                            {error, Reason} -> {500, error_msg('INTERNAL_ERROR', Reason)}
                        end
                end
        end
    ).

operation_func(<<"stop">>) -> stop;
operation_func(<<"restart">>) -> restart;

@@ -449,10 +514,11 @@ operation_func(<<"disable">>) -> disable;
operation_func(_) -> invalid.
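
For orientation, the full mapping implemented by operation_func/1, per the clauses above and the hunk context (the enable clause is implied by the handler guards; this note is illustrative, not part of the patch):

%% operation_func(<<"stop">>)    -> stop
%% operation_func(<<"restart">>) -> restart
%% operation_func(<<"enable">>)  -> enable
%% operation_func(<<"disable">>) -> disable
%% operation_func(_Other)        -> invalid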

operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName) ->
    RpcFunc = case OperFunc of
        restart -> restart_bridges_to_all_nodes;
        stop -> stop_bridges_to_all_nodes
    end,
    RpcFunc =
        case OperFunc of
            restart -> restart_bridges_to_all_nodes;
            stop -> stop_bridges_to_all_nodes
        end,
    case is_ok(emqx_bridge_proto_v1:RpcFunc(Nodes, BridgeType, BridgeName)) of
        {ok, _} ->
            {200};

@@ -461,48 +527,70 @@ operation_to_all_nodes(Nodes, OperFunc, BridgeType, BridgeName) ->
    end.

ensure_bridge_created(BridgeType, BridgeName, Conf) ->
    case emqx_conf:update(emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
            Conf, #{override_to => cluster}) of
    case
        emqx_conf:update(
            emqx_bridge:config_key_path() ++ [BridgeType, BridgeName],
            Conf,
            #{override_to => cluster}
        )
    of
        {ok, _} -> ok;
        {error, Reason} ->
            {error, error_msg('BAD_REQUEST', Reason)}
        {error, Reason} -> {error, error_msg('BAD_REQUEST', Reason)}
    end.

zip_bridges([BridgesFirstNode | _] = BridgesAllNodes) ->
    lists:foldl(fun(#{type := Type, name := Name}, Acc) ->
    lists:foldl(
        fun(#{type := Type, name := Name}, Acc) ->
            Bridges = pick_bridges_by_id(Type, Name, BridgesAllNodes),
            [format_bridge_info(Bridges) | Acc]
        end, [], BridgesFirstNode).
        end,
        [],
        BridgesFirstNode
    ).
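
A hedged illustration of the input shape zip_bridges/1 expects: one list per running node, each listing the same logical bridges (all names and values below are hypothetical):

%% BridgesAllNodes =
%%     [
%%         [#{type => http, name => <<"b1">>, node => 'emqx@n1', status => connected}],
%%         [#{type => http, name => <<"b1">>, node => 'emqx@n2', status => connected}]
%%     ],
%% zip_bridges(BridgesAllNodes) folds over the first node's list and emits one
%% merged map per bridge, aggregated across nodes by format_bridge_info/1.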

pick_bridges_by_id(Type, Name, BridgesAllNodes) ->
    lists:foldl(fun(BridgesOneNode, Acc) ->
        case [Bridge || Bridge = #{type := Type0, name := Name0} <- BridgesOneNode,
                Type0 == Type, Name0 == Name] of
            [BridgeInfo] -> [BridgeInfo | Acc];
    lists:foldl(
        fun(BridgesOneNode, Acc) ->
            case
                [
                    Bridge
                 || Bridge = #{type := Type0, name := Name0} <- BridgesOneNode,
                    Type0 == Type,
                    Name0 == Name
                ]
            of
                [BridgeInfo] ->
                    [BridgeInfo | Acc];
                [] ->
                    ?SLOG(warning, #{msg => "bridge_inconsistent_in_cluster",
                        bridge => emqx_bridge:bridge_id(Type, Name)}),
                    ?SLOG(warning, #{
                        msg => "bridge_inconsistent_in_cluster",
                        bridge => emqx_bridge:bridge_id(Type, Name)
                    }),
                    Acc
            end
        end, [], BridgesAllNodes).
        end,
        [],
        BridgesAllNodes
    ).

format_bridge_info([FirstBridge | _] = Bridges) ->
    Res = maps:remove(node, FirstBridge),
    NodeStatus = collect_status(Bridges),
    NodeMetrics = collect_metrics(Bridges),
    Res#{ status => aggregate_status(NodeStatus)
        , node_status => NodeStatus
        , metrics => aggregate_metrics(NodeMetrics)
        , node_metrics => NodeMetrics
        }.
    Res#{
        status => aggregate_status(NodeStatus),
        node_status => NodeStatus,
        metrics => aggregate_metrics(NodeMetrics),
        node_metrics => NodeMetrics
    }.

collect_status(Bridges) ->
    [maps:with([node, status], B) || B <- Bridges].

aggregate_status(AllStatus) ->
    Head = fun ([A | _]) -> A end,
    Head = fun([A | _]) -> A end,
    HeadVal = maps:get(status, Head(AllStatus), connecting),
    AllRes = lists:all(fun (#{status := Val}) -> Val == HeadVal end, AllStatus),
    AllRes = lists:all(fun(#{status := Val}) -> Val == HeadVal end, AllStatus),
    case AllRes of
        true -> HeadVal;
        false -> inconsistent

@@ -512,15 +600,31 @@ collect_metrics(Bridges) ->
    [maps:with([node, metrics], B) || B <- Bridges].

aggregate_metrics(AllMetrics) ->
    InitMetrics = ?METRICS(0,0,0,0,0,0),
    lists:foldl(fun(#{metrics := ?metrics(Match1, Succ1, Failed1, Rate1, Rate5m1, RateMax1)},
            ?metrics(Match0, Succ0, Failed0, Rate0, Rate5m0, RateMax0)) ->
        ?METRICS(Match1 + Match0, Succ1 + Succ0, Failed1 + Failed0,
            Rate1 + Rate0, Rate5m1 + Rate5m0, RateMax1 + RateMax0)
    end, InitMetrics, AllMetrics).
    InitMetrics = ?METRICS(0, 0, 0, 0, 0, 0),
    lists:foldl(
        fun(
            #{metrics := ?metrics(Match1, Succ1, Failed1, Rate1, Rate5m1, RateMax1)},
            ?metrics(Match0, Succ0, Failed0, Rate0, Rate5m0, RateMax0)
        ) ->
            ?METRICS(
                Match1 + Match0,
                Succ1 + Succ0,
                Failed1 + Failed0,
                Rate1 + Rate0,
                Rate5m1 + Rate5m0,
                RateMax1 + RateMax0
            )
        end,
        InitMetrics,
        AllMetrics
    ).
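
A worked example of the element-wise fold above, summing the ?METRICS records reported by two nodes (all numbers hypothetical):

%% aggregate_metrics([
%%     #{metrics => ?METRICS(1, 1, 0, 0.5, 0.4, 2.0)},
%%     #{metrics => ?METRICS(2, 1, 1, 0.5, 0.6, 1.0)}
%% ]) =:= ?METRICS(3, 2, 1, 1.0, 1.0, 3.0)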

format_resp(#{type := Type, name := BridgeName, raw_config := RawConf,
        resource_data := #{status := Status, metrics := Metrics}}) ->
format_resp(#{
    type := Type,
    name := BridgeName,
    raw_config := RawConf,
    resource_data := #{status := Status, metrics := Metrics}
}) ->
    RawConfFull = fill_defaults(Type, RawConf),
    RawConfFull#{
        type => Type,

@@ -531,10 +635,11 @@ format_resp(#{type := Type, name := BridgeName, raw_config := RawConf,
    }.

format_metrics(#{
        counters := #{failed := Failed, exception := Ex, matched := Match, success := Succ},
        rate := #{
            matched := #{current := Rate, last5m := Rate5m, max := RateMax}
        } }) ->
    counters := #{failed := Failed, exception := Ex, matched := Match, success := Succ},
    rate := #{
        matched := #{current := Rate, last5m := Rate5m, max := RateMax}
    }
}) ->
    ?METRICS(Match, Succ, Failed + Ex, Rate, Rate5m, RateMax).
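
Note the mapping above: the failed count exposed by the API is the sum of the resource's failed and exception counters. A hypothetical input/output pair:

%% format_metrics(#{
%%     counters => #{failed => 1, exception => 2, matched => 10, success => 7},
%%     rate => #{matched => #{current => 1.0, last5m => 0.8, max => 3.0}}
%% }) =:= ?METRICS(10, 7, 3, 1.0, 0.8, 3.0)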

fill_defaults(Type, RawConf) ->

@@ -551,14 +656,31 @@ unpack_bridge_conf(Type, PackedConf) ->
    RawConf.

is_ok(ResL) ->
    case lists:filter(fun({ok, _}) -> false; (ok) -> false; (_) -> true end, ResL) of
    case
        lists:filter(
            fun
                ({ok, _}) -> false;
                (ok) -> false;
                (_) -> true
            end,
            ResL
        )
    of
        [] -> {ok, [Res || {ok, Res} <- ResL]};
        ErrL -> {error, ErrL}
    end.
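
is_ok/1 partitions multicall results: if every element is ok or {ok, _} it returns the collected values, otherwise the offending results. Illustrative values:

%% is_ok([{ok, a}, ok, {ok, b}])      =:= {ok, [a, b]}   % bare 'ok' carries no value
%% is_ok([{ok, a}, {error, timeout}]) =:= {error, [{error, timeout}]}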

filter_out_request_body(Conf) ->
    ExtraConfs = [<<"id">>, <<"type">>, <<"name">>, <<"status">>, <<"node_status">>,
        <<"node_metrics">>, <<"metrics">>, <<"node">>],
    ExtraConfs = [
        <<"id">>,
        <<"type">>,
        <<"name">>,
        <<"status">>,
        <<"node_status">>,
        <<"node_metrics">>,
        <<"metrics">>,
        <<"node">>
    ],
    maps:without(ExtraConfs, Conf).
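
Example of the stripping above (hypothetical body): read-only and bookkeeping keys are removed before the request body is written back into the config.

%% filter_out_request_body(#{
%%     <<"type">> => <<"http">>,
%%     <<"url">> => <<"http://x">>,
%%     <<"status">> => <<"connected">>
%% }) =:= #{<<"url">> => <<"http://x">>}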

error_msg(Code, Msg) when is_binary(Msg) ->

@@ -19,8 +19,10 @@

-export([start/2, stop/1]).

-export([ pre_config_update/3
        ]).
-export([
    pre_config_update/3,
    post_config_update/5
]).

-define(TOP_LELVE_HDLR_PATH, (emqx_bridge:config_key_path())).
-define(LEAF_NODE_HDLR_PATH, (emqx_bridge:config_key_path() ++ ['?', '?'])).

@@ -46,8 +48,18 @@ pre_config_update(_, {_Oper, _, _}, undefined) ->
pre_config_update(_, {Oper, _Type, _Name}, OldConfig) ->
    %% to save the 'enable' to the config files
    {ok, OldConfig#{<<"enable">> => operation_to_enable(Oper)}};
pre_config_update(_, Conf, _OldConfig) when is_map(Conf) ->
    {ok, Conf}.
pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
    case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of
        {error, Reason} ->
            {error, Reason};
        {ok, ConfNew} ->
            {ok, ConfNew}
    end.

post_config_update(Path, '$remove', _, OldConf, _AppEnvs) ->
    _ = emqx_connector_ssl:clear_certs(filename:join(Path), OldConf);
post_config_update(_Path, _Req, _, _OldConf, _AppEnvs) ->
    ok.

%% internal functions
operation_to_enable(disable) -> false;

@@ -15,45 +15,66 @@ roots() -> [].
|
|||
|
||||
fields("config") ->
|
||||
basic_config() ++
|
||||
[ {url, mk(binary(),
|
||||
#{ required => true
|
||||
, desc => ?DESC("config_url")
|
||||
})}
|
||||
, {local_topic, mk(binary(),
|
||||
#{ desc => ?DESC("config_local_topic")
|
||||
})}
|
||||
, {method, mk(method(),
|
||||
#{ default => post
|
||||
, desc => ?DESC("config_method")
|
||||
})}
|
||||
, {headers, mk(map(),
|
||||
#{ default => #{
|
||||
<<"accept">> => <<"application/json">>,
|
||||
<<"cache-control">> => <<"no-cache">>,
|
||||
<<"connection">> => <<"keep-alive">>,
|
||||
<<"content-type">> => <<"application/json">>,
|
||||
<<"keep-alive">> => <<"timeout=5">>}
|
||||
, desc => ?DESC("config_headers")
|
||||
})
|
||||
}
|
||||
, {body, mk(binary(),
|
||||
#{ default => <<"${payload}">>
|
||||
, desc => ?DESC("config_body")
|
||||
})}
|
||||
, {request_timeout, mk(emqx_schema:duration_ms(),
|
||||
#{ default => <<"15s">>
|
||||
, desc => ?DESC("config_request_timeout")
|
||||
})}
|
||||
];
|
||||
|
||||
[
|
||||
{url,
|
||||
mk(
|
||||
binary(),
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC("config_url")
|
||||
}
|
||||
)},
|
||||
{local_topic,
|
||||
mk(
|
||||
binary(),
|
||||
#{desc => ?DESC("config_local_topic")}
|
||||
)},
|
||||
{method,
|
||||
mk(
|
||||
method(),
|
||||
#{
|
||||
default => post,
|
||||
desc => ?DESC("config_method")
|
||||
}
|
||||
)},
|
||||
{headers,
|
||||
mk(
|
||||
map(),
|
||||
#{
|
||||
default => #{
|
||||
<<"accept">> => <<"application/json">>,
|
||||
<<"cache-control">> => <<"no-cache">>,
|
||||
<<"connection">> => <<"keep-alive">>,
|
||||
<<"content-type">> => <<"application/json">>,
|
||||
<<"keep-alive">> => <<"timeout=5">>
|
||||
},
|
||||
desc => ?DESC("config_headers")
|
||||
}
|
||||
)},
|
||||
{body,
|
||||
mk(
|
||||
binary(),
|
||||
#{
|
||||
default => <<"${payload}">>,
|
||||
desc => ?DESC("config_body")
|
||||
}
|
||||
)},
|
||||
{request_timeout,
|
||||
mk(
|
||||
emqx_schema:duration_ms(),
|
||||
#{
|
||||
default => <<"15s">>,
|
||||
desc => ?DESC("config_request_timeout")
|
||||
}
|
||||
)}
|
||||
];
|
||||
fields("post") ->
|
||||
[ type_field()
|
||||
, name_field()
|
||||
[
|
||||
type_field(),
|
||||
name_field()
|
||||
] ++ fields("config");
|
||||
|
||||
fields("put") ->
|
||||
fields("config");
|
||||
|
||||
fields("get") ->
|
||||
emqx_bridge_schema:metrics_status_fields() ++ fields("post").
|
||||
|
||||
|
|
@@ -65,32 +86,47 @@ desc(_) ->
|
|||
undefined.
|
||||
|
||||
basic_config() ->
|
||||
[ {enable,
|
||||
mk(boolean(),
|
||||
#{ desc => ?DESC("config_enable")
|
||||
, default => true
|
||||
})}
|
||||
, {direction,
|
||||
mk(egress,
|
||||
#{ desc => ?DESC("config_direction")
|
||||
, default => egress
|
||||
})}
|
||||
]
|
||||
++ proplists:delete(base_url, emqx_connector_http:fields(config)).
|
||||
[
|
||||
{enable,
|
||||
mk(
|
||||
boolean(),
|
||||
#{
|
||||
desc => ?DESC("config_enable"),
|
||||
default => true
|
||||
}
|
||||
)},
|
||||
{direction,
|
||||
mk(
|
||||
egress,
|
||||
#{
|
||||
desc => ?DESC("config_direction"),
|
||||
default => egress
|
||||
}
|
||||
)}
|
||||
] ++
|
||||
proplists:delete(base_url, emqx_connector_http:fields(config)).
|
||||
|
||||
%%======================================================================================
|
||||
|
||||
type_field() ->
|
||||
{type, mk(http,
|
||||
#{ required => true
|
||||
, desc => ?DESC("desc_type")
|
||||
})}.
|
||||
{type,
|
||||
mk(
|
||||
http,
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC("desc_type")
|
||||
}
|
||||
)}.
|
||||
|
||||
name_field() ->
|
||||
{name, mk(binary(),
|
||||
#{ required => true
|
||||
, desc => ?DESC("desc_name")
|
||||
})}.
|
||||
{name,
|
||||
mk(
|
||||
binary(),
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC("desc_name")
|
||||
}
|
||||
)}.
|
||||
|
||||
method() ->
|
||||
enum([post, put, get, delete]).
|
||||
|
|
|
|||
|
|
@@ -22,17 +22,20 @@
|
|||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
%% API functions
|
||||
-export([ start_link/0
|
||||
, ensure_all_started/1
|
||||
]).
|
||||
-export([
|
||||
start_link/0,
|
||||
ensure_all_started/1
|
||||
]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
terminate/2,
|
||||
code_change/3
|
||||
]).
|
||||
|
||||
-record(state, {}).
|
||||
|
||||
|
|
@@ -52,7 +55,6 @@ handle_call(_Request, _From, State) ->
|
|||
handle_cast({start_and_monitor, Configs}, State) ->
|
||||
ok = load_bridges(Configs),
|
||||
{noreply, State};
|
||||
|
||||
handle_cast(_Msg, State) ->
|
||||
{noreply, State}.
|
||||
|
||||
|
|
@@ -67,13 +69,22 @@ code_change(_OldVsn, State, _Extra) ->
|
|||
|
||||
%%============================================================================
|
||||
load_bridges(Configs) ->
|
||||
lists:foreach(fun({Type, NamedConf}) ->
|
||||
lists:foreach(fun({Name, Conf}) ->
|
||||
lists:foreach(
|
||||
fun({Type, NamedConf}) ->
|
||||
lists:foreach(
|
||||
fun({Name, Conf}) ->
|
||||
_Res = emqx_bridge:create(Type, Name, Conf),
|
||||
?tp(emqx_bridge_monitor_loaded_bridge,
|
||||
#{ type => Type
|
||||
, name => Name
|
||||
, res => _Res
|
||||
})
|
||||
end, maps:to_list(NamedConf))
|
||||
end, maps:to_list(Configs)).
|
||||
?tp(
|
||||
emqx_bridge_monitor_loaded_bridge,
|
||||
#{
|
||||
type => Type,
|
||||
name => Name,
|
||||
res => _Res
|
||||
}
|
||||
)
|
||||
end,
|
||||
maps:to_list(NamedConf)
|
||||
)
|
||||
end,
|
||||
maps:to_list(Configs)
|
||||
).
|
||||
|
|
|
|||
|
|
@@ -12,31 +12,27 @@
|
|||
roots() -> [].
|
||||
|
||||
fields("ingress") ->
|
||||
[ emqx_bridge_schema:direction_field(ingress, emqx_connector_mqtt_schema:ingress_desc())
|
||||
]
|
||||
++ emqx_bridge_schema:common_bridge_fields()
|
||||
++ proplists:delete(hookpoint, emqx_connector_mqtt_schema:fields("ingress"));
|
||||
|
||||
[emqx_bridge_schema:direction_field(ingress, emqx_connector_mqtt_schema:ingress_desc())] ++
|
||||
emqx_bridge_schema:common_bridge_fields() ++
|
||||
proplists:delete(hookpoint, emqx_connector_mqtt_schema:fields("ingress"));
|
||||
fields("egress") ->
|
||||
[ emqx_bridge_schema:direction_field(egress, emqx_connector_mqtt_schema:egress_desc())
|
||||
]
|
||||
++ emqx_bridge_schema:common_bridge_fields()
|
||||
++ emqx_connector_mqtt_schema:fields("egress");
|
||||
|
||||
[emqx_bridge_schema:direction_field(egress, emqx_connector_mqtt_schema:egress_desc())] ++
|
||||
emqx_bridge_schema:common_bridge_fields() ++
|
||||
emqx_connector_mqtt_schema:fields("egress");
|
||||
fields("post_ingress") ->
|
||||
[ type_field()
|
||||
, name_field()
|
||||
[
|
||||
type_field(),
|
||||
name_field()
|
||||
] ++ proplists:delete(enable, fields("ingress"));
|
||||
fields("post_egress") ->
|
||||
[ type_field()
|
||||
, name_field()
|
||||
[
|
||||
type_field(),
|
||||
name_field()
|
||||
] ++ proplists:delete(enable, fields("egress"));
|
||||
|
||||
fields("put_ingress") ->
|
||||
proplists:delete(enable, fields("ingress"));
|
||||
fields("put_egress") ->
|
||||
proplists:delete(enable, fields("egress"));
|
||||
|
||||
fields("get_ingress") ->
|
||||
emqx_bridge_schema:metrics_status_fields() ++ fields("post_ingress");
|
||||
fields("get_egress") ->
|
||||
|
|
@@ -49,13 +45,21 @@ desc(_) ->
|
|||
|
||||
%%======================================================================================
|
||||
type_field() ->
|
||||
{type, mk(mqtt,
|
||||
#{ required => true
|
||||
, desc => ?DESC("desc_type")
|
||||
})}.
|
||||
{type,
|
||||
mk(
|
||||
mqtt,
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC("desc_type")
|
||||
}
|
||||
)}.
|
||||
|
||||
name_field() ->
|
||||
{name, mk(binary(),
|
||||
#{ required => true
|
||||
, desc => ?DESC("desc_name")
|
||||
})}.
|
||||
{name,
|
||||
mk(
|
||||
binary(),
|
||||
#{
|
||||
required => true,
|
||||
desc => ?DESC("desc_name")
|
||||
}
|
||||
)}.
|
||||
|
|
|
|||
|
|
@@ -7,15 +7,17 @@
|
|||
|
||||
-export([roots/0, fields/1, desc/1, namespace/0]).
|
||||
|
||||
-export([ get_response/0
|
||||
, put_request/0
|
||||
, post_request/0
|
||||
]).
|
||||
-export([
|
||||
get_response/0,
|
||||
put_request/0,
|
||||
post_request/0
|
||||
]).
|
||||
|
||||
-export([ common_bridge_fields/0
|
||||
, metrics_status_fields/0
|
||||
, direction_field/2
|
||||
]).
|
||||
-export([
|
||||
common_bridge_fields/0,
|
||||
metrics_status_fields/0,
|
||||
direction_field/2
|
||||
]).
|
||||
|
||||
%%======================================================================================
|
||||
%% Hocon Schema Definitions
|
||||
|
|
@@ -34,43 +36,68 @@ post_request() ->
|
|||
http_schema("post").
|
||||
|
||||
http_schema(Method) ->
|
||||
Schemas = lists:flatmap(fun(Type) ->
|
||||
[ref(schema_mod(Type), Method ++ "_ingress"),
|
||||
ref(schema_mod(Type), Method ++ "_egress")]
|
||||
end, ?CONN_TYPES),
|
||||
hoconsc:union([ref(emqx_bridge_http_schema, Method)
|
||||
| Schemas]).
|
||||
Schemas = lists:flatmap(
|
||||
fun(Type) ->
|
||||
[
|
||||
ref(schema_mod(Type), Method ++ "_ingress"),
|
||||
ref(schema_mod(Type), Method ++ "_egress")
|
||||
]
|
||||
end,
|
||||
?CONN_TYPES
|
||||
),
|
||||
hoconsc:union([
|
||||
ref(emqx_bridge_http_schema, Method)
|
||||
| Schemas
|
||||
]).
|
||||
|
||||
common_bridge_fields() ->
|
||||
[ {enable,
|
||||
mk(boolean(),
|
||||
#{ desc => ?DESC("desc_enable")
|
||||
, default => true
|
||||
})}
|
||||
, {connector,
|
||||
mk(binary(),
|
||||
#{ required => true
|
||||
, example => <<"mqtt:my_mqtt_connector">>
|
||||
, desc => ?DESC("desc_connector")
|
||||
})}
|
||||
[
|
||||
{enable,
|
||||
mk(
|
||||
boolean(),
|
||||
#{
|
||||
desc => ?DESC("desc_enable"),
|
||||
default => true
|
||||
}
|
||||
)},
|
||||
{connector,
|
||||
mk(
|
||||
binary(),
|
||||
#{
|
||||
required => true,
|
||||
example => <<"mqtt:my_mqtt_connector">>,
|
||||
desc => ?DESC("desc_connector")
|
||||
}
|
||||
)}
|
||||
].
|
||||
|
||||
metrics_status_fields() ->
|
||||
[ {"metrics", mk(ref(?MODULE, "metrics"), #{desc => ?DESC("desc_metrics")})}
|
||||
, {"node_metrics", mk(hoconsc:array(ref(?MODULE, "node_metrics")),
|
||||
#{ desc => ?DESC("desc_node_metrics")})}
|
||||
, {"status", mk(status(), #{desc => ?DESC("desc_status")})}
|
||||
, {"node_status", mk(hoconsc:array(ref(?MODULE, "node_status")),
|
||||
#{ desc => ?DESC("desc_node_status")})}
|
||||
[
|
||||
{"metrics", mk(ref(?MODULE, "metrics"), #{desc => ?DESC("desc_metrics")})},
|
||||
{"node_metrics",
|
||||
mk(
|
||||
hoconsc:array(ref(?MODULE, "node_metrics")),
|
||||
#{desc => ?DESC("desc_node_metrics")}
|
||||
)},
|
||||
{"status", mk(status(), #{desc => ?DESC("desc_status")})},
|
||||
{"node_status",
|
||||
mk(
|
||||
hoconsc:array(ref(?MODULE, "node_status")),
|
||||
#{desc => ?DESC("desc_node_status")}
|
||||
)}
|
||||
].
|
||||
|
||||
direction_field(Dir, Desc) ->
|
||||
{direction, mk(Dir,
|
||||
#{ required => true
|
||||
, default => egress
|
||||
, desc => "The direction of the bridge. Can be one of 'ingress' or 'egress'.</br>"
|
||||
++ Desc
|
||||
})}.
|
||||
{direction,
|
||||
mk(
|
||||
Dir,
|
||||
#{
|
||||
required => true,
|
||||
default => egress,
|
||||
desc => "The direction of the bridge. Can be one of 'ingress' or 'egress'.</br>" ++
|
||||
Desc
|
||||
}
|
||||
)}.
|
||||
|
||||
%%======================================================================================
|
||||
%% For config files
|
||||
|
|
@ -80,31 +107,49 @@ namespace() -> "bridge".
|
|||
roots() -> [bridges].
|
||||
|
||||
fields(bridges) ->
|
||||
[{http, mk(hoconsc:map(name, ref(emqx_bridge_http_schema, "config")),
|
||||
#{desc => ?DESC("bridges_http")})}]
|
||||
++ [{T, mk(hoconsc:map(name, hoconsc:union([ ref(schema_mod(T), "ingress")
|
||||
, ref(schema_mod(T), "egress")
|
||||
])),
|
||||
#{desc => ?DESC("bridges_name")})} || T <- ?CONN_TYPES];
|
||||
|
||||
[
|
||||
{http,
|
||||
mk(
|
||||
hoconsc:map(name, ref(emqx_bridge_http_schema, "config")),
|
||||
#{desc => ?DESC("bridges_http")}
|
||||
)}
|
||||
] ++
|
||||
[
|
||||
{T,
|
||||
mk(
|
||||
hoconsc:map(
|
||||
name,
|
||||
hoconsc:union([
|
||||
ref(schema_mod(T), "ingress"),
|
||||
ref(schema_mod(T), "egress")
|
||||
])
|
||||
),
|
||||
#{desc => ?DESC("bridges_name")}
|
||||
)}
|
||||
|| T <- ?CONN_TYPES
|
||||
];
|
||||
fields("metrics") ->
|
||||
[ {"matched", mk(integer(), #{desc => ?DESC("metric_matched")})}
|
||||
, {"success", mk(integer(), #{desc => ?DESC("metric_success")})}
|
||||
, {"failed", mk(integer(), #{desc => ?DESC("metric_failed")})}
|
||||
, {"rate", mk(float(), #{desc => ?DESC("metric_rate")})}
|
||||
, {"rate_max", mk(float(), #{desc => ?DESC("metric_rate_max")})}
|
||||
, {"rate_last5m", mk(float(),
|
||||
#{desc => ?DESC("metric_rate_last5m")})}
|
||||
[
|
||||
{"matched", mk(integer(), #{desc => ?DESC("metric_matched")})},
|
||||
{"success", mk(integer(), #{desc => ?DESC("metric_success")})},
|
||||
{"failed", mk(integer(), #{desc => ?DESC("metric_failed")})},
|
||||
{"rate", mk(float(), #{desc => ?DESC("metric_rate")})},
|
||||
{"rate_max", mk(float(), #{desc => ?DESC("metric_rate_max")})},
|
||||
{"rate_last5m",
|
||||
mk(
|
||||
float(),
|
||||
#{desc => ?DESC("metric_rate_last5m")}
|
||||
)}
|
||||
];
|
||||
|
||||
fields("node_metrics") ->
|
||||
[ node_name()
|
||||
, {"metrics", mk(ref(?MODULE, "metrics"), #{})}
|
||||
[
|
||||
node_name(),
|
||||
{"metrics", mk(ref(?MODULE, "metrics"), #{})}
|
||||
];
|
||||
|
||||
fields("node_status") ->
|
||||
[ node_name()
|
||||
, {"status", mk(status(), #{})}
|
||||
[
|
||||
node_name(),
|
||||
{"status", mk(status(), #{})}
|
||||
].
|
||||
|
||||
desc(bridges) ->
|
||||
|
|
|
|||
|
|
@@ -27,15 +27,19 @@ start_link() ->
    supervisor:start_link({local, ?SERVER}, ?MODULE, []).

init([]) ->
    SupFlags = #{strategy => one_for_one,
                 intensity => 10,
                 period => 10},
    SupFlags = #{
        strategy => one_for_one,
        intensity => 10,
        period => 10
    },
    ChildSpecs = [
        #{id => emqx_bridge_monitor,
          start => {emqx_bridge_monitor, start_link, []},
          restart => permanent,
          type => worker,
          modules => [emqx_bridge_monitor]}
        #{
            id => emqx_bridge_monitor,
            start => {emqx_bridge_monitor, start_link, []},
            restart => permanent,
            type => worker,
            modules => [emqx_bridge_monitor]
        }
    ],
    {ok, {SupFlags, ChildSpecs}}.

@@ -18,13 +18,14 @@

-behaviour(emqx_bpapi).

-export([ introduced_in/0
-export([
    introduced_in/0,

        , list_bridges/1
        , lookup_from_all_nodes/3
        , restart_bridges_to_all_nodes/3
        , stop_bridges_to_all_nodes/3
        ]).
    list_bridges/1,
    lookup_from_all_nodes/3,
    restart_bridges_to_all_nodes/3,
    stop_bridges_to_all_nodes/3
]).

-include_lib("emqx/include/bpapi.hrl").

@@ -40,19 +41,34 @@ list_bridges(Node) ->
-type key() :: atom() | binary() | [byte()].

-spec restart_bridges_to_all_nodes([node()], key(), key()) ->
        emqx_rpc:erpc_multicall().
    emqx_rpc:erpc_multicall().
restart_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(Nodes, emqx_bridge, restart,
        [BridgeType, BridgeName], ?TIMEOUT).
    erpc:multicall(
        Nodes,
        emqx_bridge,
        restart,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

-spec stop_bridges_to_all_nodes([node()], key(), key()) ->
        emqx_rpc:erpc_multicall().
    emqx_rpc:erpc_multicall().
stop_bridges_to_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(Nodes, emqx_bridge, stop,
        [BridgeType, BridgeName], ?TIMEOUT).
    erpc:multicall(
        Nodes,
        emqx_bridge,
        stop,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).

-spec lookup_from_all_nodes([node()], key(), key()) ->
        emqx_rpc:erpc_multicall().
    emqx_rpc:erpc_multicall().
lookup_from_all_nodes(Nodes, BridgeType, BridgeName) ->
    erpc:multicall(Nodes, emqx_bridge_api, lookup_from_local_node,
        [BridgeType, BridgeName], ?TIMEOUT).
    erpc:multicall(
        Nodes,
        emqx_bridge_api,
        lookup_from_local_node,
        [BridgeType, BridgeName],
        ?TIMEOUT
    ).
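
A hedged usage sketch (node and bridge names hypothetical): these wrappers fan one operation out to every node; erpc:multicall/5 returns one result tuple per node, which the API layer reduces with is_ok/1 as shown earlier.

%% Nodes = mria_mnesia:running_nodes(),
%% Res = emqx_bridge_proto_v1:restart_bridges_to_all_nodes(Nodes, http, <<"my_bridge">>),
%% case is_ok(Res) of
%%     {ok, _} -> all_nodes_restarted;   % illustrative atom
%%     {error, Errors} -> Errors
%% end.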
@@ -23,7 +23,7 @@
|
|||
-include_lib("snabbkaffe/include/snabbkaffe.hrl").
|
||||
|
||||
all() ->
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
emqx_common_test_helpers:all(?MODULE).
|
||||
|
||||
init_per_suite(Config) ->
|
||||
%% to avoid inter-suite dependencies
|
||||
|
|
@@ -32,8 +32,12 @@ init_per_suite(Config) ->
|
|||
Config.
|
||||
|
||||
end_per_suite(_Config) ->
|
||||
emqx_common_test_helpers:stop_apps([emqx, emqx_bridge,
|
||||
emqx_resource, emqx_connector]).
|
||||
emqx_common_test_helpers:stop_apps([
|
||||
emqx,
|
||||
emqx_bridge,
|
||||
emqx_resource,
|
||||
emqx_connector
|
||||
]).
|
||||
|
||||
init_per_testcase(t_get_basic_usage_info_1, Config) ->
|
||||
setup_fake_telemetry_data(),
|
||||
|
|
@@ -43,13 +47,15 @@ init_per_testcase(_TestCase, Config) ->
|
|||
|
||||
end_per_testcase(t_get_basic_usage_info_1, _Config) ->
|
||||
lists:foreach(
|
||||
fun({BridgeType, BridgeName}) ->
|
||||
ok = emqx_bridge:remove(BridgeType, BridgeName)
|
||||
end,
|
||||
[ {http, <<"basic_usage_info_http">>}
|
||||
, {http, <<"basic_usage_info_http_disabled">>}
|
||||
, {mqtt, <<"basic_usage_info_mqtt">>}
|
||||
]),
|
||||
fun({BridgeType, BridgeName}) ->
|
||||
ok = emqx_bridge:remove(BridgeType, BridgeName)
|
||||
end,
|
||||
[
|
||||
{http, <<"basic_usage_info_http">>},
|
||||
{http, <<"basic_usage_info_http_disabled">>},
|
||||
{mqtt, <<"basic_usage_info_mqtt">>}
|
||||
]
|
||||
),
|
||||
ok = emqx_config:delete_override_conf_files(),
|
||||
ok = emqx_config:put([bridges], #{}),
|
||||
ok = emqx_config:put_raw([bridges], #{}),
|
||||
|
|
@@ -59,53 +65,68 @@ end_per_testcase(_TestCase, _Config) ->
|
|||
|
||||
t_get_basic_usage_info_0(_Config) ->
|
||||
?assertEqual(
|
||||
#{ num_bridges => 0
|
||||
, count_by_type => #{}
|
||||
#{
|
||||
num_bridges => 0,
|
||||
count_by_type => #{}
|
||||
},
|
||||
emqx_bridge:get_basic_usage_info()).
|
||||
emqx_bridge:get_basic_usage_info()
|
||||
).
|
||||
|
||||
t_get_basic_usage_info_1(_Config) ->
|
||||
BasicUsageInfo = emqx_bridge:get_basic_usage_info(),
|
||||
?assertEqual(
|
||||
#{ num_bridges => 2
|
||||
, count_by_type => #{ http => 1
|
||||
, mqtt => 1
|
||||
}
|
||||
#{
|
||||
num_bridges => 2,
|
||||
count_by_type => #{
|
||||
http => 1,
|
||||
mqtt => 1
|
||||
}
|
||||
},
|
||||
BasicUsageInfo).
|
||||
BasicUsageInfo
|
||||
).
|
||||
|
||||
setup_fake_telemetry_data() ->
|
||||
ConnectorConf =
|
||||
#{<<"connectors">> =>
|
||||
#{<<"mqtt">> => #{<<"my_mqtt_connector">> =>
|
||||
#{ server => "127.0.0.1:1883" }}}},
|
||||
MQTTConfig = #{ connector => <<"mqtt:my_mqtt_connector">>
|
||||
, enable => true
|
||||
, direction => ingress
|
||||
, remote_topic => <<"aws/#">>
|
||||
, remote_qos => 1
|
||||
},
|
||||
HTTPConfig = #{ url => <<"http://localhost:9901/messages/${topic}">>
|
||||
, enable => true
|
||||
, direction => egress
|
||||
, local_topic => "emqx_http/#"
|
||||
, method => post
|
||||
, body => <<"${payload}">>
|
||||
, headers => #{}
|
||||
, request_timeout => "15s"
|
||||
},
|
||||
Conf =
|
||||
#{ <<"bridges">> =>
|
||||
#{ <<"http">> =>
|
||||
#{ <<"basic_usage_info_http">> => HTTPConfig
|
||||
, <<"basic_usage_info_http_disabled">> =>
|
||||
HTTPConfig#{enable => false}
|
||||
}
|
||||
, <<"mqtt">> =>
|
||||
#{ <<"basic_usage_info_mqtt">> => MQTTConfig
|
||||
}
|
||||
#{
|
||||
<<"connectors">> =>
|
||||
#{
|
||||
<<"mqtt">> => #{
|
||||
<<"my_mqtt_connector">> =>
|
||||
#{server => "127.0.0.1:1883"}
|
||||
}
|
||||
}
|
||||
},
|
||||
},
|
||||
MQTTConfig = #{
|
||||
connector => <<"mqtt:my_mqtt_connector">>,
|
||||
enable => true,
|
||||
direction => ingress,
|
||||
remote_topic => <<"aws/#">>,
|
||||
remote_qos => 1
|
||||
},
|
||||
HTTPConfig = #{
|
||||
url => <<"http://localhost:9901/messages/${topic}">>,
|
||||
enable => true,
|
||||
direction => egress,
|
||||
local_topic => "emqx_http/#",
|
||||
method => post,
|
||||
body => <<"${payload}">>,
|
||||
headers => #{},
|
||||
request_timeout => "15s"
|
||||
},
|
||||
Conf =
|
||||
#{
|
||||
<<"bridges">> =>
|
||||
#{
|
||||
<<"http">> =>
|
||||
#{
|
||||
<<"basic_usage_info_http">> => HTTPConfig,
|
||||
<<"basic_usage_info_http_disabled">> =>
|
||||
HTTPConfig#{enable => false}
|
||||
},
|
||||
<<"mqtt">> =>
|
||||
#{<<"basic_usage_info_mqtt">> => MQTTConfig}
|
||||
}
|
||||
},
|
||||
ok = emqx_common_test_helpers:load_config(emqx_connector_schema, ConnectorConf),
|
||||
ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, Conf),
|
||||
|
||||
|
|
|
|||
|
|
@@ -25,11 +25,15 @@
-define(CONF_DEFAULT, <<"bridges: {}">>).
-define(BRIDGE_TYPE, <<"http">>).
-define(BRIDGE_NAME, <<"test_bridge">>).
-define(URL(PORT, PATH), list_to_binary(
    io_lib:format("http://localhost:~s/~s",
        [integer_to_list(PORT), PATH]))).
-define(HTTP_BRIDGE(URL, TYPE, NAME),
    #{
-define(URL(PORT, PATH),
    list_to_binary(
        io_lib:format(
            "http://localhost:~s/~s",
            [integer_to_list(PORT), PATH]
        )
    )
).
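
Example expansion of the macro above:

%% ?URL(8080, "path1") =:= <<"http://localhost:8080/path1">>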
-define(HTTP_BRIDGE(URL, TYPE, NAME), #{
    <<"type">> => TYPE,
    <<"name">> => NAME,
    <<"url">> => URL,

@@ -40,7 +44,6 @@
    <<"headers">> => #{
        <<"content-type">> => <<"application/json">>
    }

}).

all() ->

@@ -50,15 +53,17 @@ groups() ->
    [].

suite() ->
    [{timetrap,{seconds,60}}].
    [{timetrap, {seconds, 60}}].

init_per_suite(Config) ->
    _ = application:load(emqx_conf),
    %% some testcases (possibly from other apps) already get emqx_connector started
    _ = application:stop(emqx_resource),
    _ = application:stop(emqx_connector),
    ok = emqx_common_test_helpers:start_apps([emqx_bridge, emqx_dashboard],
        fun set_special_configs/1),
    ok = emqx_common_test_helpers:start_apps(
        [emqx_bridge, emqx_dashboard],
        fun set_special_configs/1
    ),
    ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?CONF_DEFAULT),
    Config.

@@ -79,9 +84,12 @@ end_per_testcase(_, _Config) ->
    ok.

clear_resources() ->
    lists:foreach(fun(#{type := Type, name := Name}) ->
    lists:foreach(
        fun(#{type := Type, name := Name}) ->
            ok = emqx_bridge:remove(Type, Name)
        end, emqx_bridge:list()).
        end,
        emqx_bridge:list()
    ).

%%------------------------------------------------------------------------------
%% HTTP server for testing

@@ -95,12 +103,12 @@ start_http_server(HandleFun) ->
    end),
    receive
        {port, Port} -> Port
    after
        2000 -> error({timeout, start_http_server})
    after 2000 -> error({timeout, start_http_server})
    end.

listen_on_random_port() ->
    Min = 1024, Max = 65000,
    Min = 1024,
    Max = 65000,
    Port = rand:uniform(Max - Min) + Min,
    case gen_tcp:listen(Port, [{active, false}, {reuseaddr, true}, binary]) of
        {ok, Sock} -> {Port, Sock};

@@ -109,16 +117,18 @@ listen_on_random_port() ->

loop(Sock, HandleFun, Parent) ->
    {ok, Conn} = gen_tcp:accept(Sock),
    Handler = spawn(fun () -> HandleFun(Conn, Parent) end),
    Handler = spawn(fun() -> HandleFun(Conn, Parent) end),
    gen_tcp:controlling_process(Conn, Handler),
    loop(Sock, HandleFun, Parent).

make_response(CodeStr, Str) ->
    B = iolist_to_binary(Str),
    iolist_to_binary(
        io_lib:fwrite(
            "HTTP/1.0 ~s\r\nContent-Type: text/html\r\nContent-Length: ~p\r\n\r\n~s",
            [CodeStr, size(B), B])).
        io_lib:fwrite(
            "HTTP/1.0 ~s\r\nContent-Type: text/html\r\nContent-Length: ~p\r\n\r\n~s",
            [CodeStr, size(B), B]
        )
    ).
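
Illustrative expansion (computed from the format string above):

%% make_response("200 OK", "hello") =:=
%%     <<"HTTP/1.0 200 OK\r\nContent-Type: text/html\r\nContent-Length: 5\r\n\r\nhello">>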
handle_fun_200_ok(Conn, Parent) ->
|
||||
case gen_tcp:recv(Conn, 0) of
|
||||
|
|
@@ -151,18 +161,22 @@ t_http_crud_apis(_) ->
|
|||
%% then we add a http bridge, using POST
|
||||
%% POST /bridges/ will create a bridge
|
||||
URL1 = ?URL(Port, "path1"),
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
|
||||
),
|
||||
|
||||
%ct:pal("---bridge: ~p", [Bridge]),
|
||||
#{ <<"type">> := ?BRIDGE_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME
|
||||
, <<"status">> := _
|
||||
, <<"node_status">> := [_|_]
|
||||
, <<"metrics">> := _
|
||||
, <<"node_metrics">> := [_|_]
|
||||
, <<"url">> := URL1
|
||||
} = jsx:decode(Bridge),
|
||||
#{
|
||||
<<"type">> := ?BRIDGE_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME,
|
||||
<<"status">> := _,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"metrics">> := _,
|
||||
<<"node_metrics">> := [_ | _],
|
||||
<<"url">> := URL1
|
||||
} = jsx:decode(Bridge),
|
||||
|
||||
BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
|
||||
%% send a message to emqx and the message should be forwarded to the HTTP server
|
||||
|
|
@@ -170,49 +184,70 @@ t_http_crud_apis(_) ->
|
|||
emqx:publish(emqx_message:make(<<"emqx_http/1">>, Body)),
|
||||
?assert(
|
||||
receive
|
||||
{http_server, received, #{method := <<"POST">>, path := <<"/path1">>,
|
||||
body := Body}} ->
|
||||
{http_server, received, #{
|
||||
method := <<"POST">>,
|
||||
path := <<"/path1">>,
|
||||
body := Body
|
||||
}} ->
|
||||
true;
|
||||
Msg ->
|
||||
ct:pal("error: http got unexpected request: ~p", [Msg]),
|
||||
false
|
||||
after 100 ->
|
||||
false
|
||||
end),
|
||||
end
|
||||
),
|
||||
%% update the request-path of the bridge
|
||||
URL2 = ?URL(Port, "path2"),
|
||||
{ok, 200, Bridge2} = request(put, uri(["bridges", BridgeID]),
|
||||
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)),
|
||||
?assertMatch(#{ <<"type">> := ?BRIDGE_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME
|
||||
, <<"status">> := _
|
||||
, <<"node_status">> := [_|_]
|
||||
, <<"metrics">> := _
|
||||
, <<"node_metrics">> := [_|_]
|
||||
, <<"url">> := URL2
|
||||
}, jsx:decode(Bridge2)),
|
||||
{ok, 200, Bridge2} = request(
|
||||
put,
|
||||
uri(["bridges", BridgeID]),
|
||||
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)
|
||||
),
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"type">> := ?BRIDGE_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME,
|
||||
<<"status">> := _,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"metrics">> := _,
|
||||
<<"node_metrics">> := [_ | _],
|
||||
<<"url">> := URL2
|
||||
},
|
||||
jsx:decode(Bridge2)
|
||||
),
|
||||
|
||||
%% list all bridges again, assert Bridge2 is in it
|
||||
{ok, 200, Bridge2Str} = request(get, uri(["bridges"]), []),
|
||||
?assertMatch([#{ <<"type">> := ?BRIDGE_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME
|
||||
, <<"status">> := _
|
||||
, <<"node_status">> := [_|_]
|
||||
, <<"metrics">> := _
|
||||
, <<"node_metrics">> := [_|_]
|
||||
, <<"url">> := URL2
|
||||
}], jsx:decode(Bridge2Str)),
|
||||
?assertMatch(
|
||||
[
|
||||
#{
|
||||
<<"type">> := ?BRIDGE_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME,
|
||||
<<"status">> := _,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"metrics">> := _,
|
||||
<<"node_metrics">> := [_ | _],
|
||||
<<"url">> := URL2
|
||||
}
|
||||
],
|
||||
jsx:decode(Bridge2Str)
|
||||
),
|
||||
|
||||
%% get the bridge by id
|
||||
{ok, 200, Bridge3Str} = request(get, uri(["bridges", BridgeID]), []),
|
||||
?assertMatch(#{ <<"type">> := ?BRIDGE_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME
|
||||
, <<"status">> := _
|
||||
, <<"node_status">> := [_|_]
|
||||
, <<"metrics">> := _
|
||||
, <<"node_metrics">> := [_|_]
|
||||
, <<"url">> := URL2
|
||||
}, jsx:decode(Bridge3Str)),
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"type">> := ?BRIDGE_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME,
|
||||
<<"status">> := _,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"metrics">> := _,
|
||||
<<"node_metrics">> := [_ | _],
|
||||
<<"url">> := URL2
|
||||
},
|
||||
jsx:decode(Bridge3Str)
|
||||
),
|
||||
|
||||
%% send a message to emqx again, check the path has been changed
|
||||
emqx:publish(emqx_message:make(<<"emqx_http/1">>, Body)),
|
||||
|
|
@ -225,25 +260,35 @@ t_http_crud_apis(_) ->
|
|||
false
|
||||
after 100 ->
|
||||
false
|
||||
end),
|
||||
end
|
||||
),
|
||||
|
||||
%% delete the bridge
|
||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
|
||||
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
|
||||
|
||||
%% update a deleted bridge returns an error
|
||||
{ok, 404, ErrMsg2} = request(put, uri(["bridges", BridgeID]),
|
||||
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)),
|
||||
{ok, 404, ErrMsg2} = request(
|
||||
put,
|
||||
uri(["bridges", BridgeID]),
|
||||
?HTTP_BRIDGE(URL2, ?BRIDGE_TYPE, ?BRIDGE_NAME)
|
||||
),
|
||||
?assertMatch(
|
||||
#{ <<"code">> := _
|
||||
, <<"message">> := <<"bridge not found">>
|
||||
}, jsx:decode(ErrMsg2)),
|
||||
#{
|
||||
<<"code">> := _,
|
||||
<<"message">> := <<"bridge not found">>
|
||||
},
|
||||
jsx:decode(ErrMsg2)
|
||||
),
|
||||
ok.
|
||||
|
||||
t_start_stop_bridges(_) ->
|
||||
lists:foreach(fun(Type) ->
|
||||
lists:foreach(
|
||||
fun(Type) ->
|
||||
do_start_stop_bridges(Type)
|
||||
end, [node, cluster]).
|
||||
end,
|
||||
[node, cluster]
|
||||
).
|
||||
|
||||
do_start_stop_bridges(Type) ->
|
||||
%% assert there are no bridges at first
|
||||
|
|
@@ -251,40 +296,40 @@ do_start_stop_bridges(Type) ->
|
|||
|
||||
Port = start_http_server(fun handle_fun_200_ok/2),
|
||||
URL1 = ?URL(Port, "abc"),
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
|
||||
),
|
||||
%ct:pal("the bridge ==== ~p", [Bridge]),
|
||||
#{ <<"type">> := ?BRIDGE_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME
|
||||
, <<"status">> := <<"connected">>
|
||||
, <<"node_status">> := [_|_]
|
||||
, <<"metrics">> := _
|
||||
, <<"node_metrics">> := [_|_]
|
||||
, <<"url">> := URL1
|
||||
} = jsx:decode(Bridge),
|
||||
#{
|
||||
<<"type">> := ?BRIDGE_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME,
|
||||
<<"status">> := <<"connected">>,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"metrics">> := _,
|
||||
<<"node_metrics">> := [_ | _],
|
||||
<<"url">> := URL1
|
||||
} = jsx:decode(Bridge),
|
||||
BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
|
||||
%% stop it
|
||||
{ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>),
|
||||
{ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []),
|
||||
?assertMatch(#{ <<"status">> := <<"disconnected">>
|
||||
}, jsx:decode(Bridge2)),
|
||||
?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)),
|
||||
%% start again
|
||||
{ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>),
|
||||
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
|
||||
?assertMatch(#{ <<"status">> := <<"connected">>
|
||||
}, jsx:decode(Bridge3)),
|
||||
?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)),
|
||||
%% restart an already started bridge
|
||||
{ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>),
|
||||
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
|
||||
?assertMatch(#{ <<"status">> := <<"connected">>
|
||||
}, jsx:decode(Bridge3)),
|
||||
?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)),
|
||||
%% stop it again
|
||||
{ok, 200, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>),
|
||||
%% restart a stopped bridge
|
||||
{ok, 200, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>),
|
||||
{ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []),
|
||||
?assertMatch(#{ <<"status">> := <<"connected">>
|
||||
}, jsx:decode(Bridge4)),
|
||||
?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge4)),
|
||||
%% delete the bridge
|
||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
|
||||
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []).
|
||||
|
|
@@ -295,33 +340,34 @@ t_enable_disable_bridges(_) ->
|
|||
|
||||
Port = start_http_server(fun handle_fun_200_ok/2),
|
||||
URL1 = ?URL(Port, "abc"),
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
|
||||
),
|
||||
%ct:pal("the bridge ==== ~p", [Bridge]),
|
||||
#{ <<"type">> := ?BRIDGE_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME
|
||||
, <<"status">> := <<"connected">>
|
||||
, <<"node_status">> := [_|_]
|
||||
, <<"metrics">> := _
|
||||
, <<"node_metrics">> := [_|_]
|
||||
, <<"url">> := URL1
|
||||
} = jsx:decode(Bridge),
|
||||
#{
|
||||
<<"type">> := ?BRIDGE_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME,
|
||||
<<"status">> := <<"connected">>,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"metrics">> := _,
|
||||
<<"node_metrics">> := [_ | _],
|
||||
<<"url">> := URL1
|
||||
} = jsx:decode(Bridge),
|
||||
BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
|
||||
%% disable it
|
||||
{ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>),
|
||||
{ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []),
|
||||
?assertMatch(#{ <<"status">> := <<"disconnected">>
|
||||
}, jsx:decode(Bridge2)),
|
||||
?assertMatch(#{<<"status">> := <<"disconnected">>}, jsx:decode(Bridge2)),
|
||||
%% enable again
|
||||
{ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>),
|
||||
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
|
||||
?assertMatch(#{ <<"status">> := <<"connected">>
|
||||
}, jsx:decode(Bridge3)),
|
||||
?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)),
|
||||
%% enable an already started bridge
|
||||
{ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>),
|
||||
{ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []),
|
||||
?assertMatch(#{ <<"status">> := <<"connected">>
|
||||
}, jsx:decode(Bridge3)),
|
||||
?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)),
|
||||
%% disable it again
|
||||
{ok, 200, <<>>} = request(post, operation_path(cluster, disable, BridgeID), <<"">>),
|
||||
|
||||
|
|
@@ -331,8 +377,7 @@ t_enable_disable_bridges(_) ->
|
|||
%% enable a stopped bridge
|
||||
{ok, 200, <<>>} = request(post, operation_path(cluster, enable, BridgeID), <<"">>),
|
||||
{ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []),
|
||||
?assertMatch(#{ <<"status">> := <<"connected">>
|
||||
}, jsx:decode(Bridge4)),
|
||||
?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge4)),
|
||||
%% delete the bridge
|
||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []),
|
||||
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []).
|
||||
|
|
@@ -343,17 +388,21 @@ t_reset_bridges(_) ->
|
|||
|
||||
Port = start_http_server(fun handle_fun_200_ok/2),
|
||||
URL1 = ?URL(Port, "abc"),
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, ?BRIDGE_NAME)
|
||||
),
|
||||
%ct:pal("the bridge ==== ~p", [Bridge]),
|
||||
#{ <<"type">> := ?BRIDGE_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME
|
||||
, <<"status">> := <<"connected">>
|
||||
, <<"node_status">> := [_|_]
|
||||
, <<"metrics">> := _
|
||||
, <<"node_metrics">> := [_|_]
|
||||
, <<"url">> := URL1
|
||||
} = jsx:decode(Bridge),
|
||||
#{
|
||||
<<"type">> := ?BRIDGE_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME,
|
||||
<<"status">> := <<"connected">>,
|
||||
<<"node_status">> := [_ | _],
|
||||
<<"metrics">> := _,
|
||||
<<"node_metrics">> := [_ | _],
|
||||
<<"url">> := URL1
|
||||
} = jsx:decode(Bridge),
|
||||
BridgeID = emqx_bridge:bridge_id(?BRIDGE_TYPE, ?BRIDGE_NAME),
|
||||
{ok, 200, <<"Reset success">>} = request(put, uri(["bridges", BridgeID, "reset_metrics"]), []),
|
||||
|
||||
|
|
|
|||
|
|
@@ -941,26 +941,15 @@ until the RPC connection is considered lost."""
|
|||
log_file_handlers {
|
||||
desc {
|
||||
en: """Key-value list of file-based log handlers."""
|
||||
zh: """基于文件的日志处理进程的键值列表。"""
|
||||
zh: """需要持久化到文件的日志处理进程列表。默认只有 default 一个处理进程。"""
|
||||
}
|
||||
label {
|
||||
en: "Log Handlers Key Val List"
|
||||
zh: "日志处理进程键值列表"
|
||||
zh: "日志 Handler 列表"
|
||||
}
|
||||
}
|
||||
|
||||
log_error_logger {
|
||||
desc {
|
||||
en: """Deprecated."""
|
||||
zh: """该配置已弃用。"""
|
||||
}
|
||||
label {
|
||||
en: "Deprecate"
|
||||
zh: "配置已弃用"
|
||||
}
|
||||
}
|
||||
|
||||
console_handler_enable {
|
||||
common_handler_enable {
|
||||
desc {
|
||||
en: """Enable this log handler."""
|
||||
zh: """启用此日志处理进程。"""
|
||||
|
|
@@ -971,21 +960,23 @@ until the RPC connection is considered lost."""
|
|||
}
|
||||
}
|
||||
|
||||
console_handler_level {
|
||||
common_handler_level {
|
||||
desc {
|
||||
en: """Global log level. This includes the primary log level and all log handlers."""
|
||||
zh: """全局日志级别。 这包括主日志级别和所有日志处理进程。"""
|
||||
zh: """设置日志级别。 默认为warning。"""
|
||||
}
|
||||
label {
|
||||
en: "Global Log Level"
|
||||
zh: "全局日志级别"
|
||||
zh: "日志级别"
|
||||
}
|
||||
}
|
||||
|
||||
console_handler_time_offset {
|
||||
common_handler_time_offset {
|
||||
desc {
|
||||
en: """The time offset to be used when formatting the timestamp."""
|
||||
zh: """格式化时间戳时,使用的时间偏移量。"""
|
||||
zh: """日志格式中的时间戳,使用的时间偏移量。默认使用系统时区system,当为utc为无时间偏移量
|
||||
为具体的N(1-24)数字时,则代表时间偏移量+N。
|
||||
"""
|
||||
}
|
||||
label {
|
||||
en: "Time Offset"
|
||||
|
|
@@ -993,10 +984,10 @@ until the RPC connection is considered lost."""
|
|||
}
|
||||
}
|
||||
|
||||
console_handler_chars_limit {
|
||||
common_handler_chars_limit {
|
||||
desc {
|
||||
en: """Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated."""
|
||||
zh: """设置单个日志消息的最大长度。 如果超过此长度,则日志消息将被截断。"""
|
||||
zh: """设置单个日志消息的最大长度。 如果超过此长度,则日志消息将被截断。最小可设置的长度为100。"""
|
||||
}
|
||||
label {
|
||||
en: "Single Log Max Length"
|
||||
|
|
@@ -1004,10 +995,10 @@ until the RPC connection is considered lost."""
|
|||
}
|
||||
}
|
||||
|
||||
console_handler_formatter {
|
||||
common_handler_formatter {
|
||||
desc {
|
||||
en: """Choose log format. <code>text</code> for free text, and <code>json</code> for structured logging."""
|
||||
zh: """选择日志格式。 <code>text</code> 用于自由文本,<code>json</code> 用于结构化日志记录。"""
|
||||
zh: """选择日志格式。 <code>text</code> 用于纯文本,<code>json</code> 用于结构化日志记录。"""
|
||||
}
|
||||
label {
|
||||
en: "Log Format"
|
||||
|
|
@@ -1015,10 +1006,10 @@ until the RPC connection is considered lost."""
|
|||
}
|
||||
}
|
||||
|
||||
console_handler_single_line {
|
||||
common_handler_single_line {
|
||||
desc {
|
||||
en: """Print logs in a single line if set to true. Otherwise, log messages may span multiple lines."""
|
||||
zh: """如果设置为 true,则在一行中打印日志。 否则,日志消息可能跨越多行。"""
|
||||
zh: """如果设置为 true,则单行打印日志。 否则,日志消息可能跨越多行。"""
|
||||
}
|
||||
label {
|
||||
en: "Single Line Mode"
|
||||
|
|
@@ -1026,10 +1017,24 @@ until the RPC connection is considered lost."""
        }
    }

    console_handler_sync_mode_qlen {
    common_handler_sync_mode_qlen {
        desc {
            en: """As long as the number of buffered log events is lower than this value, all log events are handled asynchronously."""
            zh: """只要缓冲的日志事件的数量低于这个值,所有的日志事件都会被异步处理。"""
            en: """As long as the number of buffered log events is lower than this value,
all log events are handled asynchronously. This means that the client process sending the log event,
by calling a log function in the Logger API, does not wait for a response from the handler
but continues executing immediately after the event is sent.
It is not affected by the time it takes the handler to print the event to the log device.
If the message queue grows larger than this value,
the handler starts handling log events synchronously instead,
meaning that the client process sending the event must wait for a response.
When the handler reduces the message queue to a level below the sync_mode_qlen threshold,
asynchronous operation is resumed.
"""
            zh: """只要缓冲的日志事件的数量低于这个值,所有的日志事件都会被异步处理。
这意味着,日志落地速度不会影响正常的业务进程,因为它们不需要等待日志处理进程的响应。
如果消息队列的增长超过了这个值,处理程序开始同步处理日志事件。也就是说,发送事件的客户进程必须等待响应。
当处理程序将消息队列减少到低于sync_mode_qlen阈值的水平时,异步操作就会恢复。
默认为100条信息,当等待的日志事件大于100条时,就开始同步处理日志。"""
        }
        label {
            en: "Sync Mode Max Log Events"

@@ -1037,10 +1042,17 @@ until the RPC connection is considered lost."""
        }
    }

    console_handler_drop_mode_qlen {
    common_handler_drop_mode_qlen {
        desc {
            en: """When the number of buffered log events is larger than this value, the new log events are dropped.</br>When drop mode is activated or deactivated, a message is printed in the logs."""
            zh: """当缓冲的日志事件数大于此值时,新的日志事件将被丢弃。</br>启用或停用丢弃模式时,会在日志中打印一条消息。"""
            en: """When the number of buffered log events is larger than this value, the new log events are dropped.
When drop mode is activated or deactivated, a message is printed in the logs."""
            zh: """当缓冲的日志事件数大于此值时,新的日志事件将被丢弃。起到过载保护的功能。
为了使过载保护算法正常工作必须要:<code> sync_mode_qlen =< drop_mode_qlen =< flush_qlen </code> 且 drop_mode_qlen > 1
要禁用某些模式,请执行以下操作。
- 如果sync_mode_qlen被设置为0,所有的日志事件都被同步处理。也就是说,异步日志被禁用。
- 如果sync_mode_qlen被设置为与drop_mode_qlen相同的值,同步模式被禁用。也就是说,处理程序总是以异步模式运行,除非调用drop或flushing。
- 如果drop_mode_qlen被设置为与flush_qlen相同的值,则drop模式被禁用,永远不会发生。
"""
        }
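
The ordering constraint stated above can be captured in a minimal Erlang sketch (an illustrative helper, not part of EMQX): the three thresholds must satisfy sync_mode_qlen =< drop_mode_qlen =< flush_qlen, with drop_mode_qlen > 1.

%% Illustrative only; mirrors the documented invariant.
valid_qlens(Sync, Drop, Flush) when Sync =< Drop, Drop =< Flush, Drop > 1 -> true;
valid_qlens(_, _, _) -> false.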
        label {
            en: "Drop Mode Max Log Events"

@@ -1048,10 +1060,11 @@ until the RPC connection is considered lost."""
|
|||
}
|
||||
}
|
||||
|
||||
console_handler_flush_qlen {
|
||||
common_handler_flush_qlen {
|
||||
desc {
|
||||
en: """If the number of buffered log events grows larger than this threshold, a flush (delete) operation takes place. To flush events, the handler discards the buffered log messages without logging."""
|
||||
zh: """如果缓冲日志事件的数量增长大于此阈值,则会发生刷新(删除)操作。 为了完成刷新事件,处理进程丢弃缓冲的日志消息。"""
|
||||
zh: """如果缓冲日志事件的数量增长大于此阈值,则会发生刷新(删除)操作。 日志处理进程会丢弃缓冲的日志消息。
|
||||
来缓解自身不会由于内存瀑涨而影响其它业务进程。日志内容会提醒有多少事件被删除。"""
|
||||
}
|
||||
label {
|
||||
en: "Flush Threshold"
|
||||
|
|
@ -1059,14 +1072,14 @@ until the RPC connection is considered lost."""
|
|||
}
|
||||
}
|
||||
|
||||
console_handler_supervisor_reports {
|
||||
common_handler_supervisor_reports {
|
||||
desc {
|
||||
en: """Type of supervisor reports that are logged.
|
||||
- `error`: only log errors in the Erlang processes.
|
||||
- `progress`: log process startup."""
|
||||
zh: """ supervisor 报告的类型。
|
||||
zh: """ supervisor 报告的类型。默认为 error 类型。
|
||||
- `error`:仅记录 Erlang 进程中的错误。
|
||||
- `progress`:记录进程启动。"""
|
||||
- `progress`:除了 error 信息外,还需要记录进程启动的详细信息。"""
|
||||
}
|
||||
label {
|
||||
en: "Report Type"
|
||||
|
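These reports originate from OTP's logger; choosing `error` amounts to filtering the progress reports out. For reference, the plain OTP primitive that achieves the same effect on a handler (a sketch, not EMQX code; the `default` handler id is an assumption):

    %% Drop supervisor/application progress reports on the default handler.
    logger:add_handler_filter(default, stop_progress, {fun logger_filters:progress/2, stop}).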
@ -1074,7 +1087,7 @@ until the RPC connection is considered lost."""
}
}

console_handler_max_depth {
common_handler_max_depth {
desc {
en: """Maximum depth for Erlang term log formatting and Erlang process message queue inspection."""
zh: """Erlang 内部格式日志格式化和 Erlang 进程消息队列检查的最大深度。"""

@ -1088,7 +1101,7 @@ until the RPC connection is considered lost."""
log_file_handler_file {
desc {
en: """Name the log file."""
zh: """日志文件名字。"""
zh: """日志文件路径及名字。"""
}
label {
en: "Log File Name"

@ -1099,7 +1112,9 @@ until the RPC connection is considered lost."""
log_file_handler_max_size {
desc {
en: """This parameter controls log file rotation. The value `infinity` means the log file will grow indefinitely, otherwise the log file will be rotated once it reaches `max_size` in bytes."""
zh: """此参数控制日志文件轮换。 `infinity` 意味着日志文件将无限增长,否则日志文件将在达到 `max_size`(以字节为单位)时进行轮换。"""
zh: """此参数控制日志文件轮换。 `infinity` 意味着日志文件将无限增长,否则日志文件将在达到 `max_size`(以字节为单位)时进行轮换。
与 rotation count 配合使用。如果 count 为 10,则最多保留 10 个轮换文件。
"""
}
label {
en: "Rotation Size"
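Rotation size works together with the rotation file count described elsewhere in this section. A hedged sketch of the equivalent OTP-level handler options (handler id and file name are made up for illustration):

    %% Rotate the file at 50 MB and keep at most 10 archived files.
    logger:add_handler(my_file_log, logger_std_h, #{
        config => #{
            file => "log/emqx.log",
            max_no_bytes => 50 * 1024 * 1024,
            max_no_files => 10
        }
    }).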
@ -1107,128 +1122,14 @@ until the RPC connection is considered lost."""
}
}

log_file_handler_enable {
log_error_logger {
desc {
en: """Enable this log handler."""
zh: """启用此日志处理进程。"""
en: """Keep error_logger silent."""
zh: """让 error_logger 日志处理进程关闭,防止一条异常信息被记录多次。"""
}
label {
en: "Enable Log Handler"
zh: "启用此日志处理进程"
}
}

log_file_handler_level {
desc {
en: """Global log level. This includes the primary log level and all log handlers."""
zh: """全局日志级别。 这包括主日志级别和所有日志处理进程。"""
}
label {
en: "Global Level"
zh: "全局日志级别"
}
}

log_file_handler_time_offset {
desc {
en: """The time offset to be used when formatting the timestamp."""
zh: """格式化时间戳时要使用的时间偏移量。"""
}
label {
en: "Time Offset"
zh: "时间偏移"
}
}

log_file_handler_chars_limit {
desc {
en: """Set the maximum length of a single log message. If this length is exceeded, the log message will be truncated."""
zh: """设置单个日志消息的最大长度。 如果超过此长度,则日志消息将被截断。"""
}
label {
en: "Single Log Max Length"
zh: "单个日志消息最大长度"
}
}

log_file_handler_formatter {
desc {
en: """Choose log format. <code>text</code> for free text, and <code>json</code> for structured logging."""
zh: """选择日志格式。 <code>text</code> 用于自由文本,<code>json</code> 用于结构化日志记录。"""
}
label {
en: "Log Format"
zh: "日志格式"
}
}

log_file_handler_single_line {
desc {
en: """Print logs in a single line if set to true. Otherwise, log messages may span multiple lines."""
zh: """如果设置为 true,则在一行中打印日志。 否则,日志消息可能跨越多行。"""
}
label {
en: "Single Line Mode"
zh: "单行模式"
}
}

log_file_handler_sync_mode_qlen {
desc {
en: """As long as the number of buffered log events is lower than this value, all log events are handled asynchronously."""
zh: """只要缓冲的日志事件的数量低于这个值,所有的日志事件都会被异步处理。"""
}
label {
en: "Sync Mode Max Log Events"
zh: "异步模式最大事件数"
}
}

log_file_handler_drop_mode_qlen {
desc {
en: """When the number of buffered log events is larger than this value, the new log events are dropped.</br>When drop mode is activated or deactivated, a message is printed in the logs."""
zh: """当缓冲的日志事件数大于此值时,新的日志事件将被丢弃。</br>启用或停用丢弃模式时,会在日志中打印一条消息。"""
}
label {
en: "Drop Mode Max Log Events"
zh: "缓存最大日志事件数"
}
}

log_file_handler_flush_qlen {
desc {
en: """If the number of buffered log events grows larger than this threshold, a flush (delete) operation takes place. To flush events, the handler discards the buffered log messages without logging."""
zh: """如果缓冲日志事件的数量增长大于此阈值,则会发生刷新(删除)操作。 为了完成刷新事件,处理进程丢弃缓冲的日志消息。"""
}
label {
en: "Flush Threshold"
zh: "刷新阈值"
}
}

log_file_handler_supervisor_reports {
desc {
en: """Type of supervisor reports that are logged.
- `error`: only log errors in the Erlang processes.
- `progress`: log process startup."""
zh: """ supervisor 报告的类型。
- `error`:仅记录 Erlang 进程中的错误。
- `progress`:记录进程启动。"""
}
label {
en: "Report Type"
zh: "报告类型"
}
}

log_file_handler_max_depth {
desc {
en: """Maximum depth for Erlang term log formatting and Erlang process message queue inspection."""
zh: """Erlang 内部格式日志格式化和 Erlang 进程消息队列检查的最大深度。"""
}
label {
en: "Max Depth"
zh: "最大深度"
en: "error_logger"
zh: "error_logger"
}
}
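The new `log_error_logger` option maps onto the kernel application's `error_logger` parameter (see the `mapping => "kernel.error_logger"` schema change further down). In sys.config terms the effect is simply:

    %% Silence the legacy error_logger so a crash is reported once,
    %% by the regular logger handlers only.
    [{kernel, [{error_logger, silent}]}].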
@ -1257,11 +1158,11 @@ until the RPC connection is considered lost."""
log_overload_kill_enable {
desc {
en: """Enable log handler overload kill feature."""
zh: """启用日志处理进程过载终止功能。"""
zh: """日志处理进程过载时,为保障节点上其它业务正常运行,强制杀死日志处理进程。"""
}
label {
en: "Log Handler Overload Kill"
zh: "日志处理进程过载终止"
zh: "日志处理进程过载保护"
}
}
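This, too, is a thin wrapper over the OTP logger handler options. A sketch of the corresponding OTP call with the defaults this schema uses (the `default` handler id is an assumption):

    %% Kill the handler once its queue exceeds 20000 events,
    %% then restart it automatically 5 seconds later.
    logger:update_handler_config(default, config, #{
        overload_kill_enable => true,
        overload_kill_qlen => 20000,
        overload_kill_restart_after => 5000
    }).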
@ -1290,22 +1191,22 @@ until the RPC connection is considered lost."""
log_overload_kill_restart_after {
desc {
en: """If the handler is terminated, it restarts automatically after a delay specified in milliseconds. The value `infinity` prevents restarts."""
zh: """如果处理进程终止,它会在以毫秒为单位指定的延迟后自动重新启动。 `infinity` 防止重新启动。"""
zh: """如果处理进程终止,它会在指定的时间后自动重新启动。 `infinity` 表示不自动重启。"""
}
label {
en: "Handler Restart Delay"
zh: "处理进程重启延迟"
en: "Handler Restart Timer"
zh: "处理进程重启机制"
}
}

log_burst_limit_enable {
desc {
en: """Enable log burst control feature."""
zh: """启用日志突发控制功能。"""
zh: """启用日志限流保护机制。"""
}
label {
en: "Enable Burst"
zh: "启用日志突发控制"
zh: "日志限流保护"
}
}
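Burst control also exists natively as OTP logger handler options; roughly, and under the same assumptions as the sketches above:

    %% Allow at most 10000 log events per 1000 ms window, drop the excess.
    logger:update_handler_config(default, config, #{
        burst_limit_enable => true,
        burst_limit_max_count => 10000,
        burst_limit_window_time => 1000
    }).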
@ -1509,10 +1410,10 @@ By default, the logs are stored in `./log` directory (for installation from zip
This section of the configuration controls the number of files kept for each log handler.
"""
zh:
"""
默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。</br>
这部分配置,控制每个日志处理进程保留的文件数量。
"""
"""
默认情况下,日志存储在 `./log` 目录(用于从 zip 文件安装)或 `/var/log/emqx`(用于二进制安装)。</br>
这部分配置,控制每个日志处理进程保留的文件数量。
"""
}
label {
en: "Log Rotation"

@ -1568,5 +1469,4 @@ Log burst limit feature can temporarily disable logging to avoid these issues.""
zh: "授权"
}
}

}

@ -42,6 +42,8 @@
code_change/3
]).

-export([get_tables_status/0]).

-export_type([txn_id/0, succeed_num/0, multicall_return/1, multicall_return/0]).

-ifdef(TEST).
@ -172,6 +174,29 @@ get_node_tnx_id(Node) ->
[#cluster_rpc_commit{tnx_id = TnxId}] -> TnxId
end.

%% Checks whether the Mnesia tables used by this module are waiting to
%% be loaded and from where.
-spec get_tables_status() -> #{atom() => {waiting, [node()]} | {disc | network, node()}}.
get_tables_status() ->
maps:from_list([
{Tab, do_get_tables_status(Tab)}
|| Tab <- [?CLUSTER_COMMIT, ?CLUSTER_MFA]
]).

do_get_tables_status(Tab) ->
Props = mnesia:table_info(Tab, all),
TabNodes = proplists:get_value(all_nodes, Props),
KnownDown = mnesia_recover:get_mnesia_downs(),
LocalNode = node(),
case proplists:get_value(load_node, Props) of
unknown ->
{waiting, TabNodes -- [LocalNode | KnownDown]};
LocalNode ->
{disc, LocalNode};
Node ->
{network, Node}
end.
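An illustration of the shape this returns, from a hypothetical shell session (node name invented; assuming ?CLUSTER_COMMIT and ?CLUSTER_MFA expand to the table names shown):

    1> emqx_cluster_rpc:get_tables_status().
    #{cluster_rpc_commit => {disc, 'n1@127.0.0.1'},
      cluster_rpc_mfa => {disc, 'n1@127.0.0.1'}}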
%% Regardless of what MFA is returned, consider it a success,
%% then move to the next tnxId.
%% If the next TnxId fails, call the function again to skip it.

@ -24,6 +24,8 @@
-include_lib("emqx/include/logger.hrl").
-include("emqx_conf.hrl").

-define(DEFAULT_INIT_TXN_ID, -1).

start(_StartType, _StartArgs) ->
init_conf(),
emqx_conf_sup:start_link().

@ -31,19 +33,48 @@ start(_StartType, _StartArgs) ->
stop(_State) ->
ok.

%% internal functions
get_override_config_file() ->
Node = node(),
case emqx_app:get_init_config_load_done() of
false ->
{error, #{node => Node, msg => "init_conf_load_not_done"}};
true ->
case erlang:whereis(emqx_config_handler) of
undefined ->
{error, #{node => Node, msg => "emqx_config_handler_not_ready"}};
_ ->
Fun = fun() ->
TnxId = emqx_cluster_rpc:get_node_tnx_id(Node),
WallClock = erlang:statistics(wall_clock),
Conf = emqx_config_handler:get_raw_cluster_override_conf(),
#{wall_clock => WallClock, conf => Conf, tnx_id => TnxId, node => Node}
end,
case mria:ro_transaction(?CLUSTER_RPC_SHARD, Fun) of
{atomic, Res} -> {ok, Res};
{aborted, Reason} -> {error, #{node => Node, msg => Reason}}
end
end
end.

%% ------------------------------------------------------------------------------
%% Internal functions
%% ------------------------------------------------------------------------------

init_conf() ->
{ok, TnxId} = copy_override_conf_from_core_node(),
emqx_app:set_init_tnx_id(TnxId),
emqx_config:init_load(emqx_conf:schema_module()),
emqx_app:set_init_config_load_done().

cluster_nodes() ->
maps:get(running_nodes, ekka_cluster:info()) -- [node()].

copy_override_conf_from_core_node() ->
case mria_mnesia:running_nodes() -- [node()] of
case cluster_nodes() of
%% The first core node is self.
[] ->
?SLOG(debug, #{msg => "skip_copy_overide_conf_from_core_node"}),
{ok, -1};
{ok, ?DEFAULT_INIT_TXN_ID};
Nodes ->
{Results, Failed} = emqx_conf_proto_v1:get_override_config_file(Nodes),
{Ready, NotReady0} = lists:partition(fun(Res) -> element(1, Res) =:= ok end, Results),

@ -64,12 +95,39 @@ copy_override_conf_from_core_node() ->
[] ->
%% Other core nodes running but no one replicated it successfully.
?SLOG(error, #{
msg => "copy_overide_conf_from_core_node_failed",
msg => "copy_override_conf_from_core_node_failed",
nodes => Nodes,
failed => Failed,
not_ready => NotReady
}),
{error, "core node not ready"};

case should_proceed_with_boot() of
true ->
%% Act as if this node is alone, so it can
%% finish the boot sequence and load the
%% config for other nodes to copy it.
?SLOG(info, #{
msg => "skip_copy_overide_conf_from_core_node",
loading_from_disk => true,
nodes => Nodes,
failed => Failed,
not_ready => NotReady
}),
{ok, ?DEFAULT_INIT_TXN_ID};
false ->
%% retry in some time
Jitter = rand:uniform(2_000),
Timeout = 10_000 + Jitter,
?SLOG(info, #{
msg => "copy_overide_conf_from_core_node_retry",
timeout => Timeout,
nodes => Nodes,
failed => Failed,
not_ready => NotReady
}),
timer:sleep(Timeout),
copy_override_conf_from_core_node()
end;
_ ->
SortFun = fun(
{ok, #{wall_clock := W1}},

@ -79,7 +137,10 @@ copy_override_conf_from_core_node() ->
end,
[{ok, Info} | _] = lists:sort(SortFun, Ready),
#{node := Node, conf := RawOverrideConf, tnx_id := TnxId} = Info,
Msg = #{msg => "copy_overide_conf_from_core_node_success", node => Node},
Msg = #{
msg => "copy_overide_conf_from_core_node_success",
node => Node
},
?SLOG(debug, Msg),
ok = emqx_config:save_to_override_conf(
RawOverrideConf,

@ -89,28 +150,16 @@ copy_override_conf_from_core_node() ->
end
end.

get_override_config_file() ->
Node = node(),
Role = mria_rlog:role(),
case emqx_app:get_init_config_load_done() of
false ->
{error, #{node => Node, msg => "init_conf_load_not_done"}};
true when Role =:= core ->
case erlang:whereis(emqx_config_handler) of
undefined ->
{error, #{node => Node, msg => "emqx_config_handler_not_ready"}};
_ ->
Fun = fun() ->
TnxId = emqx_cluster_rpc:get_node_tnx_id(Node),
WallClock = erlang:statistics(wall_clock),
Conf = emqx_config_handler:get_raw_cluster_override_conf(),
#{wall_clock => WallClock, conf => Conf, tnx_id => TnxId, node => Node}
end,
case mria:ro_transaction(?CLUSTER_RPC_SHARD, Fun) of
{atomic, Res} -> {ok, Res};
{aborted, Reason} -> {error, #{node => Node, msg => Reason}}
end
end;
true when Role =:= replicant ->
{ignore, #{node => Node}}
should_proceed_with_boot() ->
TablesStatus = emqx_cluster_rpc:get_tables_status(),
LocalNode = node(),
case maps:get(?CLUSTER_COMMIT, TablesStatus) of
{disc, LocalNode} ->
%% Loading locally; let this node finish its boot sequence
%% so others can copy the config from this one.
true;
_ ->
%% Loading from another node or still waiting for nodes to
%% be up. Try again.
false
end.
@ -800,6 +800,7 @@ fields("log") ->
#{
mapping => "kernel.error_logger",
default => silent,
'readOnly' => true,
desc => ?DESC("log_error_logger")
}
)}

@ -811,7 +812,10 @@ fields("log_file_handler") ->
{"file",
sc(
file(),
#{desc => ?DESC("log_file_handler_file")}
#{
desc => ?DESC("log_file_handler_file"),
validator => fun file_location/1
}
)},
{"rotation",
sc(

@ -822,7 +826,7 @@ fields("log_file_handler") ->
sc(
hoconsc:union([infinity, emqx_schema:bytesize()]),
#{
default => "10MB",
default => "50MB",
desc => ?DESC("log_file_handler_max_size")
}
)}

@ -866,7 +870,7 @@ fields("log_overload_kill") ->
)},
{"qlen",
sc(
integer(),
pos_integer(),
#{
default => 20000,
desc => ?DESC("log_overload_kill_qlen")

@ -874,7 +878,7 @@ fields("log_overload_kill") ->
)},
{"restart_after",
sc(
hoconsc:union([emqx_schema:duration(), infinity]),
hoconsc:union([emqx_schema:duration_ms(), infinity]),
#{
default => "5s",
desc => ?DESC("log_overload_kill_restart_after")

@ -893,7 +897,7 @@ fields("log_burst_limit") ->
)},
{"max_count",
sc(
integer(),
pos_integer(),
#{
default => 10000,
desc => ?DESC("log_burst_limit_max_count")

@ -1073,7 +1077,7 @@ log_handler_common_confs() ->
boolean(),
#{
default => false,
desc => ?DESC("log_file_handler_enable")
desc => ?DESC("common_handler_enable")
}
)},
{"level",

@ -1081,7 +1085,7 @@ log_handler_common_confs() ->
log_level(),
#{
default => warning,
desc => ?DESC("log_file_handler_level")
desc => ?DESC("common_handler_level")
}
)},
{"time_offset",

@ -1089,15 +1093,15 @@ log_handler_common_confs() ->
string(),
#{
default => "system",
desc => ?DESC("log_file_handler_time_offset")
desc => ?DESC("common_handler_time_offset")
}
)},
{"chars_limit",
sc(
hoconsc:union([unlimited, range(1, inf)]),
hoconsc:union([unlimited, range(100, inf)]),
#{
default => unlimited,
desc => ?DESC("log_file_handler_chars_limit")
desc => ?DESC("common_handler_chars_limit")
}
)},
{"formatter",

@ -1105,7 +1109,7 @@ log_handler_common_confs() ->
hoconsc:enum([text, json]),
#{
default => text,
desc => ?DESC("log_file_handler_formatter")
desc => ?DESC("common_handler_formatter")
}
)},
{"single_line",

@ -1113,31 +1117,31 @@ log_handler_common_confs() ->
boolean(),
#{
default => true,
desc => ?DESC("log_file_handler_single_line")
desc => ?DESC("common_handler_single_line")
}
)},
{"sync_mode_qlen",
sc(
integer(),
non_neg_integer(),
#{
default => 100,
desc => ?DESC("log_file_handler_sync_mode_qlen")
desc => ?DESC("common_handler_sync_mode_qlen")
}
)},
{"drop_mode_qlen",
sc(
integer(),
pos_integer(),
#{
default => 3000,
desc => ?DESC("log_file_handler_drop_mode_qlen")
desc => ?DESC("common_handler_drop_mode_qlen")
}
)},
{"flush_qlen",
sc(
integer(),
pos_integer(),
#{
default => 8000,
desc => ?DESC("log_file_handler_flush_qlen")
desc => ?DESC("common_handler_flush_qlen")
}
)},
{"overload_kill", sc(ref("log_overload_kill"), #{})},

@ -1147,7 +1151,7 @@ log_handler_common_confs() ->
hoconsc:enum([error, progress]),
#{
default => error,
desc => ?DESC("log_file_handler_supervisor_reports")
desc => ?DESC("common_handler_supervisor_reports")
}
)},
{"max_depth",

@ -1155,7 +1159,7 @@ log_handler_common_confs() ->
hoconsc:union([unlimited, non_neg_integer()]),
#{
default => 100,
desc => ?DESC("log_file_handler_max_depth")
desc => ?DESC("common_handler_max_depth")
}
)}
].

@ -1328,3 +1332,15 @@ emqx_schema_high_prio_roots() ->
#{desc => ?DESC(authorization)}
)},
lists:keyreplace("authorization", 1, Roots, Authz).

-define(VALID_FILE, "^[/\_a-zA-Z0-9\.\-]*$").
file_location(File) ->
Error = {error, "Invalid file name: " ++ ?VALID_FILE},
try
case re:run(File, ?VALID_FILE) of
nomatch -> Error;
_ -> ok
end
catch
_:_ -> Error
end.
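The validator only accepts paths built from the whitelisted characters; anything else (spaces, shell metacharacters, and so on) is rejected. Illustrative calls, with the error value shown as the unexpanded macro:

    file_location("log/emqx.log").
    %% => ok
    file_location("log dir/emqx.log").
    %% => {error, "Invalid file name: " ++ ?VALID_FILE}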
@ -0,0 +1,222 @@
%%--------------------------------------------------------------------
%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
%% You may obtain a copy of the License at
%%
%% http://www.apache.org/licenses/LICENSE-2.0
%%
%% Unless required by applicable law or agreed to in writing, software
%% distributed under the License is distributed on an "AS IS" BASIS,
%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
%% See the License for the specific language governing permissions and
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_conf_app_SUITE).

-compile(export_all).
-compile(nowarn_export_all).

-include("emqx_conf.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("common_test/include/ct.hrl").
-include_lib("snabbkaffe/include/snabbkaffe.hrl").

all() ->
emqx_common_test_helpers:all(?MODULE).

t_copy_conf_override_on_restarts(_Config) ->
ct:timetrap({seconds, 120}),
snabbkaffe:fix_ct_logging(),
Cluster = cluster([core, core, core]),
try
%% 1. Start all nodes
Nodes = start_cluster(Cluster),
[join_cluster(Spec) || Spec <- Cluster],
assert_config_load_done(Nodes),

%% 2. Stop each in order.
lists:foreach(fun stop_slave/1, Nodes),

%% 3. Restart nodes in the same order. This should not
%% crash and eventually all nodes should be ready.
start_cluster_async(Cluster),

timer:sleep(15_000),

assert_config_load_done(Nodes),

ok
after
teardown_cluster(Cluster)
end.

%%------------------------------------------------------------------------------
%% Helper functions
%%------------------------------------------------------------------------------

assert_config_load_done(Nodes) ->
lists:foreach(
fun(Node) ->
Done = rpc:call(Node, emqx_app, get_init_config_load_done, []),
?assert(Done, #{node => Node})
end,
Nodes
).

start_cluster(Specs) ->
[start_slave(I) || I <- Specs].

start_cluster_async(Specs) ->
[
begin
spawn_link(fun() -> start_slave(I) end),
timer:sleep(7_000)
end
|| I <- Specs
].

cluster(Specs) ->
cluster(Specs, []).

cluster(Specs0, CommonEnv) ->
Specs1 = lists:zip(Specs0, lists:seq(1, length(Specs0))),
Specs = expand_node_specs(Specs1, CommonEnv),
CoreNodes = [node_id(Name) || {{core, Name, _}, _} <- Specs],
%% Assign grpc ports:
BaseGenRpcPort = 9000,
GenRpcPorts = maps:from_list([
{node_id(Name), {tcp, BaseGenRpcPort + Num}}
|| {{_, Name, _}, Num} <- Specs
]),
%% Set the default node of the cluster:
JoinTo =
case CoreNodes of
[First | _] -> #{join_to => First};
_ -> #{}
end,
[
JoinTo#{
name => Name,
node => node_id(Name),
env => [
{mria, core_nodes, CoreNodes},
{mria, node_role, Role},
{gen_rpc, tcp_server_port, BaseGenRpcPort + Number},
{gen_rpc, client_config_per_node, {internal, GenRpcPorts}}
| Env
],
number => Number,
role => Role
}
|| {{Role, Name, Env}, Number} <- Specs
].

start_apps(Node) ->
Handler = fun
(emqx) ->
application:set_env(emqx, boot_modules, []),
ok;
(_) ->
ok
end,
{Node, ok} =
{Node, rpc:call(Node, emqx_common_test_helpers, start_apps, [[emqx_conf], Handler])},
ok.

stop_apps(Node) ->
ok = rpc:call(Node, emqx_common_test_helpers, stop_apps, [[emqx_conf]]).

join_cluster(#{node := Node, join_to := JoinTo}) ->
case rpc:call(Node, ekka, join, [JoinTo]) of
ok -> ok;
ignore -> ok;
Err -> error({failed_to_join_cluster, #{node => Node, error => Err}})
end.

start_slave(#{node := Node, env := Env}) ->
%% We want VMs to only occupy a single core
CommonBeamOpts =
"+S 1:1 " ++
%% redirect logs to the master test node
" -master " ++ atom_to_list(node()) ++ " ",
%% We use `ct_slave' instead of `slave' because, in
%% `t_copy_conf_override_on_restarts', the nodes might be stuck
%% some time during boot up, and `slave' has a hard-coded boot
%% timeout.
{ok, Node} = ct_slave:start(
Node,
[
{erl_flags, CommonBeamOpts ++ ebin_path()},
{kill_if_fail, true},
{monitor_master, true},
{init_timeout, 30_000},
{startup_timeout, 30_000}
]
),

%% Load apps before setting the environment variables to avoid
%% overriding the environment during app start:
[rpc:call(Node, application, load, [App]) || App <- [gen_rpc]],
%% Disable gen_rpc listener by default:
Env1 = [{gen_rpc, tcp_server_port, false} | Env],
setenv(Node, Env1),
ok = start_apps(Node),
Node.

expand_node_specs(Specs, CommonEnv) ->
lists:map(
fun({Spec, Num}) ->
{
case Spec of
core ->
{core, gen_node_name(Num), CommonEnv};
replicant ->
{replicant, gen_node_name(Num), CommonEnv};
{Role, Name} when is_atom(Name) ->
{Role, Name, CommonEnv};
{Role, Env} when is_list(Env) ->
{Role, gen_node_name(Num), CommonEnv ++ Env};
{Role, Name, Env} ->
{Role, Name, CommonEnv ++ Env}
end,
Num
}
end,
Specs
).

setenv(Node, Env) ->
[rpc:call(Node, application, set_env, [App, Key, Val]) || {App, Key, Val} <- Env].

teardown_cluster(Specs) ->
Nodes = [I || #{node := I} <- Specs],
[rpc:call(I, emqx_common_test_helpers, stop_apps, [emqx_conf]) || I <- Nodes],
[stop_slave(I) || I <- Nodes],
ok.

stop_slave(Node) ->
ct_slave:stop(Node).

host() ->
[_, Host] = string:tokens(atom_to_list(node()), "@"),
Host.

node_id(Name) ->
list_to_atom(lists:concat([Name, "@", host()])).

gen_node_name(N) ->
list_to_atom("n" ++ integer_to_list(N)).

ebin_path() ->
string:join(["-pa" | paths()], " ").

paths() ->
[
Path
|| Path <- code:get_path(),
string:prefix(Path, code:lib_dir()) =:= nomatch,
string:str(Path, "_build/default/plugins") =:= 0
].
@ -24,13 +24,16 @@
-define(REDIS_DEFAULT_PORT, 6379).
-define(PGSQL_DEFAULT_PORT, 5432).

-define(SERVERS_DESC, "A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`
For each Node should be: ").
-define(SERVERS_DESC,
"A Node list for Cluster to connect to. The nodes should be separated with commas, such as: `Node[,Node].`\n"
"For each Node should be: "
).

-define(SERVER_DESC(TYPE, DEFAULT_PORT), "
The IPv4 or IPv6 address or the hostname to connect to.</br>
A host entry has the following form: `Host[:Port]`.</br>
The " ++ TYPE ++ " default port " ++ DEFAULT_PORT ++ " is used if `[:Port]` is not specified."
-define(SERVER_DESC(TYPE, DEFAULT_PORT),
"\n"
"The IPv4 or IPv6 address or the hostname to connect to.</br>\n"
"A host entry has the following form: `Host[:Port]`.</br>\n"
"The " ++ TYPE ++ " default port " ++ DEFAULT_PORT ++ " is used if `[:Port]` is not specified."
).

-define(THROW_ERROR(Str), erlang:throw({error, Str})).
@ -1,30 +1,32 @@
%% -*- mode: erlang -*-

{erl_opts, [
nowarn_unused_import,
debug_info
nowarn_unused_import,
debug_info
]}.

{deps, [
{emqx, {path, "../emqx"}},
{eldap2, {git, "https://github.com/emqx/eldap2", {tag, "v0.2.2"}}},
{mysql, {git, "https://github.com/emqx/mysql-otp", {tag, "1.7.1"}}},
{epgsql, {git, "https://github.com/emqx/epgsql", {tag, "4.7-emqx.2"}}},
%% NOTE: mind poolboy version when updating mongodb-erlang version
{mongodb, {git,"https://github.com/emqx/mongodb-erlang", {tag, "v3.0.13"}}},
%% NOTE: mind poolboy version when updating eredis_cluster version
{eredis_cluster, {git, "https://github.com/emqx/eredis_cluster", {tag, "0.7.1"}}},
%% mongodb-erlang uses a special fork https://github.com/comtihon/poolboy.git
%% (which has overflow_ttl feature added).
%% However, it references `{branch, "master"}` (commit 9c06a9a on 2021-04-07).
%% By accident, we have always been using the upstream fork due to
%% eredis_cluster's dependency getting resolved earlier.
%% Here we pin 1.5.2 to avoid surprises in the future.
{poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}},
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.5.0"}}}
{emqx, {path, "../emqx"}},
{eldap2, {git, "https://github.com/emqx/eldap2", {tag, "v0.2.2"}}},
{mysql, {git, "https://github.com/emqx/mysql-otp", {tag, "1.7.1"}}},
{epgsql, {git, "https://github.com/emqx/epgsql", {tag, "4.7-emqx.2"}}},
%% NOTE: mind poolboy version when updating mongodb-erlang version
{mongodb, {git, "https://github.com/emqx/mongodb-erlang", {tag, "v3.0.13"}}},
%% NOTE: mind poolboy version when updating eredis_cluster version
{eredis_cluster, {git, "https://github.com/emqx/eredis_cluster", {tag, "0.7.1"}}},
%% mongodb-erlang uses a special fork https://github.com/comtihon/poolboy.git
%% (which has overflow_ttl feature added).
%% However, it references `{branch, "master"}` (commit 9c06a9a on 2021-04-07).
%% By accident, we have always been using the upstream fork due to
%% eredis_cluster's dependency getting resolved earlier.
%% Here we pin 1.5.2 to avoid surprises in the future.
{poolboy, {git, "https://github.com/emqx/poolboy.git", {tag, "1.5.2"}}},
{emqtt, {git, "https://github.com/emqx/emqtt", {tag, "1.5.0"}}}
]}.

{shell, [
% {config, "config/sys.config"},
% {config, "config/sys.config"},
{apps, [emqx_connector]}
]}.

{project_plugins, [erlfmt]}.

@ -1,27 +1,27 @@
%% -*- mode: erlang -*-
{application, emqx_connector,
[{description, "An OTP application"},
{vsn, "0.1.1"},
{registered, []},
{mod, {emqx_connector_app, []}},
{applications,
[kernel,
stdlib,
ecpool,
emqx_resource,
eredis_cluster,
eredis,
epgsql,
eldap2,
mysql,
mongodb,
ehttpc,
emqx,
emqtt
]},
{env,[]},
{modules, []},
{application, emqx_connector, [
{description, "An OTP application"},
{vsn, "0.1.1"},
{registered, []},
{mod, {emqx_connector_app, []}},
{applications, [
kernel,
stdlib,
ecpool,
emqx_resource,
eredis_cluster,
eredis,
epgsql,
eldap2,
mysql,
mongodb,
ehttpc,
emqx,
emqtt
]},
{env, []},
{modules, []},

{licenses, ["Apache 2.0"]},
{links, []}
]}.
{licenses, ["Apache 2.0"]},
{links, []}
]}.
@ -15,47 +15,68 @@
%%--------------------------------------------------------------------
-module(emqx_connector).

-export([config_key_path/0]).
-export([
config_key_path/0,
pre_config_update/3,
post_config_update/5
]).

-export([ parse_connector_id/1
, connector_id/2
]).
-export([
parse_connector_id/1,
connector_id/2
]).

-export([ list_raw/0
, lookup_raw/1
, lookup_raw/2
, create_dry_run/2
, update/2
, update/3
, delete/1
, delete/2
]).

-export([ post_config_update/5
]).
-export([
list_raw/0,
lookup_raw/1,
lookup_raw/2,
create_dry_run/2,
update/2,
update/3,
delete/1,
delete/2
]).

config_key_path() ->
[connectors].

pre_config_update(Path, Conf, _OldConfig) when is_map(Conf) ->
case emqx_connector_ssl:convert_certs(filename:join(Path), Conf) of
{error, Reason} ->
{error, Reason};
{ok, ConfNew} ->
{ok, ConfNew}
end.

-dialyzer([{nowarn_function, [post_config_update/5]}, error_handling]).
post_config_update([connectors, Type, Name], '$remove', _, _OldConf, _AppEnvs) ->
post_config_update([connectors, Type, Name] = Path, '$remove', _, OldConf, _AppEnvs) ->
ConnId = connector_id(Type, Name),
try foreach_linked_bridges(ConnId, fun(#{type := BType, name := BName}) ->
try
foreach_linked_bridges(ConnId, fun(#{type := BType, name := BName}) ->
throw({dependency_bridges_exist, emqx_bridge:bridge_id(BType, BName)})
end)
catch throw:Error -> {error, Error}
end),
_ = emqx_connector_ssl:clear_certs(filename:join(Path), OldConf)
catch
throw:Error -> {error, Error}
end;
post_config_update([connectors, Type, Name], _Req, NewConf, OldConf, _AppEnvs) ->
ConnId = connector_id(Type, Name),
foreach_linked_bridges(ConnId,
foreach_linked_bridges(
ConnId,
fun(#{type := BType, name := BName}) ->
BridgeConf = emqx:get_config([bridges, BType, BName]),
case emqx_bridge:update(BType, BName, {BridgeConf#{connector => OldConf},
BridgeConf#{connector => NewConf}}) of
case
emqx_bridge:update(
BType,
BName,
{BridgeConf#{connector => OldConf}, BridgeConf#{connector => NewConf}}
)
of
ok -> ok;
{error, Reason} -> error({update_bridge_error, Reason})
end
end).
end
).

connector_id(Type0, Name0) ->
Type = bin(Type0),

@ -70,13 +91,22 @@ parse_connector_id(ConnectorId) ->

list_raw() ->
case get_raw_connector_conf() of
not_found -> [];
not_found ->
[];
Config ->
lists:foldl(fun({Type, NameAndConf}, Connectors) ->
lists:foldl(fun({Name, RawConf}, Acc) ->
[RawConf#{<<"type">> => Type, <<"name">> => Name} | Acc]
end, Connectors, maps:to_list(NameAndConf))
end, [], maps:to_list(Config))
lists:foldl(
fun({Type, NameAndConf}, Connectors) ->
lists:foldl(
fun({Name, RawConf}, Acc) ->
[RawConf#{<<"type">> => Type, <<"name">> => Name} | Acc]
end,
Connectors,
maps:to_list(NameAndConf)
)
end,
[],
maps:to_list(Config)
)
end.

lookup_raw(Id) when is_binary(Id) ->

@ -86,7 +116,8 @@ lookup_raw(Id) when is_binary(Id) ->
lookup_raw(Type, Name) ->
Path = [bin(P) || P <- [Type, Name]],
case get_raw_connector_conf() of
not_found -> {error, not_found};
not_found ->
{error, not_found};
Conf ->
case emqx_map_lib:deep_get(Path, Conf, not_found) of
not_found -> {error, not_found};

@ -113,7 +144,8 @@ delete(Type, Name) ->

get_raw_connector_conf() ->
case emqx:get_raw_config(config_key_path(), not_found) of
not_found -> not_found;
not_found ->
not_found;
RawConf ->
#{<<"connectors">> := Conf} =
emqx_config:fill_defaults(#{<<"connectors">> => RawConf}),

@ -125,8 +157,12 @@ bin(Str) when is_list(Str) -> list_to_binary(Str);
bin(Atom) when is_atom(Atom) -> atom_to_binary(Atom, utf8).

foreach_linked_bridges(ConnId, Do) ->
lists:foreach(fun
(#{raw_config := #{<<"connector">> := ConnId0}} = Bridge) when ConnId0 == ConnId ->
Do(Bridge);
(_) -> ok
end, emqx_bridge:list()).
lists:foreach(
fun
(#{raw_config := #{<<"connector">> := ConnId0}} = Bridge) when ConnId0 == ConnId ->
Do(Bridge);
(_) ->
ok
end,
emqx_bridge:list()
).
|
EXPR
catch
error:{invalid_connector_id, Id0} ->
{400, #{code => 'INVALID_ID', message => <<"invalid_connector_id: ", Id0/binary,
". Connector Ids must be of format {type}:{name}">>}}
end).
{400, #{
code => 'INVALID_ID',
message =>
<<"invalid_connector_id: ", Id0/binary,
". Connector Ids must be of format {type}:{name}">>
}}
end
).

namespace() -> "connector".

@ -58,21 +63,25 @@ error_schema(Codes, Message) when is_binary(Message) ->

put_request_body_schema() ->
emqx_dashboard_swagger:schema_with_examples(
emqx_connector_schema:put_request(), connector_info_examples(put)).
emqx_connector_schema:put_request(), connector_info_examples(put)
).

post_request_body_schema() ->
emqx_dashboard_swagger:schema_with_examples(
emqx_connector_schema:post_request(), connector_info_examples(post)).
emqx_connector_schema:post_request(), connector_info_examples(post)
).

get_response_body_schema() ->
emqx_dashboard_swagger:schema_with_examples(
emqx_connector_schema:get_response(), connector_info_examples(get)).
emqx_connector_schema:get_response(), connector_info_examples(get)
).

connector_info_array_example(Method) ->
[Config || #{value := Config} <- maps:values(connector_info_examples(Method))].

connector_info_examples(Method) ->
lists:foldl(fun(Type, Acc) ->
lists:foldl(
fun(Type, Acc) ->
SType = atom_to_list(Type),
maps:merge(Acc, #{
Type => #{

@ -80,11 +89,16 @@ connector_info_examples(Method) ->
value => info_example(Type, Method)
}
})
end, #{}, ?CONN_TYPES).
end,
#{},
?CONN_TYPES
).

info_example(Type, Method) ->
maps:merge(info_example_basic(Type),
method_example(Type, Method)).
maps:merge(
info_example_basic(Type),
method_example(Type, Method)
).

method_example(Type, Method) when Method == get; Method == post ->
SType = atom_to_list(Type),

@ -115,11 +129,17 @@ info_example_basic(mqtt) ->
}.

param_path_id() ->
[{id, mk(binary(),
#{ in => path
, example => <<"mqtt:my_mqtt_connector">>
, desc => ?DESC("id")
})}].
[
{id,
mk(
binary(),
#{
in => path,
example => <<"mqtt:my_mqtt_connector">>,
desc => ?DESC("id")
}
)}
].

schema("/connectors_test") ->
#{

@ -135,7 +155,6 @@ schema("/connectors_test") ->
}
}
};

schema("/connectors") ->
#{
'operationId' => '/connectors',

@ -145,8 +164,9 @@ schema("/connectors") ->
summary => <<"List connectors">>,
responses => #{
200 => emqx_dashboard_swagger:schema_with_example(
array(emqx_connector_schema:get_response()),
connector_info_array_example(get))
array(emqx_connector_schema:get_response()),
connector_info_array_example(get)
)
}
},
post => #{

@ -160,7 +180,6 @@ schema("/connectors") ->
}
}
};

schema("/connectors/:id") ->
#{
'operationId' => '/connectors/:id',

@ -185,7 +204,8 @@ schema("/connectors/:id") ->
200 => get_response_body_schema(),
404 => error_schema(['NOT_FOUND'], "Connector not found"),
400 => error_schema(['INVALID_ID'], "Bad connector ID")
}},
}
},
delete => #{
tags => [<<"connectors">>],
desc => ?DESC("conn_id_delete"),

@ -196,7 +216,8 @@ schema("/connectors/:id") ->
403 => error_schema(['DEPENDENCY_EXISTS'], "Cannot remove dependent connector"),
404 => error_schema(['NOT_FOUND'], "Delete failed, not found"),
400 => error_schema(['INVALID_ID'], "Bad connector ID")
}}
}
}
}.

'/connectors_test'(post, #{body := #{<<"type">> := ConnType} = Params}) ->

@ -209,67 +230,83 @@ schema("/connectors/:id") ->

'/connectors'(get, _Request) ->
{200, [format_resp(Conn) || Conn <- emqx_connector:list_raw()]};

'/connectors'(post, #{body := #{<<"type">> := ConnType, <<"name">> := ConnName} = Params}) ->
case emqx_connector:lookup_raw(ConnType, ConnName) of
{ok, _} ->
{400, error_msg('ALREADY_EXISTS', <<"connector already exists">>)};
{error, not_found} ->
case emqx_connector:update(ConnType, ConnName,
filter_out_request_body(Params)) of
case
emqx_connector:update(
ConnType,
ConnName,
filter_out_request_body(Params)
)
of
{ok, #{raw_config := RawConf}} ->
{201, format_resp(RawConf#{<<"type">> => ConnType,
<<"name">> => ConnName})};
{201,
format_resp(RawConf#{
<<"type">> => ConnType,
<<"name">> => ConnName
})};
{error, Error} ->
{400, error_msg('BAD_REQUEST', Error)}
end
end;

'/connectors'(post, _) ->
{400, error_msg('BAD_REQUEST', <<"missing some required fields: [name, type]">>)}.

'/connectors/:id'(get, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(Id,
?TRY_PARSE_ID(
Id,
case emqx_connector:lookup_raw(ConnType, ConnName) of
{ok, Conf} ->
{200, format_resp(Conf)};
{error, not_found} ->
{404, error_msg('NOT_FOUND', <<"connector not found">>)}
end);

end
);
'/connectors/:id'(put, #{bindings := #{id := Id}, body := Params0}) ->
Params = filter_out_request_body(Params0),
?TRY_PARSE_ID(Id,
?TRY_PARSE_ID(
Id,
case emqx_connector:lookup_raw(ConnType, ConnName) of
{ok, _} ->
case emqx_connector:update(ConnType, ConnName, Params) of
{ok, #{raw_config := RawConf}} ->
{200, format_resp(RawConf#{<<"type">> => ConnType,
<<"name">> => ConnName})};
{200,
format_resp(RawConf#{
<<"type">> => ConnType,
<<"name">> => ConnName
})};
{error, Error} ->
{500, error_msg('INTERNAL_ERROR', Error)}
end;
{error, not_found} ->
{404, error_msg('NOT_FOUND', <<"connector not found">>)}
end);

end
);
'/connectors/:id'(delete, #{bindings := #{id := Id}}) ->
?TRY_PARSE_ID(Id,
?TRY_PARSE_ID(
Id,
case emqx_connector:lookup_raw(ConnType, ConnName) of
{ok, _} ->
case emqx_connector:delete(ConnType, ConnName) of
{ok, _} ->
{204};
{error, {post_config_update, _, {dependency_bridges_exist, BridgeID}}} ->
{403, error_msg('DEPENDENCY_EXISTS',
<<"Cannot remove the connector as it's in use by a bridge: ",
BridgeID/binary>>)};
{403,
error_msg(
'DEPENDENCY_EXISTS',
<<"Cannot remove the connector as it's in use by a bridge: ",
BridgeID/binary>>
)};
{error, Error} ->
{500, error_msg('INTERNAL_ERROR', Error)}
end;
{error, not_found} ->
{404, error_msg('NOT_FOUND', <<"connector not found">>)}
end).
end
).

error_msg(Code, Msg) when is_binary(Msg) ->
#{code => Code, message => Msg};

@ -277,8 +314,11 @@ error_msg(Code, Msg) ->
#{code => Code, message => bin(io_lib:format("~p", [Msg]))}.

format_resp(#{<<"type">> := ConnType, <<"name">> := ConnName} = RawConf) ->
NumOfBridges = length(emqx_bridge:list_bridges_by_connector(
emqx_connector:connector_id(ConnType, ConnName))),
NumOfBridges = length(
emqx_bridge:list_bridges_by_connector(
emqx_connector:connector_id(ConnType, ConnName)
)
),
RawConf#{
<<"type">> => ConnType,
<<"name">> => ConnName,
@ -25,32 +25,34 @@
|
|||
-behaviour(emqx_resource).
|
||||
|
||||
%% callbacks of behaviour emqx_resource
|
||||
-export([ on_start/2
|
||||
, on_stop/2
|
||||
, on_query/4
|
||||
, on_health_check/2
|
||||
]).
|
||||
-export([
|
||||
on_start/2,
|
||||
on_stop/2,
|
||||
on_query/4,
|
||||
on_health_check/2
|
||||
]).
|
||||
|
||||
-type url() :: emqx_http_lib:uri_map().
|
||||
-reflect_type([url/0]).
|
||||
-typerefl_from_string({url/0, emqx_http_lib, uri_parse}).
|
||||
|
||||
-export([ roots/0
|
||||
, fields/1
|
||||
, desc/1
|
||||
, validations/0
|
||||
, namespace/0
|
||||
]).
|
||||
-export([
|
||||
roots/0,
|
||||
fields/1,
|
||||
desc/1,
|
||||
validations/0,
|
||||
namespace/0
|
||||
]).
|
||||
|
||||
-export([ check_ssl_opts/2
|
||||
]).
|
||||
-export([check_ssl_opts/2]).
|
||||
|
||||
-type connect_timeout() :: emqx_schema:duration() | infinity.
|
||||
-type pool_type() :: random | hash.
|
||||
|
||||
-reflect_type([ connect_timeout/0
|
||||
, pool_type/0
|
||||
]).
|
||||
-reflect_type([
|
||||
connect_timeout/0,
|
||||
pool_type/0
|
||||
]).
|
||||
|
||||
%%=====================================================================
|
||||
%% Hocon schema
|
||||
|
|
@ -61,63 +63,96 @@ roots() ->
|
|||
fields(config).
|
||||
|
||||
fields(config) ->
|
||||
[ {base_url,
|
||||
sc(url(),
|
||||
#{ required => true
|
||||
, validator => fun(#{query := _Query}) ->
|
||||
[
|
||||
{base_url,
|
||||
sc(
|
||||
url(),
|
||||
#{
|
||||
required => true,
|
||||
validator => fun
|
||||
(#{query := _Query}) ->
|
||||
{error, "There must be no query in the base_url"};
|
||||
(_) -> ok
|
||||
end
|
||||
, desc => ?DESC("base_url")
|
||||
})}
|
||||
, {connect_timeout,
|
||||
sc(emqx_schema:duration_ms(),
|
||||
#{ default => "15s"
|
||||
, desc => ?DESC("connect_timeout")
|
||||
})}
|
||||
, {max_retries,
|
||||
sc(non_neg_integer(),
|
||||
#{ default => 5
|
||||
, desc => ?DESC("max_retries")
|
||||
})}
|
||||
, {retry_interval,
|
||||
sc(emqx_schema:duration(),
|
||||
#{ default => "1s"
|
||||
, desc => ?DESC("retry_interval")
|
||||
})}
|
||||
, {pool_type,
|
||||
sc(pool_type(),
|
||||
#{ default => random
|
||||
, desc => ?DESC("pool_type")
|
||||
})}
|
||||
, {pool_size,
|
||||
sc(pos_integer(),
|
||||
#{ default => 8
|
||||
, desc => ?DESC("pool_size")
|
||||
})}
|
||||
, {enable_pipelining,
|
||||
sc(boolean(),
|
||||
#{ default => true
|
||||
, desc => ?DESC("enable_pipelining")
|
||||
})}
|
||||
, {request, hoconsc:mk(
|
||||
ref("request"),
|
||||
#{ default => undefined
|
||||
, required => false
|
||||
, desc => ?DESC("request")
|
||||
})}
|
||||
(_) ->
|
||||
ok
|
||||
end,
|
||||
desc => ?DESC("base_url")
|
||||
}
|
||||
)},
|
||||
{connect_timeout,
|
||||
sc(
|
||||
emqx_schema:duration_ms(),
|
||||
#{
|
||||
default => "15s",
|
||||
desc => ?DESC("connect_timeout")
|
||||
}
|
||||
)},
|
||||
{max_retries,
|
||||
sc(
|
||||
non_neg_integer(),
|
||||
#{
|
||||
default => 5,
|
||||
desc => ?DESC("max_retries")
|
||||
}
|
||||
)},
|
||||
{retry_interval,
|
||||
sc(
|
||||
emqx_schema:duration(),
|
||||
#{
|
||||
default => "1s",
|
||||
desc => ?DESC("retry_interval")
|
||||
}
|
||||
)},
|
||||
{pool_type,
|
||||
sc(
|
||||
pool_type(),
|
||||
#{
|
||||
default => random,
|
||||
desc => ?DESC("pool_type")
|
||||
}
|
||||
)},
|
||||
{pool_size,
|
||||
sc(
|
||||
pos_integer(),
|
||||
#{
|
||||
default => 8,
|
||||
desc => ?DESC("pool_size")
|
||||
}
|
||||
)},
|
||||
{enable_pipelining,
|
||||
sc(
|
||||
boolean(),
|
||||
#{
|
||||
default => true,
|
||||
desc => ?DESC("enable_pipelining")
|
||||
}
|
||||
)},
|
||||
{request,
|
||||
hoconsc:mk(
|
||||
ref("request"),
|
||||
#{
|
||||
default => undefined,
|
||||
required => false,
|
||||
desc => ?DESC("request")
|
||||
}
|
||||
)}
|
||||
] ++ emqx_connector_schema_lib:ssl_fields();
|
||||
|
||||
fields("request") ->
|
||||
[ {method, hoconsc:mk(hoconsc:enum([post, put, get, delete]), #{required => false, desc => ?DESC("method")})}
|
||||
, {path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})}
|
||||
, {body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})}
|
||||
, {headers, hoconsc:mk(map(), #{required => false, desc => ?DESC("headers")})}
|
||||
, {request_timeout,
|
||||
sc(emqx_schema:duration_ms(),
|
||||
#{ required => false
|
||||
, desc => ?DESC("request_timeout")
|
||||
})}
|
||||
[
|
||||
{method,
|
||||
hoconsc:mk(hoconsc:enum([post, put, get, delete]), #{
|
||||
required => false, desc => ?DESC("method")
|
||||
})},
|
||||
{path, hoconsc:mk(binary(), #{required => false, desc => ?DESC("path")})},
|
||||
{body, hoconsc:mk(binary(), #{required => false, desc => ?DESC("body")})},
|
||||
{headers, hoconsc:mk(map(), #{required => false, desc => ?DESC("headers")})},
|
||||
{request_timeout,
|
||||
sc(
|
||||
emqx_schema:duration_ms(),
|
||||
#{
|
||||
required => false,
|
||||
desc => ?DESC("request_timeout")
|
||||
}
|
||||
)}
|
||||
].
|
||||
|
||||
desc(config) ->
|
||||
|
|
@ -128,24 +163,34 @@ desc(_) ->
|
|||
undefined.
|
||||
|
||||
validations() ->
|
||||
[ {check_ssl_opts, fun check_ssl_opts/1} ].
|
||||
[{check_ssl_opts, fun check_ssl_opts/1}].
|
||||
|
||||
sc(Type, Meta) -> hoconsc:mk(Type, Meta).
|
||||
ref(Field) -> hoconsc:ref(?MODULE, Field).
|
||||
|
||||
%% ===================================================================
|
||||
|
||||
on_start(InstId, #{base_url := #{scheme := Scheme,
|
||||
host := Host,
|
||||
port := Port,
|
||||
path := BasePath},
|
||||
connect_timeout := ConnectTimeout,
|
||||
max_retries := MaxRetries,
|
||||
retry_interval := RetryInterval,
|
||||
pool_type := PoolType,
|
||||
pool_size := PoolSize} = Config) ->
|
||||
?SLOG(info, #{msg => "starting_http_connector",
|
||||
connector => InstId, config => Config}),
|
||||
on_start(
|
||||
InstId,
|
||||
#{
|
||||
base_url := #{
|
||||
scheme := Scheme,
|
||||
host := Host,
|
||||
port := Port,
|
||||
path := BasePath
|
||||
},
|
||||
connect_timeout := ConnectTimeout,
|
||||
max_retries := MaxRetries,
|
||||
retry_interval := RetryInterval,
|
||||
pool_type := PoolType,
|
||||
pool_size := PoolSize
|
||||
} = Config
|
||||
) ->
|
||||
?SLOG(info, #{
|
||||
msg => "starting_http_connector",
|
||||
connector => InstId,
|
||||
config => Config
|
||||
}),
|
||||
{Transport, TransportOpts} =
|
||||
case Scheme of
|
||||
http ->
|
||||
|
|
@ -155,16 +200,18 @@ on_start(InstId, #{base_url := #{scheme := Scheme,
|
|||
{tls, SSLOpts}
|
||||
end,
|
||||
NTransportOpts = emqx_misc:ipv6_probe(TransportOpts),
|
||||
PoolOpts = [ {host, Host}
|
||||
, {port, Port}
|
||||
, {connect_timeout, ConnectTimeout}
|
||||
, {retry, MaxRetries}
|
||||
, {retry_timeout, RetryInterval}
|
||||
, {keepalive, 30000}
|
||||
, {pool_type, PoolType}
|
||||
, {pool_size, PoolSize}
|
||||
, {transport, Transport}
|
||||
, {transport_opts, NTransportOpts}],
|
||||
PoolOpts = [
|
||||
{host, Host},
|
||||
{port, Port},
|
||||
{connect_timeout, ConnectTimeout},
|
||||
{retry, MaxRetries},
|
||||
{retry_timeout, RetryInterval},
|
||||
{keepalive, 30000},
|
||||
{pool_type, PoolType},
|
||||
{pool_size, PoolSize},
|
||||
{transport, Transport},
|
||||
{transport_opts, NTransportOpts}
|
||||
],
|
||||
PoolName = emqx_plugin_libs_pool:pool_name(InstId),
|
||||
State = #{
|
||||
pool_name => PoolName,
|
||||
|
|
@ -177,54 +224,84 @@ on_start(InstId, #{base_url := #{scheme := Scheme,
|
|||
case ehttpc_sup:start_pool(PoolName, PoolOpts) of
|
||||
{ok, _} -> {ok, State};
|
||||
{error, {already_started, _}} -> {ok, State};
|
||||
{error, Reason} ->
|
||||
{error, Reason}
|
||||
{error, Reason} -> {error, Reason}
|
||||
end.
|
||||
|
||||
on_stop(InstId, #{pool_name := PoolName}) ->
|
||||
?SLOG(info, #{msg => "stopping_http_connector",
|
||||
connector => InstId}),
|
||||
?SLOG(info, #{
|
||||
msg => "stopping_http_connector",
|
||||
connector => InstId
|
||||
}),
|
||||
ehttpc_sup:stop_pool(PoolName).
|
||||
|
||||
on_query(InstId, {send_message, Msg}, AfterQuery, State) ->
|
||||
case maps:get(request, State, undefined) of
|
||||
undefined -> ?SLOG(error, #{msg => "request_not_found", connector => InstId});
|
||||
undefined ->
|
||||
?SLOG(error, #{msg => "request_not_found", connector => InstId});
|
||||
Request ->
|
||||
#{method := Method, path := Path, body := Body, headers := Headers,
|
||||
request_timeout := Timeout} = process_request(Request, Msg),
|
||||
#{
|
||||
method := Method,
|
||||
path := Path,
|
||||
body := Body,
|
||||
headers := Headers,
|
||||
request_timeout := Timeout
|
||||
} = process_request(Request, Msg),
|
||||
on_query(InstId, {Method, {Path, Headers, Body}, Timeout}, AfterQuery, State)
|
||||
end;
|
||||
on_query(InstId, {Method, Request}, AfterQuery, State) ->
|
||||
on_query(InstId, {undefined, Method, Request, 5000}, AfterQuery, State);
|
||||
on_query(InstId, {Method, Request, Timeout}, AfterQuery, State) ->
|
||||
on_query(InstId, {undefined, Method, Request, Timeout}, AfterQuery, State);
|
||||
on_query(InstId, {KeyOrNum, Method, Request, Timeout}, AfterQuery,
|
||||
#{pool_name := PoolName, base_path := BasePath} = State) ->
|
||||
?TRACE("QUERY", "http_connector_received",
|
||||
#{request => Request, connector => InstId, state => State}),
|
||||
on_query(
|
||||
InstId,
|
||||
{KeyOrNum, Method, Request, Timeout},
|
||||
AfterQuery,
|
||||
#{pool_name := PoolName, base_path := BasePath} = State
|
||||
) ->
|
||||
?TRACE(
|
||||
"QUERY",
|
||||
"http_connector_received",
|
||||
#{request => Request, connector => InstId, state => State}
|
||||
),
|
||||
NRequest = formalize_request(Method, BasePath, Request),
|
||||
case Result = ehttpc:request(case KeyOrNum of
|
||||
undefined -> PoolName;
|
||||
_ -> {PoolName, KeyOrNum}
|
||||
end, Method, NRequest, Timeout) of
|
||||
case
|
||||
Result = ehttpc:request(
|
||||
case KeyOrNum of
|
||||
undefined -> PoolName;
|
||||
_ -> {PoolName, KeyOrNum}
|
||||
end,
|
||||
Method,
|
||||
NRequest,
|
||||
Timeout
|
||||
)
|
||||
of
|
||||
{error, Reason} ->
|
||||
?SLOG(error, #{msg => "http_connector_do_reqeust_failed",
|
||||
request => NRequest, reason => Reason,
|
||||
connector => InstId}),
|
||||
?SLOG(error, #{
|
||||
msg => "http_connector_do_reqeust_failed",
|
||||
request => NRequest,
|
||||
reason => Reason,
|
||||
connector => InstId
|
||||
}),
|
||||
emqx_resource:query_failed(AfterQuery);
|
||||
{ok, StatusCode, _} when StatusCode >= 200 andalso StatusCode < 300 ->
|
||||
emqx_resource:query_success(AfterQuery);
|
||||
{ok, StatusCode, _, _} when StatusCode >= 200 andalso StatusCode < 300 ->
|
||||
emqx_resource:query_success(AfterQuery);
|
||||
{ok, StatusCode, _} ->
|
||||
?SLOG(error, #{msg => "http connector do request, received error response",
|
||||
request => NRequest, connector => InstId,
|
||||
status_code => StatusCode}),
|
||||
?SLOG(error, #{
|
||||
msg => "http connector do request, received error response",
|
||||
request => NRequest,
|
||||
connector => InstId,
|
||||
status_code => StatusCode
|
||||
}),
|
||||
emqx_resource:query_failed(AfterQuery);
|
||||
{ok, StatusCode, _, _} ->
|
||||
?SLOG(error, #{msg => "http connector do request, received error response",
|
||||
request => NRequest, connector => InstId,
|
||||
status_code => StatusCode}),
|
||||
?SLOG(error, #{
|
||||
msg => "http connector do request, received error response",
|
||||
request => NRequest,
|
||||
connector => InstId,
|
||||
status_code => StatusCode
|
||||
}),
|
||||
emqx_resource:query_failed(AfterQuery)
|
||||
end,
|
||||
Result.

@@ -232,14 +309,16 @@ on_query(InstId, {KeyOrNum, Method, Request, Timeout}, AfterQuery,

on_health_check(_InstId, #{host := Host, port := Port, connect_timeout := Timeout} = State) ->
    case do_health_check(Host, Port, Timeout) of
        ok -> {ok, State};
        {error, Reason} ->
            {error, {http_health_check_failed, Reason}, State}
        {error, Reason} -> {error, {http_health_check_failed, Reason}, State}
    end.

do_health_check(Host, Port, Timeout) ->
    case gen_tcp:connect(Host, Port, emqx_misc:ipv6_probe([]), Timeout) of
        {ok, Sock} -> gen_tcp:close(Sock), ok;
        {error, Reason} -> {error, Reason}
        {ok, Sock} ->
            gen_tcp:close(Sock),
            ok;
        {error, Reason} ->
            {error, Reason}
    end.

%%--------------------------------------------------------------------
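
The probe itself is plain OTP; `emqx_misc:ipv6_probe/1` only decorates the socket options. A self-contained sketch of the same reachability check (module and function names are illustrative):

    -module(tcp_probe_demo).
    -export([probe/3]).

    %% Reachable if a TCP connection can be opened within TimeoutMs.
    probe(Host, Port, TimeoutMs) ->
        case gen_tcp:connect(Host, Port, [], TimeoutMs) of
            {ok, Sock} ->
                ok = gen_tcp:close(Sock),
                ok;
            {error, Reason} ->
                {error, Reason}
        end.

For example, `tcp_probe_demo:probe("localhost", 1883, 5000).` returns `ok` when a broker is listening locally.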

@@ -250,47 +329,64 @@ preprocess_request(undefined) ->
    undefined;
preprocess_request(Req) when map_size(Req) == 0 ->
    undefined;
preprocess_request(#{
        method := Method,
        path := Path,
        body := Body,
        headers := Headers
    } = Req) ->
    #{ method => emqx_plugin_libs_rule:preproc_tmpl(bin(Method))
     , path => emqx_plugin_libs_rule:preproc_tmpl(Path)
     , body => emqx_plugin_libs_rule:preproc_tmpl(Body)
     , headers => preproc_headers(Headers)
     , request_timeout => maps:get(request_timeout, Req, 30000)
     }.
preprocess_request(
    #{
        method := Method,
        path := Path,
        body := Body,
        headers := Headers
    } = Req
) ->
    #{
        method => emqx_plugin_libs_rule:preproc_tmpl(bin(Method)),
        path => emqx_plugin_libs_rule:preproc_tmpl(Path),
        body => emqx_plugin_libs_rule:preproc_tmpl(Body),
        headers => preproc_headers(Headers),
        request_timeout => maps:get(request_timeout, Req, 30000)
    }.

preproc_headers(Headers) when is_map(Headers) ->
    maps:fold(fun(K, V, Acc) ->
            [{
                emqx_plugin_libs_rule:preproc_tmpl(bin(K)),
                emqx_plugin_libs_rule:preproc_tmpl(bin(V))
            } | Acc]
        end, [], Headers);
preproc_headers(Headers) when is_list(Headers) ->
    lists:map(fun({K, V}) ->
            {
                emqx_plugin_libs_rule:preproc_tmpl(bin(K)),
                emqx_plugin_libs_rule:preproc_tmpl(bin(V))
            }
        end, Headers).
preproc_headers(Headers) when is_map(Headers) ->
    maps:fold(
        fun(K, V, Acc) ->
            [
                {
                    emqx_plugin_libs_rule:preproc_tmpl(bin(K)),
                    emqx_plugin_libs_rule:preproc_tmpl(bin(V))
                }
                | Acc
            ]
        end,
        [],
        Headers
    );
preproc_headers(Headers) when is_list(Headers) ->
    lists:map(
        fun({K, V}) ->
            {
                emqx_plugin_libs_rule:preproc_tmpl(bin(K)),
                emqx_plugin_libs_rule:preproc_tmpl(bin(V))
            }
        end,
        Headers
    ).
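
Both clauses normalize headers, arriving either as a map or as a proplist, into a list of template pairs. Stripped of the template compilation, the shape is this (identity version, illustrative):

    -module(headers_demo).
    -export([normalize/1]).

    %% Map input: fold into a list of pairs. List input: map over pairs.
    normalize(Headers) when is_map(Headers) ->
        maps:fold(fun(K, V, Acc) -> [{K, V} | Acc] end, [], Headers);
    normalize(Headers) when is_list(Headers) ->
        lists:map(fun({K, V}) -> {K, V} end, Headers).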

process_request(#{
        method := MethodTks,
        path := PathTks,
        body := BodyTks,
        headers := HeadersTks,
        request_timeout := ReqTimeout
    } = Conf, Msg) ->
    Conf#{ method => make_method(emqx_plugin_libs_rule:proc_tmpl(MethodTks, Msg))
         , path => emqx_plugin_libs_rule:proc_tmpl(PathTks, Msg)
         , body => process_request_body(BodyTks, Msg)
         , headers => proc_headers(HeadersTks, Msg)
         , request_timeout => ReqTimeout
         }.
process_request(
    #{
        method := MethodTks,
        path := PathTks,
        body := BodyTks,
        headers := HeadersTks,
        request_timeout := ReqTimeout
    } = Conf,
    Msg
) ->
    Conf#{
        method => make_method(emqx_plugin_libs_rule:proc_tmpl(MethodTks, Msg)),
        path => emqx_plugin_libs_rule:proc_tmpl(PathTks, Msg),
        body => process_request_body(BodyTks, Msg),
        headers => proc_headers(HeadersTks, Msg),
        request_timeout => ReqTimeout
    }.

process_request_body([], Msg) ->
    emqx_json:encode(Msg);

@@ -298,12 +394,15 @@ process_request_body(BodyTks, Msg) ->
    emqx_plugin_libs_rule:proc_tmpl(BodyTks, Msg).

proc_headers(HeaderTks, Msg) ->
    lists:map(fun({K, V}) ->
    lists:map(
        fun({K, V}) ->
            {
                emqx_plugin_libs_rule:proc_tmpl(K, Msg),
                emqx_plugin_libs_rule:proc_tmpl(V, Msg)
            }
        end, HeaderTks).
        end,
        HeaderTks
    ).

make_method(M) when M == <<"POST">>; M == <<"post">> -> post;
make_method(M) when M == <<"PUT">>; M == <<"put">> -> put;

@@ -315,19 +414,19 @@ check_ssl_opts(Conf) ->

check_ssl_opts(URLFrom, Conf) ->
    #{scheme := Scheme} = hocon_maps:get(URLFrom, Conf),
    SSL= hocon_maps:get("ssl", Conf),
    SSL = hocon_maps:get("ssl", Conf),
    case {Scheme, maps:get(enable, SSL, false)} of
        {http, false} -> true;
        {https, true} -> true;
        {_, _} -> false
    end.

formalize_request(Method, BasePath, {Path, Headers, _Body})
        when Method =:= get; Method =:= delete ->
formalize_request(Method, BasePath, {Path, Headers, _Body}) when
    Method =:= get; Method =:= delete
->
    formalize_request(Method, BasePath, {Path, Headers});
formalize_request(_Method, BasePath, {Path, Headers, Body}) ->
    {filename:join(BasePath, Path), Headers, Body};

formalize_request(_Method, BasePath, {Path, Headers}) ->
    {filename:join(BasePath, Path), Headers}.
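
`check_ssl_opts/2` is a two-input truth table: plain `http` must pair with TLS disabled and `https` with TLS enabled; any other combination is rejected. As a stand-alone sketch:

    -module(scheme_ssl_demo).
    -export([consistent/2]).

    %% http goes with ssl=false, https with ssl=true; all else is invalid.
    consistent(http, false) -> true;
    consistent(https, true) -> true;
    consistent(_Scheme, _SslEnabled) -> false.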

@@ -24,11 +24,12 @@
-behaviour(emqx_resource).

%% callbacks of behaviour emqx_resource
-export([ on_start/2
        , on_stop/2
        , on_query/4
        , on_health_check/2
        ]).
-export([
    on_start/2,
    on_stop/2,
    on_query/4,
    on_health_check/2
]).

-export([do_health_check/1]).

@@ -43,54 +44,84 @@ roots() ->
fields(_) -> [].

%% ===================================================================
on_start(InstId, #{servers := Servers0,
                   port := Port,
                   bind_dn := BindDn,
                   bind_password := BindPassword,
                   timeout := Timeout,
                   pool_size := PoolSize,
                   auto_reconnect := AutoReconn,
                   ssl := SSL} = Config) ->
    ?SLOG(info, #{msg => "starting_ldap_connector",
        connector => InstId, config => Config}),
    Servers = [begin proplists:get_value(host, S) end || S <- Servers0],
    SslOpts = case maps:get(enable, SSL) of
        true ->
            [{ssl, true},
             {sslopts, emqx_tls_lib:to_client_opts(SSL)}
            ];
        false -> [{ssl, false}]
    end,
    Opts = [{servers, Servers},
            {port, Port},
            {bind_dn, BindDn},
            {bind_password, BindPassword},
            {timeout, Timeout},
            {pool_size, PoolSize},
            {auto_reconnect, reconn_interval(AutoReconn)},
            {servers, Servers}],
on_start(
    InstId,
    #{
        servers := Servers0,
        port := Port,
        bind_dn := BindDn,
        bind_password := BindPassword,
        timeout := Timeout,
        pool_size := PoolSize,
        auto_reconnect := AutoReconn,
        ssl := SSL
    } = Config
) ->
    ?SLOG(info, #{
        msg => "starting_ldap_connector",
        connector => InstId,
        config => Config
    }),
    Servers = [
        begin
            proplists:get_value(host, S)
        end
     || S <- Servers0
    ],
    SslOpts =
        case maps:get(enable, SSL) of
            true ->
                [
                    {ssl, true},
                    {sslopts, emqx_tls_lib:to_client_opts(SSL)}
                ];
            false ->
                [{ssl, false}]
        end,
    Opts = [
        {servers, Servers},
        {port, Port},
        {bind_dn, BindDn},
        {bind_password, BindPassword},
        {timeout, Timeout},
        {pool_size, PoolSize},
        {auto_reconnect, reconn_interval(AutoReconn)},
        {servers, Servers}
    ],
    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ SslOpts) of
        ok -> {ok, #{poolname => PoolName}};
        ok -> {ok, #{poolname => PoolName}};
        {error, Reason} -> {error, Reason}
    end.

on_stop(InstId, #{poolname := PoolName}) ->
    ?SLOG(info, #{msg => "stopping_ldap_connector",
        connector => InstId}),
    ?SLOG(info, #{
        msg => "stopping_ldap_connector",
        connector => InstId
    }),
    emqx_plugin_libs_pool:stop_pool(PoolName).

on_query(InstId, {search, Base, Filter, Attributes}, AfterQuery, #{poolname := PoolName} = State) ->
    Request = {Base, Filter, Attributes},
    ?TRACE("QUERY", "ldap_connector_received",
        #{request => Request, connector => InstId, state => State}),
    case Result = ecpool:pick_and_do(
            PoolName,
            {?MODULE, search, [Base, Filter, Attributes]},
            no_handover) of
    ?TRACE(
        "QUERY",
        "ldap_connector_received",
        #{request => Request, connector => InstId, state => State}
    ),
    case
        Result = ecpool:pick_and_do(
            PoolName,
            {?MODULE, search, [Base, Filter, Attributes]},
            no_handover
        )
    of
        {error, Reason} ->
            ?SLOG(error, #{msg => "ldap_connector_do_request_failed",
                request => Request, connector => InstId, reason => Reason}),
            ?SLOG(error, #{
                msg => "ldap_connector_do_request_failed",
                request => Request,
                connector => InstId,
                reason => Reason
            }),
            emqx_resource:query_failed(AfterQuery);
        _ ->
            emqx_resource:query_success(AfterQuery)

@@ -107,38 +138,45 @@ reconn_interval(true) -> 15;
reconn_interval(false) -> false.

search(Conn, Base, Filter, Attributes) ->
    eldap2:search(Conn, [{base, Base},
                         {filter, Filter},
                         {attributes, Attributes},
                         {deref, eldap2:'derefFindingBaseObj'()}]).
    eldap2:search(Conn, [
        {base, Base},
        {filter, Filter},
        {attributes, Attributes},
        {deref, eldap2:'derefFindingBaseObj'()}
    ]).

%% ===================================================================
connect(Opts) ->
    Servers = proplists:get_value(servers, Opts, ["localhost"]),
    Port = proplists:get_value(port, Opts, 389),
    Timeout = proplists:get_value(timeout, Opts, 30),
    BindDn = proplists:get_value(bind_dn, Opts),
    Servers = proplists:get_value(servers, Opts, ["localhost"]),
    Port = proplists:get_value(port, Opts, 389),
    Timeout = proplists:get_value(timeout, Opts, 30),
    BindDn = proplists:get_value(bind_dn, Opts),
    BindPassword = proplists:get_value(bind_password, Opts),
    SslOpts = case proplists:get_value(ssl, Opts, false) of
        true ->
            [{sslopts, proplists:get_value(sslopts, Opts, [])}, {ssl, true}];
        false ->
            [{ssl, false}]
    end,
    LdapOpts = [{port, Port},
                {timeout, Timeout}] ++ SslOpts,
    SslOpts =
        case proplists:get_value(ssl, Opts, false) of
            true ->
                [{sslopts, proplists:get_value(sslopts, Opts, [])}, {ssl, true}];
            false ->
                [{ssl, false}]
        end,
    LdapOpts =
        [
            {port, Port},
            {timeout, Timeout}
        ] ++ SslOpts,
    {ok, LDAP} = eldap2:open(Servers, LdapOpts),
    ok = eldap2:simple_bind(LDAP, BindDn, BindPassword),
    {ok, LDAP}.

ldap_fields() ->
    [ {servers, fun servers/1}
    , {port, fun port/1}
    , {pool_size, fun emqx_connector_schema_lib:pool_size/1}
    , {bind_dn, fun bind_dn/1}
    , {bind_password, fun emqx_connector_schema_lib:password/1}
    , {timeout, fun duration/1}
    , {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
    [
        {servers, fun servers/1},
        {port, fun port/1},
        {pool_size, fun emqx_connector_schema_lib:pool_size/1},
        {bind_dn, fun bind_dn/1},
        {bind_password, fun emqx_connector_schema_lib:password/1},
        {timeout, fun duration/1},
        {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
    ].

servers(type) -> list();

@@ -159,14 +197,18 @@ duration(type) -> emqx_schema:duration_ms();
duration(_) -> undefined.

to_servers_raw(Servers) ->
    {ok, lists:map( fun(Server) ->
            case string:tokens(Server, ": ") of
                [Ip] ->
                    [{host, Ip}];
                [Ip, Port] ->
                    [{host, Ip}, {port, list_to_integer(Port)}]
            end
        end, string:tokens(str(Servers), ", "))}.
    {ok,
        lists:map(
            fun(Server) ->
                case string:tokens(Server, ": ") of
                    [Ip] ->
                        [{host, Ip}];
                    [Ip, Port] ->
                        [{host, Ip}, {port, list_to_integer(Port)}]
                end
            end,
            string:tokens(str(Servers), ", ")
        )}.

str(A) when is_atom(A) ->
    atom_to_list(A);
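
`to_servers_raw/1` is a two-level tokenizer: split the raw string on `", "` to get individual servers, then split each server on `": "` to separate an optional port. A runnable sketch of the same parsing (without the `{ok, _}` wrapper):

    -module(server_parse_demo).
    -export([parse/1]).

    %% "host1:389, host2" -> [[{host,"host1"},{port,389}], [{host,"host2"}]]
    parse(Servers) ->
        lists:map(
            fun(Server) ->
                case string:tokens(Server, ": ") of
                    [Host] -> [{host, Host}];
                    [Host, Port] -> [{host, Host}, {port, list_to_integer(Port)}]
                end
            end,
            string:tokens(Servers, ", ")
        ).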

@@ -24,11 +24,12 @@
-behaviour(emqx_resource).

%% callbacks of behaviour emqx_resource
-export([ on_start/2
        , on_stop/2
        , on_query/4
        , on_health_check/2
        ]).
-export([
    on_start/2,
    on_stop/2,
    on_query/4,
    on_health_check/2
]).

%% ecpool callback
-export([connect/1]).

@@ -40,57 +41,73 @@
-define(HEALTH_CHECK_TIMEOUT, 10000).

%% mongo servers don't need parse
-define( MONGO_HOST_OPTIONS
       , #{ host_type => hostname
          , default_port => ?MONGO_DEFAULT_PORT}).
-define(MONGO_HOST_OPTIONS, #{
    host_type => hostname,
    default_port => ?MONGO_DEFAULT_PORT
}).

%%=====================================================================
roots() ->
    [ {config, #{type => hoconsc:union(
        [ hoconsc:ref(?MODULE, single)
        , hoconsc:ref(?MODULE, rs)
        , hoconsc:ref(?MODULE, sharded)
        ])}}
    [
        {config, #{
            type => hoconsc:union(
                [
                    hoconsc:ref(?MODULE, single),
                    hoconsc:ref(?MODULE, rs),
                    hoconsc:ref(?MODULE, sharded)
                ]
            )
        }}
    ].
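
`roots/0` declares the config as a tagged union over three struct references, one per MongoDB topology. A minimal schema module in the same shape, assuming the `hoconsc` helpers exactly as used in this diff (module name and field sets are illustrative placeholders):

    -module(union_schema_demo).
    -behaviour(hocon_schema).
    -export([roots/0, fields/1]).

    roots() ->
        [
            {config, #{
                type => hoconsc:union([
                    hoconsc:ref(?MODULE, single),
                    hoconsc:ref(?MODULE, cluster)
                ])
            }}
        ].

    %% Each ref names a struct; real schemas list typed fields here.
    fields(single) -> [];
    fields(cluster) -> [].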

fields(single) ->
    [ {mongo_type, #{type => single,
          default => single,
          required => true,
          desc => ?DESC("single_mongo_type")}}
    , {server, fun server/1}
    , {w_mode, fun w_mode/1}
    [
        {mongo_type, #{
            type => single,
            default => single,
            required => true,
            desc => ?DESC("single_mongo_type")
        }},
        {server, fun server/1},
        {w_mode, fun w_mode/1}
    ] ++ mongo_fields();
fields(rs) ->
    [ {mongo_type, #{type => rs,
          default => rs,
          required => true,
          desc => ?DESC("rs_mongo_type")}}
    , {servers, fun servers/1}
    , {w_mode, fun w_mode/1}
    , {r_mode, fun r_mode/1}
    , {replica_set_name, fun replica_set_name/1}
    [
        {mongo_type, #{
            type => rs,
            default => rs,
            required => true,
            desc => ?DESC("rs_mongo_type")
        }},
        {servers, fun servers/1},
        {w_mode, fun w_mode/1},
        {r_mode, fun r_mode/1},
        {replica_set_name, fun replica_set_name/1}
    ] ++ mongo_fields();
fields(sharded) ->
    [ {mongo_type, #{type => sharded,
          default => sharded,
          required => true,
          desc => ?DESC("sharded_mongo_type")}}
    , {servers, fun servers/1}
    , {w_mode, fun w_mode/1}
    [
        {mongo_type, #{
            type => sharded,
            default => sharded,
            required => true,
            desc => ?DESC("sharded_mongo_type")
        }},
        {servers, fun servers/1},
        {w_mode, fun w_mode/1}
    ] ++ mongo_fields();
fields(topology) ->
    [ {pool_size, fun emqx_connector_schema_lib:pool_size/1}
    , {max_overflow, fun max_overflow/1}
    , {overflow_ttl, fun duration/1}
    , {overflow_check_period, fun duration/1}
    , {local_threshold_ms, fun duration/1}
    , {connect_timeout_ms, fun duration/1}
    , {socket_timeout_ms, fun duration/1}
    , {server_selection_timeout_ms, fun duration/1}
    , {wait_queue_timeout_ms, fun duration/1}
    , {heartbeat_frequency_ms, fun duration/1}
    , {min_heartbeat_frequency_ms, fun duration/1}
    [
        {pool_size, fun emqx_connector_schema_lib:pool_size/1},
        {max_overflow, fun max_overflow/1},
        {overflow_ttl, fun duration/1},
        {overflow_check_period, fun duration/1},
        {local_threshold_ms, fun duration/1},
        {connect_timeout_ms, fun duration/1},
        {socket_timeout_ms, fun duration/1},
        {server_selection_timeout_ms, fun duration/1},
        {wait_queue_timeout_ms, fun duration/1},
        {heartbeat_frequency_ms, fun duration/1},
        {min_heartbeat_frequency_ms, fun duration/1}
    ].

desc(single) ->

@@ -105,53 +122,68 @@ desc(_) ->
    undefined.

mongo_fields() ->
    [ {srv_record, fun srv_record/1}
    , {pool_size, fun emqx_connector_schema_lib:pool_size/1}
    , {username, fun emqx_connector_schema_lib:username/1}
    , {password, fun emqx_connector_schema_lib:password/1}
    , {auth_source, #{ type => binary()
                     , required => false
                     , desc => ?DESC("auth_source")
                     }}
    , {database, fun emqx_connector_schema_lib:database/1}
    , {topology, #{type => hoconsc:ref(?MODULE, topology), required => false}}
    [
        {srv_record, fun srv_record/1},
        {pool_size, fun emqx_connector_schema_lib:pool_size/1},
        {username, fun emqx_connector_schema_lib:username/1},
        {password, fun emqx_connector_schema_lib:password/1},
        {auth_source, #{
            type => binary(),
            required => false,
            desc => ?DESC("auth_source")
        }},
        {database, fun emqx_connector_schema_lib:database/1},
        {topology, #{type => hoconsc:ref(?MODULE, topology), required => false}}
    ] ++
    emqx_connector_schema_lib:ssl_fields().
        emqx_connector_schema_lib:ssl_fields().

%% ===================================================================

on_start(InstId, Config = #{mongo_type := Type,
                            pool_size := PoolSize,
                            ssl := SSL}) ->
    Msg = case Type of
              single -> "starting_mongodb_single_connector";
              rs -> "starting_mongodb_replica_set_connector";
              sharded -> "starting_mongodb_sharded_connector"
          end,
on_start(
    InstId,
    Config = #{
        mongo_type := Type,
        pool_size := PoolSize,
        ssl := SSL
    }
) ->
    Msg =
        case Type of
            single -> "starting_mongodb_single_connector";
            rs -> "starting_mongodb_replica_set_connector";
            sharded -> "starting_mongodb_sharded_connector"
        end,
    ?SLOG(info, #{msg => Msg, connector => InstId, config => Config}),
    NConfig = #{hosts := Hosts} = may_parse_srv_and_txt_records(Config),
    SslOpts = case maps:get(enable, SSL) of
        true ->
            [{ssl, true},
             {ssl_opts, emqx_tls_lib:to_client_opts(SSL)}
            ];
        false -> [{ssl, false}]
    end,
    SslOpts =
        case maps:get(enable, SSL) of
            true ->
                [
                    {ssl, true},
                    {ssl_opts, emqx_tls_lib:to_client_opts(SSL)}
                ];
            false ->
                [{ssl, false}]
        end,
    Topology = maps:get(topology, NConfig, #{}),
    Opts = [{mongo_type, init_type(NConfig)},
            {hosts, Hosts},
            {pool_size, PoolSize},
            {options, init_topology_options(maps:to_list(Topology), [])},
            {worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)}],
    Opts = [
        {mongo_type, init_type(NConfig)},
        {hosts, Hosts},
        {pool_size, PoolSize},
        {options, init_topology_options(maps:to_list(Topology), [])},
        {worker_options, init_worker_options(maps:to_list(NConfig), SslOpts)}
    ],
    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts) of
        ok -> {ok, #{poolname => PoolName, type => Type}};
        ok -> {ok, #{poolname => PoolName, type => Type}};
        {error, Reason} -> {error, Reason}
    end.

on_stop(InstId, #{poolname := PoolName}) ->
    ?SLOG(info, #{msg => "stopping_mongodb_connector",
        connector => InstId}),
    ?SLOG(info, #{
        msg => "stopping_mongodb_connector",
        connector => InstId
    }),
    emqx_plugin_libs_pool:stop_pool(PoolName).

on_query(InstId,

@@ -164,10 +196,32 @@ on_query(InstId,
    case ecpool:pick_and_do(PoolName,
            {?MODULE, mongo_query, [Action, Collection, Filter, Projector]},
            no_handover) of
on_query(
    InstId,
    {Action, Collection, Filter, Projector},
    AfterQuery,
    #{poolname := PoolName} = State
) ->
    Request = {Action, Collection, Selector, Projector},
    ?TRACE(
        "QUERY",
        "mongodb_connector_received",
        #{request => Request, connector => InstId, state => State}
    ),
    case
        ecpool:pick_and_do(
            PoolName,
            {?MODULE, mongo_query, [Action, Collection, Selector, Projector]},
            no_handover
        )
    of
        {error, Reason} ->
            ?SLOG(error, #{msg => "mongodb_connector_do_query_failed",
                request => Request, reason => Reason,
                connector => InstId}),
            ?SLOG(error, #{
                msg => "mongodb_connector_do_query_failed",
                request => Request,
                reason => Reason,
                connector => InstId
            }),
            emqx_resource:query_failed(AfterQuery),
            {error, Reason};
        {ok, Cursor} when is_pid(Cursor) ->

@@ -182,12 +236,16 @@ on_query(InstId,
on_health_check(InstId, #{poolname := PoolName} = State) ->
    case health_check(PoolName) of
        true ->
            ?tp(debug, emqx_connector_mongo_health_check, #{instance_id => InstId,
                status => ok}),
            ?tp(debug, emqx_connector_mongo_health_check, #{
                instance_id => InstId,
                status => ok
            }),
            {ok, State};
        false ->
            ?tp(warning, emqx_connector_mongo_health_check, #{instance_id => InstId,
                status => failed}),
            ?tp(warning, emqx_connector_mongo_health_check, #{
                instance_id => InstId,
                status => failed
            }),
            {error, health_check_failed, State}
    end.

@@ -204,36 +262,43 @@ check_worker_health(Worker) ->
            %% we don't care if this returns something or not, we just to test the connection
            try do_test_query(Conn) of
                {error, Reason} ->
                    ?SLOG(warning, #{msg => "mongo_connection_health_check_error",
                        worker => Worker,
                        reason => Reason}),
                    ?SLOG(warning, #{
                        msg => "mongo_connection_health_check_error",
                        worker => Worker,
                        reason => Reason
                    }),
                    false;
                _ ->
                    true
            catch
                Class:Error ->
                    ?SLOG(warning, #{msg => "mongo_connection_health_check_exception",
                        worker => Worker,
                        class => Class,
                        error => Error}),
                    ?SLOG(warning, #{
                        msg => "mongo_connection_health_check_exception",
                        worker => Worker,
                        class => Class,
                        error => Error
                    }),
                    false
            end;
        _ ->
            ?SLOG(warning, #{msg => "mongo_connection_health_check_error",
                worker => Worker,
                reason => worker_not_found}),
            ?SLOG(warning, #{
                msg => "mongo_connection_health_check_error",
                worker => Worker,
                reason => worker_not_found
            }),
            false
    end.

do_test_query(Conn) ->
    mongoc:transaction_query(
        Conn,
        fun(Conf = #{pool := Worker}) ->
            Query = mongoc:find_one_query(Conf, <<"foo">>, #{}, #{}, 0),
            mc_worker_api:find_one(Worker, Query)
        end,
        #{},
        ?HEALTH_CHECK_TIMEOUT).
        Conn,
        fun(Conf = #{pool := Worker}) ->
            Query = mongoc:find_one_query(Conf, <<"foo">>, #{}, #{}, 0),
            mc_worker_api:find_one(Worker, Query)
        end,
        #{},
        ?HEALTH_CHECK_TIMEOUT
    ).

connect(Opts) ->
    Type = proplists:get_value(mongo_type, Opts, single),

@@ -244,10 +309,8 @@ connect(Opts) ->

mongo_query(Conn, find, Collection, Filter, Projector) ->
    mongo_api:find(Conn, Collection, Filter, Projector);

mongo_query(Conn, find_one, Collection, Filter, Projector) ->
    mongo_api:find_one(Conn, Collection, Filter, Projector);

%% Todo xxx
mongo_query(_Conn, _Action, _Collection, _Filter, _Projector) ->
    ok.

@@ -298,7 +361,8 @@ init_worker_options([{r_mode, V} | R], Acc) ->
    init_worker_options(R, [{r_mode, V} | Acc]);
init_worker_options([_ | R], Acc) ->
    init_worker_options(R, Acc);
init_worker_options([], Acc) -> Acc.
init_worker_options([], Acc) ->
    Acc.

%% ===================================================================
%% Schema funcs

@@ -356,59 +420,76 @@ may_parse_srv_and_txt_records(#{server := Server} = Config) ->
may_parse_srv_and_txt_records(Config) ->
    may_parse_srv_and_txt_records_(Config).

may_parse_srv_and_txt_records_(#{mongo_type := Type,
                                 srv_record := false,
                                 servers := Servers} = Config) ->
may_parse_srv_and_txt_records_(
    #{
        mongo_type := Type,
        srv_record := false,
        servers := Servers
    } = Config
) ->
    case Type =:= rs andalso maps:is_key(replica_set_name, Config) =:= false of
        true ->
            error({missing_parameter, replica_set_name});
        false ->
            Config#{hosts => servers_to_bin(Servers)}
    end;
may_parse_srv_and_txt_records_(#{mongo_type := Type,
                                 srv_record := true,
                                 servers := Servers} = Config) ->
may_parse_srv_and_txt_records_(
    #{
        mongo_type := Type,
        srv_record := true,
        servers := Servers
    } = Config
) ->
    Hosts = parse_srv_records(Type, Servers),
    ExtraOpts = parse_txt_records(Type, Servers),
    maps:merge(Config#{hosts => Hosts}, ExtraOpts).

parse_srv_records(Type, Servers) ->
    Fun = fun(AccIn, {IpOrHost, _Port}) ->
        case inet_res:lookup("_mongodb._tcp."
                ++ ip_or_host_to_string(IpOrHost), in, srv) of
            [] ->
                error(service_not_found);
            Services ->
                [ [server_to_bin({Host, Port}) || {_, _, Port, Host} <- Services]
                | AccIn]
        end
    end,
        case
            inet_res:lookup(
                "_mongodb._tcp." ++
                    ip_or_host_to_string(IpOrHost),
                in,
                srv
            )
        of
            [] ->
                error(service_not_found);
            Services ->
                [
                    [server_to_bin({Host, Port}) || {_, _, Port, Host} <- Services]
                    | AccIn
                ]
        end
    end,
    Res = lists:foldl(Fun, [], Servers),
    case Type of
        single -> lists:nth(1, Res);
        _ -> Res
        _ -> Res
    end.
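
SRV discovery above is OTP's resolver doing the work: for `srv` lookups, `inet_res:lookup/3` returns `{Priority, Weight, Port, Host}` tuples. A stand-alone sketch (the domain argument is illustrative):

    -module(srv_demo).
    -export([lookup_hosts/1]).

    %% Resolve the MongoDB SRV record for Domain; return {Host, Port} pairs.
    lookup_hosts(Domain) ->
        case inet_res:lookup("_mongodb._tcp." ++ Domain, in, srv) of
            [] ->
                {error, service_not_found};
            Services ->
                {ok, [{Host, Port} || {_Prio, _Weight, Port, Host} <- Services]}
        end.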

parse_txt_records(Type, Servers) ->
    Fields = case Type of
                 rs -> ["authSource", "replicaSet"];
                 _ -> ["authSource"]
             end,
    Fields =
        case Type of
            rs -> ["authSource", "replicaSet"];
            _ -> ["authSource"]
        end,
    Fun = fun(AccIn, {IpOrHost, _Port}) ->
        case inet_res:lookup(IpOrHost, in, txt) of
            [] ->
                #{};
            [[QueryString]] ->
                case uri_string:dissect_query(QueryString) of
                    {error, _, _} ->
                        error({invalid_txt_record, invalid_query_string});
                    Options ->
                        maps:merge(AccIn, take_and_convert(Fields, Options))
                end;
            _ ->
                error({invalid_txt_record, multiple_records})
        end
    end,
        case inet_res:lookup(IpOrHost, in, txt) of
            [] ->
                #{};
            [[QueryString]] ->
                case uri_string:dissect_query(QueryString) of
                    {error, _, _} ->
                        error({invalid_txt_record, invalid_query_string});
                    Options ->
                        maps:merge(AccIn, take_and_convert(Fields, Options))
                end;
            _ ->
                error({invalid_txt_record, multiple_records})
        end
    end,
    lists:foldl(Fun, #{}, Servers).
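
The TXT payload is treated as a URL-style query string, which OTP can split directly; this is the decoding step inside the fold above:

    %% OTP 21+; returns {error, _, _} on malformed input, pairs otherwise.
    1> uri_string:dissect_query("authSource=admin&replicaSet=rs0").
    [{"authSource","admin"},{"replicaSet","rs0"}]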

take_and_convert(Fields, Options) ->

@@ -430,8 +511,8 @@ take_and_convert([Field | More], Options, Acc) ->
        take_and_convert(More, Options, Acc)
    end.

-spec ip_or_host_to_string(binary() | string() | tuple())
    -> string().
-spec ip_or_host_to_string(binary() | string() | tuple()) ->
    string().
ip_or_host_to_string(Ip) when is_tuple(Ip) ->
    inet:ntoa(Ip);
ip_or_host_to_string(Host) ->

@@ -448,18 +529,20 @@ server_to_bin({IpOrHost, Port}) ->
%% ===================================================================
%% typereflt funcs

-spec to_server_raw(string())
    -> {string(), pos_integer()}.
-spec to_server_raw(string()) ->
    {string(), pos_integer()}.
to_server_raw(Server) ->
    emqx_connector_schema_lib:parse_server(Server, ?MONGO_HOST_OPTIONS).

-spec to_servers_raw(string())
    -> [{string(), pos_integer()}].
-spec to_servers_raw(string()) ->
    [{string(), pos_integer()}].
to_servers_raw(Servers) ->
    lists:map( fun(Server) ->
            emqx_connector_schema_lib:parse_server(Server, ?MONGO_HOST_OPTIONS)
        end
        , string:tokens(str(Servers), ", ")).
    lists:map(
        fun(Server) ->
            emqx_connector_schema_lib:parse_server(Server, ?MONGO_HOST_OPTIONS)
        end,
        string:tokens(str(Servers), ", ")
    ).

str(A) when is_atom(A) ->
    atom_to_list(A);

@@ -23,28 +23,32 @@
-behaviour(emqx_resource).

%% API and callbacks for supervisor
-export([ start_link/0
        , init/1
        , create_bridge/1
        , drop_bridge/1
        , bridges/0
        ]).
-export([
    start_link/0,
    init/1,
    create_bridge/1,
    drop_bridge/1,
    bridges/0
]).

-export([on_message_received/3]).

%% callbacks of behaviour emqx_resource
-export([ on_start/2
        , on_stop/2
        , on_query/4
        , on_health_check/2
        ]).
-export([
    on_start/2,
    on_stop/2,
    on_query/4,
    on_health_check/2
]).

-behaviour(hocon_schema).

-import(hoconsc, [mk/2]).

-export([ roots/0
        , fields/1]).
-export([
    roots/0,
    fields/1
]).

%%=====================================================================
%% Hocon schema

@@ -53,25 +57,34 @@ roots() ->

fields("config") ->
    emqx_connector_mqtt_schema:fields("config");

fields("get") ->
    [ {num_of_bridges, mk(integer(),
        #{ desc => ?DESC("num_of_bridges")
        })}
    [
        {num_of_bridges,
            mk(
                integer(),
                #{desc => ?DESC("num_of_bridges")}
            )}
    ] ++ fields("post");

fields("put") ->
    emqx_connector_mqtt_schema:fields("connector");

fields("post") ->
    [ {type, mk(mqtt,
        #{ required => true
         , desc => ?DESC("type")
         })}
    , {name, mk(binary(),
        #{ required => true
         , desc => ?DESC("name")
         })}
    [
        {type,
            mk(
                mqtt,
                #{
                    required => true,
                    desc => ?DESC("type")
                }
            )},
        {name,
            mk(
                binary(),
                #{
                    required => true,
                    desc => ?DESC("name")
                }
            )}
    ] ++ fields("put").

%% ===================================================================

@@ -80,23 +93,29 @@ start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).

init([]) ->
    SupFlag = #{strategy => one_for_one,
                intensity => 100,
                period => 10},
    SupFlag = #{
        strategy => one_for_one,
        intensity => 100,
        period => 10
    },
    {ok, {SupFlag, []}}.

bridge_spec(Config) ->
    #{id => maps:get(name, Config),
      start => {emqx_connector_mqtt_worker, start_link, [Config]},
      restart => permanent,
      shutdown => 5000,
      type => worker,
      modules => [emqx_connector_mqtt_worker]}.
    #{
        id => maps:get(name, Config),
        start => {emqx_connector_mqtt_worker, start_link, [Config]},
        restart => permanent,
        shutdown => 5000,
        type => worker,
        modules => [emqx_connector_mqtt_worker]
    }.
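
`init/1` and `bridge_spec/1` together form the usual pattern of a `one_for_one` supervisor that starts empty and gains one worker per bridge. A self-contained sketch with a generic worker module in place of `emqx_connector_mqtt_worker` (`my_worker` is illustrative):

    -module(bridge_sup_demo).
    -behaviour(supervisor).
    -export([start_link/0, init/1, add_child/1]).

    start_link() ->
        supervisor:start_link({local, ?MODULE}, ?MODULE, []).

    %% Start with no children; tolerate up to 100 restarts per 10 seconds.
    init([]) ->
        SupFlags = #{strategy => one_for_one, intensity => 100, period => 10},
        {ok, {SupFlags, []}}.

    %% One dynamically added child per bridge name.
    add_child(Name) ->
        supervisor:start_child(?MODULE, #{
            id => Name,
            start => {my_worker, start_link, [Name]},
            restart => permanent,
            shutdown => 5000,
            type => worker,
            modules => [my_worker]
        }).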

-spec(bridges() -> [{node(), map()}]).
-spec bridges() -> [{node(), map()}].
bridges() ->
    [{Name, emqx_connector_mqtt_worker:status(Name)}
     || {Name, _Pid, _, _} <- supervisor:which_children(?MODULE)].
    [
        {Name, emqx_connector_mqtt_worker:status(Name)}
     || {Name, _Pid, _, _} <- supervisor:which_children(?MODULE)
    ].

create_bridge(Config) ->
    supervisor:start_child(?MODULE, bridge_spec(Config)).

@@ -121,8 +140,11 @@ on_message_received(Msg, HookPoint, InstId) ->
%% ===================================================================
on_start(InstId, Conf) ->
    InstanceId = binary_to_atom(InstId, utf8),
    ?SLOG(info, #{msg => "starting_mqtt_connector",
        connector => InstanceId, config => Conf}),
    ?SLOG(info, #{
        msg => "starting_mqtt_connector",
        connector => InstanceId,
        config => Conf
    }),
    BasicConf = basic_config(Conf),
    BridgeConf = BasicConf#{
        name => InstanceId,

@@ -142,19 +164,25 @@ on_start(InstId, Conf) ->
    end.

on_stop(_InstId, #{name := InstanceId}) ->
    ?SLOG(info, #{msg => "stopping_mqtt_connector",
        connector => InstanceId}),
    ?SLOG(info, #{
        msg => "stopping_mqtt_connector",
        connector => InstanceId
    }),
    case ?MODULE:drop_bridge(InstanceId) of
        ok -> ok;
        {error, not_found} -> ok;
        ok ->
            ok;
        {error, not_found} ->
            ok;
        {error, Reason} ->
            ?SLOG(error, #{msg => "stop_mqtt_connector",
                connector => InstanceId, reason => Reason})
            ?SLOG(error, #{
                msg => "stop_mqtt_connector",
                connector => InstanceId,
                reason => Reason
            })
    end.

on_query(_InstId, {message_received, _Msg}, AfterQuery, _State) ->
    emqx_resource:query_success(AfterQuery);

on_query(_InstId, {send_message, Msg}, AfterQuery, #{name := InstanceId}) ->
    ?TRACE("QUERY", "send_msg_to_remote_node", #{message => Msg, connector => InstanceId}),
    emqx_connector_mqtt_worker:send_to_remote(InstanceId, Msg),

@@ -178,7 +206,8 @@ make_sub_confs(undefined, _) ->
    undefined;
make_sub_confs(SubRemoteConf, InstId) ->
    case maps:take(hookpoint, SubRemoteConf) of
        error -> SubRemoteConf;
        error ->
            SubRemoteConf;
        {HookPoint, SubConf} ->
            MFA = {?MODULE, on_message_received, [HookPoint, InstId]},
            SubConf#{on_message_received => MFA}

@@ -192,22 +221,24 @@ make_forward_confs(FrowardConf) ->
    FrowardConf.

basic_config(#{
        server := Server,
        reconnect_interval := ReconnIntv,
        proto_ver := ProtoVer,
        username := User,
        password := Password,
        clean_start := CleanStart,
        keepalive := KeepAlive,
        retry_interval := RetryIntv,
        max_inflight := MaxInflight,
        replayq := ReplayQ,
        ssl := #{enable := EnableSsl} = Ssl}) ->
    server := Server,
    reconnect_interval := ReconnIntv,
    proto_ver := ProtoVer,
    username := User,
    password := Password,
    clean_start := CleanStart,
    keepalive := KeepAlive,
    retry_interval := RetryIntv,
    max_inflight := MaxInflight,
    replayq := ReplayQ,
    ssl := #{enable := EnableSsl} = Ssl
}) ->
    #{
        replayq => ReplayQ,
        %% connection opts
        server => Server,
        connect_timeout => 30, %% 30s
        %% 30s
        connect_timeout => 30,
        reconnect_interval => ReconnIntv,
        proto_ver => ProtoVer,
        bridge_mode => true,

@@ -23,11 +23,12 @@
-behaviour(emqx_resource).

%% callbacks of behaviour emqx_resource
-export([ on_start/2
        , on_stop/2
        , on_query/4
        , on_health_check/2
        ]).
-export([
    on_start/2,
    on_stop/2,
    on_query/4,
    on_health_check/2
]).

%% ecpool connect & reconnect
-export([connect/1, prepare_sql_to_conn/2]).

@@ -38,9 +39,10 @@

-export([do_health_check/1]).

-define( MYSQL_HOST_OPTIONS
       , #{ host_type => inet_addr
          , default_port => ?MYSQL_DEFAULT_PORT}).
-define(MYSQL_HOST_OPTIONS, #{
    host_type => inet_addr,
    default_port => ?MYSQL_DEFAULT_PORT
}).

%%=====================================================================
%% Hocon schema

@@ -48,11 +50,10 @@ roots() ->
    [{config, #{type => hoconsc:ref(?MODULE, config)}}].

fields(config) ->
    [ {server, fun server/1}
    ] ++
    emqx_connector_schema_lib:relational_db_fields() ++
    emqx_connector_schema_lib:ssl_fields() ++
    emqx_connector_schema_lib:prepare_statement_fields().
    [{server, fun server/1}] ++
        emqx_connector_schema_lib:relational_db_fields() ++
        emqx_connector_schema_lib:ssl_fields() ++
        emqx_connector_schema_lib:prepare_statement_fields().

server(type) -> emqx_schema:ip_port();
server(required) -> true;

@@ -62,47 +63,64 @@ server(desc) -> ?DESC("server");
server(_) -> undefined.

%% ===================================================================
on_start(InstId, #{server := {Host, Port},
                   database := DB,
                   username := User,
                   password := Password,
                   auto_reconnect := AutoReconn,
                   pool_size := PoolSize,
                   ssl := SSL } = Config) ->
    ?SLOG(info, #{msg => "starting_mysql_connector",
        connector => InstId, config => Config}),
    SslOpts = case maps:get(enable, SSL) of
        true ->
            [{ssl, emqx_tls_lib:to_client_opts(SSL)}];
        false ->
            []
    end,
    Options = [{host, Host},
               {port, Port},
               {user, User},
               {password, Password},
               {database, DB},
               {auto_reconnect, reconn_interval(AutoReconn)},
               {pool_size, PoolSize}],
on_start(
    InstId,
    #{
        server := {Host, Port},
        database := DB,
        username := User,
        password := Password,
        auto_reconnect := AutoReconn,
        pool_size := PoolSize,
        ssl := SSL
    } = Config
) ->
    ?SLOG(info, #{
        msg => "starting_mysql_connector",
        connector => InstId,
        config => Config
    }),
    SslOpts =
        case maps:get(enable, SSL) of
            true ->
                [{ssl, emqx_tls_lib:to_client_opts(SSL)}];
            false ->
                []
        end,
    Options = [
        {host, Host},
        {port, Port},
        {user, User},
        {password, Password},
        {database, DB},
        {auto_reconnect, reconn_interval(AutoReconn)},
        {pool_size, PoolSize}
    ],
    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
    Prepares = maps:get(prepare_statement, Config, #{}),
    State = init_prepare(#{poolname => PoolName, prepare_statement => Prepares}),
    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
        ok -> {ok, State};
        ok -> {ok, State};
        {error, Reason} -> {error, Reason}
    end.

on_stop(InstId, #{poolname := PoolName}) ->
    ?SLOG(info, #{msg => "stopping_mysql_connector",
        connector => InstId}),
    ?SLOG(info, #{
        msg => "stopping_mysql_connector",
        connector => InstId
    }),
    emqx_plugin_libs_pool:stop_pool(PoolName).

on_query(InstId, {Type, SQLOrKey}, AfterQuery, State) ->
    on_query(InstId, {Type, SQLOrKey, [], default_timeout}, AfterQuery, State);
on_query(InstId, {Type, SQLOrKey, Params}, AfterQuery, State) ->
    on_query(InstId, {Type, SQLOrKey, Params, default_timeout}, AfterQuery, State);
on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery,
         #{poolname := PoolName, prepare_statement := Prepares} = State) ->
on_query(
    InstId,
    {Type, SQLOrKey, Params, Timeout},
    AfterQuery,
    #{poolname := PoolName, prepare_statement := Prepares} = State
) ->
    LogMeta = #{connector => InstId, sql => SQLOrKey, state => State},
    ?TRACE("QUERY", "mysql_connector_received", LogMeta),
    Worker = ecpool:get_client(PoolName),

@@ -111,28 +129,36 @@ on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery,
    Result = erlang:apply(mysql, MySqlFunction, [Conn, SQLOrKey, Params, Timeout]),
    case Result of
        {error, disconnected} ->
            ?SLOG(error,
                LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => disconnected}),
            ?SLOG(
                error,
                LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => disconnected}
            ),
            %% kill the poll worker to trigger reconnection
            _ = exit(Conn, restart),
            emqx_resource:query_failed(AfterQuery),
            Result;
        {error, not_prepared} ->
            ?SLOG(warning,
                LogMeta#{msg => "mysql_connector_prepare_query_failed", reason => not_prepared}),
            ?SLOG(
                warning,
                LogMeta#{msg => "mysql_connector_prepare_query_failed", reason => not_prepared}
            ),
            case prepare_sql(Prepares, PoolName) of
                ok ->
                    %% not return result, next loop will try again
                    on_query(InstId, {Type, SQLOrKey, Params, Timeout}, AfterQuery, State);
                {error, Reason} ->
                    ?SLOG(error,
                        LogMeta#{msg => "mysql_connector_do_prepare_failed", reason => Reason}),
                    ?SLOG(
                        error,
                        LogMeta#{msg => "mysql_connector_do_prepare_failed", reason => Reason}
                    ),
                    emqx_resource:query_failed(AfterQuery),
                    {error, Reason}
            end;
        {error, Reason} ->
            ?SLOG(error,
                LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason}),
            ?SLOG(
                error,
                LogMeta#{msg => "mysql_connector_do_sql_query_failed", reason => Reason}
            ),
            emqx_resource:query_failed(AfterQuery),
            Result;
        _ ->

@@ -147,7 +173,7 @@ on_health_check(_InstId, #{poolname := PoolName} = State) ->
    case emqx_plugin_libs_pool:health_check(PoolName, fun ?MODULE:do_health_check/1, State) of
        {ok, State} ->
            case do_health_check_prepares(State) of
                ok->
                ok ->
                    {ok, State};
                {ok, NState} ->
                    {ok, NState};

@@ -161,7 +187,7 @@ on_health_check(_InstId, #{poolname := PoolName} = State) ->
do_health_check(Conn) ->
    ok == element(1, mysql:query(Conn, <<"SELECT count(1) AS T">>)).

do_health_check_prepares(#{prepare_statement := Prepares})when is_map(Prepares) ->
do_health_check_prepares(#{prepare_statement := Prepares}) when is_map(Prepares) ->
    ok;
do_health_check_prepares(State = #{poolname := PoolName, prepare_statement := {error, Prepares}}) ->
    %% retry to prepare

@@ -180,8 +206,8 @@ reconn_interval(false) -> false.
connect(Options) ->
    mysql:start_link(Options).

-spec to_server(string())
    -> {inet:ip_address() | inet:hostname(), pos_integer()}.
-spec to_server(string()) ->
    {inet:ip_address() | inet:hostname(), pos_integer()}.
to_server(Str) ->
    emqx_connector_schema_lib:parse_server(Str, ?MYSQL_HOST_OPTIONS).

@@ -215,20 +241,27 @@ prepare_sql(Prepares, PoolName) ->

do_prepare_sql(Prepares, PoolName) ->
    Conns =
        [begin
             {ok, Conn} = ecpool_worker:client(Worker),
             Conn
         end || {_Name, Worker} <- ecpool:workers(PoolName)],
        [
            begin
                {ok, Conn} = ecpool_worker:client(Worker),
                Conn
            end
         || {_Name, Worker} <- ecpool:workers(PoolName)
        ],
    prepare_sql_to_conn_list(Conns, Prepares).

prepare_sql_to_conn_list([], _PrepareList) -> ok;
prepare_sql_to_conn_list([], _PrepareList) ->
    ok;
prepare_sql_to_conn_list([Conn | ConnList], PrepareList) ->
    case prepare_sql_to_conn(Conn, PrepareList) of
        ok ->
            prepare_sql_to_conn_list(ConnList, PrepareList);
        {error, R} ->
            %% rollback
            Fun = fun({Key, _}) -> _ = unprepare_sql_to_conn(Conn, Key), ok end,
            Fun = fun({Key, _}) ->
                _ = unprepare_sql_to_conn(Conn, Key),
                ok
            end,
            lists:foreach(Fun, PrepareList),
            {error, R}
    end.
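
`prepare_sql_to_conn_list/2` walks the pool's connections and, on the first failure, unprepares the statements it had issued to that connection before giving up. The same all-or-undo shape in a generic, runnable sketch:

    -module(all_or_undo_demo).
    -export([apply_all/3]).

    %% Apply Do/1 to each item in order; on the first error, run Undo/1
    %% and stop, returning the error.
    apply_all([], _Do, _Undo) ->
        ok;
    apply_all([Item | Rest], Do, Undo) ->
        case Do(Item) of
            ok ->
                apply_all(Rest, Do, Undo);
            {error, Reason} ->
                _ = Undo(Item),
                {error, Reason}
        end.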

@@ -26,24 +26,26 @@
-behaviour(emqx_resource).

%% callbacks of behaviour emqx_resource
-export([ on_start/2
        , on_stop/2
        , on_query/4
        , on_health_check/2
        ]).
-export([
    on_start/2,
    on_stop/2,
    on_query/4,
    on_health_check/2
]).

-export([connect/1]).

-export([ query/3
        , prepared_query/3
        ]).
-export([
    query/3,
    prepared_query/3
]).

-export([do_health_check/1]).

-define( PGSQL_HOST_OPTIONS
       , #{ host_type => inet_addr
          , default_port => ?PGSQL_DEFAULT_PORT}).

-define(PGSQL_HOST_OPTIONS, #{
    host_type => inet_addr,
    default_port => ?PGSQL_DEFAULT_PORT
}).

%%=====================================================================

@@ -52,9 +54,9 @@ roots() ->

fields(config) ->
    [{server, fun server/1}] ++
    emqx_connector_schema_lib:relational_db_fields() ++
    emqx_connector_schema_lib:ssl_fields() ++
    emqx_connector_schema_lib:prepare_statement_fields().
        emqx_connector_schema_lib:relational_db_fields() ++
        emqx_connector_schema_lib:ssl_fields() ++
        emqx_connector_schema_lib:prepare_statement_fields().

server(type) -> emqx_schema:ip_port();
server(required) -> true;

@@ -64,52 +66,73 @@ server(desc) -> ?DESC("server");
server(_) -> undefined.

%% ===================================================================
on_start(InstId, #{server := {Host, Port},
                   database := DB,
                   username := User,
                   password := Password,
                   auto_reconnect := AutoReconn,
                   pool_size := PoolSize,
                   ssl := SSL} = Config) ->
    ?SLOG(info, #{msg => "starting_postgresql_connector",
        connector => InstId, config => Config}),
    SslOpts = case maps:get(enable, SSL) of
        true ->
            [{ssl, true},
             {ssl_opts, emqx_tls_lib:to_client_opts(SSL)}];
        false ->
            [{ssl, false}]
    end,
    Options = [{host, Host},
               {port, Port},
               {username, User},
               {password, Password},
               {database, DB},
               {auto_reconnect, reconn_interval(AutoReconn)},
               {pool_size, PoolSize},
               {prepare_statement, maps:to_list(maps:get(prepare_statement, Config, #{}))}],
on_start(
    InstId,
    #{
        server := {Host, Port},
        database := DB,
        username := User,
        password := Password,
        auto_reconnect := AutoReconn,
        pool_size := PoolSize,
        ssl := SSL
    } = Config
) ->
    ?SLOG(info, #{
        msg => "starting_postgresql_connector",
        connector => InstId,
        config => Config
    }),
    SslOpts =
        case maps:get(enable, SSL) of
            true ->
                [
                    {ssl, true},
                    {ssl_opts, emqx_tls_lib:to_client_opts(SSL)}
                ];
            false ->
                [{ssl, false}]
        end,
    Options = [
        {host, Host},
        {port, Port},
        {username, User},
        {password, Password},
        {database, DB},
        {auto_reconnect, reconn_interval(AutoReconn)},
        {pool_size, PoolSize},
        {prepare_statement, maps:to_list(maps:get(prepare_statement, Config, #{}))}
    ],
    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
    case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options ++ SslOpts) of
        ok -> {ok, #{poolname => PoolName}};
        ok -> {ok, #{poolname => PoolName}};
        {error, Reason} -> {error, Reason}
    end.

on_stop(InstId, #{poolname := PoolName}) ->
    ?SLOG(info, #{msg => "stopping postgresql connector",
        connector => InstId}),
    ?SLOG(info, #{
        msg => "stopping postgresql connector",
        connector => InstId
    }),
    emqx_plugin_libs_pool:stop_pool(PoolName).

on_query(InstId, {Type, NameOrSQL}, AfterQuery, #{poolname := _PoolName} = State) ->
    on_query(InstId, {Type, NameOrSQL, []}, AfterQuery, State);

on_query(InstId, {Type, NameOrSQL, Params}, AfterQuery, #{poolname := PoolName} = State) ->
    ?SLOG(debug, #{msg => "postgresql connector received sql query",
        connector => InstId, sql => NameOrSQL, state => State}),
    ?SLOG(debug, #{
        msg => "postgresql connector received sql query",
        connector => InstId,
        sql => NameOrSQL,
        state => State
    }),
    case Result = ecpool:pick_and_do(PoolName, {?MODULE, Type, [NameOrSQL, Params]}, no_handover) of
        {error, Reason} ->
            ?SLOG(error, #{
                msg => "postgresql connector do sql query failed",
                connector => InstId, sql => NameOrSQL, reason => Reason}),
                connector => InstId,
                sql => NameOrSQL,
                reason => Reason
            }),
            emqx_resource:query_failed(AfterQuery);
        _ ->
            emqx_resource:query_success(AfterQuery)

@@ -127,7 +150,7 @@ reconn_interval(true) -> 15;
reconn_interval(false) -> false.

connect(Opts) ->
    Host = proplists:get_value(host, Opts),
    Host = proplists:get_value(host, Opts),
    Username = proplists:get_value(username, Opts),
    Password = proplists:get_value(password, Opts),
    PrepareStatement = proplists:get_value(prepare_statement, Opts),

@@ -177,7 +200,7 @@ conn_opts([_Opt | Opts], Acc) ->
%% ===================================================================
%% typereflt funcs

-spec to_server(string())
    -> {inet:ip_address() | inet:hostname(), pos_integer()}.
-spec to_server(string()) ->
    {inet:ip_address() | inet:hostname(), pos_integer()}.
to_server(Str) ->
    emqx_connector_schema_lib:parse_server(Str, ?PGSQL_HOST_OPTIONS).

@@ -25,11 +25,12 @@
-behaviour(emqx_resource).

%% callbacks of behaviour emqx_resource
-export([ on_start/2
        , on_stop/2
        , on_query/4
        , on_health_check/2
        ]).
-export([
    on_start/2,
    on_stop/2,
    on_query/4,
    on_health_check/2
]).

-export([do_health_check/1]).

@@ -38,50 +39,59 @@
-export([cmd/3]).

%% redis host don't need parse
-define( REDIS_HOST_OPTIONS
       , #{ host_type => hostname
          , default_port => ?REDIS_DEFAULT_PORT}).

-define(REDIS_HOST_OPTIONS, #{
    host_type => hostname,
    default_port => ?REDIS_DEFAULT_PORT
}).

%%=====================================================================
roots() ->
    [ {config, #{type => hoconsc:union(
        [ hoconsc:ref(?MODULE, cluster)
        , hoconsc:ref(?MODULE, single)
        , hoconsc:ref(?MODULE, sentinel)
        ])}
    }
    [
        {config, #{
            type => hoconsc:union(
                [
                    hoconsc:ref(?MODULE, cluster),
                    hoconsc:ref(?MODULE, single),
                    hoconsc:ref(?MODULE, sentinel)
                ]
            )
        }}
    ].

fields(single) ->
    [ {server, fun server/1}
    , {redis_type, #{type => hoconsc:enum([single]),
          required => true,
          desc => ?DESC("single")
      }}
    [
        {server, fun server/1},
        {redis_type, #{
            type => hoconsc:enum([single]),
            required => true,
            desc => ?DESC("single")
        }}
    ] ++
    redis_fields() ++
    emqx_connector_schema_lib:ssl_fields();
        redis_fields() ++
        emqx_connector_schema_lib:ssl_fields();
fields(cluster) ->
    [ {servers, fun servers/1}
    , {redis_type, #{type => hoconsc:enum([cluster]),
          required => true,
          desc => ?DESC("cluster")
      }}
    [
        {servers, fun servers/1},
        {redis_type, #{
            type => hoconsc:enum([cluster]),
            required => true,
            desc => ?DESC("cluster")
        }}
    ] ++
    redis_fields() ++
    emqx_connector_schema_lib:ssl_fields();
        redis_fields() ++
        emqx_connector_schema_lib:ssl_fields();
fields(sentinel) ->
    [ {servers, fun servers/1}
    , {redis_type, #{type => hoconsc:enum([sentinel]),
          required => true,
          desc => ?DESC("sentinel")
      }}
    , {sentinel, #{type => string(), desc => ?DESC("sentinel_desc")
      }}
    [
        {servers, fun servers/1},
        {redis_type, #{
            type => hoconsc:enum([sentinel]),
            required => true,
            desc => ?DESC("sentinel")
        }},
        {sentinel, #{type => string(), desc => ?DESC("sentinel_desc")}}
    ] ++
    redis_fields() ++
    emqx_connector_schema_lib:ssl_fields().
        redis_fields() ++
        emqx_connector_schema_lib:ssl_fields().

server(type) -> emqx_schema:ip_port();
server(required) -> true;

@@ -98,62 +108,89 @@ servers(desc) -> ?DESC("servers");
servers(_) -> undefined.

%% ===================================================================
on_start(InstId, #{redis_type := Type,
                   database := Database,
                   pool_size := PoolSize,
                   auto_reconnect := AutoReconn,
                   ssl := SSL } = Config) ->
    ?SLOG(info, #{msg => "starting_redis_connector",
        connector => InstId, config => Config}),
    Servers = case Type of
                  single -> [{servers, [maps:get(server, Config)]}];
                  _ ->[{servers, maps:get(servers, Config)}]
              end,
    Opts = [{pool_size, PoolSize},
on_start(
    InstId,
    #{
        redis_type := Type,
        database := Database,
        pool_size := PoolSize,
        auto_reconnect := AutoReconn,
        ssl := SSL
    } = Config
) ->
    ?SLOG(info, #{
        msg => "starting_redis_connector",
        connector => InstId,
        config => Config
    }),
    Servers =
        case Type of
            single -> [{servers, [maps:get(server, Config)]}];
            _ -> [{servers, maps:get(servers, Config)}]
        end,
    Opts =
        [
            {pool_size, PoolSize},
            {database, Database},
            {password, maps:get(password, Config, "")},
            {auto_reconnect, reconn_interval(AutoReconn)}
    ] ++ Servers,
    Options = case maps:get(enable, SSL) of
                  true ->
                      [{ssl, true},
                       {ssl_options, emqx_tls_lib:to_client_opts(SSL)}];
                  false -> [{ssl, false}]
              end ++ [{sentinel, maps:get(sentinel, Config, undefined)}],
        ] ++ Servers,
    Options =
        case maps:get(enable, SSL) of
            true ->
                [
                    {ssl, true},
                    {ssl_options, emqx_tls_lib:to_client_opts(SSL)}
                ];
            false ->
                [{ssl, false}]
        end ++ [{sentinel, maps:get(sentinel, Config, undefined)}],
    PoolName = emqx_plugin_libs_pool:pool_name(InstId),
    case Type of
        cluster ->
            case eredis_cluster:start_pool(PoolName, Opts ++ [{options, Options}]) of
                {ok, _} -> {ok, #{poolname => PoolName, type => Type}};
                {ok, _, _} -> {ok, #{poolname => PoolName, type => Type}};
                {ok, _} -> {ok, #{poolname => PoolName, type => Type}};
                {ok, _, _} -> {ok, #{poolname => PoolName, type => Type}};
                {error, Reason} -> {error, Reason}
            end;
        _ ->
            case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ [{options, Options}]) of
                ok -> {ok, #{poolname => PoolName, type => Type}};
            case
                emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Opts ++ [{options, Options}])
            of
                ok -> {ok, #{poolname => PoolName, type => Type}};
                {error, Reason} -> {error, Reason}
            end
    end.
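
Much of this hunk is list plumbing: option lists are assembled with `case ... end ++ [...]`, splicing a conditional TLS block in front of an unconditional tail. The shape in isolation, as a runnable sketch:

    -module(opts_demo).
    -export([build/1]).

    %% A case expression is a value, so it can head a ++ chain.
    build(#{ssl := UseSsl, sentinel := Sentinel}) ->
        case UseSsl of
            true -> [{ssl, true}];
            false -> [{ssl, false}]
        end ++ [{sentinel, Sentinel}].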

on_stop(InstId, #{poolname := PoolName, type := Type}) ->
    ?SLOG(info, #{msg => "stopping_redis_connector",
        connector => InstId}),
    ?SLOG(info, #{
        msg => "stopping_redis_connector",
        connector => InstId
    }),
    case Type of
        cluster -> eredis_cluster:stop_pool(PoolName);
        _ -> emqx_plugin_libs_pool:stop_pool(PoolName)
    end.

on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := Type} = State) ->
    ?TRACE("QUERY", "redis_connector_received",
        #{connector => InstId, sql => Command, state => State}),
    Result = case Type of
        cluster -> eredis_cluster:q(PoolName, Command);
        _ -> ecpool:pick_and_do(PoolName, {?MODULE, cmd, [Type, Command]}, no_handover)
    end,
    ?TRACE(
        "QUERY",
        "redis_connector_received",
        #{connector => InstId, sql => Command, state => State}
    ),
    Result =
        case Type of
            cluster -> eredis_cluster:q(PoolName, Command);
            _ -> ecpool:pick_and_do(PoolName, {?MODULE, cmd, [Type, Command]}, no_handover)
        end,
    case Result of
        {error, Reason} ->
            ?SLOG(error, #{msg => "redis_connector_do_cmd_query_failed",
                connector => InstId, sql => Command, reason => Reason}),
            ?SLOG(error, #{
                msg => "redis_connector_do_cmd_query_failed",
                connector => InstId,
                sql => Command,
                reason => Reason
            }),
            emqx_resource:query_failed(AfterCommand);
        _ ->
            emqx_resource:query_success(AfterCommand)

@@ -161,14 +198,19 @@ on_query(InstId, {cmd, Command}, AfterCommand, #{poolname := PoolName, type := T
    Result.
|
||||
|
||||
extract_eredis_cluster_workers(PoolName) ->
    lists:flatten([gen_server:call(PoolPid, get_all_workers) ||
                   PoolPid <- eredis_cluster_monitor:get_all_pools(PoolName)]).
    lists:flatten([
        gen_server:call(PoolPid, get_all_workers)
     || PoolPid <- eredis_cluster_monitor:get_all_pools(PoolName)
    ]).

eredis_cluster_workers_exist_and_are_connected(Workers) ->
    length(Workers) > 0 andalso lists:all(
        fun({_, Pid, _, _}) ->
            eredis_cluster_pool_worker:is_connected(Pid) =:= true
        end, Workers).
    length(Workers) > 0 andalso
        lists:all(
            fun({_, Pid, _, _}) ->
                eredis_cluster_pool_worker:is_connected(Pid) =:= true
            end,
            Workers
        ).

on_health_check(_InstId, #{type := cluster, poolname := PoolName} = State) ->
    case eredis_cluster:pool_exists(PoolName) of

@@ -178,12 +220,9 @@ on_health_check(_InstId, #{type := cluster, poolname := PoolName} = State) ->
                true -> {ok, State};
                false -> {error, health_check_failed, State}
            end;
        false ->
            {error, health_check_failed, State}
    end;

on_health_check(_InstId, #{poolname := PoolName} = State) ->
    emqx_plugin_libs_pool:health_check(PoolName, fun ?MODULE:do_health_check/1, State).

@@ -206,28 +245,32 @@ connect(Opts) ->
    eredis:start_link(Opts).

redis_fields() ->
    [ {pool_size, fun emqx_connector_schema_lib:pool_size/1}
    , {password, fun emqx_connector_schema_lib:password/1}
    , {database, #{type => integer(),
                   default => 0,
                   required => true,
                   desc => ?DESC("database")
                  }}
    , {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
    [
        {pool_size, fun emqx_connector_schema_lib:pool_size/1},
        {password, fun emqx_connector_schema_lib:password/1},
        {database, #{
            type => integer(),
            default => 0,
            required => true,
            desc => ?DESC("database")
        }},
        {auto_reconnect, fun emqx_connector_schema_lib:auto_reconnect/1}
    ].

-spec to_server_raw(string())
    -> {string(), pos_integer()}.
-spec to_server_raw(string()) ->
    {string(), pos_integer()}.
to_server_raw(Server) ->
    emqx_connector_schema_lib:parse_server(Server, ?REDIS_HOST_OPTIONS).

-spec to_servers_raw(string())
    -> [{string(), pos_integer()}].
-spec to_servers_raw(string()) ->
    [{string(), pos_integer()}].
to_servers_raw(Servers) ->
    lists:map( fun(Server) ->
                   emqx_connector_schema_lib:parse_server(Server, ?REDIS_HOST_OPTIONS)
               end
             , string:tokens(str(Servers), ", ")).
    lists:map(
        fun(Server) ->
            emqx_connector_schema_lib:parse_server(Server, ?REDIS_HOST_OPTIONS)
        end,
        string:tokens(str(Servers), ", ")
    ).

str(A) when is_atom(A) ->
    atom_to_list(A);
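For reference, a minimal sketch of how the reworked to_servers_raw/1 above behaves on a comma-separated server list; the 6379 default port is an assumption about what ?REDIS_HOST_OPTIONS resolves to in this module:

%% Illustrative only (assumes ?REDIS_HOST_OPTIONS defaults the port to 6379):
%% > to_servers_raw("127.0.0.1:6379, 10.0.0.2").
%% [{"127.0.0.1", 6379}, {"10.0.0.2", 6379}]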
@@ -24,10 +24,11 @@

-export([namespace/0, roots/0, fields/1, desc/1]).

-export([ get_response/0
        , put_request/0
        , post_request/0
        ]).
-export([
    get_response/0,
    put_request/0,
    post_request/0
]).

%% the config for http bridges does not need connectors
-define(CONN_TYPES, [mqtt]).
@@ -55,18 +56,25 @@ namespace() -> connector.

roots() -> ["connectors"].

fields(connectors) -> fields("connectors");
fields(connectors) ->
    fields("connectors");
fields("connectors") ->
    [ {mqtt,
       mk(hoconsc:map(name,
                      hoconsc:union([ ref(emqx_connector_mqtt_schema, "connector")
                                    ])),
          #{ desc => ?DESC("mqtt")
           })}
    [
        {mqtt,
            mk(
                hoconsc:map(
                    name,
                    hoconsc:union([ref(emqx_connector_mqtt_schema, "connector")])
                ),
                #{desc => ?DESC("mqtt")}
            )}
    ].

desc(Record) when Record =:= connectors;
                  Record =:= "connectors" -> ?DESC("desc_connector");
desc(Record) when
    Record =:= connectors;
    Record =:= "connectors"
->
    ?DESC("desc_connector");
desc(_) ->
    undefined.

@@ -19,32 +19,36 @@
-include_lib("typerefl/include/types.hrl").
-include_lib("hocon/include/hoconsc.hrl").

-export([ relational_db_fields/0
        , ssl_fields/0
        , prepare_statement_fields/0
        ]).
-export([
    relational_db_fields/0,
    ssl_fields/0,
    prepare_statement_fields/0
]).

-export([ ip_port_to_string/1
        , parse_server/2
        ]).
-export([
    ip_port_to_string/1,
    parse_server/2
]).

-export([ pool_size/1
        , database/1
        , username/1
        , password/1
        , auto_reconnect/1
        ]).
-export([
    pool_size/1,
    database/1,
    username/1,
    password/1,
    auto_reconnect/1
]).

-type database() :: binary().
-type pool_size() :: pos_integer().
-type username() :: binary().
-type password() :: binary().

-reflect_type([ database/0
              , pool_size/0
              , username/0
              , password/0
              ]).
-reflect_type([
    database/0,
    pool_size/0,
    username/0,
    password/0
]).

-export([roots/0, fields/1]).

@@ -53,24 +57,25 @@ roots() -> [].
fields(_) -> [].

ssl_fields() ->
    [ {ssl, #{type => hoconsc:ref(emqx_schema, "ssl_client_opts"),
              default => #{<<"enable">> => false},
              desc => ?DESC("ssl")
             }
      }
    [
        {ssl, #{
            type => hoconsc:ref(emqx_schema, "ssl_client_opts"),
            default => #{<<"enable">> => false},
            desc => ?DESC("ssl")
        }}
    ].

relational_db_fields() ->
    [ {database, fun database/1}
    , {pool_size, fun pool_size/1}
    , {username, fun username/1}
    , {password, fun password/1}
    , {auto_reconnect, fun auto_reconnect/1}
    [
        {database, fun database/1},
        {pool_size, fun pool_size/1},
        {username, fun username/1},
        {password, fun password/1},
        {auto_reconnect, fun auto_reconnect/1}
    ].

prepare_statement_fields() ->
    [ {prepare_statement, fun prepare_statement/1}
    ].
    [{prepare_statement, fun prepare_statement/1}].

prepare_statement(type) -> map();
prepare_statement(desc) -> ?DESC("prepare_statement");
@@ -113,16 +118,16 @@ parse_server(Str, #{host_type := inet_addr, default_port := DefaultPort}) ->
    try string:tokens(str(Str), ": ") of
        [Ip, Port] ->
            case parse_ip(Ip) of
                {ok, R} -> {R, list_to_integer(Port)}
            end;
        [Ip] ->
            case parse_ip(Ip) of
                {ok, R} -> {R, DefaultPort}
            end;
        _ ->
            ?THROW_ERROR("Bad server schema.")
    catch
        error : Reason ->
        error:Reason ->
            ?THROW_ERROR(Reason)
    end;
parse_server(Str, #{host_type := hostname, default_port := DefaultPort}) ->

@@ -134,7 +139,7 @@ parse_server(Str, #{host_type := hostname, default_port := DefaultPort}) ->
        _ ->
            ?THROW_ERROR("Bad server schema.")
    catch
        error : Reason ->
        error:Reason ->
            ?THROW_ERROR(Reason)
    end;
parse_server(_, _) ->
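A quick sketch of the parse_server/2 behaviour shown in the hunks above; the option map and the 1883 default port are illustrative values, not taken from this diff:

%% Illustrative only:
%% > parse_server("broker.local:8883", #{host_type => hostname, default_port => 1883}).
%% {"broker.local", 8883}
%% > parse_server("broker.local", #{host_type => hostname, default_port => 1883}).
%% {"broker.local", 1883}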
@@ -1,4 +1,3 @@

%%--------------------------------------------------------------------
%% Copyright (c) 2020-2022 EMQ Technologies Co., Ltd. All Rights Reserved.
%%

@@ -15,37 +14,38 @@
%% limitations under the License.
%%--------------------------------------------------------------------

-module(emqx_resource_ssl).
-module(emqx_connector_ssl).

-export([ convert_certs/2
        , convert_certs/3
        , clear_certs/2
        ]).
-export([
    convert_certs/2,
    clear_certs/2
]).

convert_certs(ResId, NewConfig) ->
    convert_certs(ResId, NewConfig, #{}).

convert_certs(ResId, NewConfig, OldConfig) ->
    OldSSL = drop_invalid_certs(maps:get(ssl, OldConfig, undefined)),
    NewSSL = drop_invalid_certs(maps:get(ssl, NewConfig, undefined)),
    CertsDir = cert_dir(ResId),
    case emqx_tls_lib:ensure_ssl_files(CertsDir, NewSSL) of
convert_certs(RltvDir, NewConfig) ->
    NewSSL = drop_invalid_certs(maps:get(<<"ssl">>, NewConfig, undefined)),
    case emqx_tls_lib:ensure_ssl_files(RltvDir, NewSSL) of
        {ok, NewSSL1} ->
            ok = emqx_tls_lib:delete_ssl_files(CertsDir, NewSSL1, OldSSL),
            {ok, new_ssl_config(NewConfig, NewSSL1)};
        {error, Reason} ->
            {error, {bad_ssl_config, Reason}}
    end.

clear_certs(ResId, Config) ->
    OldSSL = drop_invalid_certs(maps:get(ssl, Config, undefined)),
    ok = emqx_tls_lib:delete_ssl_files(cert_dir(ResId), undefined, OldSSL).

cert_dir(ResId) ->
    filename:join(["resources", ResId]).
clear_certs(RltvDir, Config) ->
    OldSSL = drop_invalid_certs(map_get_oneof([<<"ssl">>, ssl], Config, undefined)),
    ok = emqx_tls_lib:delete_ssl_files(RltvDir, undefined, OldSSL).

new_ssl_config(Config, undefined) -> Config;
new_ssl_config(Config, SSL) -> Config#{ssl => SSL}.
new_ssl_config(Config, SSL) -> Config#{<<"ssl">> => SSL}.

drop_invalid_certs(undefined) -> undefined;
drop_invalid_certs(SSL) -> emqx_tls_lib:drop_invalid_certs(SSL).

map_get_oneof([], _Map, Default) ->
    Default;
map_get_oneof([Key | Keys], Map, Default) ->
    case maps:find(Key, Map) of
        error ->
            map_get_oneof(Keys, Map, Default);
        {ok, Value} ->
            Value
    end.
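The new map_get_oneof/3 helper above exists because the config may arrive with either binary or atom keys; a minimal illustration:

%% Illustrative only:
%% > map_get_oneof([<<"ssl">>, ssl], #{ssl => #{enable => true}}, undefined).
%% #{enable => true}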
@@ -27,20 +27,24 @@ start_link() ->
    supervisor:start_link({local, ?SERVER}, ?MODULE, []).

init([]) ->
    SupFlags = #{strategy => one_for_all,
                 intensity => 5,
                 period => 20},
    SupFlags = #{
        strategy => one_for_all,
        intensity => 5,
        period => 20
    },
    ChildSpecs = [
        child_spec(emqx_connector_mqtt)
    ],
    {ok, {SupFlags, ChildSpecs}}.

child_spec(Mod) ->
    #{id => Mod,
      start => {Mod, start_link, []},
      restart => permanent,
      shutdown => 3000,
      type => supervisor,
      modules => [Mod]}.
    #{
        id => Mod,
        start => {Mod, start_link, []},
        restart => permanent,
        shutdown => 3000,
        type => supervisor,
        modules => [Mod]
    }.

%% internal functions

@@ -18,21 +18,24 @@

-module(emqx_connector_mqtt_mod).

-export([ start/1
        , send/2
        , stop/1
        , ping/1
        ]).
-export([
    start/1,
    send/2,
    stop/1,
    ping/1
]).

-export([ ensure_subscribed/3
        , ensure_unsubscribed/2
        ]).
-export([
    ensure_subscribed/3,
    ensure_unsubscribed/2
]).

%% callbacks for emqtt
-export([ handle_puback/2
        , handle_publish/3
        , handle_disconnected/2
        ]).
-export([
    handle_puback/2,
    handle_publish/3,
    handle_disconnected/2
]).

-include_lib("emqx/include/logger.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl").

@@ -69,7 +72,7 @@ start(Config) ->
        ok = sub_remote_topics(Pid, Subscriptions),
        {ok, #{client_pid => Pid, subscriptions => Subscriptions}}
    catch
        throw : Reason ->
        throw:Reason ->
            ok = stop(#{client_pid => Pid}),
            {error, error_reason(Reason, ServerStr)}
    end;
@@ -90,13 +93,14 @@ stop(#{client_pid := Pid}) ->

ping(undefined) ->
    pang;

ping(#{client_pid := Pid}) ->
    emqtt:ping(Pid).

ensure_subscribed(#{client_pid := Pid, subscriptions := Subs} = Conn, Topic, QoS) when is_pid(Pid) ->
ensure_subscribed(#{client_pid := Pid, subscriptions := Subs} = Conn, Topic, QoS) when
    is_pid(Pid)
->
    case emqtt:subscribe(Pid, Topic, QoS) of
        {ok, _, _} -> Conn#{subscriptions => [{Topic, QoS}|Subs]};
        {ok, _, _} -> Conn#{subscriptions => [{Topic, QoS} | Subs]};
        Error -> {error, Error}
    end;
ensure_subscribed(_Conn, _Topic, _QoS) ->

@@ -120,15 +124,14 @@ safe_stop(Pid, StopF, Timeout) ->
    try
        StopF()
    catch
        _ : _ ->
        _:_ ->
            ok
    end,
    receive
        {'DOWN', MRef, _, _, _} ->
            ok
    after
        Timeout ->
            exit(Pid, kill)
    after Timeout ->
        exit(Pid, kill)
    end.

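In short, safe_stop/3 above is a best-effort shutdown: errors raised by StopF are swallowed, and if the monitored process does not deliver its 'DOWN' message within Timeout it is killed outright.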
send(Conn, Msgs) ->

@@ -157,26 +160,38 @@ send(#{client_pid := ClientPid} = Conn, [Msg | Rest], PktIds) ->
            {error, Reason}
    end.

handle_puback(#{packet_id := PktId, reason_code := RC}, Parent)
    when RC =:= ?RC_SUCCESS;
         RC =:= ?RC_NO_MATCHING_SUBSCRIBERS ->
    Parent ! {batch_ack, PktId}, ok;
handle_puback(#{packet_id := PktId, reason_code := RC}, Parent) when
    RC =:= ?RC_SUCCESS;
    RC =:= ?RC_NO_MATCHING_SUBSCRIBERS
->
    Parent ! {batch_ack, PktId},
    ok;
handle_puback(#{packet_id := PktId, reason_code := RC}, _Parent) ->
    ?SLOG(warning, #{msg => "publish_to_remote_node_failed",
                     packet_id => PktId, reason_code => RC}).
    ?SLOG(warning, #{
        msg => "publish_to_remote_node_failed",
        packet_id => PktId,
        reason_code => RC
    }).

handle_publish(Msg, undefined, _Opts) ->
    ?SLOG(error, #{msg => "cannot_publish_to_local_broker_as"
                          "_'ingress'_is_not_configured",
                   message => Msg});
    ?SLOG(error, #{
        msg =>
            "cannot_publish_to_local_broker_as"
            "_'ingress'_is_not_configured",
        message => Msg
    });
handle_publish(#{properties := Props} = Msg0, Vars, Opts) ->
    Msg = format_msg_received(Msg0, Opts),
    ?SLOG(debug, #{msg => "publish_to_local_broker",
                   message => Msg, vars => Vars}),
    ?SLOG(debug, #{
        msg => "publish_to_local_broker",
        message => Msg,
        vars => Vars
    }),
    case Vars of
        #{on_message_received := {Mod, Func, Args}} ->
            _ = erlang:apply(Mod, Func, [Msg | Args]);
        _ -> ok
        _ ->
            ok
    end,
    maybe_publish_to_local_broker(Msg, Vars, Props).

@@ -184,12 +199,14 @@ handle_disconnected(Reason, Parent) ->
    Parent ! {disconnected, self(), Reason}.

make_hdlr(Parent, Vars, Opts) ->
    #{puback => {fun ?MODULE:handle_puback/2, [Parent]},
      publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]},
      disconnected => {fun ?MODULE:handle_disconnected/2, [Parent]}
     }.
    #{
        puback => {fun ?MODULE:handle_puback/2, [Parent]},
        publish => {fun ?MODULE:handle_publish/3, [Vars, Opts]},
        disconnected => {fun ?MODULE:handle_disconnected/2, [Parent]}
    }.

sub_remote_topics(_ClientPid, undefined) -> ok;
sub_remote_topics(_ClientPid, undefined) ->
    ok;
sub_remote_topics(ClientPid, #{remote_topic := FromTopic, remote_qos := QoS}) ->
    case emqtt:subscribe(ClientPid, FromTopic, QoS) of
        {ok, _, _} -> ok;

@@ -199,52 +216,82 @@ sub_remote_topics(ClientPid, #{remote_topic := FromTopic, remote_qos := QoS}) ->
process_config(Config) ->
    maps:without([conn_type, address, receive_mountpoint, subscriptions, name], Config).

maybe_publish_to_local_broker(#{topic := Topic} = Msg, #{remote_topic := SubTopic} = Vars,
                              Props) ->
maybe_publish_to_local_broker(
    #{topic := Topic} = Msg,
    #{remote_topic := SubTopic} = Vars,
    Props
) ->
    case maps:get(local_topic, Vars, undefined) of
        undefined ->
            ok; %% local topic is not set, discard it
            %% local topic is not set, discard it
            ok;
        _ ->
            case emqx_topic:match(Topic, SubTopic) of
                true ->
                    _ = emqx_broker:publish(emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props)),
                    _ = emqx_broker:publish(
                        emqx_connector_mqtt_msg:to_broker_msg(Msg, Vars, Props)
                    ),
                    ok;
                false ->
                    ?SLOG(warning, #{msg => "discard_message_as_topic_not_matched",
                                     message => Msg, subscribed => SubTopic, got_topic => Topic})
                    ?SLOG(warning, #{
                        msg => "discard_message_as_topic_not_matched",
                        message => Msg,
                        subscribed => SubTopic,
                        got_topic => Topic
                    })
            end
    end.

format_msg_received(#{dup := Dup, payload := Payload, properties := Props,
                      qos := QoS, retain := Retain, topic := Topic}, #{server := Server}) ->
    #{ id => emqx_guid:to_hexstr(emqx_guid:gen())
     , server => Server
     , payload => Payload
     , topic => Topic
     , qos => QoS
     , dup => Dup
     , retain => Retain
     , pub_props => printable_maps(Props)
     , message_received_at => erlang:system_time(millisecond)
     }.
format_msg_received(
    #{
        dup := Dup,
        payload := Payload,
        properties := Props,
        qos := QoS,
        retain := Retain,
        topic := Topic
    },
    #{server := Server}
) ->
    #{
        id => emqx_guid:to_hexstr(emqx_guid:gen()),
        server => Server,
        payload => Payload,
        topic => Topic,
        qos => QoS,
        dup => Dup,
        retain => Retain,
        pub_props => printable_maps(Props),
        message_received_at => erlang:system_time(millisecond)
    }.

printable_maps(undefined) -> #{};
printable_maps(undefined) ->
    #{};
printable_maps(Headers) ->
    maps:fold(
        fun ('User-Property', V0, AccIn) when is_list(V0) ->
        fun
            ('User-Property', V0, AccIn) when is_list(V0) ->
                AccIn#{
                    'User-Property' => maps:from_list(V0),
                    'User-Property-Pairs' => [#{
                        key => Key,
                        value => Value
                    } || {Key, Value} <- V0]
                    'User-Property-Pairs' => [
                        #{
                            key => Key,
                            value => Value
                        }
                     || {Key, Value} <- V0
                    ]
                };
            (K, V0, AccIn) -> AccIn#{K => V0}
        end, #{}, Headers).
            (K, V0, AccIn) ->
                AccIn#{K => V0}
        end,
        #{},
        Headers
    ).
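The 'User-Property' handling in printable_maps/1 above keeps both a map form and an ordered pair list; a minimal illustration with made-up values:

%% Illustrative only:
%% > printable_maps(#{'User-Property' => [{<<"k">>, <<"v">>}]}).
%% #{'User-Property' => #{<<"k">> => <<"v">>},
%%   'User-Property-Pairs' => [#{key => <<"k">>, value => <<"v">>}]}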
ip_port_to_server_str(Host, Port) ->
    HostStr = case inet:ntoa(Host) of
                  {error, einval} -> Host;
                  IPStr -> IPStr
              end,
    HostStr =
        case inet:ntoa(Host) of
            {error, einval} -> Host;
            IPStr -> IPStr
        end,
    list_to_binary(io_lib:format("~s:~w", [HostStr, Port])).

@@ -16,17 +16,19 @@

-module(emqx_connector_mqtt_msg).

-export([ to_binary/1
        , from_binary/1
        , make_pub_vars/2
        , to_remote_msg/2
        , to_broker_msg/3
        , estimate_size/1
        ]).
-export([
    to_binary/1,
    from_binary/1,
    make_pub_vars/2,
    to_remote_msg/2,
    to_broker_msg/3,
    estimate_size/1
]).

-export([ replace_vars_in_str/2
        , replace_simple_var/2
        ]).
-export([
    replace_vars_in_str/2,
    replace_simple_var/2
]).

-export_type([msg/0]).

@@ -34,7 +36,6 @@

-include_lib("emqtt/include/emqtt.hrl").

-type msg() :: emqx_types:message().
-type exp_msg() :: emqx_types:message() | #mqtt_msg{}.

@@ -46,7 +47,8 @@
        payload := binary()
    }.

make_pub_vars(_, undefined) -> undefined;
make_pub_vars(_, undefined) ->
    undefined;
make_pub_vars(Mountpoint, Conf) when is_map(Conf) ->
    Conf#{mountpoint => Mountpoint}.

@@ -57,37 +59,56 @@ make_pub_vars(Mountpoint, Conf) when is_map(Conf) ->
%% Shame that we have to know the callback module here
%% would be great if we can get rid of #mqtt_msg{} record
%% and use #message{} in all places.
-spec to_remote_msg(msg() | map(), variables())
    -> exp_msg().
-spec to_remote_msg(msg() | map(), variables()) ->
    exp_msg().
to_remote_msg(#message{flags = Flags0} = Msg, Vars) ->
    Retain0 = maps:get(retain, Flags0, false),
    MapMsg = maps:put(retain, Retain0, emqx_rule_events:eventmsg_publish(Msg)),
    to_remote_msg(MapMsg, Vars);
to_remote_msg(MapMsg, #{remote_topic := TopicToken, payload := PayloadToken,
                        remote_qos := QoSToken, retain := RetainToken, mountpoint := Mountpoint}) when is_map(MapMsg) ->
to_remote_msg(MapMsg, #{
    remote_topic := TopicToken,
    payload := PayloadToken,
    remote_qos := QoSToken,
    retain := RetainToken,
    mountpoint := Mountpoint
}) when is_map(MapMsg) ->
    Topic = replace_vars_in_str(TopicToken, MapMsg),
    Payload = process_payload(PayloadToken, MapMsg),
    QoS = replace_simple_var(QoSToken, MapMsg),
    Retain = replace_simple_var(RetainToken, MapMsg),
    #mqtt_msg{qos = QoS,
              retain = Retain,
              topic = topic(Mountpoint, Topic),
              props = #{},
              payload = Payload};
    #mqtt_msg{
        qos = QoS,
        retain = Retain,
        topic = topic(Mountpoint, Topic),
        props = #{},
        payload = Payload
    };
to_remote_msg(#message{topic = Topic} = Msg, #{mountpoint := Mountpoint}) ->
    Msg#message{topic = topic(Mountpoint, Topic)}.

%% published from remote node over a MQTT connection
to_broker_msg(#{dup := Dup} = MapMsg,
              #{local_topic := TopicToken, payload := PayloadToken,
                local_qos := QoSToken, retain := RetainToken, mountpoint := Mountpoint}, Props) ->
to_broker_msg(
    #{dup := Dup} = MapMsg,
    #{
        local_topic := TopicToken,
        payload := PayloadToken,
        local_qos := QoSToken,
        retain := RetainToken,
        mountpoint := Mountpoint
    },
    Props
) ->
    Topic = replace_vars_in_str(TopicToken, MapMsg),
    Payload = process_payload(PayloadToken, MapMsg),
    QoS = replace_simple_var(QoSToken, MapMsg),
    Retain = replace_simple_var(RetainToken, MapMsg),
    set_headers(Props,
        emqx_message:set_flags(#{dup => Dup, retain => Retain},
            emqx_message:make(bridge, QoS, topic(Mountpoint, Topic), Payload))).
    set_headers(
        Props,
        emqx_message:set_flags(
            #{dup => Dup, retain => Retain},
            emqx_message:make(bridge, QoS, topic(Mountpoint, Topic), Payload)
        )
    ).
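Both conversion paths above resolve ${...} template tokens (topic, qos, retain, payload) against the message map before building the outgoing #mqtt_msg{} or broker #message{}.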
process_payload([], Msg) ->
    emqx_json:encode(Msg);

@@ -21,17 +21,17 @@

-behaviour(hocon_schema).

-export([ namespace/0
        , roots/0
        , fields/1
        , desc/1
        ]).
-export([
    namespace/0,
    roots/0,
    fields/1,
    desc/1
]).

-export([ ingress_desc/0
        , egress_desc/0
        ]).

-export([non_empty_string/1]).
-export([
    ingress_desc/0,
    egress_desc/0
]).

-import(emqx_schema, [mk_duration/2]).

@@ -42,146 +42,210 @@ roots() ->

fields("config") ->
    fields("connector") ++
    topic_mappings();

        topic_mappings();
fields("connector") ->
    [ {mode,
       sc(hoconsc:enum([cluster_shareload]),
          #{ default => cluster_shareload
           , desc => ?DESC("mode")
           })}
    , {server,
       sc(emqx_schema:ip_port(),
          #{ required => true
           , desc => ?DESC("server")
           })}
    , {reconnect_interval, mk_duration(
        "Reconnect interval. Delay for the MQTT bridge to retry establishing the connection "
        "in case of transportation failure.",
        #{default => "15s"})}
    , {proto_ver,
       sc(hoconsc:enum([v3, v4, v5]),
          #{ default => v4
           , desc => ?DESC("proto_ver")
           })}
    , {username,
       sc(binary(),
          #{ default => "emqx"
           , desc => ?DESC("username")
           })}
    , {password,
       sc(binary(),
          #{ default => "emqx"
           , desc => ?DESC("password")
           })}
    , {clean_start,
       sc(boolean(),
          #{ default => true
           , desc => ?DESC("clean_start")
           })}
    , {keepalive, mk_duration("MQTT Keepalive.", #{default => "300s"})}
    , {retry_interval, mk_duration(
        "Message retry interval. Delay for the MQTT bridge to retry sending the QoS1/QoS2 "
        "messages in case of ACK not received.",
        #{default => "15s"})}
    , {max_inflight,
       sc(non_neg_integer(),
          #{ default => 32
           , desc => ?DESC("max_inflight")
           })}
    , {replayq,
       sc(ref("replayq"), #{})}
    [
        {mode,
            sc(
                hoconsc:enum([cluster_shareload]),
                #{
                    default => cluster_shareload,
                    desc => ?DESC("mode")
                }
            )},
        {server,
            sc(
                emqx_schema:ip_port(),
                #{
                    required => true,
                    desc => ?DESC("server")
                }
            )},
        {reconnect_interval,
            mk_duration(
                "Reconnect interval. Delay for the MQTT bridge to retry establishing the connection "
                "in case of transportation failure.",
                #{default => "15s"}
            )},
        {proto_ver,
            sc(
                hoconsc:enum([v3, v4, v5]),
                #{
                    default => v4,
                    desc => ?DESC("proto_ver")
                }
            )},
        {username,
            sc(
                binary(),
                #{
                    default => "emqx",
                    desc => ?DESC("username")
                }
            )},
        {password,
            sc(
                binary(),
                #{
                    default => "emqx",
                    desc => ?DESC("password")
                }
            )},
        {clean_start,
            sc(
                boolean(),
                #{
                    default => true,
                    desc => ?DESC("clean_start")
                }
            )},
        {keepalive, mk_duration("MQTT Keepalive.", #{default => "300s"})},
        {retry_interval,
            mk_duration(
                "Message retry interval. Delay for the MQTT bridge to retry sending the QoS1/QoS2 "
                "messages in case of ACK not received.",
                #{default => "15s"}
            )},
        {max_inflight,
            sc(
                non_neg_integer(),
                #{
                    default => 32,
                    desc => ?DESC("max_inflight")
                }
            )},
        {replayq, sc(ref("replayq"), #{})}
    ] ++ emqx_connector_schema_lib:ssl_fields();

fields("ingress") ->
    %% the message may be subscribed by rules, in this case 'local_topic' is not necessary
    [ {remote_topic,
       sc(binary(),
          #{ required => true
           , validator => fun ?MODULE:non_empty_string/1
           , desc => ?DESC("ingress_remote_topic")
           })}
    , {remote_qos,
       sc(qos(),
          #{ default => 1
           , desc => ?DESC("ingress_remote_qos")
           })}
    , {local_topic,
       sc(binary(),
          #{ validator => fun ?MODULE:non_empty_string/1
           , desc => ?DESC("ingress_local_topic")
           })}
    , {local_qos,
       sc(qos(),
          #{ default => <<"${qos}">>
           , desc => ?DESC("ingress_local_qos")
           })}
    , {hookpoint,
       sc(binary(),
          #{ desc => ?DESC("ingress_hookpoint")
           })}
    [
        {remote_topic,
            sc(
                binary(),
                #{
                    required => true,
                    validator => fun emqx_schema:non_empty_string/1,
                    desc => ?DESC("ingress_remote_topic")
                }
            )},
        {remote_qos,
            sc(
                qos(),
                #{
                    default => 1,
                    desc => ?DESC("ingress_remote_qos")
                }
            )},
        {local_topic,
            sc(
                binary(),
                #{
                    validator => fun emqx_schema:non_empty_string/1,
                    desc => ?DESC("ingress_local_topic")
                }
            )},
        {local_qos,
            sc(
                qos(),
                #{
                    default => <<"${qos}">>,
                    desc => ?DESC("ingress_local_qos")
                }
            )},
        {hookpoint,
            sc(
                binary(),
                #{desc => ?DESC("ingress_hookpoint")}
            )},

    , {retain,
       sc(hoconsc:union([boolean(), binary()]),
          #{ default => <<"${retain}">>
           , desc => ?DESC("retain")
           })}
        {retain,
            sc(
                hoconsc:union([boolean(), binary()]),
                #{
                    default => <<"${retain}">>,
                    desc => ?DESC("retain")
                }
            )},

    , {payload,
       sc(binary(),
          #{ default => <<"${payload}">>
           , desc => ?DESC("payload")
           })}
        {payload,
            sc(
                binary(),
                #{
                    default => <<"${payload}">>,
                    desc => ?DESC("payload")
                }
            )}
    ];

fields("egress") ->
    %% the message may be sent from rules, in this case 'local_topic' is not necessary
    [ {local_topic,
       sc(binary(),
          #{ desc => ?DESC("egress_local_topic")
           , validator => fun ?MODULE:non_empty_string/1
           })}
    , {remote_topic,
       sc(binary(),
          #{ required => true
           , validator => fun ?MODULE:non_empty_string/1
           , desc => ?DESC("egress_remote_topic")
           })}
    , {remote_qos,
       sc(qos(),
          #{ required => true
           , desc => ?DESC("egress_remote_qos")
           })}
    [
        {local_topic,
            sc(
                binary(),
                #{
                    desc => ?DESC("egress_local_topic"),
                    validator => fun emqx_schema:non_empty_string/1
                }
            )},
        {remote_topic,
            sc(
                binary(),
                #{
                    required => true,
                    validator => fun emqx_schema:non_empty_string/1,
                    desc => ?DESC("egress_remote_topic")
                }
            )},
        {remote_qos,
            sc(
                qos(),
                #{
                    required => true,
                    desc => ?DESC("egress_remote_qos")
                }
            )},

    , {retain,
       sc(hoconsc:union([boolean(), binary()]),
          #{ required => true
           , desc => ?DESC("retain")
           })}
        {retain,
            sc(
                hoconsc:union([boolean(), binary()]),
                #{
                    required => true,
                    desc => ?DESC("retain")
                }
            )},

    , {payload,
       sc(binary(),
          #{ required => true
           , desc => ?DESC("payload")
           })}
        {payload,
            sc(
                binary(),
                #{
                    required => true,
                    desc => ?DESC("payload")
                }
            )}
    ];

fields("replayq") ->
    [ {dir,
       sc(hoconsc:union([boolean(), string()]),
          #{ desc => ?DESC("dir")
           })}
    , {seg_bytes,
       sc(emqx_schema:bytesize(),
          #{ default => "100MB"
           , desc => ?DESC("seg_bytes")
           })}
    , {offload,
       sc(boolean(),
          #{ default => false
           , desc => ?DESC("offload")
           })}
    [
        {dir,
            sc(
                hoconsc:union([boolean(), string()]),
                #{desc => ?DESC("dir")}
            )},
        {seg_bytes,
            sc(
                emqx_schema:bytesize(),
                #{
                    default => "100MB",
                    desc => ?DESC("seg_bytes")
                }
            )},
        {offload,
            sc(
                boolean(),
                #{
                    default => false,
                    desc => ?DESC("offload")
                }
            )}
    ].

desc("connector") ->
@@ -196,42 +260,40 @@ desc(_) ->
    undefined.

topic_mappings() ->
    [ {ingress,
       sc(ref("ingress"),
          #{ default => #{}
           })}
    , {egress,
       sc(ref("egress"),
          #{ default => #{}
           })}
    [
        {ingress,
            sc(
                ref("ingress"),
                #{default => #{}}
            )},
        {egress,
            sc(
                ref("egress"),
                #{default => #{}}
            )}
    ].

ingress_desc() -> "
The ingress config defines how this bridge receives messages from the remote MQTT broker, and then
sends them to the local broker.</br>
Templates with variables are allowed in 'local_topic', 'remote_qos', 'qos', 'retain',
'payload'.</br>
NOTE: if this bridge is used as the input of a rule (emqx rule engine), and local_topic is also
configured, then messages got from the remote broker will be sent to both the 'local_topic' and
the rule.
".
ingress_desc() ->
    "\n"
    "The ingress config defines how this bridge receives messages from the remote MQTT broker, and then\n"
    "sends them to the local broker.</br>\n"
    "Templates with variables are allowed in 'local_topic', 'remote_qos', 'qos', 'retain',\n"
    "'payload'.</br>\n"
    "NOTE: if this bridge is used as the input of a rule (emqx rule engine), and local_topic is also\n"
    "configured, then messages got from the remote broker will be sent to both the 'local_topic' and\n"
    "the rule.\n".

egress_desc() -> "
The egress config defines how this bridge forwards messages from the local broker to the remote
broker.</br>
Templates with variables are allowed in 'remote_topic', 'qos', 'retain', 'payload'.</br>
NOTE: if this bridge is used as the output of a rule (emqx rule engine), and local_topic
is also configured, then both the data got from the rule and the MQTT messages that match
local_topic will be forwarded.
".
egress_desc() ->
    "\n"
    "The egress config defines how this bridge forwards messages from the local broker to the remote\n"
    "broker.</br>\n"
    "Templates with variables are allowed in 'remote_topic', 'qos', 'retain', 'payload'.</br>\n"
    "NOTE: if this bridge is used as the output of a rule (emqx rule engine), and local_topic\n"
    "is also configured, then both the data got from the rule and the MQTT messages that match\n"
    "local_topic will be forwarded.\n".

qos() ->
    hoconsc:union([emqx_schema:qos(), binary()]).

non_empty_string(<<>>) -> {error, empty_string_not_allowed};
non_empty_string("") -> {error, empty_string_not_allowed};
non_empty_string(S) when is_binary(S); is_list(S) -> ok;
non_empty_string(_) -> {error, invalid_string}.

sc(Type, Meta) -> hoconsc:mk(Type, Meta).
ref(Field) -> hoconsc:ref(?MODULE, Field).
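The non_empty_string/1 validator above accepts any non-empty binary or string; a minimal illustration:

%% Illustrative only:
%% > non_empty_string(<<"t/1">>).   %% ok
%% > non_empty_string(<<>>).        %% {error, empty_string_not_allowed}
%% > non_empty_string(42).          %% {error, invalid_string}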
@@ -66,43 +66,46 @@
-include_lib("emqx/include/logger.hrl").

%% APIs
-export([ start_link/1
        , register_metrics/0
        , stop/1
        ]).
-export([
    start_link/1,
    register_metrics/0,
    stop/1
]).

%% gen_statem callbacks
-export([ terminate/3
        , code_change/4
        , init/1
        , callback_mode/0
        ]).
-export([
    terminate/3,
    code_change/4,
    init/1,
    callback_mode/0
]).

%% state functions
-export([ idle/3
        , connected/3
        ]).
-export([
    idle/3,
    connected/3
]).

%% management APIs
-export([ ensure_started/1
        , ensure_stopped/1
        , status/1
        , ping/1
        , send_to_remote/2
        ]).
-export([
    ensure_started/1,
    ensure_stopped/1,
    status/1,
    ping/1,
    send_to_remote/2
]).

-export([ get_forwards/1
        ]).
-export([get_forwards/1]).

-export([ get_subscriptions/1
        ]).
-export([get_subscriptions/1]).

%% Internal
-export([msg_marshaller/1]).

-export_type([ config/0
             , ack_ref/0
             ]).
-export_type([
    config/0,
    ack_ref/0
]).

-type id() :: atom() | string() | pid().
-type qos() :: emqx_types:qos().

@@ -113,7 +116,6 @@
-include_lib("emqx/include/logger.hrl").
-include_lib("emqx/include/emqx_mqtt.hrl").

%% same as default in-flight limit for emqtt
-define(DEFAULT_INFLIGHT_SIZE, 32).
-define(DEFAULT_RECONNECT_DELAY_MS, timer:seconds(5)).
@@ -188,8 +190,10 @@ callback_mode() -> [state_functions].

%% @doc Config should be a map().
init(#{name := Name} = ConnectOpts) ->
    ?SLOG(debug, #{msg => "starting_bridge_worker",
                   name => Name}),
    ?SLOG(debug, #{
        msg => "starting_bridge_worker",
        name => Name
    }),
    erlang:process_flag(trap_exit, true),
    Queue = open_replayq(Name, maps:get(replayq, ConnectOpts, #{})),
    State = init_state(ConnectOpts),
@@ -205,31 +209,44 @@ init_state(Opts) ->
    Mountpoint = maps:get(forward_mountpoint, Opts, undefined),
    MaxInflightSize = maps:get(max_inflight, Opts, ?DEFAULT_INFLIGHT_SIZE),
    Name = maps:get(name, Opts, undefined),
    #{start_type => StartType,
      reconnect_interval => ReconnDelayMs,
      mountpoint => format_mountpoint(Mountpoint),
      inflight => [],
      max_inflight => MaxInflightSize,
      connection => undefined,
      name => Name}.
    #{
        start_type => StartType,
        reconnect_interval => ReconnDelayMs,
        mountpoint => format_mountpoint(Mountpoint),
        inflight => [],
        max_inflight => MaxInflightSize,
        connection => undefined,
        name => Name
    }.

open_replayq(Name, QCfg) ->
    Dir = maps:get(dir, QCfg, undefined),
    SegBytes = maps:get(seg_bytes, QCfg, ?DEFAULT_SEG_BYTES),
    MaxTotalSize = maps:get(max_total_size, QCfg, ?DEFAULT_MAX_TOTAL_SIZE),
    QueueConfig = case Dir =:= undefined orelse Dir =:= "" of
                      true -> #{mem_only => true};
                      false -> #{dir => filename:join([Dir, node(), Name]),
                                 seg_bytes => SegBytes, max_total_size => MaxTotalSize}
                  end,
    replayq:open(QueueConfig#{sizer => fun emqx_connector_mqtt_msg:estimate_size/1,
                              marshaller => fun ?MODULE:msg_marshaller/1}).
    QueueConfig =
        case Dir =:= undefined orelse Dir =:= "" of
            true ->
                #{mem_only => true};
            false ->
                #{
                    dir => filename:join([Dir, node(), Name]),
                    seg_bytes => SegBytes,
                    max_total_size => MaxTotalSize
                }
        end,
    replayq:open(QueueConfig#{
        sizer => fun emqx_connector_mqtt_msg:estimate_size/1,
        marshaller => fun ?MODULE:msg_marshaller/1
    }).
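Note on the queue setup above: when no 'dir' is configured, the replayq falls back to a memory-only queue; otherwise segments are written under <dir>/<node>/<name> with the given segment and total-size limits.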
pre_process_opts(#{subscriptions := InConf, forwards := OutConf} = ConnectOpts) ->
    ConnectOpts#{subscriptions => pre_process_in_out(in, InConf),
                 forwards => pre_process_in_out(out, OutConf)}.
    ConnectOpts#{
        subscriptions => pre_process_in_out(in, InConf),
        forwards => pre_process_in_out(out, OutConf)
    }.

pre_process_in_out(_, undefined) -> undefined;
pre_process_in_out(_, undefined) ->
    undefined;
pre_process_in_out(in, Conf) when is_map(Conf) ->
    Conf1 = pre_process_conf(local_topic, Conf),
    Conf2 = pre_process_conf(local_qos, Conf1),
@@ -245,7 +262,8 @@ pre_process_in_out_common(Conf) ->

pre_process_conf(Key, Conf) ->
    case maps:find(Key, Conf) of
        error -> Conf;
        error ->
            Conf;
        {ok, Val} when is_binary(Val) ->
            Conf#{Key => emqx_plugin_libs_rule:preproc_tmpl(Val)};
        {ok, Val} ->
@@ -276,7 +294,6 @@ idle(info, idle, #{start_type := auto} = State) ->
    connecting(State);
idle(state_timeout, reconnect, State) ->
    connecting(State);

idle(Type, Content, State) ->
    common(idle, Type, Content, State).

@@ -298,13 +315,16 @@ connected(state_timeout, connected, #{inflight := Inflight} = State) ->
connected(internal, maybe_send, State) ->
    {_, NewState} = pop_and_send(State),
    {keep_state, NewState};

connected(info, {disconnected, Conn, Reason},
          #{connection := Connection, name := Name, reconnect_interval := ReconnectDelayMs} = State) ->
connected(
    info,
    {disconnected, Conn, Reason},
    #{connection := Connection, name := Name, reconnect_interval := ReconnectDelayMs} = State
) ->
    ?tp(info, disconnected, #{name => Name, reason => Reason}),
    case Conn =:= maps:get(client_pid, Connection, undefined) of
        true ->
            {next_state, idle, State#{connection => undefined}, {state_timeout, ReconnectDelayMs, reconnect}};
            {next_state, idle, State#{connection => undefined},
                {state_timeout, ReconnectDelayMs, reconnect}};
        false ->
            keep_state_and_data
    end;
@@ -317,7 +337,7 @@ connected(Type, Content, State) ->
%% Common handlers
common(StateName, {call, From}, status, _State) ->
    {keep_state_and_data, [{reply, From, StateName}]};
common(_StateName, {call, From}, ping, #{connection := Conn} =_State) ->
common(_StateName, {call, From}, ping, #{connection := Conn} = _State) ->
    Reply = emqx_connector_mqtt_mod:ping(Conn),
    {keep_state_and_data, [{reply, From, Reply}]};
common(_StateName, {call, From}, ensure_stopped, #{connection := undefined} = _State) ->
@@ -335,27 +355,39 @@ common(_StateName, cast, {send_to_remote, Msg}, #{replayq := Q} = State) ->
    NewQ = replayq:append(Q, [Msg]),
    {keep_state, State#{replayq => NewQ}, {next_event, internal, maybe_send}};
common(StateName, Type, Content, #{name := Name} = State) ->
    ?SLOG(notice, #{msg => "bridge_discarded_event",
                    name => Name, type => Type, state_name => StateName,
                    content => Content}),
    ?SLOG(notice, #{
        msg => "bridge_discarded_event",
        name => Name,
        type => Type,
        state_name => StateName,
        content => Content
    }),
    {keep_state, State}.

do_connect(#{connect_opts := ConnectOpts,
             inflight := Inflight,
             name := Name} = State) ->
do_connect(
    #{
        connect_opts := ConnectOpts,
        inflight := Inflight,
        name := Name
    } = State
) ->
    case emqx_connector_mqtt_mod:start(ConnectOpts) of
        {ok, Conn} ->
            ?tp(info, connected, #{name => Name, inflight => length(Inflight)}),
            {ok, State#{connection => Conn}};
        {error, Reason} ->
            ConnectOpts1 = obfuscate(ConnectOpts),
            ?SLOG(error, #{msg => "failed_to_connect",
                           config => ConnectOpts1, reason => Reason}),
            ?SLOG(error, #{
                msg => "failed_to_connect",
                config => ConnectOpts1,
                reason => Reason
            }),
            {error, Reason, State}
    end.

%% Retry all inflight (previously sent but not acked) batches.
retry_inflight(State, []) -> {ok, State};
retry_inflight(State, []) ->
    {ok, State};
retry_inflight(State, [#{q_ack_ref := QAckRef, msg := Msg} | Rest] = OldInf) ->
    case do_send(State, QAckRef, Msg) of
        {ok, State1} ->
@@ -386,28 +418,49 @@ pop_and_send_loop(#{replayq := Q} = State, N) ->
    end.

do_send(#{connect_opts := #{forwards := undefined}}, _QAckRef, Msg) ->
    ?SLOG(error, #{msg => "cannot_forward_messages_to_remote_broker"
                          "_as_'egress'_is_not_configured",
                   messages => Msg});
do_send(#{inflight := Inflight,
          connection := Connection,
          mountpoint := Mountpoint,
          connect_opts := #{forwards := Forwards}} = State, QAckRef, Msg) ->
    ?SLOG(error, #{
        msg =>
            "cannot_forward_messages_to_remote_broker"
            "_as_'egress'_is_not_configured",
        messages => Msg
    });
do_send(
    #{
        inflight := Inflight,
        connection := Connection,
        mountpoint := Mountpoint,
        connect_opts := #{forwards := Forwards}
    } = State,
    QAckRef,
    Msg
) ->
    Vars = emqx_connector_mqtt_msg:make_pub_vars(Mountpoint, Forwards),
    ExportMsg = fun(Message) ->
        emqx_metrics:inc('bridge.mqtt.message_sent_to_remote'),
        emqx_connector_mqtt_msg:to_remote_msg(Message, Vars)
    end,
    ?SLOG(debug, #{msg => "publish_to_remote_broker",
                   message => Msg, vars => Vars}),
    ?SLOG(debug, #{
        msg => "publish_to_remote_broker",
        message => Msg,
        vars => Vars
    }),
    case emqx_connector_mqtt_mod:send(Connection, [ExportMsg(Msg)]) of
        {ok, Refs} ->
            {ok, State#{inflight := Inflight ++ [#{q_ack_ref => QAckRef,
                                                   send_ack_ref => map_set(Refs),
                                                   msg => Msg}]}};
            {ok, State#{
                inflight := Inflight ++
                    [
                        #{
                            q_ack_ref => QAckRef,
                            send_ack_ref => map_set(Refs),
                            msg => Msg
                        }
                    ]
            }};
        {error, Reason} ->
            ?SLOG(info, #{msg => "mqtt_bridge_produce_failed",
                          reason => Reason}),
            ?SLOG(info, #{
                msg => "mqtt_bridge_produce_failed",
                reason => Reason
            }),
            {error, State}
    end.

@@ -427,8 +480,10 @@ handle_batch_ack(#{inflight := Inflight0, replayq := Q} = State, Ref) ->
    State#{inflight := Inflight}.

do_ack([], Ref) ->
    ?SLOG(debug, #{msg => "stale_batch_ack_reference",
                   ref => Ref}),
    ?SLOG(debug, #{
        msg => "stale_batch_ack_reference",
        ref => Ref
    }),
    [];
do_ack([#{send_ack_ref := Refs} = First | Rest], Ref) ->
    case maps:is_key(Ref, Refs) of
@@ -443,8 +498,16 @@ do_ack([#{send_ack_ref := Refs} = First | Rest], Ref) ->
drop_acked_batches(_Q, []) ->
    ?tp(debug, inflight_drained, #{}),
    [];
drop_acked_batches(Q, [#{send_ack_ref := Refs,
                         q_ack_ref := QAckRef} | Rest] = All) ->
drop_acked_batches(
    Q,
    [
        #{
            send_ack_ref := Refs,
            q_ack_ref := QAckRef
        }
        | Rest
    ] = All
) ->
    case maps:size(Refs) of
        0 ->
            %% all messages are acked by bridge target
@@ -475,18 +538,25 @@ format_mountpoint(Prefix) ->
name(Id) -> list_to_atom(str(Id)).

register_metrics() ->
    lists:foreach(fun emqx_metrics:ensure/1,
                  ['bridge.mqtt.message_sent_to_remote',
                   'bridge.mqtt.message_received_from_remote'
                  ]).
    lists:foreach(
        fun emqx_metrics:ensure/1,
        [
            'bridge.mqtt.message_sent_to_remote',
            'bridge.mqtt.message_received_from_remote'
        ]
    ).

obfuscate(Map) ->
    maps:fold(fun(K, V, Acc) ->
                  case is_sensitive(K) of
                      true -> [{K, '***'} | Acc];
                      false -> [{K, V} | Acc]
                  end
              end, [], Map).
    maps:fold(
        fun(K, V, Acc) ->
            case is_sensitive(K) of
                true -> [{K, '***'} | Acc];
                false -> [{K, V} | Acc]
            end
        end,
        [],
        Map
    ).

is_sensitive(password) -> true;
is_sensitive(_) -> false.

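A quick sketch of obfuscate/1 above; note that maps:fold/3 order is unspecified, so the resulting list order may vary:

%% Illustrative only:
%% > obfuscate(#{username => <<"u">>, password => <<"secret">>}).
%% [{password, '***'}, {username, <<"u">>}]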
@@ -26,27 +26,23 @@
-include("emqx_dashboard/include/emqx_dashboard.hrl").

%% output functions
-export([ inspect/3
        ]).
-export([inspect/3]).

-define(BRIDGE_CONF_DEFAULT, <<"bridges: {}">>).
-define(CONNECTR_TYPE, <<"mqtt">>).
-define(CONNECTR_NAME, <<"test_connector">>).
-define(BRIDGE_NAME_INGRESS, <<"ingress_test_bridge">>).
-define(BRIDGE_NAME_EGRESS, <<"egress_test_bridge">>).
-define(MQTT_CONNECTOR(Username),
    #{
-define(MQTT_CONNECTOR(Username), #{
    <<"server">> => <<"127.0.0.1:1883">>,
    <<"username">> => Username,
    <<"password">> => <<"">>,
    <<"proto_ver">> => <<"v4">>,
    <<"ssl">> => #{<<"enable">> => false}
}).
-define(MQTT_CONNECTOR2(Server),
    ?MQTT_CONNECTOR(<<"user1">>)#{<<"server">> => Server}).
-define(MQTT_CONNECTOR2(Server), ?MQTT_CONNECTOR(<<"user1">>)#{<<"server">> => Server}).

-define(MQTT_BRIDGE_INGRESS(ID),
    #{
-define(MQTT_BRIDGE_INGRESS(ID), #{
    <<"connector">> => ID,
    <<"direction">> => <<"ingress">>,
    <<"remote_topic">> => <<"remote_topic/#">>,

@@ -57,8 +53,7 @@
    <<"retain">> => <<"${retain}">>
}).

-define(MQTT_BRIDGE_EGRESS(ID),
    #{
-define(MQTT_BRIDGE_EGRESS(ID), #{
    <<"connector">> => ID,
    <<"direction">> => <<"egress">>,
    <<"local_topic">> => <<"local_topic/#">>,

@@ -68,10 +63,14 @@
    <<"retain">> => <<"${retain}">>
}).

-define(metrics(MATCH, SUCC, FAILED, SPEED, SPEED5M, SPEEDMAX),
    #{<<"matched">> := MATCH, <<"success">> := SUCC,
      <<"failed">> := FAILED, <<"rate">> := SPEED,
      <<"rate_last5m">> := SPEED5M, <<"rate_max">> := SPEEDMAX}).
-define(metrics(MATCH, SUCC, FAILED, SPEED, SPEED5M, SPEEDMAX), #{
    <<"matched">> := MATCH,
    <<"success">> := SUCC,
    <<"failed">> := FAILED,
    <<"rate">> := SPEED,
    <<"rate_last5m">> := SPEED5M,
    <<"rate_max">> := SPEEDMAX
}).

inspect(Selected, _Envs, _Args) ->
    persistent_term:put(?MODULE, #{inspect => Selected}).
@@ -83,24 +82,37 @@ groups() ->
    [].

suite() ->
    [{timetrap,{seconds,30}}].
    [{timetrap, {seconds, 30}}].

init_per_suite(Config) ->
    _ = application:load(emqx_conf),
    %% some testcases (possibly from other apps) already get emqx_connector started
    _ = application:stop(emqx_resource),
    _ = application:stop(emqx_connector),
    ok = emqx_common_test_helpers:start_apps([emqx_rule_engine, emqx_connector,
        emqx_bridge, emqx_dashboard], fun set_special_configs/1),
    ok = emqx_common_test_helpers:start_apps(
        [
            emqx_rule_engine,
            emqx_connector,
            emqx_bridge,
            emqx_dashboard
        ],
        fun set_special_configs/1
    ),
    ok = emqx_common_test_helpers:load_config(emqx_connector_schema, <<"connectors: {}">>),
    ok = emqx_common_test_helpers:load_config(emqx_rule_engine_schema,
        <<"rule_engine {rules {}}">>),
    ok = emqx_common_test_helpers:load_config(
        emqx_rule_engine_schema,
        <<"rule_engine {rules {}}">>
    ),
    ok = emqx_common_test_helpers:load_config(emqx_bridge_schema, ?BRIDGE_CONF_DEFAULT),
    Config.

end_per_suite(_Config) ->
    emqx_common_test_helpers:stop_apps([emqx_rule_engine, emqx_connector, emqx_bridge,
        emqx_dashboard]),
    emqx_common_test_helpers:stop_apps([
        emqx_rule_engine,
        emqx_connector,
        emqx_bridge,
        emqx_dashboard
    ]),
    ok.

set_special_configs(emqx_dashboard) ->

@@ -116,15 +128,24 @@ end_per_testcase(_, _Config) ->
    ok.

clear_resources() ->
    lists:foreach(fun(#{id := Id}) ->
    lists:foreach(
        fun(#{id := Id}) ->
            ok = emqx_rule_engine:delete_rule(Id)
        end, emqx_rule_engine:get_rules()),
    lists:foreach(fun(#{type := Type, name := Name}) ->
        end,
        emqx_rule_engine:get_rules()
    ),
    lists:foreach(
        fun(#{type := Type, name := Name}) ->
            ok = emqx_bridge:remove(Type, Name)
        end, emqx_bridge:list()),
    lists:foreach(fun(#{<<"type">> := Type, <<"name">> := Name}) ->
        end,
        emqx_bridge:list()
    ),
    lists:foreach(
        fun(#{<<"type">> := Type, <<"name">> := Name}) ->
            ok = emqx_connector:delete(Type, Name)
        end, emqx_connector:list_raw()).
        end,
        emqx_connector:list_raw()
    ).

%%------------------------------------------------------------------------------
%% Testcases
@ -137,103 +158,144 @@ t_mqtt_crud_apis(_) ->
|
|||
%% then we add a mqtt connector, using POST
|
||||
%% POST /connectors/ will create a connector
|
||||
User1 = <<"user1">>,
|
||||
{ok, 400, <<"{\"code\":\"BAD_REQUEST\",\"message\""
|
||||
":\"missing some required fields: [name, type]\"}">>}
|
||||
= request(post, uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(User1)#{ <<"type">> => ?CONNECTR_TYPE
|
||||
}),
|
||||
{ok, 201, Connector} = request(post, uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(User1)#{ <<"type">> => ?CONNECTR_TYPE
|
||||
, <<"name">> => ?CONNECTR_NAME
|
||||
}),
|
||||
{ok, 400, <<
|
||||
"{\"code\":\"BAD_REQUEST\",\"message\""
|
||||
":\"missing some required fields: [name, type]\"}"
|
||||
>>} =
|
||||
request(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(User1)#{<<"type">> => ?CONNECTR_TYPE}
|
||||
),
|
||||
{ok, 201, Connector} = request(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(User1)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?CONNECTR_NAME
|
||||
}
|
||||
),
|
||||
|
||||
#{ <<"type">> := ?CONNECTR_TYPE
|
||||
, <<"name">> := ?CONNECTR_NAME
|
||||
, <<"server">> := <<"127.0.0.1:1883">>
|
||||
, <<"username">> := User1
|
||||
, <<"password">> := <<"">>
|
||||
, <<"proto_ver">> := <<"v4">>
|
||||
, <<"ssl">> := #{<<"enable">> := false}
|
||||
} = jsx:decode(Connector),
|
||||
#{
|
||||
<<"type">> := ?CONNECTR_TYPE,
|
||||
<<"name">> := ?CONNECTR_NAME,
|
||||
<<"server">> := <<"127.0.0.1:1883">>,
|
||||
<<"username">> := User1,
|
||||
<<"password">> := <<"">>,
|
||||
<<"proto_ver">> := <<"v4">>,
|
||||
<<"ssl">> := #{<<"enable">> := false}
|
||||
} = jsx:decode(Connector),
|
||||
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
|
||||
%% update the request-path of the connector
|
||||
User2 = <<"user2">>,
|
||||
{ok, 200, Connector2} = request(put, uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR(User2)),
|
||||
?assertMatch(#{ <<"type">> := ?CONNECTR_TYPE
|
||||
, <<"name">> := ?CONNECTR_NAME
|
||||
, <<"server">> := <<"127.0.0.1:1883">>
|
||||
, <<"username">> := User2
|
||||
, <<"password">> := <<"">>
|
||||
, <<"proto_ver">> := <<"v4">>
|
||||
, <<"ssl">> := #{<<"enable">> := false}
|
||||
}, jsx:decode(Connector2)),
|
||||
{ok, 200, Connector2} = request(
|
||||
put,
|
||||
uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR(User2)
|
||||
),
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"type">> := ?CONNECTR_TYPE,
|
||||
<<"name">> := ?CONNECTR_NAME,
|
||||
<<"server">> := <<"127.0.0.1:1883">>,
|
||||
<<"username">> := User2,
|
||||
<<"password">> := <<"">>,
|
||||
<<"proto_ver">> := <<"v4">>,
|
||||
<<"ssl">> := #{<<"enable">> := false}
|
||||
},
|
||||
jsx:decode(Connector2)
|
||||
),
|
||||
|
||||
%% list all connectors again, assert Connector2 is in it
|
||||
{ok, 200, Connector2Str} = request(get, uri(["connectors"]), []),
|
||||
?assertMatch([#{ <<"type">> := ?CONNECTR_TYPE
|
||||
, <<"name">> := ?CONNECTR_NAME
|
||||
, <<"server">> := <<"127.0.0.1:1883">>
|
||||
, <<"username">> := User2
|
||||
, <<"password">> := <<"">>
|
||||
, <<"proto_ver">> := <<"v4">>
|
||||
, <<"ssl">> := #{<<"enable">> := false}
|
||||
}], jsx:decode(Connector2Str)),
|
||||
?assertMatch(
|
||||
[
|
||||
#{
|
||||
<<"type">> := ?CONNECTR_TYPE,
|
||||
<<"name">> := ?CONNECTR_NAME,
|
||||
<<"server">> := <<"127.0.0.1:1883">>,
|
||||
<<"username">> := User2,
|
||||
<<"password">> := <<"">>,
|
||||
<<"proto_ver">> := <<"v4">>,
|
||||
<<"ssl">> := #{<<"enable">> := false}
|
||||
}
|
||||
],
|
||||
jsx:decode(Connector2Str)
|
||||
),
|
||||
|
||||
%% get the connector by id
|
||||
{ok, 200, Connector3Str} = request(get, uri(["connectors", ConnctorID]), []),
|
||||
?assertMatch(#{ <<"type">> := ?CONNECTR_TYPE
|
||||
, <<"name">> := ?CONNECTR_NAME
|
||||
, <<"server">> := <<"127.0.0.1:1883">>
|
||||
, <<"username">> := User2
|
||||
, <<"password">> := <<"">>
|
||||
, <<"proto_ver">> := <<"v4">>
|
||||
, <<"ssl">> := #{<<"enable">> := false}
|
||||
}, jsx:decode(Connector3Str)),
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"type">> := ?CONNECTR_TYPE,
|
||||
<<"name">> := ?CONNECTR_NAME,
|
||||
<<"server">> := <<"127.0.0.1:1883">>,
|
||||
<<"username">> := User2,
|
||||
<<"password">> := <<"">>,
|
||||
<<"proto_ver">> := <<"v4">>,
|
||||
<<"ssl">> := #{<<"enable">> := false}
|
||||
},
|
||||
jsx:decode(Connector3Str)
|
||||
),
|
||||
|
||||
%% delete the connector
|
||||
{ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []),
|
||||
{ok, 200, <<"[]">>} = request(get, uri(["connectors"]), []),
|
||||
|
||||
%% update a deleted connector returns an error
|
||||
{ok, 404, ErrMsg2} = request(put, uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR(User2)),
|
||||
{ok, 404, ErrMsg2} = request(
|
||||
put,
|
||||
uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR(User2)
|
||||
),
|
||||
?assertMatch(
|
||||
#{ <<"code">> := _
|
||||
, <<"message">> := <<"connector not found">>
|
||||
}, jsx:decode(ErrMsg2)),
|
||||
#{
|
||||
<<"code">> := _,
|
||||
<<"message">> := <<"connector not found">>
|
||||
},
|
||||
jsx:decode(ErrMsg2)
|
||||
),
|
||||
ok.
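For reference, the CRUD round-trip exercised above reduces to four calls. A
minimal sketch using this suite's request/3 and uri/1 helpers and its
?MQTT_CONNECTOR/?CONNECTR_* macros (all defined elsewhere in the suite):

    %% Sketch only: happy-path create/read/update/delete of one connector.
    crud_sketch() ->
        {ok, 201, _} = request(post, uri(["connectors"]),
            ?MQTT_CONNECTOR(<<"user1">>)#{<<"type">> => ?CONNECTR_TYPE,
                                          <<"name">> => ?CONNECTR_NAME}),
        Id = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
        {ok, 200, _} = request(get, uri(["connectors", Id]), []),
        {ok, 200, _} = request(put, uri(["connectors", Id]), ?MQTT_CONNECTOR(<<"user2">>)),
        {ok, 204, <<>>} = request(delete, uri(["connectors", Id]), []).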
|
||||
|
||||
t_mqtt_conn_bridge_ingress(_) ->
|
||||
%% then we add an MQTT connector, using POST
|
||||
User1 = <<"user1">>,
|
||||
{ok, 201, Connector} = request(post, uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(User1)#{ <<"type">> => ?CONNECTR_TYPE
|
||||
, <<"name">> => ?CONNECTR_NAME
|
||||
}),
|
||||
{ok, 201, Connector} = request(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(User1)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?CONNECTR_NAME
|
||||
}
|
||||
),
|
||||
|
||||
#{ <<"type">> := ?CONNECTR_TYPE
|
||||
, <<"name">> := ?CONNECTR_NAME
|
||||
, <<"server">> := <<"127.0.0.1:1883">>
|
||||
, <<"num_of_bridges">> := 0
|
||||
, <<"username">> := User1
|
||||
, <<"password">> := <<"">>
|
||||
, <<"proto_ver">> := <<"v4">>
|
||||
, <<"ssl">> := #{<<"enable">> := false}
|
||||
} = jsx:decode(Connector),
|
||||
#{
|
||||
<<"type">> := ?CONNECTR_TYPE,
|
||||
<<"name">> := ?CONNECTR_NAME,
|
||||
<<"server">> := <<"127.0.0.1:1883">>,
|
||||
<<"num_of_bridges">> := 0,
|
||||
<<"username">> := User1,
|
||||
<<"password">> := <<"">>,
|
||||
<<"proto_ver">> := <<"v4">>,
|
||||
<<"ssl">> := #{<<"enable">> := false}
|
||||
} = jsx:decode(Connector),
|
||||
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
|
||||
%% ... and an MQTT bridge, using POST
|
||||
%% we bind this bridge to the connector created just now
|
||||
timer:sleep(50),
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?MQTT_BRIDGE_INGRESS(ConnctorID)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?BRIDGE_NAME_INGRESS
|
||||
}),
|
||||
#{ <<"type">> := ?CONNECTR_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME_INGRESS
|
||||
, <<"connector">> := ConnctorID
|
||||
} = jsx:decode(Bridge),
|
||||
}
|
||||
),
|
||||
#{
|
||||
<<"type">> := ?CONNECTR_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME_INGRESS,
|
||||
<<"connector">> := ConnctorID
|
||||
} = jsx:decode(Bridge),
|
||||
BridgeIDIngress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS),
|
||||
wait_for_resource_ready(BridgeIDIngress, 5),
|
||||
|
||||
|
|
@ -257,12 +319,12 @@ t_mqtt_conn_bridge_ingress(_) ->
|
|||
false
|
||||
after 100 ->
|
||||
false
|
||||
end),
|
||||
end
|
||||
),
|
||||
|
||||
%% get the connector by id, verify the num_of_bridges now is 1
|
||||
{ok, 200, Connector1Str} = request(get, uri(["connectors", ConnctorID]), []),
|
||||
?assertMatch(#{ <<"num_of_bridges">> := 1
|
||||
}, jsx:decode(Connector1Str)),
|
||||
?assertMatch(#{<<"num_of_bridges">> := 1}, jsx:decode(Connector1Str)),
|
||||
|
||||
%% delete the bridge
|
||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []),
|
||||
|
|
@ -276,30 +338,39 @@ t_mqtt_conn_bridge_ingress(_) ->
|
|||
t_mqtt_conn_bridge_egress(_) ->
|
||||
%% then we add an MQTT connector, using POST
|
||||
User1 = <<"user1">>,
|
||||
{ok, 201, Connector} = request(post, uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(User1)#{ <<"type">> => ?CONNECTR_TYPE
|
||||
, <<"name">> => ?CONNECTR_NAME
|
||||
}),
|
||||
{ok, 201, Connector} = request(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(User1)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?CONNECTR_NAME
|
||||
}
|
||||
),
|
||||
|
||||
%ct:pal("---connector: ~p", [Connector]),
|
||||
#{ <<"server">> := <<"127.0.0.1:1883">>
|
||||
, <<"username">> := User1
|
||||
, <<"password">> := <<"">>
|
||||
, <<"proto_ver">> := <<"v4">>
|
||||
, <<"ssl">> := #{<<"enable">> := false}
|
||||
} = jsx:decode(Connector),
|
||||
#{
|
||||
<<"server">> := <<"127.0.0.1:1883">>,
|
||||
<<"username">> := User1,
|
||||
<<"password">> := <<"">>,
|
||||
<<"proto_ver">> := <<"v4">>,
|
||||
<<"ssl">> := #{<<"enable">> := false}
|
||||
} = jsx:decode(Connector),
|
||||
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
|
||||
%% ... and an MQTT bridge, using POST
|
||||
%% we bind this bridge to the connector created just now
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?MQTT_BRIDGE_EGRESS(ConnctorID)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?BRIDGE_NAME_EGRESS
|
||||
}),
|
||||
#{ <<"type">> := ?CONNECTR_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME_EGRESS
|
||||
, <<"connector">> := ConnctorID
|
||||
} = jsx:decode(Bridge),
|
||||
}
|
||||
),
|
||||
#{
|
||||
<<"type">> := ?CONNECTR_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME_EGRESS,
|
||||
<<"connector">> := ConnctorID
|
||||
} = jsx:decode(Bridge),
|
||||
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
|
||||
wait_for_resource_ready(BridgeIDEgress, 5),
|
||||
|
||||
|
|
@ -324,14 +395,19 @@ t_mqtt_conn_bridge_egress(_) ->
|
|||
false
|
||||
after 100 ->
|
||||
false
|
||||
end),
|
||||
end
|
||||
),
|
||||
|
||||
%% verify the metrics of the bridge
|
||||
{ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
|
||||
?assertMatch(#{ <<"metrics">> := ?metrics(1, 1, 0, _, _, _)
|
||||
, <<"node_metrics">> :=
|
||||
[#{<<"node">> := _, <<"metrics">> := ?metrics(1, 1, 0, _, _, _)}]
|
||||
}, jsx:decode(BridgeStr)),
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"metrics">> := ?metrics(1, 1, 0, _, _, _),
|
||||
<<"node_metrics">> :=
|
||||
[#{<<"node">> := _, <<"metrics">> := ?metrics(1, 1, 0, _, _, _)}]
|
||||
},
|
||||
jsx:decode(BridgeStr)
|
||||
),
|
||||
|
||||
%% delete the bridge
|
||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
|
||||
|
|
@ -347,38 +423,50 @@ t_mqtt_conn_bridge_egress(_) ->
|
|||
%% - cannot delete a connector that is used by at least one bridge
|
||||
t_mqtt_conn_update(_) ->
|
||||
%% then we add an MQTT connector, using POST
|
||||
{ok, 201, Connector} = request(post, uri(["connectors"]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)
|
||||
#{ <<"type">> => ?CONNECTR_TYPE
|
||||
, <<"name">> => ?CONNECTR_NAME
|
||||
}),
|
||||
{ok, 201, Connector} = request(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?CONNECTR_NAME
|
||||
}
|
||||
),
|
||||
|
||||
%ct:pal("---connector: ~p", [Connector]),
|
||||
#{ <<"server">> := <<"127.0.0.1:1883">>
|
||||
} = jsx:decode(Connector),
|
||||
#{<<"server">> := <<"127.0.0.1:1883">>} = jsx:decode(Connector),
|
||||
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
|
||||
%% ... and an MQTT bridge, using POST
|
||||
%% we bind this bridge to the connector created just now
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?MQTT_BRIDGE_EGRESS(ConnctorID)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?BRIDGE_NAME_EGRESS
|
||||
}),
|
||||
#{ <<"type">> := ?CONNECTR_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME_EGRESS
|
||||
, <<"connector">> := ConnctorID
|
||||
} = jsx:decode(Bridge),
|
||||
}
|
||||
),
|
||||
#{
|
||||
<<"type">> := ?CONNECTR_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME_EGRESS,
|
||||
<<"connector">> := ConnctorID
|
||||
} = jsx:decode(Bridge),
|
||||
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
|
||||
wait_for_resource_ready(BridgeIDEgress, 5),
|
||||
|
||||
%% Then we try to update the connector's 'server' to an unavailable IP address.
%% The update succeeds: we recreate the resource even if it is currently
%% connected and the target server we are updating to is unavailable.
|
||||
{ok, 200, _} = request(put, uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)),
|
||||
{ok, 200, _} = request(
|
||||
put,
|
||||
uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)
|
||||
),
|
||||
%% we fix the 'server' parameter back to a reachable one; the update should work
|
||||
{ok, 200, _} = request(put, uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1 : 1883">>)),
|
||||
{ok, 200, _} = request(
|
||||
put,
|
||||
uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1 : 1883">>)
|
||||
),
|
||||
%% delete the bridge
|
||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
|
||||
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
|
||||
|
|
@ -390,40 +478,51 @@ t_mqtt_conn_update(_) ->
|
|||
t_mqtt_conn_update2(_) ->
|
||||
%% then we add an MQTT connector, using POST
|
||||
%% but this connector is point to a unreachable server "2603"
|
||||
{ok, 201, Connector} = request(post, uri(["connectors"]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)
|
||||
#{ <<"type">> => ?CONNECTR_TYPE
|
||||
, <<"name">> => ?CONNECTR_NAME
|
||||
}),
|
||||
{ok, 201, Connector} = request(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:2603">>)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?CONNECTR_NAME
|
||||
}
|
||||
),
|
||||
|
||||
#{ <<"server">> := <<"127.0.0.1:2603">>
|
||||
} = jsx:decode(Connector),
|
||||
#{<<"server">> := <<"127.0.0.1:2603">>} = jsx:decode(Connector),
|
||||
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
|
||||
%% ... and an MQTT bridge, using POST
|
||||
%% we bind this bridge to the connector created just now
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?MQTT_BRIDGE_EGRESS(ConnctorID)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?BRIDGE_NAME_EGRESS
|
||||
}),
|
||||
#{ <<"type">> := ?CONNECTR_TYPE
|
||||
, <<"name">> := ?BRIDGE_NAME_EGRESS
|
||||
, <<"status">> := <<"disconnected">>
|
||||
, <<"connector">> := ConnctorID
|
||||
} = jsx:decode(Bridge),
|
||||
}
|
||||
),
|
||||
#{
|
||||
<<"type">> := ?CONNECTR_TYPE,
|
||||
<<"name">> := ?BRIDGE_NAME_EGRESS,
|
||||
<<"status">> := <<"disconnected">>,
|
||||
<<"connector">> := ConnctorID
|
||||
} = jsx:decode(Bridge),
|
||||
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
|
||||
%% We try to fix the 'server' parameter, but point it at another unavailable server.
%% The update should succeed: we don't check the connectivity of the new config
%% if the resource is currently disconnected.
|
||||
{ok, 200, _} = request(put, uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:2604">>)),
|
||||
{ok, 200, _} = request(
|
||||
put,
|
||||
uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:2604">>)
|
||||
),
|
||||
%% we fix the 'server' parameter back to a reachable one; the update should work
|
||||
{ok, 200, _} = request(put, uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)),
|
||||
{ok, 200, _} = request(
|
||||
put,
|
||||
uri(["connectors", ConnctorID]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)
|
||||
),
|
||||
wait_for_resource_ready(BridgeIDEgress, 5),
|
||||
{ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
|
||||
?assertMatch(#{ <<"status">> := <<"connected">>
|
||||
}, jsx:decode(BridgeStr)),
|
||||
?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(BridgeStr)),
|
||||
%% delete the bridge
|
||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
|
||||
{ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []),
|
||||
|
|
@ -434,21 +533,26 @@ t_mqtt_conn_update2(_) ->
|
|||
|
||||
t_mqtt_conn_update3(_) ->
|
||||
%% we add an MQTT connector, using POST
|
||||
{ok, 201, _} = request(post, uri(["connectors"]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)
|
||||
#{ <<"type">> => ?CONNECTR_TYPE
|
||||
, <<"name">> => ?CONNECTR_NAME
|
||||
}),
|
||||
{ok, 201, _} = request(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?CONNECTR_NAME
|
||||
}
|
||||
),
|
||||
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
|
||||
%% ... and an MQTT bridge, using POST
|
||||
%% we bind this bridge to the connector created just now
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?MQTT_BRIDGE_EGRESS(ConnctorID)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?BRIDGE_NAME_EGRESS
|
||||
}),
|
||||
#{ <<"connector">> := ConnctorID
|
||||
} = jsx:decode(Bridge),
|
||||
}
|
||||
),
|
||||
#{<<"connector">> := ConnctorID} = jsx:decode(Bridge),
|
||||
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
|
||||
wait_for_resource_ready(BridgeIDEgress, 5),
|
||||
|
||||
|
|
@ -462,37 +566,54 @@ t_mqtt_conn_update3(_) ->
|
|||
t_mqtt_conn_testing(_) ->
|
||||
%% APIs for testing the connectivity
|
||||
%% then we add an MQTT connector, using POST
|
||||
{ok, 204, <<>>} = request(post, uri(["connectors_test"]),
|
||||
{ok, 204, <<>>} = request(
|
||||
post,
|
||||
uri(["connectors_test"]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:1883">>)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?BRIDGE_NAME_EGRESS
|
||||
}),
|
||||
{ok, 400, _} = request(post, uri(["connectors_test"]),
|
||||
}
|
||||
),
|
||||
{ok, 400, _} = request(
|
||||
post,
|
||||
uri(["connectors_test"]),
|
||||
?MQTT_CONNECTOR2(<<"127.0.0.1:2883">>)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?BRIDGE_NAME_EGRESS
|
||||
}).
|
||||
}
|
||||
).
|
||||
|
||||
t_ingress_mqtt_bridge_with_rules(_) ->
|
||||
{ok, 201, _} = request(post, uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(<<"user1">>)#{ <<"type">> => ?CONNECTR_TYPE
|
||||
, <<"name">> => ?CONNECTR_NAME
|
||||
}),
|
||||
{ok, 201, _} = request(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(<<"user1">>)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?CONNECTR_NAME
|
||||
}
|
||||
),
|
||||
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
|
||||
|
||||
{ok, 201, _} = request(post, uri(["bridges"]),
|
||||
{ok, 201, _} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?MQTT_BRIDGE_INGRESS(ConnctorID)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?BRIDGE_NAME_INGRESS
|
||||
}),
|
||||
}
|
||||
),
|
||||
BridgeIDIngress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_INGRESS),
|
||||
|
||||
{ok, 201, Rule} = request(post, uri(["rules"]),
|
||||
#{<<"name">> => <<"A rule get messages from a source mqtt bridge">>,
|
||||
<<"enable">> => true,
|
||||
<<"outputs">> => [#{<<"function">> => "emqx_connector_api_SUITE:inspect"}],
|
||||
<<"sql">> => <<"SELECT * from \"$bridges/", BridgeIDIngress/binary, "\"">>
|
||||
}),
|
||||
{ok, 201, Rule} = request(
|
||||
post,
|
||||
uri(["rules"]),
|
||||
#{
|
||||
<<"name">> => <<"A rule get messages from a source mqtt bridge">>,
|
||||
<<"enable">> => true,
|
||||
<<"outputs">> => [#{<<"function">> => "emqx_connector_api_SUITE:inspect"}],
|
||||
<<"sql">> => <<"SELECT * from \"$bridges/", BridgeIDIngress/binary, "\"">>
|
||||
}
|
||||
),
|
||||
#{<<"id">> := RuleId} = jsx:decode(Rule),
|
||||
|
||||
%% we now test if the bridge works as expected
|
||||
|
|
@ -517,63 +638,81 @@ t_ingress_mqtt_bridge_with_rules(_) ->
|
|||
false
|
||||
after 100 ->
|
||||
false
|
||||
end),
|
||||
end
|
||||
),
|
||||
%% and also the rule should be matched, with matched + 1:
|
||||
{ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
|
||||
#{ <<"id">> := RuleId
|
||||
, <<"metrics">> := #{
|
||||
<<"sql.matched">> := 1,
|
||||
<<"sql.passed">> := 1,
|
||||
<<"sql.failed">> := 0,
|
||||
<<"sql.failed.exception">> := 0,
|
||||
<<"sql.failed.no_result">> := 0,
|
||||
<<"sql.matched.rate">> := _,
|
||||
<<"sql.matched.rate.max">> := _,
|
||||
<<"sql.matched.rate.last5m">> := _,
|
||||
<<"outputs.total">> := 1,
|
||||
<<"outputs.success">> := 1,
|
||||
<<"outputs.failed">> := 0,
|
||||
<<"outputs.failed.out_of_service">> := 0,
|
||||
<<"outputs.failed.unknown">> := 0
|
||||
}
|
||||
} = jsx:decode(Rule1),
|
||||
#{
|
||||
<<"id">> := RuleId,
|
||||
<<"metrics">> := #{
|
||||
<<"sql.matched">> := 1,
|
||||
<<"sql.passed">> := 1,
|
||||
<<"sql.failed">> := 0,
|
||||
<<"sql.failed.exception">> := 0,
|
||||
<<"sql.failed.no_result">> := 0,
|
||||
<<"sql.matched.rate">> := _,
|
||||
<<"sql.matched.rate.max">> := _,
|
||||
<<"sql.matched.rate.last5m">> := _,
|
||||
<<"outputs.total">> := 1,
|
||||
<<"outputs.success">> := 1,
|
||||
<<"outputs.failed">> := 0,
|
||||
<<"outputs.failed.out_of_service">> := 0,
|
||||
<<"outputs.failed.unknown">> := 0
|
||||
}
|
||||
} = jsx:decode(Rule1),
|
||||
%% we also check if the outputs of the rule is triggered
|
||||
?assertMatch(#{inspect := #{
|
||||
event := <<"$bridges/mqtt", _/binary>>,
|
||||
id := MsgId,
|
||||
payload := Payload,
|
||||
topic := RemoteTopic,
|
||||
qos := 0,
|
||||
dup := false,
|
||||
retain := false,
|
||||
pub_props := #{},
|
||||
timestamp := _
|
||||
}} when is_binary(MsgId), persistent_term:get(?MODULE)),
|
||||
?assertMatch(
|
||||
#{
|
||||
inspect := #{
|
||||
event := <<"$bridges/mqtt", _/binary>>,
|
||||
id := MsgId,
|
||||
payload := Payload,
|
||||
topic := RemoteTopic,
|
||||
qos := 0,
|
||||
dup := false,
|
||||
retain := false,
|
||||
pub_props := #{},
|
||||
timestamp := _
|
||||
}
|
||||
} when is_binary(MsgId),
|
||||
persistent_term:get(?MODULE)
|
||||
),
|
||||
|
||||
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
|
||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDIngress]), []),
|
||||
{ok, 204, <<>>} = request(delete, uri(["connectors", ConnctorID]), []).
|
||||
|
||||
t_egress_mqtt_bridge_with_rules(_) ->
|
||||
{ok, 201, _} = request(post, uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(<<"user1">>)#{ <<"type">> => ?CONNECTR_TYPE
|
||||
, <<"name">> => ?CONNECTR_NAME
|
||||
}),
|
||||
{ok, 201, _} = request(
|
||||
post,
|
||||
uri(["connectors"]),
|
||||
?MQTT_CONNECTOR(<<"user1">>)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?CONNECTR_NAME
|
||||
}
|
||||
),
|
||||
ConnctorID = emqx_connector:connector_id(?CONNECTR_TYPE, ?CONNECTR_NAME),
|
||||
{ok, 201, Bridge} = request(post, uri(["bridges"]),
|
||||
{ok, 201, Bridge} = request(
|
||||
post,
|
||||
uri(["bridges"]),
|
||||
?MQTT_BRIDGE_EGRESS(ConnctorID)#{
|
||||
<<"type">> => ?CONNECTR_TYPE,
|
||||
<<"name">> => ?BRIDGE_NAME_EGRESS
|
||||
}),
|
||||
#{ <<"type">> := ?CONNECTR_TYPE, <<"name">> := ?BRIDGE_NAME_EGRESS } = jsx:decode(Bridge),
|
||||
}
|
||||
),
|
||||
#{<<"type">> := ?CONNECTR_TYPE, <<"name">> := ?BRIDGE_NAME_EGRESS} = jsx:decode(Bridge),
|
||||
BridgeIDEgress = emqx_bridge:bridge_id(?CONNECTR_TYPE, ?BRIDGE_NAME_EGRESS),
|
||||
|
||||
{ok, 201, Rule} = request(post, uri(["rules"]),
|
||||
#{<<"name">> => <<"A rule send messages to a sink mqtt bridge">>,
|
||||
<<"enable">> => true,
|
||||
<<"outputs">> => [BridgeIDEgress],
|
||||
<<"sql">> => <<"SELECT * from \"t/1\"">>
|
||||
}),
|
||||
{ok, 201, Rule} = request(
|
||||
post,
|
||||
uri(["rules"]),
|
||||
#{
|
||||
<<"name">> => <<"A rule send messages to a sink mqtt bridge">>,
|
||||
<<"enable">> => true,
|
||||
<<"outputs">> => [BridgeIDEgress],
|
||||
<<"sql">> => <<"SELECT * from \"t/1\"">>
|
||||
}
|
||||
),
|
||||
#{<<"id">> := RuleId} = jsx:decode(Rule),
|
||||
|
||||
%% we now test if the bridge works as expected
|
||||
|
|
@ -597,7 +736,8 @@ t_egress_mqtt_bridge_with_rules(_) ->
|
|||
false
|
||||
after 100 ->
|
||||
false
|
||||
end),
|
||||
end
|
||||
),
|
||||
emqx:unsubscribe(RemoteTopic),
|
||||
|
||||
%% PUBLISH a message to the rule.
|
||||
|
|
@ -609,23 +749,24 @@ t_egress_mqtt_bridge_with_rules(_) ->
|
|||
wait_for_resource_ready(BridgeIDEgress, 5),
|
||||
emqx:publish(emqx_message:make(RuleTopic, Payload2)),
|
||||
{ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []),
|
||||
#{ <<"id">> := RuleId
|
||||
, <<"metrics">> := #{
|
||||
<<"sql.matched">> := 1,
|
||||
<<"sql.passed">> := 1,
|
||||
<<"sql.failed">> := 0,
|
||||
<<"sql.failed.exception">> := 0,
|
||||
<<"sql.failed.no_result">> := 0,
|
||||
<<"sql.matched.rate">> := _,
|
||||
<<"sql.matched.rate.max">> := _,
|
||||
<<"sql.matched.rate.last5m">> := _,
|
||||
<<"outputs.total">> := 1,
|
||||
<<"outputs.success">> := 1,
|
||||
<<"outputs.failed">> := 0,
|
||||
<<"outputs.failed.out_of_service">> := 0,
|
||||
<<"outputs.failed.unknown">> := 0
|
||||
}
|
||||
} = jsx:decode(Rule1),
|
||||
#{
|
||||
<<"id">> := RuleId,
|
||||
<<"metrics">> := #{
|
||||
<<"sql.matched">> := 1,
|
||||
<<"sql.passed">> := 1,
|
||||
<<"sql.failed">> := 0,
|
||||
<<"sql.failed.exception">> := 0,
|
||||
<<"sql.failed.no_result">> := 0,
|
||||
<<"sql.matched.rate">> := _,
|
||||
<<"sql.matched.rate.max">> := _,
|
||||
<<"sql.matched.rate.last5m">> := _,
|
||||
<<"outputs.total">> := 1,
|
||||
<<"outputs.success">> := 1,
|
||||
<<"outputs.failed">> := 0,
|
||||
<<"outputs.failed.out_of_service">> := 0,
|
||||
<<"outputs.failed.unknown">> := 0
|
||||
}
|
||||
} = jsx:decode(Rule1),
|
||||
%% we should receive a message on the "remote" broker, with specified topic
|
||||
?assert(
|
||||
receive
|
||||
|
|
@ -637,14 +778,19 @@ t_egress_mqtt_bridge_with_rules(_) ->
|
|||
false
|
||||
after 100 ->
|
||||
false
|
||||
end),
|
||||
end
|
||||
),
|
||||
|
||||
%% verify the metrics of the bridge
|
||||
{ok, 200, BridgeStr} = request(get, uri(["bridges", BridgeIDEgress]), []),
|
||||
?assertMatch(#{ <<"metrics">> := ?metrics(2, 2, 0, _, _, _)
|
||||
, <<"node_metrics">> :=
|
||||
[#{<<"node">> := _, <<"metrics">> := ?metrics(2, 2, 0, _, _, _)}]
|
||||
}, jsx:decode(BridgeStr)),
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"metrics">> := ?metrics(2, 2, 0, _, _, _),
|
||||
<<"node_metrics">> :=
|
||||
[#{<<"node">> := _, <<"metrics">> := ?metrics(2, 2, 0, _, _, _)}]
|
||||
},
|
||||
jsx:decode(BridgeStr)
|
||||
),
|
||||
|
||||
{ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []),
|
||||
{ok, 204, <<>>} = request(delete, uri(["bridges", BridgeIDEgress]), []),
|
||||
|
|
@ -658,8 +804,9 @@ wait_for_resource_ready(InstId, 0) ->
|
|||
ct:fail(wait_resource_timeout);
|
||||
wait_for_resource_ready(InstId, Retry) ->
|
||||
case emqx_bridge:lookup(InstId) of
|
||||
{ok, #{resource_data := #{status := connected}}} -> ok;
|
||||
{ok, #{resource_data := #{status := connected}}} ->
|
||||
ok;
|
||||
_ ->
|
||||
timer:sleep(100),
|
||||
wait_for_resource_ready(InstId, Retry-1)
|
||||
wait_for_resource_ready(InstId, Retry - 1)
|
||||
end.
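wait_for_resource_ready/2 polls emqx_bridge:lookup/1 with a 100 ms sleep
between attempts, so Retry bounds the total wait at roughly Retry x 100 ms.
Typical call, as used throughout this suite:

    %% Wait up to ~500 ms (5 retries x 100 ms sleep) for the bridge to connect.
    wait_for_resource_ready(BridgeIDEgress, 5).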
|
||||
|
|
|
|||
|
|
@ -65,20 +65,24 @@ t_lifecycle(_Config) ->
|
|||
perform_lifecycle_check(PoolName, InitialConfig) ->
|
||||
{ok, #{config := CheckedConfig}} =
|
||||
emqx_resource:check_config(?MONGO_RESOURCE_MOD, InitialConfig),
|
||||
{ok, #{state := #{poolname := ReturnedPoolName} = State,
|
||||
status := InitialStatus}}
|
||||
= emqx_resource:create_local(
|
||||
PoolName,
|
||||
?CONNECTOR_RESOURCE_GROUP,
|
||||
?MONGO_RESOURCE_MOD,
|
||||
CheckedConfig,
|
||||
#{}
|
||||
),
|
||||
{ok, #{
|
||||
state := #{poolname := ReturnedPoolName} = State,
|
||||
status := InitialStatus
|
||||
}} =
|
||||
emqx_resource:create_local(
|
||||
PoolName,
|
||||
?CONNECTOR_RESOURCE_GROUP,
|
||||
?MONGO_RESOURCE_MOD,
|
||||
CheckedConfig,
|
||||
#{}
|
||||
),
|
||||
?assertEqual(InitialStatus, connected),
|
||||
% Instance should match the state and status of the just started resource
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State,
|
||||
status := InitialStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
|
||||
state := State,
|
||||
status := InitialStatus
|
||||
}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(ok, emqx_resource:health_check(PoolName)),
|
||||
% % Perform query as further check that the resource is working as expected
|
||||
?assertMatch([], emqx_resource:query(PoolName, test_query_find())),
|
||||
|
|
@ -86,11 +90,13 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
|
|||
?assertEqual(ok, emqx_resource:stop(PoolName)),
|
||||
% Resource will be listed still, but state will be changed and healthcheck will fail
|
||||
% as the worker no longer exists.
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State,
|
||||
status := StoppedStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
|
||||
state := State,
|
||||
status := StoppedStatus
|
||||
}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(StoppedStatus, disconnected),
|
||||
?assertEqual({error,health_check_failed}, emqx_resource:health_check(PoolName)),
|
||||
?assertEqual({error, health_check_failed}, emqx_resource:health_check(PoolName)),
|
||||
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
|
||||
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
|
||||
% Can call stop/1 again on an already stopped instance
|
||||
|
|
@ -99,8 +105,8 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
|
|||
?assertEqual(ok, emqx_resource:restart(PoolName)),
|
||||
% async restart; wait for the resource to come back
|
||||
timer:sleep(500),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(ok, emqx_resource:health_check(PoolName)),
|
||||
?assertMatch([], emqx_resource:query(PoolName, test_query_find())),
|
||||
?assertMatch(undefined, emqx_resource:query(PoolName, test_query_find_one())),
|
||||
|
|
@ -115,12 +121,19 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
|
|||
% %%------------------------------------------------------------------------------
|
||||
|
||||
mongo_config() ->
|
||||
RawConfig = list_to_binary(io_lib:format("""
|
||||
mongo_type = single
|
||||
database = mqtt
|
||||
pool_size = 8
|
||||
server = \"~s:~b\"
|
||||
""", [?MONGO_HOST, ?MONGO_DEFAULT_PORT])),
|
||||
RawConfig = list_to_binary(
|
||||
io_lib:format(
|
||||
""
|
||||
"\n"
|
||||
" mongo_type = single\n"
|
||||
" database = mqtt\n"
|
||||
" pool_size = 8\n"
|
||||
" server = \"~s:~b\"\n"
|
||||
" "
|
||||
"",
|
||||
[?MONGO_HOST, ?MONGO_DEFAULT_PORT]
|
||||
)
|
||||
),
|
||||
|
||||
{ok, Config} = hocon:binary(RawConfig),
|
||||
#{<<"config">> => Config}.
|
||||
|
|
|
|||
|
|
@ -22,23 +22,36 @@
|
|||
send_and_ack_test() ->
|
||||
%% delegate from gen_rpc to rpc for unit test
|
||||
meck:new(emqtt, [passthrough, no_history]),
|
||||
meck:expect(emqtt, start_link, 1,
|
||||
fun(_) ->
|
||||
{ok, spawn_link(fun() -> ok end)}
|
||||
end),
|
||||
meck:expect(
|
||||
emqtt,
|
||||
start_link,
|
||||
1,
|
||||
fun(_) ->
|
||||
{ok, spawn_link(fun() -> ok end)}
|
||||
end
|
||||
),
|
||||
meck:expect(emqtt, connect, 1, {ok, dummy}),
|
||||
meck:expect(emqtt, stop, 1,
|
||||
fun(Pid) -> Pid ! stop end),
|
||||
meck:expect(emqtt, publish, 2,
|
||||
fun(Client, Msg) ->
|
||||
Client ! {publish, Msg},
|
||||
{ok, Msg} %% as packet id
|
||||
end),
|
||||
meck:expect(
|
||||
emqtt,
|
||||
stop,
|
||||
1,
|
||||
fun(Pid) -> Pid ! stop end
|
||||
),
|
||||
meck:expect(
|
||||
emqtt,
|
||||
publish,
|
||||
2,
|
||||
fun(Client, Msg) ->
|
||||
Client ! {publish, Msg},
|
||||
%% as packet id
|
||||
{ok, Msg}
|
||||
end
|
||||
),
|
||||
try
|
||||
Max = 1,
|
||||
Batch = lists:seq(1, Max),
|
||||
{ok, Conn} = emqx_connector_mqtt_mod:start(#{server => {{127,0,0,1}, 1883}}),
{ok, Conn} = emqx_connector_mqtt_mod:start(#{server => {{127, 0, 0, 1}, 1883}}),
% %% return last packet id as batch reference
|
||||
{ok, _AckRef} = emqx_connector_mqtt_mod:send(Conn, Batch),
|
||||
|
||||
ok = emqx_connector_mqtt_mod:stop(Conn)
|
||||
|
|
|
|||
|
|
@ -23,13 +23,13 @@
|
|||
-define(BRIDGE_NAME, test).
|
||||
-define(BRIDGE_REG_NAME, emqx_connector_mqtt_worker_test).
|
||||
-define(WAIT(PATTERN, TIMEOUT),
|
||||
    receive
        PATTERN ->
            ok
    after
        TIMEOUT ->
            error(timeout)
    end).
    receive
        PATTERN ->
            ok
    after TIMEOUT ->
        error(timeout)
    end
).
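The reworked ?WAIT/2 blocks until a message matching PATTERN arrives, or fails
the test with error(timeout) after TIMEOUT milliseconds. Usage sketch (the
pattern here is illustrative):

    %% Fail unless a publish message arrives within one second.
    ?WAIT({publish, _}, 1000).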
|
||||
|
||||
-export([start/1, send/2, stop/1]).
|
||||
|
||||
|
|
@ -125,7 +125,7 @@ manual_start_stop_test() ->
|
|||
Ref = make_ref(),
|
||||
TestPid = self(),
|
||||
BridgeName = manual_start_stop,
|
||||
Config0 = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}),
|
||||
Config0 = make_config(Ref, TestPid, {ok, #{client_pid => TestPid}}),
|
||||
Config = Config0#{start_type := manual},
|
||||
{ok, Pid} = emqx_connector_mqtt_worker:start_link(Config#{name => BridgeName}),
|
||||
%% call ensure_started again should yield the same result
|
||||
|
|
|
|||
|
|
@ -64,9 +64,11 @@ t_lifecycle(_Config) ->
|
|||
|
||||
perform_lifecycle_check(PoolName, InitialConfig) ->
|
||||
{ok, #{config := CheckedConfig}} =
|
||||
    emqx_resource:check_config(?MYSQL_RESOURCE_MOD, InitialConfig),
{ok, #{state := #{poolname := ReturnedPoolName} = State,
       status := InitialStatus}} = emqx_resource:create_local(
{ok, #{
    state := #{poolname := ReturnedPoolName} = State,
    status := InitialStatus
}} = emqx_resource:create_local(
|
||||
PoolName,
|
||||
?CONNECTOR_RESOURCE_GROUP,
|
||||
?MYSQL_RESOURCE_MOD,
|
||||
|
|
@ -75,23 +77,32 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
|
|||
),
|
||||
?assertEqual(InitialStatus, connected),
|
||||
% Instance should match the state and status of the just started resource
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State,
|
||||
status := InitialStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
|
||||
state := State,
|
||||
status := InitialStatus
|
||||
}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(ok, emqx_resource:health_check(PoolName)),
|
||||
% % Perform query as further check that the resource is working as expected
|
||||
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())),
|
||||
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())),
|
||||
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName,
|
||||
test_query_with_params_and_timeout())),
|
||||
?assertMatch(
|
||||
{ok, _, [[1]]},
|
||||
emqx_resource:query(
|
||||
PoolName,
|
||||
test_query_with_params_and_timeout()
|
||||
)
|
||||
),
|
||||
?assertEqual(ok, emqx_resource:stop(PoolName)),
|
||||
% Resource will be listed still, but state will be changed and healthcheck will fail
|
||||
% as the worker no longer exists.
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State,
|
||||
status := StoppedStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
|
||||
state := State,
|
||||
status := StoppedStatus
|
||||
}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(StoppedStatus, disconnected),
|
||||
?assertEqual({error,health_check_failed}, emqx_resource:health_check(PoolName)),
|
||||
?assertEqual({error, health_check_failed}, emqx_resource:health_check(PoolName)),
|
||||
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
|
||||
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
|
||||
% Can call stop/1 again on an already stopped instance
|
||||
|
|
@ -105,8 +116,13 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
|
|||
?assertEqual(ok, emqx_resource:health_check(PoolName)),
|
||||
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_no_params())),
|
||||
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName, test_query_with_params())),
|
||||
?assertMatch({ok, _, [[1]]}, emqx_resource:query(PoolName,
|
||||
test_query_with_params_and_timeout())),
|
||||
?assertMatch(
|
||||
{ok, _, [[1]]},
|
||||
emqx_resource:query(
|
||||
PoolName,
|
||||
test_query_with_params_and_timeout()
|
||||
)
|
||||
),
|
||||
% Stop and remove the resource in one go.
|
||||
?assertEqual(ok, emqx_resource:remove_local(PoolName)),
|
||||
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
|
||||
|
|
@ -118,14 +134,21 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
|
|||
% %%------------------------------------------------------------------------------
|
||||
|
||||
mysql_config() ->
|
||||
RawConfig = list_to_binary(io_lib:format("""
|
||||
auto_reconnect = true
|
||||
database = mqtt
|
||||
username= root
|
||||
password = public
|
||||
pool_size = 8
|
||||
server = \"~s:~b\"
|
||||
""", [?MYSQL_HOST, ?MYSQL_DEFAULT_PORT])),
|
||||
RawConfig = list_to_binary(
|
||||
io_lib:format(
|
||||
""
|
||||
"\n"
|
||||
" auto_reconnect = true\n"
|
||||
" database = mqtt\n"
|
||||
" username= root\n"
|
||||
" password = public\n"
|
||||
" pool_size = 8\n"
|
||||
" server = \"~s:~b\"\n"
|
||||
" "
|
||||
"",
|
||||
[?MYSQL_HOST, ?MYSQL_DEFAULT_PORT]
|
||||
)
|
||||
),
|
||||
|
||||
{ok, Config} = hocon:binary(RawConfig),
|
||||
#{<<"config">> => Config}.
|
||||
|
|
|
|||
|
|
@ -65,20 +65,24 @@ t_lifecycle(_Config) ->
|
|||
perform_lifecycle_check(PoolName, InitialConfig) ->
|
||||
{ok, #{config := CheckedConfig}} =
|
||||
emqx_resource:check_config(?PGSQL_RESOURCE_MOD, InitialConfig),
|
||||
{ok, #{state := #{poolname := ReturnedPoolName} = State,
|
||||
status := InitialStatus}}
|
||||
= emqx_resource:create_local(
|
||||
PoolName,
|
||||
?CONNECTOR_RESOURCE_GROUP,
|
||||
?PGSQL_RESOURCE_MOD,
|
||||
CheckedConfig,
|
||||
#{}
|
||||
),
|
||||
{ok, #{
|
||||
state := #{poolname := ReturnedPoolName} = State,
|
||||
status := InitialStatus
|
||||
}} =
|
||||
emqx_resource:create_local(
|
||||
PoolName,
|
||||
?CONNECTOR_RESOURCE_GROUP,
|
||||
?PGSQL_RESOURCE_MOD,
|
||||
CheckedConfig,
|
||||
#{}
|
||||
),
|
||||
?assertEqual(InitialStatus, connected),
|
||||
% Instance should match the state and status of the just started resource
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State,
|
||||
status := InitialStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
|
||||
state := State,
|
||||
status := InitialStatus
|
||||
}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(ok, emqx_resource:health_check(PoolName)),
|
||||
% % Perform query as further check that the resource is working as expected
|
||||
?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())),
|
||||
|
|
@ -86,11 +90,13 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
|
|||
?assertEqual(ok, emqx_resource:stop(PoolName)),
|
||||
% Resource will be listed still, but state will be changed and healthcheck will fail
|
||||
% as the worker no longer exists.
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State,
|
||||
status := StoppedStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
|
||||
state := State,
|
||||
status := StoppedStatus
|
||||
}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(StoppedStatus, disconnected),
|
||||
?assertEqual({error,health_check_failed}, emqx_resource:health_check(PoolName)),
|
||||
?assertEqual({error, health_check_failed}, emqx_resource:health_check(PoolName)),
|
||||
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
|
||||
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
|
||||
% Can call stop/1 again on an already stopped instance
|
||||
|
|
@ -99,8 +105,8 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
|
|||
?assertEqual(ok, emqx_resource:restart(PoolName)),
|
||||
% async restart, need to wait resource
|
||||
timer:sleep(500),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(ok, emqx_resource:health_check(PoolName)),
|
||||
?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_no_params())),
|
||||
?assertMatch({ok, _, [{1}]}, emqx_resource:query(PoolName, test_query_with_params())),
|
||||
|
|
@ -115,14 +121,21 @@ perform_lifecycle_check(PoolName, InitialConfig) ->
|
|||
% %%------------------------------------------------------------------------------
|
||||
|
||||
pgsql_config() ->
|
||||
RawConfig = list_to_binary(io_lib:format("""
|
||||
auto_reconnect = true
|
||||
database = mqtt
|
||||
username= root
|
||||
password = public
|
||||
pool_size = 8
|
||||
server = \"~s:~b\"
|
||||
""", [?PGSQL_HOST, ?PGSQL_DEFAULT_PORT])),
|
||||
RawConfig = list_to_binary(
|
||||
io_lib:format(
|
||||
""
|
||||
"\n"
|
||||
" auto_reconnect = true\n"
|
||||
" database = mqtt\n"
|
||||
" username= root\n"
|
||||
" password = public\n"
|
||||
" pool_size = 8\n"
|
||||
" server = \"~s:~b\"\n"
|
||||
" "
|
||||
"",
|
||||
[?PGSQL_HOST, ?PGSQL_DEFAULT_PORT]
|
||||
)
|
||||
),
|
||||
|
||||
{ok, Config} = hocon:binary(RawConfig),
|
||||
#{<<"config">> => Config}.
|
||||
|
|
|
|||
|
|
@ -80,8 +80,10 @@ t_sentinel_lifecycle(_Config) ->
|
|||
perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
|
||||
{ok, #{config := CheckedConfig}} =
|
||||
emqx_resource:check_config(?REDIS_RESOURCE_MOD, InitialConfig),
|
||||
{ok, #{state := #{poolname := ReturnedPoolName} = State,
|
||||
status := InitialStatus}} = emqx_resource:create_local(
|
||||
{ok, #{
|
||||
state := #{poolname := ReturnedPoolName} = State,
|
||||
status := InitialStatus
|
||||
}} = emqx_resource:create_local(
|
||||
PoolName,
|
||||
?CONNECTOR_RESOURCE_GROUP,
|
||||
?REDIS_RESOURCE_MOD,
|
||||
|
|
@ -90,20 +92,24 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
|
|||
),
|
||||
?assertEqual(InitialStatus, connected),
|
||||
% Instance should match the state and status of the just started resource
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State,
|
||||
status := InitialStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
|
||||
state := State,
|
||||
status := InitialStatus
|
||||
}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(ok, emqx_resource:health_check(PoolName)),
|
||||
% Perform query as further check that the resource is working as expected
|
||||
?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
|
||||
?assertEqual(ok, emqx_resource:stop(PoolName)),
|
||||
% Resource will be listed still, but state will be changed and healthcheck will fail
|
||||
% as the worker no longer exists.
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{state := State,
|
||||
status := StoppedStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{
|
||||
state := State,
|
||||
status := StoppedStatus
|
||||
}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(StoppedStatus, disconnected),
|
||||
?assertEqual({error,health_check_failed}, emqx_resource:health_check(PoolName)),
|
||||
?assertEqual({error, health_check_failed}, emqx_resource:health_check(PoolName)),
|
||||
% Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
|
||||
?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
|
||||
% Can call stop/1 again on an already stopped instance
|
||||
|
|
@ -112,8 +118,8 @@ perform_lifecycle_check(PoolName, InitialConfig, RedisCommand) ->
|
|||
?assertEqual(ok, emqx_resource:restart(PoolName)),
|
||||
% async restart; wait for the resource to come back
|
||||
timer:sleep(500),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}}
|
||||
= emqx_resource:get_instance(PoolName),
|
||||
{ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
|
||||
emqx_resource:get_instance(PoolName),
|
||||
?assertEqual(ok, emqx_resource:health_check(PoolName)),
|
||||
?assertEqual({ok, <<"PONG">>}, emqx_resource:query(PoolName, {cmd, RedisCommand})),
|
||||
% Stop and remove the resource in one go.
|
||||
|
|
@ -136,14 +142,21 @@ redis_config_sentinel() ->
|
|||
redis_config_base("sentinel", "servers").
|
||||
|
||||
redis_config_base(Type, ServerKey) ->
|
||||
RawConfig = list_to_binary(io_lib:format("""
|
||||
auto_reconnect = true
|
||||
database = 1
|
||||
pool_size = 8
|
||||
redis_type = ~s
|
||||
password = public
|
||||
~s = \"~s:~b\"
|
||||
""", [Type, ServerKey, ?REDIS_HOST, ?REDIS_PORT])),
|
||||
RawConfig = list_to_binary(
|
||||
io_lib:format(
|
||||
""
|
||||
"\n"
|
||||
" auto_reconnect = true\n"
|
||||
" database = 1\n"
|
||||
" pool_size = 8\n"
|
||||
" redis_type = ~s\n"
|
||||
" password = public\n"
|
||||
" ~s = \"~s:~b\"\n"
|
||||
" "
|
||||
"",
|
||||
[Type, ServerKey, ?REDIS_HOST, ?REDIS_PORT]
|
||||
)
|
||||
),
|
||||
|
||||
{ok, Config} = hocon:binary(RawConfig),
|
||||
#{<<"config">> => Config}.
|
||||
|
|
|
|||
|
|
@ -19,10 +19,11 @@
|
|||
-include_lib("common_test/include/ct.hrl").
|
||||
-include_lib("eunit/include/eunit.hrl").
|
||||
|
||||
-export([ check_fields/1
|
||||
, start_apps/1
|
||||
, stop_apps/1
|
||||
]).
|
||||
-export([
|
||||
check_fields/1,
|
||||
start_apps/1,
|
||||
stop_apps/1
|
||||
]).
|
||||
|
||||
check_fields({FieldName, FieldValue}) ->
|
||||
?assert(is_atom(FieldName)),
|
||||
|
|
@ -30,10 +31,10 @@ check_fields({FieldName, FieldValue}) ->
|
|||
is_map(FieldValue) ->
|
||||
ct:pal("~p~n", [{FieldName, FieldValue}]),
|
||||
?assert(
|
||||
        (maps:is_key(type, FieldValue)
         andalso maps:is_key(default, FieldValue))
        orelse ((maps:is_key(required, FieldValue)
                 andalso maps:get(required, FieldValue) =:= false))
        (maps:is_key(type, FieldValue) andalso
            maps:is_key(default, FieldValue)) orelse
            (maps:is_key(required, FieldValue) andalso
                maps:get(required, FieldValue) =:= false)
    );
|
||||
true ->
|
||||
?assert(is_function(FieldValue))
|
||||
|
|
|
|||
|
|
@ -74,8 +74,8 @@ Note: `sample_interval` should be a divisor of 60."""
|
|||
}
|
||||
inet6 {
|
||||
desc {
|
||||
en: "Enable IPv6 support."
|
||||
zh: "启用IPv6"
|
||||
en: "Enable IPv6 support, default is false, which means IPv4 only."
|
||||
zh: "启用IPv6, 如果机器不支持IPv6,请关闭此选项,否则会导致仪表盘无法使用。"
|
||||
}
|
||||
label {
|
||||
en: "IPv6"
|
||||
|
|
@ -85,7 +85,7 @@ Note: `sample_interval` should be a divisor of 60."""
|
|||
ipv6_v6only {
|
||||
desc {
|
||||
en: "Disable IPv4-to-IPv6 mapping for the listener."
|
||||
zh: "禁用IPv4-to-IPv6映射"
|
||||
zh: "当开启 inet6 功能的同时禁用 IPv4-to-IPv6 映射。该配置仅在 inet6 功能开启时有效。"
|
||||
}
|
||||
label {
|
||||
en: "IPv6 only"
|
||||
|
|
@ -132,6 +132,16 @@ Note: `sample_interval` should be a divisor of 60."""
|
|||
zh: "HTTPS"
|
||||
}
|
||||
}
|
||||
listener_enable {
|
||||
desc {
|
||||
en: "Ignore or enable this listener"
|
||||
zh: "忽略或启用该监听器配置"
|
||||
}
|
||||
label {
|
||||
en: "Enable"
|
||||
zh: "启用"
|
||||
}
|
||||
}
|
||||
bind {
|
||||
desc {
|
||||
en: "Port without IP(18083) or port with specified IP(127.0.0.1:18083)."
|
||||
|
|
|
|||
|
|
@ -153,10 +153,13 @@ apps() ->
|
|||
].
|
||||
|
||||
listeners(Listeners) ->
|
||||
lists:map(
|
||||
lists:filtermap(
|
||||
fun({Protocol, Conf}) ->
|
||||
{Conf1, Bind} = ip_port(Conf),
|
||||
{listener_name(Protocol, Conf1), Protocol, Bind, ranch_opts(Conf1)}
|
||||
maps:get(enable, Conf) andalso
|
||||
begin
|
||||
{Conf1, Bind} = ip_port(Conf),
|
||||
{true, {listener_name(Protocol, Conf1), Protocol, Bind, ranch_opts(Conf1)}}
|
||||
end
|
||||
end,
|
||||
maps:to_list(Listeners)
|
||||
).
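Switching from lists:map/2 to lists:filtermap/2 is what lets a disabled
listener be skipped instead of started: returning false drops the entry, and
{true, Value} keeps it. A toy illustration of the same pattern:

    %% Keep only entries whose config has enable => true.
    Enabled = lists:filtermap(
        fun({Name, #{enable := true} = Conf}) -> {true, {Name, Conf}};
           ({_Name, _Conf}) -> false
        end,
        [{http, #{enable => true}}, {https, #{enable => false}}]
    ),
    [{http, #{enable := true}}] = Enabled.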
|
||||
|
|
@ -172,34 +175,34 @@ init_i18n() ->
|
|||
Lang = emqx_conf:get([dashboard, i18n_lang], en),
|
||||
init_i18n(File, Lang).
|
||||
|
||||
ranch_opts(RanchOptions) ->
|
||||
ranch_opts(Options) ->
|
||||
Keys = [
|
||||
{ack_timeout, handshake_timeout},
|
||||
handshake_timeout,
|
||||
connection_type,
|
||||
max_connections,
|
||||
num_acceptors,
|
||||
shutdown,
|
||||
socket
|
||||
],
|
||||
{S, R} = lists:foldl(fun key_take/2, {RanchOptions, #{}}, Keys),
|
||||
R#{socket_opts => maps:fold(fun key_only/3, [], S)}.
|
||||
|
||||
key_take(Key, {All, R}) ->
    {K, KX} =
        case Key of
            {K1, K2} -> {K1, K2};
            _ -> {Key, Key}
        end,
    case maps:get(K, All, undefined) of
        undefined ->
            {All, R};
        V ->
            {maps:remove(K, All), R#{KX => V}}
    end.
    RanchOpts = maps:with(Keys, Options),
    SocketOpts = maps:fold(
        fun filter_false/3,
        [],
        maps:without([enable, inet6, ipv6_v6only | Keys], Options)
    ),
    InetOpts =
        case Options of
            #{inet6 := true, ipv6_v6only := true} ->
                [inet6, {ipv6_v6only, true}];
            #{inet6 := true, ipv6_v6only := false} ->
                [inet6];
            _ ->
                [inet]
        end,
    RanchOpts#{socket_opts => InetOpts ++ SocketOpts}.
|
||||
|
||||
key_only(K, true, S) -> [K | S];
|
||||
key_only(_K, false, S) -> S;
|
||||
key_only(K, V, S) -> [{K, V} | S].
|
||||
filter_false(_K, false, S) -> S;
|
||||
filter_false(K, V, S) -> [{K, V} | S].
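ranch_opts/1 now splits the listener map into Ranch options proper and socket
options, with the inet/inet6 flags decided up front. A hedged sketch of the
shape it produces (the input values here are illustrative only):

    %% Illustrative input/output for ranch_opts/1.
    Opts = #{num_acceptors => 4, max_connections => 512,
             inet6 => true, ipv6_v6only => false,
             enable => true, backlog => 1024, nodelay => false},
    #{num_acceptors := 4, max_connections := 512, socket_opts := SocketOpts} =
        ranch_opts(Opts),
    %% socket_opts starts with inet6; backlog survives, while the false-valued
    %% nodelay and the enable/inet6/ipv6_v6only keys are filtered out.
    true = lists:member(inet6, SocketOpts).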
|
||||
|
||||
listener_name(Protocol, #{port := Port, ip := IP}) ->
|
||||
Name =
|
||||
|
|
|
|||
|
|
@ -63,22 +63,42 @@ remove_handler() ->
|
|||
ok.
|
||||
|
||||
pre_config_update(_Path, UpdateConf0, RawConf) ->
|
||||
UpdateConf =
|
||||
case UpdateConf0 of
|
||||
#{<<"default_password">> := <<"******">>} ->
|
||||
maps:remove(<<"default_password">>, UpdateConf0);
|
||||
_ ->
|
||||
UpdateConf0
|
||||
end,
|
||||
UpdateConf = remove_sensitive_data(UpdateConf0),
|
||||
NewConf = emqx_map_lib:deep_merge(RawConf, UpdateConf),
|
||||
{ok, NewConf}.
|
||||
|
||||
-define(SENSITIVE_PASSWORD, <<"******">>).
|
||||
|
||||
remove_sensitive_data(Conf0) ->
|
||||
Conf1 =
|
||||
case Conf0 of
|
||||
#{<<"default_password">> := ?SENSITIVE_PASSWORD} ->
|
||||
maps:remove(<<"default_password">>, Conf0);
|
||||
_ ->
|
||||
Conf0
|
||||
end,
|
||||
case Conf1 of
|
||||
#{<<"listeners">> := #{<<"https">> := #{<<"password">> := ?SENSITIVE_PASSWORD}}} ->
|
||||
emqx_map_lib:deep_remove([<<"listeners">>, <<"https">>, <<"password">>], Conf1);
|
||||
_ ->
|
||||
Conf1
|
||||
end.
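remove_sensitive_data/1 strips the masked "******" placeholders so that a
redacted value coming back from the dashboard never overwrites the stored
secret during the deep merge. For example:

    %% The masked default_password is dropped; real values pass through.
    0 = maps:size(remove_sensitive_data(#{<<"default_password">> => <<"******">>})),
    #{<<"default_password">> := <<"secret">>} =
        remove_sensitive_data(#{<<"default_password">> => <<"secret">>}).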
|
||||
|
||||
post_config_update(_, _Req, NewConf, OldConf, _AppEnvs) ->
|
||||
    #{listeners := NewListeners} = NewConf,
    #{listeners := OldListeners} = OldConf,
    #{listeners := #{http := NewHttp, https := NewHttps}} = NewConf,
    #{listeners := #{http := OldHttp, https := OldHttps}} = OldConf,
    _ =
        case NewListeners =:= OldListeners of
            true -> ok;
            false -> erlang:send_after(500, ?MODULE, {update_listeners, OldListeners, NewListeners})
        case diff_listeners(OldHttp, NewHttp, OldHttps, NewHttps) of
            identical -> ok;
            {Stop, Start} -> erlang:send_after(500, ?MODULE, {update_listeners, Stop, Start})
        end,
    ok.
|
||||
|
||||
diff_listeners(Http, Http, Https, Https) ->
|
||||
identical;
|
||||
diff_listeners(OldHttp, NewHttp, Https, Https) ->
|
||||
{#{http => OldHttp}, #{http => NewHttp}};
|
||||
diff_listeners(Http, Http, OldHttps, NewHttps) ->
|
||||
{#{https => OldHttps}, #{https => NewHttps}};
|
||||
diff_listeners(OldHttp, NewHttp, OldHttps, NewHttps) ->
|
||||
{#{http => OldHttp, https => OldHttps}, #{http => NewHttp, https => NewHttps}}.
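diff_listeners/4 leans on Erlang's non-linear patterns: repeating a variable
(Http, Http) asserts that the old and new values are equal, so only the
listener that actually changed is stopped and restarted. For example:

    %% Only the https listener differs, so only it shows up in the diff.
    Http = #{bind => 18083},
    {#{https := #{bind := 18084}}, #{https := #{bind := 18085}}} =
        diff_listeners(Http, Http, #{bind => 18084}, #{bind => 18085}).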
|
||||
|
|
|
|||
|
|
@ -77,7 +77,32 @@ fields("listeners") ->
|
|||
];
|
||||
fields("http") ->
|
||||
[
|
||||
{"bind", fun bind/1},
|
||||
enable(true),
|
||||
bind(18803)
|
||||
| common_listener_fields()
|
||||
];
|
||||
fields("https") ->
|
||||
[
|
||||
enable(false),
|
||||
bind(18804)
|
||||
| common_listener_fields() ++
|
||||
exclude_fields(
|
||||
["enable", "fail_if_no_peer_cert"],
|
||||
emqx_schema:server_ssl_opts_schema(#{}, true)
|
||||
)
|
||||
].
|
||||
|
||||
exclude_fields([], Fields) ->
|
||||
Fields;
|
||||
exclude_fields([FieldName | Rest], Fields) ->
|
||||
%% assert field exists
|
||||
case lists:keytake(FieldName, 1, Fields) of
|
||||
{value, _, New} -> exclude_fields(Rest, New);
|
||||
false -> error({FieldName, Fields})
|
||||
end.
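exclude_fields/2 removes the named fields from a schema proplist and crashes
loudly when a name is missing, which guards against silent upstream schema
renames. Usage sketch (the field values are elided placeholders):

    %% Drop "enable"; an unknown field name would raise error({Name, Fields}).
    [{"bind", bind_schema}] =
        exclude_fields(["enable"], [{"enable", enable_schema}, {"bind", bind_schema}]).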
|
||||
|
||||
common_listener_fields() ->
|
||||
[
|
||||
{"num_acceptors",
|
||||
sc(
|
||||
integer(),
|
||||
|
|
@ -126,25 +151,40 @@ fields("http") ->
|
|||
desc => ?DESC(ipv6_v6only)
|
||||
}
|
||||
)}
|
||||
];
|
||||
fields("https") ->
|
||||
fields("http") ++
|
||||
proplists:delete(
|
||||
"fail_if_no_peer_cert",
|
||||
emqx_schema:server_ssl_opts_schema(#{}, true)
|
||||
).
|
||||
].
|
||||
|
||||
desc("dashboard") -> ?DESC(desc_dashboard);
|
||||
desc("listeners") -> ?DESC(desc_listeners);
|
||||
desc("http") -> ?DESC(desc_http);
|
||||
desc("https") -> ?DESC(desc_https);
|
||||
desc(_) -> undefined.
|
||||
enable(Bool) ->
|
||||
{"enable",
|
||||
sc(
|
||||
boolean(),
|
||||
#{
|
||||
default => Bool,
|
||||
required => true,
|
||||
desc => ?DESC(listener_enable)
|
||||
}
|
||||
)}.
|
||||
|
||||
bind(type) -> hoconsc:union([non_neg_integer(), emqx_schema:ip_port()]);
|
||||
bind(default) -> 18083;
|
||||
bind(required) -> true;
|
||||
bind(desc) -> ?DESC(bind);
|
||||
bind(_) -> undefined.
|
||||
bind(Port) ->
|
||||
{"bind",
|
||||
sc(
|
||||
hoconsc:union([non_neg_integer(), emqx_schema:ip_port()]),
|
||||
#{
|
||||
default => Port,
|
||||
required => true,
|
||||
desc => ?DESC(bind)
|
||||
}
|
||||
)}.
|
||||
|
||||
desc("dashboard") ->
|
||||
?DESC(desc_dashboard);
|
||||
desc("listeners") ->
|
||||
?DESC(desc_listeners);
|
||||
desc("http") ->
|
||||
?DESC(desc_http);
|
||||
desc("https") ->
|
||||
?DESC(desc_https);
|
||||
desc(_) ->
|
||||
undefined.
|
||||
|
||||
default_username(type) -> binary();
|
||||
default_username(default) -> "admin";
|
||||
|
|
|
|||
|
|
@ -37,6 +37,7 @@ set_default_config(DefaultUsername) ->
|
|||
Config = #{
|
||||
listeners => #{
|
||||
http => #{
|
||||
enable => true,
|
||||
port => 18083
|
||||
}
|
||||
},
|
||||
|
|
|
|||
|
|
@ -90,4 +90,12 @@ emqx_gateway_api_authn {
|
|||
zh: """Client ID 模糊搜索"""
|
||||
}
|
||||
}
|
||||
|
||||
is_superuser {
|
||||
desc {
|
||||
en: """Is superuser"""
|
||||
zh: """是否是超级用户"""
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
|||
|
|
@ -195,7 +195,8 @@ parse_qstring(Qs) ->
|
|||
<<"page">>,
|
||||
<<"limit">>,
|
||||
<<"like_username">>,
|
||||
<<"like_clientid">>
|
||||
<<"like_clientid">>,
|
||||
<<"is_superuser">>
|
||||
],
|
||||
Qs
|
||||
).
|
||||
|
|
@ -397,6 +398,15 @@ params_fuzzy_in_qs() ->
|
|||
desc => ?DESC(like_clientid),
|
||||
example => <<"clientid">>
|
||||
}
|
||||
)},
|
||||
{is_superuser,
|
||||
mk(
|
||||
boolean(),
|
||||
#{
|
||||
in => query,
|
||||
required => false,
|
||||
desc => ?DESC(is_superuser)
|
||||
}
|
||||
)}
|
||||
].
|
||||
|
||||
|
|
|
|||
|
|
@ -415,6 +415,53 @@ t_listeners_authn_data_mgmt(_) ->
|
|||
),
|
||||
{204, _} = request(delete, "/gateway/stomp").
|
||||
|
||||
t_authn_fuzzy_search(_) ->
|
||||
GwConf = #{name => <<"stomp">>},
|
||||
{201, _} = request(post, "/gateway", GwConf),
|
||||
{204, _} = request(get, "/gateway/stomp/authentication"),
|
||||
|
||||
AuthConf = #{
|
||||
mechanism => <<"password_based">>,
|
||||
backend => <<"built_in_database">>,
|
||||
user_id_type => <<"clientid">>
|
||||
},
|
||||
{201, _} = request(post, "/gateway/stomp/authentication", AuthConf),
|
||||
{200, ConfResp} = request(get, "/gateway/stomp/authentication"),
|
||||
assert_confs(AuthConf, ConfResp),
|
||||
|
||||
Checker = fun({User, Fuzzy}) ->
|
||||
{200, #{data := [UserRespd]}} = request(
|
||||
get, "/gateway/stomp/authentication/users", Fuzzy
|
||||
),
|
||||
assert_confs(UserRespd, User)
|
||||
end,
|
||||
|
||||
Create = fun(User) ->
|
||||
{201, _} = request(post, "/gateway/stomp/authentication/users", User)
|
||||
end,
|
||||
|
||||
UserDatas = [
|
||||
#{
|
||||
user_id => <<"test">>,
|
||||
password => <<"123456">>,
|
||||
is_superuser => false
|
||||
},
|
||||
#{
|
||||
user_id => <<"foo">>,
|
||||
password => <<"123456">>,
|
||||
is_superuser => true
|
||||
}
|
||||
],
|
||||
|
||||
FuzzyDatas = [[{<<"like_username">>, <<"test">>}], [{<<"is_superuser">>, <<"true">>}]],
|
||||
|
||||
lists:foreach(Create, UserDatas),
|
||||
lists:foreach(Checker, lists:zip(UserDatas, FuzzyDatas)),
|
||||
|
||||
{204, _} = request(delete, "/gateway/stomp/authentication"),
|
||||
{204, _} = request(get, "/gateway/stomp/authentication"),
|
||||
{204, _} = request(delete, "/gateway/stomp").
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Asserts
|
||||
|
||||
|
|
|
|||
|
|
@ -141,7 +141,8 @@ fields(app) ->
|
|||
binary(),
|
||||
#{example => <<"Note">>, required => false}
|
||||
)},
|
||||
{enable, hoconsc:mk(boolean(), #{desc => "Enable/Disable", required => false})}
|
||||
{enable, hoconsc:mk(boolean(), #{desc => "Enable/Disable", required => false})},
|
||||
{expired, hoconsc:mk(boolean(), #{desc => "Expired", required => false})}
|
||||
];
|
||||
fields(name) ->
|
||||
[
|
||||
|
|
|
|||
|
|
@ -513,7 +513,10 @@ fields(keepalive) ->
|
|||
fields(subscribe) ->
|
||||
[
|
||||
{topic, hoconsc:mk(binary(), #{desc => <<"Topic">>})},
|
||||
{qos, hoconsc:mk(emqx_schema:qos(), #{desc => <<"QoS">>})}
|
||||
{qos, hoconsc:mk(emqx_schema:qos(), #{desc => <<"QoS">>})},
|
||||
{nl, hoconsc:mk(integer(), #{default => 0, desc => <<"No Local">>})},
|
||||
{rap, hoconsc:mk(integer(), #{default => 0, desc => <<"Retain as Published">>})},
|
||||
{rh, hoconsc:mk(integer(), #{default => 0, desc => <<"Retain Handling">>})}
|
||||
];
|
||||
fields(unsubscribe) ->
|
||||
[
|
||||
|
|
@ -536,9 +539,8 @@ authz_cache(delete, #{bindings := Bindings}) ->
|
|||
clean_authz_cache(Bindings).
|
||||
|
||||
subscribe(post, #{bindings := #{clientid := ClientID}, body := TopicInfo}) ->
    Topic = maps:get(<<"topic">>, TopicInfo),
    Qos = maps:get(<<"qos">>, TopicInfo, 0),
    subscribe(#{clientid => ClientID, topic => Topic, qos => Qos}).
    Opts = emqx_map_lib:unsafe_atom_key_map(TopicInfo),
    subscribe(Opts#{clientid => ClientID}).
|
||||
|
||||
unsubscribe(post, #{bindings := #{clientid := ClientID}, body := TopicInfo}) ->
|
||||
Topic = maps:get(<<"topic">>, TopicInfo),
|
||||
|
|
@ -548,11 +550,7 @@ unsubscribe(post, #{bindings := #{clientid := ClientID}, body := TopicInfo}) ->
|
|||
subscribe_batch(post, #{bindings := #{clientid := ClientID}, body := TopicInfos}) ->
|
||||
Topics =
|
||||
[
|
||||
begin
|
||||
Topic = maps:get(<<"topic">>, TopicInfo),
|
||||
Qos = maps:get(<<"qos">>, TopicInfo, 0),
|
||||
#{topic => Topic, qos => Qos}
|
||||
end
|
||||
emqx_map_lib:unsafe_atom_key_map(TopicInfo)
|
||||
|| TopicInfo <- TopicInfos
|
||||
],
|
||||
subscribe_batch(#{clientid => ClientID, topics => Topics}).
|
||||
|
|
@ -564,12 +562,14 @@ subscriptions(get, #{bindings := #{clientid := ClientID}}) ->
|
|||
{Node, Subs} ->
|
||||
Formatter =
|
||||
fun({Topic, SubOpts}) ->
|
||||
#{
|
||||
node => Node,
|
||||
clientid => ClientID,
|
||||
topic => Topic,
|
||||
qos => maps:get(qos, SubOpts)
|
||||
}
|
||||
maps:merge(
|
||||
#{
|
||||
node => Node,
|
||||
clientid => ClientID,
|
||||
topic => Topic
|
||||
},
|
||||
maps:with([qos, nl, rap, rh], SubOpts)
|
||||
)
|
||||
end,
|
||||
{200, lists:map(Formatter, Subs)}
|
||||
end.
|
||||
|
|
@@ -659,21 +659,16 @@ clean_authz_cache(#{clientid := ClientID}) ->
             {500, #{code => <<"UNKNOW_ERROR">>, message => Message}}
     end.

-subscribe(#{clientid := ClientID, topic := Topic, qos := Qos}) ->
-    case do_subscribe(ClientID, Topic, Qos) of
+subscribe(#{clientid := ClientID, topic := Topic} = Sub) ->
+    Opts = maps:with([qos, nl, rap, rh], Sub),
+    case do_subscribe(ClientID, Topic, Opts) of
         {error, channel_not_found} ->
             {404, ?CLIENT_ID_NOT_FOUND};
         {error, Reason} ->
             Message = list_to_binary(io_lib:format("~p", [Reason])),
             {500, #{code => <<"UNKNOW_ERROR">>, message => Message}};
         {ok, Node} ->
-            Response =
-                #{
-                    clientid => ClientID,
-                    topic => Topic,
-                    qos => Qos,
-                    node => Node
-                },
+            Response = Sub#{node => Node},
             {200, Response}
     end.

@@ -686,15 +681,18 @@ unsubscribe(#{clientid := ClientID, topic := Topic}) ->
     end.

 subscribe_batch(#{clientid := ClientID, topics := Topics}) ->
-    ArgList = [[ClientID, Topic, Qos] || #{topic := Topic, qos := Qos} <- Topics],
+    ArgList = [
+        [ClientID, Topic, maps:with([qos, nl, rap, rh], Sub)]
+        || #{topic := Topic} = Sub <- Topics
+    ],
     emqx_mgmt_util:batch_operation(?MODULE, do_subscribe, ArgList).

 %%--------------------------------------------------------------------
 %% internal function

-do_subscribe(ClientID, Topic0, Qos) ->
-    {Topic, Opts} = emqx_topic:parse(Topic0),
-    TopicTable = [{Topic, Opts#{qos => Qos}}],
+do_subscribe(ClientID, Topic0, Options) ->
+    {Topic, Opts} = emqx_topic:parse(Topic0, Options),
+    TopicTable = [{Topic, Opts}],
     case emqx_mgmt:subscribe(ClientID, TopicTable) of
         {error, Reason} ->
             {error, Reason};
@@ -43,7 +43,8 @@ schema("/cluster") ->
         responses => #{
             200 => [
                 {name, ?HOCON(string(), #{desc => "Cluster name"})},
-                {nodes, ?HOCON(?ARRAY(string()), #{desc => "Node name"})}
+                {nodes, ?HOCON(?ARRAY(string()), #{desc => "Node name"})},
+                {self, ?HOCON(string(), #{desc => "Self node name"})}
             ]
         }
     }

@@ -97,7 +98,8 @@ cluster_info(get, _) ->
     ClusterName = application:get_env(ekka, cluster_name, emqxcl),
     Info = #{
         name => ClusterName,
-        nodes => mria_mnesia:running_nodes()
+        nodes => mria_mnesia:running_nodes(),
+        self => node()
     },
     {200, Info}.
@@ -72,7 +72,10 @@ fields(subscription) ->
         {node, hoconsc:mk(binary(), #{desc => <<"Access type">>})},
         {topic, hoconsc:mk(binary(), #{desc => <<"Topic name">>})},
         {clientid, hoconsc:mk(binary(), #{desc => <<"Client identifier">>})},
-        {qos, hoconsc:mk(emqx_schema:qos(), #{desc => <<"QoS">>})}
+        {qos, hoconsc:mk(emqx_schema:qos(), #{desc => <<"QoS">>})},
+        {nl, hoconsc:mk(integer(), #{desc => <<"No Local">>})},
+        {rap, hoconsc:mk(integer(), #{desc => <<"Retain as Published">>})},
+        {rh, hoconsc:mk(integer(), #{desc => <<"Retain Handling">>})}
     ].

 parameters() ->

@@ -163,22 +166,20 @@ format(Items) when is_list(Items) ->
     [format(Item) || Item <- Items];
 format({{Subscriber, Topic}, Options}) ->
     format({Subscriber, Topic, Options});
-format({_Subscriber, Topic, Options = #{share := Group}}) ->
-    QoS = maps:get(qos, Options),
-    #{
-        topic => filename:join([<<"$share">>, Group, Topic]),
-        clientid => maps:get(subid, Options),
-        qos => QoS,
-        node => node()
-    };
 format({_Subscriber, Topic, Options}) ->
-    QoS = maps:get(qos, Options),
-    #{
-        topic => Topic,
-        clientid => maps:get(subid, Options),
-        qos => QoS,
-        node => node()
-    }.
+    maps:merge(
+        #{
+            topic => get_topic(Topic, Options),
+            clientid => maps:get(subid, Options),
+            node => node()
+        },
+        maps:with([qos, nl, rap, rh], Options)
+    ).
+
+get_topic(Topic, #{share := Group}) ->
+    filename:join([<<"$share">>, Group, Topic]);
+get_topic(Topic, _) ->
+    Topic.

 %%--------------------------------------------------------------------
 %% Query Function
@@ -129,17 +129,20 @@ ensure_not_undefined(undefined, Old) -> Old;
 ensure_not_undefined(New, _Old) -> New.

 to_map(Apps) when is_list(Apps) ->
-    Fields = record_info(fields, ?APP),
-    lists:map(
-        fun(Trace0 = #?APP{}) ->
-            [_ | Values] = tuple_to_list(Trace0),
-            maps:remove(api_secret_hash, maps:from_list(lists:zip(Fields, Values)))
-        end,
-        Apps
-    );
-to_map(App0) ->
-    [App] = to_map([App0]),
-    App.
+    [to_map(App) || App <- Apps];
+to_map(#?APP{name = N, api_key = K, enable = E, expired_at = ET, created_at = CT, desc = D}) ->
+    #{
+        name => N,
+        api_key => K,
+        enable => E,
+        expired_at => ET,
+        created_at => CT,
+        desc => D,
+        expired => is_expired(ET)
+    }.
+
+is_expired(undefined) -> false;
+is_expired(ExpiredTime) -> ExpiredTime < erlang:system_time(second).

 create_app(Name, Enable, ExpiredAt, Desc) ->
     ApiSecret = generate_api_secret(),
@@ -43,7 +43,9 @@ t_clients(_) ->

     AuthHeader = emqx_mgmt_api_test_util:auth_header_(),

-    {ok, C1} = emqtt:start_link(#{username => Username1, clientid => ClientId1}),
+    {ok, C1} = emqtt:start_link(#{
+        username => Username1, clientid => ClientId1, proto_ver => v5
+    }),
     {ok, _} = emqtt:connect(C1),
     {ok, C2} = emqtt:start_link(#{username => Username2, clientid => ClientId2}),
     {ok, _} = emqtt:connect(C2),

@@ -87,7 +89,7 @@ t_clients(_) ->
     ?assertEqual("[]", Client1AuthzCache),

     %% post /clients/:clientid/subscribe
-    SubscribeBody = #{topic => Topic, qos => Qos},
+    SubscribeBody = #{topic => Topic, qos => Qos, nl => 1, rh => 1},
     SubscribePath = emqx_mgmt_api_test_util:api_path([
         "clients",
         binary_to_list(ClientId1),

@@ -105,6 +107,32 @@ t_clients(_) ->
     ?assertEqual(AfterSubTopic, Topic),
     ?assertEqual(AfterSubQos, Qos),

+    %% get /clients/:clientid/subscriptions
+    SubscriptionsPath = emqx_mgmt_api_test_util:api_path([
+        "clients",
+        binary_to_list(ClientId1),
+        "subscriptions"
+    ]),
+    {ok, SubscriptionsRes} = emqx_mgmt_api_test_util:request_api(
+        get,
+        SubscriptionsPath,
+        "",
+        AuthHeader
+    ),
+    [SubscriptionsData] = emqx_json:decode(SubscriptionsRes, [return_maps]),
+    ?assertMatch(
+        #{
+            <<"clientid">> := ClientId1,
+            <<"nl">> := 1,
+            <<"rap">> := 0,
+            <<"rh">> := 1,
+            <<"node">> := _,
+            <<"qos">> := Qos,
+            <<"topic">> := Topic
+        },
+        SubscriptionsData
+    ),
+
     %% post /clients/:clientid/unsubscribe
     UnSubscribePath = emqx_mgmt_api_test_util:api_path([
         "clients",
@@ -72,19 +72,19 @@ t_wss_crud_listeners_by_id(_) ->
     crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type).

 crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
-    TcpPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]),
+    OriginPath = emqx_mgmt_api_test_util:api_path(["listeners", ListenerId]),
     NewPath = emqx_mgmt_api_test_util:api_path(["listeners", NewListenerId]),
-    TcpListener = request(get, TcpPath, [], []),
+    OriginListener = request(get, OriginPath, [], []),

     %% create with full options
     ?assertEqual({error, not_found}, is_running(NewListenerId)),
     ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, NewPath, [], [])),
-    NewConf = TcpListener#{
+    NewConf = OriginListener#{
         <<"id">> => NewListenerId,
         <<"bind">> => <<"0.0.0.0:2883">>
     },
     Create = request(post, NewPath, [], NewConf),
-    ?assertEqual(lists:sort(maps:keys(TcpListener)), lists:sort(maps:keys(Create))),
+    ?assertEqual(lists:sort(maps:keys(OriginListener)), lists:sort(maps:keys(Create))),
     Get1 = request(get, NewPath, [], []),
     ?assertMatch(Create, Get1),
     ?assert(is_running(NewListenerId)),

@@ -93,20 +93,42 @@ crud_listeners_by_id(ListenerId, NewListenerId, MinListenerId, BadId, Type) ->
     MinPath = emqx_mgmt_api_test_util:api_path(["listeners", MinListenerId]),
     ?assertEqual({error, not_found}, is_running(MinListenerId)),
     ?assertMatch({error, {"HTTP/1.1", 404, _}}, request(get, MinPath, [], [])),
-    MinConf = #{
-        <<"id">> => MinListenerId,
-        <<"bind">> => <<"0.0.0.0:3883">>,
-        <<"type">> => Type
-    },
+    MinConf =
+        case OriginListener of
+            #{
+                <<"ssl">> :=
+                    #{
+                        <<"cacertfile">> := CaCertFile,
+                        <<"certfile">> := CertFile,
+                        <<"keyfile">> := KeyFile
+                    }
+            } ->
+                #{
+                    <<"id">> => MinListenerId,
+                    <<"bind">> => <<"0.0.0.0:3883">>,
+                    <<"type">> => Type,
+                    <<"ssl">> => #{
+                        <<"cacertfile">> => CaCertFile,
+                        <<"certfile">> => CertFile,
+                        <<"keyfile">> => KeyFile
+                    }
+                };
+            _ ->
+                #{
+                    <<"id">> => MinListenerId,
+                    <<"bind">> => <<"0.0.0.0:3883">>,
+                    <<"type">> => Type
+                }
+        end,
     MinCreate = request(post, MinPath, [], MinConf),
-    ?assertEqual(lists:sort(maps:keys(TcpListener)), lists:sort(maps:keys(MinCreate))),
+    ?assertEqual(lists:sort(maps:keys(OriginListener)), lists:sort(maps:keys(MinCreate))),
     MinGet = request(get, MinPath, [], []),
     ?assertMatch(MinCreate, MinGet),
     ?assert(is_running(MinListenerId)),

     %% bad create(same port)
     BadPath = emqx_mgmt_api_test_util:api_path(["listeners", BadId]),
-    BadConf = TcpListener#{
+    BadConf = OriginListener#{
         <<"id">> => BadId,
         <<"bind">> => <<"0.0.0.0:2883">>
     },
@@ -25,6 +25,10 @@

 %% notice: integer topic for sort response
 -define(TOPIC1, <<"t/0000">>).
+-define(TOPIC1RH, 1).
+-define(TOPIC1RAP, false).
+-define(TOPIC1NL, false).
+-define(TOPIC1QOS, 1).
 -define(TOPIC2, <<"$share/test_group/t/0001">>).
 -define(TOPIC2_TOPIC_ONLY, <<"t/0001">>).

@@ -41,9 +45,13 @@ end_per_suite(_) ->
     emqx_mgmt_api_test_util:end_suite().

 t_subscription_api(_) ->
-    {ok, Client} = emqtt:start_link(#{username => ?USERNAME, clientid => ?CLIENTID}),
+    {ok, Client} = emqtt:start_link(#{username => ?USERNAME, clientid => ?CLIENTID, proto_ver => v5}),
     {ok, _} = emqtt:connect(Client),
-    {ok, _, _} = emqtt:subscribe(Client, ?TOPIC1),
+    {ok, _, _} = emqtt:subscribe(
+        Client, [
+            {?TOPIC1, [{rh, ?TOPIC1RH}, {rap, ?TOPIC1RAP}, {nl, ?TOPIC1NL}, {qos, ?TOPIC1QOS}]}
+        ]
+    ),
     {ok, _, _} = emqtt:subscribe(Client, ?TOPIC2),
     Path = emqx_mgmt_api_test_util:api_path(["subscriptions"]),
     {ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path),

@@ -59,9 +67,21 @@ t_subscription_api(_) ->
             maps:get(T1, ?TOPIC_SORT) =< maps:get(T2, ?TOPIC_SORT)
         end,
     [Subscriptions1, Subscriptions2] = lists:sort(Sort, Subscriptions),
-    ?assertEqual(maps:get(<<"topic">>, Subscriptions1), ?TOPIC1),
+
+    ?assertMatch(
+        #{
+            <<"topic">> := ?TOPIC1,
+            <<"qos">> := ?TOPIC1QOS,
+            <<"nl">> := _,
+            <<"rap">> := _,
+            <<"rh">> := ?TOPIC1RH,
+            <<"clientid">> := ?CLIENTID,
+            <<"node">> := _
+        },
+        Subscriptions1
+    ),
+
     ?assertEqual(maps:get(<<"topic">>, Subscriptions2), ?TOPIC2),
     ?assertEqual(maps:get(<<"clientid">>, Subscriptions1), ?CLIENTID),
     ?assertEqual(maps:get(<<"clientid">>, Subscriptions2), ?CLIENTID),

     QS = uri_string:compose_query([

@@ -94,8 +114,8 @@ t_subscription_api(_) ->
     MatchMeta = maps:get(<<"meta">>, MatchData),
     ?assertEqual(1, maps:get(<<"page">>, MatchMeta)),
     ?assertEqual(emqx_mgmt:max_row_limit(), maps:get(<<"limit">>, MatchMeta)),
-    ?assertEqual(2, maps:get(<<"count">>, MatchMeta)),
+    ?assertEqual(1, maps:get(<<"count">>, MatchMeta)),
     MatchSubs = maps:get(<<"data">>, MatchData),
-    ?assertEqual(length(MatchSubs), 2),
+    ?assertEqual(1, length(MatchSubs)),

     emqtt:disconnect(Client).
@@ -143,10 +143,10 @@ store(DelayedMsg) ->
     gen_server:call(?SERVER, {store, DelayedMsg}, infinity).

 enable() ->
-    gen_server:call(?SERVER, enable).
+    enable(true).

 disable() ->
-    gen_server:call(?SERVER, disable).
+    enable(false).

 set_max_delayed_messages(Max) ->
     gen_server:call(?SERVER, {set_max_delayed_messages, Max}).

@@ -238,21 +238,7 @@ update_config(Config) ->
     emqx_conf:update([delayed], Config, #{rawconf_with_defaults => true, override_to => cluster}).

 post_config_update(_KeyPath, Config, _NewConf, _OldConf, _AppEnvs) ->
-    case maps:get(<<"enable">>, Config, undefined) of
-        undefined ->
-            ignore;
-        true ->
-            emqx_delayed:enable();
-        false ->
-            emqx_delayed:disable()
-    end,
-    case maps:get(<<"max_delayed_messages">>, Config, undefined) of
-        undefined ->
-            ignore;
-        Max ->
-            ok = emqx_delayed:set_max_delayed_messages(Max)
-    end,
-    ok.
+    gen_server:call(?SERVER, {update_config, Config}).

 %%--------------------------------------------------------------------
 %% gen_server callback

@@ -262,7 +248,7 @@ init([Opts]) ->
     erlang:process_flag(trap_exit, true),
     emqx_conf:add_handler([delayed], ?MODULE),
     MaxDelayedMessages = maps:get(max_delayed_messages, Opts, 0),
-    {ok,
+    State =
         ensure_stats_event(
             ensure_publish_timer(#{
                 publish_timer => undefined,

@@ -271,7 +257,8 @@ init([Opts]) ->
                 stats_fun => undefined,
                 max_delayed_messages => MaxDelayedMessages
             })
-        )}.
+        ),
+    {ok, ensure_enable(emqx:get_config([delayed, enable]), State)}.

 handle_call({set_max_delayed_messages, Max}, _From, State) ->
     {reply, ok, State#{max_delayed_messages => Max}};

@@ -293,12 +280,11 @@ handle_call(
             emqx_metrics:inc('messages.delayed'),
             {reply, ok, ensure_publish_timer(Key, State)}
     end;
-handle_call(enable, _From, State) ->
-    emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}),
-    {reply, ok, State};
-handle_call(disable, _From, State) ->
-    emqx_hooks:del('message.publish', {?MODULE, on_message_publish}),
-    {reply, ok, State};
+handle_call({update_config, Config}, _From, #{max_delayed_messages := Max} = State) ->
+    Max2 = maps:get(<<"max_delayed_messages">>, Config, Max),
+    State2 = State#{max_delayed_messages := Max2},
+    State3 = ensure_enable(maps:get(<<"enable">>, Config, undefined), State2),
+    {reply, ok, State3};
 handle_call(Req, _From, State) ->
     ?tp(error, emqx_delayed_unexpected_call, #{call => Req}),
     {reply, ignored, State}.

@@ -320,10 +306,10 @@ handle_info(Info, State) ->
     ?tp(error, emqx_delayed_unexpected_info, #{info => Info}),
     {noreply, State}.

-terminate(_Reason, #{publish_timer := PublishTimer, stats_timer := StatsTimer}) ->
+terminate(_Reason, #{stats_timer := StatsTimer} = State) ->
     emqx_conf:remove_handler([delayed]),
-    emqx_misc:cancel_timer(PublishTimer),
-    emqx_misc:cancel_timer(StatsTimer).
+    emqx_misc:cancel_timer(StatsTimer),
+    ensure_enable(false, State).

 code_change(_Vsn, State, _Extra) ->
     {ok, State}.

@@ -378,3 +364,23 @@ do_publish(Key = {Ts, _Id}, Now, Acc) when Ts =< Now ->

 -spec delayed_count() -> non_neg_integer().
 delayed_count() -> mnesia:table_info(?TAB, size).
+
+enable(Enable) ->
+    case emqx:get_raw_config([delayed]) of
+        #{<<"enable">> := Enable} ->
+            ok;
+        Cfg ->
+            {ok, _} = update_config(Cfg#{<<"enable">> := Enable}),
+            ok
+    end.
+
+ensure_enable(true, State) ->
+    emqx_hooks:put('message.publish', {?MODULE, on_message_publish, []}),
+    State;
+ensure_enable(false, #{publish_timer := PubTimer} = State) ->
+    emqx_hooks:del('message.publish', {?MODULE, on_message_publish}),
+    emqx_misc:cancel_timer(PubTimer),
+    ets:delete_all_objects(?TAB),
+    State#{publish_timer := undefined, publish_at := 0};
+ensure_enable(_, State) ->
+    State.
@@ -55,13 +55,26 @@ end_per_testcase(_Case, _Config) ->
 %% Test cases
 %%--------------------------------------------------------------------

-t_load_case(_) ->
+t_enable_disable_case(_) ->
+    emqx_delayed:disable(),
+    Hooks = emqx_hooks:lookup('message.publish'),
+    MFA = {emqx_delayed, on_message_publish, []},
+    ?assertEqual(false, lists:keyfind(MFA, 2, Hooks)),
+
+    ok = emqx_delayed:enable(),
+    Hooks1 = emqx_hooks:lookup('message.publish'),
+    ?assertNotEqual(false, lists:keyfind(MFA, 2, Hooks1)),
+
     Ts0 = integer_to_binary(erlang:system_time(second) + 10),
     DelayedMsg0 = emqx_message:make(
         ?MODULE, 1, <<"$delayed/", Ts0/binary, "/publish">>, <<"delayed_abs">>
     ),
     _ = on_message_publish(DelayedMsg0),
     ?assertMatch(#{data := Datas} when Datas =/= [], emqx_delayed:list(#{})),
+
+    emqx_delayed:disable(),
+    ?assertEqual(false, lists:keyfind(MFA, 2, Hooks)),
+    ?assertMatch(#{data := []}, emqx_delayed:list(#{})),
     ok.

 t_delayed_message(_) ->

@@ -76,7 +89,7 @@ t_delayed_message(_) ->

     [#delayed_message{msg = #message{payload = Payload}}] = ets:tab2list(emqx_delayed),
     ?assertEqual(<<"delayed_m">>, Payload),
-    ct:sleep(2000),
+    ct:sleep(2500),

     EmptyKey = mnesia:dirty_all_keys(emqx_delayed),
     ?assertEqual([], EmptyKey).
@@ -98,6 +98,7 @@ t_status(_Config) ->

 t_messages(_) ->
+    clear_all_record(),
     emqx_delayed:enable(),

     {ok, C1} = emqtt:start_link([{clean_start, true}]),
     {ok, _} = emqtt:connect(C1),

@@ -114,7 +115,7 @@ t_messages(_) ->
     end,

     lists:foreach(Each, lists:seq(1, 5)),
-    timer:sleep(500),
+    timer:sleep(1000),

     Msgs = get_messages(5),
     [First | _] = Msgs,

@@ -197,6 +198,7 @@ t_messages(_) ->

 t_large_payload(_) ->
+    clear_all_record(),
     emqx_delayed:enable(),

     {ok, C1} = emqtt:start_link([{clean_start, true}]),
     {ok, _} = emqtt:connect(C1),

@@ -209,7 +211,7 @@ t_large_payload(_) ->
         [{qos, 0}, {retain, true}]
     ),

-    timer:sleep(500),
+    timer:sleep(1000),

     [#{msgid := MsgId}] = get_messages(1),

@@ -241,8 +243,13 @@ get_messages(Len) ->
     {ok, 200, MsgsJson} = request(get, uri(["mqtt", "delayed", "messages"])),
     #{data := Msgs} = decode_json(MsgsJson),
     MsgLen = erlang:length(Msgs),
-    ?assert(
-        MsgLen =:= Len,
-        lists:flatten(io_lib:format("message length is:~p~n", [MsgLen]))
+    ?assertEqual(
+        Len,
+        MsgLen,
+        lists:flatten(
+            io_lib:format("message length is:~p~nWhere:~p~nHooks:~p~n", [
+                MsgLen, erlang:whereis(emqx_delayed), ets:tab2list(emqx_hooks)
+            ])
+        )
     ),
     Msgs.
@@ -1,4 +1,5 @@
 %% -*- mode: erlang -*-

-{deps, [ {emqx, {path, "../emqx"}}
-       ]}.
+{deps, [{emqx, {path, "../emqx"}}]}.
+
+{project_plugins, [erlfmt]}.
@@ -1,9 +1,9 @@
 %% -*- mode: erlang -*-
-{application, emqx_plugins,
- [{description, "EMQX Plugin Management"},
-  {vsn, "0.1.0"},
-  {modules, []},
-  {mod, {emqx_plugins_app,[]}},
-  {applications, [kernel,stdlib,emqx]},
-  {env, []}
- ]}.
+{application, emqx_plugins, [
+    {description, "EMQX Plugin Management"},
+    {vsn, "0.1.0"},
+    {modules, []},
+    {mod, {emqx_plugins_app, []}},
+    {applications, [kernel, stdlib, emqx]},
+    {env, []}
+]}.
@@ -19,35 +19,37 @@
 -include_lib("emqx/include/emqx.hrl").
 -include_lib("emqx/include/logger.hrl").

--export([ ensure_installed/1
-        , ensure_uninstalled/1
-        , ensure_enabled/1
-        , ensure_enabled/2
-        , ensure_disabled/1
-        , purge/1
-        , delete_package/1
-        ]).
+-export([
+    ensure_installed/1,
+    ensure_uninstalled/1,
+    ensure_enabled/1,
+    ensure_enabled/2,
+    ensure_disabled/1,
+    purge/1,
+    delete_package/1
+]).

--export([ ensure_started/0
-        , ensure_started/1
-        , ensure_stopped/0
-        , ensure_stopped/1
-        , restart/1
-        , list/0
-        , describe/1
-        , parse_name_vsn/1
-        ]).
+-export([
+    ensure_started/0,
+    ensure_started/1,
+    ensure_stopped/0,
+    ensure_stopped/1,
+    restart/1,
+    list/0,
+    describe/1,
+    parse_name_vsn/1
+]).

--export([ get_config/2
-        , put_config/2
-        ]).
+-export([
+    get_config/2,
+    put_config/2
+]).

 %% internal
--export([ do_ensure_started/1
-        ]).
+-export([do_ensure_started/1]).
 -export([
-         install_dir/0
-        ]).
+    install_dir/0
+]).

 -ifdef(TEST).
 -compile(export_all).

@@ -58,8 +60,10 @@
 -include_lib("emqx/include/logger.hrl").
 -include("emqx_plugins.hrl").

--type name_vsn() :: binary() | string(). %% "my_plugin-0.1.0"
--type plugin() :: map(). %% the parse result of the JSON info file
+%% "my_plugin-0.1.0"
+-type name_vsn() :: binary() | string().
+%% the parse result of the JSON info file
+-type plugin() :: map().
 -type position() :: no_move | front | rear | {before, name_vsn()} | {behind, name_vsn()}.

 %%--------------------------------------------------------------------
@@ -86,22 +90,25 @@ do_ensure_installed(NameVsn) ->
     case erl_tar:extract(TarGz, [{cwd, install_dir()}, compressed]) of
         ok ->
             case read_plugin(NameVsn, #{}) of
-                {ok, _} -> ok;
+                {ok, _} ->
+                    ok;
                 {error, Reason} ->
                     ?SLOG(warning, Reason#{msg => "failed_to_read_after_install"}),
                     _ = ensure_uninstalled(NameVsn),
                     {error, Reason}
             end;
         {error, {_, enoent}} ->
-            {error, #{ reason => "failed_to_extract_plugin_package"
-                     , path => TarGz
-                     , return => not_found
-                     }};
+            {error, #{
+                reason => "failed_to_extract_plugin_package",
+                path => TarGz,
+                return => not_found
+            }};
         {error, Reason} ->
-            {error, #{ reason => "bad_plugin_package"
-                     , path => TarGz
-                     , return => Reason
-                     }}
+            {error, #{
+                reason => "bad_plugin_package",
+                path => TarGz,
+                return => Reason
+            }}
     end.

 %% @doc Ensure files and directories for the given plugin are delete.

@@ -110,13 +117,15 @@ do_ensure_installed(NameVsn) ->
 ensure_uninstalled(NameVsn) ->
     case read_plugin(NameVsn, #{}) of
         {ok, #{running_status := RunningSt}} when RunningSt =/= stopped ->
-            {error, #{reason => "bad_plugin_running_status",
-                      hint => "stop_the_plugin_first"
-                     }};
+            {error, #{
+                reason => "bad_plugin_running_status",
+                hint => "stop_the_plugin_first"
+            }};
         {ok, #{config_status := enabled}} ->
-            {error, #{reason => "bad_plugin_config_status",
-                      hint => "disable_the_plugin_first"
-                     }};
+            {error, #{
+                reason => "bad_plugin_config_status",
+                hint => "disable_the_plugin_first"
+            }};
         _ ->
             purge(NameVsn)
     end.

@@ -141,9 +150,10 @@ ensure_state(NameVsn, Position, State) when is_binary(NameVsn) ->
 ensure_state(NameVsn, Position, State) ->
     case read_plugin(NameVsn, #{}) of
         {ok, _} ->
-            Item = #{ name_vsn => NameVsn
-                    , enable => State
-                    },
+            Item = #{
+                name_vsn => NameVsn,
+                enable => State
+            },
             tryit("ensure_state", fun() -> ensure_configured(Item, Position) end);
         {error, Reason} ->
             {error, Reason}
@@ -175,18 +185,19 @@ add_new_configured(Configured, {Action, NameVsn}, Item) ->
     SplitFun = fun(#{name_vsn := Nv}) -> bin(Nv) =/= bin(NameVsn) end,
     {Front, Rear} = lists:splitwith(SplitFun, Configured),
     Rear =:= [] andalso
-        throw(#{error => "position_anchor_plugin_not_configured",
-                hint => "maybe_install_and_configure",
-                name_vsn => NameVsn
-               }),
+        throw(#{
+            error => "position_anchor_plugin_not_configured",
+            hint => "maybe_install_and_configure",
+            name_vsn => NameVsn
+        }),
     case Action of
-        before -> Front ++ [Item | Rear];
+        before ->
+            Front ++ [Item | Rear];
         behind ->
             [Anchor | Rear0] = Rear,
             Front ++ [Anchor, Item | Rear0]
     end.

 %% @doc Delete the package file.
 -spec delete_package(name_vsn()) -> ok.
 delete_package(NameVsn) ->

@@ -198,9 +209,11 @@ delete_package(NameVsn) ->
         {error, enoent} ->
             ok;
         {error, Reason} ->
-            ?SLOG(error, #{msg => "failed_to_delete_package_file",
-                           path => File,
-                           reason => Reason}),
+            ?SLOG(error, #{
+                msg => "failed_to_delete_package_file",
+                path => File,
+                reason => Reason
+            }),
             {error, Reason}
     end.

@@ -219,9 +232,11 @@ purge(NameVsn) ->
         {error, enoent} ->
             ok;
         {error, Reason} ->
-            ?SLOG(error, #{msg => "failed_to_purge_plugin_dir",
-                           dir => Dir,
-                           reason => Reason}),
+            ?SLOG(error, #{
+                msg => "failed_to_purge_plugin_dir",
+                dir => Dir,
+                reason => Reason
+            }),
             {error, Reason}
     end.

@@ -235,10 +250,13 @@ ensure_started() ->
 -spec ensure_started(name_vsn()) -> ok | {error, term()}.
 ensure_started(NameVsn) ->
     case do_ensure_started(NameVsn) of
-        ok -> ok;
+        ok ->
+            ok;
         {error, Reason} ->
-            ?SLOG(alert, #{msg => "failed_to_start_plugin",
-                           reason => Reason}),
+            ?SLOG(alert, #{
+                msg => "failed_to_start_plugin",
+                reason => Reason
+            }),
             {error, Reason}
     end.

@@ -250,11 +268,13 @@ ensure_stopped() ->
 %% @doc Stop a plugin from Management API or CLI.
 -spec ensure_stopped(name_vsn()) -> ok | {error, term()}.
 ensure_stopped(NameVsn) ->
-    tryit("stop_plugin",
-          fun() ->
-                  Plugin = do_read_plugin(NameVsn),
-                  ensure_apps_stopped(Plugin)
-          end).
+    tryit(
+        "stop_plugin",
+        fun() ->
+            Plugin = do_read_plugin(NameVsn),
+            ensure_apps_stopped(Plugin)
+        end
+    ).

 %% @doc Stop and then start the plugin.
 restart(NameVsn) ->
@@ -269,39 +289,45 @@ restart(NameVsn) ->
 list() ->
     Pattern = filename:join([install_dir(), "*", "release.json"]),
     All = lists:filtermap(
-        fun(JsonFile) ->
-            case read_plugin({file, JsonFile}, #{}) of
-                {ok, Info} ->
-                    {true, Info};
-                {error, Reason} ->
-                    ?SLOG(warning, Reason),
-                    false
-            end
-        end, filelib:wildcard(Pattern)),
+        fun(JsonFile) ->
+            case read_plugin({file, JsonFile}, #{}) of
+                {ok, Info} ->
+                    {true, Info};
+                {error, Reason} ->
+                    ?SLOG(warning, Reason),
+                    false
+            end
+        end,
+        filelib:wildcard(Pattern)
+    ),
     list(configured(), All).

 %% Make sure configured ones are ordered in front.
-list([], All) -> All;
+list([], All) ->
+    All;
 list([#{name_vsn := NameVsn} | Rest], All) ->
     SplitF = fun(#{<<"name">> := Name, <<"rel_vsn">> := Vsn}) ->
-                 bin([Name, "-", Vsn]) =/= bin(NameVsn)
-             end,
+        bin([Name, "-", Vsn]) =/= bin(NameVsn)
+    end,
     case lists:splitwith(SplitF, All) of
         {_, []} ->
-            ?SLOG(warning, #{msg => "configured_plugin_not_installed",
-                             name_vsn => NameVsn
-                            }),
+            ?SLOG(warning, #{
+                msg => "configured_plugin_not_installed",
+                name_vsn => NameVsn
+            }),
             list(Rest, All);
         {Front, [I | Rear]} ->
            [I | list(Rest, Front ++ Rear)]
     end.

 do_ensure_started(NameVsn) ->
-    tryit("start_plugins",
-          fun() ->
-                  Plugin = do_read_plugin(NameVsn),
-                  ok = load_code_start_apps(NameVsn, Plugin)
-          end).
+    tryit(
+        "start_plugins",
+        fun() ->
+            Plugin = do_read_plugin(NameVsn),
+            ok = load_code_start_apps(NameVsn, Plugin)
+        end
+    ).

 %% try the function, catch 'throw' exceptions as normal 'error' return
 %% other exceptions with stacktrace returned.

@@ -309,25 +335,28 @@ tryit(WhichOp, F) ->
     try
         F()
     catch
-        throw : Reason ->
+        throw:Reason ->
             %% thrown exceptions are known errors
             %% translate to a return value without stacktrace
             {error, Reason};
-        error : Reason : Stacktrace ->
+        error:Reason:Stacktrace ->
             %% unexpected errors, log stacktrace
-            ?SLOG(warning, #{ msg => "plugin_op_failed"
-                            , which_op => WhichOp
-                            , exception => Reason
-                            , stacktrace => Stacktrace
-                            }),
+            ?SLOG(warning, #{
+                msg => "plugin_op_failed",
+                which_op => WhichOp,
+                exception => Reason,
+                stacktrace => Stacktrace
+            }),
             {error, {failed, WhichOp}}
     end.

 %% read plugin info from the JSON file
 %% returns {ok, Info} or {error, Reason}
 read_plugin(NameVsn, Options) ->
-    tryit("read_plugin_info",
-          fun() -> {ok, do_read_plugin(NameVsn, Options)} end).
+    tryit(
+        "read_plugin_info",
+        fun() -> {ok, do_read_plugin(NameVsn, Options)} end
+    ).

 do_read_plugin(Plugin) -> do_read_plugin(Plugin, #{}).
@@ -339,10 +368,11 @@ do_read_plugin({file, InfoFile}, Options) ->
             Info1 = plugins_readme(NameVsn, Options, Info0),
             plugin_status(NameVsn, Info1);
         {error, Reason} ->
-            throw(#{error => "bad_info_file",
-                    path => InfoFile,
-                    return => Reason
-                   })
+            throw(#{
+                error => "bad_info_file",
+                path => InfoFile,
+                return => Reason
+            })
     end;
 do_read_plugin(NameVsn, Options) ->
     do_read_plugin({file, info_file(NameVsn)}, Options).

@@ -352,7 +382,8 @@ plugins_readme(NameVsn, #{fill_readme := true}, Info) ->
         {ok, Bin} -> Info#{readme => Bin};
         _ -> Info#{readme => <<>>}
     end;
-plugins_readme(_NameVsn, _Options, Info) -> Info.
+plugins_readme(_NameVsn, _Options, Info) ->
+    Info.

 plugin_status(NameVsn, Info) ->
     {AppName, _AppVsn} = parse_name_vsn(NameVsn),

@@ -368,74 +399,91 @@ plugin_status(NameVsn, Info) ->
     end,
     Configured = lists:filtermap(
         fun(#{name_vsn := Nv, enable := St}) ->
-            case bin(Nv) =:= bin(NameVsn) of
-                true -> {true, St};
-                false -> false
-            end
-        end, configured()),
-    ConfSt = case Configured of
-                 [] -> not_configured;
-                 [true] -> enabled;
-                 [false] -> disabled
-             end,
-    Info#{ running_status => RunningSt
-         , config_status => ConfSt
+            case bin(Nv) =:= bin(NameVsn) of
+                true -> {true, St};
+                false -> false
+            end
+        end,
+        configured()
+    ),
+    ConfSt =
+        case Configured of
+            [] -> not_configured;
+            [true] -> enabled;
+            [false] -> disabled
+        end,
+    Info#{
+        running_status => RunningSt,
+        config_status => ConfSt
     }.

 bin(A) when is_atom(A) -> atom_to_binary(A, utf8);
 bin(L) when is_list(L) -> unicode:characters_to_binary(L, utf8);
 bin(B) when is_binary(B) -> B.

-check_plugin(#{ <<"name">> := Name
-              , <<"rel_vsn">> := Vsn
-              , <<"rel_apps">> := Apps
-              , <<"description">> := _
-              } = Info, NameVsn, File) ->
+check_plugin(
+    #{
+        <<"name">> := Name,
+        <<"rel_vsn">> := Vsn,
+        <<"rel_apps">> := Apps,
+        <<"description">> := _
+    } = Info,
+    NameVsn,
+    File
+) ->
     case bin(NameVsn) =:= bin([Name, "-", Vsn]) of
         true ->
             try
-                [_ | _ ] = Apps, %% assert
+                %% assert
+                [_ | _] = Apps,
                 %% validate if the list is all <app>-<vsn> strings
                 lists:foreach(fun parse_name_vsn/1, Apps)
             catch
-                _ : _ ->
-                    throw(#{ error => "bad_rel_apps"
-                           , rel_apps => Apps
-                           , hint => "A non-empty string list of app_name-app_vsn format"
-                           })
+                _:_ ->
+                    throw(#{
+                        error => "bad_rel_apps",
+                        rel_apps => Apps,
+                        hint => "A non-empty string list of app_name-app_vsn format"
+                    })
             end,
             Info;
         false ->
-            throw(#{ error => "name_vsn_mismatch"
-                   , name_vsn => NameVsn
-                   , path => File
-                   , name => Name
-                   , rel_vsn => Vsn
-                   })
+            throw(#{
+                error => "name_vsn_mismatch",
+                name_vsn => NameVsn,
+                path => File,
+                name => Name,
+                rel_vsn => Vsn
+            })
     end;
 check_plugin(_What, NameVsn, File) ->
-    throw(#{ error => "bad_info_file_content"
-           , mandatory_fields => [rel_vsn, name, rel_apps, description]
-           , name_vsn => NameVsn
-           , path => File
-           }).
+    throw(#{
+        error => "bad_info_file_content",
+        mandatory_fields => [rel_vsn, name, rel_apps, description],
+        name_vsn => NameVsn,
+        path => File
+    }).

 load_code_start_apps(RelNameVsn, #{<<"rel_apps">> := Apps}) ->
     LibDir = filename:join([install_dir(), RelNameVsn]),
     RunningApps = running_apps(),
     %% load plugin apps and beam code
     AppNames =
-        lists:map(fun(AppNameVsn) ->
-            {AppName, AppVsn} = parse_name_vsn(AppNameVsn),
-            EbinDir = filename:join([LibDir, AppNameVsn, "ebin"]),
-            ok = load_plugin_app(AppName, AppVsn, EbinDir, RunningApps),
-            AppName
-        end, Apps),
+        lists:map(
+            fun(AppNameVsn) ->
+                {AppName, AppVsn} = parse_name_vsn(AppNameVsn),
+                EbinDir = filename:join([LibDir, AppNameVsn, "ebin"]),
+                ok = load_plugin_app(AppName, AppVsn, EbinDir, RunningApps),
+                AppName
+            end,
+            Apps
+        ),
     lists:foreach(fun start_app/1, AppNames).

 load_plugin_app(AppName, AppVsn, Ebin, RunningApps) ->
     case lists:keyfind(AppName, 1, RunningApps) of
-        false -> do_load_plugin_app(AppName, Ebin);
+        false ->
+            do_load_plugin_app(AppName, Ebin);
         {_, Vsn} ->
             case bin(Vsn) =:= bin(AppVsn) of
                 true ->
@@ -443,10 +491,12 @@ load_plugin_app(AppName, AppVsn, Ebin, RunningApps) ->
                     ok;
                 false ->
                     %% running but a different version
-                    ?SLOG(warning, #{msg => "plugin_app_already_running", name => AppName,
-                                     running_vsn => Vsn,
-                                     loading_vsn => AppVsn
-                                    })
+                    ?SLOG(warning, #{
+                        msg => "plugin_app_already_running",
+                        name => AppName,
+                        running_vsn => Vsn,
+                        loading_vsn => AppVsn
+                    })
             end
     end.

@@ -457,21 +507,31 @@ do_load_plugin_app(AppName, Ebin) ->
     Modules = filelib:wildcard(filename:join([Ebin, "*.beam"])),
     lists:foreach(
         fun(BeamFile) ->
-            Module = list_to_atom(filename:basename(BeamFile, ".beam")),
-            case code:load_file(Module) of
-                {module, _} -> ok;
-                {error, Reason} -> throw(#{error => "failed_to_load_plugin_beam",
-                                           path => BeamFile,
-                                           reason => Reason
-                                          })
-            end
-        end, Modules),
+            Module = list_to_atom(filename:basename(BeamFile, ".beam")),
+            case code:load_file(Module) of
+                {module, _} ->
+                    ok;
+                {error, Reason} ->
+                    throw(#{
+                        error => "failed_to_load_plugin_beam",
+                        path => BeamFile,
+                        reason => Reason
+                    })
+            end
+        end,
+        Modules
+    ),
     case application:load(AppName) of
-        ok -> ok;
-        {error, {already_loaded, _}} -> ok;
-        {error, Reason} -> throw(#{error => "failed_to_load_plugin_app",
-                                   name => AppName,
-                                   reason => Reason})
+        ok ->
+            ok;
+        {error, {already_loaded, _}} ->
+            ok;
+        {error, Reason} ->
+            throw(#{
+                error => "failed_to_load_plugin_app",
+                name => AppName,
+                reason => Reason
+            })
     end.

 start_app(App) ->

@@ -484,11 +544,12 @@ start_app(App) ->
             ?SLOG(debug, #{msg => "started_plugin_app", app => App}),
             ok;
         {error, {ErrApp, Reason}} ->
-            throw(#{error => "failed_to_start_plugin_app",
-                    app => App,
-                    err_app => ErrApp,
-                    reason => Reason
-                   })
+            throw(#{
+                error => "failed_to_start_plugin_app",
+                app => App,
+                err_app => ErrApp,
+                reason => Reason
+            })
     end.

 %% Stop all apps installed by the plugin package,

@@ -496,18 +557,22 @@ start_app(App) ->
 ensure_apps_stopped(#{<<"rel_apps">> := Apps}) ->
     %% load plugin apps and beam code
     AppsToStop =
-        lists:map(fun(NameVsn) ->
-            {AppName, _AppVsn} = parse_name_vsn(NameVsn),
-            AppName
-        end, Apps),
+        lists:map(
+            fun(NameVsn) ->
+                {AppName, _AppVsn} = parse_name_vsn(NameVsn),
+                AppName
+            end,
+            Apps
+        ),
     case tryit("stop_apps", fun() -> stop_apps(AppsToStop) end) of
         {ok, []} ->
             %% all apps stopped
             ok;
         {ok, Left} ->
-            ?SLOG(warning, #{msg => "unabled_to_stop_plugin_apps",
-                             apps => Left
-                            }),
+            ?SLOG(warning, #{
+                msg => "unabled_to_stop_plugin_apps",
+                apps => Left
+            }),
             ok;
         {error, Reason} ->
             {error, Reason}
@@ -516,9 +581,12 @@ ensure_apps_stopped(#{<<"rel_apps">> := Apps}) ->
 stop_apps(Apps) ->
     RunningApps = running_apps(),
     case do_stop_apps(Apps, [], RunningApps) of
-        {ok, []} -> {ok, []}; %% all stopped
-        {ok, Remain} when Remain =:= Apps -> {ok, Apps}; %% no progress
-        {ok, Remain} -> stop_apps(Remain) %% try again
+        %% all stopped
+        {ok, []} -> {ok, []};
+        %% no progress
+        {ok, Remain} when Remain =:= Apps -> {ok, Apps};
+        %% try again
+        {ok, Remain} -> stop_apps(Remain)
     end.

 do_stop_apps([], Remain, _AllApps) ->

@@ -553,11 +621,15 @@ unload_moudle_and_app(App) ->
     ok.

 is_needed_by_any(AppToStop, RunningApps) ->
-    lists:any(fun({RunningApp, _RunningAppVsn}) ->
-                  is_needed_by(AppToStop, RunningApp)
-              end, RunningApps).
+    lists:any(
+        fun({RunningApp, _RunningAppVsn}) ->
+            is_needed_by(AppToStop, RunningApp)
+        end,
+        RunningApps
+    ).

-is_needed_by(AppToStop, AppToStop) -> false;
+is_needed_by(AppToStop, AppToStop) ->
+    false;
 is_needed_by(AppToStop, RunningApp) ->
     case application:get_key(RunningApp, applications) of
         {ok, Deps} -> lists:member(AppToStop, Deps);

@@ -577,7 +649,8 @@ bin_key(Map) when is_map(Map) ->
     maps:fold(fun(K, V, Acc) -> Acc#{bin(K) => V} end, #{}, Map);
 bin_key(List = [#{} | _]) ->
     lists:map(fun(M) -> bin_key(M) end, List);
-bin_key(Term) -> Term.
+bin_key(Term) ->
+    Term.

 get_config(Key, Default) when is_atom(Key) ->
     get_config([Key], Default);

@@ -604,8 +677,10 @@ for_plugin(#{name_vsn := NameVsn, enable := true}, Fun) ->
         {error, Reason} -> [{NameVsn, Reason}]
     end;
 for_plugin(#{name_vsn := NameVsn, enable := false}, _Fun) ->
-    ?SLOG(debug, #{msg => "plugin_disabled",
-                   name_vsn => NameVsn}),
+    ?SLOG(debug, #{
+        msg => "plugin_disabled",
+        name_vsn => NameVsn
+    }),
     [].

 parse_name_vsn(NameVsn) when is_binary(NameVsn) ->

@@ -627,6 +702,9 @@ readme_file(NameVsn) ->
     filename:join([dir(NameVsn), "README.md"]).

 running_apps() ->
-    lists:map(fun({N, _, V}) ->
-        {N, V}
-    end, application:which_applications(infinity)).
+    lists:map(
+        fun({N, _, V}) ->
+            {N, V}
+        end,
+        application:which_applications(infinity)
+    ).
@@ -18,12 +18,14 @@

 -behaviour(application).

--export([ start/2
-        , stop/1
-        ]).
+-export([
+    start/2,
+    stop/1
+]).

 start(_Type, _Args) ->
-    ok = emqx_plugins:ensure_started(), %% load all pre-configured
+    %% load all pre-configured
+    ok = emqx_plugins:ensure_started(),
     {ok, Sup} = emqx_plugins_sup:start_link(),
     {ok, Sup}.
@@ -16,21 +16,23 @@

 -module(emqx_plugins_cli).

--export([ list/1
-        , describe/2
-        , ensure_installed/2
-        , ensure_uninstalled/2
-        , ensure_started/2
-        , ensure_stopped/2
-        , restart/2
-        , ensure_disabled/2
-        , ensure_enabled/3
-        ]).
+-export([
+    list/1,
+    describe/2,
+    ensure_installed/2,
+    ensure_uninstalled/2,
+    ensure_started/2,
+    ensure_stopped/2,
+    restart/2,
+    ensure_disabled/2,
+    ensure_enabled/3
+]).

 -include_lib("emqx/include/logger.hrl").

 -define(PRINT(EXPR, LOG_FUN),
-    print(NameVsn, fun()-> EXPR end(), LOG_FUN, ?FUNCTION_NAME)).
+    print(NameVsn, fun() -> EXPR end(), LOG_FUN, ?FUNCTION_NAME)
+).

 list(LogFun) ->
     LogFun("~ts~n", [to_json(emqx_plugins:list())]).

@@ -43,9 +45,11 @@ describe(NameVsn, LogFun) ->
             %% this should not happen unless the package is manually installed
             %% corrupted packages installed from emqx_plugins:ensure_installed
             %% should not leave behind corrupted files
-            ?SLOG(error, #{msg => "failed_to_describe_plugin",
-                           name_vsn => NameVsn,
-                           cause => Reason}),
+            ?SLOG(error, #{
+                msg => "failed_to_describe_plugin",
+                name_vsn => NameVsn,
+                cause => Reason
+            }),
             %% do nothing to the CLI console
             ok
     end.

@@ -75,14 +79,18 @@ to_json(Input) ->
     emqx_logger_jsonfmt:best_effort_json(Input).

 print(NameVsn, Res, LogFun, Action) ->
-    Obj = #{action => Action,
-            name_vsn => NameVsn},
+    Obj = #{
+        action => Action,
+        name_vsn => NameVsn
+    },
     JsonReady =
         case Res of
            ok ->
                Obj#{result => ok};
            {error, Reason} ->
-                Obj#{result => not_ok,
-                     cause => Reason}
+                Obj#{
+                    result => not_ok,
+                    cause => Reason
+                }
        end,
    LogFun("~ts~n", [to_json(JsonReady)]).
@@ -18,10 +18,11 @@

 -behaviour(hocon_schema).

--export([ roots/0
-        , fields/1
-        , namespace/0
-        ]).
+-export([
+    roots/0,
+    fields/1,
+    namespace/0
+]).

 -include_lib("hocon/include/hoconsc.hrl").
 -include("emqx_plugins.hrl").

@@ -31,31 +32,41 @@ namespace() -> "plugin".
 roots() -> [?CONF_ROOT].

 fields(?CONF_ROOT) ->
-    #{fields => root_fields(),
-      desc => ?DESC(?CONF_ROOT)
-     };
+    #{
+        fields => root_fields(),
+        desc => ?DESC(?CONF_ROOT)
+    };
 fields(state) ->
-    #{ fields => state_fields(),
-       desc => ?DESC(state)
-     }.
+    #{
+        fields => state_fields(),
+        desc => ?DESC(state)
+    }.

 state_fields() ->
-    [ {name_vsn,
-       hoconsc:mk(string(),
-                  #{ desc => ?DESC(name_vsn)
-                   , required => true
-                   })}
-    , {enable,
-       hoconsc:mk(boolean(),
-                  #{ desc => ?DESC(enable)
-                   , required => true
-                   })}
+    [
+        {name_vsn,
+            hoconsc:mk(
+                string(),
+                #{
+                    desc => ?DESC(name_vsn),
+                    required => true
+                }
+            )},
+        {enable,
+            hoconsc:mk(
+                boolean(),
+                #{
+                    desc => ?DESC(enable),
+                    required => true
+                }
+            )}
     ].

 root_fields() ->
-    [ {states, fun states/1}
-    , {install_dir, fun install_dir/1}
-    , {check_interval, fun check_interval/1}
+    [
+        {states, fun states/1},
+        {install_dir, fun install_dir/1},
+        {check_interval, fun check_interval/1}
     ].

 states(type) -> hoconsc:array(hoconsc:ref(?MODULE, state));

@@ -66,7 +77,8 @@ states(_) -> undefined.

 install_dir(type) -> string();
 install_dir(required) -> false;
-install_dir(default) -> "plugins"; %% runner's root dir
+%% runner's root dir
+install_dir(default) -> "plugins";
 install_dir(T) when T =/= desc -> undefined;
 install_dir(desc) -> ?DESC(install_dir).
@@ -29,7 +29,8 @@ init([]) ->
     %% TODO: Add monitor plugins change.
     Monitor = emqx_plugins_monitor,
     _Children = [
-        #{id => Monitor,
+        #{
+            id => Monitor,
             start => {Monitor, start_link, []},
             restart => permanent,
             shutdown => brutal_kill,
@@ -48,9 +48,12 @@ end_per_suite(Config) ->

 init_per_testcase(TestCase, Config) ->
     emqx_plugins:put_configured([]),
-    lists:foreach(fun(#{<<"name">> := Name, <<"rel_vsn">> := Vsn}) ->
-        emqx_plugins:purge(bin([Name, "-", Vsn]))
-    end, emqx_plugins:list()),
+    lists:foreach(
+        fun(#{<<"name">> := Name, <<"rel_vsn">> := Vsn}) ->
+            emqx_plugins:purge(bin([Name, "-", Vsn]))
+        end,
+        emqx_plugins:list()
+    ),
     ?MODULE:TestCase({init, Config}).

 end_per_testcase(TestCase, Config) ->

@@ -59,35 +62,46 @@ end_per_testcase(TestCase, Config) ->

 build_demo_plugin_package() ->
     build_demo_plugin_package(
-        #{ target_path => "_build/default/emqx_plugrel"
-         , release_name => "emqx_plugin_template"
-         , git_url => "https://github.com/emqx/emqx-plugin-template.git"
-         , vsn => ?EMQX_PLUGIN_TEMPLATE_VSN
-         , workdir => "demo_src"
-         , shdir => emqx_plugins:install_dir()
-         }).
+        #{
+            target_path => "_build/default/emqx_plugrel",
+            release_name => "emqx_plugin_template",
+            git_url => "https://github.com/emqx/emqx-plugin-template.git",
+            vsn => ?EMQX_PLUGIN_TEMPLATE_VSN,
+            workdir => "demo_src",
+            shdir => emqx_plugins:install_dir()
+        }
+    ).

-build_demo_plugin_package(#{ target_path := TargetPath
-                           , release_name := ReleaseName
-                           , git_url := GitUrl
-                           , vsn := PluginVsn
-                           , workdir := DemoWorkDir
-                           , shdir := WorkDir
-                           } = Opts) ->
+build_demo_plugin_package(
+    #{
+        target_path := TargetPath,
+        release_name := ReleaseName,
+        git_url := GitUrl,
+        vsn := PluginVsn,
+        workdir := DemoWorkDir,
+        shdir := WorkDir
+    } = Opts
+) ->
     BuildSh = filename:join([WorkDir, "build-demo-plugin.sh"]),
-    Cmd = string:join([ BuildSh
-                      , PluginVsn
-                      , TargetPath
-                      , ReleaseName
-                      , GitUrl
-                      , DemoWorkDir
-                      ],
-                      " "),
+    Cmd = string:join(
+        [
+            BuildSh,
+            PluginVsn,
+            TargetPath,
+            ReleaseName,
+            GitUrl,
+            DemoWorkDir
+        ],
+        " "
+    ),
     case emqx_run_sh:do(Cmd, [{cd, WorkDir}]) of
         {ok, _} ->
-            Pkg = filename:join([WorkDir, ReleaseName ++ "-" ++
-                                 PluginVsn ++
-                                 ?PACKAGE_SUFFIX]),
+            Pkg = filename:join([
+                WorkDir,
+                ReleaseName ++ "-" ++
+                    PluginVsn ++
+                    ?PACKAGE_SUFFIX
+            ]),
            case filelib:is_regular(Pkg) of
                true -> Opts#{package => Pkg};
                false -> error(#{reason => unexpected_build_result, not_found => Pkg})
|
@ -104,16 +118,19 @@ bin(B) when is_binary(B) -> B.
|
|||
t_demo_install_start_stop_uninstall({init, Config}) ->
|
||||
Opts = #{package := Package} = build_demo_plugin_package(),
|
||||
NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
|
||||
[ {name_vsn, NameVsn}
|
||||
, {plugin_opts, Opts}
|
||||
| Config
|
||||
[
|
||||
{name_vsn, NameVsn},
|
||||
{plugin_opts, Opts}
|
||||
| Config
|
||||
];
|
||||
t_demo_install_start_stop_uninstall({'end', _Config}) -> ok;
|
||||
t_demo_install_start_stop_uninstall({'end', _Config}) ->
|
||||
ok;
|
||||
t_demo_install_start_stop_uninstall(Config) ->
|
||||
NameVsn = proplists:get_value(name_vsn, Config),
|
||||
#{ release_name := ReleaseName
|
||||
, vsn := PluginVsn
|
||||
} = proplists:get_value(plugin_opts, Config),
|
||||
#{
|
||||
release_name := ReleaseName,
|
||||
vsn := PluginVsn
|
||||
} = proplists:get_value(plugin_opts, Config),
|
||||
ok = emqx_plugins:ensure_installed(NameVsn),
|
||||
%% idempotent
|
||||
ok = emqx_plugins:ensure_installed(NameVsn),
|
||||
|
|
@ -129,8 +146,10 @@ t_demo_install_start_stop_uninstall(Config) ->
|
|||
ok = assert_app_running(map_sets, true),
|
||||
|
||||
%% running app can not be un-installed
|
||||
?assertMatch({error, _},
|
||||
emqx_plugins:ensure_uninstalled(NameVsn)),
|
||||
?assertMatch(
|
||||
{error, _},
|
||||
emqx_plugins:ensure_uninstalled(NameVsn)
|
||||
),
|
||||
|
||||
%% stop
|
||||
ok = emqx_plugins:ensure_stopped(NameVsn),
|
||||
|
|
@ -143,9 +162,15 @@ t_demo_install_start_stop_uninstall(Config) ->
|
|||
%% still listed after stopped
|
||||
ReleaseNameBin = list_to_binary(ReleaseName),
|
||||
PluginVsnBin = list_to_binary(PluginVsn),
|
||||
?assertMatch([#{<<"name">> := ReleaseNameBin,
|
||||
<<"rel_vsn">> := PluginVsnBin
|
||||
}], emqx_plugins:list()),
|
||||
?assertMatch(
|
||||
[
|
||||
#{
|
||||
<<"name">> := ReleaseNameBin,
|
||||
<<"rel_vsn">> := PluginVsnBin
|
||||
}
|
||||
],
|
||||
emqx_plugins:list()
|
||||
),
|
||||
ok = emqx_plugins:ensure_uninstalled(NameVsn),
|
||||
?assertEqual([], emqx_plugins:list()),
|
||||
ok.
|
||||
|
|
@ -164,23 +189,29 @@ t_position({init, Config}) ->
|
|||
#{package := Package} = build_demo_plugin_package(),
|
||||
NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
|
||||
[{name_vsn, NameVsn} | Config];
|
||||
t_position({'end', _Config}) -> ok;
|
||||
t_position({'end', _Config}) ->
|
||||
ok;
|
||||
t_position(Config) ->
|
||||
NameVsn = proplists:get_value(name_vsn, Config),
|
||||
ok = emqx_plugins:ensure_installed(NameVsn),
|
||||
ok = emqx_plugins:ensure_enabled(NameVsn),
|
||||
FakeInfo = "name=position, rel_vsn=\"2\", rel_apps=[\"position-9\"],"
|
||||
"description=\"desc fake position app\"",
|
||||
FakeInfo =
|
||||
"name=position, rel_vsn=\"2\", rel_apps=[\"position-9\"],"
|
||||
"description=\"desc fake position app\"",
|
||||
PosApp2 = <<"position-2">>,
|
||||
ok = write_info_file(Config, PosApp2, FakeInfo),
|
||||
%% fake a disabled plugin in config
|
||||
ok = emqx_plugins:ensure_state(PosApp2, {before, NameVsn}, false),
|
||||
ListFun = fun() ->
|
||||
lists:map(fun(
|
||||
#{<<"name">> := Name, <<"rel_vsn">> := Vsn}) ->
|
||||
<<Name/binary, "-", Vsn/binary>>
|
||||
end, emqx_plugins:list())
|
||||
end,
|
||||
lists:map(
|
||||
fun(
|
||||
#{<<"name">> := Name, <<"rel_vsn">> := Vsn}
|
||||
) ->
|
||||
<<Name/binary, "-", Vsn/binary>>
|
||||
end,
|
||||
emqx_plugins:list()
|
||||
)
|
||||
end,
|
||||
?assertEqual([PosApp2, list_to_binary(NameVsn)], ListFun()),
|
||||
emqx_plugins:ensure_enabled(PosApp2, {behind, NameVsn}),
|
||||
?assertEqual([list_to_binary(NameVsn), PosApp2], ListFun()),
|
||||
|
|
@@ -197,13 +228,15 @@ t_start_restart_and_stop({init, Config}) ->
     #{package := Package} = build_demo_plugin_package(),
     NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
     [{name_vsn, NameVsn} | Config];
-t_start_restart_and_stop({'end', _Config}) -> ok;
+t_start_restart_and_stop({'end', _Config}) ->
+    ok;
 t_start_restart_and_stop(Config) ->
     NameVsn = proplists:get_value(name_vsn, Config),
     ok = emqx_plugins:ensure_installed(NameVsn),
     ok = emqx_plugins:ensure_enabled(NameVsn),
-    FakeInfo = "name=bar, rel_vsn=\"2\", rel_apps=[\"bar-9\"],"
-               "description=\"desc bar\"",
+    FakeInfo =
+        "name=bar, rel_vsn=\"2\", rel_apps=[\"bar-9\"],"
+        "description=\"desc bar\"",
     Bar2 = <<"bar-2">>,
     ok = write_info_file(Config, Bar2, FakeInfo),
     %% fake a disabled plugin in config

@@ -216,8 +249,10 @@ t_start_restart_and_stop(Config) ->
     %% fake enable bar-2
     ok = emqx_plugins:ensure_state(Bar2, rear, true),
     %% should cause an error
-    ?assertError(#{function := _, errors := [_ | _]},
-                 emqx_plugins:ensure_started()),
+    ?assertError(
+        #{function := _, errors := [_ | _]},
+        emqx_plugins:ensure_started()
+    ),
     %% but demo plugin should still be running
     assert_app_running(emqx_plugin_template, true),

@@ -255,9 +290,13 @@ t_enable_disable(Config) ->
     ?assertEqual([#{name_vsn => NameVsn, enable => false}], emqx_plugins:configured()),
     ok = emqx_plugins:ensure_enabled(bin(NameVsn)),
     ?assertEqual([#{name_vsn => NameVsn, enable => true}], emqx_plugins:configured()),
-    ?assertMatch({error, #{reason := "bad_plugin_config_status",
-                           hint := "disable_the_plugin_first"
-                          }}, emqx_plugins:ensure_uninstalled(NameVsn)),
+    ?assertMatch(
+        {error, #{
+            reason := "bad_plugin_config_status",
+            hint := "disable_the_plugin_first"
+        }},
+        emqx_plugins:ensure_uninstalled(NameVsn)
+    ),
     ok = emqx_plugins:ensure_disabled(bin(NameVsn)),
     ok = emqx_plugins:ensure_uninstalled(NameVsn),
     ?assertMatch({error, _}, emqx_plugins:ensure_enabled(NameVsn)),

@@ -271,20 +310,28 @@ assert_app_running(Name, false) ->
     AllApps = application:which_applications(),
     ?assertEqual(false, lists:keyfind(Name, 1, AllApps)).

-t_bad_tar_gz({init, Config}) -> Config;
-t_bad_tar_gz({'end', _Config}) -> ok;
+t_bad_tar_gz({init, Config}) ->
+    Config;
+t_bad_tar_gz({'end', _Config}) ->
+    ok;
 t_bad_tar_gz(Config) ->
     WorkDir = proplists:get_value(data_dir, Config),
     FakeTarTz = filename:join([WorkDir, "fake-vsn.tar.gz"]),
     ok = file:write_file(FakeTarTz, "a\n"),
-    ?assertMatch({error, #{reason := "bad_plugin_package",
-                           return := eof
-                          }},
-                 emqx_plugins:ensure_installed("fake-vsn")),
-    ?assertMatch({error, #{reason := "failed_to_extract_plugin_package",
-                           return := not_found
-                          }},
-                 emqx_plugins:ensure_installed("nonexisting")),
+    ?assertMatch(
+        {error, #{
+            reason := "bad_plugin_package",
+            return := eof
+        }},
+        emqx_plugins:ensure_installed("fake-vsn")
+    ),
+    ?assertMatch(
+        {error, #{
+            reason := "failed_to_extract_plugin_package",
+            return := not_found
+        }},
+        emqx_plugins:ensure_installed("nonexisting")
+    ),
     ?assertEqual([], emqx_plugins:list()),
     ok = emqx_plugins:delete_package("fake-vsn"),
     %% idempotent
@ -292,8 +339,10 @@ t_bad_tar_gz(Config) ->
|
|||
|
||||
%% create a corrupted .tar.gz
|
||||
%% failed install attempts should not leave behind extracted dir
|
||||
t_bad_tar_gz2({init, Config}) -> Config;
|
||||
t_bad_tar_gz2({'end', _Config}) -> ok;
|
||||
t_bad_tar_gz2({init, Config}) ->
|
||||
Config;
|
||||
t_bad_tar_gz2({'end', _Config}) ->
|
||||
ok;
|
||||
t_bad_tar_gz2(Config) ->
|
||||
WorkDir = proplists:get_value(data_dir, Config),
|
||||
NameVsn = "foo-0.2",
|
||||
|
|
@@ -310,45 +359,57 @@ t_bad_tar_gz2(Config) ->
    ?assertEqual({error, enoent}, file:read_file_info(emqx_plugins:dir(NameVsn))),
    ok = emqx_plugins:delete_package(NameVsn).

t_bad_info_json({init, Config}) -> Config;
t_bad_info_json({'end', _}) -> ok;
t_bad_info_json({init, Config}) ->
    Config;
t_bad_info_json({'end', _}) ->
    ok;
t_bad_info_json(Config) ->
    NameVsn = "test-2",
    ok = write_info_file(Config, NameVsn, "bad-syntax"),
    ?assertMatch({error, #{error := "bad_info_file",
                           return := {parse_error, _}
                          }},
                 emqx_plugins:describe(NameVsn)),
    ?assertMatch(
        {error, #{
            error := "bad_info_file",
            return := {parse_error, _}
        }},
        emqx_plugins:describe(NameVsn)
    ),
    ok = write_info_file(Config, NameVsn, "{\"bad\": \"obj\"}"),
    ?assertMatch({error, #{error := "bad_info_file_content",
                           mandatory_fields := _
                          }},
                 emqx_plugins:describe(NameVsn)),
    ?assertMatch(
        {error, #{
            error := "bad_info_file_content",
            mandatory_fields := _
        }},
        emqx_plugins:describe(NameVsn)
    ),
    ?assertEqual([], emqx_plugins:list()),
    emqx_plugins:purge(NameVsn),
    ok.
t_elixir_plugin({init, Config}) ->
    Opts0 =
        #{ target_path => "_build/prod/plugrelex/elixir_plugin_template"
         , release_name => "elixir_plugin_template"
         , git_url => "https://github.com/emqx/emqx-elixir-plugin.git"
         , vsn => ?EMQX_ELIXIR_PLUGIN_TEMPLATE_VSN
         , workdir => "demo_src_elixir"
         , shdir => emqx_plugins:install_dir()
         },
        #{
            target_path => "_build/prod/plugrelex/elixir_plugin_template",
            release_name => "elixir_plugin_template",
            git_url => "https://github.com/emqx/emqx-elixir-plugin.git",
            vsn => ?EMQX_ELIXIR_PLUGIN_TEMPLATE_VSN,
            workdir => "demo_src_elixir",
            shdir => emqx_plugins:install_dir()
        },
    Opts = #{package := Package} = build_demo_plugin_package(Opts0),
    NameVsn = filename:basename(Package, ?PACKAGE_SUFFIX),
    [ {name_vsn, NameVsn}
    , {plugin_opts, Opts}
    | Config
    [
        {name_vsn, NameVsn},
        {plugin_opts, Opts}
        | Config
    ];
t_elixir_plugin({'end', _Config}) -> ok;
t_elixir_plugin({'end', _Config}) ->
    ok;
t_elixir_plugin(Config) ->
    NameVsn = proplists:get_value(name_vsn, Config),
    #{ release_name := ReleaseName
     , vsn := PluginVsn
     } = proplists:get_value(plugin_opts, Config),
    #{
        release_name := ReleaseName,
        vsn := PluginVsn
    } = proplists:get_value(plugin_opts, Config),
    ok = emqx_plugins:ensure_installed(NameVsn),
    %% idempotent
    ok = emqx_plugins:ensure_installed(NameVsn),
@@ -368,8 +429,10 @@ t_elixir_plugin(Config) ->
    3 = 'Elixir.Kernel':'+'(1, 2),

    %% running app can not be un-installed
    ?assertMatch({error, _},
                 emqx_plugins:ensure_uninstalled(NameVsn)),
    ?assertMatch(
        {error, _},
        emqx_plugins:ensure_uninstalled(NameVsn)
    ),

    %% stop
    ok = emqx_plugins:ensure_stopped(NameVsn),
@@ -382,9 +445,15 @@ t_elixir_plugin(Config) ->
    %% still listed after stopped
    ReleaseNameBin = list_to_binary(ReleaseName),
    PluginVsnBin = list_to_binary(PluginVsn),
    ?assertMatch([#{<<"name">> := ReleaseNameBin,
                    <<"rel_vsn">> := PluginVsnBin
                   }], emqx_plugins:list()),
    ?assertMatch(
        [
            #{
                <<"name">> := ReleaseNameBin,
                <<"rel_vsn">> := PluginVsnBin
            }
        ],
        emqx_plugins:list()
    ),
    ok = emqx_plugins:ensure_uninstalled(NameVsn),
    ?assertEqual([], emqx_plugins:list()),
    ok.

@@ -23,23 +23,26 @@

ensure_configured_test_todo() ->
    meck_emqx(),
    try test_ensure_configured()
    after emqx_plugins:put_configured([])
    try
        test_ensure_configured()
    after
        emqx_plugins:put_configured([])
    end,
    meck:unload(emqx).

test_ensure_configured() ->
    ok = emqx_plugins:put_configured([]),
    P1 =#{name_vsn => "p-1", enable => true},
    P2 =#{name_vsn => "p-2", enable => true},
    P3 =#{name_vsn => "p-3", enable => false},
    P1 = #{name_vsn => "p-1", enable => true},
    P2 = #{name_vsn => "p-2", enable => true},
    P3 = #{name_vsn => "p-3", enable => false},
    emqx_plugins:ensure_configured(P1, front),
    emqx_plugins:ensure_configured(P2, {before, <<"p-1">>}),
    emqx_plugins:ensure_configured(P3, {before, <<"p-1">>}),
    ?assertEqual([P2, P3, P1], emqx_plugins:configured()),
    ?assertThrow(#{error := "position_anchor_plugin_not_configured"},
                 emqx_plugins:ensure_configured(P3, {before, <<"unknown-x">>})).
    ?assertThrow(
        #{error := "position_anchor_plugin_not_configured"},
        emqx_plugins:ensure_configured(P3, {before, <<"unknown-x">>})
    ).

read_plugin_test() ->
    meck_emqx(),
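The positional insert exercised by the test above can be sketched as follows; insert_configured/3 is a hypothetical helper for illustration, not the actual emqx_plugins code:

%% Hypothetical sketch: insert Item at the front, or just before the anchor;
%% throw the error the test expects when the anchor is not configured.
insert_configured(Item, front, Configured) ->
    [Item | Configured];
insert_configured(Item, {before, NameVsn}, Configured) ->
    Split = lists:splitwith(
        fun(#{name_vsn := N}) -> iolist_to_binary([N]) =/= NameVsn end,
        Configured
    ),
    case Split of
        {_Front, []} ->
            throw(#{error => "position_anchor_plugin_not_configured"});
        {Front, Rear} ->
            Front ++ [Item | Rear]
    end.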
@@ -47,16 +50,20 @@ read_plugin_test() ->
        fun(_Dir) ->
            NameVsn = "bar-5",
            InfoFile = emqx_plugins:info_file(NameVsn),
            FakeInfo = "name=bar, rel_vsn=\"5\", rel_apps=[justname_no_vsn],"
                       "description=\"desc bar\"",
            FakeInfo =
                "name=bar, rel_vsn=\"5\", rel_apps=[justname_no_vsn],"
                "description=\"desc bar\"",
            try
                ok = write_file(InfoFile, FakeInfo),
                ?assertMatch({error, #{error := "bad_rel_apps"}},
                             emqx_plugins:read_plugin(NameVsn, #{}))
                ?assertMatch(
                    {error, #{error := "bad_rel_apps"}},
                    emqx_plugins:read_plugin(NameVsn, #{})
                )
            after
                emqx_plugins:purge(NameVsn)
            end
        end),
        end
    ),
    meck:unload(emqx).

with_rand_install_dir(F) ->
@@ -91,7 +98,8 @@ delete_package_test() ->
            Dir = File,
            ok = filelib:ensure_dir(filename:join([Dir, "foo"])),
            ?assertMatch({error, _}, emqx_plugins:delete_package("a-1"))
        end),
        end
    ),
    meck:unload(emqx).

%% purge plugin's install dir should mostly work and return ok
@@ -110,15 +118,19 @@ purge_test() ->
            %% write a file for the dir path
            ok = file:write_file(Dir, "a"),
            ?assertEqual(ok, emqx_plugins:purge("a-1"))
        end),
        end
    ),
    meck:unload(emqx).

meck_emqx() ->
    meck:new(emqx, [unstick, passthrough]),
    meck:expect(emqx, update_config,
    meck:expect(
        emqx,
        update_config,
        fun(Path, Values, _Opts) ->
            emqx_config:put(Path, Values)
        end),
        end
    ),
    %meck:expect(emqx, get_config,
    %            fun(KeyPath, Default) ->
    %                Map = emqx:get_raw_config(KeyPath, Default),

@@ -1,23 +1,32 @@
%% -*- mode: erlang -*-

{deps,
    [ {emqx, {path, "../emqx"}},
      %% FIXME: tag this as v3.1.3
      {prometheus, {git, "https://github.com/deadtrickster/prometheus.erl", {tag, "v4.8.1"}}},
      {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.3"}}}
    ]}.
{deps, [
    {emqx, {path, "../emqx"}},
    %% FIXME: tag this as v3.1.3
    {prometheus, {git, "https://github.com/deadtrickster/prometheus.erl", {tag, "v4.8.1"}}},
    {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.27.4"}}}
]}.

{edoc_opts, [{preprocess, true}]}.
{erl_opts, [warn_unused_vars,
            warn_shadow_vars,
            warn_unused_import,
            warn_obsolete_guard,
            debug_info,
            {parse_transform}]}.
{erl_opts, [
    warn_unused_vars,
    warn_shadow_vars,
    warn_unused_import,
    warn_obsolete_guard,
    debug_info,
    {parse_transform}
]}.

{xref_checks, [undefined_function_calls, undefined_functions,
               locals_not_used, deprecated_function_calls,
               warnings_as_errors, deprecated_functions]}.
{xref_checks, [
    undefined_function_calls,
    undefined_functions,
    locals_not_used,
    deprecated_function_calls,
    warnings_as_errors,
    deprecated_functions
]}.
{cover_enabled, true}.
{cover_opts, [verbose]}.
{cover_export_enabled, true}.

{project_plugins, [erlfmt]}.

@ -1,15 +1,17 @@
|
|||
%% -*- mode: erlang -*-
|
||||
{application, emqx_prometheus,
|
||||
[{description, "Prometheus for EMQX"},
|
||||
{vsn, "5.0.0"}, % strict semver, bump manually!
|
||||
{modules, []},
|
||||
{registered, [emqx_prometheus_sup]},
|
||||
{applications, [kernel,stdlib,prometheus,emqx]},
|
||||
{mod, {emqx_prometheus_app,[]}},
|
||||
{env, []},
|
||||
{licenses, ["Apache-2.0"]},
|
||||
{maintainers, ["EMQX Team <contact@emqx.io>"]},
|
||||
{links, [{"Homepage", "https://emqx.io/"},
|
||||
{"Github", "https://github.com/emqx/emqx-prometheus"}
|
||||
]}
|
||||
]}.
|
||||
{application, emqx_prometheus, [
|
||||
{description, "Prometheus for EMQX"},
|
||||
% strict semver, bump manually!
|
||||
{vsn, "5.0.0"},
|
||||
{modules, []},
|
||||
{registered, [emqx_prometheus_sup]},
|
||||
{applications, [kernel, stdlib, prometheus, emqx]},
|
||||
{mod, {emqx_prometheus_app, []}},
|
||||
{env, []},
|
||||
{licenses, ["Apache-2.0"]},
|
||||
{maintainers, ["EMQX Team <contact@emqx.io>"]},
|
||||
{links, [
|
||||
{"Homepage", "https://emqx.io/"},
|
||||
{"Github", "https://github.com/emqx/emqx-prometheus"}
|
||||
]}
|
||||
]}.
|
||||
|
|
|
|||
|
|
@ -28,38 +28,44 @@
|
|||
-include_lib("prometheus/include/prometheus_model.hrl").
|
||||
-include_lib("emqx/include/logger.hrl").
|
||||
|
||||
-import(prometheus_model_helpers,
|
||||
[ create_mf/5
|
||||
, gauge_metric/1
|
||||
, counter_metric/1
|
||||
]).
|
||||
-import(
|
||||
prometheus_model_helpers,
|
||||
[
|
||||
create_mf/5,
|
||||
gauge_metric/1,
|
||||
counter_metric/1
|
||||
]
|
||||
).
|
||||
|
||||
-export([ update/1
|
||||
, start/0
|
||||
, stop/0
|
||||
, restart/0
|
||||
% for rpc
|
||||
, do_start/0
|
||||
, do_stop/0
|
||||
]).
|
||||
-export([
|
||||
update/1,
|
||||
start/0,
|
||||
stop/0,
|
||||
restart/0,
|
||||
% for rpc
|
||||
do_start/0,
|
||||
do_stop/0
|
||||
]).
|
||||
|
||||
%% APIs
|
||||
-export([start_link/1]).
|
||||
|
||||
%% gen_server callbacks
|
||||
-export([ init/1
|
||||
, handle_call/3
|
||||
, handle_cast/2
|
||||
, handle_info/2
|
||||
, code_change/3
|
||||
, terminate/2
|
||||
]).
|
||||
-export([
|
||||
init/1,
|
||||
handle_call/3,
|
||||
handle_cast/2,
|
||||
handle_info/2,
|
||||
code_change/3,
|
||||
terminate/2
|
||||
]).
|
||||
|
||||
%% prometheus_collector callback
|
||||
-export([ deregister_cleanup/1
|
||||
, collect_mf/2
|
||||
, collect_metrics/2
|
||||
]).
|
||||
-export([
|
||||
deregister_cleanup/1,
|
||||
collect_mf/2,
|
||||
collect_metrics/2
|
||||
]).
|
||||
|
||||
-export([collect/1]).
|
||||
|
||||
|
|
@@ -72,8 +78,13 @@
%%--------------------------------------------------------------------
%% update new config
update(Config) ->
    case emqx_conf:update([prometheus], Config,
                          #{rawconf_with_defaults => true, override_to => cluster}) of
    case
        emqx_conf:update(
            [prometheus],
            Config,
            #{rawconf_with_defaults => true, override_to => cluster}
        )
    of
        {ok, #{raw_config := NewConfigRows}} ->
            case maps:get(<<"enable">>, Config, true) of
                true ->
@@ -131,13 +142,12 @@ handle_call(_Msg, _From, State) ->
handle_cast(_Msg, State) ->
    {noreply, State}.

handle_info({timeout, R, ?TIMER_MSG}, State = #state{timer=R, push_gateway=Uri}) ->
handle_info({timeout, R, ?TIMER_MSG}, State = #state{timer = R, push_gateway = Uri}) ->
    [Name, Ip] = string:tokens(atom_to_list(node()), "@"),
    Url = lists:concat([Uri, "/metrics/job/", Name, "/instance/",Name, "~", Ip]),
    Url = lists:concat([Uri, "/metrics/job/", Name, "/instance/", Name, "~", Ip]),
    Data = prometheus_text_format:format(),
    httpc:request(post, {Url, [], "text/plain", Data}, [{autoredirect, true}], []),
    {noreply, ensure_timer(State)};

handle_info(_Msg, State) ->
    {noreply, State}.
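The handler above pushes once per ?TIMER_MSG and then re-arms the timer, since erlang:start_timer/3 delivers its {timeout, Ref, Msg} message only once. A minimal sketch of the assumed ensure_timer/1 (the interval field name is an assumption; only timer and push_gateway are visible in this hunk):

%% Re-arm the one-shot push timer; 'interval' is an assumed #state{} field.
ensure_timer(State = #state{interval = Interval}) ->
    State#state{timer = erlang:start_timer(Interval, self(), ?TIMER_MSG)}.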
@@ -176,14 +186,15 @@ collect(<<"json">>) ->
    Metrics = emqx_metrics:all(),
    Stats = emqx_stats:getstats(),
    VMData = emqx_vm_data(),
    #{stats => maps:from_list([collect_stats(Name, Stats) || Name <- emqx_stats()]),
      metrics => maps:from_list([collect_stats(Name, VMData) || Name <- emqx_vm()]),
      packets => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_packets()]),
      messages => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_messages()]),
      delivery => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_delivery()]),
      client => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_client()]),
      session => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_session()])};

    #{
        stats => maps:from_list([collect_stats(Name, Stats) || Name <- emqx_stats()]),
        metrics => maps:from_list([collect_stats(Name, VMData) || Name <- emqx_vm()]),
        packets => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_packets()]),
        messages => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_messages()]),
        delivery => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_delivery()]),
        client => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_client()]),
        session => maps:from_list([collect_stats(Name, Metrics) || Name <- emqx_metrics_session()])
    };
collect(<<"prometheus">>) ->
    prometheus_text_format:format().
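collect_stats/2 is applied above to every metric name in each family to build the JSON maps, so it must return a {Key, Value} pair. A plausible sketch under that assumption; stats_key/1 is a hypothetical mapping from the exported name (e.g. emqx_messages_sent) back to the internal key ('messages.sent'):

%% Hypothetical: pair an exported metric name with its current value.
collect_stats(Name, Stats) ->
    Key = stats_key(Name),
    {Name, proplists:get_value(Key, Stats, 0)}.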
@ -219,13 +230,11 @@ emqx_collect(emqx_connections_count, Stats) ->
|
|||
gauge_metric(?C('connections.count', Stats));
|
||||
emqx_collect(emqx_connections_max, Stats) ->
|
||||
gauge_metric(?C('connections.max', Stats));
|
||||
|
||||
%% sessions
|
||||
emqx_collect(emqx_sessions_count, Stats) ->
|
||||
gauge_metric(?C('sessions.count', Stats));
|
||||
emqx_collect(emqx_sessions_max, Stats) ->
|
||||
gauge_metric(?C('sessions.max', Stats));
|
||||
|
||||
%% pub/sub stats
|
||||
emqx_collect(emqx_topics_count, Stats) ->
|
||||
gauge_metric(?C('topics.count', Stats));
|
||||
|
|
@ -247,13 +256,11 @@ emqx_collect(emqx_subscriptions_shared_count, Stats) ->
|
|||
gauge_metric(?C('subscriptions.shared.count', Stats));
|
||||
emqx_collect(emqx_subscriptions_shared_max, Stats) ->
|
||||
gauge_metric(?C('subscriptions.shared.max', Stats));
|
||||
|
||||
%% retained
|
||||
emqx_collect(emqx_retained_count, Stats) ->
|
||||
gauge_metric(?C('retained.count', Stats));
|
||||
emqx_collect(emqx_retained_max, Stats) ->
|
||||
gauge_metric(?C('retained.max', Stats));
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Metrics - packets & bytes
|
||||
|
||||
|
|
@ -262,13 +269,11 @@ emqx_collect(emqx_bytes_received, Metrics) ->
|
|||
counter_metric(?C('bytes.received', Metrics));
|
||||
emqx_collect(emqx_bytes_sent, Metrics) ->
|
||||
counter_metric(?C('bytes.sent', Metrics));
|
||||
|
||||
%% received.sent
|
||||
emqx_collect(emqx_packets_received, Metrics) ->
|
||||
counter_metric(?C('packets.received', Metrics));
|
||||
emqx_collect(emqx_packets_sent, Metrics) ->
|
||||
counter_metric(?C('packets.sent', Metrics));
|
||||
|
||||
%% connect
|
||||
emqx_collect(emqx_packets_connect, Metrics) ->
|
||||
counter_metric(?C('packets.connect.received', Metrics));
|
||||
|
|
@ -278,7 +283,6 @@ emqx_collect(emqx_packets_connack_error, Metrics) ->
|
|||
counter_metric(?C('packets.connack.error', Metrics));
|
||||
emqx_collect(emqx_packets_connack_auth_error, Metrics) ->
|
||||
counter_metric(?C('packets.connack.auth_error', Metrics));
|
||||
|
||||
%% sub.unsub
|
||||
emqx_collect(emqx_packets_subscribe_received, Metrics) ->
|
||||
counter_metric(?C('packets.subscribe.received', Metrics));
|
||||
|
|
@ -294,7 +298,6 @@ emqx_collect(emqx_packets_unsubscribe_error, Metrics) ->
|
|||
counter_metric(?C('packets.unsubscribe.error', Metrics));
|
||||
emqx_collect(emqx_packets_unsuback_sent, Metrics) ->
|
||||
counter_metric(?C('packets.unsuback.sent', Metrics));
|
||||
|
||||
%% publish.puback
|
||||
emqx_collect(emqx_packets_publish_received, Metrics) ->
|
||||
counter_metric(?C('packets.publish.received', Metrics));
|
||||
|
|
@ -308,7 +311,6 @@ emqx_collect(emqx_packets_publish_auth_error, Metrics) ->
|
|||
counter_metric(?C('packets.publish.auth_error', Metrics));
|
||||
emqx_collect(emqx_packets_publish_dropped, Metrics) ->
|
||||
counter_metric(?C('packets.publish.dropped', Metrics));
|
||||
|
||||
%% puback
|
||||
emqx_collect(emqx_packets_puback_received, Metrics) ->
|
||||
counter_metric(?C('packets.puback.received', Metrics));
|
||||
|
|
@ -318,7 +320,6 @@ emqx_collect(emqx_packets_puback_inuse, Metrics) ->
|
|||
counter_metric(?C('packets.puback.inuse', Metrics));
|
||||
emqx_collect(emqx_packets_puback_missed, Metrics) ->
|
||||
counter_metric(?C('packets.puback.missed', Metrics));
|
||||
|
||||
%% pubrec
|
||||
emqx_collect(emqx_packets_pubrec_received, Metrics) ->
|
||||
counter_metric(?C('packets.pubrec.received', Metrics));
|
||||
|
|
@ -328,7 +329,6 @@ emqx_collect(emqx_packets_pubrec_inuse, Metrics) ->
|
|||
counter_metric(?C('packets.pubrec.inuse', Metrics));
|
||||
emqx_collect(emqx_packets_pubrec_missed, Metrics) ->
|
||||
counter_metric(?C('packets.pubrec.missed', Metrics));
|
||||
|
||||
%% pubrel
|
||||
emqx_collect(emqx_packets_pubrel_received, Metrics) ->
|
||||
counter_metric(?C('packets.pubrel.received', Metrics));
|
||||
|
|
@ -336,7 +336,6 @@ emqx_collect(emqx_packets_pubrel_sent, Metrics) ->
|
|||
counter_metric(?C('packets.pubrel.sent', Metrics));
|
||||
emqx_collect(emqx_packets_pubrel_missed, Metrics) ->
|
||||
counter_metric(?C('packets.pubrel.missed', Metrics));
|
||||
|
||||
%% pubcomp
|
||||
emqx_collect(emqx_packets_pubcomp_received, Metrics) ->
|
||||
counter_metric(?C('packets.pubcomp.received', Metrics));
|
||||
|
|
@ -346,77 +345,59 @@ emqx_collect(emqx_packets_pubcomp_inuse, Metrics) ->
|
|||
counter_metric(?C('packets.pubcomp.inuse', Metrics));
|
||||
emqx_collect(emqx_packets_pubcomp_missed, Metrics) ->
|
||||
counter_metric(?C('packets.pubcomp.missed', Metrics));
|
||||
|
||||
%% pingreq
|
||||
emqx_collect(emqx_packets_pingreq_received, Metrics) ->
|
||||
counter_metric(?C('packets.pingreq.received', Metrics));
|
||||
emqx_collect(emqx_packets_pingresp_sent, Metrics) ->
|
||||
counter_metric(?C('packets.pingresp.sent', Metrics));
|
||||
|
||||
%% disconnect
|
||||
emqx_collect(emqx_packets_disconnect_received, Metrics) ->
|
||||
counter_metric(?C('packets.disconnect.received', Metrics));
|
||||
emqx_collect(emqx_packets_disconnect_sent, Metrics) ->
|
||||
counter_metric(?C('packets.disconnect.sent', Metrics));
|
||||
|
||||
%% auth
|
||||
emqx_collect(emqx_packets_auth_received, Metrics) ->
|
||||
counter_metric(?C('packets.auth.received', Metrics));
|
||||
emqx_collect(emqx_packets_auth_sent, Metrics) ->
|
||||
counter_metric(?C('packets.auth.sent', Metrics));
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Metrics - messages
|
||||
|
||||
%% messages
|
||||
emqx_collect(emqx_messages_received, Metrics) ->
|
||||
counter_metric(?C('messages.received', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_sent, Metrics) ->
|
||||
counter_metric(?C('messages.sent', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_qos0_received, Metrics) ->
|
||||
counter_metric(?C('messages.qos0.received', Metrics));
|
||||
emqx_collect(emqx_messages_qos0_sent, Metrics) ->
|
||||
counter_metric(?C('messages.qos0.sent', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_qos1_received, Metrics) ->
|
||||
counter_metric(?C('messages.qos1.received', Metrics));
|
||||
emqx_collect(emqx_messages_qos1_sent, Metrics) ->
|
||||
counter_metric(?C('messages.qos1.sent', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_qos2_received, Metrics) ->
|
||||
counter_metric(?C('messages.qos2.received', Metrics));
|
||||
emqx_collect(emqx_messages_qos2_sent, Metrics) ->
|
||||
counter_metric(?C('messages.qos2.sent', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_publish, Metrics) ->
|
||||
counter_metric(?C('messages.publish', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_dropped, Metrics) ->
|
||||
counter_metric(?C('messages.dropped', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_dropped_expired, Metrics) ->
|
||||
counter_metric(?C('messages.dropped.await_pubrel_timeout', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_dropped_no_subscribers, Metrics) ->
|
||||
counter_metric(?C('messages.dropped.no_subscribers', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_forward, Metrics) ->
|
||||
counter_metric(?C('messages.forward', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_retained, Metrics) ->
|
||||
counter_metric(?C('messages.retained', Metrics));
|
||||
|
||||
emqx_collect(emqx_messages_delayed, Stats) ->
|
||||
counter_metric(?C('messages.delayed', Stats));
|
||||
|
||||
emqx_collect(emqx_messages_delivered, Stats) ->
|
||||
counter_metric(?C('messages.delivered', Stats));
|
||||
|
||||
emqx_collect(emqx_messages_acked, Stats) ->
|
||||
counter_metric(?C('messages.acked', Stats));
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Metrics - delivery
|
||||
|
||||
|
|
@ -432,7 +413,6 @@ emqx_collect(emqx_delivery_dropped_queue_full, Stats) ->
|
|||
counter_metric(?C('delivery.dropped.queue_full', Stats));
|
||||
emqx_collect(emqx_delivery_dropped_expired, Stats) ->
|
||||
counter_metric(?C('delivery.dropped.expired', Stats));
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Metrics - client
|
||||
|
||||
|
|
@ -450,7 +430,6 @@ emqx_collect(emqx_client_unsubscribe, Stats) ->
|
|||
counter_metric(?C('client.unsubscribe', Stats));
|
||||
emqx_collect(emqx_client_disconnected, Stats) ->
|
||||
counter_metric(?C('client.disconnected', Stats));
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Metrics - session
|
||||
|
||||
|
|
@ -464,31 +443,23 @@ emqx_collect(emqx_session_discarded, Stats) ->
|
|||
counter_metric(?C('session.discarded', Stats));
|
||||
emqx_collect(emqx_session_terminated, Stats) ->
|
||||
counter_metric(?C('session.terminated', Stats));
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% VM
|
||||
|
||||
emqx_collect(emqx_vm_cpu_use, VMData) ->
|
||||
gauge_metric(?C(cpu_use, VMData));
|
||||
|
||||
emqx_collect(emqx_vm_cpu_idle, VMData) ->
|
||||
gauge_metric(?C(cpu_idle, VMData));
|
||||
|
||||
emqx_collect(emqx_vm_run_queue, VMData) ->
|
||||
gauge_metric(?C(run_queue, VMData));
|
||||
|
||||
emqx_collect(emqx_vm_process_messages_in_queues, VMData) ->
|
||||
gauge_metric(?C(process_total_messages, VMData));
|
||||
|
||||
emqx_collect(emqx_vm_total_memory, VMData) ->
|
||||
gauge_metric(?C(total_memory, VMData));
|
||||
|
||||
emqx_collect(emqx_vm_used_memory, VMData) ->
|
||||
gauge_metric(?C(used_memory, VMData));
|
||||
|
||||
emqx_collect(emqx_cluster_nodes_running, ClusterData) ->
|
||||
gauge_metric(?C(nodes_running, ClusterData));
|
||||
|
||||
emqx_collect(emqx_cluster_nodes_stopped, ClusterData) ->
|
||||
gauge_metric(?C(nodes_stopped, ClusterData)).
|
||||
|
||||
|
|
@ -497,142 +468,157 @@ emqx_collect(emqx_cluster_nodes_stopped, ClusterData) ->
|
|||
%%--------------------------------------------------------------------
|
||||
|
||||
emqx_stats() ->
|
||||
[ emqx_connections_count
|
||||
, emqx_connections_max
|
||||
, emqx_sessions_count
|
||||
, emqx_sessions_max
|
||||
, emqx_topics_count
|
||||
, emqx_topics_max
|
||||
, emqx_suboptions_count
|
||||
, emqx_suboptions_max
|
||||
, emqx_subscribers_count
|
||||
, emqx_subscribers_max
|
||||
, emqx_subscriptions_count
|
||||
, emqx_subscriptions_max
|
||||
, emqx_subscriptions_shared_count
|
||||
, emqx_subscriptions_shared_max
|
||||
, emqx_retained_count
|
||||
, emqx_retained_max
|
||||
[
|
||||
emqx_connections_count,
|
||||
emqx_connections_max,
|
||||
emqx_sessions_count,
|
||||
emqx_sessions_max,
|
||||
emqx_topics_count,
|
||||
emqx_topics_max,
|
||||
emqx_suboptions_count,
|
||||
emqx_suboptions_max,
|
||||
emqx_subscribers_count,
|
||||
emqx_subscribers_max,
|
||||
emqx_subscriptions_count,
|
||||
emqx_subscriptions_max,
|
||||
emqx_subscriptions_shared_count,
|
||||
emqx_subscriptions_shared_max,
|
||||
emqx_retained_count,
|
||||
emqx_retained_max
|
||||
].
|
||||
|
||||
emqx_metrics_packets() ->
|
||||
[ emqx_bytes_received
|
||||
, emqx_bytes_sent
|
||||
, emqx_packets_received
|
||||
, emqx_packets_sent
|
||||
, emqx_packets_connect
|
||||
, emqx_packets_connack_sent
|
||||
, emqx_packets_connack_error
|
||||
, emqx_packets_connack_auth_error
|
||||
, emqx_packets_publish_received
|
||||
, emqx_packets_publish_sent
|
||||
, emqx_packets_publish_inuse
|
||||
, emqx_packets_publish_error
|
||||
, emqx_packets_publish_auth_error
|
||||
, emqx_packets_publish_dropped
|
||||
, emqx_packets_puback_received
|
||||
, emqx_packets_puback_sent
|
||||
, emqx_packets_puback_inuse
|
||||
, emqx_packets_puback_missed
|
||||
, emqx_packets_pubrec_received
|
||||
, emqx_packets_pubrec_sent
|
||||
, emqx_packets_pubrec_inuse
|
||||
, emqx_packets_pubrec_missed
|
||||
, emqx_packets_pubrel_received
|
||||
, emqx_packets_pubrel_sent
|
||||
, emqx_packets_pubrel_missed
|
||||
, emqx_packets_pubcomp_received
|
||||
, emqx_packets_pubcomp_sent
|
||||
, emqx_packets_pubcomp_inuse
|
||||
, emqx_packets_pubcomp_missed
|
||||
, emqx_packets_subscribe_received
|
||||
, emqx_packets_subscribe_error
|
||||
, emqx_packets_subscribe_auth_error
|
||||
, emqx_packets_suback_sent
|
||||
, emqx_packets_unsubscribe_received
|
||||
, emqx_packets_unsubscribe_error
|
||||
, emqx_packets_unsuback_sent
|
||||
, emqx_packets_pingreq_received
|
||||
, emqx_packets_pingresp_sent
|
||||
, emqx_packets_disconnect_received
|
||||
, emqx_packets_disconnect_sent
|
||||
, emqx_packets_auth_received
|
||||
, emqx_packets_auth_sent
|
||||
[
|
||||
emqx_bytes_received,
|
||||
emqx_bytes_sent,
|
||||
emqx_packets_received,
|
||||
emqx_packets_sent,
|
||||
emqx_packets_connect,
|
||||
emqx_packets_connack_sent,
|
||||
emqx_packets_connack_error,
|
||||
emqx_packets_connack_auth_error,
|
||||
emqx_packets_publish_received,
|
||||
emqx_packets_publish_sent,
|
||||
emqx_packets_publish_inuse,
|
||||
emqx_packets_publish_error,
|
||||
emqx_packets_publish_auth_error,
|
||||
emqx_packets_publish_dropped,
|
||||
emqx_packets_puback_received,
|
||||
emqx_packets_puback_sent,
|
||||
emqx_packets_puback_inuse,
|
||||
emqx_packets_puback_missed,
|
||||
emqx_packets_pubrec_received,
|
||||
emqx_packets_pubrec_sent,
|
||||
emqx_packets_pubrec_inuse,
|
||||
emqx_packets_pubrec_missed,
|
||||
emqx_packets_pubrel_received,
|
||||
emqx_packets_pubrel_sent,
|
||||
emqx_packets_pubrel_missed,
|
||||
emqx_packets_pubcomp_received,
|
||||
emqx_packets_pubcomp_sent,
|
||||
emqx_packets_pubcomp_inuse,
|
||||
emqx_packets_pubcomp_missed,
|
||||
emqx_packets_subscribe_received,
|
||||
emqx_packets_subscribe_error,
|
||||
emqx_packets_subscribe_auth_error,
|
||||
emqx_packets_suback_sent,
|
||||
emqx_packets_unsubscribe_received,
|
||||
emqx_packets_unsubscribe_error,
|
||||
emqx_packets_unsuback_sent,
|
||||
emqx_packets_pingreq_received,
|
||||
emqx_packets_pingresp_sent,
|
||||
emqx_packets_disconnect_received,
|
||||
emqx_packets_disconnect_sent,
|
||||
emqx_packets_auth_received,
|
||||
emqx_packets_auth_sent
|
||||
].
|
||||
|
||||
emqx_metrics_messages() ->
|
||||
[ emqx_messages_received
|
||||
, emqx_messages_sent
|
||||
, emqx_messages_qos0_received
|
||||
, emqx_messages_qos0_sent
|
||||
, emqx_messages_qos1_received
|
||||
, emqx_messages_qos1_sent
|
||||
, emqx_messages_qos2_received
|
||||
, emqx_messages_qos2_sent
|
||||
, emqx_messages_publish
|
||||
, emqx_messages_dropped
|
||||
, emqx_messages_dropped_expired
|
||||
, emqx_messages_dropped_no_subscribers
|
||||
, emqx_messages_forward
|
||||
, emqx_messages_retained
|
||||
, emqx_messages_delayed
|
||||
, emqx_messages_delivered
|
||||
, emqx_messages_acked
|
||||
[
|
||||
emqx_messages_received,
|
||||
emqx_messages_sent,
|
||||
emqx_messages_qos0_received,
|
||||
emqx_messages_qos0_sent,
|
||||
emqx_messages_qos1_received,
|
||||
emqx_messages_qos1_sent,
|
||||
emqx_messages_qos2_received,
|
||||
emqx_messages_qos2_sent,
|
||||
emqx_messages_publish,
|
||||
emqx_messages_dropped,
|
||||
emqx_messages_dropped_expired,
|
||||
emqx_messages_dropped_no_subscribers,
|
||||
emqx_messages_forward,
|
||||
emqx_messages_retained,
|
||||
emqx_messages_delayed,
|
||||
emqx_messages_delivered,
|
||||
emqx_messages_acked
|
||||
].
|
||||
|
||||
emqx_metrics_delivery() ->
|
||||
[ emqx_delivery_dropped
|
||||
, emqx_delivery_dropped_no_local
|
||||
, emqx_delivery_dropped_too_large
|
||||
, emqx_delivery_dropped_qos0_msg
|
||||
, emqx_delivery_dropped_queue_full
|
||||
, emqx_delivery_dropped_expired
|
||||
[
|
||||
emqx_delivery_dropped,
|
||||
emqx_delivery_dropped_no_local,
|
||||
emqx_delivery_dropped_too_large,
|
||||
emqx_delivery_dropped_qos0_msg,
|
||||
emqx_delivery_dropped_queue_full,
|
||||
emqx_delivery_dropped_expired
|
||||
].
|
||||
|
||||
emqx_metrics_client() ->
|
||||
[ emqx_client_connected
|
||||
, emqx_client_authenticate
|
||||
, emqx_client_auth_anonymous
|
||||
, emqx_client_authorize
|
||||
, emqx_client_subscribe
|
||||
, emqx_client_unsubscribe
|
||||
, emqx_client_disconnected
|
||||
[
|
||||
emqx_client_connected,
|
||||
emqx_client_authenticate,
|
||||
emqx_client_auth_anonymous,
|
||||
emqx_client_authorize,
|
||||
emqx_client_subscribe,
|
||||
emqx_client_unsubscribe,
|
||||
emqx_client_disconnected
|
||||
].
|
||||
|
||||
emqx_metrics_session() ->
|
||||
[ emqx_session_created
|
||||
, emqx_session_resumed
|
||||
, emqx_session_takenover
|
||||
, emqx_session_discarded
|
||||
, emqx_session_terminated
|
||||
[
|
||||
emqx_session_created,
|
||||
emqx_session_resumed,
|
||||
emqx_session_takenover,
|
||||
emqx_session_discarded,
|
||||
emqx_session_terminated
|
||||
].
|
||||
|
||||
emqx_vm() ->
|
||||
[ emqx_vm_cpu_use
|
||||
, emqx_vm_cpu_idle
|
||||
, emqx_vm_run_queue
|
||||
, emqx_vm_process_messages_in_queues
|
||||
, emqx_vm_total_memory
|
||||
, emqx_vm_used_memory
|
||||
[
|
||||
emqx_vm_cpu_use,
|
||||
emqx_vm_cpu_idle,
|
||||
emqx_vm_run_queue,
|
||||
emqx_vm_process_messages_in_queues,
|
||||
emqx_vm_total_memory,
|
||||
emqx_vm_used_memory
|
||||
].
|
||||
|
||||
emqx_vm_data() ->
|
||||
Idle = case cpu_sup:util([detailed]) of
|
||||
{_, 0, 0, _} -> 0; %% Not support for Windows
|
||||
{_Num, _Use, IdleList, _} -> ?C(idle, IdleList)
|
||||
end,
|
||||
Idle =
|
||||
case cpu_sup:util([detailed]) of
|
||||
%% Not support for Windows
|
||||
{_, 0, 0, _} -> 0;
|
||||
{_Num, _Use, IdleList, _} -> ?C(idle, IdleList)
|
||||
end,
|
||||
RunQueue = erlang:statistics(run_queue),
|
||||
[{run_queue, RunQueue},
|
||||
{process_total_messages, 0}, %% XXX: Plan removed at v5.0
|
||||
{cpu_idle, Idle},
|
||||
{cpu_use, 100 - Idle}] ++ emqx_vm:mem_info().
|
||||
[
|
||||
{run_queue, RunQueue},
|
||||
%% XXX: Plan removed at v5.0
|
||||
{process_total_messages, 0},
|
||||
{cpu_idle, Idle},
|
||||
{cpu_use, 100 - Idle}
|
||||
] ++ emqx_vm:mem_info().
|
||||
|
||||
emqx_cluster() ->
|
||||
[ emqx_cluster_nodes_running
|
||||
, emqx_cluster_nodes_stopped
|
||||
[
|
||||
emqx_cluster_nodes_running,
|
||||
emqx_cluster_nodes_stopped
|
||||
].
|
||||
|
||||
emqx_cluster_data() ->
|
||||
#{running_nodes := Running, stopped_nodes := Stopped} = mria_mnesia:cluster_info(),
|
||||
[{nodes_running, length(Running)},
|
||||
{nodes_stopped, length(Stopped)}].
|
||||
[
|
||||
{nodes_running, length(Running)},
|
||||
{nodes_stopped, length(Stopped)}
|
||||
].
|
||||
|
|
|
|||
|
|
@ -22,14 +22,16 @@
|
|||
|
||||
-import(hoconsc, [ref/2]).
|
||||
|
||||
-export([ api_spec/0
|
||||
, paths/0
|
||||
, schema/1
|
||||
]).
|
||||
-export([
|
||||
api_spec/0,
|
||||
paths/0,
|
||||
schema/1
|
||||
]).
|
||||
|
||||
-export([ prometheus/2
|
||||
, stats/2
|
||||
]).
|
||||
-export([
|
||||
prometheus/2,
|
||||
stats/2
|
||||
]).
|
||||
|
||||
-define(SCHEMA_MODULE, emqx_prometheus_schema).
|
||||
|
||||
|
|
@ -37,32 +39,38 @@ api_spec() ->
|
|||
emqx_dashboard_swagger:spec(?MODULE, #{check_schema => true}).
|
||||
|
||||
paths() ->
|
||||
[ "/prometheus"
|
||||
, "/prometheus/stats"
|
||||
[
|
||||
"/prometheus",
|
||||
"/prometheus/stats"
|
||||
].
|
||||
|
||||
schema("/prometheus") ->
|
||||
#{ 'operationId' => prometheus
|
||||
, get =>
|
||||
#{ description => <<"Get Prometheus config info">>
|
||||
, responses =>
|
||||
#{200 => prometheus_config_schema()}
|
||||
#{
|
||||
'operationId' => prometheus,
|
||||
get =>
|
||||
#{
|
||||
description => <<"Get Prometheus config info">>,
|
||||
responses =>
|
||||
#{200 => prometheus_config_schema()}
|
||||
},
|
||||
put =>
|
||||
#{
|
||||
description => <<"Update Prometheus config">>,
|
||||
'requestBody' => prometheus_config_schema(),
|
||||
responses =>
|
||||
#{200 => prometheus_config_schema()}
|
||||
}
|
||||
, put =>
|
||||
#{ description => <<"Update Prometheus config">>
|
||||
, 'requestBody' => prometheus_config_schema()
|
||||
, responses =>
|
||||
#{200 => prometheus_config_schema()}
|
||||
}
|
||||
};
|
||||
};
|
||||
schema("/prometheus/stats") ->
|
||||
#{ 'operationId' => stats
|
||||
, get =>
|
||||
#{ description => <<"Get Prometheus Data">>
|
||||
, responses =>
|
||||
#{200 => prometheus_data_schema()}
|
||||
#{
|
||||
'operationId' => stats,
|
||||
get =>
|
||||
#{
|
||||
description => <<"Get Prometheus Data">>,
|
||||
responses =>
|
||||
#{200 => prometheus_data_schema()}
|
||||
}
|
||||
}.
|
||||
}.
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% API Handler funcs
|
||||
|
|
@ -70,7 +78,6 @@ schema("/prometheus/stats") ->
|
|||
|
||||
prometheus(get, _Params) ->
|
||||
{200, emqx:get_raw_config([<<"prometheus">>], #{})};
|
||||
|
||||
prometheus(put, #{body := Body}) ->
|
||||
case emqx_prometheus:update(Body) of
|
||||
{ok, NewConfig} ->
|
||||
|
|
@ -100,21 +107,25 @@ stats(get, #{headers := Headers}) ->
|
|||
|
||||
prometheus_config_schema() ->
|
||||
emqx_dashboard_swagger:schema_with_example(
|
||||
ref(?SCHEMA_MODULE, "prometheus"),
|
||||
prometheus_config_example()).
|
||||
ref(?SCHEMA_MODULE, "prometheus"),
|
||||
prometheus_config_example()
|
||||
).
|
||||
|
||||
prometheus_config_example() ->
|
||||
#{ enable => true
|
||||
, interval => "15s"
|
||||
, push_gateway_server => <<"http://127.0.0.1:9091">>
|
||||
}.
|
||||
#{
|
||||
enable => true,
|
||||
interval => "15s",
|
||||
push_gateway_server => <<"http://127.0.0.1:9091">>
|
||||
}.
|
||||
|
||||
prometheus_data_schema() ->
|
||||
#{ description => <<"Get Prometheus Data">>
|
||||
, content =>
|
||||
#{ 'application/json' =>
|
||||
#{schema => #{type => object}}
|
||||
, 'text/plain' =>
|
||||
#{schema => #{type => string}}
|
||||
#{
|
||||
description => <<"Get Prometheus Data">>,
|
||||
content =>
|
||||
#{
|
||||
'application/json' =>
|
||||
#{schema => #{type => object}},
|
||||
'text/plain' =>
|
||||
#{schema => #{type => string}}
|
||||
}
|
||||
}.
|
||||
}.
|
||||
|
|
|
|||
|
|
@ -21,9 +21,10 @@
|
|||
-include("emqx_prometheus.hrl").
|
||||
|
||||
%% Application callbacks
|
||||
-export([ start/2
|
||||
, stop/1
|
||||
]).
|
||||
-export([
|
||||
start/2,
|
||||
stop/1
|
||||
]).
|
||||
|
||||
start(_StartType, _StartArgs) ->
|
||||
{ok, Sup} = emqx_prometheus_sup:start_link(),
|
||||
|
|
|
|||
|
|
@@ -15,9 +15,10 @@
%%--------------------------------------------------------------------
-module(emqx_prometheus_mria).

-export([deregister_cleanup/1,
         collect_mf/2
        ]).
-export([
    deregister_cleanup/1,
    collect_mf/2
]).

-include_lib("prometheus/include/prometheus.hrl").
@@ -43,39 +44,45 @@ deregister_cleanup(_) -> ok.
    _Registry :: prometheus_registry:registry(),
    Callback :: prometheus_collector:callback().
collect_mf(_Registry, Callback) ->
  case mria_rlog:backend() of
      rlog ->
          Metrics = metrics(),
          _ = [add_metric_family(Metric, Callback) || Metric <- Metrics],
          ok;
      mnesia ->
          ok
  end.
    case mria_rlog:backend() of
        rlog ->
            Metrics = metrics(),
            _ = [add_metric_family(Metric, Callback) || Metric <- Metrics],
            ok;
        mnesia ->
            ok
    end.

add_metric_family({Name, Metrics}, Callback) ->
    Callback(prometheus_model_helpers:create_mf( ?METRIC_NAME(Name)
                                               , <<"">>
                                               , gauge
                                               , catch_all(Metrics)
                                               )).
    Callback(
        prometheus_model_helpers:create_mf(
            ?METRIC_NAME(Name),
            <<"">>,
            gauge,
            catch_all(Metrics)
        )
    ).

%%====================================================================
%% Internal functions
%%====================================================================

metrics() ->
    Metrics = case mria_rlog:role() of
                  replicant ->
                      [lag, bootstrap_time, bootstrap_num_keys, message_queue_len, replayq_len];
                  core ->
                      [last_intercepted_trans, weight, replicants, server_mql]
              end,
    Metrics =
        case mria_rlog:role() of
            replicant ->
                [lag, bootstrap_time, bootstrap_num_keys, message_queue_len, replayq_len];
            core ->
                [last_intercepted_trans, weight, replicants, server_mql]
        end,
    [{MetricId, fun() -> get_shard_metric(MetricId) end} || MetricId <- Metrics].

get_shard_metric(Metric) ->
    %% TODO: only report shards that are up
    [{[{shard, Shard}], get_shard_metric(Metric, Shard)} ||
        Shard <- mria_schema:shards(), Shard =/= undefined].
    [
        {[{shard, Shard}], get_shard_metric(Metric, Shard)}
     || Shard <- mria_schema:shards(), Shard =/= undefined
    ].

get_shard_metric(replicants, Shard) ->
    length(mria_status:agents(Shard));

@@ -88,6 +95,8 @@ get_shard_metric(Metric, Shard) ->
    end.

catch_all(DataFun) ->
    try DataFun()
    catch _:_ -> undefined
    try
        DataFun()
    catch
        _:_ -> undefined
    end.

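catch_all/1 above turns any crash inside a metric fun into the value 'undefined', so one failing shard lookup cannot take the whole Prometheus collector down. Illustrative usage, mirroring how metrics() builds its funs:

%% Returns the per-shard lag metrics, or 'undefined' if the lookup crashes.
safe_lag_metric() ->
    catch_all(fun() -> get_shard_metric(lag) end).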
@@ -20,11 +20,12 @@

-behaviour(hocon_schema).

-export([ namespace/0
        , roots/0
        , fields/1
        , desc/1
        ]).
-export([
    namespace/0,
    roots/0,
    fields/1,
    desc/1
]).

namespace() -> "prometheus".

@@ -32,25 +33,36 @@ roots() -> ["prometheus"].

fields("prometheus") ->
    [
        {push_gateway_server, sc(string(),
            #{ default => "http://127.0.0.1:9091"
             , required => true
             , desc => ?DESC(push_gateway_server)
             })},
        {interval, sc(emqx_schema:duration_ms(),
            #{ default => "15s"
             , required => true
             , desc => ?DESC(interval)
             })},
        {enable, sc(boolean(),
            #{ default => false
             , required => true
             , desc => ?DESC(enable)
             })}
        {push_gateway_server,
            sc(
                string(),
                #{
                    default => "http://127.0.0.1:9091",
                    required => true,
                    desc => ?DESC(push_gateway_server)
                }
            )},
        {interval,
            sc(
                emqx_schema:duration_ms(),
                #{
                    default => "15s",
                    required => true,
                    desc => ?DESC(interval)
                }
            )},
        {enable,
            sc(
                boolean(),
                #{
                    default => false,
                    required => true,
                    desc => ?DESC(enable)
                }
            )}
    ].

desc("prometheus") -> ?DESC(prometheus);
desc(_) ->
    undefined.
desc(_) -> undefined.

sc(Type, Meta) -> hoconsc:mk(Type, Meta).
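A configuration accepted by this schema, written in HOCON, would look like the following; the values shown are the defaults declared above (illustrative, not an additional example from the source tree):

prometheus {
    push_gateway_server = "http://127.0.0.1:9091"
    interval = "15s"
    enable = false
}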
@@ -18,21 +18,24 @@

-behaviour(supervisor).

-export([ start_link/0
        , start_child/1
        , start_child/2
        , stop_child/1
        ]).
-export([
    start_link/0,
    start_child/1,
    start_child/2,
    stop_child/1
]).

-export([init/1]).

%% Helper macro for declaring children of supervisor
-define(CHILD(Mod, Opts), #{id => Mod,
                            start => {Mod, start_link, [Opts]},
                            restart => permanent,
                            shutdown => 5000,
                            type => worker,
                            modules => [Mod]}).
-define(CHILD(Mod, Opts), #{
    id => Mod,
    start => {Mod, start_link, [Opts]},
    restart => permanent,
    shutdown => 5000,
    type => worker,
    modules => [Mod]
}).

start_link() ->
    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
@@ -45,7 +48,7 @@ start_child(ChildSpec) when is_map(ChildSpec) ->
start_child(Mod, Opts) when is_atom(Mod) andalso is_map(Opts) ->
    assert_started(supervisor:start_child(?MODULE, ?CHILD(Mod, Opts))).

-spec(stop_child(any()) -> ok | {error, term()}).
-spec stop_child(any()) -> ok | {error, term()}.
stop_child(ChildId) ->
    case supervisor:terminate_child(?MODULE, ChildId) of
        ok -> supervisor:delete_child(?MODULE, ChildId);

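An illustrative sketch of how the ?CHILD/2 macro above is typically consumed from init/1; the child list and the options map here are assumptions for clarity, not part of this hunk:

%% Hypothetical init/1: one permanent worker declared via ?CHILD/2.
init([]) ->
    SupFlags = #{strategy => one_for_one, intensity => 10, period => 100},
    {ok, {SupFlags, [?CHILD(emqx_prometheus, #{interval => 15000})]}}.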
@ -18,11 +18,12 @@
|
|||
|
||||
-behaviour(emqx_bpapi).
|
||||
|
||||
-export([ introduced_in/0
|
||||
-export([
|
||||
introduced_in/0,
|
||||
|
||||
, start/1
|
||||
, stop/1
|
||||
]).
|
||||
start/1,
|
||||
stop/1
|
||||
]).
|
||||
|
||||
-include_lib("emqx/include/bpapi.hrl").
|
||||
|
||||
|
|
|
|||
|
|
@ -22,13 +22,14 @@
|
|||
-compile(export_all).
|
||||
|
||||
-define(CLUSTER_RPC_SHARD, emqx_cluster_rpc_shard).
|
||||
-define(CONF_DEFAULT, <<"
|
||||
prometheus {
|
||||
push_gateway_server = \"http://127.0.0.1:9091\"
|
||||
interval = \"1s\"
|
||||
enable = true
|
||||
}
|
||||
">>).
|
||||
-define(CONF_DEFAULT, <<
|
||||
"\n"
|
||||
"prometheus {\n"
|
||||
" push_gateway_server = \"http://127.0.0.1:9091\"\n"
|
||||
" interval = \"1s\"\n"
|
||||
" enable = true\n"
|
||||
"}\n"
|
||||
>>).
|
||||
|
||||
%%--------------------------------------------------------------------
|
||||
%% Setups
|
||||
|
|
|
|||
|
|
@ -67,9 +67,14 @@ t_prometheus_api(_) ->
|
|||
{ok, Response} = emqx_mgmt_api_test_util:request_api(get, Path, "", Auth),
|
||||
|
||||
Conf = emqx_json:decode(Response, [return_maps]),
|
||||
?assertMatch(#{<<"push_gateway_server">> := _,
|
||||
<<"interval">> := _,
|
||||
<<"enable">> := _}, Conf),
|
||||
?assertMatch(
|
||||
#{
|
||||
<<"push_gateway_server">> := _,
|
||||
<<"interval">> := _,
|
||||
<<"enable">> := _
|
||||
},
|
||||
Conf
|
||||
),
|
||||
|
||||
NewConf = Conf#{<<"interval">> := <<"2s">>},
|
||||
{ok, Response2} = emqx_mgmt_api_test_util:request_api(put, Path, "", Auth, NewConf),
|
||||
|
|
|
|||
|
|
@ -30,12 +30,13 @@
|
|||
}.
|
||||
-type resource_group() :: binary().
|
||||
-type create_opts() :: #{
|
||||
health_check_interval => integer(),
|
||||
health_check_timeout => integer(),
|
||||
waiting_connect_complete => integer()
|
||||
}.
|
||||
-type after_query() :: {[OnSuccess :: after_query_fun()], [OnFailed :: after_query_fun()]} |
|
||||
undefined.
|
||||
health_check_interval => integer(),
|
||||
health_check_timeout => integer(),
|
||||
waiting_connect_complete => integer()
|
||||
}.
|
||||
-type after_query() ::
|
||||
{[OnSuccess :: after_query_fun()], [OnFailed :: after_query_fun()]}
|
||||
| undefined.
|
||||
|
||||
%% the `after_query_fun()` is mainly for callbacks that increment counters or do some fallback
|
||||
%% actions upon query failure
|
||||
|
|
|
|||
|
|
@@ -15,13 +15,17 @@
%%--------------------------------------------------------------------

-define(SAFE_CALL(_EXP_),
        ?SAFE_CALL(_EXP_, ok)).
    ?SAFE_CALL(_EXP_, ok)
).

-define(SAFE_CALL(_EXP_, _EXP_ON_FAIL_),
        fun() ->
            try (_EXP_)
            catch _EXCLASS_:_EXCPTION_:_ST_ ->
    fun() ->
        try
            (_EXP_)
        catch
            _EXCLASS_:_EXCPTION_:_ST_ ->
                _EXP_ON_FAIL_,
                {error, {_EXCLASS_, _EXCPTION_, _ST_}}
        end
        end()).
    end
    end()
).
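?SAFE_CALL/1,2 wraps an expression in an immediately applied fun, so a crash comes back as a value instead of propagating. Illustrative usage, matching emqx_resource:call_start/3 later in this diff; on an exception the macro evaluates to {error, {Class, Exception, Stacktrace}}:

%% The callback crash is captured and returned as an error tuple.
call_start(InstId, Mod, Config) ->
    ?SAFE_CALL(Mod:on_start(InstId, Config)).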
@ -1,9 +1,10 @@
|
|||
%% -*- mode: erlang -*-
|
||||
|
||||
{erl_opts, [ debug_info
|
||||
, nowarn_unused_import
|
||||
%, {d, 'RESOURCE_DEBUG'}
|
||||
]}.
|
||||
{erl_opts, [
|
||||
debug_info,
|
||||
nowarn_unused_import
|
||||
%, {d, 'RESOURCE_DEBUG'}
|
||||
]}.
|
||||
|
||||
{erl_first_files, ["src/emqx_resource_transform.erl"]}.
|
||||
|
||||
|
|
@ -11,9 +12,11 @@
|
|||
|
||||
%% try to override the dialyzer 'race_conditions' defined in the top-level dir,
|
||||
%% but it doesn't work
|
||||
{dialyzer, [{warnings, [unmatched_returns, error_handling]}
|
||||
]}.
|
||||
{dialyzer, [{warnings, [unmatched_returns, error_handling]}]}.
|
||||
|
||||
{deps, [ {jsx, {git, "https://github.com/talentdeficit/jsx", {tag, "v3.1.0"}}}
|
||||
, {emqx, {path, "../emqx"}}
|
||||
]}.
|
||||
{deps, [
|
||||
{jsx, {git, "https://github.com/talentdeficit/jsx", {tag, "v3.1.0"}}},
|
||||
{emqx, {path, "../emqx"}}
|
||||
]}.
|
||||
|
||||
{project_plugins, [erlfmt]}.
|
||||
|
|
|
|||
|
|
@ -1,19 +1,19 @@
|
|||
%% -*- mode: erlang -*-
|
||||
{application, emqx_resource,
|
||||
[{description, "An OTP application"},
|
||||
{vsn, "0.1.0"},
|
||||
{registered, []},
|
||||
{mod, {emqx_resource_app, []}},
|
||||
{applications,
|
||||
[kernel,
|
||||
stdlib,
|
||||
gproc,
|
||||
jsx,
|
||||
emqx
|
||||
]},
|
||||
{env,[]},
|
||||
{modules, []},
|
||||
{application, emqx_resource, [
|
||||
{description, "An OTP application"},
|
||||
{vsn, "0.1.0"},
|
||||
{registered, []},
|
||||
{mod, {emqx_resource_app, []}},
|
||||
{applications, [
|
||||
kernel,
|
||||
stdlib,
|
||||
gproc,
|
||||
jsx,
|
||||
emqx
|
||||
]},
|
||||
{env, []},
|
||||
{modules, []},
|
||||
|
||||
{licenses, ["Apache 2.0"]},
|
||||
{links, []}
|
||||
]}.
|
||||
{licenses, ["Apache 2.0"]},
|
||||
{links, []}
|
||||
]}.
|
||||
|
|
|
|||
|
|
@ -25,66 +25,93 @@
|
|||
|
||||
%% APIs for behaviour implementations
|
||||
|
||||
-export([ query_success/1
|
||||
, query_failed/1
|
||||
]).
|
||||
-export([
|
||||
query_success/1,
|
||||
query_failed/1
|
||||
]).
|
||||
|
||||
%% APIs for instances
|
||||
|
||||
-export([ check_config/2
|
||||
, check_and_create/4
|
||||
, check_and_create/5
|
||||
, check_and_create_local/4
|
||||
, check_and_create_local/5
|
||||
, check_and_recreate/4
|
||||
, check_and_recreate_local/4
|
||||
]).
|
||||
-export([
|
||||
check_config/2,
|
||||
check_and_create/4,
|
||||
check_and_create/5,
|
||||
check_and_create_local/4,
|
||||
check_and_create_local/5,
|
||||
check_and_recreate/4,
|
||||
check_and_recreate_local/4
|
||||
]).
|
||||
|
||||
%% Sync resource instances and files
|
||||
%% provisional solution: rpc:multicall to all the nodes for creating/updating/removing
|
||||
%% todo: replicate operations
|
||||
-export([ create/4 %% store the config and start the instance
|
||||
, create/5
|
||||
, create_local/4
|
||||
, create_local/5
|
||||
, create_dry_run/2 %% run start/2, health_check/2 and stop/1 sequentially
|
||||
, create_dry_run_local/2
|
||||
, recreate/4 %% this will do create_dry_run, stop the old instance and start a new one
|
||||
, recreate_local/4
|
||||
, remove/1 %% remove the config and stop the instance
|
||||
, remove_local/1
|
||||
, reset_metrics/1
|
||||
, reset_metrics_local/1
|
||||
]).
|
||||
|
||||
%% store the config and start the instance
|
||||
-export([
|
||||
create/4,
|
||||
create/5,
|
||||
create_local/4,
|
||||
create_local/5,
|
||||
%% run start/2, health_check/2 and stop/1 sequentially
|
||||
create_dry_run/2,
|
||||
create_dry_run_local/2,
|
||||
%% this will do create_dry_run, stop the old instance and start a new one
|
||||
recreate/4,
|
||||
recreate_local/4,
|
||||
%% remove the config and stop the instance
|
||||
remove/1,
|
||||
remove_local/1,
|
||||
reset_metrics/1,
|
||||
reset_metrics_local/1
|
||||
]).
|
||||
|
||||
%% Calls to the callback module with current resource state
|
||||
%% They also save the state after the call finished (except query/2,3).
|
||||
-export([ restart/1 %% restart the instance.
|
||||
, restart/2
|
||||
, health_check/1 %% verify if the resource is working normally
|
||||
, set_resource_status_connecting/1 %% set resource status to disconnected
|
||||
, stop/1 %% stop the instance
|
||||
, query/2 %% query the instance
|
||||
, query/3 %% query the instance with after_query()
|
||||
]).
|
||||
|
||||
%% restart the instance.
|
||||
-export([
|
||||
restart/1,
|
||||
restart/2,
|
||||
%% verify if the resource is working normally
|
||||
health_check/1,
|
||||
%% set resource status to disconnected
|
||||
set_resource_status_connecting/1,
|
||||
%% stop the instance
|
||||
stop/1,
|
||||
%% query the instance
|
||||
query/2,
|
||||
%% query the instance with after_query()
|
||||
query/3
|
||||
]).
|
||||
|
||||
%% Direct calls to the callback module
|
||||
-export([ call_start/3 %% start the instance
|
||||
, call_health_check/3 %% verify if the resource is working normally
|
||||
, call_stop/3 %% stop the instance
|
||||
]).
|
||||
|
||||
-export([ list_instances/0 %% list all the instances, id only.
|
||||
, list_instances_verbose/0 %% list all the instances
|
||||
, get_instance/1 %% return the data of the instance
|
||||
, list_instances_by_type/1 %% return all the instances of the same resource type
|
||||
, generate_id/1
|
||||
, list_group_instances/1
|
||||
]).
|
||||
%% start the instance
|
||||
-export([
|
||||
call_start/3,
|
||||
%% verify if the resource is working normally
|
||||
call_health_check/3,
|
||||
%% stop the instance
|
||||
call_stop/3
|
||||
]).
|
||||
|
||||
-optional_callbacks([ on_query/4
|
||||
, on_health_check/2
|
||||
]).
|
||||
%% list all the instances, id only.
|
||||
-export([
|
||||
list_instances/0,
|
||||
%% list all the instances
|
||||
list_instances_verbose/0,
|
||||
%% return the data of the instance
|
||||
get_instance/1,
|
||||
%% return all the instances of the same resource type
|
||||
list_instances_by_type/1,
|
||||
generate_id/1,
|
||||
list_group_instances/1
|
||||
]).
|
||||
|
||||
-optional_callbacks([
|
||||
on_query/4,
|
||||
on_health_check/2
|
||||
]).
|
||||
|
||||
%% when calling emqx_resource:start/1
|
||||
-callback on_start(instance_id(), resource_config()) ->
|
||||
|
|
@ -98,7 +125,7 @@
|
|||
|
||||
%% when calling emqx_resource:health_check/2
|
||||
-callback on_health_check(instance_id(), resource_state()) ->
|
||||
{ok, resource_state()} | {error, Reason:: term(), resource_state()}.
|
||||
{ok, resource_state()} | {error, Reason :: term(), resource_state()}.
|
||||
|
||||
-spec list_types() -> [module()].
|
||||
list_types() ->
|
||||
|
|
@ -111,24 +138,26 @@ discover_resource_mods() ->
|
|||
-spec is_resource_mod(module()) -> boolean().
|
||||
is_resource_mod(Module) ->
|
||||
Info = Module:module_info(attributes),
|
||||
Behaviour = proplists:get_value(behavior, Info, []) ++
|
||||
proplists:get_value(behaviour, Info, []),
|
||||
Behaviour =
|
||||
proplists:get_value(behavior, Info, []) ++
|
||||
proplists:get_value(behaviour, Info, []),
|
||||
lists:member(?MODULE, Behaviour).
|
||||
|
||||
-spec query_success(after_query()) -> ok.
|
||||
query_success(undefined) -> ok;
|
||||
query_success({OnSucc, _}) ->
|
||||
apply_query_after_calls(OnSucc).
|
||||
query_success({OnSucc, _}) -> apply_query_after_calls(OnSucc).
|
||||
|
||||
-spec query_failed(after_query()) -> ok.
|
||||
query_failed(undefined) -> ok;
|
||||
query_failed({_, OnFailed}) ->
|
||||
apply_query_after_calls(OnFailed).
|
||||
query_failed({_, OnFailed}) -> apply_query_after_calls(OnFailed).
|
||||
|
||||
apply_query_after_calls(Funcs) ->
|
||||
lists:foreach(fun({Fun, Args}) ->
|
||||
lists:foreach(
|
||||
fun({Fun, Args}) ->
|
||||
safe_apply(Fun, Args)
|
||||
end, Funcs).
|
||||
end,
|
||||
Funcs
|
||||
).
|
||||
|
||||
%% =================================================================================
|
||||
%% APIs for resource instances
|
||||
|
|
@ -149,11 +178,13 @@ create(InstId, Group, ResourceType, Config, Opts) ->
|
|||
create_local(InstId, Group, ResourceType, Config) ->
|
||||
create_local(InstId, Group, ResourceType, Config, #{}).
|
||||
|
||||
-spec create_local(instance_id(),
|
||||
resource_group(),
|
||||
resource_type(),
|
||||
resource_config(),
|
||||
create_opts()) ->
|
||||
-spec create_local(
|
||||
instance_id(),
|
||||
resource_group(),
|
||||
resource_type(),
|
||||
resource_config(),
|
||||
create_opts()
|
||||
) ->
|
||||
{ok, resource_data() | 'already_created'} | {error, Reason :: term()}.
|
||||
create_local(InstId, Group, ResourceType, Config, Opts) ->
|
||||
call_instance(InstId, {create, InstId, Group, ResourceType, Config, Opts}).
|
||||
|
|
@ -206,19 +237,25 @@ query(InstId, Request) ->
|
|||
query(InstId, Request, AfterQuery) ->
|
||||
case get_instance(InstId) of
|
||||
{ok, _Group, #{status := connecting}} ->
|
||||
query_error(connecting, <<"cannot serve query when the resource "
|
||||
"instance is still connecting">>);
|
||||
query_error(connecting, <<
|
||||
"cannot serve query when the resource "
|
||||
"instance is still connecting"
|
||||
>>);
|
||||
{ok, _Group, #{status := disconnected}} ->
|
||||
query_error(disconnected, <<"cannot serve query when the resource "
|
||||
"instance is disconnected">>);
|
||||
query_error(disconnected, <<
|
||||
"cannot serve query when the resource "
|
||||
"instance is disconnected"
|
||||
>>);
|
||||
{ok, _Group, #{mod := Mod, state := ResourceState, status := connected}} ->
|
||||
%% the resource state is readonly to Module:on_query/4
|
||||
%% and the `after_query()` functions should be thread safe
|
||||
ok = emqx_plugin_libs_metrics:inc(resource_metrics, InstId, matched),
|
||||
try Mod:on_query(InstId, Request, AfterQuery, ResourceState)
|
||||
catch Err:Reason:ST ->
|
||||
emqx_plugin_libs_metrics:inc(resource_metrics, InstId, exception),
|
||||
erlang:raise(Err, Reason, ST)
|
||||
try
|
||||
Mod:on_query(InstId, Request, AfterQuery, ResourceState)
|
||||
catch
|
||||
Err:Reason:ST ->
|
||||
emqx_plugin_libs_metrics:inc(resource_metrics, InstId, exception),
|
||||
erlang:raise(Err, Reason, ST)
|
||||
end;
|
||||
{error, not_found} ->
|
||||
query_error(not_found, <<"the resource id not exists">>)
|
||||
|
|
@ -258,9 +295,10 @@ list_instances_verbose() ->
|
|||
|
||||
-spec list_instances_by_type(module()) -> [instance_id()].
|
||||
list_instances_by_type(ResourceType) ->
|
||||
filter_instances(fun(_, RT) when RT =:= ResourceType -> true;
|
||||
(_, _) -> false
|
||||
end).
|
||||
filter_instances(fun
|
||||
(_, RT) when RT =:= ResourceType -> true;
|
||||
(_, _) -> false
|
||||
end).
|
||||
|
||||
-spec generate_id(term()) -> instance_id().
|
||||
generate_id(Name) when is_binary(Name) ->
|
||||
|
|
@ -276,7 +314,9 @@ call_start(InstId, Mod, Config) ->
|
|||
?SAFE_CALL(Mod:on_start(InstId, Config)).
|
||||
|
||||
-spec call_health_check(instance_id(), module(), resource_state()) ->
|
||||
{ok, resource_state()} | {error, Reason:: term()} | {error, Reason:: term(), resource_state()}.
|
||||
{ok, resource_state()}
|
||||
| {error, Reason :: term()}
|
||||
| {error, Reason :: term(), resource_state()}.
|
||||
call_health_check(InstId, Mod, ResourceState) ->
|
||||
?SAFE_CALL(Mod:on_health_check(InstId, ResourceState)).
|
||||
|
||||
|
|
@@ -289,58 +329,82 @@ call_stop(InstId, Mod, ResourceState) ->
 check_config(ResourceType, Conf) ->
     emqx_hocon:check(ResourceType, Conf).

--spec check_and_create(instance_id(),
-                       resource_group(),
-                       resource_type(),
-                       raw_resource_config()) ->
+-spec check_and_create(
+    instance_id(),
+    resource_group(),
+    resource_type(),
+    raw_resource_config()
+) ->
     {ok, resource_data() | 'already_created'} | {error, term()}.
 check_and_create(InstId, Group, ResourceType, RawConfig) ->
     check_and_create(InstId, Group, ResourceType, RawConfig, #{}).

--spec check_and_create(instance_id(),
-                       resource_group(),
-                       resource_type(),
-                       raw_resource_config(),
-                       create_opts()) ->
+-spec check_and_create(
+    instance_id(),
+    resource_group(),
+    resource_type(),
+    raw_resource_config(),
+    create_opts()
+) ->
     {ok, resource_data() | 'already_created'} | {error, term()}.
 check_and_create(InstId, Group, ResourceType, RawConfig, Opts) ->
-    check_and_do(ResourceType, RawConfig,
-        fun(InstConf) -> create(InstId, Group, ResourceType, InstConf, Opts) end).
+    check_and_do(
+        ResourceType,
+        RawConfig,
+        fun(InstConf) -> create(InstId, Group, ResourceType, InstConf, Opts) end
+    ).

--spec check_and_create_local(instance_id(),
-                             resource_group(),
-                             resource_type(),
-                             raw_resource_config()) ->
+-spec check_and_create_local(
+    instance_id(),
+    resource_group(),
+    resource_type(),
+    raw_resource_config()
+) ->
     {ok, resource_data()} | {error, term()}.
 check_and_create_local(InstId, Group, ResourceType, RawConfig) ->
     check_and_create_local(InstId, Group, ResourceType, RawConfig, #{}).

--spec check_and_create_local(instance_id(),
-                             resource_group(),
-                             resource_type(),
-                             raw_resource_config(),
-                             create_opts()) -> {ok, resource_data()} | {error, term()}.
+-spec check_and_create_local(
+    instance_id(),
+    resource_group(),
+    resource_type(),
+    raw_resource_config(),
+    create_opts()
+) -> {ok, resource_data()} | {error, term()}.
 check_and_create_local(InstId, Group, ResourceType, RawConfig, Opts) ->
-    check_and_do(ResourceType, RawConfig,
-        fun(InstConf) -> create_local(InstId, Group, ResourceType, InstConf, Opts) end).
+    check_and_do(
+        ResourceType,
+        RawConfig,
+        fun(InstConf) -> create_local(InstId, Group, ResourceType, InstConf, Opts) end
+    ).

--spec check_and_recreate(instance_id(),
-                         resource_type(),
-                         raw_resource_config(),
-                         create_opts()) ->
+-spec check_and_recreate(
+    instance_id(),
+    resource_type(),
+    raw_resource_config(),
+    create_opts()
+) ->
     {ok, resource_data()} | {error, term()}.
 check_and_recreate(InstId, ResourceType, RawConfig, Opts) ->
-    check_and_do(ResourceType, RawConfig,
-        fun(InstConf) -> recreate(InstId, ResourceType, InstConf, Opts) end).
+    check_and_do(
+        ResourceType,
+        RawConfig,
+        fun(InstConf) -> recreate(InstId, ResourceType, InstConf, Opts) end
+    ).

--spec check_and_recreate_local(instance_id(),
-                               resource_type(),
-                               raw_resource_config(),
-                               create_opts()) ->
+-spec check_and_recreate_local(
+    instance_id(),
+    resource_type(),
+    raw_resource_config(),
+    create_opts()
+) ->
     {ok, resource_data()} | {error, term()}.
 check_and_recreate_local(InstId, ResourceType, RawConfig, Opts) ->
-    check_and_do(ResourceType, RawConfig,
-        fun(InstConf) -> recreate_local(InstId, ResourceType, InstConf, Opts) end).
+    check_and_do(
+        ResourceType,
+        RawConfig,
+        fun(InstConf) -> recreate_local(InstId, ResourceType, InstConf, Opts) end
+    ).

 check_and_do(ResourceType, RawConfig, Do) when is_function(Do) ->
     case check_config(ResourceType, RawConfig) of
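Review note: every check_and_* wrapper above funnels into check_and_do/3, whose head is visible at the end of this hunk: validate the raw config first, then hand the checked config to the continuation. The body is cut off by the hunk boundary; a plausible shape, inferred from the visible head and the specs (the error branch is an assumption):

    check_and_do(ResourceType, RawConfig, Do) when is_function(Do) ->
        case check_config(ResourceType, RawConfig) of
            {ok, InstConf} -> Do(InstConf);
            Error -> Error
        end.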
@@ -355,8 +419,7 @@ filter_instances(Filter) ->

 inc_metrics_funcs(InstId) ->
     OnFailed = [{fun emqx_plugin_libs_metrics:inc/3, [resource_metrics, InstId, failed]}],
-    OnSucc = [ {fun emqx_plugin_libs_metrics:inc/3, [resource_metrics, InstId, success]}
-             ],
+    OnSucc = [{fun emqx_plugin_libs_metrics:inc/3, [resource_metrics, InstId, success]}],
     {OnSucc, OnFailed}.

 call_instance(InstId, Query) ->
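Review note: inc_metrics_funcs/1 builds two lists of {Fun, Args} callback tuples, one to run on success and one on failure. Assuming the call site applies them with erlang:apply/2 (the call site is outside this hunk), a sketch of that application:

    %% Run every {Fun, Args} hook in a list.
    run_hooks(Hooks) ->
        lists:foreach(fun({Fun, Args}) -> erlang:apply(Fun, Args) end, Hooks).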
@@ -15,23 +15,29 @@
 %%--------------------------------------------------------------------
 -module(emqx_resource_health_check).

--export([ start_link/3
-        , create_checker/3
-        , delete_checker/1
-        ]).
+-export([
+    start_link/3,
+    create_checker/3,
+    delete_checker/1
+]).

--export([ start_health_check/3
-        , health_check_timeout_checker/4
-        ]).
+-export([
+    start_health_check/3,
+    health_check_timeout_checker/4
+]).

 -define(SUP, emqx_resource_health_check_sup).
 -define(ID(NAME), {resource_health_check, NAME}).

 child_spec(Name, Sleep, Timeout) ->
-    #{id => ?ID(Name),
-      start => {?MODULE, start_link, [Name, Sleep, Timeout]},
-      restart => transient,
-      shutdown => 5000, type => worker, modules => [?MODULE]}.
+    #{
+        id => ?ID(Name),
+        start => {?MODULE, start_link, [Name, Sleep, Timeout]},
+        restart => transient,
+        shutdown => 5000,
+        type => worker,
+        modules => [?MODULE]
+    }.

 start_link(Name, Sleep, Timeout) ->
     Pid = proc_lib:spawn_link(?MODULE, start_health_check, [Name, Sleep, Timeout]),
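Review note: the reformatted map is an ordinary supervisor child spec; restart => transient means the checker is restarted only after an abnormal exit. create_checker installs it via supervisor:start_child/2 in the next hunk; a usage sketch (the binary name and the millisecond values are made up, and the ok return is assumed from the clauses below):

    %% Probe <<"bridge1">> every 15 s; alarm if a probe exceeds 5 s.
    ok = emqx_resource_health_check:create_checker(<<"bridge1">>, 15000, 5000).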
@@ -42,19 +48,22 @@ create_checker(Name, Sleep, Timeout) ->

 create_checker(Name, Sleep, Retry, Timeout) ->
     case supervisor:start_child(?SUP, child_spec(Name, Sleep, Timeout)) of
-        {ok, _} -> ok;
-        {error, already_present} -> ok;
+        {ok, _} ->
+            ok;
+        {error, already_present} ->
+            ok;
         {error, {already_started, _}} when Retry == false ->
             ok = delete_checker(Name),
             create_checker(Name, Sleep, true, Timeout);
-        Error -> Error
+        Error ->
+            Error
     end.

 delete_checker(Name) ->
     case supervisor:terminate_child(?SUP, ?ID(Name)) of
         ok -> supervisor:delete_child(?SUP, ?ID(Name));
         Error -> Error
-    end.
+    end.

 start_health_check(Name, Sleep, Timeout) ->
     Pid = self(),
@@ -63,13 +72,16 @@ start_health_check(Name, Sleep, Timeout) ->

 health_check(Name) ->
     receive
-    {Pid, begin_health_check} ->
+        {Pid, begin_health_check} ->
             case emqx_resource:health_check(Name) of
                 ok ->
                     emqx_alarm:deactivate(Name);
                 {error, _} ->
-                    emqx_alarm:activate(Name, #{name => Name},
-                        <<Name/binary, " health check failed">>)
+                    emqx_alarm:activate(
+                        Name,
+                        #{name => Name},
+                        <<Name/binary, " health check failed">>
+                    )
             end,
             Pid ! health_check_finish
     end,
@@ -81,8 +93,11 @@ health_check_timeout_checker(Pid, Name, SleepTime, Timeout) ->
     receive
         health_check_finish -> timer:sleep(SleepTime)
     after Timeout ->
-        emqx_alarm:activate(Name, #{name => Name},
-            <<Name/binary, " health check timeout">>),
+        emqx_alarm:activate(
+            Name,
+            #{name => Name},
+            <<Name/binary, " health check timeout">>
+        ),
         emqx_resource:set_resource_status_connecting(Name),
         receive
             health_check_finish -> timer:sleep(SleepTime)
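Review note: the two loops cooperate over a small ping/pong protocol: the timeout checker sends {Pid, begin_health_check}, the checker replies with health_check_finish, and a missing reply within Timeout raises an alarm and flips the resource status back to connecting. A minimal standalone model of the requesting side (sketch; names illustrative):

    %% Ask Worker to run one health check and wait up to Timeout ms.
    request_check(Worker, Timeout) ->
        Worker ! {self(), begin_health_check},
        receive
            health_check_finish -> ok
        after Timeout -> timeout
        end.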
Some files were not shown because too many files have changed in this diff.